merge mozilla-inbound to mozilla-central a=merge

Author: Carsten "Tomcat" Book
Date:   2015-05-19 11:58:45 +02:00
Commit: 69c38ac58f
133 changed files with 3729 additions and 4604 deletions

View File

@ -6,8 +6,69 @@
#include "mozilla/BasePrincipal.h"
#include "nsIObjectInputStream.h"
#include "nsIObjectOutputStream.h"
#include "nsScriptSecurityManager.h"
namespace mozilla {
void
BasePrincipal::OriginAttributes::Serialize(nsIObjectOutputStream* aStream) const
{
aStream->Write32(mAppId);
aStream->WriteBoolean(mIsInBrowserElement);
}
nsresult
BasePrincipal::OriginAttributes::Deserialize(nsIObjectInputStream* aStream)
{
nsresult rv = aStream->Read32(&mAppId);
NS_ENSURE_SUCCESS(rv, rv);
rv = aStream->ReadBoolean(&mIsInBrowserElement);
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
bool
BasePrincipal::Subsumes(nsIPrincipal* aOther, DocumentDomainConsideration aConsideration)
{
MOZ_RELEASE_ASSERT(aOther, "The caller is performing a nonsensical security check!");
return SubsumesInternal(aOther, aConsideration);
}
NS_IMETHODIMP
BasePrincipal::Equals(nsIPrincipal *aOther, bool *aResult)
{
*aResult = Subsumes(aOther, DontConsiderDocumentDomain) &&
Cast(aOther)->Subsumes(this, DontConsiderDocumentDomain);
return NS_OK;
}
NS_IMETHODIMP
BasePrincipal::EqualsConsideringDomain(nsIPrincipal *aOther, bool *aResult)
{
*aResult = Subsumes(aOther, ConsiderDocumentDomain) &&
Cast(aOther)->Subsumes(this, ConsiderDocumentDomain);
return NS_OK;
}
NS_IMETHODIMP
BasePrincipal::Subsumes(nsIPrincipal *aOther, bool *aResult)
{
*aResult = Subsumes(aOther, DontConsiderDocumentDomain);
return NS_OK;
}
NS_IMETHODIMP
BasePrincipal::SubsumesConsideringDomain(nsIPrincipal *aOther, bool *aResult)
{
*aResult = Subsumes(aOther, ConsiderDocumentDomain);
return NS_OK;
}
NS_IMETHODIMP
BasePrincipal::GetCsp(nsIContentSecurityPolicy** aCsp)
{
@ -34,5 +95,53 @@ BasePrincipal::GetIsNullPrincipal(bool* aIsNullPrincipal)
return NS_OK;
}
NS_IMETHODIMP
BasePrincipal::GetJarPrefix(nsACString& aJarPrefix)
{
MOZ_ASSERT(AppId() != nsIScriptSecurityManager::UNKNOWN_APP_ID);
mozilla::GetJarPrefix(AppId(), IsInBrowserElement(), aJarPrefix);
return NS_OK;
}
NS_IMETHODIMP
BasePrincipal::GetAppStatus(uint16_t* aAppStatus)
{
if (AppId() == nsIScriptSecurityManager::UNKNOWN_APP_ID) {
NS_WARNING("Asking for app status on a principal with an unknown app id");
*aAppStatus = nsIPrincipal::APP_STATUS_NOT_INSTALLED;
return NS_OK;
}
*aAppStatus = nsScriptSecurityManager::AppStatusForPrincipal(this);
return NS_OK;
}
NS_IMETHODIMP
BasePrincipal::GetAppId(uint32_t* aAppId)
{
if (AppId() == nsIScriptSecurityManager::UNKNOWN_APP_ID) {
MOZ_ASSERT(false);
*aAppId = nsIScriptSecurityManager::NO_APP_ID;
return NS_OK;
}
*aAppId = AppId();
return NS_OK;
}
NS_IMETHODIMP
BasePrincipal::GetIsInBrowserElement(bool* aIsInBrowserElement)
{
*aIsInBrowserElement = IsInBrowserElement();
return NS_OK;
}
NS_IMETHODIMP
BasePrincipal::GetUnknownAppId(bool* aUnknownAppId)
{
*aUnknownAppId = AppId() == nsIScriptSecurityManager::UNKNOWN_APP_ID;
return NS_OK;
}
} // namespace mozilla
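
Editor's note: the net effect of this hunk, in sketch form — the four XPCOM entry points above (Equals, EqualsConsideringDomain, Subsumes, SubsumesConsideringDomain) now live on BasePrincipal, and a concrete principal supplies only SubsumesInternal. A minimal hypothetical subclass (illustrative, not part of the patch):

    // Hypothetical subclass, for illustration only.
    class MyPrincipal final : public mozilla::BasePrincipal
    {
    protected:
      bool SubsumesInternal(nsIPrincipal* aOther,
                            DocumentDomainConsideration aConsideration) override
      {
        // The behaviour nsNullPrincipal picks below: subsume only ourselves.
        return aOther == this;
      }
    };

BasePrincipal::Equals(a, b) then reduces to "a subsumes b && b subsumes a", with the ConsideringDomain variants differing only in the DocumentDomainConsideration argument passed down.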

View File

@ -8,8 +8,12 @@
#define mozilla_BasePrincipal_h
#include "nsIPrincipal.h"
#include "nsIScriptSecurityManager.h"
#include "nsJSPrincipals.h"
class nsIObjectOutputStream;
class nsIObjectInputStream;
namespace mozilla {
/*
@ -23,16 +27,62 @@ class BasePrincipal : public nsJSPrincipals
{
public:
BasePrincipal() {}
enum DocumentDomainConsideration { DontConsiderDocumentDomain, ConsiderDocumentDomain};
bool Subsumes(nsIPrincipal* aOther, DocumentDomainConsideration aConsideration);
NS_IMETHOD Equals(nsIPrincipal* other, bool* _retval) final;
NS_IMETHOD EqualsConsideringDomain(nsIPrincipal* other, bool* _retval) final;
NS_IMETHOD Subsumes(nsIPrincipal* other, bool* _retval) final;
NS_IMETHOD SubsumesConsideringDomain(nsIPrincipal* other, bool* _retval) final;
NS_IMETHOD GetCsp(nsIContentSecurityPolicy** aCsp) override;
NS_IMETHOD SetCsp(nsIContentSecurityPolicy* aCsp) override;
NS_IMETHOD GetIsNullPrincipal(bool* aIsNullPrincipal) override;
NS_IMETHOD GetJarPrefix(nsACString& aJarPrefix) final;
NS_IMETHOD GetAppStatus(uint16_t* aAppStatus) final;
NS_IMETHOD GetAppId(uint32_t* aAppStatus) final;
NS_IMETHOD GetIsInBrowserElement(bool* aIsInBrowserElement) final;
NS_IMETHOD GetUnknownAppId(bool* aUnknownAppId) final;
virtual bool IsOnCSSUnprefixingWhitelist() override { return false; }
static BasePrincipal* Cast(nsIPrincipal* aPrin) { return static_cast<BasePrincipal*>(aPrin); }
struct OriginAttributes {
// NB: If you add any members here, you need to update Serialize/Deserialize
// and bump the CIDs of all the principal implementations that invoke those
// methods.
uint32_t mAppId;
bool mIsInBrowserElement;
OriginAttributes() : mAppId(nsIScriptSecurityManager::NO_APP_ID), mIsInBrowserElement(false) {}
OriginAttributes(uint32_t aAppId, bool aIsInBrowserElement)
: mAppId(aAppId), mIsInBrowserElement(aIsInBrowserElement) {}
bool operator==(const OriginAttributes& aOther) const
{
return mAppId == aOther.mAppId &&
mIsInBrowserElement == aOther.mIsInBrowserElement;
}
bool operator!=(const OriginAttributes& aOther) const
{
return !(*this == aOther);
}
void Serialize(nsIObjectOutputStream* aStream) const;
nsresult Deserialize(nsIObjectInputStream* aStream);
};
const OriginAttributes& OriginAttributesRef() { return mOriginAttributes; }
uint32_t AppId() const { return mOriginAttributes.mAppId; }
bool IsInBrowserElement() const { return mOriginAttributes.mIsInBrowserElement; }
protected:
virtual ~BasePrincipal() {}
virtual bool SubsumesInternal(nsIPrincipal* aOther, DocumentDomainConsideration aConsider) = 0;
nsCOMPtr<nsIContentSecurityPolicy> mCSP;
OriginAttributes mOriginAttributes;
};
} // namespace mozilla
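
Editor's note: OriginAttributes is a plain value type, so comparison and serialization compose directly. A round-trip sketch against a matched nsIObjectOutputStream/nsIObjectInputStream pair (hypothetical helper, not in the patch):

    static nsresult
    RoundTripAttrs(nsIObjectOutputStream* aOut, nsIObjectInputStream* aIn)
    {
      mozilla::BasePrincipal::OriginAttributes attrs(42, /* inBrowser */ true);
      attrs.Serialize(aOut);           // Write32(mAppId); WriteBoolean(flag)

      mozilla::BasePrincipal::OriginAttributes restored;
      nsresult rv = restored.Deserialize(aIn);
      NS_ENSURE_SUCCESS(rv, rv);

      MOZ_ASSERT(restored == attrs);   // operator== compares both members
      return NS_OK;
    }

Per the NB comment in the struct, growing it means extending both methods and bumping the CIDs of the principal implementations, since previously serialized principals would otherwise deserialize incorrectly.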

View File

@ -17,8 +17,6 @@
#include "nsMemory.h"
#include "nsNetUtil.h"
#include "nsIClassInfoImpl.h"
#include "nsIObjectInputStream.h"
#include "nsIObjectOutputStream.h"
#include "nsNetCID.h"
#include "nsError.h"
#include "nsIScriptSecurityManager.h"
@ -41,27 +39,25 @@ NS_IMPL_CI_INTERFACE_GETTER(nsNullPrincipal,
nsNullPrincipal::CreateWithInheritedAttributes(nsIPrincipal* aInheritFrom)
{
nsRefPtr<nsNullPrincipal> nullPrin = new nsNullPrincipal();
nsresult rv = nullPrin->Init(aInheritFrom->GetAppId(),
aInheritFrom->GetIsInBrowserElement());
nsresult rv = nullPrin->Init(Cast(aInheritFrom)->OriginAttributesRef());
return NS_SUCCEEDED(rv) ? nullPrin.forget() : nullptr;
}
/* static */ already_AddRefed<nsNullPrincipal>
nsNullPrincipal::Create(uint32_t aAppId, bool aInMozBrowser)
nsNullPrincipal::Create(const OriginAttributes& aOriginAttributes)
{
nsRefPtr<nsNullPrincipal> nullPrin = new nsNullPrincipal();
nsresult rv = nullPrin->Init(aAppId, aInMozBrowser);
nsresult rv = nullPrin->Init(aOriginAttributes);
NS_ENSURE_SUCCESS(rv, nullptr);
return nullPrin.forget();
}
nsresult
nsNullPrincipal::Init(uint32_t aAppId, bool aInMozBrowser)
nsNullPrincipal::Init(const OriginAttributes& aOriginAttributes)
{
MOZ_ASSERT(aAppId != nsIScriptSecurityManager::UNKNOWN_APP_ID);
mAppId = aAppId;
mInMozBrowser = aInMozBrowser;
mOriginAttributes = aOriginAttributes;
MOZ_ASSERT(AppId() != nsIScriptSecurityManager::UNKNOWN_APP_ID);
mURI = nsNullPrincipalURI::Create();
NS_ENSURE_TRUE(mURI, NS_ERROR_NOT_AVAILABLE);
@ -79,21 +75,6 @@ nsNullPrincipal::GetScriptLocation(nsACString &aStr)
* nsIPrincipal implementation
*/
NS_IMETHODIMP
nsNullPrincipal::Equals(nsIPrincipal *aOther, bool *aResult)
{
// Just equal to ourselves. Note that nsPrincipal::Equals will return false
// for us since we have a unique domain/origin/etc.
*aResult = (aOther == this);
return NS_OK;
}
NS_IMETHODIMP
nsNullPrincipal::EqualsConsideringDomain(nsIPrincipal *aOther, bool *aResult)
{
return Equals(aOther, aResult);
}
NS_IMETHODIMP
nsNullPrincipal::GetHashValue(uint32_t *aResult)
{
@ -127,22 +108,6 @@ nsNullPrincipal::GetOrigin(nsACString& aOrigin)
return mURI->GetSpec(aOrigin);
}
NS_IMETHODIMP
nsNullPrincipal::Subsumes(nsIPrincipal *aOther, bool *aResult)
{
// We don't subsume anything except ourselves. Note that nsPrincipal::Equals
// will return false for us, since we're not about:blank and not Equals to
// reasonable nsPrincipals.
*aResult = (aOther == this);
return NS_OK;
}
NS_IMETHODIMP
nsNullPrincipal::SubsumesConsideringDomain(nsIPrincipal *aOther, bool *aResult)
{
return Subsumes(aOther, aResult);
}
NS_IMETHODIMP
nsNullPrincipal::CheckMayLoad(nsIURI* aURI, bool aReport, bool aAllowIfInheritsPrincipal)
{
@ -171,41 +136,6 @@ nsNullPrincipal::CheckMayLoad(nsIURI* aURI, bool aReport, bool aAllowIfInheritsP
return NS_ERROR_DOM_BAD_URI;
}
NS_IMETHODIMP
nsNullPrincipal::GetJarPrefix(nsACString& aJarPrefix)
{
aJarPrefix.Truncate();
return NS_OK;
}
NS_IMETHODIMP
nsNullPrincipal::GetAppStatus(uint16_t* aAppStatus)
{
*aAppStatus = nsScriptSecurityManager::AppStatusForPrincipal(this);
return NS_OK;
}
NS_IMETHODIMP
nsNullPrincipal::GetAppId(uint32_t* aAppId)
{
*aAppId = mAppId;
return NS_OK;
}
NS_IMETHODIMP
nsNullPrincipal::GetIsInBrowserElement(bool* aIsInBrowserElement)
{
*aIsInBrowserElement = mInMozBrowser;
return NS_OK;
}
NS_IMETHODIMP
nsNullPrincipal::GetUnknownAppId(bool* aUnknownAppId)
{
*aUnknownAppId = false;
return NS_OK;
}
NS_IMETHODIMP
nsNullPrincipal::GetIsNullPrincipal(bool* aIsNullPrincipal)
{
@ -230,20 +160,13 @@ nsNullPrincipal::Read(nsIObjectInputStream* aStream)
// that the Init() method has already been invoked by the time we deserialize.
// This is in contrast to nsPrincipal, which uses NS_GENERIC_FACTORY_CONSTRUCTOR,
// in which case ::Read needs to invoke Init().
nsresult rv = aStream->Read32(&mAppId);
NS_ENSURE_SUCCESS(rv, rv);
rv = aStream->ReadBoolean(&mInMozBrowser);
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
return mOriginAttributes.Deserialize(aStream);
}
NS_IMETHODIMP
nsNullPrincipal::Write(nsIObjectOutputStream* aStream)
{
aStream->Write32(mAppId);
aStream->WriteBoolean(mInMozBrowser);
OriginAttributesRef().Serialize(aStream);
return NS_OK;
}
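
Editor's note: with the Init signature change, callers pass a whole OriginAttributes instead of the old (appId, inMozBrowser) pair, and the default argument keeps the common case terse. Usage sketch (hypothetical call sites):

    // Default: NO_APP_ID, not in a browser element.
    nsRefPtr<nsNullPrincipal> a = nsNullPrincipal::Create();

    // Explicit attributes, e.g. when inheriting app/browser state.
    uint32_t appId = 42;  // placeholder app id
    mozilla::BasePrincipal::OriginAttributes attrs(appId, /* inBrowser */ true);
    nsRefPtr<nsNullPrincipal> b = nsNullPrincipal::Create(attrs);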

View File

@ -40,21 +40,12 @@ public:
NS_DECL_NSISERIALIZABLE
NS_IMETHOD QueryInterface(REFNSIID aIID, void** aInstancePtr) override;
NS_IMETHOD Equals(nsIPrincipal* other, bool* _retval) override;
NS_IMETHOD EqualsConsideringDomain(nsIPrincipal* other, bool* _retval) override;
NS_IMETHOD GetHashValue(uint32_t* aHashValue) override;
NS_IMETHOD GetURI(nsIURI** aURI) override;
NS_IMETHOD GetDomain(nsIURI** aDomain) override;
NS_IMETHOD SetDomain(nsIURI* aDomain) override;
NS_IMETHOD GetOrigin(nsACString& aOrigin) override;
NS_IMETHOD Subsumes(nsIPrincipal* other, bool* _retval) override;
NS_IMETHOD SubsumesConsideringDomain(nsIPrincipal* other, bool* _retval) override;
NS_IMETHOD CheckMayLoad(nsIURI* uri, bool report, bool allowIfInheritsPrincipal) override;
NS_IMETHOD GetJarPrefix(nsACString& aJarPrefix) override;
NS_IMETHOD GetAppStatus(uint16_t* aAppStatus) override;
NS_IMETHOD GetAppId(uint32_t* aAppStatus) override;
NS_IMETHOD GetIsInBrowserElement(bool* aIsInBrowserElement) override;
NS_IMETHOD GetUnknownAppId(bool* aUnknownAppId) override;
NS_IMETHOD GetIsNullPrincipal(bool* aIsNullPrincipal) override;
NS_IMETHOD GetBaseDomain(nsACString& aBaseDomain) override;
@ -63,21 +54,22 @@ public:
// Returns null on failure.
static already_AddRefed<nsNullPrincipal>
Create(uint32_t aAppId = nsIScriptSecurityManager::NO_APP_ID,
bool aInMozBrowser = false);
Create(const OriginAttributes& aOriginAttributes = OriginAttributes());
nsresult Init(uint32_t aAppId = nsIScriptSecurityManager::NO_APP_ID,
bool aInMozBrowser = false);
nsresult Init(const OriginAttributes& aOriginAttributes = OriginAttributes());
virtual void GetScriptLocation(nsACString &aStr) override;
protected:
virtual ~nsNullPrincipal() {}
bool SubsumesInternal(nsIPrincipal* aOther, DocumentDomainConsideration aConsideration) override
{
return aOther == this;
}
nsCOMPtr<nsIURI> mURI;
nsCOMPtr<nsIContentSecurityPolicy> mCSP;
uint32_t mAppId;
bool mInMozBrowser;
};
#endif // nsNullPrincipal_h__

View File

@ -15,8 +15,6 @@
#include "nsIURI.h"
#include "nsJSPrincipals.h"
#include "nsIEffectiveTLDService.h"
#include "nsIObjectInputStream.h"
#include "nsIObjectOutputStream.h"
#include "nsIClassInfoImpl.h"
#include "nsIProtocolHandler.h"
#include "nsError.h"
@ -69,9 +67,7 @@ nsPrincipal::InitializeStatics()
}
nsPrincipal::nsPrincipal()
: mAppId(nsIScriptSecurityManager::UNKNOWN_APP_ID)
, mInMozBrowser(false)
, mCodebaseImmutable(false)
: mCodebaseImmutable(false)
, mDomainImmutable(false)
, mInitialized(false)
{ }
@ -80,9 +76,7 @@ nsPrincipal::~nsPrincipal()
{ }
nsresult
nsPrincipal::Init(nsIURI *aCodebase,
uint32_t aAppId,
bool aInMozBrowser)
nsPrincipal::Init(nsIURI *aCodebase, const OriginAttributes& aOriginAttributes)
{
NS_ENSURE_STATE(!mInitialized);
NS_ENSURE_ARG(aCodebase);
@ -91,9 +85,7 @@ nsPrincipal::Init(nsIURI *aCodebase,
mCodebase = NS_TryToMakeImmutable(aCodebase);
mCodebaseImmutable = URIIsImmutable(mCodebase);
mAppId = aAppId;
mInMozBrowser = aInMozBrowser;
mOriginAttributes = aOriginAttributes;
return NS_OK;
}
@ -163,93 +155,45 @@ nsPrincipal::GetOrigin(nsACString& aOrigin)
return GetOriginForURI(mCodebase, aOrigin);
}
NS_IMETHODIMP
nsPrincipal::EqualsConsideringDomain(nsIPrincipal *aOther, bool *aResult)
bool
nsPrincipal::SubsumesInternal(nsIPrincipal* aOther,
BasePrincipal::DocumentDomainConsideration aConsideration)
{
*aResult = false;
if (!aOther) {
NS_WARNING("Need a principal to compare this to!");
return NS_OK;
}
MOZ_ASSERT(aOther);
// For nsPrincipal, Subsumes is equivalent to Equals.
if (aOther == this) {
*aResult = true;
return NS_OK;
return true;
}
if (!nsScriptSecurityManager::AppAttributesEqual(this, aOther)) {
return NS_OK;
if (OriginAttributesRef() != Cast(aOther)->OriginAttributesRef()) {
return false;
}
// If either the subject or the object has changed its principal by
// explicitly setting document.domain, then the other must also have
// done so in order to be considered the same origin. This prevents
// DNS spoofing based on document.domain (bug 154930).
nsresult rv;
if (aConsideration == ConsiderDocumentDomain) {
// Get .domain on each principal.
nsCOMPtr<nsIURI> thisDomain, otherDomain;
GetDomain(getter_AddRefs(thisDomain));
aOther->GetDomain(getter_AddRefs(otherDomain));
nsCOMPtr<nsIURI> thisURI;
this->GetDomain(getter_AddRefs(thisURI));
bool thisSetDomain = !!thisURI;
if (!thisURI) {
this->GetURI(getter_AddRefs(thisURI));
// If either has .domain set, we have equality if and only if the domains match.
// Otherwise, we fall through to the non-document-domain-considering case.
if (thisDomain || otherDomain) {
return nsScriptSecurityManager::SecurityCompareURIs(thisDomain, otherDomain);
}
}
nsCOMPtr<nsIURI> otherURI;
aOther->GetDomain(getter_AddRefs(otherURI));
bool otherSetDomain = !!otherURI;
if (!otherURI) {
aOther->GetURI(getter_AddRefs(otherURI));
}
*aResult = thisSetDomain == otherSetDomain &&
nsScriptSecurityManager::SecurityCompareURIs(thisURI, otherURI);
return NS_OK;
}
NS_IMETHODIMP
nsPrincipal::Equals(nsIPrincipal *aOther, bool *aResult)
{
*aResult = false;
if (!aOther) {
NS_WARNING("Need a principal to compare this to!");
return NS_OK;
}
if (aOther == this) {
*aResult = true;
return NS_OK;
}
if (!nsScriptSecurityManager::AppAttributesEqual(this, aOther)) {
return NS_OK;
}
nsCOMPtr<nsIURI> otherURI;
nsresult rv = aOther->GetURI(getter_AddRefs(otherURI));
if (NS_FAILED(rv)) {
return rv;
}
NS_ASSERTION(mCodebase,
"shouldn't be calling this on principals from preferences");
nsCOMPtr<nsIURI> otherURI;
rv = aOther->GetURI(getter_AddRefs(otherURI));
NS_ENSURE_SUCCESS(rv, false);
// Compare codebases.
*aResult = nsScriptSecurityManager::SecurityCompareURIs(mCodebase,
otherURI);
return NS_OK;
}
NS_IMETHODIMP
nsPrincipal::Subsumes(nsIPrincipal *aOther, bool *aResult)
{
return Equals(aOther, aResult);
}
NS_IMETHODIMP
nsPrincipal::SubsumesConsideringDomain(nsIPrincipal *aOther, bool *aResult)
{
return EqualsConsideringDomain(aOther, aResult);
return nsScriptSecurityManager::SecurityCompareURIs(mCodebase, otherURI);
}
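
Editor's note: the document.domain branch above is easy to mis-read, so here is a toy model of the rule (standalone C++, not Gecko code; SecurityCompareURIs is reduced to string equality):

    #include <optional>
    #include <string>

    // aDomain* model nsIPrincipal::GetDomain(): set only once a page assigns
    // document.domain. aCodebase* model the codebase URI origins.
    static bool SameOrigin(const std::optional<std::string>& aDomainA,
                           const std::string& aCodebaseA,
                           const std::optional<std::string>& aDomainB,
                           const std::string& aCodebaseB)
    {
      if (aDomainA || aDomainB) {
        // If either side set document.domain, both must have set it to
        // matching values; a set/unset mismatch fails (the bug 154930 guard).
        return aDomainA && aDomainB && *aDomainA == *aDomainB;
      }
      return aCodebaseA == aCodebaseB;  // neither set: compare codebases
    }

    // SameOrigin({"example.com"}, "https://a.example.com",
    //            {"example.com"}, "https://b.example.com")  -> true
    // SameOrigin({"example.com"}, "https://a.example.com",
    //            std::nullopt,    "https://a.example.com")  -> false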
NS_IMETHODIMP
@ -361,49 +305,6 @@ nsPrincipal::SetDomain(nsIURI* aDomain)
return NS_OK;
}
NS_IMETHODIMP
nsPrincipal::GetJarPrefix(nsACString& aJarPrefix)
{
MOZ_ASSERT(mAppId != nsIScriptSecurityManager::UNKNOWN_APP_ID);
mozilla::GetJarPrefix(mAppId, mInMozBrowser, aJarPrefix);
return NS_OK;
}
NS_IMETHODIMP
nsPrincipal::GetAppStatus(uint16_t* aAppStatus)
{
*aAppStatus = GetAppStatus();
return NS_OK;
}
NS_IMETHODIMP
nsPrincipal::GetAppId(uint32_t* aAppId)
{
if (mAppId == nsIScriptSecurityManager::UNKNOWN_APP_ID) {
MOZ_ASSERT(false);
*aAppId = nsIScriptSecurityManager::NO_APP_ID;
return NS_OK;
}
*aAppId = mAppId;
return NS_OK;
}
NS_IMETHODIMP
nsPrincipal::GetIsInBrowserElement(bool* aIsInBrowserElement)
{
*aIsInBrowserElement = mInMozBrowser;
return NS_OK;
}
NS_IMETHODIMP
nsPrincipal::GetUnknownAppId(bool* aUnknownAppId)
{
*aUnknownAppId = mAppId == nsIScriptSecurityManager::UNKNOWN_APP_ID;
return NS_OK;
}
NS_IMETHODIMP
nsPrincipal::GetBaseDomain(nsACString& aBaseDomain)
{
@ -459,12 +360,8 @@ nsPrincipal::Read(nsIObjectInputStream* aStream)
domain = do_QueryInterface(supports);
uint32_t appId;
rv = aStream->Read32(&appId);
NS_ENSURE_SUCCESS(rv, rv);
bool inMozBrowser;
rv = aStream->ReadBoolean(&inMozBrowser);
OriginAttributes attrs;
rv = attrs.Deserialize(aStream);
NS_ENSURE_SUCCESS(rv, rv);
rv = NS_ReadOptionalObject(aStream, true, getter_AddRefs(supports));
@ -473,7 +370,7 @@ nsPrincipal::Read(nsIObjectInputStream* aStream)
// This may be null.
nsCOMPtr<nsIContentSecurityPolicy> csp = do_QueryInterface(supports, &rv);
rv = Init(codebase, appId, inMozBrowser);
rv = Init(codebase, attrs);
NS_ENSURE_SUCCESS(rv, rv);
rv = SetCsp(csp);
@ -507,8 +404,7 @@ nsPrincipal::Write(nsIObjectOutputStream* aStream)
return rv;
}
aStream->Write32(mAppId);
aStream->WriteBoolean(mInMozBrowser);
OriginAttributesRef().Serialize(aStream);
rv = NS_WriteOptionalCompoundObject(aStream, mCSP,
NS_GET_IID(nsIContentSecurityPolicy),
@ -523,16 +419,6 @@ nsPrincipal::Write(nsIObjectOutputStream* aStream)
return NS_OK;
}
uint16_t
nsPrincipal::GetAppStatus()
{
if (mAppId == nsIScriptSecurityManager::UNKNOWN_APP_ID) {
NS_WARNING("Asking for app status on a principal with an unknown app id");
return nsIPrincipal::APP_STATUS_NOT_INSTALLED;
}
return nsScriptSecurityManager::AppStatusForPrincipal(this);
}
// Helper-function to indicate whether the CSS Unprefixing Service
// whitelist should include dummy domains that are only intended for
// use in testing. (Controlled by a pref.)
@ -792,95 +678,33 @@ nsExpandedPrincipal::GetOrigin(nsACString& aOrigin)
return NS_OK;
}
typedef nsresult (NS_STDCALL nsIPrincipal::*nsIPrincipalMemFn)(nsIPrincipal* aOther,
bool* aResult);
#define CALL_MEMBER_FUNCTION(THIS,MEM_FN) ((THIS)->*(MEM_FN))
// nsExpandedPrincipal::Equals and nsExpandedPrincipal::EqualsConsideringDomain
// share the same logic. The only difference is that Equals requires 'this'
// and 'aOther' to subsume each other, while EqualsConsideringDomain requires
// bidirectional SubsumesConsideringDomain.
static nsresult
Equals(nsExpandedPrincipal* aThis, nsIPrincipalMemFn aFn, nsIPrincipal* aOther,
bool* aResult)
bool
nsExpandedPrincipal::SubsumesInternal(nsIPrincipal* aOther,
BasePrincipal::DocumentDomainConsideration aConsideration)
{
// If (and only if) 'aThis' and 'aOther' both Subsume/SubsumesConsideringDomain
// each other, then they are Equal.
*aResult = false;
// Calling the corresponding subsume function on this (aFn).
nsresult rv = CALL_MEMBER_FUNCTION(aThis, aFn)(aOther, aResult);
NS_ENSURE_SUCCESS(rv, rv);
if (!*aResult)
return NS_OK;
// Calling the corresponding subsume function on aOther (aFn).
rv = CALL_MEMBER_FUNCTION(aOther, aFn)(aThis, aResult);
NS_ENSURE_SUCCESS(rv, rv);
return NS_OK;
}
NS_IMETHODIMP
nsExpandedPrincipal::Equals(nsIPrincipal* aOther, bool* aResult)
{
return ::Equals(this, &nsIPrincipal::Subsumes, aOther, aResult);
}
NS_IMETHODIMP
nsExpandedPrincipal::EqualsConsideringDomain(nsIPrincipal* aOther, bool* aResult)
{
return ::Equals(this, &nsIPrincipal::SubsumesConsideringDomain, aOther, aResult);
}
// nsExpandedPrincipal::Subsumes and nsExpandedPrincipal::SubsumesConsideringDomain
// share the same logic. The only difference is that Subsumes calls are replaced
// with SubsumesConsideringDomain calls in the second case.
static nsresult
Subsumes(nsExpandedPrincipal* aThis, nsIPrincipalMemFn aFn, nsIPrincipal* aOther,
bool* aResult)
{
nsresult rv;
// If aOther is an ExpandedPrincipal too, we break it down into its component
// nsIPrincipals, and check subsumes on each one.
nsCOMPtr<nsIExpandedPrincipal> expanded = do_QueryInterface(aOther);
if (expanded) {
// If aOther is an ExpandedPrincipal too, check if all of its
// principals are subsumed.
nsTArray< nsCOMPtr<nsIPrincipal> >* otherList;
expanded->GetWhiteList(&otherList);
for (uint32_t i = 0; i < otherList->Length(); ++i){
rv = CALL_MEMBER_FUNCTION(aThis, aFn)((*otherList)[i], aResult);
NS_ENSURE_SUCCESS(rv, rv);
if (!*aResult) {
// If we don't subsume at least one principal of aOther, return false.
return NS_OK;
if (!SubsumesInternal((*otherList)[i], aConsideration)) {
return false;
}
}
} else {
// For a regular aOther, one of our principals must subsume it.
nsTArray< nsCOMPtr<nsIPrincipal> >* list;
aThis->GetWhiteList(&list);
for (uint32_t i = 0; i < list->Length(); ++i){
rv = CALL_MEMBER_FUNCTION((*list)[i], aFn)(aOther, aResult);
NS_ENSURE_SUCCESS(rv, rv);
if (*aResult) {
// If one of our principal subsumes it, return true.
return NS_OK;
}
return true;
}
// We're dealing with a regular principal. One of our principals must subsume
// it.
for (uint32_t i = 0; i < mPrincipals.Length(); ++i) {
if (Cast(mPrincipals[i])->Subsumes(aOther, aConsideration)) {
return true;
}
}
return NS_OK;
}
#undef CALL_MEMBER_FUNCTION
NS_IMETHODIMP
nsExpandedPrincipal::Subsumes(nsIPrincipal* aOther, bool* aResult)
{
return ::Subsumes(this, &nsIPrincipal::Subsumes, aOther, aResult);
}
NS_IMETHODIMP
nsExpandedPrincipal::SubsumesConsideringDomain(nsIPrincipal* aOther, bool* aResult)
{
return ::Subsumes(this, &nsIPrincipal::SubsumesConsideringDomain, aOther, aResult);
return false;
}
NS_IMETHODIMP
@ -916,41 +740,6 @@ nsExpandedPrincipal::GetWhiteList(nsTArray<nsCOMPtr<nsIPrincipal> >** aWhiteList
return NS_OK;
}
NS_IMETHODIMP
nsExpandedPrincipal::GetJarPrefix(nsACString& aJarPrefix)
{
aJarPrefix.Truncate();
return NS_OK;
}
NS_IMETHODIMP
nsExpandedPrincipal::GetAppStatus(uint16_t* aAppStatus)
{
*aAppStatus = nsIPrincipal::APP_STATUS_NOT_INSTALLED;
return NS_OK;
}
NS_IMETHODIMP
nsExpandedPrincipal::GetAppId(uint32_t* aAppId)
{
*aAppId = nsIScriptSecurityManager::NO_APP_ID;
return NS_OK;
}
NS_IMETHODIMP
nsExpandedPrincipal::GetIsInBrowserElement(bool* aIsInBrowserElement)
{
*aIsInBrowserElement = false;
return NS_OK;
}
NS_IMETHODIMP
nsExpandedPrincipal::GetUnknownAppId(bool* aUnknownAppId)
{
*aUnknownAppId = false;
return NS_OK;
}
NS_IMETHODIMP
nsExpandedPrincipal::GetBaseDomain(nsACString& aBaseDomain)
{

View File

@ -22,30 +22,19 @@ class nsPrincipal final : public mozilla::BasePrincipal
public:
NS_DECL_NSISERIALIZABLE
NS_IMETHOD QueryInterface(REFNSIID aIID, void** aInstancePtr) override;
NS_IMETHOD Equals(nsIPrincipal* other, bool* _retval) override;
NS_IMETHOD EqualsConsideringDomain(nsIPrincipal* other, bool* _retval) override;
NS_IMETHOD GetHashValue(uint32_t* aHashValue) override;
NS_IMETHOD GetURI(nsIURI** aURI) override;
NS_IMETHOD GetDomain(nsIURI** aDomain) override;
NS_IMETHOD SetDomain(nsIURI* aDomain) override;
NS_IMETHOD GetOrigin(nsACString& aOrigin) override;
NS_IMETHOD Subsumes(nsIPrincipal* other, bool* _retval) override;
NS_IMETHOD SubsumesConsideringDomain(nsIPrincipal* other, bool* _retval) override;
NS_IMETHOD CheckMayLoad(nsIURI* uri, bool report, bool allowIfInheritsPrincipal) override;
NS_IMETHOD GetJarPrefix(nsACString& aJarPrefix) override;
NS_IMETHOD GetAppStatus(uint16_t* aAppStatus) override;
NS_IMETHOD GetAppId(uint32_t* aAppStatus) override;
NS_IMETHOD GetIsInBrowserElement(bool* aIsInBrowserElement) override;
NS_IMETHOD GetUnknownAppId(bool* aUnknownAppId) override;
NS_IMETHOD GetBaseDomain(nsACString& aBaseDomain) override;
virtual bool IsOnCSSUnprefixingWhitelist() override;
nsPrincipal();
// Init() must be called before the principal is in a usable state.
nsresult Init(nsIURI* aCodebase,
uint32_t aAppId,
bool aInMozBrowser);
nsresult Init(nsIURI* aCodebase, const OriginAttributes& aOriginAttributes);
virtual void GetScriptLocation(nsACString& aStr) override;
void SetURI(nsIURI* aURI);
@ -79,8 +68,6 @@ public:
nsCOMPtr<nsIURI> mDomain;
nsCOMPtr<nsIURI> mCodebase;
uint32_t mAppId;
bool mInMozBrowser;
// If mCodebaseImmutable is true, mCodebase is non-null and immutable
bool mCodebaseImmutable;
bool mDomainImmutable;
@ -90,10 +77,7 @@ public:
protected:
virtual ~nsPrincipal();
/**
* Returns the app status of the principal based on mAppId and mInMozBrowser.
*/
uint16_t GetAppStatus();
bool SubsumesInternal(nsIPrincipal* aOther, DocumentDomainConsideration aConsideration) override;
};
class nsExpandedPrincipal : public nsIExpandedPrincipal, public mozilla::BasePrincipal
@ -101,34 +85,26 @@ class nsExpandedPrincipal : public nsIExpandedPrincipal, public mozilla::BasePri
public:
explicit nsExpandedPrincipal(nsTArray< nsCOMPtr<nsIPrincipal> > &aWhiteList);
protected:
virtual ~nsExpandedPrincipal();
public:
NS_DECL_NSIEXPANDEDPRINCIPAL
NS_DECL_NSISERIALIZABLE
NS_IMETHODIMP_(MozExternalRefCountType) AddRef() override { return nsJSPrincipals::AddRef(); };
NS_IMETHODIMP_(MozExternalRefCountType) Release() override { return nsJSPrincipals::Release(); };
NS_IMETHOD QueryInterface(REFNSIID aIID, void** aInstancePtr) override;
NS_IMETHOD Equals(nsIPrincipal* other, bool* _retval) override;
NS_IMETHOD EqualsConsideringDomain(nsIPrincipal* other, bool* _retval) override;
NS_IMETHOD GetHashValue(uint32_t* aHashValue) override;
NS_IMETHOD GetURI(nsIURI** aURI) override;
NS_IMETHOD GetDomain(nsIURI** aDomain) override;
NS_IMETHOD SetDomain(nsIURI* aDomain) override;
NS_IMETHOD GetOrigin(nsACString& aOrigin) override;
NS_IMETHOD Subsumes(nsIPrincipal* other, bool* _retval) override;
NS_IMETHOD SubsumesConsideringDomain(nsIPrincipal* other, bool* _retval) override;
NS_IMETHOD CheckMayLoad(nsIURI* uri, bool report, bool allowIfInheritsPrincipal) override;
NS_IMETHOD GetJarPrefix(nsACString& aJarPrefix) override;
NS_IMETHOD GetAppStatus(uint16_t* aAppStatus) override;
NS_IMETHOD GetAppId(uint32_t* aAppStatus) override;
NS_IMETHOD GetIsInBrowserElement(bool* aIsInBrowserElement) override;
NS_IMETHOD GetUnknownAppId(bool* aUnknownAppId) override;
NS_IMETHOD GetBaseDomain(nsACString& aBaseDomain) override;
virtual bool IsOnCSSUnprefixingWhitelist() override;
virtual void GetScriptLocation(nsACString &aStr) override;
protected:
virtual ~nsExpandedPrincipal();
bool SubsumesInternal(nsIPrincipal* aOther, DocumentDomainConsideration aConsideration) override;
private:
nsTArray< nsCOMPtr<nsIPrincipal> > mPrincipals;
};

View File

@ -487,26 +487,6 @@ nsScriptSecurityManager::HashPrincipalByOrigin(nsIPrincipal* aPrincipal)
return SecurityHashURI(uri);
}
/* static */ bool
nsScriptSecurityManager::AppAttributesEqual(nsIPrincipal* aFirst,
nsIPrincipal* aSecond)
{
MOZ_ASSERT(aFirst && aSecond, "Don't pass null pointers!");
uint32_t firstAppId = nsIScriptSecurityManager::UNKNOWN_APP_ID;
if (!aFirst->GetUnknownAppId()) {
firstAppId = aFirst->GetAppId();
}
uint32_t secondAppId = nsIScriptSecurityManager::UNKNOWN_APP_ID;
if (!aSecond->GetUnknownAppId()) {
secondAppId = aSecond->GetAppId();
}
return ((firstAppId == secondAppId) &&
(aFirst->GetIsInBrowserElement() == aSecond->GetIsInBrowserElement()));
}
NS_IMETHODIMP
nsScriptSecurityManager::CheckLoadURIFromScript(JSContext *cx, nsIURI *aURI)
{
@ -1022,9 +1002,9 @@ nsScriptSecurityManager::CreateCodebasePrincipal(nsIURI* aURI, uint32_t aAppId,
return NS_OK;
}
BasePrincipal::OriginAttributes attrs(aAppId, aInMozBrowser);
nsRefPtr<nsPrincipal> codebase = new nsPrincipal();
nsresult rv = codebase->Init(aURI, aAppId, aInMozBrowser);
nsresult rv = codebase->Init(aURI, attrs);
if (NS_FAILED(rv))
return rv;
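
Editor's note: the removed AppAttributesEqual helper (see the header diff below) is subsumed by OriginAttributes::operator==. A sketch of the equivalent check, using only the APIs shown in this patch:

    // Sketch of the replacement for the removed helper.
    static bool
    AppAttributesEqual(nsIPrincipal* aFirst, nsIPrincipal* aSecond)
    {
      MOZ_ASSERT(aFirst && aSecond, "Don't pass null pointers!");
      // One value comparison instead of piecemeal appId/inBrowser checks.
      return mozilla::BasePrincipal::Cast(aFirst)->OriginAttributesRef() ==
             mozilla::BasePrincipal::Cast(aSecond)->OriginAttributesRef();
    }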

View File

@ -80,22 +80,6 @@ public:
return sStrictFileOriginPolicy;
}
/**
* Returns true if the two principals share the same app attributes.
*
* App attributes are the appId and the inBrowserElement flag.
* Two principals have the same app attributes if that information is
* equal. This method helps keep principals from different apps isolated
* from each other. It also helps ensure that mozbrowser frames (web views)
* and their parents are isolated from each other. None of those entities
* share the same data (cookies, IndexedDB, localStorage, etc.), so we
* shouldn't allow violating that principle.
*/
static bool
AppAttributesEqual(nsIPrincipal* aFirst,
nsIPrincipal* aSecond);
void DeactivateDomainPolicy();
private:

View File

@ -41,33 +41,6 @@ nsSystemPrincipal::GetScriptLocation(nsACString &aStr)
// Methods implementing nsIPrincipal //
///////////////////////////////////////
NS_IMETHODIMP
nsSystemPrincipal::Equals(nsIPrincipal *other, bool *result)
{
*result = (other == this);
return NS_OK;
}
NS_IMETHODIMP
nsSystemPrincipal::EqualsConsideringDomain(nsIPrincipal *other, bool *result)
{
return Equals(other, result);
}
NS_IMETHODIMP
nsSystemPrincipal::Subsumes(nsIPrincipal *other, bool *result)
{
*result = true;
return NS_OK;
}
NS_IMETHODIMP
nsSystemPrincipal::SubsumesConsideringDomain(nsIPrincipal *other, bool *result)
{
*result = true;
return NS_OK;
}
NS_IMETHODIMP
nsSystemPrincipal::CheckMayLoad(nsIURI* uri, bool aReport, bool aAllowIfInheritsPrincipal)
{
@ -122,41 +95,6 @@ nsSystemPrincipal::SetDomain(nsIURI* aDomain)
return NS_OK;
}
NS_IMETHODIMP
nsSystemPrincipal::GetJarPrefix(nsACString& aJarPrefix)
{
aJarPrefix.Truncate();
return NS_OK;
}
NS_IMETHODIMP
nsSystemPrincipal::GetAppStatus(uint16_t* aAppStatus)
{
*aAppStatus = nsIPrincipal::APP_STATUS_NOT_INSTALLED;
return NS_OK;
}
NS_IMETHODIMP
nsSystemPrincipal::GetAppId(uint32_t* aAppId)
{
*aAppId = nsIScriptSecurityManager::NO_APP_ID;
return NS_OK;
}
NS_IMETHODIMP
nsSystemPrincipal::GetIsInBrowserElement(bool* aIsInBrowserElement)
{
*aIsInBrowserElement = false;
return NS_OK;
}
NS_IMETHODIMP
nsSystemPrincipal::GetUnknownAppId(bool* aUnknownAppId)
{
*aUnknownAppId = false;
return NS_OK;
}
NS_IMETHODIMP
nsSystemPrincipal::GetBaseDomain(nsACString& aBaseDomain)
{

View File

@ -25,23 +25,14 @@ class nsSystemPrincipal final : public mozilla::BasePrincipal
public:
NS_DECL_NSISERIALIZABLE
NS_IMETHOD QueryInterface(REFNSIID aIID, void** aInstancePtr) override;
NS_IMETHOD Equals(nsIPrincipal* other, bool* _retval) override;
NS_IMETHOD EqualsConsideringDomain(nsIPrincipal* other, bool* _retval) override;
NS_IMETHOD GetHashValue(uint32_t* aHashValue) override;
NS_IMETHOD GetURI(nsIURI** aURI) override;
NS_IMETHOD GetDomain(nsIURI** aDomain) override;
NS_IMETHOD SetDomain(nsIURI* aDomain) override;
NS_IMETHOD GetOrigin(nsACString& aOrigin) override;
NS_IMETHOD Subsumes(nsIPrincipal* other, bool* _retval) override;
NS_IMETHOD SubsumesConsideringDomain(nsIPrincipal* other, bool* _retval) override;
NS_IMETHOD CheckMayLoad(nsIURI* uri, bool report, bool allowIfInheritsPrincipal) override;
NS_IMETHOD GetCsp(nsIContentSecurityPolicy** aCsp) override;
NS_IMETHOD SetCsp(nsIContentSecurityPolicy* aCsp) override;
NS_IMETHOD GetJarPrefix(nsACString& aJarPrefix) override;
NS_IMETHOD GetAppStatus(uint16_t* aAppStatus) override;
NS_IMETHOD GetAppId(uint32_t* aAppStatus) override;
NS_IMETHOD GetIsInBrowserElement(bool* aIsInBrowserElement) override;
NS_IMETHOD GetUnknownAppId(bool* aUnknownAppId) override;
NS_IMETHOD GetBaseDomain(nsACString& aBaseDomain) override;
nsSystemPrincipal() {}
@ -50,6 +41,11 @@ public:
protected:
virtual ~nsSystemPrincipal(void) {}
bool SubsumesInternal(nsIPrincipal *aOther, DocumentDomainConsideration aConsideration) override
{
return true;
}
};
#endif // nsSystemPrincipal_h__

View File

@ -153,7 +153,7 @@ nsSimpleContentList::WrapObject(JSContext *cx, JS::Handle<JSObject*> aGivenProto
}
// Hashtable for storing nsContentLists
static PLDHashTable gContentListHashTable;
static PLDHashTable2* gContentListHashTable;
#define RECENTLY_USED_CONTENT_LIST_CACHE_SIZE 31
static nsContentList*
@ -215,19 +215,17 @@ NS_GetContentList(nsINode* aRootNode,
};
// Initialize the hashtable if needed.
if (!gContentListHashTable.IsInitialized()) {
PL_DHashTableInit(&gContentListHashTable, &hash_table_ops,
sizeof(ContentListHashEntry));
if (!gContentListHashTable) {
gContentListHashTable =
new PLDHashTable2(&hash_table_ops, sizeof(ContentListHashEntry));
}
ContentListHashEntry *entry = nullptr;
// First we look in our hashtable. Then we create a content list if needed
if (gContentListHashTable.IsInitialized()) {
entry = static_cast<ContentListHashEntry *>
(PL_DHashTableAdd(&gContentListHashTable, &hashKey, fallible));
if (entry)
list = entry->mContentList;
}
entry = static_cast<ContentListHashEntry *>
(PL_DHashTableAdd(gContentListHashTable, &hashKey, fallible));
if (entry)
list = entry->mContentList;
if (!list) {
// We need to create a ContentList and add it to our new entry, if
@ -272,7 +270,7 @@ nsCacheableFuncStringHTMLCollection::WrapObject(JSContext *cx, JS::Handle<JSObje
}
// Hashtable for storing nsCacheableFuncStringContentList
static PLDHashTable gFuncStringContentListHashTable;
static PLDHashTable2* gFuncStringContentListHashTable;
struct FuncStringContentListHashEntry : public PLDHashEntryHdr
{
@ -321,18 +319,18 @@ GetFuncStringContentList(nsINode* aRootNode,
};
// Initialize the hashtable if needed.
if (!gFuncStringContentListHashTable.IsInitialized()) {
PL_DHashTableInit(&gFuncStringContentListHashTable, &hash_table_ops,
sizeof(FuncStringContentListHashEntry));
if (!gFuncStringContentListHashTable) {
gFuncStringContentListHashTable =
new PLDHashTable2(&hash_table_ops, sizeof(FuncStringContentListHashEntry));
}
FuncStringContentListHashEntry *entry = nullptr;
// First we look in our hashtable. Then we create a content list if needed
if (gFuncStringContentListHashTable.IsInitialized()) {
if (gFuncStringContentListHashTable) {
nsFuncStringCacheKey hashKey(aRootNode, aFunc, aString);
entry = static_cast<FuncStringContentListHashEntry *>
(PL_DHashTableAdd(&gFuncStringContentListHashTable, &hashKey, fallible));
(PL_DHashTableAdd(gFuncStringContentListHashTable, &hashKey, fallible));
if (entry) {
list = entry->mContentList;
#ifdef DEBUG
@ -970,13 +968,14 @@ nsContentList::RemoveFromHashtable()
sRecentlyUsedContentLists[recentlyUsedCacheIndex] = nullptr;
}
if (!gContentListHashTable.IsInitialized())
if (!gContentListHashTable)
return;
PL_DHashTableRemove(&gContentListHashTable, &key);
PL_DHashTableRemove(gContentListHashTable, &key);
if (gContentListHashTable.EntryCount() == 0) {
PL_DHashTableFinish(&gContentListHashTable);
if (gContentListHashTable->EntryCount() == 0) {
delete gContentListHashTable;
gContentListHashTable = nullptr;
}
}
@ -1008,15 +1007,16 @@ nsCacheableFuncStringContentList::~nsCacheableFuncStringContentList()
void
nsCacheableFuncStringContentList::RemoveFromFuncStringHashtable()
{
if (!gFuncStringContentListHashTable.IsInitialized()) {
if (!gFuncStringContentListHashTable) {
return;
}
nsFuncStringCacheKey key(mRootNode, mFunc, mString);
PL_DHashTableRemove(&gFuncStringContentListHashTable, &key);
PL_DHashTableRemove(gFuncStringContentListHashTable, &key);
if (gFuncStringContentListHashTable.EntryCount() == 0) {
PL_DHashTableFinish(&gFuncStringContentListHashTable);
if (gFuncStringContentListHashTable->EntryCount() == 0) {
delete gFuncStringContentListHashTable;
gFuncStringContentListHashTable = nullptr;
}
}
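
Editor's note: both tables in this file follow the same new lifecycle — a bare static pointer, a heap-allocated PLDHashTable2 on first use, deleted again when the last entry is removed (replacing the old in-place PL_DHashTableInit/PL_DHashTableFinish dance). Condensed sketch of the pattern, with hypothetical Entry/Key/sOps names:

    static PLDHashTable2* sTable;  // null until first use

    static Entry*
    LookupOrAddEntry(const Key& aKey)
    {
      if (!sTable) {
        sTable = new PLDHashTable2(&sOps, sizeof(Entry));  // lazy init
      }
      return static_cast<Entry*>(PL_DHashTableAdd(sTable, &aKey, fallible));
    }

    static void
    RemoveEntry(const Key& aKey)
    {
      if (!sTable) {
        return;  // already torn down
      }
      PL_DHashTableRemove(sTable, &aKey);
      if (sTable->EntryCount() == 0) {
        delete sTable;   // free the table itself once it is empty again
        sTable = nullptr;
      }
    }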

View File

@ -336,7 +336,7 @@ namespace {
static NS_DEFINE_CID(kParserServiceCID, NS_PARSERSERVICE_CID);
static NS_DEFINE_CID(kCParserCID, NS_PARSER_CID);
static PLDHashTable sEventListenerManagersHash;
static PLDHashTable2* sEventListenerManagersHash;
class DOMEventListenerManagersHashReporter final : public nsIMemoryReporter
{
@ -352,9 +352,9 @@ public:
{
// We don't measure the |EventListenerManager| objects pointed to by the
// entries because those references are non-owning.
int64_t amount = sEventListenerManagersHash.IsInitialized()
int64_t amount = sEventListenerManagersHash
? PL_DHashTableSizeOfExcludingThis(
&sEventListenerManagersHash, nullptr, MallocSizeOf)
sEventListenerManagersHash, nullptr, MallocSizeOf)
: 0;
return MOZ_COLLECT_REPORT(
@ -487,7 +487,7 @@ nsContentUtils::Init()
if (!InitializeEventTable())
return NS_ERROR_FAILURE;
if (!sEventListenerManagersHash.IsInitialized()) {
if (!sEventListenerManagersHash) {
static const PLDHashTableOps hash_table_ops =
{
PL_DHashVoidPtrKeyStub,
@ -497,8 +497,8 @@ nsContentUtils::Init()
EventListenerManagerHashInitEntry
};
PL_DHashTableInit(&sEventListenerManagersHash, &hash_table_ops,
sizeof(EventListenerManagerMapEntry));
sEventListenerManagersHash =
new PLDHashTable2(&hash_table_ops, sizeof(EventListenerManagerMapEntry));
RegisterStrongMemoryReporter(new DOMEventListenerManagersHashReporter());
}
@ -1803,8 +1803,8 @@ nsContentUtils::Shutdown()
delete sUserDefinedEvents;
sUserDefinedEvents = nullptr;
if (sEventListenerManagersHash.IsInitialized()) {
NS_ASSERTION(sEventListenerManagersHash.EntryCount() == 0,
if (sEventListenerManagersHash) {
NS_ASSERTION(sEventListenerManagersHash->EntryCount() == 0,
"Event listener manager hash not empty at shutdown!");
// See comment above.
@ -1816,8 +1816,9 @@ nsContentUtils::Shutdown()
// it could leave dangling references in DOMClassInfo's preserved
// wrapper table.
if (sEventListenerManagersHash.EntryCount() == 0) {
PL_DHashTableFinish(&sEventListenerManagersHash);
if (sEventListenerManagersHash->EntryCount() == 0) {
delete sEventListenerManagersHash;
sEventListenerManagersHash = nullptr;
}
}
@ -3982,8 +3983,8 @@ ListenerEnumerator(PLDHashTable* aTable, PLDHashEntryHdr* aEntry,
void
nsContentUtils::UnmarkGrayJSListenersInCCGenerationDocuments(uint32_t aGeneration)
{
if (sEventListenerManagersHash.IsInitialized()) {
PL_DHashTableEnumerate(&sEventListenerManagersHash, ListenerEnumerator,
if (sEventListenerManagersHash) {
PL_DHashTableEnumerate(sEventListenerManagersHash, ListenerEnumerator,
&aGeneration);
}
}
@ -3993,14 +3994,14 @@ void
nsContentUtils::TraverseListenerManager(nsINode *aNode,
nsCycleCollectionTraversalCallback &cb)
{
if (!sEventListenerManagersHash.IsInitialized()) {
if (!sEventListenerManagersHash) {
// We're already shut down, just return.
return;
}
EventListenerManagerMapEntry *entry =
static_cast<EventListenerManagerMapEntry *>
(PL_DHashTableSearch(&sEventListenerManagersHash, aNode));
(PL_DHashTableSearch(sEventListenerManagersHash, aNode));
if (entry) {
CycleCollectionNoteChild(cb, entry->mListenerManager.get(),
"[via hash] mListenerManager");
@ -4010,7 +4011,7 @@ nsContentUtils::TraverseListenerManager(nsINode *aNode,
EventListenerManager*
nsContentUtils::GetListenerManagerForNode(nsINode *aNode)
{
if (!sEventListenerManagersHash.IsInitialized()) {
if (!sEventListenerManagersHash) {
// We're already shut down, don't bother creating an event listener
// manager.
@ -4019,7 +4020,7 @@ nsContentUtils::GetListenerManagerForNode(nsINode *aNode)
EventListenerManagerMapEntry *entry =
static_cast<EventListenerManagerMapEntry *>
(PL_DHashTableAdd(&sEventListenerManagersHash, aNode, fallible));
(PL_DHashTableAdd(sEventListenerManagersHash, aNode, fallible));
if (!entry) {
return nullptr;
@ -4041,7 +4042,7 @@ nsContentUtils::GetExistingListenerManagerForNode(const nsINode *aNode)
return nullptr;
}
if (!sEventListenerManagersHash.IsInitialized()) {
if (!sEventListenerManagersHash) {
// We're already shut down, don't bother creating an event listener
// manager.
@ -4050,7 +4051,7 @@ nsContentUtils::GetExistingListenerManagerForNode(const nsINode *aNode)
EventListenerManagerMapEntry *entry =
static_cast<EventListenerManagerMapEntry *>
(PL_DHashTableSearch(&sEventListenerManagersHash, aNode));
(PL_DHashTableSearch(sEventListenerManagersHash, aNode));
if (entry) {
return entry->mListenerManager;
}
@ -4062,16 +4063,16 @@ nsContentUtils::GetExistingListenerManagerForNode(const nsINode *aNode)
void
nsContentUtils::RemoveListenerManager(nsINode *aNode)
{
if (sEventListenerManagersHash.IsInitialized()) {
if (sEventListenerManagersHash) {
EventListenerManagerMapEntry *entry =
static_cast<EventListenerManagerMapEntry *>
(PL_DHashTableSearch(&sEventListenerManagersHash, aNode));
(PL_DHashTableSearch(sEventListenerManagersHash, aNode));
if (entry) {
nsRefPtr<EventListenerManager> listenerManager;
listenerManager.swap(entry->mListenerManager);
// Remove the entry and *then* do operations that could cause further
// modification of sEventListenerManagersHash. See bug 334177.
PL_DHashTableRawRemove(&sEventListenerManagersHash, entry);
PL_DHashTableRawRemove(sEventListenerManagersHash, entry);
if (listenerManager) {
listenerManager->Disconnect();
}
@ -7749,4 +7750,4 @@ nsContentUtils::FirePageShowEvent(nsIDocShellTreeItem* aItem,
if (doc->IsShowing() == aFireIfShowing) {
doc->OnPageShow(true, aChromeEventHandler);
}
}
}

View File

@ -760,7 +760,7 @@ nsDOMMutationObserver::HandleMutation()
mozilla::dom::Sequence<mozilla::dom::OwningNonNull<nsDOMMutationRecord> >
mutations;
if (mutations.SetCapacity(mPendingMutationCount)) {
if (mutations.SetCapacity(mPendingMutationCount, mozilla::fallible)) {
// We can't use TakeRecords easily here, because it deals with a
// different type of array, and we want to optimize out any extra copying.
nsRefPtr<nsDOMMutationRecord> current;
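
Editor's note: this file and several that follow are part of the same API migration — capacity/length changes that may allocate now take an explicit mozilla::fallible argument and report OOM through their return value. A sketch of the two modes, assuming the post-migration nsTArray interface:

    #include "nsTArray.h"
    #include "mozilla/fallible.h"

    void GrowBuffers(size_t aCount)
    {
      nsTArray<uint32_t> infallible;
      infallible.SetCapacity(aCount);  // aborts the process on OOM; no check

      FallibleTArray<uint32_t> checked;
      if (!checked.SetCapacity(aCount, mozilla::fallible)) {
        // Allocation failed; the array is unchanged and we can recover.
        return;
      }
    }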

View File

@ -363,7 +363,7 @@ nsJSScriptTimeoutHandler::Init(nsGlobalWindow *aWindow, bool *aIsInterval,
uint32_t argCount = std::max(argc, 2u) - 2;
FallibleTArray<JS::Heap<JS::Value> > args;
if (!args.SetCapacity(argCount)) {
if (!args.SetCapacity(argCount, fallible)) {
// No need to drop here, since we already have a non-null mFunction
return NS_ERROR_OUT_OF_MEMORY;
}

View File

@ -5602,7 +5602,7 @@ class CGArgumentConverter(CGThing):
rooterDecl +
dedent("""
if (${argc} > ${index}) {
if (!${declName}.SetCapacity(${argc} - ${index})) {
if (!${declName}.SetCapacity(${argc} - ${index}, mozilla::fallible)) {
JS_ReportOutOfMemory(cx);
return false;
}

View File

@ -1374,7 +1374,7 @@ nsDOMCameraControl::OnFacesDetected(const nsTArray<ICameraControl::Face>& aFaces
Sequence<OwningNonNull<DOMCameraDetectedFace> > faces;
uint32_t len = aFaces.Length();
if (faces.SetCapacity(len)) {
if (faces.SetCapacity(len, fallible)) {
for (uint32_t i = 0; i < len; ++i) {
*faces.AppendElement() =
new DOMCameraDetectedFace(static_cast<DOMMediaStream*>(this), aFaces[i]);

View File

@ -19,8 +19,6 @@
#include "nsSVGEffects.h"
#include "nsPresContext.h"
#include "nsIPresShell.h"
#include "nsWidgetsCID.h"
#include "nsIAppShell.h"
#include "nsIInterfaceRequestorUtils.h"
#include "nsIFrame.h"
@ -116,7 +114,6 @@
#include "nsDeviceContext.h"
#include "nsFontMetrics.h"
#include "Units.h"
#include "mozilla/Services.h"
#undef free // apparently defined by some windows header, clashing with a free()
// method in SkTypes.h
@ -182,64 +179,6 @@ public:
NS_IMPL_ISUPPORTS(Canvas2dPixelsReporter, nsIMemoryReporter)
class CanvasShutdownObserver : public nsIObserver
{
virtual ~CanvasShutdownObserver() {}
public:
NS_DECL_ISUPPORTS
explicit CanvasShutdownObserver(CanvasRenderingContext2D* aCanvas)
: mCanvas(aCanvas)
{
nsCOMPtr<nsIObserverService> observerService =
mozilla::services::GetObserverService();
observerService->AddObserver(this, NS_XPCOM_WILL_SHUTDOWN_OBSERVER_ID, false);
}
void Shutdown() {
nsCOMPtr<nsIObserverService> observerService =
mozilla::services::GetObserverService();
observerService->RemoveObserver(this, NS_XPCOM_WILL_SHUTDOWN_OBSERVER_ID);
}
NS_IMETHOD Observe(nsISupports* aSubject,
const char* aTopic,
const char16_t* aData) override
{
mCanvas->ShutdownTaskQueue();
return NS_OK;
}
private:
CanvasRenderingContext2D* mCanvas;
};
NS_IMPL_ISUPPORTS(CanvasShutdownObserver, nsIObserver);
static NS_DEFINE_CID(kAppShellCID, NS_APPSHELL_CID);
void
CanvasRenderingContext2D::RecordCommand()
{
static uint32_t kBatchSize = 5;
if (++mPendingCommands > kBatchSize) {
mPendingCommands = 0;
FlushDelayedTarget();
return;
}
if (mScheduledFlush) {
return;
}
mScheduledFlush = true;
nsCOMPtr<nsIAppShell> appShell = do_GetService(kAppShellCID);
nsCOMPtr<nsIRunnable> r = NS_NewRunnableMethod(this, &CanvasRenderingContext2D::StableStateReached);
appShell->RunInStableState(r);
}
class CanvasRadialGradient : public CanvasGradient
{
public:
@ -454,11 +393,6 @@ public:
mCtx->CurrentState().filterAdditionalImages,
mPostFilterBounds.TopLeft() - mOffset,
DrawOptions(1.0f, mCompositionOp));
// DrawTargetCapture doesn't properly support filter nodes because they are
// mutable. Block until drawing is done to avoid races.
mCtx->FlushDelayedTarget();
mCtx->FinishDelayedRendering();
}
DrawTarget* DT()
@ -883,9 +817,6 @@ public:
if (!context || !context->mTarget)
return;
context->FlushDelayedTarget();
context->FinishDelayedRendering();
// Since SkiaGL default to store drawing command until flush
// We will have to flush it before present.
context->mTarget->Flush();
@ -1007,8 +938,6 @@ CanvasRenderingContext2D::CanvasRenderingContext2D()
, mZero(false), mOpaque(false)
, mResetLayer(true)
, mIPC(false)
, mPendingCommands(0)
, mScheduledFlush(false)
, mDrawObserver(nullptr)
, mIsEntireFrameInvalid(false)
, mPredictManyRedrawCalls(false), mPathTransformWillUpdate(false)
@ -1016,14 +945,6 @@ CanvasRenderingContext2D::CanvasRenderingContext2D()
{
sNumLivingContexts++;
#ifdef XP_MACOSX
// Restrict async rendering to OSX for now until the failures on other
// platforms get resolved.
mTaskQueue = new MediaTaskQueue(SharedThreadPool::Get(NS_LITERAL_CSTRING("Canvas Rendering"),
4));
mShutdownObserver = new CanvasShutdownObserver(this);
#endif
// The default is to use OpenGL mode
if (!gfxPlatform::GetPlatform()->UseAcceleratedSkiaCanvas()) {
mRenderingMode = RenderingMode::SoftwareBackendMode;
@ -1036,9 +957,6 @@ CanvasRenderingContext2D::CanvasRenderingContext2D()
CanvasRenderingContext2D::~CanvasRenderingContext2D()
{
if (mTaskQueue) {
ShutdownTaskQueue();
}
RemoveDrawObserver();
RemovePostRefreshObserver();
Reset();
@ -1061,19 +979,6 @@ CanvasRenderingContext2D::~CanvasRenderingContext2D()
RemoveDemotableContext(this);
}
void
CanvasRenderingContext2D::ShutdownTaskQueue()
{
mShutdownObserver->Shutdown();
mShutdownObserver = nullptr;
FlushDelayedTarget();
FinishDelayedRendering();
mTaskQueue->BeginShutdown();
mTaskQueue = nullptr;
mDelayedTarget = nullptr;
}
JSObject*
CanvasRenderingContext2D::WrapObject(JSContext *cx, JS::Handle<JSObject*> aGivenProto)
{
@ -1129,10 +1034,7 @@ CanvasRenderingContext2D::Reset()
gCanvasAzureMemoryUsed -= mWidth * mHeight * 4;
}
FinishDelayedRendering();
mTarget = nullptr;
mDelayedTarget = nullptr;
mFinalTarget = nullptr;
// reset hit regions
mHitRegionsOptions.ClearAndRetainStorage();
@ -1199,8 +1101,6 @@ CanvasRenderingContext2D::StyleColorToString(const nscolor& aColor, nsAString& a
nsresult
CanvasRenderingContext2D::Redraw()
{
RecordCommand();
if (mIsEntireFrameInvalid) {
return NS_OK;
}
@ -1222,7 +1122,6 @@ CanvasRenderingContext2D::Redraw()
void
CanvasRenderingContext2D::Redraw(const mgfx::Rect &r)
{
RecordCommand();
++mInvalidateCount;
if (mIsEntireFrameInvalid) {
@ -1245,18 +1144,6 @@ CanvasRenderingContext2D::Redraw(const mgfx::Rect &r)
mCanvasElement->InvalidateCanvasContent(&r);
}
TemporaryRef<SourceSurface>
CanvasRenderingContext2D::GetSurfaceSnapshot(bool* aPremultAlpha /* = nullptr */)
{
EnsureTarget();
if (aPremultAlpha) {
*aPremultAlpha = true;
}
FlushDelayedTarget();
FinishDelayedRendering();
return mFinalTarget->Snapshot();
}
void
CanvasRenderingContext2D::DidRefresh()
{
@ -1274,7 +1161,6 @@ CanvasRenderingContext2D::RedrawUser(const gfxRect& r)
{
if (mIsEntireFrameInvalid) {
++mInvalidateCount;
RecordCommand();
return;
}
@ -1300,7 +1186,7 @@ bool CanvasRenderingContext2D::SwitchRenderingMode(RenderingMode aRenderingMode)
}
#endif
RefPtr<SourceSurface> snapshot = GetSurfaceSnapshot();
RefPtr<SourceSurface> snapshot = mTarget->Snapshot();
RefPtr<DrawTarget> oldTarget = mTarget;
mTarget = nullptr;
mResetLayer = true;
@ -1474,7 +1360,7 @@ CanvasRenderingContext2D::EnsureTarget(RenderingMode aRenderingMode)
SkiaGLGlue* glue = gfxPlatform::GetPlatform()->GetSkiaGLGlue();
if (glue && glue->GetGrContext() && glue->GetGLContext()) {
mFinalTarget = Factory::CreateDrawTargetSkiaWithGrContext(glue->GetGrContext(), size, format);
mTarget = Factory::CreateDrawTargetSkiaWithGrContext(glue->GetGrContext(), size, format);
if (mTarget) {
AddDemotableContext(this);
} else {
@ -1483,31 +1369,19 @@ CanvasRenderingContext2D::EnsureTarget(RenderingMode aRenderingMode)
}
}
#endif
if (!mFinalTarget) {
mFinalTarget = layerManager->CreateDrawTarget(size, format);
if (!mTarget) {
mTarget = layerManager->CreateDrawTarget(size, format);
}
} else {
mFinalTarget = layerManager->CreateDrawTarget(size, format);
mTarget = layerManager->CreateDrawTarget(size, format);
mode = RenderingMode::SoftwareBackendMode;
}
} else {
mFinalTarget = gfxPlatform::GetPlatform()->CreateOffscreenCanvasDrawTarget(size, format);
mTarget = gfxPlatform::GetPlatform()->CreateOffscreenCanvasDrawTarget(size, format);
mode = RenderingMode::SoftwareBackendMode;
}
}
// Restrict async canvas drawing to OSX for now since we get test failures
// on other platforms.
if (mFinalTarget) {
#ifdef XP_MACOSX
mTarget = mDelayedTarget = mFinalTarget->CreateCaptureDT(size);
#else
mTarget = mFinalTarget;
#endif
}
mPendingCommands = 0;
if (mTarget) {
static bool registered = false;
if (!registered) {
@ -1541,7 +1415,7 @@ CanvasRenderingContext2D::EnsureTarget(RenderingMode aRenderingMode)
Redraw();
} else {
EnsureErrorTarget();
mTarget = mFinalTarget = sErrorTarget;
mTarget = sErrorTarget;
}
return mode;
@ -1561,51 +1435,6 @@ CanvasRenderingContext2D::GetHeight() const
}
#endif
class DrawCaptureTask : public nsRunnable
{
public:
DrawCaptureTask(DrawTargetCapture *aReplay, DrawTarget* aDest)
: mReplay(aReplay)
, mDest(aDest)
{
}
NS_IMETHOD Run()
{
mDest->DrawCapturedDT(mReplay, Matrix());
return NS_OK;
}
private:
RefPtr<DrawTargetCapture> mReplay;
RefPtr<DrawTarget> mDest;
};
void
CanvasRenderingContext2D::FlushDelayedTarget()
{
if (!mDelayedTarget) {
return;
}
mPendingCommands = 0;
nsCOMPtr<nsIRunnable> task = new DrawCaptureTask(mDelayedTarget, mFinalTarget);
mTaskQueue->Dispatch(task.forget());
mDelayedTarget = mFinalTarget->CreateCaptureDT(IntSize(mWidth, mHeight));
mDelayedTarget->SetTransform(mTarget->GetTransform());
mTarget = mDelayedTarget;
}
void
CanvasRenderingContext2D::FinishDelayedRendering()
{
if (mTaskQueue) {
mTaskQueue->AwaitIdle();
}
}
NS_IMETHODIMP
CanvasRenderingContext2D::SetDimensions(int32_t width, int32_t height)
{
@ -1755,7 +1584,7 @@ CanvasRenderingContext2D::GetImageBuffer(uint8_t** aImageBuffer,
*aFormat = 0;
EnsureTarget();
RefPtr<SourceSurface> snapshot = GetSurfaceSnapshot();
RefPtr<SourceSurface> snapshot = mTarget->Snapshot();
if (!snapshot) {
return;
}
@ -2174,7 +2003,7 @@ CanvasRenderingContext2D::CreatePattern(const HTMLImageOrCanvasOrVideoElement& e
// of animated images
nsLayoutUtils::SurfaceFromElementResult res =
nsLayoutUtils::SurfaceFromElement(htmlElement,
nsLayoutUtils::SFE_WANT_FIRST_FRAME, mFinalTarget);
nsLayoutUtils::SFE_WANT_FIRST_FRAME, mTarget);
if (!res.mSourceSurface) {
error.Throw(NS_ERROR_NOT_AVAILABLE);
@ -4485,7 +4314,7 @@ CanvasRenderingContext2D::DrawImage(const HTMLImageOrCanvasOrVideoElement& image
nsLayoutUtils::SurfaceFromElementResult res =
CachedSurfaceFromElement(element);
if (!res.mSourceSurface)
res = nsLayoutUtils::SurfaceFromElement(element, sfeFlags, mFinalTarget);
res = nsLayoutUtils::SurfaceFromElement(element, sfeFlags, mTarget);
if (!res.mSourceSurface && !res.mDrawInfo.mImgContainer) {
// The spec says to silently do nothing in the following cases:
@ -4829,12 +4658,7 @@ CanvasRenderingContext2D::DrawWindow(nsGlobalWindow& window, double x,
if (gfxPlatform::GetPlatform()->SupportsAzureContentForDrawTarget(mTarget) &&
GlobalAlpha() == 1.0f)
{
// Complete any async rendering and use synchronous rendering for DrawWindow
// until we're confident it works for all content.
FlushDelayedTarget();
FinishDelayedRendering();
thebes = new gfxContext(mFinalTarget);
thebes = new gfxContext(mTarget);
thebes->SetMatrix(gfxMatrix(matrix._11, matrix._12, matrix._21,
matrix._22, matrix._31, matrix._32));
} else {
@ -5091,7 +4915,7 @@ CanvasRenderingContext2D::GetImageDataArray(JSContext* aCx,
IntRect srcReadRect = srcRect.Intersect(destRect);
RefPtr<DataSourceSurface> readback;
if (!srcReadRect.IsEmpty() && !mZero) {
RefPtr<SourceSurface> snapshot = GetSurfaceSnapshot();
RefPtr<SourceSurface> snapshot = mTarget->Snapshot();
if (snapshot) {
readback = snapshot->GetDataSurface();
}
@ -5477,7 +5301,7 @@ CanvasRenderingContext2D::GetCanvasLayer(nsDisplayListBuilder* aBuilder,
return nullptr;
}
FlushDelayedTarget();
mTarget->Flush();
if (!mResetLayer && aOldLayer) {
CanvasRenderingContext2DUserData* userData =
@ -5526,8 +5350,6 @@ CanvasRenderingContext2D::GetCanvasLayer(nsDisplayListBuilder* aBuilder,
userData = new CanvasRenderingContext2DUserData(this);
canvasLayer->SetDidTransactionCallback(
CanvasRenderingContext2DUserData::DidTransactionCallback, userData);
canvasLayer->SetPreTransactionCallback(
CanvasRenderingContext2DUserData::PreTransactionCallback, userData);
canvasLayer->SetUserData(&g2DContextLayerUserData, userData);
CanvasLayer::Data data;
@ -5536,13 +5358,16 @@ CanvasRenderingContext2D::GetCanvasLayer(nsDisplayListBuilder* aBuilder,
GLuint skiaGLTex = SkiaGLTex();
if (skiaGLTex) {
canvasLayer->SetPreTransactionCallback(
CanvasRenderingContext2DUserData::PreTransactionCallback, userData);
SkiaGLGlue* glue = gfxPlatform::GetPlatform()->GetSkiaGLGlue();
MOZ_ASSERT(glue);
data.mGLContext = glue->GetGLContext();
data.mFrontbufferGLTex = skiaGLTex;
} else {
data.mDrawTarget = mFinalTarget;
data.mDrawTarget = mTarget;
}
canvasLayer->Initialize(data);
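
Editor's note: this file backs out the deferred (capture-and-replay) canvas pipeline — mDelayedTarget/mFinalTarget, the MediaTaskQueue, the stable-state flushes, and the shutdown observer all go away — and every consumer reads the single mTarget synchronously again. The restored read path, in sketch form (it mirrors the GetImageDataArray hunk above):

    // Synchronous readback, as restored by this backout (sketch).
    RefPtr<SourceSurface> snapshot = mTarget->Snapshot();
    RefPtr<DataSourceSurface> readback = snapshot->GetDataSurface();
    // No FlushDelayedTarget()/FinishDelayedRendering() barriers are needed:
    // there is no longer a separate rendering thread to wait on.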

View File

@ -10,7 +10,6 @@
#include "nsIDOMCanvasRenderingContext2D.h"
#include "nsICanvasRenderingContextInternal.h"
#include "mozilla/RefPtr.h"
#include "mozilla/Monitor.h"
#include "nsColor.h"
#include "mozilla/dom/HTMLCanvasElement.h"
#include "mozilla/dom/HTMLVideoElement.h"
@ -28,7 +27,6 @@
#include "mozilla/EnumeratedArray.h"
#include "FilterSupport.h"
#include "nsSVGEffects.h"
#include "MediaTaskQueue.h"
class nsGlobalWindow;
class nsXULElement;
@ -54,7 +52,6 @@ template<typename T> class Optional;
struct CanvasBidiProcessor;
class CanvasRenderingContext2DUserData;
class CanvasDrawObserver;
class CanvasShutdownObserver;
/**
** CanvasRenderingContext2D
@ -445,7 +442,14 @@ public:
const char16_t* aEncoderOptions,
nsIInputStream **aStream) override;
mozilla::TemporaryRef<mozilla::gfx::SourceSurface> GetSurfaceSnapshot(bool* aPremultAlpha = nullptr) override;
mozilla::TemporaryRef<mozilla::gfx::SourceSurface> GetSurfaceSnapshot(bool* aPremultAlpha = nullptr) override
{
EnsureTarget();
if (aPremultAlpha) {
*aPremultAlpha = true;
}
return mTarget->Snapshot();
}
NS_IMETHOD SetIsOpaque(bool isOpaque) override;
bool GetIsOpaque() override { return mOpaque; }
@ -517,7 +521,6 @@ public:
}
friend class CanvasRenderingContext2DUserData;
friend class CanvasShutdownObserver;
virtual void GetImageBuffer(uint8_t** aImageBuffer, int32_t* aFormat) override;
@ -529,21 +532,6 @@ public:
// return true and fills in the bound rect if element has a hit region.
bool GetHitRegionRect(Element* aElement, nsRect& aRect) override;
/**
* Deferred rendering functions
*/
/**
* Called when the event loop reaches a stable
* state, and trigger us to flush any outstanding
* commands to the rendering thread.
*/
void StableStateReached()
{
mScheduledFlush = false;
FlushDelayedTarget();
}
protected:
nsresult GetImageDataArray(JSContext* aCx, int32_t aX, int32_t aY,
uint32_t aWidth, uint32_t aHeight,
@ -562,8 +550,6 @@ protected:
nsresult InitializeWithTarget(mozilla::gfx::DrawTarget *surface,
int32_t width, int32_t height);
void ShutdownTaskQueue();
/**
* The number of living nsCanvasRenderingContexts. When this goes down to
* 0, we free the premultiply and unpremultiply tables, if they exist.
@ -727,54 +713,6 @@ protected:
// sErrorTarget.
mozilla::RefPtr<mozilla::gfx::DrawTarget> mTarget;
/**
* Deferred rendering implementation
*/
// If we are using deferred rendering, then this is the current
// deferred rendering target. It is the same pointer as mTarget.
mozilla::RefPtr<mozilla::gfx::DrawTargetCapture> mDelayedTarget;
// If we are using deferred rendering, then this is the actual destination
// buffer.
mozilla::RefPtr<mozilla::gfx::DrawTarget> mFinalTarget;
/**
* Add the current DelayedDrawTarget to the rendering queue,
* schedule a rendering job if required, and create a new
* DelayedDrawTarget.
*/
void FlushDelayedTarget();
/**
* Make sure all commands have been flushed to
* the rendering thread, and block until they
* are completed.
*/
void FinishDelayedRendering();
/**
* Called when a command is added to the current
* delayed draw target.
*
* Either flushes the current batch of commands to
* the rendering thread, or ensures that this happens
* the next time the event loop reaches a stable state.
*/
void RecordCommand();
// The number of commands currently waiting to be sent
// to the rendering thread.
uint32_t mPendingCommands;
// True if we have scheduled FlushDelayedTarget to be
// called in the next browser stable state.
bool mScheduledFlush;
nsRefPtr<MediaTaskQueue> mTaskQueue;
nsRefPtr<CanvasShutdownObserver> mShutdownObserver;
uint32_t SkiaGLTex() const;
// This observes our draw calls at the beginning of the canvas

View File

@ -376,7 +376,7 @@ WebGLElementArrayCacheTree<T>::Update(size_t firstByte, size_t lastByte)
// Step #0: If needed, resize our tree data storage.
if (requiredNumLeaves != NumLeaves()) {
// See the class comment for why the tree storage size is 2 * numLeaves.
if (!mTreeData.SetLength(2 * requiredNumLeaves)) {
if (!mTreeData.SetLength(2 * requiredNumLeaves, fallible)) {
mTreeData.SetLength(0);
return false;
}
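This hunk is the first of many in this merge that switch nsTArray calls to the explicit fallible overloads. A minimal sketch of the idiom, assuming Gecko's nsTArray.h and mozilla/fallible.h (GrowTreeStorage is a hypothetical stand-in for the pattern above): grow with the fallible overload, and roll the array back to a known-empty state if the allocation fails.

#include "nsTArray.h"
#include "mozilla/fallible.h"

using mozilla::fallible;

// Hypothetical helper mirroring the hunk above: request the new length with
// the fallible overload; on failure, shrink back to empty (shrinking never
// allocates, so the plain SetLength(0) cannot fail).
static bool
GrowTreeStorage(FallibleTArray<uint8_t>& aTreeData, size_t aRequiredNumLeaves)
{
  if (!aTreeData.SetLength(2 * aRequiredNumLeaves, fallible)) {
    aTreeData.SetLength(0);
    return false;
  }
  return true;
}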
@ -470,7 +470,7 @@ bool
WebGLElementArrayCache::BufferData(const void* ptr, size_t byteLength)
{
if (mBytes.Length() != byteLength) {
if (!mBytes.SetLength(byteLength)) {
if (!mBytes.SetLength(byteLength, fallible)) {
mBytes.SetLength(0);
return false;
}

View File

@ -17,13 +17,14 @@ CryptoBuffer::Assign(const CryptoBuffer& aData)
{
// Same as in nsTArray_Impl::operator=, but return the value
// returned from ReplaceElementsAt to enable OOM detection
return ReplaceElementsAt(0, Length(), aData.Elements(), aData.Length());
return ReplaceElementsAt(0, Length(), aData.Elements(), aData.Length(),
fallible);
}
uint8_t*
CryptoBuffer::Assign(const uint8_t* aData, uint32_t aLength)
{
return ReplaceElementsAt(0, Length(), aData, aLength);
return ReplaceElementsAt(0, Length(), aData, aLength, fallible);
}
uint8_t*
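The comment above is the reason Assign forwards ReplaceElementsAt's fallible result instead of discarding it: a null return tells the caller the copy never happened. A hedged caller-side sketch (CheckedAssign is hypothetical and not in the tree):

// Hypothetical caller: a null return from Assign means the fallible
// ReplaceElementsAt could not allocate, so the copy did not happen.
nsresult
CheckedAssign(CryptoBuffer& aDest, const uint8_t* aData, uint32_t aLength)
{
  if (!aDest.Assign(aData, aLength)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }
  return NS_OK;
}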

View File

@ -152,7 +152,7 @@ ReadBuffer(JSStructuredCloneReader* aReader, CryptoBuffer& aBuffer)
}
if (length > 0) {
if (!aBuffer.SetLength(length)) {
if (!aBuffer.SetLength(length, fallible)) {
return false;
}
ret = JS_ReadBytes(aReader, aBuffer.Elements(), aBuffer.Length());

View File

@ -555,7 +555,7 @@ private:
// Initialize the output buffer (enough space for padding / a full tag)
uint32_t dataLen = mData.Length();
uint32_t maxLen = dataLen + 16;
if (!mResult.SetLength(maxLen)) {
if (!mResult.SetLength(maxLen, fallible)) {
return NS_ERROR_DOM_UNKNOWN_ERR;
}
uint32_t outLen = 0;
@ -679,7 +679,7 @@ private:
// Encrypt and return the wrapped key
// AES-KW encryption results in a wrapped key 64 bits longer
if (!mResult.SetLength(mData.Length() + 8)) {
if (!mResult.SetLength(mData.Length() + 8, fallible)) {
return NS_ERROR_DOM_OPERATION_ERR;
}
SECItem resultItem = {siBuffer, mResult.Elements(),
@ -811,7 +811,7 @@ private:
// Ciphertext is an integer mod the modulus, so it will be
// no longer than mStrength octets
if (!mResult.SetLength(mStrength)) {
if (!mResult.SetLength(mStrength, fallible)) {
return NS_ERROR_DOM_UNKNOWN_ERR;
}
@ -901,7 +901,7 @@ private:
virtual nsresult DoCrypto() override
{
// Initialize the output buffer
if (!mResult.SetLength(HASH_LENGTH_MAX)) {
if (!mResult.SetLength(HASH_LENGTH_MAX, fallible)) {
return NS_ERROR_DOM_UNKNOWN_ERR;
}
@ -1183,7 +1183,7 @@ private:
{
// Resize the result buffer
uint32_t hashLen = HASH_ResultLenByOidTag(mOidTag);
if (!mResult.SetLength(hashLen)) {
if (!mResult.SetLength(hashLen, fallible)) {
return NS_ERROR_DOM_UNKNOWN_ERR;
}
@ -2597,7 +2597,7 @@ private:
return NS_ERROR_DOM_DATA_ERR;
}
if (!mResult.SetLength(mLength)) {
if (!mResult.SetLength(mLength, fallible)) {
return NS_ERROR_DOM_UNKNOWN_ERR;
}
@ -2696,7 +2696,7 @@ private:
return NS_ERROR_DOM_DATA_ERR;
}
if (!mResult.SetLength(mLength)) {
if (!mResult.SetLength(mLength, fallible)) {
return NS_ERROR_DOM_UNKNOWN_ERR;
}

View File

@ -2716,7 +2716,8 @@ InsertIndexDataValuesFunction::OnFunctionCall(mozIStorageValueArray* aValues,
}
// Update the array with the new addition.
if (NS_WARN_IF(!indexValues.SetCapacity(indexValues.Length() + 1))) {
if (NS_WARN_IF(!indexValues.SetCapacity(indexValues.Length() + 1,
fallible))) {
IDB_REPORT_INTERNAL_ERR();
return NS_ERROR_OUT_OF_MEMORY;
}
@ -8108,14 +8109,15 @@ ConvertBlobsToActors(PBackgroundParent* aBackgroundActor,
const uint32_t count = aFiles.Length();
if (NS_WARN_IF(!aActors.SetCapacity(count))) {
if (NS_WARN_IF(!aActors.SetCapacity(count, fallible))) {
return NS_ERROR_OUT_OF_MEMORY;
}
const bool collectFileInfos =
!BackgroundParent::IsOtherProcessActor(aBackgroundActor);
if (collectFileInfos && NS_WARN_IF(!aFileInfos.SetCapacity(count))) {
if (collectFileInfos &&
NS_WARN_IF(!aFileInfos.SetCapacity(count, fallible))) {
return NS_ERROR_OUT_OF_MEMORY;
}
@ -11579,7 +11581,7 @@ Database::Invalidate()
}
FallibleTArray<nsRefPtr<TransactionBase>> transactions;
if (NS_WARN_IF(!transactions.SetCapacity(count))) {
if (NS_WARN_IF(!transactions.SetCapacity(count, fallible))) {
return false;
}
@ -11927,7 +11929,7 @@ Database::AllocPBackgroundIDBTransactionParent(
}
FallibleTArray<nsRefPtr<FullObjectStoreMetadata>> fallibleObjectStores;
if (NS_WARN_IF(!fallibleObjectStores.SetCapacity(nameCount))) {
if (NS_WARN_IF(!fallibleObjectStores.SetCapacity(nameCount, fallible))) {
return nullptr;
}
@ -15769,7 +15771,7 @@ DatabaseOperationBase::GetStructuredCloneReadInfoFromBlob(
}
AutoFallibleTArray<uint8_t, 512> uncompressed;
if (NS_WARN_IF(!uncompressed.SetLength(uncompressedLength))) {
if (NS_WARN_IF(!uncompressed.SetLength(uncompressedLength, fallible))) {
return NS_ERROR_OUT_OF_MEMORY;
}
@ -15956,7 +15958,7 @@ DatabaseOperationBase::IndexDataValuesFromUpdateInfos(
return NS_OK;
}
if (NS_WARN_IF(!aIndexValues.SetCapacity(count))) {
if (NS_WARN_IF(!aIndexValues.SetCapacity(count, fallible))) {
IDB_REPORT_INTERNAL_ERR();
return NS_ERROR_OUT_OF_MEMORY;
}
@ -20390,7 +20392,7 @@ UpdateIndexDataValuesFunction::OnFunctionCall(mozIStorageValueArray* aValues,
const uint32_t updateInfoCount = updateInfos.Length();
if (NS_WARN_IF(!indexValues.SetCapacity(indexValues.Length() +
updateInfoCount))) {
updateInfoCount, fallible))) {
IDB_REPORT_INTERNAL_ERR();
return NS_ERROR_OUT_OF_MEMORY;
}
@ -21145,7 +21147,7 @@ ObjectStoreAddOrPutRequestOp::Init(TransactionBase* aTransaction)
if (!files.IsEmpty()) {
const uint32_t count = files.Length();
if (NS_WARN_IF(!mStoredFileInfos.SetCapacity(count))) {
if (NS_WARN_IF(!mStoredFileInfos.SetCapacity(count, fallible))) {
return false;
}
@ -21754,7 +21756,8 @@ ObjectStoreGetRequestOp::GetResponse(RequestResponse& aResponse)
if (!mResponse.IsEmpty()) {
FallibleTArray<SerializedStructuredCloneReadInfo> fallibleCloneInfos;
if (NS_WARN_IF(!fallibleCloneInfos.SetLength(mResponse.Length()))) {
if (NS_WARN_IF(!fallibleCloneInfos.SetLength(mResponse.Length(),
fallible))) {
aResponse = NS_ERROR_OUT_OF_MEMORY;
return;
}
@ -22302,7 +22305,8 @@ IndexGetRequestOp::GetResponse(RequestResponse& aResponse)
if (!mResponse.IsEmpty()) {
FallibleTArray<SerializedStructuredCloneReadInfo> fallibleCloneInfos;
if (NS_WARN_IF(!fallibleCloneInfos.SetLength(mResponse.Length()))) {
if (NS_WARN_IF(!fallibleCloneInfos.SetLength(mResponse.Length(),
fallible))) {
aResponse = NS_ERROR_OUT_OF_MEMORY;
return;
}

View File

@ -1178,7 +1178,8 @@ IDBObjectStore::AddOrPut(JSContext* aCx,
}
FallibleTArray<uint8_t> cloneData;
if (NS_WARN_IF(!cloneData.SetLength(cloneWriteInfo.mCloneBuffer.nbytes()))) {
if (NS_WARN_IF(!cloneData.SetLength(cloneWriteInfo.mCloneBuffer.nbytes(),
fallible))) {
aRv = NS_ERROR_OUT_OF_MEMORY;
return nullptr;
}
@ -1206,7 +1207,7 @@ IDBObjectStore::AddOrPut(JSContext* aCx,
const uint32_t count = blobOrFileInfos.Length();
FallibleTArray<DatabaseFileOrMutableFileId> fileActorOrMutableFileIds;
if (NS_WARN_IF(!fileActorOrMutableFileIds.SetCapacity(count))) {
if (NS_WARN_IF(!fileActorOrMutableFileIds.SetCapacity(count, fallible))) {
aRv = NS_ERROR_OUT_OF_MEMORY;
return nullptr;
}

View File

@ -968,7 +968,7 @@ CreateBlobImpl(const nsTArray<BlobData>& aBlobDatas,
}
FallibleTArray<nsRefPtr<BlobImpl>> fallibleBlobImpls;
if (NS_WARN_IF(!fallibleBlobImpls.SetLength(aBlobDatas.Length()))) {
if (NS_WARN_IF(!fallibleBlobImpls.SetLength(aBlobDatas.Length(), fallible))) {
return nullptr;
}

View File

@ -226,7 +226,7 @@ public:
}
NS_IMETHODIMP
Notify(nsITimer* aTimer)
Notify(nsITimer* aTimer) override
{
nsresult rv = TakeSnapshot();
if (NS_FAILED(rv)) {

View File

@ -184,4 +184,30 @@ OutputStreamData::Init(MediaDecoder* aDecoder, ProcessedMediaStream* aStream)
aStream->AddListener(mListener);
}
DecodedStreamData*
DecodedStream::GetData()
{
return mData.get();
}
void
DecodedStream::DestroyData()
{
mData = nullptr;
}
void
DecodedStream::RecreateData(MediaDecoder* aDecoder, int64_t aInitialTime,
SourceMediaStream* aStream)
{
MOZ_ASSERT(!mData);
mData.reset(new DecodedStreamData(aDecoder, aInitialTime, aStream));
}
nsTArray<OutputStreamData>&
DecodedStream::OutputStreams()
{
return mOutputStreams;
}
} // namespace mozilla

View File

@ -8,6 +8,8 @@
#define DecodedStream_h_
#include "nsRefPtr.h"
#include "nsTArray.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/gfx/Point.h"
namespace mozilla {
@ -94,6 +96,20 @@ public:
nsRefPtr<OutputStreamListener> mListener;
};
class DecodedStream {
public:
DecodedStreamData* GetData();
void DestroyData();
void RecreateData(MediaDecoder* aDecoder, int64_t aInitialTime,
SourceMediaStream* aStream);
nsTArray<OutputStreamData>& OutputStreams();
private:
UniquePtr<DecodedStreamData> mData;
// Data about MediaStreams that are being fed by the decoder.
nsTArray<OutputStreamData> mOutputStreams;
};
} // namespace mozilla
#endif // DecodedStream_h_
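The new DecodedStream wrapper owns its DecodedStreamData through a UniquePtr, so teardown is just a pointer reset. A hedged sketch of the lifecycle these accessors imply (the helper and its arguments are illustrative, not part of the patch):

// Illustrative lifecycle: RecreateData asserts that no data exists yet, so
// any previous data must be destroyed first; DestroyData simply nulls the
// UniquePtr, which deletes the old DecodedStreamData.
void
ResetDecodedStream(DecodedStream& aStream, MediaDecoder* aDecoder,
                   int64_t aInitialTime, SourceMediaStream* aSource)
{
  aStream.DestroyData();
  aStream.RecreateData(aDecoder, aInitialTime, aSource);
  MOZ_ASSERT(aStream.GetData());
}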

View File

@ -3,6 +3,7 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "MediaData.h"
#include "MediaInfo.h"
#ifdef MOZ_OMX_DECODER
@ -505,8 +506,10 @@ MediaRawData::MediaRawData(const uint8_t* aData, size_t aSize)
if (!EnsureCapacity(aSize)) {
return;
}
mBuffer->AppendElements(aData, aSize);
mBuffer->AppendElements(RAW_DATA_ALIGNMENT);
// We ensure sufficient capacity above so this shouldn't fail.
MOZ_ALWAYS_TRUE(mBuffer->AppendElements(aData, aSize));
MOZ_ALWAYS_TRUE(mBuffer->AppendElements(RAW_DATA_ALIGNMENT));
mSize = aSize;
}
@ -525,8 +528,10 @@ MediaRawData::Clone() const
if (!s->EnsureCapacity(mSize)) {
return nullptr;
}
s->mBuffer->AppendElements(mData, mSize);
s->mBuffer->AppendElements(RAW_DATA_ALIGNMENT);
// We ensure sufficient capacity above so this shouldn't fail.
MOZ_ALWAYS_TRUE(s->mBuffer->AppendElements(mData, mSize));
MOZ_ALWAYS_TRUE(s->mBuffer->AppendElements(RAW_DATA_ALIGNMENT));
s->mSize = mSize;
}
return s.forget();
@ -538,7 +543,7 @@ MediaRawData::EnsureCapacity(size_t aSize)
if (mData && mBuffer->Capacity() >= aSize + RAW_DATA_ALIGNMENT * 2) {
return true;
}
if (!mBuffer->SetCapacity(aSize + RAW_DATA_ALIGNMENT * 2)) {
if (!mBuffer->SetCapacity(aSize + RAW_DATA_ALIGNMENT * 2, fallible)) {
return false;
}
// Find alignment address.
@ -555,7 +560,8 @@ MediaRawData::EnsureCapacity(size_t aSize)
if (shift == 0) {
// Nothing to do.
} else if (shift > 0) {
mBuffer->InsertElementsAt(oldpadding, shift);
// We ensure sufficient capacity above so this shouldn't fail.
MOZ_ALWAYS_TRUE(mBuffer->InsertElementsAt(oldpadding, shift, fallible));
} else {
mBuffer->RemoveElementsAt(mPadding, -shift);
}
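The shift computation above moves the existing payload so it starts at an aligned address inside the (possibly reallocated) buffer. A self-contained worked example of the underlying padding arithmetic; the 32-byte alignment here is illustrative, not necessarily RAW_DATA_ALIGNMENT's real value.

#include <cstddef>
#include <cstdint>

// If storage starts at 0x1005 and we want 32-byte alignment (mask 0x1F),
// the first aligned address is 0x1020, i.e. 0x1B bytes of leading padding.
// If the old padding was 0x10, the payload must shift by +0x0B.
static size_t
ComputePadding(uintptr_t aBufferStart, uintptr_t aAlignmentMask)
{
  uintptr_t aligned = (aBufferStart + aAlignmentMask) & ~aAlignmentMask;
  return static_cast<size_t>(aligned - aBufferStart);
}
// ComputePadding(0x1005, 0x1F) == 0x1B; shift = 0x1B - 0x10 = 0x0B.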
@ -616,8 +622,11 @@ MediaRawDataWriter::SetSize(size_t aSize)
if (aSize > mTarget->mSize && !EnsureSize(aSize)) {
return false;
}
// Pad our buffer.
mBuffer->SetLength(aSize + mTarget->mPadding + RAW_DATA_ALIGNMENT);
// Pad our buffer. We ensure sufficient capacity above so this shouldn't fail.
MOZ_ALWAYS_TRUE(
mBuffer->SetLength(aSize + mTarget->mPadding + RAW_DATA_ALIGNMENT,
fallible));
mTarget->mSize = mSize = aSize;
return true;
}
@ -628,7 +637,9 @@ MediaRawDataWriter::Prepend(const uint8_t* aData, size_t aSize)
if (!EnsureSize(aSize + mTarget->mSize)) {
return false;
}
mBuffer->InsertElementsAt(mTarget->mPadding, aData, aSize);
// We ensure sufficient capacity above so this shouldn't fail.
MOZ_ALWAYS_TRUE(mBuffer->InsertElementsAt(mTarget->mPadding, aData, aSize));
mTarget->mSize += aSize;
mSize = mTarget->mSize;
return true;
@ -640,7 +651,10 @@ MediaRawDataWriter::Replace(const uint8_t* aData, size_t aSize)
if (!EnsureSize(aSize)) {
return false;
}
mBuffer->ReplaceElementsAt(mTarget->mPadding, mTarget->mSize, aData, aSize);
// We ensure sufficient capacity above so this shouldn't fail.
MOZ_ALWAYS_TRUE(mBuffer->ReplaceElementsAt(mTarget->mPadding, mTarget->mSize,
aData, aSize, fallible));
mTarget->mSize = mSize = aSize;
return true;
}
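All of the MOZ_ALWAYS_TRUE additions in this file share one reserve-then-assert shape: SetCapacity is the only call that can genuinely fail, and once it succeeds the later fallible mutations cannot. A minimal sketch, assuming Gecko's nsTArray.h and mozilla/Assertions.h (AppendChecked is a hypothetical condensation, not the file's real API):

// Reserve capacity fallibly, then wrap the appends in MOZ_ALWAYS_TRUE so the
// bool result is asserted in debug builds and still evaluated, never silently
// discarded, in release builds.
static bool
AppendChecked(FallibleTArray<uint8_t>& aBuffer,
              const uint8_t* aData, size_t aSize, size_t aPadding)
{
  if (!aBuffer.SetCapacity(aSize + aPadding, mozilla::fallible)) {
    return false;
  }
  MOZ_ALWAYS_TRUE(aBuffer.AppendElements(aData, aSize, mozilla::fallible));
  MOZ_ALWAYS_TRUE(aBuffer.AppendElements(aPadding, mozilla::fallible));
  return true;
}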

View File

@ -295,7 +295,7 @@ void MediaDecoder::ConnectDecodedStreamToOutputStream(OutputStreamData* aStream)
// The output stream must stay in sync with the decoded stream, so if
// either stream is blocked, we block the other.
aStream->mPort = aStream->mStream->AllocateInputPort(mDecodedStream->mStream,
aStream->mPort = aStream->mStream->AllocateInputPort(GetDecodedStream()->mStream,
MediaInputPort::FLAG_BLOCK_INPUT | MediaInputPort::FLAG_BLOCK_OUTPUT);
// Unblock the output stream now. While it's connected to mDecodedStream,
// mDecodedStream is responsible for controlling blocking.
@ -307,11 +307,11 @@ void MediaDecoder::UpdateDecodedStream()
MOZ_ASSERT(NS_IsMainThread());
GetReentrantMonitor().AssertCurrentThreadIn();
if (mDecodedStream) {
if (GetDecodedStream()) {
bool blockForPlayState = mPlayState != PLAY_STATE_PLAYING || mLogicallySeeking;
if (mDecodedStream->mHaveBlockedForPlayState != blockForPlayState) {
mDecodedStream->mStream->ChangeExplicitBlockerCount(blockForPlayState ? 1 : -1);
mDecodedStream->mHaveBlockedForPlayState = blockForPlayState;
if (GetDecodedStream()->mHaveBlockedForPlayState != blockForPlayState) {
GetDecodedStream()->mStream->ChangeExplicitBlockerCount(blockForPlayState ? 1 : -1);
GetDecodedStream()->mHaveBlockedForPlayState = blockForPlayState;
}
}
}
@ -328,8 +328,9 @@ void MediaDecoder::DestroyDecodedStream()
// All streams are having their SourceMediaStream disconnected, so they
// need to be explicitly blocked again.
for (int32_t i = mOutputStreams.Length() - 1; i >= 0; --i) {
OutputStreamData& os = mOutputStreams[i];
auto& outputStreams = OutputStreams();
for (int32_t i = outputStreams.Length() - 1; i >= 0; --i) {
OutputStreamData& os = outputStreams[i];
// Explicitly remove all existing ports.
// This is not strictly necessary but it's good form.
MOZ_ASSERT(os.mPort, "Double-delete of the ports!");
@ -340,32 +341,32 @@ void MediaDecoder::DestroyDecodedStream()
// be careful not to send any messages after the Destroy().
if (os.mStream->IsDestroyed()) {
// Probably the DOM MediaStream was GCed. Clean up.
mOutputStreams.RemoveElementAt(i);
outputStreams.RemoveElementAt(i);
} else {
os.mStream->ChangeExplicitBlockerCount(1);
}
}
mDecodedStream = nullptr;
mDecodedStream.DestroyData();
}
void MediaDecoder::UpdateStreamBlockingForStateMachinePlaying()
{
GetReentrantMonitor().AssertCurrentThreadIn();
if (!mDecodedStream) {
if (!GetDecodedStream()) {
return;
}
bool blockForStateMachineNotPlaying =
mDecoderStateMachine && !mDecoderStateMachine->IsPlaying() &&
mDecoderStateMachine->GetState() != MediaDecoderStateMachine::DECODER_STATE_COMPLETED;
if (blockForStateMachineNotPlaying != mDecodedStream->mHaveBlockedForStateMachineNotPlaying) {
mDecodedStream->mHaveBlockedForStateMachineNotPlaying = blockForStateMachineNotPlaying;
if (blockForStateMachineNotPlaying != GetDecodedStream()->mHaveBlockedForStateMachineNotPlaying) {
GetDecodedStream()->mHaveBlockedForStateMachineNotPlaying = blockForStateMachineNotPlaying;
int32_t delta = blockForStateMachineNotPlaying ? 1 : -1;
if (NS_IsMainThread()) {
mDecodedStream->mStream->ChangeExplicitBlockerCount(delta);
GetDecodedStream()->mStream->ChangeExplicitBlockerCount(delta);
} else {
nsCOMPtr<nsIRunnable> runnable =
NS_NewRunnableMethodWithArg<int32_t>(mDecodedStream->mStream.get(),
NS_NewRunnableMethodWithArg<int32_t>(GetDecodedStream()->mStream.get(),
&MediaStream::ChangeExplicitBlockerCount, delta);
NS_DispatchToMainThread(runnable);
}
@ -380,28 +381,27 @@ void MediaDecoder::RecreateDecodedStream(int64_t aStartTimeUSecs,
DECODER_LOG("RecreateDecodedStream aStartTimeUSecs=%lld!", aStartTimeUSecs);
if (!aGraph) {
aGraph = mDecodedStream->mStream->Graph();
aGraph = GetDecodedStream()->mStream->Graph();
}
DestroyDecodedStream();
mDecodedStream = new DecodedStreamData(this,
aStartTimeUSecs,
aGraph->CreateSourceStream(nullptr));
mDecodedStream.RecreateData(this, aStartTimeUSecs, aGraph->CreateSourceStream(nullptr));
// Note that the delay between removing ports in DestroyDecodedStream
// and adding new ones won't cause a glitch since all graph operations
// between main-thread stable states take effect atomically.
for (int32_t i = mOutputStreams.Length() - 1; i >= 0; --i) {
OutputStreamData& os = mOutputStreams[i];
auto& outputStreams = OutputStreams();
for (int32_t i = outputStreams.Length() - 1; i >= 0; --i) {
OutputStreamData& os = outputStreams[i];
MOZ_ASSERT(!os.mStream->IsDestroyed(),
"Should've been removed in DestroyDecodedStream()");
ConnectDecodedStreamToOutputStream(&os);
}
UpdateStreamBlockingForStateMachinePlaying();
mDecodedStream->mHaveBlockedForPlayState = mPlayState != PLAY_STATE_PLAYING;
if (mDecodedStream->mHaveBlockedForPlayState) {
mDecodedStream->mStream->ChangeExplicitBlockerCount(1);
GetDecodedStream()->mHaveBlockedForPlayState = mPlayState != PLAY_STATE_PLAYING;
if (GetDecodedStream()->mHaveBlockedForPlayState) {
GetDecodedStream()->mStream->ChangeExplicitBlockerCount(1);
}
}
@ -419,7 +419,7 @@ void MediaDecoder::AddOutputStream(ProcessedMediaStream* aStream,
if (!GetDecodedStream()) {
RecreateDecodedStream(mLogicalPosition, aStream->Graph());
}
OutputStreamData* os = mOutputStreams.AppendElement();
OutputStreamData* os = OutputStreams().AppendElement();
os->Init(this, aStream);
ConnectDecodedStreamToOutputStream(os);
if (aFinishWhenEnded) {

View File

@ -420,15 +420,17 @@ public:
* Decoder monitor must be held.
*/
void UpdateStreamBlockingForStateMachinePlaying();
nsTArray<OutputStreamData>& OutputStreams()
{
GetReentrantMonitor().AssertCurrentThreadIn();
return mOutputStreams;
return mDecodedStream.OutputStreams();
}
DecodedStreamData* GetDecodedStream()
{
GetReentrantMonitor().AssertCurrentThreadIn();
return mDecodedStream;
return mDecodedStream.GetData();
}
// Add an output stream. All decoder output will be sent to the stream.
@ -1030,14 +1032,12 @@ private:
#endif
protected:
// Data about MediaStreams that are being fed by this decoder.
nsTArray<OutputStreamData> mOutputStreams;
// The SourceMediaStream we are using to feed the mOutputStreams. This stream
// is never exposed outside the decoder.
// Only written on the main thread while holding the monitor. Therefore it
// can be read on any thread while holding the monitor, or on the main thread
// without holding the monitor.
nsAutoPtr<DecodedStreamData> mDecodedStream;
DecodedStream mDecodedStream;
// Set to one of the valid play states.
// This can only be changed on the main thread while holding the decoder

View File

@ -27,7 +27,6 @@ MediaTaskQueue::~MediaTaskQueue()
{
MonitorAutoLock mon(mQueueMonitor);
MOZ_ASSERT(mIsShutdown);
MOZ_DIAGNOSTIC_ASSERT(mTasks.empty());
MOZ_COUNT_DTOR(MediaTaskQueue);
}

View File

@ -35,7 +35,7 @@ MP4Demuxer::Init()
return InitPromise::CreateAndReject(DemuxerFailureReason::WAITING_FOR_DATA, __func__);
}
if (!mInitData->SetLength(br.Length())) {
if (!mInitData->SetLength(br.Length(), fallible)) {
// OOM
return InitPromise::CreateAndReject(DemuxerFailureReason::DEMUXER_ERROR, __func__);
}

View File

@ -175,7 +175,7 @@ public:
if (initSegment || !HasCompleteInitData()) {
if (mParser.mInitEndOffset > 0) {
MOZ_ASSERT(mParser.mInitEndOffset <= mResource->GetLength());
if (!mInitData->SetLength(mParser.mInitEndOffset)) {
if (!mInitData->SetLength(mParser.mInitEndOffset, fallible)) {
// Super unlikely OOM
return false;
}
@ -306,7 +306,7 @@ public:
const MediaByteRange& range = mParser->mInitRange;
uint32_t length = range.mEnd - range.mStart;
if (length) {
if (!mInitData->SetLength(length)) {
if (!mInitData->SetLength(length, fallible)) {
// Super unlikely OOM
return false;
}

View File

@ -47,7 +47,7 @@ bool
MediaOmxCommonDecoder::CheckDecoderCanOffloadAudio()
{
return (mCanOffloadAudio && !mFallbackToStateMachine &&
!mOutputStreams.Length() && mPlaybackRate == 1.0);
!OutputStreams().Length() && mPlaybackRate == 1.0);
}
void

View File

@ -457,7 +457,7 @@ CFDictionaryRef
AppleVDADecoder::CreateOutputConfiguration()
{
// Construct IOSurface Properties
const void* IOSurfaceKeys[] = { MacIOSurfaceLib::kPropIsGlobal };
const void* IOSurfaceKeys[] = { CFSTR("kIOSurfaceIsGlobal") };
const void* IOSurfaceValues[] = { kCFBooleanTrue };
static_assert(ArrayLength(IOSurfaceKeys) == ArrayLength(IOSurfaceValues),
"Non matching keys/values array size");

View File

@ -62,7 +62,7 @@ public:
// These allocations might fail if content provides a huge number of
// channels or size, but it's OK since we'll deal with the failure
// gracefully.
if (mInputChannels.SetLength(mNumberOfChannels)) {
if (mInputChannels.SetLength(mNumberOfChannels, fallible)) {
for (uint32_t i = 0; i < mNumberOfChannels; ++i) {
mInputChannels[i] = new (fallible) float[mLength];
if (!mInputChannels[i]) {

View File

@ -110,7 +110,7 @@ public:
}
private:
AutoFallibleTArray<Storage,2> mContents;
nsAutoTArray<Storage, 2> mContents;
};
/**

View File

@ -196,7 +196,7 @@ DelayBuffer::EnsureBuffer()
// block size, so that no block of writes will need to wrap.
const int chunkCount = (mMaxDelayTicks + 2 * WEBAUDIO_BLOCK_SIZE - 1) >>
WEBAUDIO_BLOCK_SIZE_BITS;
if (!mChunks.SetLength(chunkCount)) {
if (!mChunks.SetLength(chunkCount, fallible)) {
return false;
}
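The ceiling division here deserves a worked example. Assuming the usual Gecko values WEBAUDIO_BLOCK_SIZE = 128 and WEBAUDIO_BLOCK_SIZE_BITS = 7, the expression computes ceil((mMaxDelayTicks + 128) / 128): one spare block of headroom so a block-sized write never wraps inside a chunk. A self-contained check:

// chunkCount = ceil((maxDelayTicks + BLOCK) / BLOCK), written as a shift.
constexpr int WEBAUDIO_BLOCK_SIZE = 128;     // assumed Gecko value
constexpr int WEBAUDIO_BLOCK_SIZE_BITS = 7;  // log2 of the above

constexpr int ChunkCount(int aMaxDelayTicks)
{
  return (aMaxDelayTicks + 2 * WEBAUDIO_BLOCK_SIZE - 1) >> WEBAUDIO_BLOCK_SIZE_BITS;
}

static_assert(ChunkCount(300) == 4, "300 ticks + one spare block fit in 4 chunks");
static_assert(ChunkCount(128) == 2, "even one full block needs a spare");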

View File

@ -352,7 +352,7 @@ MediaDecodeTask::FinishDecode()
// write fewer bytes than mResampledFrames to the output buffer, in which
// case mWriteIndex will tell us how many valid samples we have.
bool memoryAllocationSuccess = true;
if (!mDecodeJob.mChannelBuffers.SetLength(channelCount)) {
if (!mDecodeJob.mChannelBuffers.SetLength(channelCount, fallible)) {
memoryAllocationSuccess = false;
} else {
for (uint32_t i = 0; i < channelCount; ++i) {

View File

@ -80,7 +80,7 @@ MediaStreamAudioSourceNode::PrincipalChanged(DOMMediaStream* aDOMMediaStream)
if (doc) {
nsIPrincipal* docPrincipal = doc->NodePrincipal();
nsIPrincipal* streamPrincipal = mInputStream->GetPrincipal();
if (NS_FAILED(docPrincipal->Subsumes(streamPrincipal, &subsumes))) {
if (!streamPrincipal || NS_FAILED(docPrincipal->Subsumes(streamPrincipal, &subsumes))) {
subsumes = false;
}
}

View File

@ -356,7 +356,7 @@ MobileMessageManager::Delete(const Sequence<OwningLongOrMozSmsMessageOrMozMmsMes
{
const uint32_t size = aParams.Length();
FallibleTArray<int32_t> idArray;
if (!idArray.SetLength(size)) {
if (!idArray.SetLength(size, fallible)) {
aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
return nullptr;
}

View File

@ -348,10 +348,10 @@ MobileMessageCursorChild::DoNotifyResult(const nsTArray<MobileMessageData>& aDat
MOZ_ASSERT(length);
AutoFallibleTArray<nsISupports*, 1> autoArray;
NS_ENSURE_TRUE_VOID(autoArray.SetCapacity(length));
NS_ENSURE_TRUE_VOID(autoArray.SetCapacity(length, fallible));
AutoFallibleTArray<nsCOMPtr<nsISupports>, 1> messages;
NS_ENSURE_TRUE_VOID(messages.SetCapacity(length));
NS_ENSURE_TRUE_VOID(messages.SetCapacity(length, fallible));
for (uint32_t i = 0; i < length; i++) {
nsCOMPtr<nsISupports> message = CreateMessageFromMessageData(aDataArray[i]);
@ -369,10 +369,10 @@ MobileMessageCursorChild::DoNotifyResult(const nsTArray<ThreadData>& aDataArray)
MOZ_ASSERT(length);
AutoFallibleTArray<nsISupports*, 1> autoArray;
NS_ENSURE_TRUE_VOID(autoArray.SetCapacity(length));
NS_ENSURE_TRUE_VOID(autoArray.SetCapacity(length, fallible));
AutoFallibleTArray<nsCOMPtr<nsISupports>, 1> threads;
NS_ENSURE_TRUE_VOID(threads.SetCapacity(length));
NS_ENSURE_TRUE_VOID(threads.SetCapacity(length, fallible));
for (uint32_t i = 0; i < length; i++) {
nsCOMPtr<nsISupports> thread = new MobileMessageThread(aDataArray[i]);

View File

@ -83,7 +83,7 @@ static JSObjWrapperTable sJSObjWrappers;
static bool sJSObjWrappersAccessible = false;
// Hash of NPObject wrappers that wrap NPObjects as JSObjects.
static PLDHashTable sNPObjWrappers;
static PLDHashTable2* sNPObjWrappers;
// Global wrapper count. This includes JSObject wrappers *and*
// NPObject wrappers. When this count goes to zero, there are no more
@ -401,23 +401,24 @@ DestroyJSObjWrapperTable()
static bool
CreateNPObjWrapperTable()
{
MOZ_ASSERT(!sNPObjWrappers.IsInitialized());
MOZ_ASSERT(!sNPObjWrappers);
if (!RegisterGCCallbacks()) {
return false;
}
PL_DHashTableInit(&sNPObjWrappers, PL_DHashGetStubOps(),
sizeof(NPObjWrapperHashEntry));
sNPObjWrappers =
new PLDHashTable2(PL_DHashGetStubOps(), sizeof(NPObjWrapperHashEntry));
return true;
}
static void
DestroyNPObjWrapperTable()
{
MOZ_ASSERT(sNPObjWrappers.EntryCount() == 0);
MOZ_ASSERT(sNPObjWrappers->EntryCount() == 0);
PL_DHashTableFinish(&sNPObjWrappers);
delete sNPObjWrappers;
sNPObjWrappers = nullptr;
}
static void
@ -436,7 +437,7 @@ OnWrapperDestroyed()
DestroyJSObjWrapperTable();
}
if (sNPObjWrappers.IsInitialized()) {
if (sNPObjWrappers) {
// No more wrappers, and our hash was initialized. Finish the
// hash to prevent leaking it.
DestroyNPObjWrapperTable();
@ -1761,8 +1762,8 @@ NPObjWrapper_Finalize(js::FreeOp *fop, JSObject *obj)
{
NPObject *npobj = (NPObject *)::JS_GetPrivate(obj);
if (npobj) {
if (sNPObjWrappers.IsInitialized()) {
PL_DHashTableRemove(&sNPObjWrappers, npobj);
if (sNPObjWrappers) {
PL_DHashTableRemove(sNPObjWrappers, npobj);
}
}
@ -1777,7 +1778,7 @@ NPObjWrapper_ObjectMoved(JSObject *obj, const JSObject *old)
// The wrapper JSObject has been moved, so we need to update the entry in the
// sNPObjWrappers hash table, if present.
if (!sNPObjWrappers.IsInitialized()) {
if (!sNPObjWrappers) {
return;
}
@ -1790,7 +1791,7 @@ NPObjWrapper_ObjectMoved(JSObject *obj, const JSObject *old)
JS::AutoSuppressGCAnalysis nogc;
NPObjWrapperHashEntry *entry = static_cast<NPObjWrapperHashEntry *>
(PL_DHashTableSearch(&sNPObjWrappers, npobj));
(PL_DHashTableSearch(sNPObjWrappers, npobj));
MOZ_ASSERT(entry && entry->mJSObj);
MOZ_ASSERT(entry->mJSObj == old);
entry->mJSObj = obj;
@ -1836,14 +1837,14 @@ nsNPObjWrapper::OnDestroy(NPObject *npobj)
return;
}
if (!sNPObjWrappers.IsInitialized()) {
if (!sNPObjWrappers) {
// No hash yet (or any more), no used wrappers available.
return;
}
NPObjWrapperHashEntry *entry = static_cast<NPObjWrapperHashEntry *>
(PL_DHashTableSearch(&sNPObjWrappers, npobj));
(PL_DHashTableSearch(sNPObjWrappers, npobj));
if (entry && entry->mJSObj) {
// Found a live NPObject wrapper, null out its JSObjects' private
@ -1852,7 +1853,7 @@ nsNPObjWrapper::OnDestroy(NPObject *npobj)
::JS_SetPrivate(entry->mJSObj, nullptr);
// Remove the npobj from the hash now that it went away.
PL_DHashTableRawRemove(&sNPObjWrappers, entry);
PL_DHashTableRawRemove(sNPObjWrappers, entry);
// The finalize hook will call OnWrapperDestroyed().
}
@ -1886,7 +1887,7 @@ nsNPObjWrapper::GetNewOrUsed(NPP npp, JSContext *cx, NPObject *npobj)
return nullptr;
}
if (!sNPObjWrappers.IsInitialized()) {
if (!sNPObjWrappers) {
// No hash yet (or any more), initialize it.
if (!CreateNPObjWrapperTable()) {
return nullptr;
@ -1894,7 +1895,7 @@ nsNPObjWrapper::GetNewOrUsed(NPP npp, JSContext *cx, NPObject *npobj)
}
NPObjWrapperHashEntry *entry = static_cast<NPObjWrapperHashEntry *>
(PL_DHashTableAdd(&sNPObjWrappers, npobj, fallible));
(PL_DHashTableAdd(sNPObjWrappers, npobj, fallible));
if (!entry) {
// Out of memory
@ -1916,24 +1917,24 @@ nsNPObjWrapper::GetNewOrUsed(NPP npp, JSContext *cx, NPObject *npobj)
entry->mNPObj = npobj;
entry->mNpp = npp;
uint32_t generation = sNPObjWrappers.Generation();
uint32_t generation = sNPObjWrappers->Generation();
// No existing JSObject, create one.
JS::Rooted<JSObject*> obj(cx, ::JS_NewObject(cx, js::Jsvalify(&sNPObjectJSWrapperClass)));
if (generation != sNPObjWrappers.Generation()) {
if (generation != sNPObjWrappers->Generation()) {
// Reload entry if the JS_NewObject call caused a GC and reallocated
// the table (see bug 445229). This is guaranteed to succeed.
NS_ASSERTION(PL_DHashTableSearch(&sNPObjWrappers, npobj),
NS_ASSERTION(PL_DHashTableSearch(sNPObjWrappers, npobj),
"Hashtable didn't find what we just added?");
}
if (!obj) {
// OOM? Remove the stale entry from the hash.
PL_DHashTableRawRemove(&sNPObjWrappers, entry);
PL_DHashTableRawRemove(sNPObjWrappers, entry);
return nullptr;
}
@ -1967,10 +1968,10 @@ NPObjWrapperPluginDestroyedCallback(PLDHashTable *table, PLDHashEntryHdr *hdr,
NppAndCx *nppcx = reinterpret_cast<NppAndCx *>(arg);
if (entry->mNpp == nppcx->npp) {
// Prevent invalidate() and deallocate() from touching the hash
// we're enumerating.
const PLDHashTableOps *ops = table->Ops();
table->SetOps(nullptr);
// HACK: temporarily hide the hash we're enumerating so that invalidate()
// and deallocate() don't touch it.
PLDHashTable2 *tmp = static_cast<PLDHashTable2*>(table);
sNPObjWrappers = nullptr;
NPObject *npobj = entry->mNPObj;
@ -1998,7 +1999,7 @@ NPObjWrapperPluginDestroyedCallback(PLDHashTable *table, PLDHashEntryHdr *hdr,
::JS_SetPrivate(entry->mJSObj, nullptr);
table->SetOps(ops);
sNPObjWrappers = tmp;
if (sDelayedReleases && sDelayedReleases->RemoveElement(npobj)) {
OnWrapperDestroyed();
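The HACK comment above describes a re-entrancy guard: plugin code run from the enumeration callback may itself consult and mutate the wrapper table. Hiding the global pointer for the duration makes those re-entrant paths take the "no table" branch instead. A self-contained sketch of the shape (types and names are stand-ins):

struct Table {};                 // stand-in for PLDHashTable2
static Table* sTable = nullptr;  // stand-in for sNPObjWrappers

// Stash the global in a local and null it out before calling code that may
// re-enter; restore it afterwards so later entries see the table again.
static void
EnumerateEntrySafely(void (*aMayReenter)())
{
  Table* tmp = sTable;
  sTable = nullptr;  // re-entrant lookups now take the "no hash" path
  aMayReenter();     // e.g. invalidate() / deallocate() on the NPObject
  sTable = tmp;
}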
@ -2039,9 +2040,9 @@ nsJSNPRuntime::OnPluginDestroy(NPP npp)
// Use the safe JSContext here as we're not always able to find the
// JSContext associated with the NPP any more.
AutoSafeJSContext cx;
if (sNPObjWrappers.IsInitialized()) {
if (sNPObjWrappers) {
NppAndCx nppcx = { npp, cx };
PL_DHashTableEnumerate(&sNPObjWrappers,
PL_DHashTableEnumerate(sNPObjWrappers,
NPObjWrapperPluginDestroyedCallback, &nppcx);
}
}
@ -2074,7 +2075,7 @@ LookupNPP(NPObject *npobj)
}
NPObjWrapperHashEntry *entry = static_cast<NPObjWrapperHashEntry *>
(PL_DHashTableAdd(&sNPObjWrappers, npobj, fallible));
(PL_DHashTableAdd(sNPObjWrappers, npobj, fallible));
if (!entry) {
return nullptr;

View File

@ -778,7 +778,7 @@ PluginScriptableObjectChild::AnswerInvoke(const PluginIdentifier& aId,
AutoFallibleTArray<NPVariant, 10> convertedArgs;
uint32_t argCount = aArgs.Length();
if (!convertedArgs.SetLength(argCount)) {
if (!convertedArgs.SetLength(argCount, mozilla::fallible)) {
*aResult = void_t();
*aSuccess = false;
return true;
@ -848,7 +848,7 @@ PluginScriptableObjectChild::AnswerInvokeDefault(InfallibleTArray<Variant>&& aAr
AutoFallibleTArray<NPVariant, 10> convertedArgs;
uint32_t argCount = aArgs.Length();
if (!convertedArgs.SetLength(argCount)) {
if (!convertedArgs.SetLength(argCount, mozilla::fallible)) {
*aResult = void_t();
*aSuccess = false;
return true;
@ -1099,7 +1099,7 @@ PluginScriptableObjectChild::AnswerConstruct(InfallibleTArray<Variant>&& aArgs,
AutoFallibleTArray<NPVariant, 10> convertedArgs;
uint32_t argCount = aArgs.Length();
if (!convertedArgs.SetLength(argCount)) {
if (!convertedArgs.SetLength(argCount, mozilla::fallible)) {
*aResult = void_t();
*aSuccess = false;
return true;

View File

@ -824,7 +824,7 @@ PluginScriptableObjectParent::AnswerInvoke(const PluginIdentifier& aId,
AutoFallibleTArray<NPVariant, 10> convertedArgs;
uint32_t argCount = aArgs.Length();
if (!convertedArgs.SetLength(argCount)) {
if (!convertedArgs.SetLength(argCount, fallible)) {
*aResult = void_t();
*aSuccess = false;
return true;
@ -907,7 +907,7 @@ PluginScriptableObjectParent::AnswerInvokeDefault(InfallibleTArray<Variant>&& aA
AutoFallibleTArray<NPVariant, 10> convertedArgs;
uint32_t argCount = aArgs.Length();
if (!convertedArgs.SetLength(argCount)) {
if (!convertedArgs.SetLength(argCount, fallible)) {
*aResult = void_t();
*aSuccess = false;
return true;
@ -1227,7 +1227,7 @@ PluginScriptableObjectParent::AnswerConstruct(InfallibleTArray<Variant>&& aArgs,
AutoFallibleTArray<NPVariant, 10> convertedArgs;
uint32_t argCount = aArgs.Length();
if (!convertedArgs.SetLength(argCount)) {
if (!convertedArgs.SetLength(argCount, fallible)) {
*aResult = void_t();
*aSuccess = false;
return true;

View File

@ -2015,6 +2015,35 @@ QuotaManager::InitializeRepository(PersistenceType aPersistenceType)
return NS_OK;
}
namespace {
// The Cache API was accidentally creating top-level morgue directories for
// a short time in nightly. Their presence unfortunately prevents all storage
// from working, so recover such profiles by removing the corrupt directories.
// This check should be removed at some point in the future.
bool
MaybeRemoveCorruptDirectory(const nsAString& aLeafName, nsIFile* aDir)
{
#ifdef NIGHTLY_BUILD
MOZ_ASSERT(aDir);
if (aLeafName != NS_LITERAL_STRING("morgue")) {
return false;
}
NS_WARNING("QuotaManager removing corrupt morgue directory.");
nsresult rv = aDir->Remove(true /* recursive */);
NS_ENSURE_SUCCESS(rv, false);
return true;
#else
return false;
#endif // NIGHTLY_BUILD
}
} // anonymous namespace
nsresult
QuotaManager::InitializeOrigin(PersistenceType aPersistenceType,
const nsACString& aGroup,
@ -2072,6 +2101,10 @@ QuotaManager::InitializeOrigin(PersistenceType aPersistenceType,
return NS_ERROR_UNEXPECTED;
}
if (MaybeRemoveCorruptDirectory(leafName, file)) {
continue;
}
Client::Type clientType;
rv = Client::TypeFromText(leafName, clientType);
if (NS_FAILED(rv)) {
@ -4173,6 +4206,10 @@ AsyncUsageRunnable::AddToUsage(QuotaManager* aQuotaManager,
}
}
if (MaybeRemoveCorruptDirectory(leafName, file)) {
continue;
}
Client::Type clientType;
rv = Client::TypeFromText(leafName, clientType);
if (NS_FAILED(rv)) {

View File

@ -780,7 +780,7 @@ nsSMILAnimationFunction::GetValues(const nsISMILAttr& aSMILAttr,
mValueNeedsReparsingEverySample = true;
}
if (!parseOk || !result.SetCapacity(2)) {
if (!parseOk || !result.SetCapacity(2, mozilla::fallible)) {
return NS_ERROR_FAILURE;
}

View File

@ -131,7 +131,7 @@ DOMSVGLengthList::InternalListLengthWillChange(uint32_t aNewLength)
}
}
if (!mItems.SetLength(aNewLength)) {
if (!mItems.SetLength(aNewLength, fallible)) {
// We silently ignore SetLength OOM failure since being out of sync is safe
// so long as we have *fewer* items than our internal list.
mItems.Clear();
@ -255,7 +255,7 @@ DOMSVGLengthList::InsertItemBefore(DOMSVGLength& newItem,
}
// Ensure we have enough memory so we can avoid complex error handling below:
if (!mItems.SetCapacity(mItems.Length() + 1) ||
if (!mItems.SetCapacity(mItems.Length() + 1, fallible) ||
!InternalList().SetCapacity(InternalList().Length() + 1)) {
error.Throw(NS_ERROR_OUT_OF_MEMORY);
return nullptr;

View File

@ -132,7 +132,7 @@ DOMSVGNumberList::InternalListLengthWillChange(uint32_t aNewLength)
}
}
if (!mItems.SetLength(aNewLength)) {
if (!mItems.SetLength(aNewLength, fallible)) {
// We silently ignore SetLength OOM failure since being out of sync is safe
// so long as we have *fewer* items than our internal list.
mItems.Clear();
@ -239,7 +239,7 @@ DOMSVGNumberList::InsertItemBefore(DOMSVGNumber& aItem,
nsRefPtr<DOMSVGNumber> domItem = aItem.HasOwner() ? aItem.Clone() : &aItem;
// Ensure we have enough memory so we can avoid complex error handling below:
if (!mItems.SetCapacity(mItems.Length() + 1) ||
if (!mItems.SetCapacity(mItems.Length() + 1, fallible) ||
!InternalList().SetCapacity(InternalList().Length() + 1)) {
error.Throw(NS_ERROR_OUT_OF_MEMORY);
return nullptr;

View File

@ -370,8 +370,9 @@ DOMSVGPathSegList::InsertItemBefore(DOMSVGPathSeg& aNewItem,
uint32_t argCount = SVGPathSegUtils::ArgCountForType(domItem->Type());
// Ensure we have enough memory so we can avoid complex error handling below:
if (!mItems.SetCapacity(mItems.Length() + 1) ||
!InternalList().mData.SetCapacity(InternalList().mData.Length() + 1 + argCount)) {
if (!mItems.SetCapacity(mItems.Length() + 1, fallible) ||
!InternalList().mData.SetCapacity(InternalList().mData.Length() + 1 + argCount,
fallible)) {
aError.Throw(NS_ERROR_OUT_OF_MEMORY);
return nullptr;
}
@ -437,10 +438,9 @@ DOMSVGPathSegList::ReplaceItem(DOMSVGPathSeg& aNewItem,
float segAsRaw[1 + NS_SVG_PATH_SEG_MAX_ARGS];
domItem->ToSVGPathSegEncodedData(segAsRaw);
bool ok = !!InternalList().mData.ReplaceElementsAt(
internalIndex, 1 + oldArgCount,
segAsRaw, 1 + newArgCount);
if (!ok) {
if (!InternalList().mData.ReplaceElementsAt(internalIndex, 1 + oldArgCount,
segAsRaw, 1 + newArgCount,
fallible)) {
aError.Throw(NS_ERROR_OUT_OF_MEMORY);
return nullptr;
}

View File

@ -166,7 +166,7 @@ DOMSVGPointList::InternalListWillChangeTo(const SVGPointList& aNewValue)
}
}
if (!mItems.SetLength(newLength)) {
if (!mItems.SetLength(newLength, fallible)) {
// We silently ignore SetLength OOM failure since being out of sync is safe
// so long as we have *fewer* items than our internal list.
mItems.Clear();
@ -306,7 +306,7 @@ DOMSVGPointList::InsertItemBefore(nsISVGPoint& aNewItem, uint32_t aIndex,
}
// Ensure we have enough memory so we can avoid complex error handling below:
if (!mItems.SetCapacity(mItems.Length() + 1) ||
if (!mItems.SetCapacity(mItems.Length() + 1, fallible) ||
!InternalList().SetCapacity(InternalList().Length() + 1)) {
aError.Throw(NS_ERROR_OUT_OF_MEMORY);
return nullptr;

View File

@ -132,7 +132,7 @@ DOMSVGTransformList::InternalListLengthWillChange(uint32_t aNewLength)
}
}
if (!mItems.SetLength(aNewLength)) {
if (!mItems.SetLength(aNewLength, fallible)) {
// We silently ignore SetLength OOM failure since being out of sync is safe
// so long as we have *fewer* items than our internal list.
mItems.Clear();
@ -247,7 +247,7 @@ DOMSVGTransformList::InsertItemBefore(SVGTransform& newItem,
}
// Ensure we have enough memory so we can avoid complex error handling below:
if (!mItems.SetCapacity(mItems.Length() + 1) ||
if (!mItems.SetCapacity(mItems.Length() + 1, fallible) ||
!InternalList().SetCapacity(InternalList().Length() + 1)) {
error.Throw(NS_ERROR_OUT_OF_MEMORY);
return nullptr;

View File

@ -18,7 +18,7 @@ namespace mozilla {
nsresult
SVGLengthList::CopyFrom(const SVGLengthList& rhs)
{
if (!mLengths.SetCapacity(rhs.Length())) {
if (!mLengths.SetCapacity(rhs.Length(), fallible)) {
// Yes, we do want fallible alloc here
return NS_ERROR_OUT_OF_MEMORY;
}

View File

@ -59,7 +59,7 @@ public:
bool operator==(const SVGLengthList& rhs) const;
bool SetCapacity(uint32_t size) {
return mLengths.SetCapacity(size);
return mLengths.SetCapacity(size, fallible);
}
void Compact() {
@ -90,7 +90,7 @@ protected:
* increased, in which case the list will be left unmodified.
*/
bool SetLength(uint32_t aNumberOfItems) {
return mLengths.SetLength(aNumberOfItems);
return mLengths.SetLength(aNumberOfItems, fallible);
}
private:

View File

@ -199,7 +199,7 @@ SVGMotionSMILType::Assign(nsSMILValue& aDest, const nsSMILValue& aSrc) const
MotionSegmentArray& dstArr = ExtractMotionSegmentArray(aDest);
// Ensure we have sufficient memory.
if (!dstArr.SetCapacity(srcArr.Length())) {
if (!dstArr.SetCapacity(srcArr.Length(), fallible)) {
return NS_ERROR_OUT_OF_MEMORY;
}

View File

@ -17,7 +17,7 @@ namespace mozilla {
nsresult
SVGNumberList::CopyFrom(const SVGNumberList& rhs)
{
if (!mNumbers.SetCapacity(rhs.Length())) {
if (!mNumbers.SetCapacity(rhs.Length(), fallible)) {
// Yes, we do want fallible alloc here
return NS_ERROR_OUT_OF_MEMORY;
}

View File

@ -60,7 +60,7 @@ public:
}
bool SetCapacity(uint32_t size) {
return mNumbers.SetCapacity(size);
return mNumbers.SetCapacity(size, fallible);
}
void Compact() {
@ -91,7 +91,7 @@ protected:
* increased, in which case the list will be left unmodified.
*/
bool SetLength(uint32_t aNumberOfItems) {
return mNumbers.SetLength(aNumberOfItems);
return mNumbers.SetLength(aNumberOfItems, fallible);
}
private:

View File

@ -34,7 +34,7 @@ static bool IsMoveto(uint16_t aSegType)
nsresult
SVGPathData::CopyFrom(const SVGPathData& rhs)
{
if (!mData.SetCapacity(rhs.mData.Length())) {
if (!mData.SetCapacity(rhs.mData.Length(), fallible)) {
// Yes, we do want fallible alloc here
return NS_ERROR_OUT_OF_MEMORY;
}
@ -81,7 +81,7 @@ SVGPathData::AppendSeg(uint32_t aType, ...)
{
uint32_t oldLength = mData.Length();
uint32_t newLength = oldLength + 1 + SVGPathSegUtils::ArgCountForType(aType);
if (!mData.SetLength(newLength)) {
if (!mData.SetLength(newLength, fallible)) {
return NS_ERROR_OUT_OF_MEMORY;
}

View File

@ -135,7 +135,7 @@ public:
}
bool SetCapacity(uint32_t aSize) {
return mData.SetCapacity(aSize);
return mData.SetCapacity(aSize, fallible);
}
void Compact() {
@ -202,7 +202,7 @@ protected:
* increased, in which case the list will be left unmodified.
*/
bool SetLength(uint32_t aLength) {
return mData.SetLength(aLength);
return mData.SetLength(aLength, fallible);
}
nsresult SetValueFromString(const nsAString& aValue);

View File

@ -68,7 +68,7 @@ public:
}
bool SetCapacity(uint32_t aSize) {
return mItems.SetCapacity(aSize);
return mItems.SetCapacity(aSize, fallible);
}
void Compact() {
@ -99,7 +99,7 @@ protected:
* increased, in which case the list will be left unmodified.
*/
bool SetLength(uint32_t aNumberOfItems) {
return mItems.SetLength(aNumberOfItems);
return mItems.SetLength(aNumberOfItems, fallible);
}
private:

View File

@ -16,7 +16,7 @@ namespace mozilla {
nsresult
SVGStringList::CopyFrom(const SVGStringList& rhs)
{
if (!mStrings.SetCapacity(rhs.Length())) {
if (!mStrings.SetCapacity(rhs.Length(), fallible)) {
// Yes, we do want fallible alloc here
return NS_ERROR_OUT_OF_MEMORY;
}

View File

@ -56,7 +56,7 @@ public:
}
bool SetCapacity(uint32_t size) {
return mStrings.SetCapacity(size);
return mStrings.SetCapacity(size, fallible);
}
void Compact() {
@ -92,7 +92,7 @@ protected:
* increased, in which case the list will be left unmodified.
*/
bool SetLength(uint32_t aStringOfItems) {
return mStrings.SetLength(aStringOfItems);
return mStrings.SetLength(aStringOfItems, fallible);
}
private:

View File

@ -43,7 +43,7 @@ SVGTransformList::CopyFrom(const SVGTransformList& rhs)
nsresult
SVGTransformList::CopyFrom(const nsTArray<nsSVGTransform>& aTransformArray)
{
if (!mItems.SetCapacity(aTransformArray.Length())) {
if (!mItems.SetCapacity(aTransformArray.Length(), fallible)) {
// Yes, we do want fallible alloc here
return NS_ERROR_OUT_OF_MEMORY;
}

View File

@ -60,7 +60,7 @@ public:
}
bool SetCapacity(uint32_t size) {
return mItems.SetCapacity(size);
return mItems.SetCapacity(size, fallible);
}
void Compact() {
@ -94,7 +94,7 @@ protected:
* increased, in which case the list will be left unmodified.
*/
bool SetLength(uint32_t aNumberOfItems) {
return mItems.SetLength(aNumberOfItems);
return mItems.SetLength(aNumberOfItems, fallible);
}
private:

View File

@ -50,7 +50,7 @@ SVGTransformListSMILType::Assign(nsSMILValue& aDest,
TransformArray* dstTransforms = static_cast<TransformArray*>(aDest.mU.mPtr);
// Before we assign, ensure we have sufficient memory
bool result = dstTransforms->SetCapacity(srcTransforms->Length());
bool result = dstTransforms->SetCapacity(srcTransforms->Length(), fallible);
NS_ENSURE_TRUE(result, NS_ERROR_OUT_OF_MEMORY);
*dstTransforms = *srcTransforms;
@ -336,7 +336,7 @@ SVGTransformListSMILType::AppendTransforms(const SVGTransformList& aList,
TransformArray& transforms = *static_cast<TransformArray*>(aValue.mU.mPtr);
if (!transforms.SetCapacity(transforms.Length() + aList.Length()))
if (!transforms.SetCapacity(transforms.Length() + aList.Length(), fallible))
return false;
for (uint32_t i = 0; i < aList.Length(); ++i) {
@ -358,7 +358,7 @@ SVGTransformListSMILType::GetTransforms(const nsSMILValue& aValue,
*static_cast<const TransformArray*>(aValue.mU.mPtr);
aTransforms.Clear();
if (!aTransforms.SetCapacity(smilTransforms.Length()))
if (!aTransforms.SetCapacity(smilTransforms.Length(), fallible))
return false;
for (uint32_t i = 0; i < smilTransforms.Length(); ++i) {

View File

@ -155,7 +155,7 @@ struct DrawSurfaceOptions {
* matching DrawTarget. Not adhering to this condition will make a draw call
* fail.
*/
class GradientStops : public external::AtomicRefCounted<GradientStops>
class GradientStops : public RefCounted<GradientStops>
{
public:
MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(GradientStops)
@ -318,7 +318,7 @@ class DrawTargetCaptureImpl;
* which may be used as a source in a SurfacePattern or a DrawSurface call.
* They cannot be drawn to directly.
*/
class SourceSurface : public external::AtomicRefCounted<SourceSurface>
class SourceSurface : public RefCounted<SourceSurface>
{
public:
MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(SourceSurface)
@ -476,7 +476,7 @@ class FlattenedPath;
/** The path class is used to create (sets of) figures of any shape that can be
* filled or stroked to a DrawTarget
*/
class Path : public external::AtomicRefCounted<Path>
class Path : public RefCounted<Path>
{
public:
MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(Path)
@ -577,7 +577,7 @@ struct GlyphBuffer
* at a particular size. It is passed into text drawing calls to describe
* the font used for the drawing call.
*/
class ScaledFont : public external::AtomicRefCounted<ScaledFont>
class ScaledFont : public RefCounted<ScaledFont>
{
public:
MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(ScaledFont)
@ -622,7 +622,7 @@ protected:
* parameters. This is because different platforms have unique rendering
* parameters.
*/
class GlyphRenderingOptions : public external::AtomicRefCounted<GlyphRenderingOptions>
class GlyphRenderingOptions : public RefCounted<GlyphRenderingOptions>
{
public:
MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(GlyphRenderingOptions)
@ -641,7 +641,7 @@ class DrawTargetCapture;
* may be used either through a Snapshot or by flushing the target and directly
* accessing the backing store a DrawTarget was created with.
*/
class DrawTarget : public external::AtomicRefCounted<DrawTarget>
class DrawTarget : public RefCounted<DrawTarget>
{
public:
MOZ_DECLARE_REFCOUNTED_VIRTUAL_TYPENAME(DrawTarget)
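These hunks swap the gfx base classes from external::AtomicRefCounted back to plain RefCounted, that is, from thread-safe to single-threaded reference counting. A simplified contrast of the trade-off (not MFBT's actual implementation):

#include <atomic>

// Plain counting: one ordinary increment, safe only if all AddRef/Release
// calls happen on a single thread. Atomic counting: safe across threads,
// but every refcount operation pays for an atomic read-modify-write.
struct PlainCounted  { int refCnt = 0;             void AddRef() { ++refCnt; } };
struct AtomicCounted { std::atomic<int> refCnt{0}; void AddRef() { ++refCnt; } };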

View File

@ -148,7 +148,7 @@ class DrawFilterCommand : public DrawingCommand
public:
DrawFilterCommand(FilterNode* aFilter, const Rect& aSourceRect,
const Point& aDestPoint, const DrawOptions& aOptions)
: DrawingCommand(CommandType::DRAWFILTER)
: DrawingCommand(CommandType::DRAWSURFACE)
, mFilter(aFilter), mSourceRect(aSourceRect)
, mDestPoint(aDestPoint), mOptions(aOptions)
{
@ -166,36 +166,6 @@ private:
DrawOptions mOptions;
};
class DrawSurfaceWithShadowCommand : public DrawingCommand
{
public:
DrawSurfaceWithShadowCommand(SourceSurface* aSurface, const Point& aDest,
const Color& aColor, const Point& aOffset,
Float aSigma, CompositionOp aOperator)
: DrawingCommand(CommandType::DRAWSURFACEWITHSHADOW)
, mSurface(aSurface)
, mDest(aDest)
, mColor(aColor)
, mOffset(aOffset)
, mSigma(aSigma)
, mOperator(aOperator)
{
}
virtual void ExecuteOnDT(DrawTarget* aDT, const Matrix&)
{
aDT->DrawSurfaceWithShadow(mSurface, mDest, mColor, mOffset, mSigma, mOperator);
}
private:
RefPtr<SourceSurface> mSurface;
Point mDest;
Color mColor;
Point mOffset;
Float mSigma;
CompositionOp mOperator;
};
class ClearRectCommand : public DrawingCommand
{
public:

View File

@ -30,7 +30,6 @@ DrawTargetCaptureImpl::Init(const IntSize& aSize, DrawTarget* aRefDT)
}
mRefDT = aRefDT;
mFormat = mRefDT->GetFormat();
mSize = aSize;
return true;
@ -70,18 +69,6 @@ DrawTargetCaptureImpl::DrawFilter(FilterNode *aNode,
AppendCommand(DrawFilterCommand)(aNode, aSourceRect, aDestPoint, aOptions);
}
void
DrawTargetCaptureImpl::DrawSurfaceWithShadow(SourceSurface *aSurface,
const Point &aDest,
const Color &aColor,
const Point &aOffset,
Float aSigma,
CompositionOp aOperator)
{
aSurface->GuaranteePersistance();
AppendCommand(DrawSurfaceWithShadowCommand)(aSurface, aDest, aColor, aOffset, aSigma, aOperator);
}
void
DrawTargetCaptureImpl::ClearRect(const Rect &aRect)
{
@ -191,7 +178,6 @@ void
DrawTargetCaptureImpl::SetTransform(const Matrix& aTransform)
{
AppendCommand(SetTransformCommand)(aTransform);
mTransform = aTransform;
}
void

View File

@ -45,7 +45,7 @@ public:
const Color &aColor,
const Point &aOffset,
Float aSigma,
CompositionOp aOperator);
CompositionOp aOperator) { /* Not implemented */ }
virtual void ClearRect(const Rect &aRect);
virtual void MaskSurface(const Pattern &aSource,

View File

@ -461,7 +461,7 @@ gfxContext::CurrentDash(FallibleTArray<gfxFloat>& dashes, gfxFloat* offset) cons
const AzureState &state = CurrentState();
int count = state.strokeOptions.mDashLength;
if (count <= 0 || !dashes.SetLength(count)) {
if (count <= 0 || !dashes.SetLength(count, fallible)) {
return false;
}

View File

@ -315,7 +315,7 @@ gfxCoreTextShaper::SetGlyphsFromRun(gfxShapedText *aShapedText,
static const int32_t NO_GLYPH = -1;
AutoFallibleTArray<int32_t,SMALL_GLYPH_RUN> charToGlyphArray;
if (!charToGlyphArray.SetLength(stringRange.length)) {
if (!charToGlyphArray.SetLength(stringRange.length, fallible)) {
return NS_ERROR_OUT_OF_MEMORY;
}
int32_t *charToGlyph = charToGlyphArray.Elements();

View File

@ -213,10 +213,10 @@ gfxGraphiteShaper::SetGlyphsFromSegment(gfxContext *aContext,
AutoFallibleTArray<float,SMALL_GLYPH_RUN> xLocs;
AutoFallibleTArray<float,SMALL_GLYPH_RUN> yLocs;
if (!clusters.SetLength(aLength) ||
!gids.SetLength(glyphCount) ||
!xLocs.SetLength(glyphCount) ||
!yLocs.SetLength(glyphCount))
if (!clusters.SetLength(aLength, fallible) ||
!gids.SetLength(glyphCount, fallible) ||
!xLocs.SetLength(glyphCount, fallible) ||
!yLocs.SetLength(glyphCount, fallible))
{
return NS_ERROR_OUT_OF_MEMORY;
}

View File

@ -1534,7 +1534,7 @@ gfxHarfBuzzShaper::SetGlyphsFromRun(gfxContext *aContext,
uint32_t wordLength = aLength;
static const int32_t NO_GLYPH = -1;
AutoFallibleTArray<int32_t,SMALL_GLYPH_RUN> charToGlyphArray;
if (!charToGlyphArray.SetLength(wordLength)) {
if (!charToGlyphArray.SetLength(wordLength, fallible)) {
return NS_ERROR_OUT_OF_MEMORY;
}

View File

@ -370,7 +370,7 @@ CopyWOFFMetadata(const uint8_t* aFontData,
if (metaOffset >= aLength || metaCompLen > aLength - metaOffset) {
return;
}
if (!aMetadata->SetLength(woff->metaCompLen)) {
if (!aMetadata->SetLength(woff->metaCompLen, fallible)) {
return;
}
memcpy(aMetadata->Elements(), aFontData + metaOffset, metaCompLen);

View File

@ -46,7 +46,7 @@ nsresult
nsHyphenator::Hyphenate(const nsAString& aString,
FallibleTArray<bool>& aHyphens)
{
if (!aHyphens.SetLength(aString.Length())) {
if (!aHyphens.SetLength(aString.Length(), mozilla::fallible)) {
return NS_ERROR_OUT_OF_MEMORY;
}
memset(aHyphens.Elements(), false, aHyphens.Length() * sizeof(bool));

View File

@ -45,8 +45,8 @@ nsConverterInputStream::Init(nsIInputStream* aStream,
mConverter = EncodingUtils::DecoderForEncoding(encoding);
// set up our buffers
if (!mByteData.SetCapacity(aBufferSize) ||
!mUnicharData.SetCapacity(aBufferSize)) {
if (!mByteData.SetCapacity(aBufferSize, mozilla::fallible) ||
!mUnicharData.SetCapacity(aBufferSize, mozilla::fallible)) {
return NS_ERROR_OUT_OF_MEMORY;
}

View File

@ -520,7 +520,7 @@ struct ParamTraits<FallibleTArray<E> >
memcpy(elements, outdata, pickledLength);
} else {
if (!aResult->SetCapacity(length)) {
if (!aResult->SetCapacity(length, mozilla::fallible)) {
return false;
}

View File

@ -343,7 +343,7 @@ def _callCxxArrayLength(arr):
def _callCxxCheckedArraySetLength(arr, lenexpr, sel='.'):
ifbad = StmtIf(ExprNot(ExprCall(ExprSelect(arr, sel, 'SetLength'),
args=[ lenexpr ])))
args=[ lenexpr, ExprVar('mozilla::fallible') ])))
ifbad.addifstmt(_fatalError('Error setting the array length'))
ifbad.addifstmt(StmtReturn.FALSE)
return ifbad
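For reference, the C++ this codegen change now emits for a deserialized array looks roughly like the following (a sketch assembled from the AST calls above; the FatalError plumbing varies per generated actor):

// Roughly the statement _callCxxCheckedArraySetLength builds:
if (!(arr.SetLength(length, mozilla::fallible))) {
  FatalError("Error setting the array length");
  return false;
}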

View File

@ -13,7 +13,14 @@ function recur(n, limit) {
function checkRecursion(n, limit) {
print("checkRecursion(" + uneval(n) + ", " + uneval(limit) + ")");
var stack = recur(n, limit);
try {
var stack = recur(n, limit);
} catch (e) {
// Some platforms, like ASAN builds, can end up overrecursing. Tolerate
// these failures.
assertEq(/too much recursion/.test("" + e), true);
return;
}
// Async stacks are limited even if we didn't ask for a limit. There is a
// default limit on frames attached on top of any synchronous frames. In this

File diff suppressed because it is too large.

View File

@ -11,7 +11,8 @@
#include "ds/PriorityQueue.h"
#include "ds/SplayTree.h"
#include "jit/LiveRangeAllocator.h"
#include "jit/RegisterAllocator.h"
#include "jit/StackSlotAllocator.h"
// Backtracking priority-queue-based register allocator, based on the one
// described in the following blog post:
@ -21,55 +22,489 @@
namespace js {
namespace jit {
// Information about a group of registers. Registers may be grouped together
// when (a) all of their lifetimes are disjoint, (b) they are of the same type
// (double / non-double) and (c) it is desirable that they have the same
// allocation.
struct VirtualRegisterGroup : public TempObject
class Requirement
{
// All virtual registers in the group.
Vector<uint32_t, 2, JitAllocPolicy> registers;
public:
enum Kind {
NONE,
REGISTER,
FIXED,
MUST_REUSE_INPUT
};
// Desired physical register to use for registers in the group.
LAllocation allocation;
Requirement()
: kind_(NONE)
{ }
// Spill location to be shared by registers in the group.
LAllocation spill;
explicit Requirement(Kind kind)
: kind_(kind)
{
// These have dedicated constructors.
MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT);
}
explicit VirtualRegisterGroup(TempAllocator& alloc)
: registers(alloc), allocation(LUse(0, LUse::ANY)), spill(LUse(0, LUse::ANY))
{}
Requirement(Kind kind, CodePosition at)
: kind_(kind),
position_(at)
{
// These have dedicated constructors.
MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT);
}
uint32_t canonicalReg() {
uint32_t minimum = registers[0];
for (size_t i = 1; i < registers.length(); i++)
minimum = Min(minimum, registers[i]);
return minimum;
explicit Requirement(LAllocation fixed)
: kind_(FIXED),
allocation_(fixed)
{
MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
}
// Only useful as a hint, encodes where the fixed requirement is used to
// avoid allocating a fixed register too early.
Requirement(LAllocation fixed, CodePosition at)
: kind_(FIXED),
allocation_(fixed),
position_(at)
{
MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
}
Requirement(uint32_t vreg, CodePosition at)
: kind_(MUST_REUSE_INPUT),
allocation_(LUse(vreg, LUse::ANY)),
position_(at)
{ }
Kind kind() const {
return kind_;
}
LAllocation allocation() const {
MOZ_ASSERT(!allocation_.isBogus() && !allocation_.isUse());
return allocation_;
}
uint32_t virtualRegister() const {
MOZ_ASSERT(allocation_.isUse());
MOZ_ASSERT(kind() == MUST_REUSE_INPUT);
return allocation_.toUse()->virtualRegister();
}
CodePosition pos() const {
return position_;
}
int priority() const;
bool merge(const Requirement& newRequirement) {
// Merge newRequirement with any existing requirement, returning false
// if the new and old requirements conflict.
MOZ_ASSERT(newRequirement.kind() != Requirement::MUST_REUSE_INPUT);
if (newRequirement.kind() == Requirement::FIXED) {
if (kind() == Requirement::FIXED)
return newRequirement.allocation() == allocation();
*this = newRequirement;
return true;
}
MOZ_ASSERT(newRequirement.kind() == Requirement::REGISTER);
if (kind() == Requirement::FIXED)
return allocation().isRegister();
*this = newRequirement;
return true;
}
void dump() const;
private:
Kind kind_;
LAllocation allocation_;
CodePosition position_;
};
struct UsePosition : public TempObject,
public InlineForwardListNode<UsePosition>
{
LUse* use;
CodePosition pos;
UsePosition(LUse* use, CodePosition pos) :
use(use),
pos(pos)
{
// Verify that the usedAtStart() flag is consistent with the
// subposition. For now ignore fixed registers, because they
// are handled specially around calls.
MOZ_ASSERT_IF(!use->isFixedRegister(),
pos.subpos() == (use->usedAtStart()
? CodePosition::INPUT
: CodePosition::OUTPUT));
}
};
class BacktrackingVirtualRegister : public VirtualRegister
typedef InlineForwardListIterator<UsePosition> UsePositionIterator;
// Backtracking allocator data structures overview.
//
// LiveRange: A continuous range of positions where a virtual register is live.
// LiveBundle: A set of LiveRanges which do not overlap.
// VirtualRegister: A set of all LiveRanges used for some LDefinition.
//
// The allocator first performs a liveness analysis on the LIR graph which
// constructs LiveRanges for each VirtualRegister, determining where the
// registers are live.
//
// The ranges are then bundled together according to heuristics, and placed on
// the allocation queue.
//
// As bundles are removed from the allocation queue, we attempt to find a
// physical register or stack slot allocation for all ranges in the removed
// bundle, possibly evicting already-allocated bundles. See processBundle()
// for details.
//
// If we are not able to allocate a bundle, it is split according to heuristics
// into two or more smaller bundles which cover all the ranges of the original.
// These smaller bundles are then allocated independently.
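The comment above compresses the whole pipeline into a few sentences; a purely illustrative restatement follows. Every name here is a hypothetical stand-in, not the allocator's real API (see processBundle() in the actual file).

class LiveBundle;
struct AllocationQueue {
  bool empty() const;
  LiveBundle* removeHighestPriority();
  void insert(LiveBundle*);
};

// Illustrative pipeline: pop the highest-priority bundle, try to find a
// register or stack slot for all its ranges (possibly evicting others), and
// on failure split it into smaller bundles that are allocated independently.
static bool
AllocateAll(AllocationQueue& aQueue,
            bool (*aTryAllocate)(LiveBundle*),
            bool (*aSplit)(LiveBundle*, AllocationQueue&))
{
  while (!aQueue.empty()) {
    LiveBundle* bundle = aQueue.removeHighestPriority();
    if (aTryAllocate(bundle)) {
      continue;
    }
    if (!aSplit(bundle, aQueue)) {
      return false;  // could not split further, or OOM
    }
  }
  return true;
}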
class LiveBundle;
class LiveRange : public TempObject
{
public:
// Linked lists are used to keep track of the ranges in each LiveBundle and
// VirtualRegister. Since a LiveRange may be in two lists simultaneously, use
// these auxiliary classes to keep things straight.
class BundleLink : public InlineForwardListNode<BundleLink> {};
class RegisterLink : public InlineForwardListNode<RegisterLink> {};
typedef InlineForwardListIterator<BundleLink> BundleLinkIterator;
typedef InlineForwardListIterator<RegisterLink> RegisterLinkIterator;
// Links in the lists in LiveBundle and VirtualRegister.
BundleLink bundleLink;
RegisterLink registerLink;
static LiveRange* get(BundleLink* link) {
return reinterpret_cast<LiveRange*>(reinterpret_cast<uint8_t*>(link) -
offsetof(LiveRange, bundleLink));
}
static LiveRange* get(RegisterLink* link) {
return reinterpret_cast<LiveRange*>(reinterpret_cast<uint8_t*>(link) -
offsetof(LiveRange, registerLink));
}
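    // A minimal sketch (not part of this patch) of the "container_of" idiom
    // used by get() above: a LiveRange embeds two list nodes, so a node
    // pointer alone does not identify its owner, and subtracting the node's
    // offset within the struct recovers it. For a hypothetical Item with an
    // embedded Node member named link:
    //
    //   static Item* fromLink(Node* n) {
    //       return reinterpret_cast<Item*>(
    //           reinterpret_cast<uint8_t*>(n) - offsetof(Item, link));
    //   }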
struct Range
{
// The beginning of this range, inclusive.
CodePosition from;
// The end of this range, exclusive.
CodePosition to;
Range() {}
Range(CodePosition from, CodePosition to)
: from(from), to(to)
{
MOZ_ASSERT(!empty());
}
bool empty() {
MOZ_ASSERT(from <= to);
return from == to;
}
};
private:
// The virtual register this range is for, or zero if this does not have a
// virtual register (for example, it is in the callRanges bundle).
uint32_t vreg_;
// The bundle containing this range, null if liveness information is being
// constructed and we haven't started allocating bundles yet.
LiveBundle* bundle_;
// The code positions in this range.
Range range_;
// All uses of the virtual register in this range, ordered by location.
InlineForwardList<UsePosition> uses_;
// Whether this range contains the virtual register's definition.
bool hasDefinition_;
LiveRange(uint32_t vreg, Range range)
: vreg_(vreg), bundle_(nullptr), range_(range), hasDefinition_(false)
{
MOZ_ASSERT(!range.empty());
}
public:
static LiveRange* New(TempAllocator& alloc, uint32_t vreg,
CodePosition from, CodePosition to) {
return new(alloc) LiveRange(vreg, Range(from, to));
}
uint32_t vreg() const {
MOZ_ASSERT(hasVreg());
return vreg_;
}
bool hasVreg() const {
return vreg_ != 0;
}
LiveBundle* bundle() const {
return bundle_;
}
CodePosition from() const {
return range_.from;
}
CodePosition to() const {
return range_.to;
}
bool covers(CodePosition pos) const {
return pos >= from() && pos < to();
}
// Whether this range wholly contains other.
bool contains(LiveRange* other) const;
// Intersect this range with other, returning the subranges of this
// that are before, inside, or after other.
void intersect(LiveRange* other, Range* pre, Range* inside, Range* post) const;
// Whether this range has any intersection with other.
bool intersects(LiveRange* other) const;
UsePositionIterator usesBegin() const {
return uses_.begin();
}
UsePosition* lastUse() const {
return uses_.back();
}
bool hasUses() const {
return !!usesBegin();
}
UsePosition* popUse() {
return uses_.popFront();
}
bool hasDefinition() const {
return hasDefinition_;
}
void setFrom(CodePosition from) {
range_.from = from;
MOZ_ASSERT(!range_.empty());
}
void setTo(CodePosition to) {
range_.to = to;
MOZ_ASSERT(!range_.empty());
}
void setBundle(LiveBundle* bundle) {
bundle_ = bundle;
}
void addUse(UsePosition* use);
void distributeUses(LiveRange* other);
void setHasDefinition() {
MOZ_ASSERT(!hasDefinition_);
hasDefinition_ = true;
}
// Return a string describing this range. This is not re-entrant!
#ifdef DEBUG
const char* toString() const;
#else
const char* toString() const { return "???"; }
#endif
// Comparator for use in range splay trees.
static int compare(LiveRange* v0, LiveRange* v1) {
// LiveRange includes 'from' but excludes 'to'.
if (v0->to() <= v1->from())
return -1;
if (v0->from() >= v1->to())
return 1;
return 0;
}
};
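// A standalone sketch (not part of this patch) of the three-way comparison
// above: because 'to' is exclusive, touching ranges are strictly ordered,
// while any overlap compares as 0, so inserting into the range splay tree
// doubles as an interference test. Toy types, not the LiveRange API:
#include <cassert>

struct R { unsigned from, to; };        // half-open range [from, to)

int compareRanges(const R& a, const R& b) {
    if (a.to <= b.from)
        return -1;                      // a lies strictly before b
    if (a.from >= b.to)
        return 1;                       // a lies strictly after b
    return 0;                           // overlapping ranges compare as equal
}

int main() {
    assert(compareRanges(R{0, 4}, R{4, 8}) == -1);  // touching is not overlap
    assert(compareRanges(R{0, 5}, R{4, 8}) == 0);   // genuine overlap
    return 0;
}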
// Tracks information about bundles that should all be spilled to the same
// physical location. At the beginning of allocation, each bundle has its own
// spill set. As bundles are split, the new smaller bundles continue to use the
// same spill set.
class SpillSet : public TempObject
{
// All bundles with this spill set which have been spilled. All bundles in
// this list will be given the same physical slot.
Vector<LiveBundle*, 1, JitAllocPolicy> list_;
explicit SpillSet(TempAllocator& alloc)
: list_(alloc)
{ }
public:
static SpillSet* New(TempAllocator& alloc) {
return new(alloc) SpillSet(alloc);
}
bool addSpilledBundle(LiveBundle* bundle) {
return list_.append(bundle);
}
size_t numSpilledBundles() const {
return list_.length();
}
LiveBundle* spilledBundle(size_t i) const {
return list_[i];
}
void setAllocation(LAllocation alloc);
};
// A set of live ranges which are all pairwise disjoint. The register allocator
// attempts to find allocations for an entire bundle, and if it fails the
// bundle will be broken into smaller ones which are allocated independently.
class LiveBundle : public TempObject
{
// Set to use if this bundle or one it is split into is spilled.
SpillSet* spill_;
// All the ranges in this set, ordered by location.
InlineForwardList<LiveRange::BundleLink> ranges_;
// Allocation to use for ranges in this set, bogus if unallocated or spilled
// and not yet given a physical stack slot.
LAllocation alloc_;
// Bundle which entirely contains this one and has no register uses. This
// may or may not be spilled by the allocator, but it can be spilled and
// will not be split.
LiveBundle* spillParent_;
LiveBundle(SpillSet* spill, LiveBundle* spillParent)
: spill_(spill), spillParent_(spillParent)
{ }
public:
static LiveBundle* New(TempAllocator& alloc, SpillSet* spill, LiveBundle* spillParent) {
return new(alloc) LiveBundle(spill, spillParent);
}
SpillSet* spillSet() const {
return spill_;
}
void setSpillSet(SpillSet* spill) {
spill_ = spill;
}
LiveRange::BundleLinkIterator rangesBegin() const {
return ranges_.begin();
}
bool hasRanges() const {
return !!rangesBegin();
}
LiveRange* firstRange() const {
return LiveRange::get(*rangesBegin());
}
LiveRange* lastRange() const {
return LiveRange::get(ranges_.back());
}
LiveRange* rangeFor(CodePosition pos) const;
void removeRange(LiveRange* range);
void removeRangeAndIncrementIterator(LiveRange::BundleLinkIterator& iter) {
ranges_.removeAndIncrement(iter);
}
void addRange(LiveRange* range);
bool addRange(TempAllocator& alloc, uint32_t vreg, CodePosition from, CodePosition to);
bool addRangeAndDistributeUses(TempAllocator& alloc, LiveRange* oldRange,
CodePosition from, CodePosition to);
LiveRange* popFirstRange();
#ifdef DEBUG
size_t numRanges() const;
#endif
LAllocation allocation() const {
return alloc_;
}
void setAllocation(LAllocation alloc) {
alloc_ = alloc;
}
LiveBundle* spillParent() const {
return spillParent_;
}
// Return a string describing this bundle. This is not re-entrant!
#ifdef DEBUG
const char* toString() const;
#else
const char* toString() const { return "???"; }
#endif
};
// Information about the allocation for a virtual register.
class VirtualRegister
{
// Instruction which defines this register.
LNode* ins_;
// Definition in the instruction for this register.
LDefinition* def_;
// All live ranges for this register. These may overlap each other, and are
// ordered by their start position.
InlineForwardList<LiveRange::RegisterLink> ranges_;
// Whether def_ is a temp or an output.
bool isTemp_;
// If this register's definition is MUST_REUSE_INPUT, whether a copy must
// be introduced before the definition that relaxes the policy.
bool mustCopyInput_;
// Spill location to use for this register.
LAllocation canonicalSpill_;
// Code position above which the canonical spill cannot be used; such
// intervals may overlap other registers in the same group.
CodePosition canonicalSpillExclude_;
// If this register is associated with a group of other registers,
// information about the group. This structure is shared between all
// registers in the group.
VirtualRegisterGroup* group_;
void operator=(const VirtualRegister&) = delete;
VirtualRegister(const VirtualRegister&) = delete;
public:
explicit BacktrackingVirtualRegister(TempAllocator& alloc)
: VirtualRegister(alloc)
{}
explicit VirtualRegister()
{
// Note: This class is zeroed before it is constructed.
}
void init(LNode* ins, LDefinition* def, bool isTemp) {
MOZ_ASSERT(!ins_);
ins_ = ins;
def_ = def;
isTemp_ = isTemp;
}
LNode* ins() const {
return ins_;
}
LDefinition* def() const {
return def_;
}
LDefinition::Type type() const {
return def()->type();
}
uint32_t vreg() const {
return def()->virtualRegister();
}
bool isCompatible(const AnyRegister& r) const {
return def_->isCompatibleReg(r);
}
bool isCompatible(const VirtualRegister& vr) const {
return def_->isCompatibleDef(*vr.def_);
}
bool isTemp() const {
return isTemp_;
}
void setMustCopyInput() {
mustCopyInput_ = true;
}
@@ -77,56 +512,56 @@ class BacktrackingVirtualRegister : public VirtualRegister
return mustCopyInput_;
}
void setCanonicalSpill(LAllocation alloc) {
MOZ_ASSERT(!alloc.isUse());
canonicalSpill_ = alloc;
LiveRange::RegisterLinkIterator rangesBegin() const {
return ranges_.begin();
}
const LAllocation* canonicalSpill() const {
return canonicalSpill_.isBogus() ? nullptr : &canonicalSpill_;
bool hasRanges() const {
return !!rangesBegin();
}
LiveRange* firstRange() const {
return LiveRange::get(*rangesBegin());
}
LiveRange* lastRange() const {
return LiveRange::get(ranges_.back());
}
LiveRange* rangeFor(CodePosition pos) const;
void removeRange(LiveRange* range);
void addRange(LiveRange* range);
LiveBundle* firstBundle() const {
return firstRange()->bundle();
}
void setCanonicalSpillExclude(CodePosition pos) {
canonicalSpillExclude_ = pos;
}
bool hasCanonicalSpillExclude() const {
return canonicalSpillExclude_.bits() != 0;
}
CodePosition canonicalSpillExclude() const {
MOZ_ASSERT(hasCanonicalSpillExclude());
return canonicalSpillExclude_;
}
void setGroup(VirtualRegisterGroup* group) {
group_ = group;
}
VirtualRegisterGroup* group() {
return group_;
}
bool addInitialRange(TempAllocator& alloc, CodePosition from, CodePosition to);
void addInitialUse(UsePosition* use);
void setInitialDefinition(CodePosition from);
};
// A sequence of code positions, for telling BacktrackingAllocator::splitAt
// where to split.
typedef js::Vector<CodePosition, 4, SystemAllocPolicy> SplitPositionVector;
class BacktrackingAllocator
: private LiveRangeAllocator<BacktrackingVirtualRegister>
class BacktrackingAllocator : protected RegisterAllocator
{
friend class C1Spewer;
friend class JSONSpewer;
// Priority queue element: either an interval or group of intervals and the
// associated priority.
BitSet* liveIn;
FixedList<VirtualRegister> vregs;
// Ranges where all registers must be spilled due to call instructions.
LiveBundle* callRanges;
// Allocation state.
StackSlotAllocator stackSlotAllocator;
// Priority queue element: a bundle and the associated priority.
struct QueueItem
{
LiveInterval* interval;
VirtualRegisterGroup* group;
LiveBundle* bundle;
QueueItem(LiveInterval* interval, size_t priority)
: interval(interval), group(nullptr), priority_(priority)
{}
QueueItem(VirtualRegisterGroup* group, size_t priority)
: interval(nullptr), group(group), priority_(priority)
QueueItem(LiveBundle* bundle, size_t priority)
: bundle(bundle), priority_(priority)
{}
static size_t priority(const QueueItem& v) {
@@ -139,37 +574,14 @@ class BacktrackingAllocator
PriorityQueue<QueueItem, QueueItem, 0, SystemAllocPolicy> allocationQueue;
// A subrange over which a physical register is allocated.
struct AllocatedRange {
LiveInterval* interval;
const LiveInterval::Range* range;
AllocatedRange()
: interval(nullptr), range(nullptr)
{}
AllocatedRange(LiveInterval* interval, const LiveInterval::Range* range)
: interval(interval), range(range)
{}
static int compare(const AllocatedRange& v0, const AllocatedRange& v1) {
// LiveInterval::Range includes 'from' but excludes 'to'.
if (v0.range->to <= v1.range->from)
return -1;
if (v0.range->from >= v1.range->to)
return 1;
return 0;
}
};
typedef SplayTree<AllocatedRange, AllocatedRange> AllocatedRangeSet;
typedef SplayTree<LiveRange*, LiveRange> LiveRangeSet;
// Each physical register is associated with the set of ranges over which
// that register is currently allocated.
struct PhysicalRegister {
bool allocatable;
AnyRegister reg;
AllocatedRangeSet allocations;
LiveRangeSet allocations;
PhysicalRegister() : allocatable(false) {}
};
@@ -177,16 +589,12 @@ class BacktrackingAllocator
// Ranges of code which are considered to be hot, for which good allocation
// should be prioritized.
AllocatedRangeSet hotcode;
// During register allocation, virtual stack slots are used for spills.
// These are converted to actual spill locations.
size_t numVirtualStackSlots;
LiveRangeSet hotcode;
// Information about an allocated stack slot.
struct SpillSlot : public TempObject, public InlineForwardListNode<SpillSlot> {
LStackSlot alloc;
AllocatedRangeSet allocated;
LiveRangeSet allocated;
SpillSlot(uint32_t slot, LifoAlloc* alloc)
: alloc(slot), allocated(alloc)
@@ -199,93 +607,130 @@ class BacktrackingAllocator
public:
BacktrackingAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph)
: LiveRangeAllocator<BacktrackingVirtualRegister>(mir, lir, graph),
numVirtualStackSlots(0)
: RegisterAllocator(mir, lir, graph),
liveIn(nullptr),
callRanges(nullptr)
{ }
bool go();
private:
typedef Vector<LiveInterval*, 4, SystemAllocPolicy> LiveIntervalVector;
typedef Vector<LiveRange*, 4, SystemAllocPolicy> LiveRangeVector;
typedef Vector<LiveBundle*, 4, SystemAllocPolicy> LiveBundleVector;
// Liveness methods.
bool init();
bool canAddToGroup(VirtualRegisterGroup* group, BacktrackingVirtualRegister* reg);
bool tryGroupRegisters(uint32_t vreg0, uint32_t vreg1);
bool tryGroupReusedRegister(uint32_t def, uint32_t use);
bool groupAndQueueRegisters();
bool tryAllocateFixed(LiveInterval* interval, bool* success, bool* pfixed,
LiveIntervalVector& conflicting);
bool tryAllocateNonFixed(LiveInterval* interval, bool* success, bool* pfixed,
LiveIntervalVector& conflicting);
bool processInterval(LiveInterval* interval);
bool processGroup(VirtualRegisterGroup* group);
bool setIntervalRequirement(LiveInterval* interval);
bool tryAllocateRegister(PhysicalRegister& r, LiveInterval* interval,
bool* success, bool* pfixed, LiveIntervalVector& conflicting);
bool tryAllocateGroupRegister(PhysicalRegister& r, VirtualRegisterGroup* group,
bool* psuccess, bool* pfixed, LiveInterval** pconflicting);
bool evictInterval(LiveInterval* interval);
void distributeUses(LiveInterval* interval, const LiveIntervalVector& newIntervals);
bool split(LiveInterval* interval, const LiveIntervalVector& newIntervals);
bool requeueIntervals(const LiveIntervalVector& newIntervals);
void spill(LiveInterval* interval);
bool buildLivenessInfo();
bool addInitialFixedRange(AnyRegister reg, CodePosition from, CodePosition to);
VirtualRegister& vreg(const LDefinition* def) {
return vregs[def->virtualRegister()];
}
VirtualRegister& vreg(const LAllocation* alloc) {
MOZ_ASSERT(alloc->isUse());
return vregs[alloc->toUse()->virtualRegister()];
}
// Allocation methods.
bool tryMergeBundles(LiveBundle* bundle0, LiveBundle* bundle1);
bool tryMergeReusedRegister(VirtualRegister& def, VirtualRegister& input);
bool mergeAndQueueRegisters();
bool tryAllocateFixed(LiveBundle* bundle, Requirement requirement,
bool* success, bool* pfixed, LiveBundleVector& conflicting);
bool tryAllocateNonFixed(LiveBundle* bundle, Requirement requirement, Requirement hint,
bool* success, bool* pfixed, LiveBundleVector& conflicting);
bool processBundle(LiveBundle* bundle);
bool computeRequirement(LiveBundle* bundle, Requirement *prequirement, Requirement *phint);
bool tryAllocateRegister(PhysicalRegister& r, LiveBundle* bundle,
bool* success, bool* pfixed, LiveBundleVector& conflicting);
bool evictBundle(LiveBundle* bundle);
bool splitAndRequeueBundles(LiveBundle* bundle, const LiveBundleVector& newBundles);
bool spill(LiveBundle* bundle);
bool isReusedInput(LUse* use, LNode* ins, bool considerCopy);
bool isRegisterUse(LUse* use, LNode* ins, bool considerCopy = false);
bool isRegisterDefinition(LiveInterval* interval);
bool addLiveInterval(LiveIntervalVector& intervals, uint32_t vreg,
LiveInterval* spillInterval,
CodePosition from, CodePosition to);
bool pickStackSlot(LiveInterval* interval);
bool reuseOrAllocateStackSlot(const LiveIntervalVector& intervals, LDefinition::Type type,
LAllocation* palloc);
bool insertAllRanges(AllocatedRangeSet& set, const LiveIntervalVector& intervals);
bool isRegisterDefinition(LiveRange* range);
bool pickStackSlot(SpillSet* spill);
bool insertAllRanges(LiveRangeSet& set, LiveBundle* bundle);
// Reification methods.
bool pickStackSlots();
bool resolveControlFlow();
bool reifyAllocations();
bool populateSafepoints();
bool annotateMoveGroups();
size_t findFirstNonCallSafepoint(CodePosition from);
size_t findFirstSafepoint(CodePosition pos, size_t startFrom);
void addLiveRegistersForRange(VirtualRegister& reg, LiveRange* range);
void dumpRegisterGroups();
bool addMove(LMoveGroup* moves, LiveRange* from, LiveRange* to, LDefinition::Type type) {
LAllocation fromAlloc = from->bundle()->allocation();
LAllocation toAlloc = to->bundle()->allocation();
MOZ_ASSERT(fromAlloc != toAlloc);
return moves->add(fromAlloc, toAlloc, type);
}
bool moveInput(LInstruction* ins, LiveRange* from, LiveRange* to, LDefinition::Type type) {
if (from->bundle()->allocation() == to->bundle()->allocation())
return true;
LMoveGroup* moves = getInputMoveGroup(ins);
return addMove(moves, from, to, type);
}
bool moveAfter(LInstruction* ins, LiveRange* from, LiveRange* to, LDefinition::Type type) {
if (from->bundle()->allocation() == to->bundle()->allocation())
return true;
LMoveGroup* moves = getMoveGroupAfter(ins);
return addMove(moves, from, to, type);
}
bool moveAtExit(LBlock* block, LiveRange* from, LiveRange* to, LDefinition::Type type) {
if (from->bundle()->allocation() == to->bundle()->allocation())
return true;
LMoveGroup* moves = block->getExitMoveGroup(alloc());
return addMove(moves, from, to, type);
}
bool moveAtEntry(LBlock* block, LiveRange* from, LiveRange* to, LDefinition::Type type) {
if (from->bundle()->allocation() == to->bundle()->allocation())
return true;
LMoveGroup* moves = block->getEntryMoveGroup(alloc());
return addMove(moves, from, to, type);
}
// Debugging methods.
void dumpFixedRanges();
void dumpAllocations();
struct PrintLiveIntervalRange;
struct PrintLiveRange;
bool minimalDef(const LiveInterval* interval, LNode* ins);
bool minimalUse(const LiveInterval* interval, LNode* ins);
bool minimalInterval(const LiveInterval* interval, bool* pfixed = nullptr);
bool minimalDef(LiveRange* range, LNode* ins);
bool minimalUse(LiveRange* range, LNode* ins);
bool minimalBundle(LiveBundle* bundle, bool* pfixed = nullptr);
// Heuristic methods.
size_t computePriority(const LiveInterval* interval);
size_t computeSpillWeight(const LiveInterval* interval);
size_t computePriority(LiveBundle* bundle);
size_t computeSpillWeight(LiveBundle* bundle);
size_t computePriority(const VirtualRegisterGroup* group);
size_t computeSpillWeight(const VirtualRegisterGroup* group);
size_t maximumSpillWeight(const LiveBundleVector& bundles);
size_t maximumSpillWeight(const LiveIntervalVector& intervals);
bool chooseBundleSplit(LiveBundle* bundle, bool fixed, LiveBundle* conflict);
bool chooseIntervalSplit(LiveInterval* interval, bool fixed, LiveInterval* conflict);
bool splitAt(LiveInterval* interval,
bool splitAt(LiveBundle* bundle,
const SplitPositionVector& splitPositions);
bool trySplitAcrossHotcode(LiveInterval* interval, bool* success);
bool trySplitAfterLastRegisterUse(LiveInterval* interval, LiveInterval* conflict, bool* success);
bool trySplitBeforeFirstRegisterUse(LiveInterval* interval, LiveInterval* conflict, bool* success);
bool splitAtAllRegisterUses(LiveInterval* interval);
bool splitAcrossCalls(LiveInterval* interval);
bool trySplitAcrossHotcode(LiveBundle* bundle, bool* success);
bool trySplitAfterLastRegisterUse(LiveBundle* bundle, LiveBundle* conflict, bool* success);
bool trySplitBeforeFirstRegisterUse(LiveBundle* bundle, LiveBundle* conflict, bool* success);
bool splitAcrossCalls(LiveBundle* bundle);
bool compilingAsmJS() {
return mir->info().compilingAsmJS();
}
bool isVirtualStackSlot(LAllocation alloc) {
return alloc.isStackSlot() &&
LAllocation::DATA_MASK - alloc.toStackSlot()->slot() < numVirtualStackSlots;
}
void dumpVregs();
};
} // namespace jit


@@ -63,19 +63,18 @@ C1Spewer::spewPass(const char* pass)
}
void
C1Spewer::spewIntervals(const char* pass, BacktrackingAllocator* regalloc)
C1Spewer::spewRanges(const char* pass, BacktrackingAllocator* regalloc)
{
if (!spewout_)
return;
fprintf(spewout_, "begin_intervals\n");
fprintf(spewout_, "begin_ranges\n");
fprintf(spewout_, " name \"%s\"\n", pass);
size_t nextId = 0x4000;
for (MBasicBlockIterator block(graph->begin()); block != graph->end(); block++)
spewIntervals(spewout_, *block, regalloc, nextId);
spewRanges(spewout_, *block, regalloc);
fprintf(spewout_, "end_intervals\n");
fprintf(spewout_, "end_ranges\n");
fflush(spewout_);
}
@@ -112,42 +111,37 @@ DumpLIR(FILE* fp, LNode* ins)
}
void
C1Spewer::spewIntervals(FILE* fp, BacktrackingAllocator* regalloc, LNode* ins, size_t& nextId)
C1Spewer::spewRanges(FILE* fp, BacktrackingAllocator* regalloc, LNode* ins)
{
for (size_t k = 0; k < ins->numDefs(); k++) {
uint32_t id = ins->getDef(k)->virtualRegister();
VirtualRegister* vreg = &regalloc->vregs[id];
for (size_t i = 0; i < vreg->numIntervals(); i++) {
LiveInterval* live = vreg->getInterval(i);
if (live->numRanges()) {
fprintf(fp, "%d object \"", (i == 0) ? id : int32_t(nextId++));
fprintf(fp, "%s", live->getAllocation()->toString());
fprintf(fp, "\" %d -1", id);
for (size_t j = 0; j < live->numRanges(); j++) {
fprintf(fp, " [%u, %u[", live->getRange(j)->from.bits(),
live->getRange(j)->to.bits());
}
for (UsePositionIterator usePos(live->usesBegin()); usePos != live->usesEnd(); usePos++)
fprintf(fp, " %u M", usePos->pos.bits());
fprintf(fp, " \"\"\n");
}
for (LiveRange::RegisterLinkIterator iter = vreg->rangesBegin(); iter; iter++) {
LiveRange* range = LiveRange::get(*iter);
fprintf(fp, "%d object \"", id);
fprintf(fp, "%s", range->bundle()->allocation().toString());
fprintf(fp, "\" %d -1", id);
fprintf(fp, " [%u, %u[", range->from().bits(), range->to().bits());
for (UsePositionIterator usePos(range->usesBegin()); usePos; usePos++)
fprintf(fp, " %u M", usePos->pos.bits());
fprintf(fp, " \"\"\n");
}
}
}
void
C1Spewer::spewIntervals(FILE* fp, MBasicBlock* block, BacktrackingAllocator* regalloc, size_t& nextId)
C1Spewer::spewRanges(FILE* fp, MBasicBlock* block, BacktrackingAllocator* regalloc)
{
LBlock* lir = block->lir();
if (!lir)
return;
for (size_t i = 0; i < lir->numPhis(); i++)
spewIntervals(fp, regalloc, lir->getPhi(i), nextId);
spewRanges(fp, regalloc, lir->getPhi(i));
for (LInstructionIterator ins = lir->begin(); ins != lir->end(); ins++)
spewIntervals(fp, regalloc, *ins, nextId);
spewRanges(fp, regalloc, *ins);
}
void


@@ -34,14 +34,14 @@ class C1Spewer
bool init(const char* path);
void beginFunction(MIRGraph* graph, HandleScript script);
void spewPass(const char* pass);
void spewIntervals(const char* pass, BacktrackingAllocator* regalloc);
void spewRanges(const char* pass, BacktrackingAllocator* regalloc);
void endFunction();
void finish();
private:
void spewPass(FILE* fp, MBasicBlock* block);
void spewIntervals(FILE* fp, BacktrackingAllocator* regalloc, LNode* ins, size_t& nextId);
void spewIntervals(FILE* fp, MBasicBlock* block, BacktrackingAllocator* regalloc, size_t& nextId);
void spewRanges(FILE* fp, BacktrackingAllocator* regalloc, LNode* ins);
void spewRanges(FILE* fp, MBasicBlock* block, BacktrackingAllocator* regalloc);
};
} // namespace jit


@@ -2120,13 +2120,13 @@ CodeGenerator::visitMoveGroup(LMoveGroup* group)
for (size_t i = 0; i < group->numMoves(); i++) {
const LMove& move = group->getMove(i);
const LAllocation* from = move.from();
const LAllocation* to = move.to();
LAllocation from = move.from();
LAllocation to = move.to();
LDefinition::Type type = move.type();
// No bogus moves.
MOZ_ASSERT(*from != *to);
MOZ_ASSERT(!from->isConstant());
MOZ_ASSERT(from != to);
MOZ_ASSERT(!from.isConstant());
MoveOp::Type moveType;
switch (type) {
case LDefinition::OBJECT:


@@ -68,11 +68,6 @@ class InlineForwardList : protected InlineForwardListNode<T>
return iterator(nullptr);
}
void removeAt(iterator where) {
iterator iter(where);
iter++;
#ifdef DEBUG
iter.modifyCount_++;
#endif
removeAfter(where.prev, where.iter);
}
void pushFront(Node* t) {
@@ -92,7 +87,7 @@ class InlineForwardList : protected InlineForwardListNode<T>
removeAfter(this, result);
return result;
}
T* back() {
T* back() const {
MOZ_ASSERT(!empty());
return static_cast<T*>(tail_);
}
@@ -116,6 +111,18 @@ class InlineForwardList : protected InlineForwardListNode<T>
at->next = item->next;
item->next = nullptr;
}
void removeAndIncrement(iterator &where) {
// Do not change modifyCount_ here. The iterator can still be used
// after calling this method, unlike the other methods that modify
// the list.
Node* item = where.iter;
where.iter = item->next;
if (item == tail_)
tail_ = where.prev;
MOZ_ASSERT(where.prev->next == item);
where.prev->next = where.iter;
item->next = nullptr;
}
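    // Hedged usage sketch (not part of this patch), assuming a list of T and
    // a predicate shouldDrop; removeAndIncrement() lets the caller prune
    // entries mid-iteration without invalidating the iterator:
    //
    //   for (InlineForwardListIterator<T> iter = list.begin(); iter; ) {
    //       if (shouldDrop(*iter))
    //           list.removeAndIncrement(iter);   // unlinks and advances
    //       else
    //           iter++;
    //   }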
void splitAfter(Node* at, InlineForwardList<T>* to) {
MOZ_ASSERT(to->empty());
if (!at)
@@ -185,6 +192,9 @@ public:
bool operator ==(const InlineForwardListIterator<T>& where) const {
return iter == where.iter;
}
explicit operator bool() const {
return iter != nullptr;
}
private:
Node* prev;


@@ -396,12 +396,12 @@ JSONSpewer::spewLIR(MIRGraph* mir)
}
void
JSONSpewer::spewIntervals(BacktrackingAllocator* regalloc)
JSONSpewer::spewRanges(BacktrackingAllocator* regalloc)
{
if (!fp_)
return;
beginObjectProperty("intervals");
beginObjectProperty("ranges");
beginListProperty("blocks");
for (size_t bno = 0; bno < regalloc->graph.numBlocks(); bno++) {
@@ -417,27 +417,17 @@ JSONSpewer::spewIntervals(BacktrackingAllocator* regalloc)
beginObject();
integerProperty("vreg", id);
beginListProperty("intervals");
beginListProperty("ranges");
for (size_t i = 0; i < vreg->numIntervals(); i++) {
LiveInterval* live = vreg->getInterval(i);
for (LiveRange::RegisterLinkIterator iter = vreg->rangesBegin(); iter; iter++) {
LiveRange* range = LiveRange::get(*iter);
if (live->numRanges()) {
beginObject();
property("allocation");
fprintf(fp_, "\"%s\"", live->getAllocation()->toString());
beginListProperty("ranges");
for (size_t j = 0; j < live->numRanges(); j++) {
beginObject();
integerProperty("start", live->getRange(j)->from.bits());
integerProperty("end", live->getRange(j)->to.bits());
endObject();
}
endList();
endObject();
}
beginObject();
property("allocation");
fprintf(fp_, "\"%s\"", range->bundle()->allocation().toString());
integerProperty("start", range->from().bits());
integerProperty("end", range->to().bits());
endObject();
}
endList();


@@ -61,7 +61,7 @@ class JSONSpewer
void spewMIR(MIRGraph* mir);
void spewLIns(LNode* ins);
void spewLIR(MIRGraph* mir);
void spewIntervals(BacktrackingAllocator* regalloc);
void spewRanges(BacktrackingAllocator* regalloc);
void endPass();
void endFunction();
void finish();


@@ -177,11 +177,11 @@ IonSpewer::spewPass(const char* pass, BacktrackingAllocator* ra)
return;
c1Spewer.spewPass(pass);
c1Spewer.spewIntervals(pass, ra);
c1Spewer.spewRanges(pass, ra);
jsonSpewer.beginPass(pass);
jsonSpewer.spewMIR(graph);
jsonSpewer.spewLIR(graph);
jsonSpewer.spewIntervals(ra);
jsonSpewer.spewRanges(ra);
jsonSpewer.endPass();
}


@@ -58,27 +58,21 @@ class LOsiPoint : public LInstructionHelper<0, 0, 0>
class LMove
{
LAllocation* from_;
LAllocation* to_;
LAllocation from_;
LAllocation to_;
LDefinition::Type type_;
public:
LMove(LAllocation* from, LAllocation* to, LDefinition::Type type)
LMove(LAllocation from, LAllocation to, LDefinition::Type type)
: from_(from),
to_(to),
type_(type)
{ }
LAllocation* from() {
LAllocation from() const {
return from_;
}
const LAllocation* from() const {
return from_;
}
LAllocation* to() {
return to_;
}
const LAllocation* to() const {
LAllocation to() const {
return to_;
}
LDefinition::Type type() const {
@@ -109,10 +103,10 @@ class LMoveGroup : public LInstructionHelper<0, 0, 0>
void printOperands(FILE* fp);
// Add a move which takes place simultaneously with all others in the group.
bool add(LAllocation* from, LAllocation* to, LDefinition::Type type);
bool add(LAllocation from, LAllocation to, LDefinition::Type type);
// Add a move which takes place after existing moves in the group.
bool addAfter(LAllocation* from, LAllocation* to, LDefinition::Type type);
bool addAfter(LAllocation from, LAllocation to, LDefinition::Type type);
size_t numMoves() const {
return moves_.length();
@@ -137,7 +131,7 @@ class LMoveGroup : public LInstructionHelper<0, 0, 0>
bool uses(Register reg) {
for (size_t i = 0; i < numMoves(); i++) {
LMove move = getMove(i);
if (*move.from() == LGeneralReg(reg) || *move.to() == LGeneralReg(reg))
if (move.from() == LGeneralReg(reg) || move.to() == LGeneralReg(reg))
return true;
}
return false;


@@ -535,28 +535,28 @@ LInstruction::initSafepoint(TempAllocator& alloc)
}
bool
LMoveGroup::add(LAllocation* from, LAllocation* to, LDefinition::Type type)
LMoveGroup::add(LAllocation from, LAllocation to, LDefinition::Type type)
{
#ifdef DEBUG
MOZ_ASSERT(*from != *to);
MOZ_ASSERT(from != to);
for (size_t i = 0; i < moves_.length(); i++)
MOZ_ASSERT(*to != *moves_[i].to());
MOZ_ASSERT(to != moves_[i].to());
// Check that SIMD moves are aligned according to ABI requirements.
if (LDefinition(type).isSimdType()) {
MOZ_ASSERT(from->isMemory() || from->isFloatReg());
if (from->isMemory()) {
if (from->isArgument())
MOZ_ASSERT(from->toArgument()->index() % SimdMemoryAlignment == 0);
MOZ_ASSERT(from.isMemory() || from.isFloatReg());
if (from.isMemory()) {
if (from.isArgument())
MOZ_ASSERT(from.toArgument()->index() % SimdMemoryAlignment == 0);
else
MOZ_ASSERT(from->toStackSlot()->slot() % SimdMemoryAlignment == 0);
MOZ_ASSERT(from.toStackSlot()->slot() % SimdMemoryAlignment == 0);
}
MOZ_ASSERT(to->isMemory() || to->isFloatReg());
if (to->isMemory()) {
if (to->isArgument())
MOZ_ASSERT(to->toArgument()->index() % SimdMemoryAlignment == 0);
MOZ_ASSERT(to.isMemory() || to.isFloatReg());
if (to.isMemory()) {
if (to.isArgument())
MOZ_ASSERT(to.toArgument()->index() % SimdMemoryAlignment == 0);
else
MOZ_ASSERT(to->toStackSlot()->slot() % SimdMemoryAlignment == 0);
MOZ_ASSERT(to.toStackSlot()->slot() % SimdMemoryAlignment == 0);
}
}
#endif
@@ -564,24 +564,24 @@ LMoveGroup::add(LAllocation* from, LAllocation* to, LDefinition::Type type)
}
bool
LMoveGroup::addAfter(LAllocation* from, LAllocation* to, LDefinition::Type type)
LMoveGroup::addAfter(LAllocation from, LAllocation to, LDefinition::Type type)
{
// Transform the operands to this move so that performing the result
// simultaneously with existing moves in the group will have the same
// effect as if the original move took place after the existing moves.
for (size_t i = 0; i < moves_.length(); i++) {
if (*moves_[i].to() == *from) {
if (moves_[i].to() == from) {
from = moves_[i].from();
break;
}
}
if (*from == *to)
if (from == to)
return true;
for (size_t i = 0; i < moves_.length(); i++) {
if (*to == *moves_[i].to()) {
if (to == moves_[i].to()) {
moves_[i] = LMove(from, to, type);
return true;
}
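// Worked example (not part of this patch): with an existing group
// { A -> B }, addAfter(B, C) is rewritten by the first loop to read from A,
// since the group executes as a single parallel step and B's incoming value
// is A's old value. The group becomes { A -> B, A -> C }, which has the
// same effect as performing B -> C after the original group.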
@@ -596,8 +596,8 @@ LMoveGroup::printOperands(FILE* fp)
for (size_t i = 0; i < numMoves(); i++) {
const LMove& move = getMove(i);
// Use two printfs, as LAllocation::toString is not reentrant.
fprintf(fp, " [%s", move.from()->toString());
fprintf(fp, " -> %s", move.to()->toString());
fprintf(fp, " [%s", move.from().toString());
fprintf(fp, " -> %s", move.to().toString());
#ifdef DEBUG
fprintf(fp, ", %s", TypeChars[move.type()]);
#endif


@@ -1,996 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/LiveRangeAllocator.h"
#include "mozilla/DebugOnly.h"
#include "jsprf.h"
#include "jit/BacktrackingAllocator.h"
#include "jit/BitSet.h"
using namespace js;
using namespace js::jit;
using mozilla::DebugOnly;
int
Requirement::priority() const
{
switch (kind_) {
case Requirement::FIXED:
return 0;
case Requirement::REGISTER:
return 1;
case Requirement::NONE:
return 2;
default:
MOZ_CRASH("Unknown requirement kind.");
}
}
const char*
Requirement::toString() const
{
#ifdef DEBUG
// Not reentrant!
static char buf[1000];
char* cursor = buf;
char* end = cursor + sizeof(buf);
int n = -1; // initialize to silence GCC warning
switch (kind()) {
case NONE:
return "none";
case REGISTER:
n = JS_snprintf(cursor, end - cursor, "r");
break;
case FIXED:
n = JS_snprintf(cursor, end - cursor, "%s", allocation().toString());
break;
case MUST_REUSE_INPUT:
n = JS_snprintf(cursor, end - cursor, "v%u", virtualRegister());
break;
}
if (n < 0)
return "???";
cursor += n;
if (pos() != CodePosition::MIN) {
n = JS_snprintf(cursor, end - cursor, "@%u", pos().bits());
if (n < 0)
return "???";
cursor += n;
}
return buf;
#else
return " ???";
#endif
}
void
Requirement::dump() const
{
fprintf(stderr, "%s\n", toString());
}
bool
LiveInterval::Range::contains(const Range* other) const
{
return from <= other->from && to >= other->to;
}
void
LiveInterval::Range::intersect(const Range* other, Range* pre, Range* inside, Range* post) const
{
MOZ_ASSERT(pre->empty() && inside->empty() && post->empty());
CodePosition innerFrom = from;
if (from < other->from) {
if (to < other->from) {
*pre = Range(from, to);
return;
}
*pre = Range(from, other->from);
innerFrom = other->from;
}
CodePosition innerTo = to;
if (to > other->to) {
if (from >= other->to) {
*post = Range(from, to);
return;
}
*post = Range(other->to, to);
innerTo = other->to;
}
if (innerFrom != innerTo)
*inside = Range(innerFrom, innerTo);
}
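// Worked example (not part of this patch): intersecting [2,10) with [4,6)
// yields pre = [2,4), inside = [4,6), post = [6,10). For disjoint ranges
// the early returns above set only pre or only post, leaving the other
// outputs empty.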
const char*
LiveInterval::Range::toString() const
{
#ifdef DEBUG
// Not reentrant!
static char buf[1000];
char* cursor = buf;
char* end = cursor + sizeof(buf);
int n = JS_snprintf(cursor, end - cursor, "[%u,%u)", from.bits(), to.bits());
if (n < 0)
return " ???";
cursor += n;
return buf;
#else
return " ???";
#endif
}
void
LiveInterval::Range::dump() const
{
fprintf(stderr, "%s\n", toString());
}
bool
LiveInterval::addRangeAtHead(CodePosition from, CodePosition to)
{
MOZ_ASSERT(from < to);
MOZ_ASSERT(ranges_.empty() || from <= ranges_.back().from);
Range newRange(from, to);
if (ranges_.empty())
return ranges_.append(newRange);
Range& first = ranges_.back();
if (to < first.from)
return ranges_.append(newRange);
if (to == first.from) {
first.from = from;
return true;
}
MOZ_ASSERT(from < first.to);
MOZ_ASSERT(to > first.from);
if (from < first.from)
first.from = from;
if (to > first.to)
first.to = to;
return true;
}
bool
LiveInterval::addRange(CodePosition from, CodePosition to)
{
MOZ_ASSERT(from < to);
Range newRange(from, to);
Range* i;
// Find the location to insert the new range
for (i = ranges_.end(); i > ranges_.begin(); i--) {
if (newRange.from <= i[-1].to) {
if (i[-1].from < newRange.from)
newRange.from = i[-1].from;
break;
}
}
// Perform coalescing on overlapping ranges
Range* coalesceEnd = i;
for (; i > ranges_.begin(); i--) {
if (newRange.to < i[-1].from)
break;
if (newRange.to < i[-1].to)
newRange.to = i[-1].to;
}
if (i == coalesceEnd)
return ranges_.insert(i, newRange);
i[0] = newRange;
ranges_.erase(i + 1, coalesceEnd);
return true;
}
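// Worked example (not part of this patch): with existing ranges [1,4) and
// [6,9), addRange(3, 7) bridges both: the first loop grows the new range to
// [1,7), the coalescing loop grows it to [1,9), and the two old entries are
// replaced by the single range [1,9).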
void
LiveInterval::setFrom(CodePosition from)
{
while (!ranges_.empty()) {
if (ranges_.back().to < from) {
ranges_.popBack();
} else {
if (from == ranges_.back().to)
ranges_.popBack();
else
ranges_.back().from = from;
break;
}
}
}
bool
LiveInterval::covers(CodePosition pos)
{
if (pos < start() || pos >= end())
return false;
// Loop over the ranges in ascending order.
size_t i = lastProcessedRangeIfValid(pos);
for (; i < ranges_.length(); i--) {
if (pos < ranges_[i].from)
return false;
setLastProcessedRange(i, pos);
if (pos < ranges_[i].to)
return true;
}
return false;
}
CodePosition
LiveInterval::intersect(LiveInterval* other)
{
if (start() > other->start())
return other->intersect(this);
// Loop over the ranges in ascending order. As an optimization,
// try to start at the last processed range.
size_t i = lastProcessedRangeIfValid(other->start());
size_t j = other->ranges_.length() - 1;
if (i >= ranges_.length() || j >= other->ranges_.length())
return CodePosition::MIN;
while (true) {
const Range& r1 = ranges_[i];
const Range& r2 = other->ranges_[j];
if (r1.from <= r2.from) {
if (r1.from <= other->start())
setLastProcessedRange(i, other->start());
if (r2.from < r1.to)
return r2.from;
if (i == 0 || ranges_[i-1].from > other->end())
break;
i--;
} else {
if (r1.from < r2.to)
return r1.from;
if (j == 0 || other->ranges_[j-1].from > end())
break;
j--;
}
}
return CodePosition::MIN;
}
/*
* This function takes the callee interval and moves all ranges following or
* including the provided position to the target interval. Additionally, if a
* range in the callee interval spans the given position, it is split and the
* latter half is placed in the target interval.
*
* This function should only be called if it is known that the interval should
* actually be split (and, presumably, a move inserted). As such, it is an
* error for the caller to request a split that moves all intervals into the
* target. Doing so will trip the assertion at the bottom of the function.
*/
bool
LiveInterval::splitFrom(CodePosition pos, LiveInterval* after)
{
MOZ_ASSERT(pos >= start() && pos < end());
MOZ_ASSERT(after->ranges_.empty());
// Move all intervals over to the target
size_t bufferLength = ranges_.length();
Range* buffer = ranges_.extractRawBuffer();
if (!buffer)
return false;
after->ranges_.replaceRawBuffer(buffer, bufferLength);
// Move intervals back as required
for (Range* i = &after->ranges_.back(); i >= after->ranges_.begin(); i--) {
if (pos >= i->to)
continue;
if (pos > i->from) {
// Split the range
Range split(i->from, pos);
i->from = pos;
if (!ranges_.append(split))
return false;
}
if (!ranges_.append(i + 1, after->ranges_.end()))
return false;
after->ranges_.shrinkBy(after->ranges_.end() - i - 1);
break;
}
// Split the linked list of use positions
UsePosition* prev = nullptr;
for (UsePositionIterator usePos(usesBegin()); usePos != usesEnd(); usePos++) {
if (usePos->pos > pos)
break;
prev = *usePos;
}
uses_.splitAfter(prev, &after->uses_);
return true;
}
void
LiveInterval::addUse(UsePosition* use)
{
// Insert use positions in ascending order. Note that instructions
// are visited in reverse order, so in most cases the loop terminates
// at the first iteration and the use position will be added to the
// front of the list.
UsePosition* prev = nullptr;
for (UsePositionIterator current(usesBegin()); current != usesEnd(); current++) {
if (current->pos >= use->pos)
break;
prev = *current;
}
if (prev)
uses_.insertAfter(prev, use);
else
uses_.pushFront(use);
}
void
LiveInterval::addUseAtEnd(UsePosition* use)
{
MOZ_ASSERT(uses_.empty() || use->pos >= uses_.back()->pos);
uses_.pushBack(use);
}
UsePosition*
LiveInterval::nextUseAfter(CodePosition after)
{
for (UsePositionIterator usePos(usesBegin()); usePos != usesEnd(); usePos++) {
if (usePos->pos >= after) {
LUse::Policy policy = usePos->use->policy();
MOZ_ASSERT(policy != LUse::RECOVERED_INPUT);
if (policy != LUse::KEEPALIVE)
return *usePos;
}
}
return nullptr;
}
UsePosition*
LiveInterval::popUse()
{
return uses_.popFront();
}
/*
* This function locates the first "real" use of this interval that follows
* the given code position. Non-"real" uses are currently just snapshots,
* which keep virtual registers alive but do not result in the
* generation of code that uses them.
*/
CodePosition
LiveInterval::nextUsePosAfter(CodePosition after)
{
UsePosition* min = nextUseAfter(after);
return min ? min->pos : CodePosition::MAX;
}
LiveInterval*
VirtualRegister::intervalFor(CodePosition pos)
{
// Intervals are sorted in ascending order by their start position.
for (LiveInterval** i = intervals_.begin(); i != intervals_.end(); i++) {
if ((*i)->covers(pos))
return *i;
if (pos < (*i)->start())
break;
}
return nullptr;
}
LiveInterval*
VirtualRegister::getFirstInterval()
{
MOZ_ASSERT(!intervals_.empty());
return intervals_[0];
}
// Instantiate LiveRangeAllocator for each template instance.
template bool LiveRangeAllocator<BacktrackingVirtualRegister>::buildLivenessInfo();
template void LiveRangeAllocator<BacktrackingVirtualRegister>::dumpVregs();
#ifdef DEBUG
// Returns true iff ins has a def/temp reusing the input allocation.
static bool
IsInputReused(LInstruction* ins, LUse* use)
{
for (size_t i = 0; i < ins->numDefs(); i++) {
if (ins->getDef(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
ins->getOperand(ins->getDef(i)->getReusedInput())->toUse() == use)
{
return true;
}
}
for (size_t i = 0; i < ins->numTemps(); i++) {
if (ins->getTemp(i)->policy() == LDefinition::MUST_REUSE_INPUT &&
ins->getOperand(ins->getTemp(i)->getReusedInput())->toUse() == use)
{
return true;
}
}
return false;
}
#endif
/*
* This function pre-allocates and initializes as much global state as possible
* to avoid littering the algorithms with memory management cruft.
*/
template <typename VREG>
bool
LiveRangeAllocator<VREG>::init()
{
if (!RegisterAllocator::init())
return false;
liveIn = mir->allocate<BitSet>(graph.numBlockIds());
if (!liveIn)
return false;
// Initialize fixed intervals.
for (size_t i = 0; i < AnyRegister::Total; i++) {
AnyRegister reg = AnyRegister::FromCode(i);
LiveInterval* interval = LiveInterval::New(alloc(), 0);
interval->setAllocation(LAllocation(reg));
fixedIntervals[i] = interval;
}
fixedIntervalsUnion = LiveInterval::New(alloc(), 0);
if (!vregs.init(mir, graph.numVirtualRegisters()))
return false;
// Build virtual register objects
for (size_t i = 0; i < graph.numBlocks(); i++) {
if (mir->shouldCancel("Create data structures (main loop)"))
return false;
LBlock* block = graph.getBlock(i);
for (LInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
for (size_t j = 0; j < ins->numDefs(); j++) {
LDefinition* def = ins->getDef(j);
if (def->isBogusTemp())
continue;
if (!vregs[def].init(alloc(), *ins, def, /* isTemp */ false))
return false;
}
for (size_t j = 0; j < ins->numTemps(); j++) {
LDefinition* def = ins->getTemp(j);
if (def->isBogusTemp())
continue;
if (!vregs[def].init(alloc(), *ins, def, /* isTemp */ true))
return false;
}
}
for (size_t j = 0; j < block->numPhis(); j++) {
LPhi* phi = block->getPhi(j);
LDefinition* def = phi->getDef(0);
if (!vregs[def].init(alloc(), phi, def, /* isTemp */ false))
return false;
}
}
return true;
}
/*
* This function builds up liveness intervals for all virtual registers
* defined in the function. Additionally, it populates the liveIn array with
* information about which registers are live at the beginning of a block, to
* aid resolution and reification in a later phase.
*
* The algorithm is based on the one published in:
*
* Wimmer, Christian, and Michael Franz. "Linear Scan Register Allocation on
* SSA Form." Proceedings of the International Symposium on Code Generation
* and Optimization. Toronto, Ontario, Canada, ACM. 2010. 170-79. PDF.
*
* The algorithm operates on blocks ordered such that dominators of a block
* are before the block itself, and such that all blocks of a loop are
* contiguous. It proceeds backwards over the instructions in this order,
* marking registers live at their uses, ending their live intervals at
* definitions, and recording which registers are live at the top of every
* block. To deal with loop backedges, variables live at the beginning of
* a loop gain an interval covering the entire loop.
*/
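// A minimal standalone sketch (not part of this patch) of the backward scan
// described above, on a toy straight-line program: a definition ends a live
// range and a use starts one. Block boundaries, phis, and the loop fixup
// are omitted; all types below are hypothetical.
#include <cstdio>
#include <set>
#include <vector>

struct Ins { int def; std::vector<int> uses; };   // def < 0 means no def

int main() {
    // v2 = v0 + v1 ; v3 = v2 + v0
    std::vector<Ins> code = { {2, {0, 1}}, {3, {2, 0}} };
    std::set<int> live;                           // registers live after here
    for (size_t i = code.size(); i > 0; i--) {
        const Ins& ins = code[i - 1];
        if (ins.def >= 0)
            live.erase(ins.def);                  // the range ends at its def
        for (int u : ins.uses)
            live.insert(u);                       // a use extends liveness upward
        printf("live before instruction %zu:", i - 1);
        for (int v : live)
            printf(" v%d", v);
        printf("\n");
    }
    return 0;
}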
template <typename VREG>
bool
LiveRangeAllocator<VREG>::buildLivenessInfo()
{
JitSpew(JitSpew_RegAlloc, "Beginning liveness analysis");
if (!init())
return false;
Vector<MBasicBlock*, 1, SystemAllocPolicy> loopWorkList;
BitSet loopDone(graph.numBlockIds());
if (!loopDone.init(alloc()))
return false;
for (size_t i = graph.numBlocks(); i > 0; i--) {
if (mir->shouldCancel("Build Liveness Info (main loop)"))
return false;
LBlock* block = graph.getBlock(i - 1);
MBasicBlock* mblock = block->mir();
BitSet& live = liveIn[mblock->id()];
new (&live) BitSet(graph.numVirtualRegisters());
if (!live.init(alloc()))
return false;
// Propagate liveIn from our successors to us
for (size_t i = 0; i < mblock->lastIns()->numSuccessors(); i++) {
MBasicBlock* successor = mblock->lastIns()->getSuccessor(i);
// Skip backedges, as we fix them up at the loop header.
if (mblock->id() < successor->id())
live.insertAll(liveIn[successor->id()]);
}
// Add successor phis
if (mblock->successorWithPhis()) {
LBlock* phiSuccessor = mblock->successorWithPhis()->lir();
for (unsigned int j = 0; j < phiSuccessor->numPhis(); j++) {
LPhi* phi = phiSuccessor->getPhi(j);
LAllocation* use = phi->getOperand(mblock->positionInPhiSuccessor());
uint32_t reg = use->toUse()->virtualRegister();
live.insert(reg);
}
}
// Variables are assumed alive for the entire block; a definition shortens
// the interval to the point of definition.
for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
if (!vregs[*liveRegId].getInterval(0)->addRangeAtHead(entryOf(block),
exitOf(block).next()))
{
return false;
}
}
// Shorten the front end of live intervals for live variables to their
// point of definition, if found.
for (LInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
// Calls may clobber registers, so force a spill and reload around the callsite.
if (ins->isCall()) {
for (AnyRegisterIterator iter(allRegisters_.asLiveSet()); iter.more(); iter++) {
bool found = false;
for (size_t i = 0; i < ins->numDefs(); i++) {
if (ins->getDef(i)->isFixed() &&
ins->getDef(i)->output()->aliases(LAllocation(*iter))) {
found = true;
break;
}
}
if (!found && !addFixedRangeAtHead(*iter, outputOf(*ins), outputOf(*ins).next()))
return false;
}
}
DebugOnly<bool> hasDoubleDef = false;
DebugOnly<bool> hasFloat32Def = false;
for (size_t i = 0; i < ins->numDefs(); i++) {
LDefinition* def = ins->getDef(i);
if (def->isBogusTemp())
continue;
#ifdef DEBUG
if (def->type() == LDefinition::DOUBLE)
hasDoubleDef = true;
if (def->type() == LDefinition::FLOAT32)
hasFloat32Def = true;
#endif
CodePosition from = outputOf(*ins);
if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
// MUST_REUSE_INPUT is implemented by allocating an output
// register and moving the input to it. Register hints are
// used to avoid unnecessary moves. We give the input an
// LUse::ANY policy to avoid allocating a register for the
// input.
LUse* inputUse = ins->getOperand(def->getReusedInput())->toUse();
MOZ_ASSERT(inputUse->policy() == LUse::REGISTER);
MOZ_ASSERT(inputUse->usedAtStart());
*inputUse = LUse(inputUse->virtualRegister(), LUse::ANY, /* usedAtStart = */ true);
}
LiveInterval* interval = vregs[def].getInterval(0);
interval->setFrom(from);
// Ensure that if there aren't any uses, there's at least
// some interval for the output to go into.
if (interval->numRanges() == 0) {
if (!interval->addRangeAtHead(from, from.next()))
return false;
}
live.remove(def->virtualRegister());
}
for (size_t i = 0; i < ins->numTemps(); i++) {
LDefinition* temp = ins->getTemp(i);
if (temp->isBogusTemp())
continue;
// Normally temps are considered to cover both the input
// and output of the associated instruction. In some cases
// though we want to use a fixed register as both an input
// and clobbered register in the instruction, so watch for
// this and shorten the temp to cover only the output.
CodePosition from = inputOf(*ins);
if (temp->policy() == LDefinition::FIXED) {
AnyRegister reg = temp->output()->toRegister();
for (LInstruction::InputIterator alloc(**ins); alloc.more(); alloc.next()) {
if (alloc->isUse()) {
LUse* use = alloc->toUse();
if (use->isFixedRegister()) {
if (GetFixedRegister(vregs[use].def(), use) == reg)
from = outputOf(*ins);
}
}
}
}
CodePosition to =
ins->isCall() ? outputOf(*ins) : outputOf(*ins).next();
if (!vregs[temp].getInterval(0)->addRangeAtHead(from, to))
return false;
}
DebugOnly<bool> hasUseRegister = false;
DebugOnly<bool> hasUseRegisterAtStart = false;
for (LInstruction::InputIterator inputAlloc(**ins); inputAlloc.more(); inputAlloc.next()) {
if (inputAlloc->isUse()) {
LUse* use = inputAlloc->toUse();
// Call uses should always be at-start or fixed, since the fixed intervals
// use all registers.
MOZ_ASSERT_IF(ins->isCall() && !inputAlloc.isSnapshotInput(),
use->isFixedRegister() || use->usedAtStart());
#ifdef DEBUG
// Don't allow at-start call uses if there are temps of the same kind,
// so that we don't assign the same register.
if (ins->isCall() && use->usedAtStart()) {
for (size_t i = 0; i < ins->numTemps(); i++)
MOZ_ASSERT(vregs[ins->getTemp(i)].isFloatReg() != vregs[use].isFloatReg());
}
// If there are both useRegisterAtStart(x) and useRegister(y)
// uses, we may assign the same register to both operands due to
// interval splitting (bug 772830). Don't allow this for now.
if (use->policy() == LUse::REGISTER) {
if (use->usedAtStart()) {
if (!IsInputReused(*ins, use))
hasUseRegisterAtStart = true;
} else {
hasUseRegister = true;
}
}
MOZ_ASSERT(!(hasUseRegister && hasUseRegisterAtStart));
#endif
// Don't treat RECOVERED_INPUT uses as keeping the vreg alive.
if (use->policy() == LUse::RECOVERED_INPUT)
continue;
// Fixed uses on calls are specially overridden to happen
// at the input position.
CodePosition to =
(use->usedAtStart() || (ins->isCall() && use->isFixedRegister()))
? inputOf(*ins)
: outputOf(*ins);
if (use->isFixedRegister()) {
LAllocation reg(AnyRegister::FromCode(use->registerCode()));
for (size_t i = 0; i < ins->numDefs(); i++) {
LDefinition* def = ins->getDef(i);
if (def->policy() == LDefinition::FIXED && *def->output() == reg)
to = inputOf(*ins);
}
}
LiveInterval* interval = vregs[use].getInterval(0);
if (!interval->addRangeAtHead(entryOf(block), to.next()))
return false;
interval->addUse(new(alloc()) UsePosition(use, to));
live.insert(use->virtualRegister());
}
}
}
// Phis have simultaneous assignment semantics at block begin, so at
// the beginning of the block we can be sure that liveIn does not
// contain any phi outputs.
for (unsigned int i = 0; i < block->numPhis(); i++) {
LDefinition* def = block->getPhi(i)->getDef(0);
if (live.contains(def->virtualRegister())) {
live.remove(def->virtualRegister());
} else {
// This is a dead phi, so add a dummy range over all phis. This
// can go away if we have an earlier dead code elimination pass.
CodePosition entryPos = entryOf(block);
if (!vregs[def].getInterval(0)->addRangeAtHead(entryPos, entryPos.next()))
return false;
}
}
if (mblock->isLoopHeader()) {
// A divergence from the published algorithm is required here, as
// our block order does not guarantee that blocks of a loop are
// contiguous. As a result, a single live interval spanning the
// loop is not possible. Additionally, we require liveIn in a later
// pass for resolution, so that must also be fixed up here.
MBasicBlock* loopBlock = mblock->backedge();
while (true) {
// Blocks must already have been visited to have a liveIn set.
MOZ_ASSERT(loopBlock->id() >= mblock->id());
// Add an interval for this entire loop block
CodePosition from = entryOf(loopBlock->lir());
CodePosition to = exitOf(loopBlock->lir()).next();
for (BitSet::Iterator liveRegId(live); liveRegId; ++liveRegId) {
if (!vregs[*liveRegId].getInterval(0)->addRange(from, to))
return false;
}
// Fix up the liveIn set to account for the new interval
liveIn[loopBlock->id()].insertAll(live);
// Make sure we don't visit this node again
loopDone.insert(loopBlock->id());
// If this is the loop header, any predecessors are either the
// backedge or out of the loop, so skip any predecessors of
// this block
if (loopBlock != mblock) {
for (size_t i = 0; i < loopBlock->numPredecessors(); i++) {
MBasicBlock* pred = loopBlock->getPredecessor(i);
if (loopDone.contains(pred->id()))
continue;
if (!loopWorkList.append(pred))
return false;
}
}
// Terminate loop if out of work.
if (loopWorkList.empty())
break;
// Grab the next block off the work list, skipping any OSR block.
MBasicBlock* osrBlock = graph.mir().osrBlock();
while (!loopWorkList.empty()) {
loopBlock = loopWorkList.popCopy();
if (loopBlock != osrBlock)
break;
}
// If end is reached without finding a non-OSR block, then no more work items were found.
if (loopBlock == osrBlock) {
MOZ_ASSERT(loopWorkList.empty());
break;
}
}
// Clear the done set for other loops
loopDone.clear();
}
MOZ_ASSERT_IF(!mblock->numPredecessors(), live.empty());
}
validateVirtualRegisters();
// If the script has an infinite loop, there may be no MReturn and therefore
// no fixed intervals. Add a small range to fixedIntervalsUnion so that the
// rest of the allocator can assume it has at least one range.
if (fixedIntervalsUnion->numRanges() == 0) {
if (!fixedIntervalsUnion->addRangeAtHead(CodePosition(0, CodePosition::INPUT),
CodePosition(0, CodePosition::OUTPUT)))
{
return false;
}
}
JitSpew(JitSpew_RegAlloc, "Liveness analysis complete");
if (JitSpewEnabled(JitSpew_RegAlloc)) {
dumpInstructions();
fprintf(stderr, "Live ranges by virtual register:\n");
dumpVregs();
}
return true;
}
template <typename VREG>
void
LiveRangeAllocator<VREG>::dumpVregs()
{
#ifdef DEBUG
// Virtual register number 0 is unused.
MOZ_ASSERT(vregs[0u].numIntervals() == 0);
for (uint32_t i = 1; i < graph.numVirtualRegisters(); i++) {
fprintf(stderr, " ");
VirtualRegister& vreg = vregs[i];
for (size_t j = 0; j < vreg.numIntervals(); j++) {
if (j)
fprintf(stderr, " / ");
fprintf(stderr, "%s", vreg.getInterval(j)->toString());
}
fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
#endif
}
#ifdef DEBUG
void
LiveInterval::validateRanges()
{
Range* prev = nullptr;
for (size_t i = ranges_.length() - 1; i < ranges_.length(); i--) {
Range* range = &ranges_[i];
MOZ_ASSERT(range->from < range->to);
MOZ_ASSERT_IF(prev, prev->to <= range->from);
prev = range;
}
}
#endif // DEBUG
const char*
LiveInterval::rangesToString() const
{
#ifdef DEBUG
// Not reentrant!
static char buf[2000];
char* cursor = buf;
char* end = cursor + sizeof(buf);
int n;
for (size_t i = ranges_.length() - 1; i < ranges_.length(); i--) {
const LiveInterval::Range* range = getRange(i);
n = JS_snprintf(cursor, end - cursor, " %s", range->toString());
if (n < 0)
return " ???";
cursor += n;
}
return buf;
#else
return " ???";
#endif
}
#ifdef DEBUG
static bool
IsHintInteresting(const Requirement& requirement, const Requirement& hint)
{
if (hint.kind() == Requirement::NONE)
return false;
if (hint.kind() != Requirement::FIXED && hint.kind() != Requirement::REGISTER)
return true;
Requirement merge = requirement;
if (!merge.mergeRequirement(hint))
return true;
return merge.kind() != requirement.kind();
}
#endif
const char*
LiveInterval::toString() const
{
#ifdef DEBUG
// Not reentrant!
static char buf[2000];
char* cursor = buf;
char* end = cursor + sizeof(buf);
int n;
if (hasVreg()) {
n = JS_snprintf(cursor, end - cursor, "v%u", vreg());
if (n < 0) return "???";
cursor += n;
}
n = JS_snprintf(cursor, end - cursor, "[%u]", index());
if (n < 0) return "???";
cursor += n;
if (requirement_.kind() != Requirement::NONE || hint_.kind() != Requirement::NONE) {
n = JS_snprintf(cursor, end - cursor, " req(");
if (n < 0) return "???";
cursor += n;
bool printHint = IsHintInteresting(requirement_, hint_);
if (requirement_.kind() != Requirement::NONE) {
n = JS_snprintf(cursor, end - cursor, "%s%s",
requirement_.toString(),
printHint ? "," : "");
if (n < 0) return "???";
cursor += n;
}
if (printHint) {
n = JS_snprintf(cursor, end - cursor, "%s?", hint_.toString());
if (n < 0) return "???";
cursor += n;
}
n = JS_snprintf(cursor, end - cursor, ")");
if (n < 0) return "???";
cursor += n;
}
if (!alloc_.isBogus()) {
n = JS_snprintf(cursor, end - cursor, " has(%s)", alloc_.toString());
if (n < 0) return "???";
cursor += n;
}
n = JS_snprintf(cursor, end - cursor, "%s", rangesToString());
if (n < 0) return "???";
cursor += n;
for (UsePositionIterator usePos(usesBegin()); usePos != usesEnd(); usePos++) {
n = JS_snprintf(cursor, end - cursor, " %s@%u",
usePos->use->toString(), usePos->pos.bits());
if (n < 0) return "???";
cursor += n;
}
return buf;
#else
return "???";
#endif
}
void
LiveInterval::dump() const
{
fprintf(stderr, "%s\n", toString());
}


@@ -1,758 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef jit_LiveRangeAllocator_h
#define jit_LiveRangeAllocator_h
#include "mozilla/Array.h"
#include "mozilla/DebugOnly.h"
#include "jit/RegisterAllocator.h"
#include "jit/StackSlotAllocator.h"
// Common structures and functions used by register allocators that operate on
// virtual register live ranges.
namespace js {
namespace jit {
class Requirement
{
public:
enum Kind {
NONE,
REGISTER,
FIXED,
MUST_REUSE_INPUT
};
Requirement()
: kind_(NONE)
{ }
explicit Requirement(Kind kind)
: kind_(kind)
{
// These have dedicated constructors.
MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT);
}
Requirement(Kind kind, CodePosition at)
: kind_(kind),
position_(at)
{
// These have dedicated constructors.
MOZ_ASSERT(kind != FIXED && kind != MUST_REUSE_INPUT);
}
explicit Requirement(LAllocation fixed)
: kind_(FIXED),
allocation_(fixed)
{
MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
}
// Only useful as a hint: encodes where the fixed requirement is used, to
// avoid allocating a fixed register too early.
Requirement(LAllocation fixed, CodePosition at)
: kind_(FIXED),
allocation_(fixed),
position_(at)
{
MOZ_ASSERT(!fixed.isBogus() && !fixed.isUse());
}
Requirement(uint32_t vreg, CodePosition at)
: kind_(MUST_REUSE_INPUT),
allocation_(LUse(vreg, LUse::ANY)),
position_(at)
{ }
Kind kind() const {
return kind_;
}
LAllocation allocation() const {
MOZ_ASSERT(!allocation_.isBogus() && !allocation_.isUse());
return allocation_;
}
uint32_t virtualRegister() const {
MOZ_ASSERT(allocation_.isUse());
MOZ_ASSERT(kind() == MUST_REUSE_INPUT);
return allocation_.toUse()->virtualRegister();
}
CodePosition pos() const {
return position_;
}
int priority() const;
bool mergeRequirement(const Requirement& newRequirement) {
// Merge newRequirement with any existing requirement, returning false
// if the new and old requirements conflict.
MOZ_ASSERT(newRequirement.kind() != Requirement::MUST_REUSE_INPUT);
if (newRequirement.kind() == Requirement::FIXED) {
if (kind() == Requirement::FIXED)
return newRequirement.allocation() == allocation();
*this = newRequirement;
return true;
}
MOZ_ASSERT(newRequirement.kind() == Requirement::REGISTER);
if (kind() == Requirement::FIXED)
return allocation().isRegister();
*this = newRequirement;
return true;
}
// Return a string describing this requirement. This is not re-entrant!
const char* toString() const;
void dump() const;
private:
Kind kind_;
LAllocation allocation_;
CodePosition position_;
};
struct UsePosition : public TempObject,
public InlineForwardListNode<UsePosition>
{
LUse* use;
CodePosition pos;
UsePosition(LUse* use, CodePosition pos) :
use(use),
pos(pos)
{
// Verify that the usedAtStart() flag is consistent with the
// subposition. For now ignore fixed registers, because they
// are handled specially around calls.
MOZ_ASSERT_IF(!use->isFixedRegister(),
pos.subpos() == (use->usedAtStart()
? CodePosition::INPUT
: CodePosition::OUTPUT));
}
};
typedef InlineForwardListIterator<UsePosition> UsePositionIterator;
static inline bool
UseCompatibleWith(const LUse* use, LAllocation alloc)
{
switch (use->policy()) {
case LUse::ANY:
case LUse::KEEPALIVE:
return alloc.isRegister() || alloc.isMemory();
case LUse::REGISTER:
return alloc.isRegister();
case LUse::FIXED:
// Fixed uses are handled using fixed intervals. The
// UsePosition is only used as a hint.
return alloc.isRegister();
default:
MOZ_CRASH("Unknown use policy");
}
}
#ifdef DEBUG
static inline bool
DefinitionCompatibleWith(LNode* ins, const LDefinition* def, LAllocation alloc)
{
if (ins->isPhi()) {
if (def->isFloatReg())
return alloc.isFloatReg() || alloc.isStackSlot();
return alloc.isGeneralReg() || alloc.isStackSlot();
}
switch (def->policy()) {
case LDefinition::REGISTER:
if (!alloc.isRegister())
return false;
return alloc.isFloatReg() == def->isFloatReg();
case LDefinition::FIXED:
return alloc == *def->output();
case LDefinition::MUST_REUSE_INPUT:
if (!alloc.isRegister() || !ins->numOperands())
return false;
return alloc == *ins->getOperand(def->getReusedInput());
default:
MOZ_CRASH("Unknown definition policy");
}
}
#endif // DEBUG
static inline LDefinition*
FindReusingDefinition(LNode* ins, LAllocation* alloc)
{
for (size_t i = 0; i < ins->numDefs(); i++) {
LDefinition* def = ins->getDef(i);
if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
ins->getOperand(def->getReusedInput()) == alloc)
return def;
}
for (size_t i = 0; i < ins->numTemps(); i++) {
LDefinition* def = ins->getTemp(i);
if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
ins->getOperand(def->getReusedInput()) == alloc)
return def;
}
return nullptr;
}
/*
* A live interval is a set of disjoint ranges of code positions where a
* virtual register is live. Register allocation operates on these intervals,
* splitting them as necessary and assigning allocations to them as it runs.
*/
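// For example (positions illustrative), a vreg live over [10,20) and again
// over [40,50) is represented by a single LiveInterval holding those two
// disjoint Ranges.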
class LiveInterval
: public TempObject
{
public:
/*
* A range is a contiguous sequence of CodePositions where the virtual
* register associated with this interval is live.
*/
struct Range {
Range()
: from(),
to()
{ }
Range(CodePosition f, CodePosition t)
: from(f),
to(t)
{
MOZ_ASSERT(from < to);
}
// The beginning of this range, inclusive.
CodePosition from;
// The end of this range, exclusive.
CodePosition to;
bool empty() const {
return from >= to;
}
// Whether this range wholly contains other.
bool contains(const Range* other) const;
// Intersect this range with other, returning the subranges of this
// that are before, inside, or after other.
void intersect(const Range* other, Range* pre, Range* inside, Range* post) const;
// Return a string describing this range. This is not re-entrant!
const char* toString() const;
void dump() const;
};
private:
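// Stored in descending code order: ranges_.back() is the first range in
// code order and ranges_.begin() the last (see start() and end() below).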
Vector<Range, 1, JitAllocPolicy> ranges_;
LAllocation alloc_;
LiveInterval* spillInterval_;
uint32_t vreg_;
uint32_t index_;
Requirement requirement_;
Requirement hint_;
InlineForwardList<UsePosition> uses_;
size_t lastProcessedRange_;
LiveInterval(TempAllocator& alloc, uint32_t vreg, uint32_t index)
: ranges_(alloc),
spillInterval_(nullptr),
vreg_(vreg),
index_(index),
lastProcessedRange_(size_t(-1))
{ }
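// Constructor for an interval not tied to a virtual register (such as a
// fixed-register interval); hasVreg() is false for these.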
LiveInterval(TempAllocator& alloc, uint32_t index)
: ranges_(alloc),
spillInterval_(nullptr),
vreg_(UINT32_MAX),
index_(index),
lastProcessedRange_(size_t(-1))
{ }
public:
static LiveInterval* New(TempAllocator& alloc, uint32_t vreg, uint32_t index) {
return new(alloc) LiveInterval(alloc, vreg, index);
}
static LiveInterval* New(TempAllocator& alloc, uint32_t index) {
return new(alloc) LiveInterval(alloc, index);
}
bool addRange(CodePosition from, CodePosition to);
bool addRangeAtHead(CodePosition from, CodePosition to);
void setFrom(CodePosition from);
CodePosition intersect(LiveInterval* other);
bool covers(CodePosition pos);
CodePosition start() const {
MOZ_ASSERT(!ranges_.empty());
return ranges_.back().from;
}
CodePosition end() const {
MOZ_ASSERT(!ranges_.empty());
return ranges_.begin()->to;
}
size_t numRanges() const {
return ranges_.length();
}
const Range* getRange(size_t i) const {
return &ranges_[i];
}
void setLastProcessedRange(size_t range, mozilla::DebugOnly<CodePosition> pos) {
// If the range starts after pos, we may not be able to use
// it in the next lastProcessedRangeIfValid call.
MOZ_ASSERT(ranges_[range].from <= pos);
lastProcessedRange_ = range;
}
size_t lastProcessedRangeIfValid(CodePosition pos) const {
if (lastProcessedRange_ < ranges_.length() && ranges_[lastProcessedRange_].from <= pos)
return lastProcessedRange_;
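// The cached range is stale; fall back to the last vector entry, which
// is the first range in code order.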
return ranges_.length() - 1;
}
LAllocation* getAllocation() {
return &alloc_;
}
void setAllocation(LAllocation alloc) {
alloc_ = alloc;
}
void setSpillInterval(LiveInterval* spill) {
spillInterval_ = spill;
}
LiveInterval* spillInterval() {
return spillInterval_;
}
bool hasVreg() const {
return vreg_ != UINT32_MAX;
}
uint32_t vreg() const {
MOZ_ASSERT(hasVreg());
return vreg_;
}
uint32_t index() const {
return index_;
}
void setIndex(uint32_t index) {
index_ = index;
}
const Requirement* requirement() const {
return &requirement_;
}
void setRequirement(const Requirement& requirement) {
// A MUST_REUSE_INPUT requirement complicates regalloc too much; it
// should only be used as a hint.
MOZ_ASSERT(requirement.kind() != Requirement::MUST_REUSE_INPUT);
requirement_ = requirement;
}
bool addRequirement(const Requirement& newRequirement) {
return requirement_.mergeRequirement(newRequirement);
}
void addHint(const Requirement& newHint) {
// Unlike addRequirement, here in addHint we ignore merge failures,
// because these are just hints.
hint_.mergeRequirement(newHint);
}
const Requirement* hint() const {
return &hint_;
}
void setHint(const Requirement& hint) {
hint_ = hint;
}
bool isSpill() const {
return alloc_.isStackSlot();
}
bool splitFrom(CodePosition pos, LiveInterval* after);
void addUse(UsePosition* use);
void addUseAtEnd(UsePosition* use);
UsePosition* popUse();
UsePosition* nextUseAfter(CodePosition pos);
CodePosition nextUsePosAfter(CodePosition pos);
UsePositionIterator usesBegin() const {
return uses_.begin();
}
UsePositionIterator usesEnd() const {
return uses_.end();
}
bool usesEmpty() const {
return uses_.empty();
}
UsePosition* usesBack() {
return uses_.back();
}
#ifdef DEBUG
void validateRanges();
#endif
// Return a string describing the ranges in this LiveInterval. This is
// not re-entrant!
const char* rangesToString() const;
// Return a string describing this LiveInterval. This is not re-entrant!
const char* toString() const;
void dump() const;
};
/*
* Represents all of the register allocation state associated with a virtual
* register, including all associated intervals and pointers to relevant LIR
* structures.
*/
class VirtualRegister
{
LNode* ins_;
LDefinition* def_;
Vector<LiveInterval*, 1, JitAllocPolicy> intervals_;
// Whether def_ is a temp or an output.
bool isTemp_ : 1;
void operator=(const VirtualRegister&) = delete;
VirtualRegister(const VirtualRegister&) = delete;
protected:
explicit VirtualRegister(TempAllocator& alloc)
: intervals_(alloc)
{}
public:
bool init(TempAllocator& alloc, LNode* ins, LDefinition* def,
bool isTemp)
{
MOZ_ASSERT(ins && !ins_);
ins_ = ins;
def_ = def;
isTemp_ = isTemp;
LiveInterval* initial = LiveInterval::New(alloc, def->virtualRegister(), 0);
if (!initial)
return false;
return intervals_.append(initial);
}
LBlock* block() {
return ins_->block();
}
LNode* ins() {
return ins_;
}
LDefinition* def() const {
return def_;
}
LDefinition::Type type() const {
return def()->type();
}
bool isTemp() const {
return isTemp_;
}
size_t numIntervals() const {
return intervals_.length();
}
LiveInterval* getInterval(size_t i) const {
return intervals_[i];
}
LiveInterval* lastInterval() const {
MOZ_ASSERT(numIntervals() > 0);
return getInterval(numIntervals() - 1);
}
void replaceInterval(LiveInterval* old, LiveInterval* interval) {
MOZ_ASSERT(intervals_[old->index()] == old);
interval->setIndex(old->index());
intervals_[old->index()] = interval;
}
bool addInterval(LiveInterval* interval) {
MOZ_ASSERT(interval->numRanges());
MOZ_ASSERT(interval->vreg() != 0);
// Preserve ascending order for faster lookups.
LiveInterval** found = nullptr;
LiveInterval** i;
for (i = intervals_.begin(); i != intervals_.end(); i++) {
if (!found && interval->start() < (*i)->start())
found = i;
if (found)
(*i)->setIndex((*i)->index() + 1);
}
if (!found)
found = intervals_.end();
interval->setIndex(found - intervals_.begin());
return intervals_.insert(found, interval);
}
void removeInterval(LiveInterval* interval) {
intervals_.erase(intervals_.begin() + interval->index());
for (size_t i = interval->index(), e = intervals_.length(); i < e; ++i)
intervals_[i]->setIndex(i);
interval->setIndex(-1);
}
bool isFloatReg() const {
return def_->isFloatReg();
}
bool isCompatibleReg(const AnyRegister& r) const {
return def_->isCompatibleReg(r);
}
bool isCompatibleVReg(const VirtualRegister& vr) const {
return def_->isCompatibleDef(*vr.def_);
}
LiveInterval* intervalFor(CodePosition pos);
LiveInterval* getFirstInterval();
};
// Index of the virtual registers in a graph. VREG is a subclass of
// VirtualRegister extended with any allocator-specific state for the vreg.
template <typename VREG>
class VirtualRegisterMap
{
private:
FixedList<VREG> vregs_;
void operator=(const VirtualRegisterMap&) = delete;
VirtualRegisterMap(const VirtualRegisterMap&) = delete;
public:
VirtualRegisterMap()
: vregs_()
{ }
bool init(MIRGenerator* gen, uint32_t numVregs) {
if (!vregs_.init(gen->alloc(), numVregs))
return false;
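// Zero the storage before placement-new: VREG constructors leave most
// fields untouched, and callers (e.g. VirtualRegister::init) rely on
// them starting out null.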
memset(&vregs_[0], 0, sizeof(VREG) * numVregs);
TempAllocator& alloc = gen->alloc();
for (uint32_t i = 0; i < numVregs; i++)
new(&vregs_[i]) VREG(alloc);
return true;
}
VREG& operator[](unsigned int index) {
return vregs_[index];
}
VREG& operator[](const LAllocation* alloc) {
MOZ_ASSERT(alloc->isUse());
return vregs_[alloc->toUse()->virtualRegister()];
}
VREG& operator[](const LDefinition* def) {
return vregs_[def->virtualRegister()];
}
uint32_t numVirtualRegisters() const {
return vregs_.length();
}
};
static inline bool
IsNunbox(VirtualRegister* vreg)
{
#ifdef JS_NUNBOX32
return vreg->type() == LDefinition::TYPE ||
vreg->type() == LDefinition::PAYLOAD;
#else
return false;
#endif
}
static inline bool
IsSlotsOrElements(VirtualRegister* vreg)
{
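// LDefinition::SLOTS is used for both object-slots and elements pointers,
// hence the function name.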
return vreg->type() == LDefinition::SLOTS;
}
static inline bool
IsTraceable(VirtualRegister* reg)
{
if (reg->type() == LDefinition::OBJECT)
return true;
#ifdef JS_PUNBOX64
if (reg->type() == LDefinition::BOX)
return true;
#endif
return false;
}
template <typename VREG>
class LiveRangeAllocator : protected RegisterAllocator
{
protected:
// Computed information
BitSet* liveIn;
VirtualRegisterMap<VREG> vregs;
mozilla::Array<LiveInterval*, AnyRegister::Total> fixedIntervals;
// Union of all ranges in fixedIntervals, used to quickly determine
// whether an interval intersects with a fixed register.
LiveInterval* fixedIntervalsUnion;
// Allocation state
StackSlotAllocator stackSlotAllocator;
LiveRangeAllocator(MIRGenerator* mir, LIRGenerator* lir, LIRGraph& graph)
: RegisterAllocator(mir, lir, graph),
liveIn(nullptr),
fixedIntervalsUnion(nullptr)
{
}
bool buildLivenessInfo();
bool init();
bool addFixedRangeAtHead(AnyRegister reg, CodePosition from, CodePosition to) {
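// Mirror the range into both the per-register fixed interval and the
// union interval used for fast intersection tests.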
if (!fixedIntervals[reg.code()]->addRangeAtHead(from, to))
return false;
return fixedIntervalsUnion->addRangeAtHead(from, to);
}
void validateVirtualRegisters()
{
#ifdef DEBUG
if (!js_JitOptions.checkGraphConsistency)
return;
for (size_t i = 1; i < graph.numVirtualRegisters(); i++) {
VirtualRegister* reg = &vregs[i];
LiveInterval* prev = nullptr;
for (size_t j = 0; j < reg->numIntervals(); j++) {
LiveInterval* interval = reg->getInterval(j);
MOZ_ASSERT(interval->vreg() == i);
MOZ_ASSERT(interval->index() == j);
if (interval->numRanges() == 0)
continue;
MOZ_ASSERT_IF(prev, prev->end() <= interval->start());
interval->validateRanges();
prev = interval;
}
}
#endif
}
bool addMove(LMoveGroup* moves, LiveInterval* from, LiveInterval* to, LDefinition::Type type) {
MOZ_ASSERT(*from->getAllocation() != *to->getAllocation());
return moves->add(from->getAllocation(), to->getAllocation(), type);
}
bool moveInput(LInstruction* ins, LiveInterval* from, LiveInterval* to, LDefinition::Type type) {
if (*from->getAllocation() == *to->getAllocation())
return true;
LMoveGroup* moves = getInputMoveGroup(ins);
return addMove(moves, from, to, type);
}
bool moveAfter(LInstruction* ins, LiveInterval* from, LiveInterval* to, LDefinition::Type type) {
if (*from->getAllocation() == *to->getAllocation())
return true;
LMoveGroup* moves = getMoveGroupAfter(ins);
return addMove(moves, from, to, type);
}
bool moveAtExit(LBlock* block, LiveInterval* from, LiveInterval* to, LDefinition::Type type) {
if (*from->getAllocation() == *to->getAllocation())
return true;
LMoveGroup* moves = block->getExitMoveGroup(alloc());
return addMove(moves, from, to, type);
}
bool moveAtEntry(LBlock* block, LiveInterval* from, LiveInterval* to, LDefinition::Type type) {
if (*from->getAllocation() == *to->getAllocation())
return true;
LMoveGroup* moves = block->getEntryMoveGroup(alloc());
return addMove(moves, from, to, type);
}
size_t findFirstNonCallSafepoint(CodePosition from) const
{
size_t i = 0;
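// Safepoints are sorted by code position, so a linear scan finds the
// first one at or after |from|.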
for (; i < graph.numNonCallSafepoints(); i++) {
const LInstruction* ins = graph.getNonCallSafepoint(i);
if (from <= inputOf(ins))
break;
}
return i;
}
void addLiveRegistersForInterval(VirtualRegister* reg, LiveInterval* interval)
{
// Fill in the live register sets for all non-call safepoints.
LAllocation* a = interval->getAllocation();
if (!a->isRegister())
return;
// Don't add output registers to the safepoint.
CodePosition start = interval->start();
if (interval->index() == 0 && !reg->isTemp()) {
#ifdef CHECK_OSIPOINT_REGISTERS
// We don't add the output register to the safepoint,
// but it still might get added as one of the inputs.
// So eagerly add this reg to the safepoint clobbered registers.
if (reg->ins()->isInstruction()) {
if (LSafepoint* safepoint = reg->ins()->toInstruction()->safepoint())
safepoint->addClobberedRegister(a->toRegister());
}
#endif
start = start.next();
}
size_t i = findFirstNonCallSafepoint(start);
for (; i < graph.numNonCallSafepoints(); i++) {
LInstruction* ins = graph.getNonCallSafepoint(i);
CodePosition pos = inputOf(ins);
// Safepoints are sorted, so we can shortcut out of this loop
// if we go out of range.
if (interval->end() <= pos)
break;
if (!interval->covers(pos))
continue;
LSafepoint* safepoint = ins->safepoint();
safepoint->addLiveRegister(a->toRegister());
#ifdef CHECK_OSIPOINT_REGISTERS
if (reg->isTemp())
safepoint->addClobberedRegister(a->toRegister());
#endif
}
}
// Finds the first safepoint that is within range of an interval.
size_t findFirstSafepoint(const LiveInterval* interval, size_t startFrom) const
{
size_t i = startFrom;
for (; i < graph.numSafepoints(); i++) {
LInstruction* ins = graph.getSafepoint(i);
if (interval->start() <= inputOf(ins))
break;
}
return i;
}
void dumpVregs();
};
} // namespace jit
} // namespace js
#endif /* jit_LiveRangeAllocator_h */

View File

@ -191,8 +191,8 @@ AllocationIntegrityState::checkIntegrity(LBlock* block, LInstruction* ins,
if (ins->isMoveGroup()) {
LMoveGroup* group = ins->toMoveGroup();
for (int i = group->numMoves() - 1; i >= 0; i--) {
- if (*group->getMove(i).to() == alloc) {
- alloc = *group->getMove(i).from();
+ if (group->getMove(i).to() == alloc) {
+ alloc = group->getMove(i).from();
break;
}
}
@ -412,8 +412,8 @@ AllocationIntegrityState::dump()
LMoveGroup* group = ins->toMoveGroup();
for (int i = group->numMoves() - 1; i >= 0; i--) {
// Use two printfs, as LAllocation::toString is not reentrant.
- fprintf(stderr, " [%s", group->getMove(i).from()->toString());
- fprintf(stderr, " -> %s]", group->getMove(i).to()->toString());
+ fprintf(stderr, " [%s", group->getMove(i).from().toString());
+ fprintf(stderr, " -> %s]", group->getMove(i).to().toString());
}
fprintf(stderr, "\n");
continue;
@ -552,8 +552,8 @@ RegisterAllocator::dumpInstructions()
LMoveGroup* group = ins->toMoveGroup();
for (int i = group->numMoves() - 1; i >= 0; i--) {
// Use two printfs, as LAllocation::toString is not reentrant.
- fprintf(stderr, " [%s", group->getMove(i).from()->toString());
- fprintf(stderr, " -> %s]", group->getMove(i).to()->toString());
+ fprintf(stderr, " [%s", group->getMove(i).from().toString());
+ fprintf(stderr, " -> %s]", group->getMove(i).to().toString());
}
fprintf(stderr, "\n");
continue;

View File

@ -186,11 +186,11 @@ StupidAllocator::syncRegister(LInstruction* ins, RegisterIndex index)
{
if (registers[index].dirty) {
LMoveGroup* input = getInputMoveGroup(ins);
- LAllocation* source = new(alloc()) LAllocation(registers[index].reg);
+ LAllocation source(registers[index].reg);
uint32_t existing = registers[index].vreg;
LAllocation* dest = stackLocation(existing);
- input->addAfter(source, dest, registers[index].type);
+ input->addAfter(source, *dest, registers[index].type);
registers[index].dirty = false;
}
@ -219,8 +219,8 @@ StupidAllocator::loadRegister(LInstruction* ins, uint32_t vreg, RegisterIndex in
// Load a vreg from its stack location to a register.
LMoveGroup* input = getInputMoveGroup(ins);
LAllocation* source = stackLocation(vreg);
- LAllocation* dest = new(alloc()) LAllocation(registers[index].reg);
- input->addAfter(source, dest, type);
+ LAllocation dest(registers[index].reg);
+ input->addAfter(*source, dest, type);
registers[index].set(vreg, ins);
registers[index].type = type;
}
@ -321,7 +321,7 @@ StupidAllocator::syncForBlockEnd(LBlock* block, LInstruction* ins)
}
}
- group->add(source, dest, phi->getDef(0)->type());
+ group->add(*source, *dest, phi->getDef(0)->type());
}
}
}

View File

@ -1009,11 +1009,11 @@ CodeGeneratorARM::visitPowHalfD(LPowHalfD* ins)
}
MoveOperand
- CodeGeneratorARM::toMoveOperand(const LAllocation* a) const
+ CodeGeneratorARM::toMoveOperand(LAllocation a) const
{
- if (a->isGeneralReg())
+ if (a.isGeneralReg())
return MoveOperand(ToRegister(a));
- if (a->isFloatReg())
+ if (a.isFloatReg())
return MoveOperand(ToFloatRegister(a));
int32_t offset = ToStackOffset(a);
MOZ_ASSERT((offset & 3) == 0);

View File

@ -43,7 +43,7 @@ class CodeGeneratorARM : public CodeGeneratorShared
return ToOperand(def->output());
}
- MoveOperand toMoveOperand(const LAllocation* a) const;
+ MoveOperand toMoveOperand(LAllocation a) const;
void bailoutIf(Assembler::Condition condition, LSnapshot* snapshot);
void bailoutFrom(Label* label, LSnapshot* snapshot);

View File

@ -948,11 +948,11 @@ CodeGeneratorMIPS::visitPowHalfD(LPowHalfD* ins)
}
MoveOperand
- CodeGeneratorMIPS::toMoveOperand(const LAllocation* a) const
+ CodeGeneratorMIPS::toMoveOperand(LAllocation a) const
{
- if (a->isGeneralReg())
+ if (a.isGeneralReg())
return MoveOperand(ToRegister(a));
- if (a->isFloatReg()) {
+ if (a.isFloatReg()) {
return MoveOperand(ToFloatRegister(a));
}
int32_t offset = ToStackOffset(a);

View File

@ -58,7 +58,7 @@ class CodeGeneratorMIPS : public CodeGeneratorShared
return ToOperand(def->output());
}
- MoveOperand toMoveOperand(const LAllocation* a) const;
+ MoveOperand toMoveOperand(LAllocation a) const;
template <typename T1, typename T2>
void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs, LSnapshot* snapshot) {

View File

@ -235,10 +235,13 @@ class CodeGeneratorShared : public LElementVisitor
return offset;
}
+ inline int32_t ToStackOffset(LAllocation a) const {
+ if (a.isArgument())
+ return ArgToStackOffset(a.toArgument()->index());
+ return SlotToStackOffset(a.toStackSlot()->slot());
+ }
inline int32_t ToStackOffset(const LAllocation* a) const {
- if (a->isArgument())
- return ArgToStackOffset(a->toArgument()->index());
- return SlotToStackOffset(a->toStackSlot()->slot());
+ return ToStackOffset(*a);
}
uint32_t frameSize() const {

Some files were not shown because too many files have changed in this diff.