Merge m-i to m-c, a=merge

This commit is contained in:
Phil Ringnalda 2015-09-05 20:19:43 -07:00
commit 52c00cb549
62 changed files with 1123 additions and 903 deletions

View File

@ -14,6 +14,9 @@ export CXX="$topsrcdir/clang/bin/clang++"
# Add the static checker
ac_add_options --enable-clang-plugin
# Treat warnings as errors (modulo ALLOW_COMPILER_WARNINGS).
ac_add_options --enable-warnings-as-errors
# Avoid dependency on libstdc++ 4.7
ac_add_options --enable-stdcxx-compat

View File

@ -1533,9 +1533,12 @@ if test "$GNU_CC"; then
# Turn off the following warnings that -Wall turns on:
# -Wno-unused - lots of violations in third-party code
# -Wno-unused-local-typedef - catches unused typedefs, which are commonly used in assertion macros
#
_WARNINGS_CFLAGS="${_WARNINGS_CFLAGS} -Wno-unused"
MOZ_CXX_SUPPORTS_WARNING(-Wno-, unused-local-typedef, ac_cxx_has_wno_unused_local_typedef)
if test -z "$INTEL_CC" -a -z "$CLANG_CC"; then
# Don't use -Wcast-align with ICC or clang
case "$CPU_ARCH" in
@ -1629,12 +1632,14 @@ if test "$GNU_CXX"; then
# Turn off the following warnings that -Wall turns on:
# -Wno-invalid-offsetof - we use offsetof on non-POD types frequently
# -Wno-inline-new-delete - we inline 'new' and 'delete' in mozalloc
# -Wno-unused-local-typedef - catches unused typedefs, which are commonly used in assertion macros
# for performance reasons, and because GCC and clang accept it (though
# clang warns about it).
#
_WARNINGS_CXXFLAGS="${_WARNINGS_CXXFLAGS} -Wno-invalid-offsetof"
MOZ_CXX_SUPPORTS_WARNING(-Wno-, inline-new-delete, ac_cxx_has_wno_inline_new_delete)
MOZ_CXX_SUPPORTS_WARNING(-Wno-, unused-local-typedef, ac_cxx_has_wno_unused_local_typedef)
if test -z "$INTEL_CXX" -a -z "$CLANG_CXX"; then
# Don't use -Wcast-align with ICC or clang

View File

@ -68,10 +68,7 @@
}
SpecialPowers.pushPrefEnv(
// XXX Set LRUPoolLevels to 2 to avoid breaking priority tests
{ "set": [["dom.ipc.processPriorityManager.BACKGROUND.LRUPoolLevels", 2],
["dom.ipc.processPriorityManager.BACKGROUND_PERCEIVABLE.LRUPoolLevels", 2],
["dom.ipc.processPriorityManager.testMode", true],
{ "set": [["dom.ipc.processPriorityManager.testMode", true],
["dom.ipc.processPriorityManager.enabled", true],
["dom.ipc.tabs.disabled", false],
["dom.ipc.processCount", 3],

View File

@ -118,7 +118,6 @@ public:
private:
ProcessPriority mPriority;
uint32_t mLRUPoolLevels;
uint32_t mLRUPoolSize;
nsTArray<ParticularProcessPriorityManager*> mLRUPool;
uint32_t CalculateLRULevel(uint32_t aLRUPoolIndex);
@ -144,6 +143,7 @@ private:
class ProcessPriorityManagerImpl final
: public nsIObserver
, public WakeLockObserver
, public nsSupportsWeakReference
{
public:
/**
@ -154,6 +154,7 @@ public:
static void StaticInit();
static bool PrefsEnabled();
static bool TestMode();
NS_DECL_ISUPPORTS
NS_DECL_NSIOBSERVER
@ -202,7 +203,16 @@ public:
*/
void Unfreeze();
/**
* Call ShutDown before destroying the ProcessPriorityManager because
* WakeLockObserver hols a strong reference to it.
*/
void ShutDown();
private:
static bool sPrefsEnabled;
static bool sRemoteTabsDisabled;
static bool sTestMode;
static bool sPrefListenersRegistered;
static bool sInitialized;
static bool sFrozen;
@ -288,6 +298,7 @@ public:
NS_DECL_NSITIMERCALLBACK
virtual void Notify(const WakeLockInformation& aInfo) override;
static void StaticInit();
void Init();
int32_t Pid() const;
@ -318,7 +329,12 @@ public:
ProcessPriority CurrentPriority();
ProcessPriority ComputePriority();
void ScheduleResetPriority(const char* aTimeoutPref);
enum TimeoutPref {
BACKGROUND_PERCEIVABLE_GRACE_PERIOD,
BACKGROUND_GRACE_PERIOD,
};
void ScheduleResetPriority(TimeoutPref aTimeoutPref);
void ResetPriority();
void ResetPriorityNow();
void SetPriorityNow(ProcessPriority aPriority, uint32_t aLRU = 0);
@ -328,6 +344,9 @@ public:
void ShutDown();
private:
static uint32_t sBackgroundPerceivableGracePeriodMS;
static uint32_t sBackgroundGracePeriodMS;
void FireTestOnlyObserverNotification(
const char* aTopic,
const nsACString& aData = EmptyCString());
@ -354,26 +373,42 @@ private:
};
/* static */ bool ProcessPriorityManagerImpl::sInitialized = false;
/* static */ bool ProcessPriorityManagerImpl::sPrefsEnabled = false;
/* static */ bool ProcessPriorityManagerImpl::sRemoteTabsDisabled = true;
/* static */ bool ProcessPriorityManagerImpl::sTestMode = false;
/* static */ bool ProcessPriorityManagerImpl::sPrefListenersRegistered = false;
/* static */ bool ProcessPriorityManagerImpl::sFrozen = false;
/* static */ StaticRefPtr<ProcessPriorityManagerImpl>
ProcessPriorityManagerImpl::sSingleton;
/* static */ uint32_t ParticularProcessPriorityManager::sBackgroundPerceivableGracePeriodMS = 0;
/* static */ uint32_t ParticularProcessPriorityManager::sBackgroundGracePeriodMS = 0;
NS_IMPL_ISUPPORTS(ProcessPriorityManagerImpl,
nsIObserver);
nsIObserver,
nsISupportsWeakReference);
/* static */ void
ProcessPriorityManagerImpl::PrefChangedCallback(const char* aPref,
void* aClosure)
{
StaticInit();
if (!PrefsEnabled() && sSingleton) {
sSingleton->ShutDown();
sSingleton = nullptr;
sInitialized = false;
}
}
/* static */ bool
ProcessPriorityManagerImpl::PrefsEnabled()
{
return Preferences::GetBool("dom.ipc.processPriorityManager.enabled") &&
!Preferences::GetBool("dom.ipc.tabs.disabled");
return sPrefsEnabled && !sRemoteTabsDisabled;
}
/* static */ bool
ProcessPriorityManagerImpl::TestMode()
{
return sTestMode;
}
/* static */ void
@ -389,6 +424,15 @@ ProcessPriorityManagerImpl::StaticInit()
return;
}
if (!sPrefListenersRegistered) {
Preferences::AddBoolVarCache(&sPrefsEnabled,
"dom.ipc.processPriorityManager.enabled");
Preferences::AddBoolVarCache(&sRemoteTabsDisabled,
"dom.ipc.tabs.disabled");
Preferences::AddBoolVarCache(&sTestMode,
"dom.ipc.processPriorityManager.testMode");
}
// If IPC tabs aren't enabled at startup, don't bother with any of this.
if (!PrefsEnabled()) {
LOG("InitProcessPriorityManager bailing due to prefs.");
@ -432,6 +476,12 @@ ProcessPriorityManagerImpl::ProcessPriorityManagerImpl()
}
ProcessPriorityManagerImpl::~ProcessPriorityManagerImpl()
{
ShutDown();
}
void
ProcessPriorityManagerImpl::ShutDown()
{
UnregisterWakeLockObserver(this);
}
@ -448,9 +498,9 @@ ProcessPriorityManagerImpl::Init()
nsCOMPtr<nsIObserverService> os = services::GetObserverService();
if (os) {
os->AddObserver(this, "ipc:content-created", /* ownsWeak */ false);
os->AddObserver(this, "ipc:content-shutdown", /* ownsWeak */ false);
os->AddObserver(this, "screen-state-changed", /* ownsWeak */ false);
os->AddObserver(this, "ipc:content-created", /* ownsWeak */ true);
os->AddObserver(this, "ipc:content-shutdown", /* ownsWeak */ true);
os->AddObserver(this, "screen-state-changed", /* ownsWeak */ true);
}
}
@ -664,6 +714,15 @@ ParticularProcessPriorityManager::ParticularProcessPriorityManager(
LOGP("Creating ParticularProcessPriorityManager.");
}
void
ParticularProcessPriorityManager::StaticInit()
{
Preferences::AddUintVarCache(&sBackgroundPerceivableGracePeriodMS,
"dom.ipc.processPriorityManager.backgroundPerceivableGracePeriodMS");
Preferences::AddUintVarCache(&sBackgroundGracePeriodMS,
"dom.ipc.processPriorityManager.backgroundGracePeriodMS");
}
void
ParticularProcessPriorityManager::Init()
{
@ -930,9 +989,9 @@ ParticularProcessPriorityManager::ResetPriority()
// can get their next track started, if there is one, before getting
// downgraded.
if (mPriority == PROCESS_PRIORITY_BACKGROUND_PERCEIVABLE) {
ScheduleResetPriority("backgroundPerceivableGracePeriodMS");
ScheduleResetPriority(BACKGROUND_PERCEIVABLE_GRACE_PERIOD);
} else {
ScheduleResetPriority("backgroundGracePeriodMS");
ScheduleResetPriority(BACKGROUND_GRACE_PERIOD);
}
return;
}
@ -947,15 +1006,26 @@ ParticularProcessPriorityManager::ResetPriorityNow()
}
void
ParticularProcessPriorityManager::ScheduleResetPriority(const char* aTimeoutPref)
ParticularProcessPriorityManager::ScheduleResetPriority(TimeoutPref aTimeoutPref)
{
if (mResetPriorityTimer) {
LOGP("ScheduleResetPriority bailing; the timer is already running.");
return;
}
uint32_t timeout = Preferences::GetUint(
nsPrintfCString("dom.ipc.processPriorityManager.%s", aTimeoutPref).get());
uint32_t timeout = 0;
switch (aTimeoutPref) {
case BACKGROUND_PERCEIVABLE_GRACE_PERIOD:
timeout = sBackgroundPerceivableGracePeriodMS;
break;
case BACKGROUND_GRACE_PERIOD:
timeout = sBackgroundGracePeriodMS;
break;
default:
MOZ_ASSERT(false, "Unrecognized timeout pref");
break;
}
LOGP("Scheduling reset timer to fire in %dms.", timeout);
mResetPriorityTimer = do_CreateInstance("@mozilla.org/timer;1");
mResetPriorityTimer->InitWithCallback(this, timeout, nsITimer::TYPE_ONE_SHOT);
@ -1134,7 +1204,7 @@ ProcessPriorityManagerImpl::FireTestOnlyObserverNotification(
const char* aTopic,
const nsACString& aData /* = EmptyCString() */)
{
if (!Preferences::GetBool("dom.ipc.processPriorityManager.testMode")) {
if (!TestMode()) {
return;
}
@ -1153,7 +1223,7 @@ ParticularProcessPriorityManager::FireTestOnlyObserverNotification(
const char* aTopic,
const char* aData /* = nullptr */ )
{
if (!Preferences::GetBool("dom.ipc.processPriorityManager.testMode")) {
if (!ProcessPriorityManagerImpl::TestMode()) {
return;
}
@ -1170,7 +1240,7 @@ ParticularProcessPriorityManager::FireTestOnlyObserverNotification(
const char* aTopic,
const nsACString& aData /* = EmptyCString() */)
{
if (!Preferences::GetBool("dom.ipc.processPriorityManager.testMode")) {
if (!ProcessPriorityManagerImpl::TestMode()) {
return;
}
@ -1288,9 +1358,9 @@ ProcessLRUPool::ProcessLRUPool(ProcessPriority aPriority)
mLRUPoolLevels <= 4);
// LRU pool size = 2 ^ (number of background LRU pool levels) - 1
mLRUPoolSize = (1 << mLRUPoolLevels) - 1;
uint32_t LRUPoolSize = (1 << mLRUPoolLevels) - 1;
LOG("Making %s LRU pool with size(%d)", str, mLRUPoolSize);
LOG("Making %s LRU pool with size(%d)", str, LRUPoolSize);
}
uint32_t
@ -1381,6 +1451,7 @@ ProcessPriorityManager::Init()
{
ProcessPriorityManagerImpl::StaticInit();
ProcessPriorityManagerChild::StaticInit();
ParticularProcessPriorityManager::StaticInit();
}
/* static */ void

View File

@ -267,7 +267,7 @@ GetNewPluginLibrary(nsPluginTag *aPluginTag)
}
if (XRE_IsContentProcess()) {
return PluginModuleContentParent::LoadModule(aPluginTag->mId);
return PluginModuleContentParent::LoadModule(aPluginTag->mId, aPluginTag);
}
if (nsNPAPIPlugin::RunPluginOOP(aPluginTag)) {

View File

@ -267,6 +267,7 @@ nsPluginTag::nsPluginTag(const char* aName,
mLibrary(nullptr),
mIsJavaPlugin(false),
mIsFlashPlugin(false),
mSupportsAsyncInit(false),
mFullPath(aFullPath),
mLastModifiedTime(aLastModifiedTime),
mCachedBlocklistState(nsIBlocklistService::STATE_NOT_BLOCKED),
@ -343,12 +344,22 @@ void nsPluginTag::InitMime(const char* const* aMimeTypes,
switch (nsPluginHost::GetSpecialType(mimeType)) {
case nsPluginHost::eSpecialType_Java:
mIsJavaPlugin = true;
mSupportsAsyncInit = true;
break;
case nsPluginHost::eSpecialType_Flash:
mIsFlashPlugin = true;
mSupportsAsyncInit = true;
break;
case nsPluginHost::eSpecialType_Silverlight:
case nsPluginHost::eSpecialType_Unity:
mSupportsAsyncInit = true;
break;
case nsPluginHost::eSpecialType_None:
default:
#ifndef RELEASE_BUILD
// Allow async init for all plugins on Nightly and Aurora
mSupportsAsyncInit = true;
#endif
break;
}

View File

@ -165,6 +165,7 @@ public:
nsRefPtr<nsNPAPIPlugin> mPlugin;
bool mIsJavaPlugin;
bool mIsFlashPlugin;
bool mSupportsAsyncInit;
nsCString mFullPath; // UTF-8
int64_t mLastModifiedTime;
nsCOMPtr<nsITimer> mUnloadTimer;

View File

@ -575,8 +575,9 @@ PluginAsyncSurrogate::NotifyAsyncInitFailed()
return;
}
nsPluginInstanceOwner* owner = inst->GetOwner();
MOZ_ASSERT(owner);
owner->NotifyHostAsyncInitFailed();
if (owner) {
owner->NotifyHostAsyncInitFailed();
}
}
// static

View File

@ -207,8 +207,9 @@ namespace {
class PluginModuleMapping : public PRCList
{
public:
explicit PluginModuleMapping(uint32_t aPluginId)
explicit PluginModuleMapping(uint32_t aPluginId, bool aAllowAsyncInit)
: mPluginId(aPluginId)
, mAllowAsyncInit(aAllowAsyncInit)
, mProcessIdValid(false)
, mModule(nullptr)
, mChannelOpened(false)
@ -240,7 +241,7 @@ public:
GetModule()
{
if (!mModule) {
mModule = new PluginModuleContentParent();
mModule = new PluginModuleContentParent(mAllowAsyncInit);
}
return mModule;
}
@ -333,6 +334,7 @@ private:
}
uint32_t mPluginId;
bool mAllowAsyncInit;
bool mProcessIdValid;
base::ProcessId mProcessId;
PluginModuleContentParent* mModule;
@ -372,10 +374,12 @@ mozilla::plugins::TerminatePlugin(uint32_t aPluginId,
}
/* static */ PluginLibrary*
PluginModuleContentParent::LoadModule(uint32_t aPluginId)
PluginModuleContentParent::LoadModule(uint32_t aPluginId,
nsPluginTag* aPluginTag)
{
PluginModuleMapping::NotifyLoadingModule loadingModule;
nsAutoPtr<PluginModuleMapping> mapping(new PluginModuleMapping(aPluginId));
nsAutoPtr<PluginModuleMapping> mapping(
new PluginModuleMapping(aPluginId, aPluginTag->mSupportsAsyncInit));
MOZ_ASSERT(XRE_IsContentProcess());
@ -503,8 +507,9 @@ PluginModuleChromeParent::LoadModule(const char* aFilePath, uint32_t aPluginId,
#endif
#endif
nsAutoPtr<PluginModuleChromeParent> parent(new PluginModuleChromeParent(aFilePath, aPluginId,
sandboxLevel));
nsAutoPtr<PluginModuleChromeParent> parent(
new PluginModuleChromeParent(aFilePath, aPluginId, sandboxLevel,
aPluginTag->mSupportsAsyncInit));
UniquePtr<LaunchCompleteTask> onLaunchedRunnable(new LaunchedTask(parent));
parent->mSubprocess->SetCallRunnableImmediately(!parent->mIsStartingAsync);
TimeStamp launchStart = TimeStamp::Now();
@ -635,7 +640,7 @@ PluginModuleChromeParent::WaitForIPCConnection()
return true;
}
PluginModuleParent::PluginModuleParent(bool aIsChrome)
PluginModuleParent::PluginModuleParent(bool aIsChrome, bool aAllowAsyncInit)
: mQuirks(QUIRKS_NOT_INITIALIZED)
, mIsChrome(aIsChrome)
, mShutdown(false)
@ -653,7 +658,8 @@ PluginModuleParent::PluginModuleParent(bool aIsChrome)
, mAsyncNewRv(NS_ERROR_NOT_INITIALIZED)
{
#if defined(XP_WIN) || defined(XP_MACOSX) || defined(MOZ_WIDGET_GTK)
mIsStartingAsync = Preferences::GetBool(kAsyncInitPref, false);
mIsStartingAsync = aAllowAsyncInit &&
Preferences::GetBool(kAsyncInitPref, false);
#if defined(MOZ_CRASHREPORTER)
CrashReporter::AnnotateCrashReport(NS_LITERAL_CSTRING("AsyncPluginInit"),
mIsStartingAsync ?
@ -676,8 +682,8 @@ PluginModuleParent::~PluginModuleParent()
}
}
PluginModuleContentParent::PluginModuleContentParent()
: PluginModuleParent(false)
PluginModuleContentParent::PluginModuleContentParent(bool aAllowAsyncInit)
: PluginModuleParent(false, aAllowAsyncInit)
{
Preferences::RegisterCallback(TimeoutChanged, kContentTimeoutPref, this);
}
@ -691,8 +697,9 @@ bool PluginModuleChromeParent::sInstantiated = false;
PluginModuleChromeParent::PluginModuleChromeParent(const char* aFilePath,
uint32_t aPluginId,
int32_t aSandboxLevel)
: PluginModuleParent(true)
int32_t aSandboxLevel,
bool aAllowAsyncInit)
: PluginModuleParent(true, aAllowAsyncInit)
, mSubprocess(new PluginProcessParent(aFilePath))
, mPluginId(aPluginId)
, mChromeTaskFactory(this)

View File

@ -97,7 +97,7 @@ protected:
DeallocPPluginInstanceParent(PPluginInstanceParent* aActor) override;
public:
explicit PluginModuleParent(bool aIsChrome);
explicit PluginModuleParent(bool aIsChrome, bool aAllowAsyncInit);
virtual ~PluginModuleParent();
bool RemovePendingSurrogate(const nsRefPtr<PluginAsyncSurrogate>& aSurrogate);
@ -344,9 +344,9 @@ protected:
class PluginModuleContentParent : public PluginModuleParent
{
public:
explicit PluginModuleContentParent();
explicit PluginModuleContentParent(bool aAllowAsyncInit);
static PluginLibrary* LoadModule(uint32_t aPluginId);
static PluginLibrary* LoadModule(uint32_t aPluginId, nsPluginTag* aPluginTag);
static PluginModuleContentParent* Initialize(mozilla::ipc::Transport* aTransport,
base::ProcessId aOtherProcess);
@ -497,7 +497,8 @@ private:
// aFilePath is UTF8, not native!
explicit PluginModuleChromeParent(const char* aFilePath, uint32_t aPluginId,
int32_t aSandboxLevel);
int32_t aSandboxLevel,
bool aAllowAsyncInit);
CrashReporterParent* CrashReporter();

View File

@ -1487,14 +1487,10 @@ class PromiseWorkerProxyRunnable : public workers::WorkerRunnable
{
public:
PromiseWorkerProxyRunnable(PromiseWorkerProxy* aPromiseWorkerProxy,
const JSStructuredCloneCallbacks* aCallbacks,
JSAutoStructuredCloneBuffer&& aBuffer,
PromiseWorkerProxy::RunCallbackFunc aFunc)
: WorkerRunnable(aPromiseWorkerProxy->GetWorkerPrivate(),
WorkerThreadUnchangedBusyCount)
, mPromiseWorkerProxy(aPromiseWorkerProxy)
, mCallbacks(aCallbacks)
, mBuffer(Move(aBuffer))
, mFunc(aFunc)
{
MOZ_ASSERT(NS_IsMainThread());
@ -1513,7 +1509,7 @@ public:
// Here we convert the buffer to a JS::Value.
JS::Rooted<JS::Value> value(aCx);
if (!mBuffer.read(aCx, &value, mCallbacks, mPromiseWorkerProxy)) {
if (!mPromiseWorkerProxy->Read(aCx, &value)) {
JS_ClearPendingException(aCx);
return false;
}
@ -1530,8 +1526,6 @@ protected:
private:
nsRefPtr<PromiseWorkerProxy> mPromiseWorkerProxy;
const JSStructuredCloneCallbacks* mCallbacks;
JSAutoStructuredCloneBuffer mBuffer;
// Function pointer for calling Promise::{ResolveInternal,RejectInternal}.
PromiseWorkerProxy::RunCallbackFunc mFunc;
@ -1541,11 +1535,12 @@ private:
already_AddRefed<PromiseWorkerProxy>
PromiseWorkerProxy::Create(workers::WorkerPrivate* aWorkerPrivate,
Promise* aWorkerPromise,
const JSStructuredCloneCallbacks* aCb)
const PromiseWorkerProxyStructuredCloneCallbacks* aCb)
{
MOZ_ASSERT(aWorkerPrivate);
aWorkerPrivate->AssertIsOnWorkerThread();
MOZ_ASSERT(aWorkerPromise);
MOZ_ASSERT_IF(aCb, !!aCb->Write && !!aCb->Read);
nsRefPtr<PromiseWorkerProxy> proxy =
new PromiseWorkerProxy(aWorkerPrivate, aWorkerPromise, aCb);
@ -1566,7 +1561,7 @@ NS_IMPL_ISUPPORTS0(PromiseWorkerProxy)
PromiseWorkerProxy::PromiseWorkerProxy(workers::WorkerPrivate* aWorkerPrivate,
Promise* aWorkerPromise,
const JSStructuredCloneCallbacks* aCallbacks)
const PromiseWorkerProxyStructuredCloneCallbacks* aCallbacks)
: mWorkerPrivate(aWorkerPrivate)
, mWorkerPromise(aWorkerPromise)
, mCleanedUp(false)
@ -1597,6 +1592,9 @@ PromiseWorkerProxy::CleanProperties()
mCleanedUp = true;
mWorkerPromise = nullptr;
mWorkerPrivate = nullptr;
// Shutdown the StructuredCloneHelperInternal class.
Shutdown();
}
bool
@ -1668,20 +1666,14 @@ PromiseWorkerProxy::RunCallback(JSContext* aCx,
return;
}
// The |aValue| is written into the buffer. Note that we also pass |this|
// into the structured-clone write in order to set its |mSupportsArray| to
// keep objects alive until the structured-clone read/write is done.
JSAutoStructuredCloneBuffer buffer;
if (!buffer.write(aCx, aValue, mCallbacks, this)) {
// The |aValue| is written into the StructuredCloneHelperInternal.
if (!Write(aCx, aValue)) {
JS_ClearPendingException(aCx);
MOZ_ASSERT(false, "cannot write the JSAutoStructuredCloneBuffer!");
MOZ_ASSERT(false, "cannot serialize the value with the StructuredCloneAlgorithm!");
}
nsRefPtr<PromiseWorkerProxyRunnable> runnable =
new PromiseWorkerProxyRunnable(this,
mCallbacks,
Move(buffer),
aFunc);
new PromiseWorkerProxyRunnable(this, aFunc);
runnable->Dispatch(aCx);
}
@ -1738,6 +1730,31 @@ PromiseWorkerProxy::CleanUp(JSContext* aCx)
Release();
}
JSObject*
PromiseWorkerProxy::ReadCallback(JSContext* aCx,
JSStructuredCloneReader* aReader,
uint32_t aTag,
uint32_t aIndex)
{
if (NS_WARN_IF(!mCallbacks)) {
return nullptr;
}
return mCallbacks->Read(aCx, aReader, this, aTag, aIndex);
}
bool
PromiseWorkerProxy::WriteCallback(JSContext* aCx,
JSStructuredCloneWriter* aWriter,
JS::Handle<JSObject*> aObj)
{
if (NS_WARN_IF(!mCallbacks)) {
return false;
}
return mCallbacks->Write(aCx, aWriter, this, aObj);
}
// Specializations of MaybeRejectBrokenly we actually support.
template<>
void Promise::MaybeRejectBrokenly(const nsRefPtr<DOMError>& aArg) {

View File

@ -10,6 +10,7 @@
// Required for Promise::PromiseTaskSync.
#include "mozilla/dom/Promise.h"
#include "mozilla/dom/PromiseNativeHandler.h"
#include "mozilla/dom/StructuredCloneHelper.h"
#include "mozilla/dom/workers/bindings/WorkerFeature.h"
#include "nsProxyRelease.h"
@ -110,18 +111,35 @@ class WorkerPrivate;
// stay alive till the worker reaches a Canceling state, even if all external
// references to it are dropped.
class PromiseWorkerProxy : public PromiseNativeHandler,
public workers::WorkerFeature
class PromiseWorkerProxy : public PromiseNativeHandler
, public workers::WorkerFeature
, public StructuredCloneHelperInternal
{
friend class PromiseWorkerProxyRunnable;
NS_DECL_THREADSAFE_ISUPPORTS
public:
typedef JSObject* (*ReadCallbackOp)(JSContext* aCx,
JSStructuredCloneReader* aReader,
const PromiseWorkerProxy* aProxy,
uint32_t aTag,
uint32_t aData);
typedef bool (*WriteCallbackOp)(JSContext* aCx,
JSStructuredCloneWriter* aWorker,
PromiseWorkerProxy* aProxy,
JS::HandleObject aObj);
struct PromiseWorkerProxyStructuredCloneCallbacks
{
ReadCallbackOp Read;
WriteCallbackOp Write;
};
static already_AddRefed<PromiseWorkerProxy>
Create(workers::WorkerPrivate* aWorkerPrivate,
Promise* aWorkerPromise,
const JSStructuredCloneCallbacks* aCallbacks = nullptr);
const PromiseWorkerProxyStructuredCloneCallbacks* aCallbacks = nullptr);
// Main thread callers must hold Lock() and check CleanUp() before calling this.
// Worker thread callers, this will assert that the proxy has not been cleaned
@ -151,6 +169,17 @@ public:
return mCleanedUp;
}
// StructuredCloneHelperInternal
JSObject* ReadCallback(JSContext* aCx,
JSStructuredCloneReader* aReader,
uint32_t aTag,
uint32_t aIndex) override;
bool WriteCallback(JSContext* aCx,
JSStructuredCloneWriter* aWriter,
JS::Handle<JSObject*> aObj) override;
protected:
virtual void ResolvedCallback(JSContext* aCx,
JS::Handle<JS::Value> aValue) override;
@ -163,7 +192,7 @@ protected:
private:
PromiseWorkerProxy(workers::WorkerPrivate* aWorkerPrivate,
Promise* aWorkerPromise,
const JSStructuredCloneCallbacks* aCallbacks = nullptr);
const PromiseWorkerProxyStructuredCloneCallbacks* aCallbacks = nullptr);
virtual ~PromiseWorkerProxy();
@ -191,7 +220,7 @@ private:
// Main thread must always acquire a lock.
bool mCleanedUp; // To specify if the cleanUp() has been done.
const JSStructuredCloneCallbacks* mCallbacks;
const PromiseWorkerProxyStructuredCloneCallbacks* mCallbacks;
// Aimed to keep objects alive when doing the structured-clone read/write,
// which can be added by calling StoreISupports() on the main thread.

View File

@ -94,11 +94,11 @@ public:
#define WORKER_DATA_STORES_TAG JS_SCTAG_USER_MIN
static JSObject*
GetDataStoresStructuredCloneCallbacksRead(JSContext* aCx,
JSStructuredCloneReader* aReader,
uint32_t aTag,
uint32_t aData,
void* aClosure)
GetDataStoresProxyCloneCallbacksRead(JSContext* aCx,
JSStructuredCloneReader* aReader,
const PromiseWorkerProxy* aProxy,
uint32_t aTag,
uint32_t aData)
{
WorkerPrivate* workerPrivate = GetWorkerPrivateFromContext(aCx);
MOZ_ASSERT(workerPrivate);
@ -155,16 +155,13 @@ GetDataStoresStructuredCloneCallbacksRead(JSContext* aCx,
}
static bool
GetDataStoresStructuredCloneCallbacksWrite(JSContext* aCx,
JSStructuredCloneWriter* aWriter,
JS::Handle<JSObject*> aObj,
void* aClosure)
GetDataStoresProxyCloneCallbacksWrite(JSContext* aCx,
JSStructuredCloneWriter* aWriter,
PromiseWorkerProxy* aProxy,
JS::Handle<JSObject*> aObj)
{
AssertIsOnMainThread();
PromiseWorkerProxy* proxy = static_cast<PromiseWorkerProxy*>(aClosure);
NS_ASSERTION(proxy, "must have proxy!");
if (!JS_WriteUint32Pair(aWriter, WORKER_DATA_STORES_TAG, 0)) {
MOZ_ASSERT(false, "cannot write pair for WORKER_DATA_STORES_TAG!");
return false;
@ -180,7 +177,7 @@ GetDataStoresStructuredCloneCallbacksWrite(JSContext* aCx,
}
// We keep the data store alive here.
proxy->StoreISupports(store);
aProxy->StoreISupports(store);
// Construct the nsMainThreadPtrHolder pointing to the data store.
nsMainThreadPtrHolder<DataStore>* dataStoreholder =
@ -195,10 +192,10 @@ GetDataStoresStructuredCloneCallbacksWrite(JSContext* aCx,
return true;
}
static const JSStructuredCloneCallbacks kGetDataStoresStructuredCloneCallbacks = {
GetDataStoresStructuredCloneCallbacksRead,
GetDataStoresStructuredCloneCallbacksWrite,
nullptr
static const PromiseWorkerProxy::PromiseWorkerProxyStructuredCloneCallbacks
kGetDataStoresCloneCallbacks= {
GetDataStoresProxyCloneCallbacksRead,
GetDataStoresProxyCloneCallbacksWrite
};
// A WorkerMainThreadRunnable to run WorkerNavigator::GetDataStores(...) on the
@ -228,7 +225,7 @@ public:
mPromiseWorkerProxy =
PromiseWorkerProxy::Create(aWorkerPrivate,
aWorkerPromise,
&kGetDataStoresStructuredCloneCallbacks);
&kGetDataStoresCloneCallbacks);
}
bool Dispatch(JSContext* aCx)

View File

@ -175,11 +175,11 @@ class CircularRowBuffer {
// |src_data| and continues for the [begin, end) of the filter.
template<bool has_alpha>
void ConvolveHorizontally(const unsigned char* src_data,
int begin, int end,
const ConvolutionFilter1D& filter,
unsigned char* out_row) {
int num_values = filter.num_values();
// Loop over each pixel on this row in the output image.
for (int out_x = begin; out_x < end; out_x++) {
for (int out_x = 0; out_x < num_values; out_x++) {
// Get the filter that determines the current output pixel.
int filter_offset, filter_length;
const ConvolutionFilter1D::Fixed* filter_values =
@ -220,17 +220,18 @@ void ConvolveHorizontally(const unsigned char* src_data,
// Does vertical convolution to produce one output row. The filter values and
// length are given in the first two parameters. These are applied to each
// of the rows pointed to in the |source_data_rows| array, with each row
// being |end - begin| wide.
// being |pixel_width| wide.
//
// The output must have room for |(end - begin) * 4| bytes.
// The output must have room for |pixel_width * 4| bytes.
template<bool has_alpha>
void ConvolveVertically(const ConvolutionFilter1D::Fixed* filter_values,
int filter_length,
unsigned char* const* source_data_rows,
int begin, int end, unsigned char* out_row) {
int pixel_width,
unsigned char* out_row) {
// We go through each column in the output and do a vertical convolution,
// generating one output pixel each time.
for (int out_x = begin; out_x < end; out_x++) {
for (int out_x = 0; out_x < pixel_width; out_x++) {
// Compute the number of bytes over in each row that the current column
// we're convolving starts at. The pixel will cover the next 4 bytes.
int byte_offset = out_x * 4;
@ -288,28 +289,29 @@ void ConvolveVertically(const ConvolutionFilter1D::Fixed* filter_values,
void ConvolveVertically(const ConvolutionFilter1D::Fixed* filter_values,
int filter_length,
unsigned char* const* source_data_rows,
int width, unsigned char* out_row,
int pixel_width, unsigned char* out_row,
bool has_alpha, bool use_simd) {
int processed = 0;
#if defined(USE_SSE2) || defined(_MIPS_ARCH_LOONGSON3A)
// If the binary was not built with SSE2 support, we had to fallback to C version.
int simd_width = width & ~3;
if (use_simd && simd_width) {
if (use_simd) {
ConvolveVertically_SIMD(filter_values, filter_length,
source_data_rows, 0, simd_width,
source_data_rows,
pixel_width,
out_row, has_alpha);
processed = simd_width;
}
} else
#endif
if (width > processed) {
{
if (has_alpha) {
ConvolveVertically<true>(filter_values, filter_length, source_data_rows,
processed, width, out_row);
ConvolveVertically<true>(filter_values, filter_length,
source_data_rows,
pixel_width,
out_row);
} else {
ConvolveVertically<false>(filter_values, filter_length, source_data_rows,
processed, width, out_row);
ConvolveVertically<false>(filter_values, filter_length,
source_data_rows,
pixel_width,
out_row);
}
}
}
@ -326,16 +328,16 @@ void ConvolveHorizontally(const unsigned char* src_data,
// SIMD implementation works with 4 pixels at a time.
// Therefore we process as much as we can using SSE and then use
// C implementation for leftovers
ConvolveHorizontally_SSE2(src_data, 0, simd_width, filter, out_row);
ConvolveHorizontally_SSE2(src_data, filter, out_row);
processed = simd_width;
}
#endif
if (width > processed) {
if (has_alpha) {
ConvolveHorizontally<true>(src_data, processed, width, filter, out_row);
ConvolveHorizontally<true>(src_data, filter, out_row);
} else {
ConvolveHorizontally<false>(src_data, processed, width, filter, out_row);
ConvolveHorizontally<false>(src_data, filter, out_row);
}
}
}
@ -457,9 +459,23 @@ void BGRAConvolve2D(const unsigned char* source_data,
int num_output_rows = filter_y.num_values();
int pixel_width = filter_x.num_values();
// We need to check which is the last line to convolve before we advance 4
// lines in one iteration.
int last_filter_offset, last_filter_length;
// SSE2 can access up to 3 extra pixels past the end of the
// buffer. At the bottom of the image, we have to be careful
// not to access data past the end of the buffer. Normally
// we fall back to the C++ implementation for the last row.
// If the last row is less than 3 pixels wide, we may have to fall
// back to the C++ version for more rows. Compute how many
// rows we need to avoid the SSE implementation for here.
filter_x.FilterForValue(filter_x.num_values() - 1, &last_filter_offset,
&last_filter_length);
#if defined(USE_SSE2) || defined(_MIPS_ARCH_LOONGSON3A)
int avoid_simd_rows = 1 + 3 /
(last_filter_offset + last_filter_length);
#endif
filter_y.FilterForValue(num_output_rows - 1, &last_filter_offset,
&last_filter_length);
@ -473,36 +489,32 @@ void BGRAConvolve2D(const unsigned char* source_data,
// We don't want to process too much rows in batches of 4 because
// we can go out-of-bounds at the end
while (next_x_row < filter_offset + filter_length) {
if (next_x_row + 3 < last_filter_offset + last_filter_length - 3) {
if (next_x_row + 3 < last_filter_offset + last_filter_length -
avoid_simd_rows) {
const unsigned char* src[4];
unsigned char* out_row[4];
for (int i = 0; i < 4; ++i) {
src[i] = &source_data[(next_x_row + i) * source_byte_row_stride];
out_row[i] = row_buffer.AdvanceRow();
}
ConvolveHorizontally4_SIMD(src, 0, pixel_width, filter_x, out_row);
ConvolveHorizontally4_SIMD(src, filter_x, out_row);
next_x_row += 4;
} else {
unsigned char* buffer = row_buffer.AdvanceRow();
// For last rows, SSE2 load possibly to access data beyond the
// image area. therefore we use cobined C+SSE version here
int simd_width = pixel_width & ~3;
if (simd_width) {
// Check if we need to avoid SSE2 for this row.
if (next_x_row < last_filter_offset + last_filter_length -
avoid_simd_rows) {
ConvolveHorizontally_SIMD(
&source_data[next_x_row * source_byte_row_stride],
0, simd_width, filter_x, buffer);
}
if (pixel_width > simd_width) {
filter_x, row_buffer.AdvanceRow());
} else {
if (source_has_alpha) {
ConvolveHorizontally<true>(
&source_data[next_x_row * source_byte_row_stride],
simd_width, pixel_width, filter_x, buffer);
filter_x, row_buffer.AdvanceRow());
} else {
ConvolveHorizontally<false>(
&source_data[next_x_row * source_byte_row_stride],
simd_width, pixel_width, filter_x, buffer);
filter_x, row_buffer.AdvanceRow());
}
}
next_x_row++;
@ -513,12 +525,12 @@ void BGRAConvolve2D(const unsigned char* source_data,
while (next_x_row < filter_offset + filter_length) {
if (source_has_alpha) {
ConvolveHorizontally<true>(
&source_data[next_x_row * source_byte_row_stride],
0, pixel_width, filter_x, row_buffer.AdvanceRow());
&source_data[next_x_row * source_byte_row_stride],
filter_x, row_buffer.AdvanceRow());
} else {
ConvolveHorizontally<false>(
&source_data[next_x_row * source_byte_row_stride],
0, pixel_width, filter_x, row_buffer.AdvanceRow());
&source_data[next_x_row * source_byte_row_stride],
filter_x, row_buffer.AdvanceRow());
}
next_x_row++;
}

View File

@ -35,26 +35,24 @@
namespace skia {
// Convolves horizontally along a single row. The row data is given in
// |src_data| and continues for the [begin, end) of the filter.
// |src_data| and continues for the num_values() of the filter.
void ConvolveHorizontally_SSE2(const unsigned char* src_data,
int begin, int end,
const ConvolutionFilter1D& filter,
unsigned char* out_row) {
int num_values = filter.num_values();
int filter_offset, filter_length;
__m128i zero = _mm_setzero_si128();
__m128i mask[3];
__m128i mask[4];
// |mask| will be used to decimate all extra filter coefficients that are
// loaded by SIMD when |filter_length| is not divisible by 4.
mask[0] = _mm_set_epi16(0, 0, 0, 0, 0, 0, 0, -1);
mask[1] = _mm_set_epi16(0, 0, 0, 0, 0, 0, -1, -1);
mask[2] = _mm_set_epi16(0, 0, 0, 0, 0, -1, -1, -1);
// This buffer is used for tails
__m128i buffer;
// mask[0] is not used in following algorithm.
mask[1] = _mm_set_epi16(0, 0, 0, 0, 0, 0, 0, -1);
mask[2] = _mm_set_epi16(0, 0, 0, 0, 0, 0, -1, -1);
mask[3] = _mm_set_epi16(0, 0, 0, 0, 0, -1, -1, -1);
// Output one pixel each iteration, calculating all channels (RGBA) together.
for (int out_x = begin; out_x < end; out_x++) {
for (int out_x = 0; out_x < num_values; out_x++) {
const ConvolutionFilter1D::Fixed* filter_values =
filter.FilterForValue(out_x, &filter_offset, &filter_length);
@ -117,22 +115,21 @@ void ConvolveHorizontally_SSE2(const unsigned char* src_data,
// When |filter_length| is not divisible by 4, we need to decimate some of
// the filter coefficient that was loaded incorrectly to zero; Other than
// that the algorithm is same with above, except that the 4th pixel will be
// that the algorithm is same with above, exceot that the 4th pixel will be
// always absent.
int r = filter_length & 3;
int r = filter_length&3;
if (r) {
memcpy(&buffer, row_to_filter, r * 4);
// Note: filter_values must be padded to align_up(filter_offset, 8).
__m128i coeff, coeff16;
coeff = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(filter_values));
// Mask out extra filter taps.
coeff = _mm_and_si128(coeff, mask[r-1]);
coeff = _mm_and_si128(coeff, mask[r]);
coeff16 = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(1, 1, 0, 0));
coeff16 = _mm_unpacklo_epi16(coeff16, coeff16);
// Note: line buffer must be padded to align_up(filter_offset, 16).
// We resolve this by temporary buffer
__m128i src8 = _mm_loadu_si128(&buffer);
// We resolve this by use C-version for the last horizontal line.
__m128i src8 = _mm_loadu_si128(row_to_filter);
__m128i src16 = _mm_unpacklo_epi8(src8, zero);
__m128i mul_hi = _mm_mulhi_epi16(src16, coeff16);
__m128i mul_lo = _mm_mullo_epi16(src16, coeff16);
@ -165,24 +162,26 @@ void ConvolveHorizontally_SSE2(const unsigned char* src_data,
}
// Convolves horizontally along four rows. The row data is given in
// |src_data| and continues for the [begin, end) of the filter.
// |src_data| and continues for the num_values() of the filter.
// The algorithm is almost same as |ConvolveHorizontally_SSE2|. Please
// refer to that function for detailed comments.
void ConvolveHorizontally4_SSE2(const unsigned char* src_data[4],
int begin, int end,
const ConvolutionFilter1D& filter,
unsigned char* out_row[4]) {
int num_values = filter.num_values();
int filter_offset, filter_length;
__m128i zero = _mm_setzero_si128();
__m128i mask[3];
__m128i mask[4];
// |mask| will be used to decimate all extra filter coefficients that are
// loaded by SIMD when |filter_length| is not divisible by 4.
mask[0] = _mm_set_epi16(0, 0, 0, 0, 0, 0, 0, -1);
mask[1] = _mm_set_epi16(0, 0, 0, 0, 0, 0, -1, -1);
mask[2] = _mm_set_epi16(0, 0, 0, 0, 0, -1, -1, -1);
// mask[0] is not used in following algorithm.
mask[1] = _mm_set_epi16(0, 0, 0, 0, 0, 0, 0, -1);
mask[2] = _mm_set_epi16(0, 0, 0, 0, 0, 0, -1, -1);
mask[3] = _mm_set_epi16(0, 0, 0, 0, 0, -1, -1, -1);
// Output one pixel each iteration, calculating all channels (RGBA) together.
for (int out_x = begin; out_x < end; out_x++) {
for (int out_x = 0; out_x < num_values; out_x++) {
const ConvolutionFilter1D::Fixed* filter_values =
filter.FilterForValue(out_x, &filter_offset, &filter_length);
@ -240,7 +239,7 @@ void ConvolveHorizontally4_SSE2(const unsigned char* src_data[4],
__m128i coeff;
coeff = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(filter_values));
// Mask out extra filter taps.
coeff = _mm_and_si128(coeff, mask[r-1]);
coeff = _mm_and_si128(coeff, mask[r]);
__m128i coeff16lo = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(1, 1, 0, 0));
/* c1 c1 c1 c1 c0 c0 c0 c0 */
@ -284,21 +283,22 @@ void ConvolveHorizontally4_SSE2(const unsigned char* src_data[4],
// Does vertical convolution to produce one output row. The filter values and
// length are given in the first two parameters. These are applied to each
// of the rows pointed to in the |source_data_rows| array, with each row
// being |end - begin| wide.
// being |pixel_width| wide.
//
// The output must have room for |(end - begin) * 4| bytes.
// The output must have room for |pixel_width * 4| bytes.
template<bool has_alpha>
void ConvolveVertically_SSE2_impl(const ConvolutionFilter1D::Fixed* filter_values,
int filter_length,
unsigned char* const* source_data_rows,
int begin, int end,
int pixel_width,
unsigned char* out_row) {
int width = pixel_width & ~3;
__m128i zero = _mm_setzero_si128();
__m128i accum0, accum1, accum2, accum3, coeff16;
const __m128i* src;
int out_x;
// Output four pixels per iteration (16 bytes).
for (out_x = begin; out_x + 3 < end; out_x += 4) {
for (int out_x = 0; out_x < width; out_x += 4) {
// Accumulated result for each pixel. 32 bits per RGBA channel.
accum0 = _mm_setzero_si128();
@ -391,11 +391,7 @@ void ConvolveVertically_SSE2_impl(const ConvolutionFilter1D::Fixed* filter_value
// When the width of the output is not divisible by 4, We need to save one
// pixel (4 bytes) each time. And also the fourth pixel is always absent.
int r = end - out_x;
if (r > 0) {
// Since accum3 is never used here, we'll use it as a buffer
__m128i *buffer = &accum3;
if (pixel_width & 3) {
accum0 = _mm_setzero_si128();
accum1 = _mm_setzero_si128();
accum2 = _mm_setzero_si128();
@ -403,9 +399,8 @@ void ConvolveVertically_SSE2_impl(const ConvolutionFilter1D::Fixed* filter_value
coeff16 = _mm_set1_epi16(filter_values[filter_y]);
// [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
src = reinterpret_cast<const __m128i*>(
&source_data_rows[filter_y][out_x * 4]);
memcpy(buffer, src, r * 4);
__m128i src8 = _mm_loadu_si128(buffer);
&source_data_rows[filter_y][width<<2]);
__m128i src8 = _mm_loadu_si128(src);
// [16] a1 b1 g1 r1 a0 b0 g0 r0
__m128i src16 = _mm_unpacklo_epi8(src8, zero);
__m128i mul_hi = _mm_mulhi_epi16(src16, coeff16);
@ -451,7 +446,7 @@ void ConvolveVertically_SSE2_impl(const ConvolutionFilter1D::Fixed* filter_value
accum0 = _mm_or_si128(accum0, mask);
}
for (; out_x < end; out_x++) {
for (int out_x = width; out_x < pixel_width; out_x++) {
*(reinterpret_cast<int*>(out_row)) = _mm_cvtsi128_si32(accum0);
accum0 = _mm_srli_si128(accum0, 4);
out_row += 4;
@ -462,14 +457,14 @@ void ConvolveVertically_SSE2_impl(const ConvolutionFilter1D::Fixed* filter_value
void ConvolveVertically_SSE2(const ConvolutionFilter1D::Fixed* filter_values,
int filter_length,
unsigned char* const* source_data_rows,
int begin, int end,
int pixel_width,
unsigned char* out_row, bool has_alpha) {
if (has_alpha) {
ConvolveVertically_SSE2_impl<true>(filter_values, filter_length,
source_data_rows, begin, end, out_row);
source_data_rows, pixel_width, out_row);
} else {
ConvolveVertically_SSE2_impl<false>(filter_values, filter_length,
source_data_rows, begin, end, out_row);
source_data_rows, pixel_width, out_row);
}
}

View File

@ -40,7 +40,6 @@ namespace skia {
// Convolves horizontally along a single row. The row data is given in
// |src_data| and continues for the [begin, end) of the filter.
void ConvolveHorizontally_SSE2(const unsigned char* src_data,
int begin, int end,
const ConvolutionFilter1D& filter,
unsigned char* out_row);
@ -49,7 +48,6 @@ void ConvolveHorizontally_SSE2(const unsigned char* src_data,
// The algorithm is almost same as |ConvolveHorizontally_SSE2|. Please
// refer to that function for detailed comments.
void ConvolveHorizontally4_SSE2(const unsigned char* src_data[4],
int begin, int end,
const ConvolutionFilter1D& filter,
unsigned char* out_row[4]);
@ -62,7 +60,7 @@ void ConvolveHorizontally4_SSE2(const unsigned char* src_data[4],
void ConvolveVertically_SSE2(const ConvolutionFilter1D::Fixed* filter_values,
int filter_length,
unsigned char* const* source_data_rows,
int begin, int end,
int pixel_width,
unsigned char* out_row, bool has_alpha);
} // namespace skia

View File

@ -272,8 +272,11 @@ static EventRegions
GetEventRegions(const LayerMetricsWrapper& aLayer)
{
if (aLayer.IsScrollInfoLayer()) {
return EventRegions(nsIntRegion(ParentLayerIntRect::ToUntyped(
RoundedToInt(aLayer.Metrics().GetCompositionBounds()))));
ParentLayerIntRect compositionBounds(RoundedToInt(aLayer.Metrics().GetCompositionBounds()));
nsIntRegion hitRegion(ParentLayerIntRect::ToUntyped(compositionBounds));
EventRegions eventRegions(hitRegion);
eventRegions.mDispatchToContentHitRegion = eventRegions.mHitRegion;
return eventRegions;
}
return aLayer.GetEventRegions();
}

View File

@ -25,3 +25,5 @@ skip-if = (os == 'android') || (os == 'b2g') # wheel events not supported on mob
skip-if = (os == 'android') || (os == 'b2g') # uses wheel events which are not supported on mobile
[test_basic_pan.html]
skip-if = toolkit != 'gonk'
[test_scroll_inactive_flattened_frame.html]
skip-if = (os == 'android') || (os == 'b2g') || (buildapp == 'mulet') # wheel events not supported on mobile; see bug 1164274 for mulet

View File

@ -0,0 +1,50 @@
<!DOCTYPE HTML>
<html>
<head>
<title>Test scrolling flattened inactive frames</title>
<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<script type="application/javascript" src="/tests/SimpleTest/EventUtils.js"></script>
<script type="application/javascript" src="/tests/SimpleTest/paint_listener.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
</head>
<body>
<div id="container" style="height: 300px; width: 600px; overflow: auto; background: yellow">
<div id="outer" style="height: 400px; width: 500px; overflow: auto; background: black">
<div id="inner" style="mix-blend-mode: screen; height: 800px; overflow: auto; background: purple">
</div>
</div>
</div>
<script class="testbody" type="text/javascript;version=1.7">
function test() {
var container = document.getElementById('container');
var outer = document.getElementById('outer');
var inner = document.getElementById('inner');
var outerScrollTop = outer.scrollTop;
var containerScrollTop = container.scrollTop;
var event = {
deltaMode: WheelEvent.DOM_DELTA_LINE,
deltaX: 0,
deltaY: 10,
lineOrPageDeltaX: 0,
lineOrPageDeltaY: 10,
};
sendWheelAndPaint(inner, 20, 30, event, function () {
ok(container.scrollTop == containerScrollTop, "container scrollframe should not have scrolled");
ok(outer.scrollTop > outerScrollTop, "nested scrollframe should have scrolled");
SimpleTest.finish();
});
}
window.onload = function() {
SpecialPowers.pushPrefEnv({
'set': [['general.smoothScroll', false],
['mousewheel.transaction.timeout', 1000000]],
}, function () {
SimpleTest.waitForFocus(test);
});
}
SimpleTest.waitForExplicitFinish();
</script>
</body>
</html>

View File

@ -2079,8 +2079,15 @@ UpdatePluginWindowState(uint64_t aId)
// Hide all of our plugins, this remote layer tree is no longer active.
if (shouldHidePlugin) {
uintptr_t parentWidget = (uintptr_t)lts.mParent->GetWidget();
unused << lts.mParent->SendHideAllPlugins(parentWidget);
for (uint32_t pluginsIdx = 0; pluginsIdx < lts.mPluginData.Length();
pluginsIdx++) {
lts.mPluginData[pluginsIdx].visible() = false;
}
nsIntPoint offset;
nsIntRegion region;
unused << lts.mParent->SendUpdatePluginConfigurations(offset,
region,
lts.mPluginData);
// Clear because there's no recovering from this until we receive
// new shadow layer plugin data in ShadowLayersUpdated.
lts.mPluginData.Clear();

View File

@ -236,7 +236,6 @@ private:
DECL_GFX_PREF(Once, "gfx.direct2d.force-enabled", Direct2DForceEnabled, bool, false);
DECL_GFX_PREF(Live, "gfx.direct2d.use1_1", Direct2DUse1_1, bool, false);
DECL_GFX_PREF(Live, "gfx.draw-color-bars", CompositorDrawColorBars, bool, false);
DECL_GFX_PREF(Once, "gfx.font_rendering.directwrite.enabled", DirectWriteFontRenderingEnabled, bool, false);
DECL_GFX_PREF(Live, "gfx.gralloc.fence-with-readpixels", GrallocFenceWithReadPixels, bool, false);
DECL_GFX_PREF(Live, "gfx.layerscope.enabled", LayerScopeEnabled, bool, false);
DECL_GFX_PREF(Live, "gfx.layerscope.port", LayerScopePort, int32_t, 23456);

View File

@ -91,7 +91,8 @@ Downscaler::BeginFrame(const nsIntSize& aOriginalSize,
mYFilter.get());
// Allocate the buffer, which contains scanlines of the original image.
mRowBuffer = MakeUnique<uint8_t[]>(mOriginalSize.width * sizeof(uint32_t));
// pad by 15 to handle overreads by the simd code
mRowBuffer = MakeUnique<uint8_t[]>(mOriginalSize.width * sizeof(uint32_t) + 15);
if (MOZ_UNLIKELY(!mRowBuffer)) {
return NS_ERROR_OUT_OF_MEMORY;
}
@ -106,7 +107,8 @@ Downscaler::BeginFrame(const nsIntSize& aOriginalSize,
}
bool anyAllocationFailed = false;
const int rowSize = mTargetSize.width * sizeof(uint32_t);
// pad by 15 to handle overreads by the simd code
const int rowSize = mTargetSize.width * sizeof(uint32_t) + 15;
for (int32_t i = 0; i < mWindowCapacity; ++i) {
mWindow[i] = new uint8_t[rowSize];
anyAllocationFailed = anyAllocationFailed || mWindow[i] == nullptr;

View File

@ -338,13 +338,13 @@ JS_CallHashSetObjectTracer(JSTracer* trc, HashSetEnum& e, JSObject* const& key,
extern JS_PUBLIC_API(void)
JS_CallTenuredObjectTracer(JSTracer* trc, JS::TenuredHeap<JSObject*>* objp, const char* name);
extern JS_PUBLIC_API(void)
JS_TraceChildren(JSTracer* trc, void* thing, JS::TraceKind kind);
extern JS_PUBLIC_API(void)
JS_TraceRuntime(JSTracer* trc);
namespace JS {
extern JS_PUBLIC_API(void)
TraceChildren(JSTracer* trc, GCCellPtr thing);
typedef js::HashSet<Zone*, js::DefaultHasher<Zone*>, js::SystemAllocPolicy> ZoneSet;
} // namespace JS

View File

@ -824,7 +824,7 @@ class Edge {
// Concrete instances of this class need not be as lightweight as Node itself,
// since they're usually only instantiated while iterating over a particular
// object's edges. For example, a dumb implementation for JS Cells might use
// JS_TraceChildren to to get the outgoing edges, and then store them in an
// JS::TraceChildren to to get the outgoing edges, and then store them in an
// array internal to the EdgeRange.
class EdgeRange {
protected:
@ -991,7 +991,7 @@ struct Concrete<RootList> : public Base {
};
// A reusable ubi::Concrete specialization base class for types supported by
// JS_TraceChildren.
// JS::TraceChildren.
template<typename Referent>
class TracerConcrete : public Base {
const char16_t* typeName() const override { return concreteTypeName; }
@ -1007,7 +1007,7 @@ class TracerConcrete : public Base {
static void construct(void* storage, Referent* ptr) { new (storage) TracerConcrete(ptr); }
};
// For JS_TraceChildren-based types that have a 'compartment' method.
// For JS::TraceChildren-based types that have a 'compartment' method.
template<typename Referent>
class TracerConcreteWithCompartment : public TracerConcrete<Referent> {
typedef TracerConcrete<Referent> TracerBase;

View File

@ -1250,10 +1250,12 @@ if test "$GNU_CC"; then
# Turn off the following warnings that -Wall turns on:
# -Wno-unused - lots of violations in third-party code
# -Wno-inline-new-delete - we inline 'new' and 'delete' in mozalloc
# -Wno-unused-local-typedef - catches unused typedefs, which are commonly used in assertion macros
#
_WARNINGS_CFLAGS="${_WARNINGS_CFLAGS} -Wno-unused"
MOZ_CXX_SUPPORTS_WARNING(-Wno-, inline-new-delete, ac_cxx_has_wno_inline_new_delete)
MOZ_CXX_SUPPORTS_WARNING(-Wno-, unused-local-typedef, ac_cxx_has_wno_unused_local_typedef)
if test -z "$INTEL_CC" -a -z "$CLANG_CC"; then
# Don't use -Wcast-align with ICC or clang
@ -1357,9 +1359,12 @@ if test "$GNU_CXX"; then
# Turn off the following warnings that -Wall turns on:
# -Wno-invalid-offsetof - we use offsetof on non-POD types frequently
# -Wno-unused-local-typedef - catches unused typedefs, which are commonly used in assertion macros
#
_WARNINGS_CXXFLAGS="${_WARNINGS_CXXFLAGS} -Wno-invalid-offsetof"
MOZ_CXX_SUPPORTS_WARNING(-Wno-, unused-local-typedef, ac_cxx_has_wno_unused_local_typedef)
if test -z "$INTEL_CXX" -a -z "$CLANG_CXX"; then
# Don't use -Wcast-align with ICC or clang
case "$CPU_ARCH" in

View File

@ -3988,6 +3988,13 @@ BytecodeEmitter::emitDestructuringOpsArrayHelper(ParseNode* pattern, VarEmitOpti
return true;
}
bool
BytecodeEmitter::emitComputedPropertyName(ParseNode* computedPropName)
{
MOZ_ASSERT(computedPropName->isKind(PNK_COMPUTED_NAME));
return emitTree(computedPropName->pn_kid) && emit1(JSOP_TOID);
}
bool
BytecodeEmitter::emitDestructuringOpsObjectHelper(ParseNode* pattern, VarEmitOption emitOption)
{
@ -4038,8 +4045,7 @@ BytecodeEmitter::emitDestructuringOpsObjectHelper(ParseNode* pattern, VarEmitOpt
needsGetElem = false;
}
} else {
MOZ_ASSERT(key->isKind(PNK_COMPUTED_NAME));
if (!emitTree(key->pn_kid)) // ... RHS RHS KEY
if (!emitComputedPropertyName(key)) // ... RHS RHS KEY
return false;
}
@ -7045,8 +7051,7 @@ BytecodeEmitter::emitPropertyList(ParseNode* pn, MutableHandlePlainObject objp,
isIndex = true;
}
} else {
MOZ_ASSERT(key->isKind(PNK_COMPUTED_NAME));
if (!emitTree(key->pn_kid))
if (!emitComputedPropertyName(key))
return false;
isIndex = true;
}

View File

@ -482,6 +482,8 @@ struct BytecodeEmitter
bool emitPropOp(ParseNode* pn, JSOp op);
bool emitPropIncDec(ParseNode* pn);
bool emitComputedPropertyName(ParseNode* computedPropName);
// Emit bytecode to put operands for a JSOP_GETELEM/CALLELEM/SETELEM/DELELEM
// opcode onto the stack in the right order. In the case of SETELEM, the
// value to be assigned must already be pushed.

View File

@ -5913,9 +5913,7 @@ Parser<ParseHandler>::yieldExpression(InHandling inHandling)
case TOK_COMMA:
// No value.
exprNode = null();
tokenStream.addModifierException((tt == TOK_EOL || tt == TOK_EOF)
? TokenStream::NoneIsOperandYieldEOL
: TokenStream::NoneIsOperand);
tokenStream.addModifierException(TokenStream::NoneIsOperand);
break;
case TOK_MUL:
kind = PNK_YIELD_STAR;
@ -5978,9 +5976,7 @@ Parser<ParseHandler>::yieldExpression(InHandling inHandling)
case TOK_COMMA:
// No value.
exprNode = null();
tokenStream.addModifierException((tt == TOK_EOL || tt == TOK_EOF)
? TokenStream::NoneIsOperandYieldEOL
: TokenStream::NoneIsOperand);
tokenStream.addModifierException(TokenStream::NoneIsOperand);
break;
default:
exprNode = assignExpr(inHandling, YieldIsKeyword);

View File

@ -121,17 +121,16 @@ struct Token
{
NoException,
// If an yield expression operand is omitted and yield expression is
// followed by non-EOL, the next token is already gotten with Operand,
// but we expect operator (None).
// After |yield| we look for a token on the same line that starts an
// expression (Operand): |yield <expr>|. If no token is found, the
// |yield| stands alone, and the next token on a subsequent line must
// be: a comma continuing a comma expression, a semicolon terminating
// the statement that ended with |yield|, or the start of another
// statement (possibly an expression statement). The comma/semicolon
// cases are gotten as operators (None), contrasting with Operand
// earlier.
NoneIsOperand,
// If an yield expression operand is omitted and yield expression is
// followed by EOL, the next token is already gotten with Operand, and
// we expect Operand in next statement, but MatchOrInsertSemicolon
// after expression statement expects operator (None).
NoneIsOperandYieldEOL,
// If a semicolon is inserted automatically, the next token is already
// gotten with None, but we expect Operand.
OperandIsNone,
@ -437,25 +436,19 @@ class MOZ_STACK_CLASS TokenStream
typedef Token::ModifierException ModifierException;
static MOZ_CONSTEXPR_VAR ModifierException NoException = Token::NoException;
static MOZ_CONSTEXPR_VAR ModifierException NoneIsOperand = Token::NoneIsOperand;
static MOZ_CONSTEXPR_VAR ModifierException NoneIsOperandYieldEOL = Token::NoneIsOperandYieldEOL;
static MOZ_CONSTEXPR_VAR ModifierException OperandIsNone = Token::OperandIsNone;
static MOZ_CONSTEXPR_VAR ModifierException NoneIsKeywordIsName = Token::NoneIsKeywordIsName;
void addModifierException(ModifierException modifierException) {
#ifdef DEBUG
const Token& next = nextToken();
if (next.modifierException == NoneIsOperand ||
next.modifierException == NoneIsOperandYieldEOL)
if (next.modifierException == NoneIsOperand)
{
// Token after yield expression without operand already has
// NoneIsOperand or NoneIsOperandYieldEOL exception.
// NoneIsOperand exception.
MOZ_ASSERT(modifierException == OperandIsNone);
if (next.modifierException == NoneIsOperand)
MOZ_ASSERT(next.type != TOK_DIV && next.type != TOK_REGEXP,
"next token requires contextual specifier to be parsed unambiguously");
else
MOZ_ASSERT(next.type != TOK_DIV,
"next token requires contextual specifier to be parsed unambiguously");
MOZ_ASSERT(next.type != TOK_DIV,
"next token requires contextual specifier to be parsed unambiguously");
// Do not update modifierException.
return;
@ -464,11 +457,6 @@ class MOZ_STACK_CLASS TokenStream
MOZ_ASSERT(next.modifierException == NoException);
switch (modifierException) {
case NoneIsOperand:
MOZ_ASSERT(next.modifier == Operand);
MOZ_ASSERT(next.type != TOK_DIV && next.type != TOK_REGEXP,
"next token requires contextual specifier to be parsed unambiguously");
break;
case NoneIsOperandYieldEOL:
MOZ_ASSERT(next.modifier == Operand);
MOZ_ASSERT(next.type != TOK_DIV,
"next token requires contextual specifier to be parsed unambiguously");
@ -502,9 +490,7 @@ class MOZ_STACK_CLASS TokenStream
return;
}
if (lookaheadToken.modifierException == NoneIsOperand ||
lookaheadToken.modifierException == NoneIsOperandYieldEOL)
{
if (lookaheadToken.modifierException == NoneIsOperand) {
// getToken() permissibly following getToken(Operand).
if (modifier == None && lookaheadToken.modifier == Operand)
return;

View File

@ -1794,7 +1794,7 @@ GCMarker::markDelayedChildren(ArenaHeader* aheader)
TenuredCell* t = i.getCell();
if (always || t->isMarked()) {
t->markIfUnmarked();
JS_TraceChildren(this, t, MapAllocToTraceKind(aheader->getAllocKind()));
js::TraceChildren(this, t, MapAllocToTraceKind(aheader->getAllocKind()));
}
}
} else {
@ -2591,40 +2591,51 @@ UnmarkGrayTracer::onChild(const JS::GCCellPtr& thing)
do {
MOZ_ASSERT(!shape->isMarked(js::gc::GRAY));
TraceChildren(&childTracer, shape, JS::TraceKind::Shape);
shape->traceChildren(&childTracer);
shape = childTracer.previousShape;
childTracer.previousShape = nullptr;
} while (shape);
unmarkedAny |= childTracer.unmarkedAny;
}
bool
js::UnmarkGrayCellRecursively(gc::Cell* cell, JS::TraceKind kind)
template <typename T>
static bool
TypedUnmarkGrayCellRecursively(T* t)
{
MOZ_ASSERT(cell);
MOZ_ASSERT(t);
JSRuntime* rt = cell->runtimeFromMainThread();
JSRuntime* rt = t->runtimeFromMainThread();
MOZ_ASSERT(!rt->isHeapBusy());
bool unmarkedArg = false;
if (cell->isTenured()) {
if (!cell->asTenured().isMarked(GRAY))
if (t->isTenured()) {
if (!t->asTenured().isMarked(GRAY))
return false;
cell->asTenured().unmark(GRAY);
t->asTenured().unmark(GRAY);
unmarkedArg = true;
}
UnmarkGrayTracer trc(rt);
TraceChildren(&trc, cell, kind);
t->traceChildren(&trc);
return unmarkedArg || trc.unmarkedAny;
}
struct UnmarkGrayCellRecursivelyFunctor {
template <typename T> bool operator()(T* t) { return TypedUnmarkGrayCellRecursively(t); }
};
bool
js::UnmarkGrayCellRecursively(Cell* cell, JS::TraceKind kind)
{
return DispatchTraceKindTyped(UnmarkGrayCellRecursivelyFunctor(), cell, kind);
}
bool
js::UnmarkGrayShapeRecursively(Shape* shape)
{
return js::UnmarkGrayCellRecursively(shape, JS::TraceKind::Shape);
return TypedUnmarkGrayCellRecursively(shape);
}
JS_FRIEND_API(bool)

View File

@ -40,9 +40,9 @@ static const size_t NON_INCREMENTAL_MARK_STACK_BASE_CAPACITY = 4096;
static const size_t INCREMENTAL_MARK_STACK_BASE_CAPACITY = 32768;
/*
* When the native stack is low, the GC does not call JS_TraceChildren to mark
* When the native stack is low, the GC does not call js::TraceChildren to mark
* the reachable "children" of the thing. Rather the thing is put aside and
* JS_TraceChildren is called later with more space on the C stack.
* js::TraceChildren is called later with more space on the C stack.
*
* To implement such delayed marking of the children with minimal overhead for
* the normal case of sufficient native stack, the code adds a field per arena.

View File

@ -176,9 +176,9 @@ JS_CallTenuredObjectTracer(JSTracer* trc, JS::TenuredHeap<JSObject*>* objp, cons
}
JS_PUBLIC_API(void)
JS_TraceChildren(JSTracer* trc, void* thing, JS::TraceKind kind)
JS::TraceChildren(JSTracer* trc, GCCellPtr thing)
{
js::TraceChildren(trc, thing, kind);
js::TraceChildren(trc, thing.asCell(), thing.kind());
}
struct TraceChildrenFunctor {

View File

@ -216,7 +216,7 @@ gc::GCRuntime::startVerifyPreBarriers()
VerifyNode* child = MakeNode(trc, e.thing, e.kind);
if (child) {
trc->curnode = child;
JS_TraceChildren(trc, e.thing, e.kind);
js::TraceChildren(trc, e.thing, e.kind);
}
if (trc->edgeptr == trc->term)
goto oom;
@ -338,7 +338,7 @@ gc::GCRuntime::endVerifyPreBarriers()
VerifyNode* node = NextNode(trc->root);
while ((char*)node < trc->edgeptr) {
cetrc.node = node;
JS_TraceChildren(&cetrc, node->thing, node->kind);
js::TraceChildren(&cetrc, node->thing, node->kind);
if (node->count <= MAX_VERIFIER_EDGES) {
for (uint32_t i = 0; i < node->count; i++)

View File

@ -0,0 +1,47 @@
load(libdir + 'asserts.js');
load(libdir + 'iteration.js');
var outer = "unmodified";
function f(v)
{
if (v + "")
({ [(outer = "modified")]: v } = v);
}
assertEq(outer, "unmodified");
f(true);
assertEq(outer, "modified");
outer = "unmodified";
f({});
assertEq(outer, "modified");
outer = "unmodified";
assertThrowsInstanceOf(() => f(null), TypeError);
assertEq(outer, "unmodified");
assertThrowsInstanceOf(() => f(undefined), TypeError);
assertEq(outer, "unmodified");
function g(v)
{
if (v + "")
({ [{ toString() { outer = "modified"; return 0; } }]: v } = v);
}
outer = "unmodified";
g(true);
assertEq(outer, "modified");
outer = "unmodified";
g({});
assertEq(outer, "modified");
outer = "unmodified";
assertThrowsInstanceOf(() => g(undefined), TypeError);
assertEq(outer, "unmodified");
assertThrowsInstanceOf(() => g(null), TypeError);
assertEq(outer, "unmodified");

View File

@ -1,7 +1,7 @@
load(libdir + "match.js")
// At the moment, findPath just returns the names as provided by ubi::Node,
// which just uses JS_TraceChildren for now. However, we have various plans
// which just uses js::TraceChildren for now. However, we have various plans
// to improve the quality of ubi::Node's metadata, to improve the precision
// and clarity of the results here.

View File

@ -1052,14 +1052,14 @@ bool
RRegExpReplace::recover(JSContext* cx, SnapshotIterator& iter) const
{
RootedString string(cx, iter.read().toString());
RootedObject regexp(cx, &iter.read().toObject());
Rooted<RegExpObject*> regexp(cx, &iter.read().toObject().as<RegExpObject>());
RootedString repl(cx, iter.read().toString());
RootedValue result(cx);
if (!js::str_replace_regexp_raw(cx, string, regexp, repl, &result))
JSString* result = js::str_replace_regexp_raw(cx, string, regexp, repl);
if (!result)
return false;
iter.storeInstructionResult(result);
iter.storeInstructionResult(StringValue(result));
return true;
}
@ -1502,12 +1502,12 @@ bool RStringReplace::recover(JSContext* cx, SnapshotIterator& iter) const
RootedString string(cx, iter.read().toString());
RootedString pattern(cx, iter.read().toString());
RootedString replace(cx, iter.read().toString());
RootedValue result(cx);
if (!js::str_replace_string_raw(cx, string, pattern, replace, &result))
JSString* result = js::str_replace_string_raw(cx, string, pattern, replace);
if (!result)
return false;
iter.storeInstructionResult(result);
iter.storeInstructionResult(StringValue(result));
return true;
}

View File

@ -1041,11 +1041,7 @@ RegExpReplace(JSContext* cx, HandleString string, HandleObject regexp, HandleStr
MOZ_ASSERT(string);
MOZ_ASSERT(repl);
RootedValue rval(cx);
if (!str_replace_regexp_raw(cx, string, regexp, repl, &rval))
return nullptr;
return rval.toString();
return str_replace_regexp_raw(cx, string, regexp.as<RegExpObject>(), repl);
}
JSString*
@ -1055,11 +1051,7 @@ StringReplace(JSContext* cx, HandleString string, HandleString pattern, HandleSt
MOZ_ASSERT(pattern);
MOZ_ASSERT(repl);
RootedValue rval(cx);
if (!str_replace_string_raw(cx, string, pattern, repl, &rval))
return nullptr;
return rval.toString();
return str_replace_string_raw(cx, string, pattern, repl);
}
bool

View File

@ -956,7 +956,7 @@ DumpHeapVisitCell(JSRuntime* rt, void* data, void* thing,
char cellDesc[1024 * 32];
JS_GetTraceThingInfo(cellDesc, sizeof(cellDesc), dtrc, thing, traceKind, true);
fprintf(dtrc->output, "%p %c %s\n", thing, MarkDescriptor(thing), cellDesc);
JS_TraceChildren(dtrc, thing, traceKind);
js::TraceChildren(dtrc, thing, traceKind);
}
void

View File

@ -3771,7 +3771,7 @@ GCRuntime::checkForCompartmentMismatches()
trc.srcKind = MapAllocToTraceKind(thingKind);
trc.compartment = DispatchTraceKindTyped(MaybeCompartmentFunctor(),
trc.src, trc.srcKind);
JS_TraceChildren(&trc, trc.src, trc.srcKind);
js::TraceChildren(&trc, trc.src, trc.srcKind);
}
}
}

View File

@ -2101,7 +2101,7 @@ class MOZ_STACK_CLASS StringRegExpGuard
bool init(JSContext* cx, const CallArgs& args, bool convertVoid = false)
{
if (args.length() != 0 && IsObjectWithClass(args[0], ESClass_RegExp, cx))
return init(cx, &args[0].toObject());
return initRegExp(cx, &args[0].toObject());
if (convertVoid && !args.hasDefined(0)) {
fm.pat_ = cx->runtime()->emptyString;
@ -2119,14 +2119,12 @@ class MOZ_STACK_CLASS StringRegExpGuard
return true;
}
bool init(JSContext* cx, JSObject* regexp) {
bool initRegExp(JSContext* cx, JSObject* regexp) {
obj_ = regexp;
MOZ_ASSERT(ObjectClassIs(obj_, ESClass_RegExp, cx));
if (!RegExpToShared(cx, obj_, &re_))
return false;
return true;
return RegExpToShared(cx, obj_, &re_);
}
bool init(JSContext* cx, HandleString pattern) {
@ -2905,9 +2903,9 @@ ReplaceRegExp(JSContext* cx, RegExpStatics* res, ReplaceData& rdata)
return true;
}
static bool
static JSString*
BuildFlatReplacement(JSContext* cx, HandleString textstr, HandleString repstr,
const FlatMatch& fm, MutableHandleValue rval)
const FlatMatch& fm)
{
RopeBuilder builder(cx);
size_t match = fm.match();
@ -2920,7 +2918,8 @@ BuildFlatReplacement(JSContext* cx, HandleString textstr, HandleString repstr,
*/
StringSegmentRange r(cx);
if (!r.init(textstr))
return false;
return nullptr;
size_t pos = 0;
while (!r.empty()) {
RootedString str(cx, r.front());
@ -2941,8 +2940,9 @@ BuildFlatReplacement(JSContext* cx, HandleString textstr, HandleString repstr,
RootedString leftSide(cx, NewDependentString(cx, str, 0, match - pos));
if (!leftSide ||
!builder.append(leftSide) ||
!builder.append(repstr)) {
return false;
!builder.append(repstr))
{
return nullptr;
}
}
@ -2954,33 +2954,33 @@ BuildFlatReplacement(JSContext* cx, HandleString textstr, HandleString repstr,
RootedString rightSide(cx, NewDependentString(cx, str, matchEnd - pos,
strEnd - matchEnd));
if (!rightSide || !builder.append(rightSide))
return false;
return nullptr;
}
} else {
if (!builder.append(str))
return false;
return nullptr;
}
pos += str->length();
if (!r.popFront())
return false;
return nullptr;
}
} else {
RootedString leftSide(cx, NewDependentString(cx, textstr, 0, match));
if (!leftSide)
return false;
return nullptr;
RootedString rightSide(cx);
rightSide = NewDependentString(cx, textstr, match + fm.patternLength(),
textstr->length() - match - fm.patternLength());
if (!rightSide ||
!builder.append(leftSide) ||
!builder.append(repstr) ||
!builder.append(rightSide)) {
return false;
!builder.append(rightSide))
{
return nullptr;
}
}
rval.setString(builder.result());
return true;
return builder.result();
}
template <typename CharT>
@ -3040,13 +3040,13 @@ AppendDollarReplacement(StringBuffer& newReplaceChars, size_t firstDollarIndex,
*
* newstring = string[:matchStart] + dollarSub(replaceValue) + string[matchLimit:]
*/
static inline bool
static JSString*
BuildDollarReplacement(JSContext* cx, JSString* textstrArg, JSLinearString* repstr,
uint32_t firstDollarIndex, const FlatMatch& fm, MutableHandleValue rval)
uint32_t firstDollarIndex, const FlatMatch& fm)
{
RootedLinearString textstr(cx, textstrArg->ensureLinear(cx));
if (!textstr)
return false;
return nullptr;
size_t matchStart = fm.match();
size_t matchLimit = matchStart + fm.patternLength();
@ -3060,10 +3060,10 @@ BuildDollarReplacement(JSContext* cx, JSString* textstrArg, JSLinearString* reps
*/
StringBuffer newReplaceChars(cx);
if (repstr->hasTwoByteChars() && !newReplaceChars.ensureTwoByteChars())
return false;
return nullptr;
if (!newReplaceChars.reserve(textstr->length() - fm.patternLength() + repstr->length()))
return false;
return nullptr;
bool res;
if (repstr->hasLatin1Chars()) {
@ -3076,28 +3076,27 @@ BuildDollarReplacement(JSContext* cx, JSString* textstrArg, JSLinearString* reps
repstr->twoByteChars(nogc), repstr->length());
}
if (!res)
return false;
return nullptr;
RootedString leftSide(cx, NewDependentString(cx, textstr, 0, matchStart));
if (!leftSide)
return false;
return nullptr;
RootedString newReplace(cx, newReplaceChars.finishString());
if (!newReplace)
return false;
return nullptr;
MOZ_ASSERT(textstr->length() >= matchLimit);
RootedString rightSide(cx, NewDependentString(cx, textstr, matchLimit,
textstr->length() - matchLimit));
if (!rightSide)
return false;
return nullptr;
RopeBuilder builder(cx);
if (!builder.append(leftSide) || !builder.append(newReplace) || !builder.append(rightSide))
return false;
return nullptr;
rval.setString(builder.result());
return true;
return builder.result();
}
struct StringRange
@ -3193,12 +3192,12 @@ AppendSubstrings(JSContext* cx, HandleLinearString str, const StringRange* range
return rope.result();
}
static bool
StrReplaceRegexpRemove(JSContext* cx, HandleString str, RegExpShared& re, MutableHandleValue rval)
static JSString*
StrReplaceRegexpRemove(JSContext* cx, HandleString str, RegExpShared& re)
{
RootedLinearString linearStr(cx, str->ensureLinear(cx));
if (!linearStr)
return false;
return nullptr;
Vector<StringRange, 16, SystemAllocPolicy> ranges;
@ -3212,11 +3211,11 @@ StrReplaceRegexpRemove(JSContext* cx, HandleString str, RegExpShared& re, Mutabl
/* Accumulate StringRanges for unmatched substrings. */
while (startIndex <= charsLen) {
if (!CheckForInterrupt(cx))
return false;
return nullptr;
RegExpRunStatus status = re.execute(cx, linearStr, startIndex, &matches);
if (status == RegExpRunStatus_Error)
return false;
return nullptr;
if (status == RegExpRunStatus_Success_NotFound)
break;
MatchPair& match = matches[0];
@ -3224,7 +3223,7 @@ StrReplaceRegexpRemove(JSContext* cx, HandleString str, RegExpShared& re, Mutabl
/* Include the latest unmatched substring. */
if (size_t(match.start) > lastIndex) {
if (!ranges.append(StringRange(lastIndex, match.start - lastIndex)))
return false;
return nullptr;
}
lazyIndex = lastIndex;
@ -3244,49 +3243,42 @@ StrReplaceRegexpRemove(JSContext* cx, HandleString str, RegExpShared& re, Mutabl
if (startIndex > 0) {
res = cx->global()->getRegExpStatics(cx);
if (!res)
return false;
return nullptr;
res->updateLazily(cx, linearStr, &re, lazyIndex);
}
rval.setString(str);
return true;
return str;
}
/* The last successful match updates the RegExpStatics. */
res = cx->global()->getRegExpStatics(cx);
if (!res)
return false;
return nullptr;
res->updateLazily(cx, linearStr, &re, lazyIndex);
/* Include any remaining part of the string. */
if (lastIndex < charsLen) {
if (!ranges.append(StringRange(lastIndex, charsLen - lastIndex)))
return false;
return nullptr;
}
/* Handle the empty string before calling .begin(). */
if (ranges.empty()) {
rval.setString(cx->runtime()->emptyString);
return true;
}
if (ranges.empty())
return cx->runtime()->emptyString;
JSString* result = AppendSubstrings(cx, linearStr, ranges.begin(), ranges.length());
if (!result)
return false;
rval.setString(result);
return true;
return AppendSubstrings(cx, linearStr, ranges.begin(), ranges.length());
}
static inline bool
StrReplaceRegExp(JSContext* cx, ReplaceData& rdata, MutableHandleValue rval)
static inline JSString*
StrReplaceRegExp(JSContext* cx, ReplaceData& rdata)
{
rdata.leftIndex = 0;
rdata.calledBack = false;
RegExpStatics* res = cx->global()->getRegExpStatics(cx);
if (!res)
return false;
return nullptr;
RegExpShared& re = rdata.g.regExp();
@ -3297,43 +3289,37 @@ StrReplaceRegExp(JSContext* cx, ReplaceData& rdata, MutableHandleValue rval)
// of DoMatchGlobal explaining why we can zero the the RegExp object's
// lastIndex property here.
if (re.global() && !rdata.g.zeroLastIndex(cx))
return false;
return nullptr;
/* Optimize removal. */
if (rdata.repstr && rdata.repstr->length() == 0) {
MOZ_ASSERT(!rdata.lambda && !rdata.elembase && rdata.dollarIndex == UINT32_MAX);
return StrReplaceRegexpRemove(cx, rdata.str, re, rval);
return StrReplaceRegexpRemove(cx, rdata.str, re);
}
RootedLinearString linearStr(cx, rdata.str->ensureLinear(cx));
if (!linearStr)
return false;
return nullptr;
if (re.global()) {
if (!DoMatchForReplaceGlobal(cx, res, linearStr, re, rdata))
return false;
return nullptr;
} else {
if (!DoMatchForReplaceLocal(cx, res, linearStr, re, rdata))
return false;
return nullptr;
}
if (!rdata.calledBack) {
/* Didn't match, so the string is unmodified. */
rval.setString(rdata.str);
return true;
return rdata.str;
}
JSSubString sub;
res->getRightContext(&sub);
if (!rdata.sb.appendSubstring(sub.base, sub.offset, sub.length))
return false;
return nullptr;
JSString* retstr = rdata.sb.finishString();
if (!retstr)
return false;
rval.setString(retstr);
return true;
return rdata.sb.finishString();
}
static inline bool
@ -3342,21 +3328,26 @@ str_replace_regexp(JSContext* cx, const CallArgs& args, ReplaceData& rdata)
if (!rdata.g.normalizeRegExp(cx, true, 2, args))
return false;
return StrReplaceRegExp(cx, rdata, args.rval());
JSString* res = StrReplaceRegExp(cx, rdata);
if (!res)
return false;
args.rval().setString(res);
return true;
}
bool
js::str_replace_regexp_raw(JSContext* cx, HandleString string, HandleObject regexp,
HandleString replacement, MutableHandleValue rval)
JSString*
js::str_replace_regexp_raw(JSContext* cx, HandleString string, Handle<RegExpObject*> regexp,
HandleString replacement)
{
/* Optimize removal, so we don't have to create ReplaceData */
if (replacement->length() == 0) {
StringRegExpGuard guard(cx);
if (!guard.init(cx, regexp))
return false;
if (!guard.initRegExp(cx, regexp))
return nullptr;
RegExpShared& re = guard.regExp();
return StrReplaceRegexpRemove(cx, string, re, rval);
return StrReplaceRegexpRemove(cx, string, re);
}
ReplaceData rdata(cx);
@ -3364,52 +3355,50 @@ js::str_replace_regexp_raw(JSContext* cx, HandleString string, HandleObject rege
JSLinearString* repl = replacement->ensureLinear(cx);
if (!repl)
return false;
return nullptr;
rdata.setReplacementString(repl);
if (!rdata.g.init(cx, regexp))
return false;
if (!rdata.g.initRegExp(cx, regexp))
return nullptr;
return StrReplaceRegExp(cx, rdata, rval);
return StrReplaceRegExp(cx, rdata);
}
static inline bool
StrReplaceString(JSContext* cx, ReplaceData& rdata, const FlatMatch& fm, MutableHandleValue rval)
static JSString*
StrReplaceString(JSContext* cx, ReplaceData& rdata, const FlatMatch& fm)
{
/*
* Note: we could optimize the text.length == pattern.length case if we wanted,
* even in the presence of dollar metachars.
*/
if (rdata.dollarIndex != UINT32_MAX)
return BuildDollarReplacement(cx, rdata.str, rdata.repstr, rdata.dollarIndex, fm, rval);
return BuildFlatReplacement(cx, rdata.str, rdata.repstr, fm, rval);
return BuildDollarReplacement(cx, rdata.str, rdata.repstr, rdata.dollarIndex, fm);
return BuildFlatReplacement(cx, rdata.str, rdata.repstr, fm);
}
static const uint32_t ReplaceOptArg = 2;
bool
JSString*
js::str_replace_string_raw(JSContext* cx, HandleString string, HandleString pattern,
HandleString replacement, MutableHandleValue rval)
HandleString replacement)
{
ReplaceData rdata(cx);
rdata.str = string;
JSLinearString* repl = replacement->ensureLinear(cx);
if (!repl)
return false;
return nullptr;
rdata.setReplacementString(repl);
if (!rdata.g.init(cx, pattern))
return false;
return nullptr;
const FlatMatch* fm = rdata.g.tryFlatMatch(cx, rdata.str, ReplaceOptArg, ReplaceOptArg, false);
if (fm->match() < 0) {
rval.setString(string);
return true;
}
if (fm->match() < 0)
return string;
return StrReplaceString(cx, rdata, *fm, rval);
return StrReplaceString(cx, rdata, *fm);
}
static inline bool
@ -3580,7 +3569,13 @@ js::str_replace(JSContext* cx, unsigned argc, Value* vp)
if (rdata.lambda)
return str_replace_flat_lambda(cx, args, rdata, *fm);
return StrReplaceString(cx, rdata, *fm, args.rval());
JSString* res = StrReplaceString(cx, rdata, *fm);
if (!res)
return false;
args.rval().setString(res);
return true;
}
namespace {

View File

@ -428,13 +428,9 @@ str_split(JSContext* cx, unsigned argc, Value* vp);
JSObject*
str_split_string(JSContext* cx, HandleObjectGroup group, HandleString str, HandleString sep);
bool
str_replace_regexp_raw(JSContext* cx, HandleString string, HandleObject regexp,
HandleString replacement, MutableHandleValue rval);
bool
JSString*
str_replace_string_raw(JSContext* cx, HandleString string, HandleString pattern,
HandleString replacement, MutableHandleValue rval);
HandleString replacement);
extern bool
StringConstructor(JSContext* cx, unsigned argc, Value* vp);

View File

@ -0,0 +1,38 @@
// Any copyright is dedicated to the Public Domain.
// http://creativecommons.org/licenses/publicdomain/
//-----------------------------------------------------------------------------
var BUGNUMBER = 1199546;
var summary =
"Convert computed property name expressions to property key before " +
"evaluating the property's value";
print(BUGNUMBER + ": " + summary);
/**************
* BEGIN TEST *
**************/
var s = "foo";
var convertsToS = { toString() { return s; } };
var o = {
[convertsToS]: // after ToPropertyKey becomes "foo"
(function() {
s = 'bar';
return 'abc'; // so we have "foo": "bar" for the first property
})(),
[convertsToS]: // |s| was set above to "bar", so after ToPropertyKey, "bar"
'efg' // so we have "bar": "efg" for the second property
};
assertEq(o.foo, "abc");
assertEq(o.bar, "efg");
/******************************************************************************/
if (typeof reportCompare === "function")
reportCompare(true, true);
print("Tests complete");

View File

@ -474,6 +474,10 @@ class RegExpObject : public NativeObject
void setPrivate(void* priv) = delete;
};
JSString*
str_replace_regexp_raw(JSContext* cx, HandleString string, Handle<RegExpObject*> regexp,
HandleString replacement);
/*
* Parse regexp flags. Report an error and return false if an invalid
* sequence of flags is encountered (repeat/invalid flag).

View File

@ -280,7 +280,7 @@ class SimpleEdgeRange : public EdgeRange {
bool init(JSContext* cx, void* thing, JS::TraceKind kind, bool wantNames = true) {
SimpleEdgeVectorTracer tracer(cx, &edges, wantNames);
JS_TraceChildren(&tracer, thing, kind);
js::TraceChildren(&tracer, thing, kind);
settle();
return tracer.okay;
}

View File

@ -372,7 +372,7 @@ CreateGlobalObject(JSContext* cx, const JSClass* clasp, nsIPrincipal* principal,
if (!((const js::Class*)clasp)->ext.isWrappedNative)
{
VerifyTraceProtoAndIfaceCacheCalledTracer trc(JS_GetRuntime(cx));
JS_TraceChildren(&trc, global, JS::TraceKind::Object);
TraceChildren(&trc, GCCellPtr(global.get()));
MOZ_ASSERT(trc.ok, "Trace hook on global needs to call TraceXPCGlobal for XPConnect compartments.");
}
#endif

View File

@ -487,13 +487,13 @@ public:
*/
nsRegion mVerticalPanRegion;
/**
* Scaled versions of mHitRegion and mMaybeHitRegion.
* Scaled versions of the bounds of mHitRegion and mMaybeHitRegion.
* We store these because FindPaintedLayerFor() needs to consume them
* in this form, and it's a hot code path so we don't wnat to scale
* in this form, and it's a hot code path so we don't want to scale
* them inside that function.
*/
nsIntRegion mScaledHitRegion;
nsIntRegion mScaledMaybeHitRegion;
nsIntRect mScaledHitRegionBounds;
nsIntRect mScaledMaybeHitRegionBounds;
/**
* The "active scrolled root" for all content in the layer. Must
* be non-null; all content in a PaintedLayer must have the same
@ -2591,14 +2591,15 @@ PaintedLayerDataNode::FindPaintedLayerFor(const nsIntRect& aVisibleRect,
MOZ_ASSERT(!data.mExclusiveToOneItem);
lowestUsableLayer = &data;
nsIntRegion visibleRegion = data.mVisibleRegion;
// When checking whether the visible region intersects the given
// visible rect, also include the event-regions in the visible region,
// Also check whether the event-regions intersect the visible rect,
// unless we're in an inactive layer, in which case the event-regions
// will be hoisted out into their own layer.
ContainerState& contState = mTree.ContState();
if (!contState.IsInInactiveLayer()) {
visibleRegion.OrWith(data.mScaledHitRegion);
visibleRegion.OrWith(data.mScaledMaybeHitRegion);
// For performance reasons, we check the intersection with the bounds
// of the event-regions.
if (!mTree.ContState().IsInInactiveLayer() &&
(data.mScaledHitRegionBounds.Intersects(aVisibleRect) ||
data.mScaledMaybeHitRegionBounds.Intersects(aVisibleRect))) {
break;
}
if (visibleRegion.Intersects(aVisibleRect)) {
break;
@ -3449,15 +3450,10 @@ PaintedLayerData::AccumulateEventRegions(ContainerState* aState, nsDisplayLayerE
mHorizontalPanRegion.Or(mHorizontalPanRegion, aEventRegions->HorizontalPanRegion());
mVerticalPanRegion.Or(mVerticalPanRegion, aEventRegions->VerticalPanRegion());
// Simplify the maybe-hit region because it can be a complex region
// and operations on it, such as the scaling below and the use of the
// result in hot code paths like FindPaintedLayerFor(), can be very expensive.
mMaybeHitRegion.SimplifyOutward(8);
// Calculate scaled versions of mHitRegion and mMaybeHitRegion for quick
// access in FindPaintedLayerFor().
mScaledHitRegion = aState->ScaleRegionToOutsidePixels(mHitRegion);
mScaledMaybeHitRegion = aState->ScaleRegionToOutsidePixels(mMaybeHitRegion);
// Calculate scaled versions of the bounds of mHitRegion and mMaybeHitRegion
// for quick access in FindPaintedLayerFor().
mScaledHitRegionBounds = aState->ScaleToOutsidePixels(mHitRegion.GetBounds());
mScaledMaybeHitRegionBounds = aState->ScaleToOutsidePixels(mMaybeHitRegion.GetBounds());
}
PaintedLayerData

View File

@ -613,12 +613,12 @@ nsDisplayListBuilder::nsDisplayListBuilder(nsIFrame* aReferenceFrame,
mCurrentAnimatedGeometryRoot(nullptr),
mDirtyRect(-1,-1,-1,-1),
mGlassDisplayItem(nullptr),
mScrollInfoItemsForHoisting(nullptr),
mPendingScrollInfoItems(nullptr),
mCommittedScrollInfoItems(nullptr),
mMode(aMode),
mCurrentScrollParentId(FrameMetrics::NULL_SCROLL_ID),
mCurrentScrollbarTarget(FrameMetrics::NULL_SCROLL_ID),
mCurrentScrollbarFlags(0),
mSVGEffectsBuildingDepth(0),
mBuildCaret(aBuildCaret),
mIgnoreSuppression(false),
mHadToIgnoreSuppression(false),
@ -1251,35 +1251,27 @@ nsDisplayListBuilder::IsInWillChangeBudget(nsIFrame* aFrame,
return onBudget;
}
void
nsDisplayListBuilder::EnterSVGEffectsContents(nsDisplayList* aHoistedItemsStorage)
nsDisplayList*
nsDisplayListBuilder::EnterScrollInfoItemHoisting(nsDisplayList* aScrollInfoItemStorage)
{
MOZ_ASSERT(mSVGEffectsBuildingDepth >= 0);
MOZ_ASSERT(aHoistedItemsStorage);
if (mSVGEffectsBuildingDepth == 0) {
MOZ_ASSERT(!mScrollInfoItemsForHoisting);
mScrollInfoItemsForHoisting = aHoistedItemsStorage;
}
mSVGEffectsBuildingDepth++;
MOZ_ASSERT(ShouldBuildScrollInfoItemsForHoisting());
nsDisplayList* old = mPendingScrollInfoItems;
mPendingScrollInfoItems = aScrollInfoItemStorage;
return old;
}
void
nsDisplayListBuilder::ExitSVGEffectsContents()
nsDisplayListBuilder::LeaveScrollInfoItemHoisting(nsDisplayList* aScrollInfoItemStorage)
{
mSVGEffectsBuildingDepth--;
MOZ_ASSERT(mSVGEffectsBuildingDepth >= 0);
MOZ_ASSERT(mScrollInfoItemsForHoisting);
if (mSVGEffectsBuildingDepth == 0) {
mScrollInfoItemsForHoisting = nullptr;
}
MOZ_ASSERT(ShouldBuildScrollInfoItemsForHoisting());
mPendingScrollInfoItems = aScrollInfoItemStorage;
}
void
nsDisplayListBuilder::AppendNewScrollInfoItemForHoisting(nsDisplayScrollInfoLayer* aScrollInfoItem)
{
MOZ_ASSERT(ShouldBuildScrollInfoItemsForHoisting());
MOZ_ASSERT(mScrollInfoItemsForHoisting);
mScrollInfoItemsForHoisting->AppendNewToTop(aScrollInfoItem);
mPendingScrollInfoItems->AppendNewToTop(aScrollInfoItem);
}
void
@ -4448,6 +4440,7 @@ nsDisplayScrollInfoLayer::nsDisplayScrollInfoLayer(
, mScrollFrame(aScrollFrame)
, mScrolledFrame(aScrolledFrame)
, mScrollParentId(aBuilder->GetCurrentScrollParentId())
, mIgnoreIfCompositorSupportsBlending(false)
{
#ifdef NS_BUILD_REFCNT_LOGGING
MOZ_COUNT_CTOR(nsDisplayScrollInfoLayer);
@ -4471,6 +4464,16 @@ nsDisplayScrollInfoLayer::BuildLayer(nsDisplayListBuilder* aBuilder,
// cannot be layerized, and so needs to scroll synchronously. To handle those
// cases, we still want to generate scrollinfo layers.
if (mIgnoreIfCompositorSupportsBlending) {
// This item was created pessimistically because, during display list
// building, we encountered a mix blend mode. If our layer manager
// supports compositing this mix blend mode, we don't actually need to
// create a scroll info layer.
if (aManager->SupportsMixBlendModes(mContainedBlendModes)) {
return nullptr;
}
}
ContainerLayerParameters params = aContainerParameters;
if (mScrolledFrame->GetContent() &&
nsLayoutUtils::GetCriticalDisplayPort(mScrolledFrame->GetContent(), nullptr)) {
@ -4512,7 +4515,24 @@ nsDisplayScrollInfoLayer::ComputeFrameMetrics(Layer* aLayer,
mScrollParentId, viewport, Nothing(), false, params)));
}
void
nsDisplayScrollInfoLayer::IgnoreIfCompositorSupportsBlending(BlendModeSet aBlendModes)
{
mContainedBlendModes += aBlendModes;
mIgnoreIfCompositorSupportsBlending = true;
}
void
nsDisplayScrollInfoLayer::UnsetIgnoreIfCompositorSupportsBlending()
{
mIgnoreIfCompositorSupportsBlending = false;
}
bool
nsDisplayScrollInfoLayer::ContainedInMixBlendMode() const
{
return mIgnoreIfCompositorSupportsBlending;
}
void
nsDisplayScrollInfoLayer::WriteDebugInfo(std::stringstream& aStream)

View File

@ -841,11 +841,26 @@ public:
const nsIFrame* aStopAtAncestor,
nsIFrame** aOutResult);
void EnterSVGEffectsContents(nsDisplayList* aHoistedItemsStorage);
void ExitSVGEffectsContents();
void SetCommittedScrollInfoItemList(nsDisplayList* aScrollInfoItemStorage) {
mCommittedScrollInfoItems = aScrollInfoItemStorage;
}
nsDisplayList* CommittedScrollInfoItems() const {
return mCommittedScrollInfoItems;
}
bool ShouldBuildScrollInfoItemsForHoisting() const {
return IsPaintingToWindow();
}
bool ShouldBuildScrollInfoItemsForHoisting() const
{ return mSVGEffectsBuildingDepth > 0; }
// When building display lists for stacking contexts, we append scroll info
// items to a temporary list. If the stacking context would create an
// inactive layer, we commit these items to the final hoisted scroll items
// list. Otherwise, we propagate these items to the parent stacking
// context's list of pending scroll info items.
//
// EnterScrollInfoItemHoisting returns the parent stacking context's pending
// item list.
nsDisplayList* EnterScrollInfoItemHoisting(nsDisplayList* aScrollInfoItemStorage);
void LeaveScrollInfoItemHoisting(nsDisplayList* aScrollInfoItemStorage);
void AppendNewScrollInfoItemForHoisting(nsDisplayScrollInfoLayer* aScrollInfoItem);
@ -954,19 +969,20 @@ private:
nsIntRegion mWindowDraggingRegion;
// The display item for the Windows window glass background, if any
nsDisplayItem* mGlassDisplayItem;
// A temporary list that we append scroll info items to while building
// display items for the contents of frames with SVG effects.
// Only non-null when ShouldBuildScrollInfoItemsForHoisting() is true.
// This is a pointer and not a real nsDisplayList value because the
// nsDisplayList class is defined below this class, so we can't use it here.
nsDisplayList* mScrollInfoItemsForHoisting;
// When encountering inactive layers, we need to hoist scroll info items
// above these layers so APZ can deliver events to content. Such scroll
// info items are considered "committed" to the final hoisting list. If
// no hoisting is needed immediately, it may be needed later if a blend
// mode is introduced in a higher stacking context, so we keep all scroll
// info items until the end of display list building.
nsDisplayList* mPendingScrollInfoItems;
nsDisplayList* mCommittedScrollInfoItems;
nsTArray<DisplayItemClip*> mDisplayItemClipsToDestroy;
Mode mMode;
ViewID mCurrentScrollParentId;
ViewID mCurrentScrollbarTarget;
uint32_t mCurrentScrollbarFlags;
BlendModeSet mContainedBlendModes;
int32_t mSVGEffectsBuildingDepth;
bool mBuildCaret;
bool mIgnoreSuppression;
bool mHadToIgnoreSuppression;
@ -3265,10 +3281,20 @@ public:
mozilla::UniquePtr<FrameMetrics> ComputeFrameMetrics(Layer* aLayer,
const ContainerLayerParameters& aContainerParameters);
void IgnoreIfCompositorSupportsBlending(BlendModeSet aBlendModes);
void UnsetIgnoreIfCompositorSupportsBlending();
bool ContainedInMixBlendMode() const;
protected:
nsIFrame* mScrollFrame;
nsIFrame* mScrolledFrame;
ViewID mScrollParentId;
// If the only reason for the ScrollInfoLayer is a blend mode, the blend
// mode may be supported in the compositor. We track it here to determine
// if so during layer construction.
BlendModeSet mContainedBlendModes;
bool mIgnoreIfCompositorSupportsBlending;
};
/**

View File

@ -3103,6 +3103,11 @@ nsLayoutUtils::PaintFrame(nsRenderingContext* aRenderingContext, nsIFrame* aFram
builder, rootScrollFrame, displayportBase, &displayport);
}
nsDisplayList hoistedScrollItemStorage;
if (aFlags & (PAINT_WIDGET_LAYERS | PAINT_TO_WINDOW)) {
builder.SetCommittedScrollInfoItemList(&hoistedScrollItemStorage);
}
nsRegion visibleRegion;
if (aFlags & PAINT_WIDGET_LAYERS) {
// This layer tree will be reused, so we'll need to calculate it

View File

@ -1911,6 +1911,87 @@ public:
}
};
class AutoHoistScrollInfoItems
{
nsDisplayListBuilder& mBuilder;
nsDisplayList* mParentPendingList;
nsDisplayList mPendingList;
public:
explicit AutoHoistScrollInfoItems(nsDisplayListBuilder& aBuilder)
: mBuilder(aBuilder),
mParentPendingList(nullptr)
{
if (!mBuilder.ShouldBuildScrollInfoItemsForHoisting()) {
return;
}
mParentPendingList = mBuilder.EnterScrollInfoItemHoisting(&mPendingList);
}
~AutoHoistScrollInfoItems() {
if (!mParentPendingList) {
// If we have no parent stacking context, we will throw out any scroll
// info items that are pending (meaning, we can safely ignore them since
// the scrollable layers they represent will not be flattened).
return;
}
mParentPendingList->AppendToTop(&mPendingList);
mBuilder.LeaveScrollInfoItemHoisting(mParentPendingList);
}
// The current stacking context will definitely be flattened, so commit all
// pending scroll info items and make sure they will not be optimized away
// in the case they were also inside a compositor-supported mix-blend-mode.
void Commit() {
nsDisplayItem* iter = nullptr;
while ((iter = mPendingList.RemoveBottom()) != nullptr) {
MOZ_ASSERT(iter->GetType() == nsDisplayItem::TYPE_SCROLL_INFO_LAYER);
auto item = static_cast<nsDisplayScrollInfoLayer*>(iter);
item->UnsetIgnoreIfCompositorSupportsBlending();
mBuilder.CommittedScrollInfoItems()->AppendToTop(item);
}
}
// The current stacking context will only be flattened if the given mix-blend
// mode is not supported in the compositor. Annotate the scroll info items
// and keep them in the pending list.
void AnnotateForBlendModes(BlendModeSet aBlendModes) {
for (nsDisplayItem* iter = mPendingList.GetBottom(); iter; iter = iter->GetAbove()) {
MOZ_ASSERT(iter->GetType() == nsDisplayItem::TYPE_SCROLL_INFO_LAYER);
auto item = static_cast<nsDisplayScrollInfoLayer*>(iter);
item->IgnoreIfCompositorSupportsBlending(aBlendModes);
}
}
bool IsRootStackingContext() {
// We're only finished building the hoisted list if we have no parent
// stacking context.
return !mParentPendingList;
}
// Any scroll info items which contain a mix-blend mode are moved into the
// parent display list.
void Finish(nsDisplayList* aResultList) {
MOZ_ASSERT(IsRootStackingContext());
nsDisplayItem* iter = nullptr;
while ((iter = mPendingList.RemoveBottom()) != nullptr) {
MOZ_ASSERT(iter->GetType() == nsDisplayItem::TYPE_SCROLL_INFO_LAYER);
nsDisplayScrollInfoLayer *item = static_cast<decltype(item)>(iter);
if (!item->ContainedInMixBlendMode()) {
// Discard the item, it was not committed for having an SVG effect nor
// was it contained with a mix-blend mode.
item->~nsDisplayScrollInfoLayer();
continue;
}
aResultList->AppendToTop(item);
}
}
};
static void
CheckForApzAwareEventHandlers(nsDisplayListBuilder* aBuilder, nsIFrame* aFrame)
{
@ -1990,13 +2071,13 @@ nsIFrame::BuildDisplayListForStackingContext(nsDisplayListBuilder* aBuilder,
inTransform = true;
}
AutoHoistScrollInfoItems hoistedScrollInfoItems(*aBuilder);
bool usingSVGEffects = nsSVGIntegrationUtils::UsingEffectsForFrame(this);
nsRect dirtyRectOutsideSVGEffects = dirtyRect;
nsDisplayList hoistedScrollInfoItemsStorage;
if (usingSVGEffects) {
dirtyRect =
nsSVGIntegrationUtils::GetRequiredSourceForInvalidArea(this, dirtyRect);
aBuilder->EnterSVGEffectsContents(&hoistedScrollInfoItemsStorage);
}
bool useOpacity = HasVisualOpacity() && !nsSVGUtils::CanOptimizeOpacity(this);
@ -2130,10 +2211,6 @@ nsIFrame::BuildDisplayListForStackingContext(nsDisplayListBuilder* aBuilder,
/* List now emptied, so add the new list to the top. */
resultList.AppendNewToTop(
new (aBuilder) nsDisplaySVGEffects(aBuilder, this, &resultList));
// Also add the hoisted scroll info items. We need those for APZ scrolling
// because nsDisplaySVGEffects items can't build active layers.
aBuilder->ExitSVGEffectsContents();
resultList.AppendToTop(&hoistedScrollInfoItemsStorage);
}
/* Else, if the list is non-empty and there is CSS group opacity without SVG
* effects, wrap it up in an opacity item.
@ -2202,8 +2279,25 @@ nsIFrame::BuildDisplayListForStackingContext(nsDisplayListBuilder* aBuilder,
*/
if (aBuilder->ContainsBlendMode()) {
resultList.AppendNewToTop(
new (aBuilder) nsDisplayBlendContainer(aBuilder, this, &resultList, aBuilder->ContainedBlendModes()));
resultList.AppendNewToTop(
new (aBuilder) nsDisplayBlendContainer(aBuilder, this, &resultList, aBuilder->ContainedBlendModes()));
}
if (aBuilder->ShouldBuildScrollInfoItemsForHoisting()) {
if (usingSVGEffects) {
// We know this stacking context will be flattened, so hoist any scroll
// info items we created.
hoistedScrollInfoItems.Commit();
} else if (aBuilder->ContainsBlendMode()) {
hoistedScrollInfoItems.AnnotateForBlendModes(aBuilder->ContainedBlendModes());
}
if (hoistedScrollInfoItems.IsRootStackingContext()) {
// If we're the root stacking context, no more mix-blend modes can be
// introduced and it's safe to hoist scroll info items.
resultList.AppendToTop(aBuilder->CommittedScrollInfoItems());
hoistedScrollInfoItems.Finish(&resultList);
}
}
/* If there's blending, wrap up the list in a blend-mode item. Note

View File

@ -500,6 +500,38 @@ RotateRight(const T aValue, uint_fast8_t aShift)
return (aValue >> aShift) | (aValue << (sizeof(T) * CHAR_BIT - aShift));
}
/**
* Returns true if |x| is a power of two.
* Zero is not an integer power of two. (-Inf is not an integer)
*/
template<typename T>
inline bool
IsPowerOfTwo(T x)
{
static_assert(IsUnsigned<T>::value,
"IsPowerOfTwo requires unsigned values");
return x && (x & (x - 1)) == 0;
}
template<typename T>
inline T
Clamp(const T aValue, const T aMin, const T aMax)
{
static_assert(IsIntegral<T>::value,
"Clamp accepts only integral types, so that it doesn't have"
" to distinguish differently-signed zeroes (which users may"
" or may not care to distinguish, likely at a perf cost) or"
" to decide how to clamp NaN or a range with a NaN"
" endpoint.");
MOZ_ASSERT(aMin <= aMax);
if (aValue <= aMin)
return aMin;
if (aValue >= aMax)
return aMax;
return aValue;
}
} /* namespace mozilla */
#endif /* mozilla_MathAlgorithms_h */

View File

@ -0,0 +1,87 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this file,
* You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/MathAlgorithms.h"
using mozilla::Clamp;
using mozilla::IsPowerOfTwo;
static void
TestClamp()
{
MOZ_RELEASE_ASSERT(Clamp(0, 0, 0) == 0);
MOZ_RELEASE_ASSERT(Clamp(1, 0, 0) == 0);
MOZ_RELEASE_ASSERT(Clamp(-1, 0, 0) == 0);
MOZ_RELEASE_ASSERT(Clamp(0, 1, 1) == 1);
MOZ_RELEASE_ASSERT(Clamp(0, 1, 2) == 1);
MOZ_RELEASE_ASSERT(Clamp(0, -1, -1) == -1);
MOZ_RELEASE_ASSERT(Clamp(0, -2, -1) == -1);
MOZ_RELEASE_ASSERT(Clamp(0, 1, 3) == 1);
MOZ_RELEASE_ASSERT(Clamp(1, 1, 3) == 1);
MOZ_RELEASE_ASSERT(Clamp(2, 1, 3) == 2);
MOZ_RELEASE_ASSERT(Clamp(3, 1, 3) == 3);
MOZ_RELEASE_ASSERT(Clamp(4, 1, 3) == 3);
MOZ_RELEASE_ASSERT(Clamp(5, 1, 3) == 3);
MOZ_RELEASE_ASSERT(Clamp<uint8_t>(UINT8_MAX, 0, UINT8_MAX) == UINT8_MAX);
MOZ_RELEASE_ASSERT(Clamp<uint8_t>(0, 0, UINT8_MAX) == 0);
MOZ_RELEASE_ASSERT(Clamp<int8_t>(INT8_MIN, INT8_MIN, INT8_MAX) == INT8_MIN);
MOZ_RELEASE_ASSERT(Clamp<int8_t>(INT8_MIN, 0, INT8_MAX) == 0);
MOZ_RELEASE_ASSERT(Clamp<int8_t>(INT8_MAX, INT8_MIN, INT8_MAX) == INT8_MAX);
MOZ_RELEASE_ASSERT(Clamp<int8_t>(INT8_MAX, INT8_MIN, 0) == 0);
}
static void
TestIsPowerOfTwo()
{
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(0u));
MOZ_RELEASE_ASSERT( IsPowerOfTwo(1u));
MOZ_RELEASE_ASSERT( IsPowerOfTwo(2u));
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(3u));
MOZ_RELEASE_ASSERT( IsPowerOfTwo(4u));
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(5u));
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(6u));
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(7u));
MOZ_RELEASE_ASSERT( IsPowerOfTwo(8u));
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(9u));
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(uint8_t(UINT8_MAX/2))); // 127, 0x7f
MOZ_RELEASE_ASSERT( IsPowerOfTwo(uint8_t(UINT8_MAX/2 + 1))); // 128, 0x80
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(uint8_t(UINT8_MAX/2 + 2))); // 129, 0x81
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(uint8_t(UINT8_MAX - 1))); // 254, 0xfe
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(uint8_t(UINT8_MAX))); // 255, 0xff
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(uint16_t(UINT16_MAX/2))); // 0x7fff
MOZ_RELEASE_ASSERT( IsPowerOfTwo(uint16_t(UINT16_MAX/2 + 1))); // 0x8000
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(uint16_t(UINT16_MAX/2 + 2))); // 0x8001
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(uint16_t(UINT16_MAX - 1))); // 0xfffe
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(uint16_t(UINT16_MAX))); // 0xffff
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(uint32_t(UINT32_MAX/2)));
MOZ_RELEASE_ASSERT( IsPowerOfTwo(uint32_t(UINT32_MAX/2 + 1)));
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(uint32_t(UINT32_MAX/2 + 2)));
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(uint32_t(UINT32_MAX - 1)));
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(uint32_t(UINT32_MAX)));
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(uint64_t(UINT64_MAX/2)));
MOZ_RELEASE_ASSERT( IsPowerOfTwo(uint64_t(UINT64_MAX/2 + 1)));
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(uint64_t(UINT64_MAX/2 + 2)));
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(uint64_t(UINT64_MAX - 1)));
MOZ_RELEASE_ASSERT(!IsPowerOfTwo(uint64_t(UINT64_MAX)));
}
int
main()
{
TestIsPowerOfTwo();
TestClamp();
return 0;
}

View File

@ -22,6 +22,7 @@ CppUnitTests([
'TestJSONWriter',
'TestMacroArgs',
'TestMacroForEach',
'TestMathAlgorithms',
'TestMaybe',
'TestPair',
'TestRefPtr',

View File

@ -668,7 +668,6 @@ pref("gfx.font_rendering.wordcache.maxentries", 10000);
pref("gfx.font_rendering.graphite.enabled", true);
#ifdef XP_WIN
pref("gfx.font_rendering.directwrite.enabled", false);
pref("gfx.font_rendering.directwrite.use_gdi_table_loading", true);
#endif

View File

@ -523,10 +523,18 @@ function sendWheelAndPaint(aTarget, aOffsetX, aOffsetY, aEvent, aCallback, aWind
if (!aCallback)
return;
aWindow.waitForAllPaintsFlushed(function() {
utils.restoreNormalRefresh();
aCallback();
});
var waitForPaints = function () {
SpecialPowers.Services.obs.removeObserver(waitForPaints, "apz-repaints-flushed", false);
aWindow.waitForAllPaintsFlushed(function() {
utils.restoreNormalRefresh();
aCallback();
});
}
SpecialPowers.Services.obs.addObserver(waitForPaints, "apz-repaints-flushed", false);
if (!utils.flushApzRepaints(aWindow)) {
waitForPaints();
}
}, 0);
};

View File

@ -6,7 +6,7 @@ import sys
from setuptools import setup, find_packages
PACKAGE_NAME = 'mozrunner'
PACKAGE_VERSION = '6.9'
PACKAGE_VERSION = '6.10'
desc = """Reliable start/stop/configuration of Mozilla Applications (Firefox, Thunderbird, etc.)"""
@ -15,7 +15,7 @@ deps = ['mozcrash >= 0.14',
'mozfile >= 1.0',
'mozinfo >= 0.7',
'mozlog >= 3.0',
'mozprocess >= 0.17',
'mozprocess >= 0.22',
'mozprofile >= 0.18',
]

View File

@ -32,6 +32,10 @@
"other": {
"tests": ["a11yr", "ts_paint", "tpaint", "sessionrestore", "sessionrestore_no_auto_restore"]
},
"other-e10s": {
"tests": ["a11yr", "ts_paint", "tpaint", "sessionrestore", "sessionrestore_no_auto_restore"],
"talos_options": ["--e10s"]
},
"other-osx-e10s": {
"tests": ["a11yr", "ts_paint", "tpaint", "sessionrestore", "sessionrestore_no_auto_restore"],
"talos_options": ["--e10s"]

View File

@ -148,7 +148,7 @@ NoteWeakMapChildrenTracer::onChild(const JS::GCCellPtr& aThing)
mCb.NoteWeakMapping(mMap, mKey, mKeyDelegate, aThing);
mTracedAny = true;
} else {
JS_TraceChildren(this, aThing.asCell(), aThing.kind());
JS::TraceChildren(this, aThing);
}
}
@ -203,7 +203,7 @@ NoteWeakMapsTracer::trace(JSObject* aMap, JS::GCCellPtr aKey,
mChildTracer.mKeyDelegate = kdelegate;
if (aValue.is<JSString>()) {
JS_TraceChildren(&mChildTracer, aValue.asCell(), aValue.kind());
JS::TraceChildren(&mChildTracer, aValue);
}
// The delegate could hold alive the key, so report something to the CC
@ -360,7 +360,7 @@ TraversalTracer::onChild(const JS::GCCellPtr& aThing)
// be traced.
JS_TraceObjectGroupCycleCollectorChildren(this, aThing);
} else if (!aThing.is<JSString>()) {
JS_TraceChildren(this, aThing.asCell(), aThing.kind());
JS::TraceChildren(this, aThing);
}
}
@ -542,7 +542,7 @@ CycleCollectedJSRuntime::NoteGCThingJSChildren(JS::GCCellPtr aThing,
{
MOZ_ASSERT(mJSRuntime);
TraversalTracer trc(mJSRuntime, aCb);
JS_TraceChildren(&trc, aThing.asCell(), aThing.kind());
JS::TraceChildren(&trc, aThing);
}
void

View File

@ -66,6 +66,37 @@
namespace mozilla {
namespace internal {
// RAII helper that temporarily changes the page protection of a memory
// range in the current process via VirtualProtectEx and restores the
// previous protection on destruction. The protection is not changed at
// construction time; callers must invoke Protect() and check its result.
class AutoVirtualProtect
{
public:
// aFunc/aSize describe the address range to reprotect; aProtect is the
// desired new protection (e.g. PAGE_EXECUTE_READWRITE).
AutoVirtualProtect(void* aFunc, size_t aSize, DWORD aProtect)
: mFunc(aFunc), mSize(aSize), mNewProtect(aProtect), mOldProtect(0),
mSuccess(false)
{}
// Restore the original protection, but only if Protect() succeeded;
// otherwise mOldProtect is meaningless and the range was never changed.
~AutoVirtualProtect()
{
if (mSuccess) {
VirtualProtectEx(GetCurrentProcess(), mFunc, mSize, mOldProtect,
&mOldProtect);
}
}
// Apply the requested protection. Returns true on success; the prior
// protection is saved in mOldProtect for the destructor to restore.
bool Protect()
{
mSuccess = !!VirtualProtectEx(GetCurrentProcess(), mFunc, mSize,
mNewProtect, &mOldProtect);
return mSuccess;
}
private:
void* const mFunc;        // start of the range being reprotected
size_t const mSize;       // size of the range, in bytes
DWORD const mNewProtect;  // protection applied by Protect()
DWORD mOldProtect;        // protection in effect before Protect()
bool mSuccess;            // true once Protect() has succeeded
};
class WindowsDllNopSpacePatcher
{
typedef unsigned char* byteptr_t;
@ -91,8 +122,8 @@ public:
byteptr_t fn = mPatchedFns[i];
// Ensure we can write to the code.
DWORD op;
if (!VirtualProtectEx(GetCurrentProcess(), fn, 2, PAGE_EXECUTE_READWRITE, &op)) {
AutoVirtualProtect protect(fn, 2, PAGE_EXECUTE_READWRITE);
if (!protect.Protect()) {
// printf("VirtualProtectEx failed! %d\n", GetLastError());
continue;
}
@ -100,9 +131,6 @@ public:
// mov edi, edi
*((uint16_t*)fn) = 0xff8b;
// Restore the old protection.
VirtualProtectEx(GetCurrentProcess(), fn, 2, op, &op);
// I don't think this is actually necessary, but it can't hurt.
FlushInstructionCache(GetCurrentProcess(),
/* ignored */ nullptr,
@ -141,19 +169,16 @@ public:
// Ensure we can read and write starting at fn - 5 (for the long jmp we're
// going to write) and ending at fn + 2 (for the short jmp up to the long
// jmp).
DWORD op;
if (!VirtualProtectEx(GetCurrentProcess(), fn - 5, 7,
PAGE_EXECUTE_READWRITE, &op)) {
// jmp). These bytes may span two pages with different protection.
AutoVirtualProtect protectBefore(fn - 5, 5, PAGE_EXECUTE_READWRITE);
AutoVirtualProtect protectAfter(fn, 2, PAGE_EXECUTE_READWRITE);
if (!protectBefore.Protect() || !protectAfter.Protect()) {
//printf ("VirtualProtectEx failed! %d\n", GetLastError());
return false;
}
bool rv = WriteHook(fn, aHookDest, aOrigFunc);
// Re-protect, and we're done.
VirtualProtectEx(GetCurrentProcess(), fn - 5, 7, op, &op);
if (rv) {
mPatchedFns[mPatchedFnsLen] = fn;
mPatchedFnsLen++;
@ -248,13 +273,14 @@ public:
#error "Unknown processor type"
#endif
byteptr_t origBytes = *((byteptr_t*)p);
// ensure we can modify the original code
DWORD op;
if (!VirtualProtectEx(GetCurrentProcess(), origBytes, nBytes,
PAGE_EXECUTE_READWRITE, &op)) {
AutoVirtualProtect protect(origBytes, nBytes, PAGE_EXECUTE_READWRITE);
if (!protect.Protect()) {
//printf ("VirtualProtectEx failed! %d\n", GetLastError());
continue;
}
// Remove the hook by making the original function jump directly
// in the trampoline.
intptr_t dest = (intptr_t)(p + sizeof(void*));
@ -266,8 +292,6 @@ public:
#else
#error "Unknown processor type"
#endif
// restore protection; if this fails we can't really do anything about it
VirtualProtectEx(GetCurrentProcess(), origBytes, nBytes, op, &op);
}
}
@ -629,9 +653,8 @@ protected:
*aOutTramp = tramp;
// ensure we can modify the original code
DWORD op;
if (!VirtualProtectEx(GetCurrentProcess(), aOrigFunction, nBytes,
PAGE_EXECUTE_READWRITE, &op)) {
AutoVirtualProtect protect(aOrigFunction, nBytes, PAGE_EXECUTE_READWRITE);
if (!protect.Protect()) {
//printf ("VirtualProtectEx failed! %d\n", GetLastError());
return;
}
@ -653,9 +676,6 @@ protected:
origBytes[11] = 0xff;
origBytes[12] = 0xe3;
#endif
// restore protection; if this fails we can't really do anything about it
VirtualProtectEx(GetCurrentProcess(), aOrigFunction, nBytes, op, &op);
}
byteptr_t FindTrampolineSpace()

View File

@ -11,6 +11,8 @@
#include <stdio.h>
#endif
#include "mozilla/CheckedInt.h"
/**
* 07/02/2001 09:17p 509,104 clangref.pdf from openwatcom's site
* Watcom C Language Reference Edition 11.0c
@ -163,12 +165,24 @@ nsDeque::Erase()
bool
nsDeque::GrowCapacity()
{
int32_t theNewSize = mCapacity << 2;
NS_ASSERTION(theNewSize > mCapacity, "Overflow");
if (theNewSize <= mCapacity) {
mozilla::CheckedInt<int32_t> newCapacity = mCapacity;
newCapacity *= 4;
NS_ASSERTION(newCapacity.isValid(), "Overflow");
if (!newCapacity.isValid()) {
return false;
}
void** temp = (void**)malloc(theNewSize * sizeof(void*));
// Sanity check the new byte size.
mozilla::CheckedInt<int32_t> newByteSize = newCapacity;
newByteSize *= sizeof(void*);
NS_ASSERTION(newByteSize.isValid(), "Overflow");
if (!newByteSize.isValid()) {
return false;
}
void** temp = (void**)malloc(newByteSize.value());
if (!temp) {
return false;
}
@ -185,7 +199,7 @@ nsDeque::GrowCapacity()
free(mData);
}
mCapacity = theNewSize;
mCapacity = newCapacity.value();
mOrigin = 0; //now realign the origin...
mData = temp;
@ -374,37 +388,10 @@ nsDeque::RemoveObjectAt(int32_t aIndex)
return result;
}
/**
* Create and return an iterator pointing to
* the beginning of the queue. Note that this
* takes the circular buffer semantics into account.
*
* @return new deque iterator, init'ed to 1st item
*/
nsDequeIterator
nsDeque::Begin() const
{
return nsDequeIterator(*this, 0);
}
/**
* Create and return an iterator pointing to
* the last item in the deque.
* Note that this takes the circular buffer semantics
* into account.
*
* @return new deque iterator, init'ed to the last item
*/
nsDequeIterator
nsDeque::End() const
{
return nsDequeIterator(*this, mSize - 1);
}
void*
nsDeque::Last() const
{
return End().GetCurrent();
return ObjectAt(mSize - 1);
}
/**
@ -443,245 +430,3 @@ nsDeque::FirstThat(nsDequeFunctor& aFunctor) const
}
return 0;
}
/******************************************************
* Here comes the nsDequeIterator class...
******************************************************/
/**
* DequeIterator is an object that knows how to iterate (forward and backward)
* through a Deque. Normally, you don't need to do this, but there are some special
* cases where it is pretty handy, so here you go.
*
* This is a standard dequeiterator constructor
*
* @param aQueue is the deque object to be iterated
* @param aIndex is the starting position for your iteration
*/
nsDequeIterator::nsDequeIterator(const nsDeque& aQueue, int aIndex)
: mIndex(aIndex)
, mDeque(aQueue)
{
}
/**
* Create a copy of a DequeIterator
*
* @param aCopy is another iterator to copy from
*/
nsDequeIterator::nsDequeIterator(const nsDequeIterator& aCopy)
: mIndex(aCopy.mIndex)
, mDeque(aCopy.mDeque)
{
}
/**
* Moves iterator to first element in deque
* @return *this
*/
nsDequeIterator&
nsDequeIterator::First()
{
mIndex = 0;
return *this;
}
/**
* Standard assignment operator for dequeiterator
*
* @param aCopy is an iterator to be copied from
* @return *this
*/
nsDequeIterator&
nsDequeIterator::operator=(const nsDequeIterator& aCopy)
{
NS_ASSERTION(&mDeque == &aCopy.mDeque,
"you can't change the deque that an interator is iterating over, sorry.");
mIndex = aCopy.mIndex;
return *this;
}
/**
* perform ! operation against two iterators to test for equivalence
* (or lack thereof)!
*
* @param aIter is the object to be compared to
* @return TRUE if NOT equal.
*/
bool
nsDequeIterator::operator!=(nsDequeIterator& aIter)
{
return !this->operator==(aIter);
}
/**
* Compare two iterators for increasing order.
*
* @param aIter is the other iterator to be compared to
* @return TRUE if this object points to an element before
* the element pointed to by aIter.
* FALSE if this and aIter are not iterating over the same deque.
*/
bool
nsDequeIterator::operator<(nsDequeIterator& aIter)
{
return mIndex < aIter.mIndex && &mDeque == &aIter.mDeque;
}
/**
* Compare two iterators for equivalence.
*
* @param aIter is the other iterator to be compared to
* @return TRUE if EQUAL
*/
bool
nsDequeIterator::operator==(nsDequeIterator& aIter)
{
return mIndex == aIter.mIndex && &mDeque == &aIter.mDeque;
}
/**
* Compare two iterators for non strict decreasing order.
*
* @param aIter is the other iterator to be compared to
* @return TRUE if this object points to the same element, or
* an element after the element pointed to by aIter.
* FALSE if this and aIter are not iterating over the same deque.
*/
bool
nsDequeIterator::operator>=(nsDequeIterator& aIter)
{
return mIndex >= aIter.mIndex && &mDeque == &aIter.mDeque;
}
/**
* Pre-increment operator
*
* @return object at post-incremented index
*/
void*
nsDequeIterator::operator++()
{
NS_ASSERTION(mIndex < mDeque.mSize,
"You have reached the end of the Internet. You have seen "
"everything there is to see. Please go back. Now.");
#ifndef TIMELESS_LIGHTWEIGHT
if (mIndex >= mDeque.mSize) {
return 0;
}
#endif
return mDeque.ObjectAt(++mIndex);
}
/**
* Post-increment operator
*
* @param param is ignored
* @return object at pre-incremented index
*/
void*
nsDequeIterator::operator++(int)
{
NS_ASSERTION(mIndex <= mDeque.mSize,
"You have reached the end of the Internet. You have seen "
"everything there is to see. Please go back. Now.");
#ifndef TIMELESS_LIGHTWEIGHT
if (mIndex > mDeque.mSize) {
return 0;
}
#endif
return mDeque.ObjectAt(mIndex++);
}
/**
* Pre-decrement operator
*
* @return object at pre-decremented index
*/
void*
nsDequeIterator::operator--()
{
NS_ASSERTION(mIndex >= 0,
"You have reached the end of the Internet. You have seen "
"everything there is to see. Please go forward. Now.");
#ifndef TIMELESS_LIGHTWEIGHT
if (mIndex < 0) {
return 0;
}
#endif
return mDeque.ObjectAt(--mIndex);
}
/**
* Post-decrement operator
*
* @param param is ignored
* @return object at post-decremented index
*/
void*
nsDequeIterator::operator--(int)
{
NS_ASSERTION(mIndex >= 0,
"You have reached the end of the Internet. You have seen "
"everything there is to see. Please go forward. Now.");
#ifndef TIMELESS_LIGHTWEIGHT
if (mIndex < 0) {
return 0;
}
#endif
return mDeque.ObjectAt(mIndex--);
}
/**
* Dereference operator
* Note that the iterator floats, so you don't need to do:
* <code>++iter; aDeque.PopFront();</code>
* Unless you actually want your iterator to jump 2 spaces.
*
* Picture: [1 2I 3 4]
* PopFront()
* Picture: [2 3I 4]
* Note that I still happily points to object at the second index
*
* @return object at ith index
*/
void*
nsDequeIterator::GetCurrent()
{
NS_ASSERTION(mIndex < mDeque.mSize && mIndex >= 0, "Current is out of bounds");
#ifndef TIMELESS_LIGHTWEIGHT
if (mIndex >= mDeque.mSize || mIndex < 0) {
return 0;
}
#endif
return mDeque.ObjectAt(mIndex);
}
/**
* Call this method when you want to iterate all the
* members of the container, passing a functor along
* to call your code.
*
* @param aFunctor object to call for each member
* @return *this
*/
void
nsDequeIterator::ForEach(nsDequeFunctor& aFunctor) const
{
mDeque.ForEach(aFunctor);
}
/**
* Call this method when you want to iterate all the
* members of the container, calling the functor you
* passed with each member. This process will interrupt
* if your function returns non 0 to this method.
*
* @param aFunctor object to call for each member
* @return first nonzero result of aFunctor or 0.
*/
const void*
nsDequeIterator::FirstThat(nsDequeFunctor& aFunctor) const
{
return mDeque.FirstThat(aFunctor);
}

View File

@ -63,7 +63,6 @@ class nsDequeIterator;
class nsDeque
{
friend class nsDequeIterator;
typedef mozilla::fallible_t fallible_t;
public:
explicit nsDeque(nsDequeFunctor* aDeallocator = nullptr);
@ -161,22 +160,6 @@ public:
*/
void Erase();
/**
* Creates a new iterator, pointing to the first
* item in the deque.
*
* @return new dequeIterator
*/
nsDequeIterator Begin() const;
/**
* Creates a new iterator, pointing to the last
* item in the deque.
*
* @return new dequeIterator
*/
nsDequeIterator End() const;
void* Last() const;
/**
@ -231,167 +214,4 @@ private:
bool GrowCapacity();
};
/******************************************************
* Here comes the nsDequeIterator class...
******************************************************/
class nsDequeIterator
{
public:
/**
* DequeIterator is an object that knows how to iterate
* (forward and backward) through a Deque. Normally,
* you don't need to do this, but there are some special
* cases where it is pretty handy.
*
* One warning: the iterator is not bound to an item,
* it is bound to an index, so if you insert or remove
* from the beginning while using an iterator
* (which is not recommended) then the iterator will
* point to a different item. @see GetCurrent()
*
* Here you go.
*
* @param aQueue is the deque object to be iterated
* @param aIndex is the starting position for your iteration
*/
explicit nsDequeIterator(const nsDeque& aQueue, int aIndex = 0);
/**
* Create a copy of a DequeIterator
*
* @param aCopy is another iterator to copy from
*/
nsDequeIterator(const nsDequeIterator& aCopy);
/**
* Moves iterator to first element in the deque
* @return *this
*/
nsDequeIterator& First();
/**
* Standard assignment operator for dequeiterator
* @param aCopy is another iterator to copy from
* @return *this
*/
nsDequeIterator& operator=(const nsDequeIterator& aCopy);
/**
* perform ! operation against two iterators to test for equivalence
* (or lack thereof)!
*
* @param aIter is the object to be compared to
* @return TRUE if NOT equal.
*/
bool operator!=(nsDequeIterator& aIter);
/**
* Compare two iterators for increasing order.
*
* @param aIter is the other iterator to be compared to
* @return TRUE if this object points to an element before
* the element pointed to by aIter.
* FALSE if this and aIter are not iterating over
* the same deque.
*/
bool operator<(nsDequeIterator& aIter);
/**
* Compare two iterators for equivalence.
*
* @param aIter is the other iterator to be compared to
* @return TRUE if EQUAL
*/
bool operator==(nsDequeIterator& aIter);
/**
* Compare two iterators for non strict decreasing order.
*
* @param aIter is the other iterator to be compared to
* @return TRUE if this object points to the same element, or
* an element after the element pointed to by aIter.
* FALSE if this and aIter are not iterating over
* the same deque.
*/
bool operator>=(nsDequeIterator& aIter);
/**
* Pre-increment operator
* Iterator will advance one index towards the end.
*
* @return object_at(++index)
*/
void* operator++();
/**
* Post-increment operator
* Iterator will advance one index towards the end.
*
* @param param is ignored
* @return object_at(mIndex++)
*/
void* operator++(int);
/**
* Pre-decrement operator
* Iterator will advance one index towards the beginning.
*
* @return object_at(--index)
*/
void* operator--();
/**
* Post-decrement operator
* Iterator will advance one index towards the beginning.
*
* @param param is ignored
* @return object_at(index--)
*/
void* operator--(int);
/**
* Retrieve the iterator's notion of current node.
*
* Note that the iterator floats, so you don't need to do:
* <code>++iter; aDeque.PopFront();</code>
* Unless you actually want your iterator to jump 2 positions
* relative to its origin.
*
* Picture: [1 2i 3 4]
* PopFront()
* Picture: [2 3i 4]
* Note that I still happily points to object at the second index.
*
* @return object at i'th index
*/
void* GetCurrent();
/**
* Call this method when you want to iterate all the
* members of the container, passing a functor along
* to call your code.
*
* @param aFunctor object to call for each member
* @return *this
*/
void ForEach(nsDequeFunctor& aFunctor) const;
/**
* Call this method when you want to iterate all the
* members of the container, calling the functor you
* passed with each member. This process will interrupt
* if your function returns non 0 to this method.
*
* @param aFunctor object to call for each member
* @return first nonzero result of aFunctor or 0.
*/
const void* FirstThat(nsDequeFunctor& aFunctor) const;
protected:
int32_t mIndex;
const nsDeque& mDeque;
};
#endif

View File

@ -19,6 +19,8 @@ private:
int OriginalFlaw();
int AssignFlaw();
int TestRemove();
int TestPushFront();
int TestEmpty();
};
class _Dealloc: public nsDequeFunctor {
@ -44,6 +46,8 @@ int _TestDeque::Test() {
results+=OriginalFlaw();
results+=AssignFlaw();
results+=TestRemove();
results+=TestPushFront();
results+=TestEmpty();
return results;
}
@ -229,8 +233,82 @@ int _TestDeque::TestRemove() {
return 0;
}
// Exercise nsDeque::PushFront across its two interesting transitions:
// wrapping the circular buffer's origin, and growing the backing store.
// Returns 0 on success (TEST records failures).
int _TestDeque::TestPushFront() {
// PushFront has some interesting corner cases, primarily we're interested in whether:
// - wrapping around works properly
// - growing works properly
nsDeque d;
const int kPoolSize = 10;
const int kMaxSizeBeforeGrowth = 8;
int pool[kPoolSize];
for (int i = 0; i < kPoolSize; i++) {
pool[i] = i;
}
// Fill to capacity; each PushFront puts the newest element first, so the
// contents read back in descending order.
for (int i = 0; i < kMaxSizeBeforeGrowth; i++) {
d.PushFront(pool + i);
}
TEST(d.GetSize() == kMaxSizeBeforeGrowth, "verify size");
static const int t1[] = {7,6,5,4,3,2,1,0};
TEST(VerifyContents(d, t1, kMaxSizeBeforeGrowth), "verify pushfront 1");
// Now push one more so it grows
d.PushFront(pool + kMaxSizeBeforeGrowth);
TEST(d.GetSize() == kMaxSizeBeforeGrowth + 1, "verify size");
static const int t2[] = {8,7,6,5,4,3,2,1,0};
TEST(VerifyContents(d, t2, kMaxSizeBeforeGrowth + 1), "verify pushfront 2");
// And one more so that it wraps again
d.PushFront(pool + kMaxSizeBeforeGrowth + 1);
TEST(d.GetSize() == kMaxSizeBeforeGrowth + 2, "verify size");
static const int t3[] = {9,8,7,6,5,4,3,2,1,0};
TEST(VerifyContents(d, t3, kMaxSizeBeforeGrowth + 2), "verify pushfront 3");
return 0;
}
// Verify that every accessor on an empty nsDeque returns nullptr (or 0 for
// GetSize) rather than crashing — both for a freshly-constructed deque and
// for one that has been filled and then fully drained. Returns 0 on success.
int _TestDeque::TestEmpty() {
// Make sure nsDeque gives sane results if it's empty.
nsDeque d;
TEST(d.GetSize() == 0, "Size should be 0");
TEST(d.Pop() == nullptr, "Invalid operation should return nullptr");
TEST(d.PopFront() == nullptr, "Invalid operation should return nullptr");
TEST(d.Peek() == nullptr, "Invalid operation should return nullptr");
TEST(d.PeekFront() == nullptr, "Invalid operation should return nullptr");
TEST(d.ObjectAt(0) == nullptr, "Invalid operation should return nullptr");
TEST(d.Last() == nullptr, "Invalid operation should return nullptr");
// Fill it up and drain it.
// (The pointer values pushed are arbitrary; only the count matters.)
for (size_t i = 0; i < 8; i++) {
d.Push((void*)0xAA);
}
for (size_t i = 0; i < 8; i++) {
(void)d.Pop();
}
// Now check it again.
// A drained deque may have a non-zero internal origin, so this exercises a
// different internal state than the freshly-constructed case above.
TEST(d.GetSize() == 0, "Size should be 0");
TEST(d.Pop() == nullptr, "Invalid operation should return nullptr");
TEST(d.PopFront() == nullptr, "Invalid operation should return nullptr");
TEST(d.Peek() == nullptr, "Invalid operation should return nullptr");
TEST(d.PeekFront() == nullptr, "Invalid operation should return nullptr");
TEST(d.ObjectAt(0) == nullptr, "Invalid operation should return nullptr");
TEST(d.Last() == nullptr, "Invalid operation should return nullptr");
return 0;
}
int main (void) {
ScopedXPCOM xpcom("TestTimers");
ScopedXPCOM xpcom("TestDeque");
NS_ENSURE_FALSE(xpcom.failed(), 1);
_TestDeque test;