Merge inbound to m-c a=merge

Wes Kocher 2015-02-24 15:13:03 -08:00
commit a6f2e5f2b9
107 changed files with 2639 additions and 1447 deletions

View File

@ -34,6 +34,9 @@ pref("browser.tabs.remote.autostart.1", false);
// Bug 945235: Prevent all bars to be considered visible:
pref("toolkit.defaultChromeFeatures", "chrome,dialog=no,close,resizable,scrollbars,extrachrome");
// Disable focus rings
pref("browser.display.focus_ring_width", 0);
// Device pixel to CSS px ratio, in percent. Set to -1 to calculate based on display density.
pref("browser.viewport.scaleRatio", -1);

View File

@ -672,8 +672,8 @@ let ClickEventHandler = {
event.preventDefault(); // Need to prevent the pageload.
}
}
json.noReferrer = BrowserUtils.linkHasNoReferrer(node);
}
json.noReferrer = BrowserUtils.linkHasNoReferrer(node);
sendAsyncMessage("Content:Click", json);
return;

View File

@ -231,7 +231,7 @@ function openLinkIn(url, where, params) {
"where == 'save' but without initiatingDoc. See bug 814264.");
return;
}
saveURL(url, null, null, true, null, aReferrerURI, aInitiatingDoc);
saveURL(url, null, null, true, null, aNoReferrer ? null : aReferrerURI, aInitiatingDoc);
return;
}
const Cc = Components.classes;
@ -265,7 +265,7 @@ function openLinkIn(url, where, params) {
sa.AppendElement(wuri);
sa.AppendElement(charset);
sa.AppendElement(aReferrerURI);
sa.AppendElement(aNoReferrer ? null : aReferrerURI);
sa.AppendElement(aPostData);
sa.AppendElement(allowThirdPartyFixupSupports);
@ -327,7 +327,7 @@ function openLinkIn(url, where, params) {
if (aDisallowInheritPrincipal && !(uriObj && uriObj.schemeIs("javascript")))
flags |= Ci.nsIWebNavigation.LOAD_FLAGS_DISALLOW_INHERIT_OWNER;
w.gBrowser.loadURIWithFlags(url, flags, aReferrerURI, null, aPostData);
w.gBrowser.loadURIWithFlags(url, flags, aNoReferrer ? null : aReferrerURI, null, aPostData);
break;
case "tabshifted":
loadInBackground = !loadInBackground;

View File

@ -31,6 +31,52 @@ AnimationPlayer::WrapObject(JSContext* aCx)
return dom::AnimationPlayerBinding::Wrap(aCx, this);
}
void
AnimationPlayer::SetStartTime(const Nullable<TimeDuration>& aNewStartTime)
{
#if 1
// Bug 1096776: once we support inactive/missing timelines we'll want to take
// the disabled branch.
MOZ_ASSERT(mTimeline && !mTimeline->GetCurrentTime().IsNull(),
"We don't support inactive/missing timelines yet");
#else
Nullable<TimeDuration> timelineTime;
if (mTimeline) {
// The spec says to check if the timeline is active (has a resolved time)
// before using it here, but we don't need to since it's harmless to set
// the already null time to null.
timelineTime = mTimeline->GetCurrentTime();
}
if (timelineTime.IsNull() && !aNewStartTime.IsNull()) {
mHoldTime.SetNull();
}
#endif
Nullable<TimeDuration> previousCurrentTime = GetCurrentTime();
mStartTime = aNewStartTime;
if (!aNewStartTime.IsNull()) {
// Until bug 1127380 (playbackRate) is implemented, the rate is essentially
// one. Once that bug is fixed we should only SetNull() if the rate is not
// zero.
mHoldTime.SetNull();
} else {
mHoldTime = previousCurrentTime;
}
CancelPendingPlay();
if (mReady) {
// We may have already resolved mReady, but in that case calling
// MaybeResolve is a no-op, so that's okay.
mReady->MaybeResolve(this);
}
UpdateSourceContent();
PostUpdate();
// FIXME: Once bug 1074630 is fixed, run the procedure to update a player's
// finished state for player:
// http://w3c.github.io/web-animations/#update-a-players-finished-state
}
Nullable<TimeDuration>
AnimationPlayer::GetCurrentTime() const
{
@ -112,6 +158,12 @@ AnimationPlayer::GetStartTimeAsDouble() const
return AnimationUtils::TimeDurationToDouble(mStartTime);
}
void
AnimationPlayer::SetStartTimeAsDouble(const Nullable<double>& aStartTime)
{
return SetStartTime(AnimationUtils::DoubleToTimeDuration(aStartTime));
}
Nullable<double>
AnimationPlayer::GetCurrentTimeAsDouble() const
{

View File

@ -72,6 +72,7 @@ public:
Animation* GetSource() const { return mSource; }
AnimationTimeline* Timeline() const { return mTimeline; }
Nullable<TimeDuration> GetStartTime() const { return mStartTime; }
void SetStartTime(const Nullable<TimeDuration>& aNewStartTime);
Nullable<TimeDuration> GetCurrentTime() const;
AnimationPlayState PlayState() const;
virtual Promise* GetReady(ErrorResult& aRv);
@ -84,6 +85,7 @@ public:
// script but when called from script we (or one of our subclasses) perform
// extra steps such as flushing style or converting the return type.
Nullable<double> GetStartTimeAsDouble() const;
void SetStartTimeAsDouble(const Nullable<double>& aStartTime);
Nullable<double> GetCurrentTimeAsDouble() const;
virtual AnimationPlayState PlayStateFromJS() const { return PlayState(); }
virtual void PlayFromJS() { Play(); }

View File

@ -26,6 +26,18 @@ public:
return result;
}
static Nullable<TimeDuration>
DoubleToTimeDuration(const Nullable<double>& aTime)
{
Nullable<TimeDuration> result;
if (!aTime.IsNull()) {
result.SetValue(TimeDuration::FromMilliseconds(aTime.Value()));
}
return result;
}
};
} // namespace dom
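Together with TimeDurationToDouble() above, this gives the WebIDL layer a two-way conversion for nullable millisecond values; a minimal sketch (assumes Nullable<> and TimeDuration are in scope, as in this header):

// Illustrative round trip through the two helpers:
Nullable<double> ms;
ms.SetValue(250.0);
Nullable<TimeDuration> d = AnimationUtils::DoubleToTimeDuration(ms);
MOZ_ASSERT(!d.IsNull());
// Values agree up to TimeDuration's tick granularity; null stays null
// in both directions.
double roundTripped = AnimationUtils::TimeDurationToDouble(d).Value();
MOZ_ASSERT(fabs(roundTripped - 250.0) < 0.001);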

View File

@ -1184,6 +1184,10 @@ FragmentOrElement::DestroyContent()
// The child can remove itself from the parent in BindToTree.
mAttrsAndChildren.ChildAt(i)->DestroyContent();
}
ShadowRoot* shadowRoot = GetShadowRoot();
if (shadowRoot) {
shadowRoot->DestroyContent();
}
}
void

View File

@ -711,6 +711,15 @@ ShadowRoot::ContentRemoved(nsIDocument* aDocument,
}
}
void
ShadowRoot::DestroyContent()
{
if (mOlderShadow) {
mOlderShadow->DestroyContent();
}
DocumentFragment::DestroyContent();
}
NS_IMPL_CYCLE_COLLECTION_INHERITED(ShadowRootStyleSheetList, StyleSheetList,
mShadowRoot)

View File

@ -133,6 +133,8 @@ public:
{
mIsComposedDocParticipant = aIsComposedDocParticipant;
}
virtual void DestroyContent() MOZ_OVERRIDE;
protected:
virtual ~ShadowRoot();

View File

@ -19,6 +19,13 @@ namespace dom {
static bool
ShouldExposeChildWindow(nsString& aNameBeingResolved, nsIDOMWindow *aChild)
{
nsCOMPtr<nsPIDOMWindow> piWin = do_QueryInterface(aChild);
NS_ENSURE_TRUE(piWin, false);
Element* e = piWin->GetFrameElementInternal();
if (e && e->IsInShadowTree()) {
return false;
}
// If we're same-origin with the child, go ahead and expose it.
nsCOMPtr<nsIScriptObjectPrincipal> sop = do_QueryInterface(aChild);
NS_ENSURE_TRUE(sop, false);
@ -63,9 +70,6 @@ ShouldExposeChildWindow(nsString& aNameBeingResolved, nsIDOMWindow *aChild)
// allow the child to arbitrarily pollute the parent namespace, and requires
// cross-origin communication only in a limited set of cases that can be
// computed independently by the parent.
nsCOMPtr<nsPIDOMWindow> piWin = do_QueryInterface(aChild);
NS_ENSURE_TRUE(piWin, false);
Element* e = piWin->GetFrameElementInternal();
return e && e->AttrValueIs(kNameSpaceID_None, nsGkAtoms::name,
aNameBeingResolved, eCaseMatters);
}

View File

@ -6,9 +6,7 @@
#include "nsIVariant.h"
#include "nsIInputStream.h"
#include "mozilla/dom/File.h"
#include "mozilla/dom/File.h"
#include "mozilla/dom/HTMLFormElement.h"
#include "mozilla/dom/FormDataBinding.h"
using namespace mozilla;
using namespace mozilla::dom;
@ -86,6 +84,119 @@ nsFormData::Append(const nsAString& aName, File& aBlob,
AddNameFilePair(aName, &aBlob, filename);
}
void
nsFormData::Delete(const nsAString& aName)
{
// We have to use this slightly awkward for loop since a uint32_t >= 0
// comparison is always true, which the compiler rejects as an error.
for (uint32_t i = mFormData.Length(); i-- > 0; ) {
if (aName.Equals(mFormData[i].name)) {
mFormData.RemoveElementAt(i);
}
}
}
void
nsFormData::ExtractValue(const FormDataTuple& aTuple,
OwningFileOrUSVString* aOutValue)
{
if (aTuple.valueIsFile) {
aOutValue->SetAsFile() = aTuple.fileValue;
} else {
aOutValue->SetAsUSVString() = aTuple.stringValue;
}
}
void
nsFormData::Get(const nsAString& aName,
Nullable<OwningFileOrUSVString>& aOutValue)
{
for (uint32_t i = 0; i < mFormData.Length(); ++i) {
if (aName.Equals(mFormData[i].name)) {
ExtractValue(mFormData[i], &aOutValue.SetValue());
return;
}
}
aOutValue.SetNull();
}
void
nsFormData::GetAll(const nsAString& aName,
nsTArray<OwningFileOrUSVString>& aValues)
{
for (uint32_t i = 0; i < mFormData.Length(); ++i) {
if (aName.Equals(mFormData[i].name)) {
OwningFileOrUSVString* element = aValues.AppendElement();
ExtractValue(mFormData[i], element);
}
}
}
bool
nsFormData::Has(const nsAString& aName)
{
for (uint32_t i = 0; i < mFormData.Length(); ++i) {
if (aName.Equals(mFormData[i].name)) {
return true;
}
}
return false;
}
nsFormData::FormDataTuple*
nsFormData::RemoveAllOthersAndGetFirstFormDataTuple(const nsAString& aName)
{
FormDataTuple* lastFoundTuple = nullptr;
uint32_t lastFoundIndex = mFormData.Length();
// We have to use this slightly awkward for loop since a uint32_t >= 0
// comparison is always true, which the compiler rejects as an error.
for (uint32_t i = mFormData.Length(); i-- > 0; ) {
if (aName.Equals(mFormData[i].name)) {
if (lastFoundTuple) {
// The one we found earlier was not the first one, so we can remove it.
mFormData.RemoveElementAt(lastFoundIndex);
}
lastFoundTuple = &mFormData[i];
lastFoundIndex = i;
}
}
return lastFoundTuple;
}
void
nsFormData::Set(const nsAString& aName, File& aBlob,
const Optional<nsAString>& aFilename)
{
FormDataTuple* tuple = RemoveAllOthersAndGetFirstFormDataTuple(aName);
if (tuple) {
nsAutoString filename;
if (aFilename.WasPassed()) {
filename = aFilename.Value();
} else {
filename.SetIsVoid(true);
}
SetNameFilePair(tuple, aName, &aBlob, filename);
} else {
Append(aName, aBlob, aFilename);
}
}
void
nsFormData::Set(const nsAString& aName, const nsAString& aValue)
{
FormDataTuple* tuple = RemoveAllOthersAndGetFirstFormDataTuple(aName);
if (tuple) {
SetNameValuePair(tuple, aName, aValue);
} else {
Append(aName, aValue);
}
}
// -------------------------------------------------------------------------
// nsIDOMFormData
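The backwards loops above use the i-- > 0 idiom because an unsigned loop cannot terminate on i >= 0; a minimal sketch of why:

// for (uint32_t i = len - 1; i >= 0; --i)  // wrong: i >= 0 is always true,
//                                          // so i wraps past 0 to UINT32_MAX
for (uint32_t i = len; i-- > 0; ) {
  // The condition decrements after comparing, so the body sees
  // i = len - 1, ..., 1, 0 and the loop then exits cleanly.
}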

View File

@ -13,6 +13,7 @@
#include "nsTArray.h"
#include "mozilla/ErrorResult.h"
#include "mozilla/dom/BindingDeclarations.h"
#include "mozilla/dom/FormDataBinding.h"
namespace mozilla {
class ErrorResult;
@ -29,8 +30,48 @@ class nsFormData MOZ_FINAL : public nsIDOMFormData,
public nsFormSubmission,
public nsWrapperCache
{
private:
~nsFormData() {}
typedef mozilla::dom::File File;
struct FormDataTuple
{
nsString name;
nsString stringValue;
nsRefPtr<File> fileValue;
nsString filename;
bool valueIsFile;
};
// Returns the FormDataTuple to modify. This may be null, in which case
// no element with aName was found.
FormDataTuple*
RemoveAllOthersAndGetFirstFormDataTuple(const nsAString& aName);
void SetNameValuePair(FormDataTuple* aData,
const nsAString& aName,
const nsAString& aValue)
{
MOZ_ASSERT(aData);
aData->name = aName;
aData->stringValue = aValue;
aData->valueIsFile = false;
}
void SetNameFilePair(FormDataTuple* aData,
const nsAString& aName,
File* aBlob,
const nsAString& aFilename)
{
MOZ_ASSERT(aData);
aData->name = aName;
aData->fileValue = aBlob;
aData->filename = aFilename;
aData->valueIsFile = true;
}
void ExtractValue(const FormDataTuple& aTuple,
mozilla::dom::OwningFileOrUSVString* aOutValue);
public:
explicit nsFormData(nsISupports* aOwner = nullptr);
@ -55,8 +96,15 @@ public:
const mozilla::dom::Optional<mozilla::dom::NonNull<mozilla::dom::HTMLFormElement> >& aFormElement,
mozilla::ErrorResult& aRv);
void Append(const nsAString& aName, const nsAString& aValue);
void Append(const nsAString& aName, mozilla::dom::File& aBlob,
void Append(const nsAString& aName, File& aBlob,
const mozilla::dom::Optional<nsAString>& aFilename);
void Delete(const nsAString& aName);
void Get(const nsAString& aName, mozilla::dom::Nullable<mozilla::dom::OwningFileOrUSVString>& aOutValue);
void GetAll(const nsAString& aName, nsTArray<mozilla::dom::OwningFileOrUSVString>& aValues);
bool Has(const nsAString& aName);
void Set(const nsAString& aName, File& aBlob,
const mozilla::dom::Optional<nsAString>& aFilename);
void Set(const nsAString& aName, const nsAString& aValue);
// nsFormSubmission
virtual nsresult GetEncodedSubmission(nsIURI* aURI,
@ -65,35 +113,20 @@ public:
const nsAString& aValue) MOZ_OVERRIDE
{
FormDataTuple* data = mFormData.AppendElement();
data->name = aName;
data->stringValue = aValue;
data->valueIsFile = false;
SetNameValuePair(data, aName, aValue);
return NS_OK;
}
virtual nsresult AddNameFilePair(const nsAString& aName,
nsIDOMBlob* aBlob,
File* aBlob,
const nsString& aFilename) MOZ_OVERRIDE
{
FormDataTuple* data = mFormData.AppendElement();
data->name = aName;
data->fileValue = aBlob;
data->filename = aFilename;
data->valueIsFile = true;
SetNameFilePair(data, aName, aBlob, aFilename);
return NS_OK;
}
private:
nsCOMPtr<nsISupports> mOwner;
struct FormDataTuple
{
nsString name;
nsString stringValue;
nsCOMPtr<nsIDOMBlob> fileValue;
nsString filename;
bool valueIsFile;
};
nsTArray<FormDataTuple> mFormData;
};

View File

@ -194,7 +194,7 @@ nsFrameLoader::Create(Element* aOwner, bool aNetworkCreated)
NS_ENSURE_TRUE(aOwner, nullptr);
nsIDocument* doc = aOwner->OwnerDoc();
NS_ENSURE_TRUE(!doc->IsResourceDoc() &&
((!doc->IsLoadedAsData() && aOwner->GetUncomposedDoc()) ||
((!doc->IsLoadedAsData() && aOwner->GetComposedDoc()) ||
doc->IsStaticDocument()),
nullptr);
@ -349,7 +349,7 @@ private:
nsresult
nsFrameLoader::ReallyStartLoadingInternal()
{
NS_ENSURE_STATE(mURIToLoad && mOwnerContent && mOwnerContent->IsInDoc());
NS_ENSURE_STATE(mURIToLoad && mOwnerContent && mOwnerContent->IsInComposedDoc());
PROFILER_LABEL("nsFrameLoader", "ReallyStartLoading",
js::ProfileEntry::Category::OTHER);
@ -876,12 +876,12 @@ nsFrameLoader::ShowRemoteFrame(const nsIntSize& size,
// want here. For now, hack.
if (!mRemoteBrowserShown) {
if (!mOwnerContent ||
!mOwnerContent->GetUncomposedDoc()) {
!mOwnerContent->GetComposedDoc()) {
return false;
}
nsRefPtr<layers::LayerManager> layerManager =
nsContentUtils::LayerManagerForDocument(mOwnerContent->GetUncomposedDoc());
nsContentUtils::LayerManagerForDocument(mOwnerContent->GetComposedDoc());
if (!layerManager) {
// This is just not going to work.
return false;
@ -972,8 +972,8 @@ nsFrameLoader::SwapWithOtherRemoteLoader(nsFrameLoader* aOther,
return NS_ERROR_DOM_SECURITY_ERR;
}
nsIDocument* ourDoc = ourContent->GetCurrentDoc();
nsIDocument* otherDoc = otherContent->GetCurrentDoc();
nsIDocument* ourDoc = ourContent->GetComposedDoc();
nsIDocument* otherDoc = otherContent->GetComposedDoc();
if (!ourDoc || !otherDoc) {
// Again, how odd, given that we had docshells
return NS_ERROR_NOT_IMPLEMENTED;
@ -1191,8 +1191,8 @@ nsFrameLoader::SwapWithOtherLoader(nsFrameLoader* aOther,
otherChildDocument->GetParentDocument();
// Make sure to swap docshells between the two frames.
nsIDocument* ourDoc = ourContent->GetUncomposedDoc();
nsIDocument* otherDoc = otherContent->GetUncomposedDoc();
nsIDocument* ourDoc = ourContent->GetComposedDoc();
nsIDocument* otherDoc = otherContent->GetComposedDoc();
if (!ourDoc || !otherDoc) {
// Again, how odd, given that we had docshells
return NS_ERROR_NOT_IMPLEMENTED;
@ -1612,7 +1612,7 @@ nsFrameLoader::MaybeCreateDocShell()
// XXXbz this is such a total hack.... We really need to have a
// better setup for doing this.
nsIDocument* doc = mOwnerContent->OwnerDoc();
if (!(doc->IsStaticDocument() || mOwnerContent->IsInDoc())) {
if (!(doc->IsStaticDocument() || mOwnerContent->IsInComposedDoc())) {
return NS_ERROR_UNEXPECTED;
}

View File

@ -122,7 +122,7 @@ IsJavaMIME(const nsACString & aMIMEType)
static bool
InActiveDocument(nsIContent *aContent)
{
if (!aContent->IsInDoc()) {
if (!aContent->IsInComposedDoc()) {
return false;
}
nsIDocument *doc = aContent->OwnerDoc();
@ -3315,11 +3315,10 @@ nsObjectLoadingContent::GetContentDocument()
nsCOMPtr<nsIContent> thisContent =
do_QueryInterface(static_cast<nsIImageLoadingContent*>(this));
if (!thisContent->IsInDoc()) {
if (!thisContent->IsInComposedDoc()) {
return nullptr;
}
// XXXbz should this use GetComposedDoc()? sXBL/XBL2 issue!
nsIDocument *sub_doc = thisContent->OwnerDoc()->GetSubDocumentFor(thisContent);
if (!sub_doc) {
return nullptr;

View File

@ -767,6 +767,7 @@ support-files = file_bug503473-frame.sjs
skip-if = buildapp == 'b2g' || e10s
support-files = file_bug1011748_redirect.sjs file_bug1011748_OK.sjs
[test_bug1025933.html]
[test_bug1037687.html]
[test_element.matches.html]
[test_user_select.html]
skip-if = buildapp == 'mulet' || buildapp == 'b2g' || toolkit == 'android'

View File

@ -0,0 +1,63 @@
<!DOCTYPE HTML>
<html>
<!--
https://bugzilla.mozilla.org/show_bug.cgi?id=1037687
-->
<head>
<meta charset="utf-8">
<title>Test for Bug 1037687</title>
<script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
<script type="application/javascript">
/** Test for Bug 1037687 **/
SimpleTest.waitForExplicitFinish();
var host;
var sr;
var embed;
var object;
var iframe;
var resourceLoadCount = 0;
function resourceLoaded(event) {
++resourceLoadCount;
ok(true, event.target + " got " + event.type);
if (resourceLoadCount == 3) {
SimpleTest.finish();
}
}
function createResource(sr, type) {
var el = document.createElement(type);
var attrName = type == "object" ? "data" : "src";
el.setAttribute(attrName, "file_mozfiledataurl_img.jpg");
el.onload = resourceLoaded;
var info = document.createElement("div");
info.textContent = type;
sr.appendChild(info);
sr.appendChild(el);
return el;
}
function test() {
host = document.getElementById("host");
sr = host.createShadowRoot();
embed = createResource(sr, "embed");
object = createResource(sr, "object");
iframe = createResource(sr, "iframe");
}
</script>
</head>
<body onload="test()">
<a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=1037687">Mozilla Bug 1037687</a>
<p id="display"></p>
<div id="content" style="display: none">
</div>
<pre id="test">
</pre>
<div id="host"></div>
</body>
</html>

View File

@ -68,7 +68,7 @@ HTMLObjectElement::DoneAddingChildren(bool aHaveNotified)
// If we're already in a document, we need to trigger the load
// Otherwise, BindToTree takes care of that.
if (IsInDoc()) {
if (IsInComposedDoc()) {
StartObjectLoad(aHaveNotified);
}
}
@ -231,7 +231,7 @@ HTMLObjectElement::SetAttr(int32_t aNameSpaceID, nsIAtom *aName,
// We also don't want to start loading the object when we're not yet in
// a document, just in case that the caller wants to set additional
// attributes before inserting the node into the document.
if (aNotify && IsInDoc() && mIsDoneAddingChildren &&
if (aNotify && IsInComposedDoc() && mIsDoneAddingChildren &&
aNameSpaceID == kNameSpaceID_None && aName == nsGkAtoms::data) {
return LoadObject(aNotify, true);
}
@ -248,7 +248,7 @@ HTMLObjectElement::UnsetAttr(int32_t aNameSpaceID, nsIAtom* aAttribute,
NS_ENSURE_SUCCESS(rv, rv);
// See comment in SetAttr
if (aNotify && IsInDoc() && mIsDoneAddingChildren &&
if (aNotify && IsInComposedDoc() && mIsDoneAddingChildren &&
aNameSpaceID == kNameSpaceID_None && aAttribute == nsGkAtoms::data) {
return LoadObject(aNotify, true);
}
@ -460,7 +460,7 @@ HTMLObjectElement::StartObjectLoad(bool aNotify)
{
// BindToTree can call us asynchronously, and we may be removed from the tree
// in the interim
if (!IsInDoc() || !OwnerDoc()->IsActive()) {
if (!IsInComposedDoc() || !OwnerDoc()->IsActive()) {
return;
}

View File

@ -80,7 +80,7 @@ HTMLSharedObjectElement::DoneAddingChildren(bool aHaveNotified)
// If we're already in a document, we need to trigger the load
// Otherwise, BindToTree takes care of that.
if (IsInDoc()) {
if (IsInComposedDoc()) {
StartObjectLoad(aHaveNotified);
}
}
@ -180,7 +180,7 @@ HTMLSharedObjectElement::SetAttr(int32_t aNameSpaceID, nsIAtom *aName,
// We also don't want to start loading the object when we're not yet in
// a document, just in case that the caller wants to set additional
// attributes before inserting the node into the document.
if (aNotify && IsInDoc() && mIsDoneAddingChildren &&
if (aNotify && IsInComposedDoc() && mIsDoneAddingChildren &&
aNameSpaceID == kNameSpaceID_None && aName == URIAttrName()) {
return LoadObject(aNotify, true);
}
@ -313,7 +313,7 @@ HTMLSharedObjectElement::StartObjectLoad(bool aNotify)
{
// BindToTree can call us asynchronously, and we may be removed from the tree
// in the interim
if (!IsInDoc() || !OwnerDoc()->IsActive()) {
if (!IsInComposedDoc() || !OwnerDoc()->IsActive()) {
return;
}

View File

@ -38,6 +38,7 @@
#include "nsContentUtils.h"
#include "mozilla/dom/EncodingUtils.h"
#include "mozilla/dom/File.h"
using namespace mozilla;
using mozilla::dom::EncodingUtils;
@ -78,7 +79,7 @@ public:
virtual nsresult AddNameValuePair(const nsAString& aName,
const nsAString& aValue);
virtual nsresult AddNameFilePair(const nsAString& aName,
nsIDOMBlob* aBlob,
File* aBlob,
const nsString& aFilename);
virtual nsresult GetEncodedSubmission(nsIURI* aURI,
nsIInputStream** aPostDataStream);
@ -165,7 +166,7 @@ nsFSURLEncoded::AddIsindex(const nsAString& aValue)
nsresult
nsFSURLEncoded::AddNameFilePair(const nsAString& aName,
nsIDOMBlob* aBlob,
File* aBlob,
const nsString& aFilename)
{
if (!mWarnedFileControl) {
@ -174,9 +175,8 @@ nsFSURLEncoded::AddNameFilePair(const nsAString& aName,
}
nsAutoString filename;
nsCOMPtr<nsIDOMFile> file = do_QueryInterface(aBlob);
if (file) {
file->GetName(filename);
if (aBlob && aBlob->IsFile()) {
aBlob->GetName(filename);
}
return AddNameValuePair(aName, filename);
@ -441,7 +441,7 @@ nsFSMultipartFormData::AddNameValuePair(const nsAString& aName,
nsresult
nsFSMultipartFormData::AddNameFilePair(const nsAString& aName,
nsIDOMBlob* aBlob,
File* aBlob,
const nsString& aFilename)
{
// Encode the control name
@ -459,9 +459,8 @@ nsFSMultipartFormData::AddNameFilePair(const nsAString& aName,
} else {
// Get and encode the filename
nsAutoString filename16;
nsCOMPtr<nsIDOMFile> file = do_QueryInterface(aBlob);
if (file) {
rv = file->GetName(filename16);
if (aBlob->IsFile()) {
rv = aBlob->GetName(filename16);
NS_ENSURE_SUCCESS(rv, rv);
}
@ -469,7 +468,7 @@ nsFSMultipartFormData::AddNameFilePair(const nsAString& aName,
filename16.AssignLiteral("blob");
} else {
nsAutoString filepath16;
rv = file->GetPath(filepath16);
rv = aBlob->GetPath(filepath16);
NS_ENSURE_SUCCESS(rv, rv);
if (!filepath16.IsEmpty()) {
// File.path includes trailing "/"
@ -598,7 +597,7 @@ public:
virtual nsresult AddNameValuePair(const nsAString& aName,
const nsAString& aValue);
virtual nsresult AddNameFilePair(const nsAString& aName,
nsIDOMBlob* aBlob,
File* aBlob,
const nsString& aFilename);
virtual nsresult GetEncodedSubmission(nsIURI* aURI,
nsIInputStream** aPostDataStream);
@ -622,13 +621,12 @@ nsFSTextPlain::AddNameValuePair(const nsAString& aName,
nsresult
nsFSTextPlain::AddNameFilePair(const nsAString& aName,
nsIDOMBlob* aBlob,
File* aBlob,
const nsString& aFilename)
{
nsAutoString filename;
nsCOMPtr<nsIDOMFile> file = do_QueryInterface(aBlob);
if (file) {
file->GetName(filename);
if (aBlob && aBlob->IsFile()) {
aBlob->GetName(filename);
}
AddNameValuePair(aName, filename);

View File

@ -20,7 +20,12 @@ class nsIDocShell;
class nsIRequest;
class nsISaveAsCharset;
class nsIMultiplexInputStream;
class nsIDOMBlob;
namespace mozilla {
namespace dom {
class File;
} // namespace dom
} // namespace mozilla
/**
* Class for form submissions; encompasses the function to call to submit as
@ -51,9 +56,9 @@ public:
* @param aFilename the filename to be used (not void)
*/
virtual nsresult AddNameFilePair(const nsAString& aName,
nsIDOMBlob* aBlob,
mozilla::dom::File* aBlob,
const nsString& aFilename) = 0;
/**
* Reports whether the instance supports AddIsindex().
*
@ -161,7 +166,7 @@ public:
virtual nsresult AddNameValuePair(const nsAString& aName,
const nsAString& aValue) MOZ_OVERRIDE;
virtual nsresult AddNameFilePair(const nsAString& aName,
nsIDOMBlob* aBlob,
mozilla::dom::File* aBlob,
const nsString& aFilename) MOZ_OVERRIDE;
virtual nsresult GetEncodedSubmission(nsIURI* aURI,
nsIInputStream** aPostDataStream) MOZ_OVERRIDE;

View File

@ -131,7 +131,7 @@ nsGenericHTMLFrameElement::GetContentWindow()
void
nsGenericHTMLFrameElement::EnsureFrameLoader()
{
if (!IsInDoc() || mFrameLoader || mFrameLoaderCreationDisallowed) {
if (!IsInComposedDoc() || mFrameLoader || mFrameLoaderCreationDisallowed) {
// If frame loader is there, we just keep it around, cached
return;
}
@ -221,7 +221,7 @@ nsGenericHTMLFrameElement::BindToTree(nsIDocument* aDocument,
aCompileEventHandlers);
NS_ENSURE_SUCCESS(rv, rv);
if (aDocument) {
if (IsInComposedDoc()) {
NS_ASSERTION(!nsContentUtils::IsSafeToRunScript(),
"Missing a script blocker!");

View File

@ -14,7 +14,90 @@ https://bugzilla.mozilla.org/show_bug.cgi?id=690659
<script type="text/javascript">
SimpleTest.waitForExplicitFinish();
function runTest() {
function testHas() {
var f = new FormData();
f.append("foo", "bar");
f.append("another", "value");
ok(f.has("foo"), "has() on existing name should be true.");
ok(f.has("another"), "has() on existing name should be true.");
ok(!f.has("nonexistent"), "has() on non-existent name should be false.");
}
function testGet() {
var f = new FormData();
f.append("foo", "bar");
f.append("foo", "bar2");
f.append("blob", new Blob(["hey"], { type: 'text/plain' }));
f.append("file", new File(["hey"], 'testname', {type: 'text/plain'}));
is(f.get("foo"), "bar", "get() on existing name should return first value");
ok(f.get("blob") instanceof Blob, "get() on existing name should return first value");
is(f.get("blob").type, 'text/plain', "get() on existing name should return first value");
ok(f.get("file") instanceof File, "get() on existing name should return first value");
is(f.get("file").name, 'testname', "get() on existing name should return first value");
is(f.get("nonexistent"), null, "get() on non-existent name should return null.");
}
function testGetAll() {
var f = new FormData();
f.append("other", "value");
f.append("foo", "bar");
f.append("foo", "bar2");
f.append("foo", new Blob(["hey"], { type: 'text/plain' }));
var arr = f.getAll("foo");
is(arr.length, 3, "getAll() should retrieve all matching entries.");
is(arr[0], "bar", "values should match and be in order");
is(arr[1], "bar2", "values should match and be in order");
ok(arr[2] instanceof Blob, "values should match and be in order");
is(f.get("nonexistent"), null, "get() on non-existent name should return null.");
}
function testDelete() {
var f = new FormData();
f.append("other", "value");
f.append("foo", "bar");
f.append("foo", "bar2");
f.append("foo", new Blob(["hey"], { type: 'text/plain' }));
ok(f.has("foo"), "has() on existing name should be true.");
f.delete("foo");
ok(!f.has("foo"), "has() on deleted name should be false.");
is(f.getAll("foo").length, 0, "all entries should be deleted.");
is(f.getAll("other").length, 1, "other names should still be there.");
f.delete("other");
is(f.getAll("other").length, 0, "all entries should be deleted.");
}
function testSet() {
var f = new FormData();
f.set("other", "value");
ok(f.has("other"), "set() on new name should be similar to append()");
is(f.getAll("other").length, 1, "set() on new name should be similar to append()");
f.append("other", "value2");
is(f.getAll("other").length, 2, "append() should not replace existing entries.");
f.append("foo", "bar");
f.append("other", "value3");
f.append("other", "value3");
f.append("other", "value3");
is(f.getAll("other").length, 5, "append() should not replace existing entries.");
f.set("other", "value4");
is(f.getAll("other").length, 1, "set() should replace existing entries.");
is(f.getAll("other")[0], "value4", "set() should replace existing entries.");
}
function testIterate() {
todo(false, "Implement this in Bug 1085284.");
}
function testSend() {
var xhr = new XMLHttpRequest();
xhr.open("POST", "form_submit_server.sjs");
xhr.onload = function () {
@ -62,6 +145,17 @@ function runTest() {
xhr.send(fd);
}
function runTest() {
testHas();
testGet();
testGetAll();
testDelete();
testSet();
testIterate();
// Finally, send an XHR and verify the response matches.
testSend();
}
runTest();
</script>
</body>

View File

@ -836,6 +836,24 @@ policies and contribution forms [3].
}
expose(assert_greater_than, "assert_greater_than");
function assert_between_exclusive(actual, lower, upper, description)
{
/*
* Test if a primitive number is between two others
*/
assert(typeof actual === "number",
"assert_between_exclusive", description,
"expected a number but got a ${type_actual}",
{type_actual:typeof actual});
assert(actual > lower && actual < upper,
"assert_between_exclusive", description,
"expected a number greater than ${lower} " +
"and less than ${upper} but got ${actual}",
{lower:lower, upper:upper, actual:actual});
}
expose(assert_between_exclusive, "assert_between_exclusive");
function assert_less_than_equal(actual, expected, description)
{
/*
@ -870,6 +888,24 @@ policies and contribution forms [3].
}
expose(assert_greater_than_equal, "assert_greater_than_equal");
function assert_between_inclusive(actual, lower, upper, description)
{
/*
* Test if a primitive number is between two others or equal to either of them
*/
assert(typeof actual === "number",
"assert_between_inclusive", description,
"expected a number but got a ${type_actual}",
{type_actual:typeof actual});
assert(actual >= lower && actual <= upper,
"assert_between_inclusive", description,
"expected a number greater than or equal to ${lower} " +
"and less than or equal to ${upper} but got ${actual}",
{lower:lower, upper:upper, actual:actual});
}
expose(assert_between_inclusive, "assert_between_inclusive");
function assert_regexp_match(actual, expected, description) {
/*
* Test if a string (actual) matches a regexp (expected)
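A usage sketch for the two new assertions (the test body is illustrative, not part of this patch):

test(function() {
  var ratio = 0.75;
  assert_between_exclusive(ratio, 0, 1, "strictly inside (0, 1)");
  assert_between_inclusive(ratio, 0.75, 1, "may equal a bound of [0.75, 1]");
}, "assert_between sketch");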

View File

@ -535,22 +535,18 @@ MediaStreamGraphImpl::UpdateStreamOrder()
// If this is an AudioNodeStream, force an AudioCallbackDriver.
if (stream->AsAudioNodeStream()) {
audioTrackPresent = true;
}
for (StreamBuffer::TrackIter tracks(stream->GetStreamBuffer(), MediaSegment::AUDIO);
!tracks.IsEnded(); tracks.Next()) {
audioTrackPresent = true;
} else {
for (StreamBuffer::TrackIter tracks(stream->GetStreamBuffer(), MediaSegment::AUDIO);
!tracks.IsEnded(); tracks.Next()) {
audioTrackPresent = true;
}
}
}
if (!audioTrackPresent &&
CurrentDriver()->AsAudioCallbackDriver()) {
bool started;
{
MonitorAutoLock mon(mMonitor);
started = CurrentDriver()->AsAudioCallbackDriver()->IsStarted();
}
if (started) {
MonitorAutoLock mon(mMonitor);
MonitorAutoLock mon(mMonitor);
if (CurrentDriver()->AsAudioCallbackDriver()->IsStarted()) {
if (mLifecycleState == LIFECYCLE_RUNNING) {
SystemClockDriver* driver = new SystemClockDriver(this);
CurrentDriver()->SwitchAtNextIteration(driver);
@ -570,6 +566,12 @@ MediaStreamGraphImpl::UpdateStreamOrder()
}
#endif
if (!mStreamOrderDirty) {
return;
}
mStreamOrderDirty = false;
// The algorithm for finding cycles is based on Tim Leslie's iterative
// implementation [1][2] of Pearce's variant [3] of Tarjan's strongly
// connected components (SCC) algorithm. There are variations (a) to
@ -1262,9 +1264,7 @@ MediaStreamGraphImpl::UpdateGraph(GraphTime aEndBlockingDecision)
}
mFrontMessageQueue.Clear();
if (mStreamOrderDirty) {
UpdateStreamOrder();
}
UpdateStreamOrder();
bool ensureNextIteration = false;

View File

@ -20,7 +20,7 @@ interface AnimationPlayer {
readonly attribute Animation? source;
readonly attribute AnimationTimeline timeline;
[BinaryName="startTimeAsDouble"]
readonly attribute double? startTime;
attribute double? startTime;
[BinaryName="currentTimeAsDouble"]
readonly attribute double? currentTime;
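With startTime writable, script can seek a player directly; a hedged sketch of what this enables (obtaining the player via getAnimationPlayers() assumes the animations API is enabled):

var player = elem.getAnimationPlayers()[0];
// Make the player's time zero coincide with the timeline's current time:
player.startTime = player.timeline.currentTime;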

View File

@ -7,8 +7,17 @@
* http://xhr.spec.whatwg.org
*/
typedef (File or USVString) FormDataEntryValue;
[Constructor(optional HTMLFormElement form)]
interface FormData {
void append(DOMString name, Blob value, optional DOMString filename);
void append(DOMString name, DOMString value);
void append(USVString name, Blob value, optional USVString filename);
void append(USVString name, USVString value);
void delete(USVString name);
FormDataEntryValue? get(USVString name);
sequence<FormDataEntryValue> getAll(USVString name);
boolean has(USVString name);
void set(USVString name, Blob value, optional USVString filename);
void set(USVString name, USVString value);
// iterable<USVString, FormDataEntryValue>; - Bug 1127703
};

View File

@ -660,14 +660,17 @@ nsPNGEncoder::ConvertHostARGBRow(const uint8_t* aSrc, uint8_t* aDest,
uint8_t* pixelOut = &aDest[x * pixelStride];
uint8_t alpha = (pixelIn & 0xff000000) >> 24;
if (alpha == 0) {
pixelOut[0] = pixelOut[1] = pixelOut[2] = pixelOut[3] = 0;
pixelOut[pixelStride - 1] = alpha; // overwritten below if pixelStride == 3
if (alpha == 255) {
pixelOut[0] = (pixelIn & 0xff0000) >> 16;
pixelOut[1] = (pixelIn & 0x00ff00) >> 8;
pixelOut[2] = (pixelIn & 0x0000ff) ;
} else if (alpha == 0) {
pixelOut[0] = pixelOut[1] = pixelOut[2] = 0;
} else {
pixelOut[0] = (((pixelIn & 0xff0000) >> 16) * 255 + alpha / 2) / alpha;
pixelOut[1] = (((pixelIn & 0x00ff00) >> 8) * 255 + alpha / 2) / alpha;
pixelOut[2] = (((pixelIn & 0x0000ff) >> 0) * 255 + alpha / 2) / alpha;
if (aUseTransparency)
pixelOut[3] = alpha;
pixelOut[2] = (((pixelIn & 0x0000ff) ) * 255 + alpha / 2) / alpha;
}
}
}
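The + alpha / 2 term makes the un-premultiply division round to nearest instead of truncating; a worked example (values illustrative):

// Premultiplied red 0x40 at alpha 0x80 (50% opacity):
//   (0x40 * 255 + 0x80 / 2) / 0x80 = (16320 + 64) / 128 = 128
// recovering the original ~0x80 red; plain truncation would yield 127.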

View File

@ -250,7 +250,7 @@ struct HeapAccessOffset
const AsmJSHeapAccessVector &accesses;
explicit HeapAccessOffset(const AsmJSHeapAccessVector &accesses) : accesses(accesses) {}
uintptr_t operator[](size_t index) const {
return accesses[index].offset();
return accesses[index].insnOffset();
}
};
@ -328,7 +328,7 @@ AsmJSModule::finish(ExclusiveContext *cx, TokenStream &tokenStream, MacroAssembl
pod.functionBytes_ = masm.actualOffset(pod.functionBytes_);
for (size_t i = 0; i < heapAccesses_.length(); i++) {
AsmJSHeapAccess &a = heapAccesses_[i];
a.setOffset(masm.actualOffset(a.offset()));
a.setInsnOffset(masm.actualOffset(a.insnOffset()));
}
for (unsigned i = 0; i < numExportedFunctions(); i++) {
if (!exportedFunction(i).isChangeHeap())
@ -774,16 +774,6 @@ AsmJSModule::staticallyLink(ExclusiveContext *cx)
MOZ_ASSERT(isStaticallyLinked());
}
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
static size_t
ByteSizeOfHeapAccess(const jit::AsmJSHeapAccess access)
{
Scalar::Type type = access.type();
if (Scalar::isSimdType(type))
return Scalar::scalarByteSize(type) * access.numSimdElems();
return TypedArrayElemSize(type);
}
#endif
void
AsmJSModule::initHeap(Handle<ArrayBufferObjectMaybeShared *> heap, JSContext *cx)
{
@ -797,18 +787,17 @@ AsmJSModule::initHeap(Handle<ArrayBufferObjectMaybeShared *> heap, JSContext *cx
#if defined(JS_CODEGEN_X86)
uint8_t *heapOffset = heap->dataPointer();
uint32_t heapLength = heap->byteLength();
for (unsigned i = 0; i < heapAccesses_.length(); i++) {
const jit::AsmJSHeapAccess &access = heapAccesses_[i];
if (access.hasLengthCheck()) {
// An access is out-of-bounds iff
// ptr + data-type-byte-size > heapLength
// i.e. ptr >= heapLength + 1 - data-type-byte-size
// (Note that we need >= as this is what codegen uses.)
size_t scalarByteSize = ByteSizeOfHeapAccess(access);
X86Encoding::SetPointer(access.patchLengthAt(code_),
(void*)(heap->byteLength() + 1 - scalarByteSize));
}
void *addr = access.patchOffsetAt(code_);
// An access is out-of-bounds iff
// ptr + offset + data-type-byte-size > heapLength
// i.e. ptr > heapLength - data-type-byte-size - offset.
// data-type-byte-size and offset are already included in the addend
// so we just have to add the heap length here.
if (access.hasLengthCheck())
X86Encoding::AddInt32(access.patchLengthAt(code_), heapLength);
void *addr = access.patchHeapPtrImmAt(code_);
uint32_t disp = reinterpret_cast<uint32_t>(X86Encoding::GetPointer(addr));
MOZ_ASSERT(disp <= INT32_MAX);
X86Encoding::SetPointer(addr, (void *)(heapOffset + disp));
@ -821,20 +810,18 @@ AsmJSModule::initHeap(Handle<ArrayBufferObjectMaybeShared *> heap, JSContext *cx
// checks at the right places. All accesses that have been recorded are the
// only ones that need bound checks (see also
// CodeGeneratorX64::visitAsmJS{Load,Store,CompareExchange,AtomicBinop}Heap)
int32_t heapLength = int32_t(intptr_t(heap->byteLength()));
uint32_t heapLength = heap->byteLength();
for (size_t i = 0; i < heapAccesses_.length(); i++) {
const jit::AsmJSHeapAccess &access = heapAccesses_[i];
if (access.hasLengthCheck()) {
// See comment above for x86 codegen.
size_t scalarByteSize = ByteSizeOfHeapAccess(access);
X86Encoding::SetInt32(access.patchLengthAt(code_), heapLength + 1 - scalarByteSize);
}
// See comment above for x86 codegen.
if (access.hasLengthCheck())
X86Encoding::AddInt32(access.patchLengthAt(code_), heapLength);
}
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
uint32_t heapLength = heap->byteLength();
for (unsigned i = 0; i < heapAccesses_.length(); i++) {
jit::Assembler::UpdateBoundsCheck(heapLength,
(jit::Instruction*)(heapAccesses_[i].offset() + code_));
(jit::Instruction*)(heapAccesses_[i].insnOffset() + code_));
}
#endif
}
@ -846,14 +833,28 @@ AsmJSModule::restoreHeapToInitialState(ArrayBufferObjectMaybeShared *maybePrevBu
if (maybePrevBuffer) {
// Subtract out the base-pointer added by AsmJSModule::initHeap.
uint8_t *ptrBase = maybePrevBuffer->dataPointer();
uint32_t heapLength = maybePrevBuffer->byteLength();
for (unsigned i = 0; i < heapAccesses_.length(); i++) {
const jit::AsmJSHeapAccess &access = heapAccesses_[i];
void *addr = access.patchOffsetAt(code_);
// Subtract the heap length back out, leaving the raw displacement in place.
if (access.hasLengthCheck())
X86Encoding::AddInt32(access.patchLengthAt(code_), -heapLength);
void *addr = access.patchHeapPtrImmAt(code_);
uint8_t *ptr = reinterpret_cast<uint8_t*>(X86Encoding::GetPointer(addr));
MOZ_ASSERT(ptr >= ptrBase);
X86Encoding::SetPointer(addr, (void *)(ptr - ptrBase));
}
}
#elif defined(JS_CODEGEN_X64)
if (maybePrevBuffer) {
uint32_t heapLength = maybePrevBuffer->byteLength();
for (unsigned i = 0; i < heapAccesses_.length(); i++) {
const jit::AsmJSHeapAccess &access = heapAccesses_[i];
// See comment above for x86 codegen.
if (access.hasLengthCheck())
X86Encoding::AddInt32(access.patchLengthAt(code_), -heapLength);
}
}
#endif
maybeHeap_ = nullptr;
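Concretely, per the comments above, the bounds-check immediate already carries the -(offset + data-type-byte-size) part, so patching is just an add of the heap length; a worked example (numbers illustrative):

// A 4-byte load at constant offset 16 against a 64 KiB heap is out of
// bounds iff ptr + 16 + 4 > 65536, i.e. iff ptr > 65516. The encoded
// addend starts at -(16 + 4) = -20, so AddInt32(..., 65536) leaves a
// comparison against 65516; unlinking adds -65536 to restore -20.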

View File

@ -22,6 +22,7 @@
#include "mozilla/PodOperations.h"
#include "asmjs/AsmJSModule.h"
#include "jit/Disassembler.h"
#include "vm/Runtime.h"
using namespace js;
@ -308,6 +309,28 @@ enum { REG_EIP = 14 };
# define CONTEXT ucontext_t
#endif
// Define a context type for use in the emulator code. This is usually just
// the same as CONTEXT, but on Mac we use a different structure since we call
// into the emulator code from a Mach exception handler rather than a
// sigaction-style signal handler.
#if defined(XP_MACOSX)
# if defined(JS_CODEGEN_X64)
struct macos_x64_context {
x86_thread_state64_t thread;
x86_float_state64_t float_;
};
# define EMULATOR_CONTEXT macos_x64_context
# else
struct macos_x86_context {
x86_thread_state_t thread;
x86_float_state_t float_;
};
# define EMULATOR_CONTEXT macos_x86_context
# endif
#else
# define EMULATOR_CONTEXT CONTEXT
#endif
#if defined(JS_CPU_X64)
# define PC_sig(p) RIP_sig(p)
#elif defined(JS_CPU_X86)
@ -329,96 +352,368 @@ ContextToPC(CONTEXT *context)
}
#if defined(JS_CODEGEN_X64)
template <class T>
static void
SetXMMRegToNaN(Scalar::Type viewType, T *xmm_reg)
MOZ_COLD static void
SetFPRegToNaN(size_t size, void *fp_reg)
{
switch (viewType) {
case Scalar::Float32: {
JS_STATIC_ASSERT(sizeof(T) == 4 * sizeof(float));
float *floats = reinterpret_cast<float*>(xmm_reg);
floats[0] = GenericNaN();
floats[1] = 0;
floats[2] = 0;
floats[3] = 0;
break;
}
case Scalar::Float64: {
JS_STATIC_ASSERT(sizeof(T) == 2 * sizeof(double));
double *dbls = reinterpret_cast<double*>(xmm_reg);
dbls[0] = GenericNaN();
dbls[1] = 0;
break;
}
// Float32x4 and Int32x4 out of bounds are handled with the OutOfBounds stub.
case Scalar::Float32x4:
case Scalar::Int32x4:
case Scalar::Int8:
case Scalar::Uint8:
case Scalar::Int16:
case Scalar::Uint16:
case Scalar::Int32:
case Scalar::Uint32:
case Scalar::Uint8Clamped:
case Scalar::MaxTypedArrayViewType:
MOZ_CRASH("unexpected type in SetXMMRegToNaN");
MOZ_RELEASE_ASSERT(size <= Simd128DataSize);
memset(fp_reg, 0, Simd128DataSize);
switch (size) {
case 4: *static_cast<float *>(fp_reg) = GenericNaN(); break;
case 8: *static_cast<double *>(fp_reg) = GenericNaN(); break;
default:
// All SIMD accesses throw on OOB.
MOZ_CRASH("unexpected size in SetFPRegToNaN");
}
}
MOZ_COLD static void
SetGPRegToZero(void *gp_reg)
{
memset(gp_reg, 0, sizeof(intptr_t));
}
MOZ_COLD static void
SetFPRegToLoadedValue(const void *addr, size_t size, void *fp_reg)
{
MOZ_RELEASE_ASSERT(size <= Simd128DataSize);
memset(fp_reg, 0, Simd128DataSize);
memcpy(fp_reg, addr, size);
}
MOZ_COLD static void
SetGPRegToLoadedValue(const void *addr, size_t size, void *gp_reg)
{
MOZ_RELEASE_ASSERT(size <= sizeof(void *));
memset(gp_reg, 0, sizeof(void *));
memcpy(gp_reg, addr, size);
}
MOZ_COLD static void
SetGPRegToLoadedValueSext32(const void *addr, size_t size, void *gp_reg)
{
MOZ_RELEASE_ASSERT(size <= sizeof(int32_t));
int8_t msb = static_cast<const int8_t *>(addr)[size - 1];
memset(gp_reg, 0, sizeof(void *));
memset(gp_reg, msb >> 7, sizeof(int32_t));
memcpy(gp_reg, addr, size);
}
MOZ_COLD static void
StoreValueFromFPReg(void *addr, size_t size, const void *fp_reg)
{
MOZ_RELEASE_ASSERT(size <= Simd128DataSize);
memcpy(addr, fp_reg, size);
}
MOZ_COLD static void
StoreValueFromGPReg(void *addr, size_t size, const void *gp_reg)
{
MOZ_RELEASE_ASSERT(size <= sizeof(void *));
memcpy(addr, gp_reg, size);
}
MOZ_COLD static void
StoreValueFromGPImm(void *addr, size_t size, int32_t imm)
{
MOZ_RELEASE_ASSERT(size <= sizeof(imm));
memcpy(addr, &imm, size);
}
# if !defined(XP_MACOSX)
static void
SetRegisterToCoercedUndefined(CONTEXT *context, Scalar::Type viewType, AnyRegister reg)
MOZ_COLD static void *
AddressOfFPRegisterSlot(CONTEXT *context, FloatRegisters::Code code)
{
if (reg.isFloat()) {
switch (reg.fpu().code()) {
case X86Encoding::xmm0: SetXMMRegToNaN(viewType, &XMM_sig(context, 0)); break;
case X86Encoding::xmm1: SetXMMRegToNaN(viewType, &XMM_sig(context, 1)); break;
case X86Encoding::xmm2: SetXMMRegToNaN(viewType, &XMM_sig(context, 2)); break;
case X86Encoding::xmm3: SetXMMRegToNaN(viewType, &XMM_sig(context, 3)); break;
case X86Encoding::xmm4: SetXMMRegToNaN(viewType, &XMM_sig(context, 4)); break;
case X86Encoding::xmm5: SetXMMRegToNaN(viewType, &XMM_sig(context, 5)); break;
case X86Encoding::xmm6: SetXMMRegToNaN(viewType, &XMM_sig(context, 6)); break;
case X86Encoding::xmm7: SetXMMRegToNaN(viewType, &XMM_sig(context, 7)); break;
case X86Encoding::xmm8: SetXMMRegToNaN(viewType, &XMM_sig(context, 8)); break;
case X86Encoding::xmm9: SetXMMRegToNaN(viewType, &XMM_sig(context, 9)); break;
case X86Encoding::xmm10: SetXMMRegToNaN(viewType, &XMM_sig(context, 10)); break;
case X86Encoding::xmm11: SetXMMRegToNaN(viewType, &XMM_sig(context, 11)); break;
case X86Encoding::xmm12: SetXMMRegToNaN(viewType, &XMM_sig(context, 12)); break;
case X86Encoding::xmm13: SetXMMRegToNaN(viewType, &XMM_sig(context, 13)); break;
case X86Encoding::xmm14: SetXMMRegToNaN(viewType, &XMM_sig(context, 14)); break;
case X86Encoding::xmm15: SetXMMRegToNaN(viewType, &XMM_sig(context, 15)); break;
default: MOZ_CRASH();
}
} else {
switch (reg.gpr().code()) {
case X86Encoding::rax: RAX_sig(context) = 0; break;
case X86Encoding::rcx: RCX_sig(context) = 0; break;
case X86Encoding::rdx: RDX_sig(context) = 0; break;
case X86Encoding::rbx: RBX_sig(context) = 0; break;
case X86Encoding::rsp: RSP_sig(context) = 0; break;
case X86Encoding::rbp: RBP_sig(context) = 0; break;
case X86Encoding::rsi: RSI_sig(context) = 0; break;
case X86Encoding::rdi: RDI_sig(context) = 0; break;
case X86Encoding::r8: R8_sig(context) = 0; break;
case X86Encoding::r9: R9_sig(context) = 0; break;
case X86Encoding::r10: R10_sig(context) = 0; break;
case X86Encoding::r11: R11_sig(context) = 0; break;
case X86Encoding::r12: R12_sig(context) = 0; break;
case X86Encoding::r13: R13_sig(context) = 0; break;
case X86Encoding::r14: R14_sig(context) = 0; break;
case X86Encoding::r15: R15_sig(context) = 0; break;
default: MOZ_CRASH();
}
switch (code) {
case X86Encoding::xmm0: return &XMM_sig(context, 0);
case X86Encoding::xmm1: return &XMM_sig(context, 1);
case X86Encoding::xmm2: return &XMM_sig(context, 2);
case X86Encoding::xmm3: return &XMM_sig(context, 3);
case X86Encoding::xmm4: return &XMM_sig(context, 4);
case X86Encoding::xmm5: return &XMM_sig(context, 5);
case X86Encoding::xmm6: return &XMM_sig(context, 6);
case X86Encoding::xmm7: return &XMM_sig(context, 7);
case X86Encoding::xmm8: return &XMM_sig(context, 8);
case X86Encoding::xmm9: return &XMM_sig(context, 9);
case X86Encoding::xmm10: return &XMM_sig(context, 10);
case X86Encoding::xmm11: return &XMM_sig(context, 11);
case X86Encoding::xmm12: return &XMM_sig(context, 12);
case X86Encoding::xmm13: return &XMM_sig(context, 13);
case X86Encoding::xmm14: return &XMM_sig(context, 14);
case X86Encoding::xmm15: return &XMM_sig(context, 15);
default: break;
}
MOZ_CRASH();
}
MOZ_COLD static void *
AddressOfGPRegisterSlot(EMULATOR_CONTEXT *context, Registers::Code code)
{
switch (code) {
case X86Encoding::rax: return &RAX_sig(context);
case X86Encoding::rcx: return &RCX_sig(context);
case X86Encoding::rdx: return &RDX_sig(context);
case X86Encoding::rbx: return &RBX_sig(context);
case X86Encoding::rsp: return &RSP_sig(context);
case X86Encoding::rbp: return &RBP_sig(context);
case X86Encoding::rsi: return &RSI_sig(context);
case X86Encoding::rdi: return &RDI_sig(context);
case X86Encoding::r8: return &R8_sig(context);
case X86Encoding::r9: return &R9_sig(context);
case X86Encoding::r10: return &R10_sig(context);
case X86Encoding::r11: return &R11_sig(context);
case X86Encoding::r12: return &R12_sig(context);
case X86Encoding::r13: return &R13_sig(context);
case X86Encoding::r14: return &R14_sig(context);
case X86Encoding::r15: return &R15_sig(context);
default: break;
}
MOZ_CRASH();
}
# else
MOZ_COLD static void *
AddressOfFPRegisterSlot(EMULATOR_CONTEXT *context, FloatRegisters::Code code)
{
switch (code) {
case X86Encoding::xmm0: return &context->float_.__fpu_xmm0;
case X86Encoding::xmm1: return &context->float_.__fpu_xmm1;
case X86Encoding::xmm2: return &context->float_.__fpu_xmm2;
case X86Encoding::xmm3: return &context->float_.__fpu_xmm3;
case X86Encoding::xmm4: return &context->float_.__fpu_xmm4;
case X86Encoding::xmm5: return &context->float_.__fpu_xmm5;
case X86Encoding::xmm6: return &context->float_.__fpu_xmm6;
case X86Encoding::xmm7: return &context->float_.__fpu_xmm7;
case X86Encoding::xmm8: return &context->float_.__fpu_xmm8;
case X86Encoding::xmm9: return &context->float_.__fpu_xmm9;
case X86Encoding::xmm10: return &context->float_.__fpu_xmm10;
case X86Encoding::xmm11: return &context->float_.__fpu_xmm11;
case X86Encoding::xmm12: return &context->float_.__fpu_xmm12;
case X86Encoding::xmm13: return &context->float_.__fpu_xmm13;
case X86Encoding::xmm14: return &context->float_.__fpu_xmm14;
case X86Encoding::xmm15: return &context->float_.__fpu_xmm15;
default: break;
}
MOZ_CRASH();
}
MOZ_COLD static void *
AddressOfGPRegisterSlot(EMULATOR_CONTEXT *context, Registers::Code code)
{
switch (code) {
case X86Encoding::rax: return &context->thread.__rax;
case X86Encoding::rcx: return &context->thread.__rcx;
case X86Encoding::rdx: return &context->thread.__rdx;
case X86Encoding::rbx: return &context->thread.__rbx;
case X86Encoding::rsp: return &context->thread.__rsp;
case X86Encoding::rbp: return &context->thread.__rbp;
case X86Encoding::rsi: return &context->thread.__rsi;
case X86Encoding::rdi: return &context->thread.__rdi;
case X86Encoding::r8: return &context->thread.__r8;
case X86Encoding::r9: return &context->thread.__r9;
case X86Encoding::r10: return &context->thread.__r10;
case X86Encoding::r11: return &context->thread.__r11;
case X86Encoding::r12: return &context->thread.__r12;
case X86Encoding::r13: return &context->thread.__r13;
case X86Encoding::r14: return &context->thread.__r14;
case X86Encoding::r15: return &context->thread.__r15;
default: break;
}
MOZ_CRASH();
}
# endif // !XP_MACOSX
static void
RedirectToOutOfBoundsLabel(uint8_t **ppc, const AsmJSModule &module)
MOZ_COLD static void
SetRegisterToCoercedUndefined(EMULATOR_CONTEXT *context, size_t size,
const Disassembler::OtherOperand &value)
{
MOZ_ASSERT(module.containsFunctionPC(*ppc));
*ppc = module.outOfBoundsExit();
if (value.kind() == Disassembler::OtherOperand::FPR)
SetFPRegToNaN(size, AddressOfFPRegisterSlot(context, value.fpr()));
else
SetGPRegToZero(AddressOfGPRegisterSlot(context, value.gpr()));
}
MOZ_COLD static void
SetRegisterToLoadedValue(EMULATOR_CONTEXT *context, const void *addr, size_t size,
const Disassembler::OtherOperand &value)
{
if (value.kind() == Disassembler::OtherOperand::FPR)
SetFPRegToLoadedValue(addr, size, AddressOfFPRegisterSlot(context, value.fpr()));
else
SetGPRegToLoadedValue(addr, size, AddressOfGPRegisterSlot(context, value.gpr()));
}
MOZ_COLD static void
SetRegisterToLoadedValueSext32(EMULATOR_CONTEXT *context, const void *addr, size_t size,
const Disassembler::OtherOperand &value)
{
SetGPRegToLoadedValueSext32(addr, size, AddressOfGPRegisterSlot(context, value.gpr()));
}
MOZ_COLD static void
StoreValueFromRegister(EMULATOR_CONTEXT *context, void *addr, size_t size,
const Disassembler::OtherOperand &value)
{
if (value.kind() == Disassembler::OtherOperand::FPR)
StoreValueFromFPReg(addr, size, AddressOfFPRegisterSlot(context, value.fpr()));
else if (value.kind() == Disassembler::OtherOperand::GPR)
StoreValueFromGPReg(addr, size, AddressOfGPRegisterSlot(context, value.gpr()));
else
StoreValueFromGPImm(addr, size, value.imm());
}
MOZ_COLD static uint8_t *
ComputeAccessAddress(EMULATOR_CONTEXT *context, const Disassembler::ComplexAddress &address)
{
MOZ_RELEASE_ASSERT(!address.isPCRelative(), "PC-relative addresses not supported yet");
uintptr_t result = address.disp();
if (address.base() != Registers::Invalid) {
uintptr_t base;
StoreValueFromGPReg(&base, sizeof(uintptr_t),
AddressOfGPRegisterSlot(context, address.base()));
result += base;
}
if (address.index() != Registers::Invalid) {
uintptr_t index;
StoreValueFromGPReg(&index, sizeof(uintptr_t),
AddressOfGPRegisterSlot(context, address.index()));
result += index * (1 << address.scale());
}
return reinterpret_cast<uint8_t *>(result);
}
MOZ_COLD static uint8_t *
EmulateHeapAccess(EMULATOR_CONTEXT *context, uint8_t *pc, uint8_t *faultingAddress,
const AsmJSHeapAccess *heapAccess, const AsmJSModule &module)
{
MOZ_RELEASE_ASSERT(module.containsFunctionPC(pc));
MOZ_RELEASE_ASSERT(module.usesSignalHandlersForOOB());
MOZ_RELEASE_ASSERT(!heapAccess->hasLengthCheck());
MOZ_RELEASE_ASSERT(heapAccess->insnOffset() == (pc - module.codeBase()));
// Disassemble the instruction which caused the trap so that we can extract
// information about it and decide what to do.
Disassembler::HeapAccess access;
uint8_t *end = Disassembler::DisassembleHeapAccess(pc, &access);
const Disassembler::ComplexAddress &address = access.address();
MOZ_RELEASE_ASSERT(end > pc);
MOZ_RELEASE_ASSERT(module.containsFunctionPC(end));
#if defined(JS_CODEGEN_X64)
// Check x64 asm.js heap access invariants.
MOZ_RELEASE_ASSERT(address.disp() >= 0);
MOZ_RELEASE_ASSERT(address.base() == HeapReg.code());
MOZ_RELEASE_ASSERT(address.index() != HeapReg.code());
MOZ_RELEASE_ASSERT(address.scale() == 0);
if (address.base() != Registers::Invalid) {
uintptr_t base;
StoreValueFromGPReg(&base, sizeof(uintptr_t),
AddressOfGPRegisterSlot(context, address.base()));
MOZ_RELEASE_ASSERT(reinterpret_cast<uint8_t *>(base) == module.maybeHeap());
}
if (address.index() != Registers::Invalid) {
uintptr_t index;
StoreValueFromGPReg(&index, sizeof(uintptr_t),
AddressOfGPRegisterSlot(context, address.index()));
MOZ_RELEASE_ASSERT(uint32_t(index) == index);
}
#endif
// Determine the actual effective address of the faulting access. We can't
// rely on the faultingAddress given to us by the OS, because we need the
// address of the start of the access, and the OS may sometimes give us an
// address somewhere in the middle of the heap access.
uint8_t *accessAddress = ComputeAccessAddress(context, address);
MOZ_RELEASE_ASSERT(size_t(faultingAddress - accessAddress) < access.size(),
"Given faulting address does not appear to be within computed "
"faulting address range");
MOZ_RELEASE_ASSERT(accessAddress >= module.maybeHeap(),
"Access begins outside the asm.js heap");
MOZ_RELEASE_ASSERT(accessAddress + access.size() <= module.maybeHeap() + AsmJSMappedSize,
"Access extends beyond the asm.js heap guard region");
MOZ_RELEASE_ASSERT(accessAddress + access.size() > module.maybeHeap() + module.heapLength(),
"Computed access address is not actually out of bounds");
// The basic sandbox model is that all heap accesses are a heap base
// register plus an index, and the index is always computed with 32-bit
// operations, so we know it can only be 4 GiB off of the heap base.
//
// However, we wish to support the optimization of folding immediates
// and scaled indices into addresses, and any address arithmetic we fold
// gets done at full pointer width, so it doesn't get properly wrapped.
// We support this by extending AsmJSMappedSize to the greatest size
// that could be reached by such an unwrapped address, and then when we
// arrive here in the signal handler for such an access, we compute the
// fully wrapped address, and perform the load or store on it.
//
// Taking a signal is really slow, but in theory programs really shouldn't
// be hitting this anyway.
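//
// Worked example (illustrative): with a 64 KiB heap, a folded address of
// heap + 0x100000010 lands in the guard region; wrapping the offset to
// 32 bits gives 0x10, which is back in bounds, so the access below is
// emulated against module.maybeHeap() + 0x10.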
intptr_t unwrappedOffset = accessAddress - module.maybeHeap();
uint32_t wrappedOffset = uint32_t(unwrappedOffset);
size_t size = access.size();
MOZ_RELEASE_ASSERT(wrappedOffset + size > wrappedOffset);
bool inBounds = wrappedOffset < module.heapLength() &&
wrappedOffset + size < module.heapLength();
// If this is storing Z of an XYZ, check whether X is also in bounds, so
// that we don't store anything before throwing.
MOZ_RELEASE_ASSERT(unwrappedOffset > heapAccess->offsetWithinWholeSimdVector());
uint32_t wrappedBaseOffset = uint32_t(unwrappedOffset - heapAccess->offsetWithinWholeSimdVector());
if (wrappedBaseOffset >= module.heapLength())
inBounds = false;
if (inBounds) {
// We now know that this is an access that is actually in bounds when
// properly wrapped. Complete the load or store with the wrapped
// address.
uint8_t *wrappedAddress = module.maybeHeap() + wrappedOffset;
MOZ_RELEASE_ASSERT(wrappedAddress >= module.maybeHeap());
MOZ_RELEASE_ASSERT(wrappedAddress + size > wrappedAddress);
MOZ_RELEASE_ASSERT(wrappedAddress + size <= module.maybeHeap() + module.heapLength());
switch (access.kind()) {
case Disassembler::HeapAccess::Load:
SetRegisterToLoadedValue(context, wrappedAddress, size, access.otherOperand());
break;
case Disassembler::HeapAccess::LoadSext32:
SetRegisterToLoadedValueSext32(context, wrappedAddress, size, access.otherOperand());
break;
case Disassembler::HeapAccess::Store:
StoreValueFromRegister(context, wrappedAddress, size, access.otherOperand());
break;
case Disassembler::HeapAccess::Unknown:
MOZ_CRASH("Failed to disassemble instruction");
}
} else {
// We now know that this is an out-of-bounds access made by an asm.js
// load/store that we should handle.
if (heapAccess->throwOnOOB())
return module.outOfBoundsExit();
switch (access.kind()) {
case Disassembler::HeapAccess::Load:
case Disassembler::HeapAccess::LoadSext32:
// Assign the JS-defined result value to the destination register
// (ToInt32(undefined) or ToNumber(undefined), determined by the
// type of the destination register). Very conveniently, we can
// infer the type from the register class, since all SIMD accesses
// throw on out of bounds (see above), so the only types using FP
// registers are float32 and double.
SetRegisterToCoercedUndefined(context, access.size(), access.otherOperand());
break;
case Disassembler::HeapAccess::Store:
// Do nothing.
break;
case Disassembler::HeapAccess::Unknown:
MOZ_CRASH("Failed to disassemble instruction");
}
}
return end;
}
#endif // JS_CODEGEN_X64
#if defined(XP_WIN)
@ -453,7 +748,7 @@ HandleFault(PEXCEPTION_POINTERS exception)
// These checks aren't necessary, but, since we can, check anyway to make
// sure we aren't covering up a real bug.
void *faultingAddress = (void*)record->ExceptionInformation[1];
uint8_t *faultingAddress = reinterpret_cast<uint8_t *>(record->ExceptionInformation[1]);
if (!module.maybeHeap() ||
faultingAddress < module.maybeHeap() ||
faultingAddress >= module.maybeHeap() + AsmJSMappedSize)
@ -484,26 +779,7 @@ HandleFault(PEXCEPTION_POINTERS exception)
if (!heapAccess)
return false;
// We now know that this is an out-of-bounds access made by an asm.js
// load/store that we should handle.
// SIMD out-of-bounds loads and stores just need to throw.
if (Scalar::isSimdType(heapAccess->type())) {
RedirectToOutOfBoundsLabel(ppc, module);
return true;
}
// Also not necessary, but, since we can, do.
if (heapAccess->isLoad() != !record->ExceptionInformation[0])
return false;
// If this is a load, assign the JS-defined result value to the destination
// register (ToInt32(undefined) or ToNumber(undefined), determined by the
// type of the destination register) and set the PC to the next op. Upon
// return from the handler, execution will resume at this next PC.
if (heapAccess->isLoad())
SetRegisterToCoercedUndefined(context, heapAccess->type(), heapAccess->loadedReg());
*ppc += heapAccess->opLength();
*ppc = EmulateHeapAccess(context, pc, faultingAddress, heapAccess, module);
return true;
# else
@ -525,82 +801,19 @@ AsmJSFaultHandler(LPEXCEPTION_POINTERS exception)
# include <mach/exc.h>
static uint8_t **
ContextToPC(x86_thread_state_t &state)
ContextToPC(EMULATOR_CONTEXT *context)
{
# if defined(JS_CPU_X64)
static_assert(sizeof(state.uts.ts64.__rip) == sizeof(void*),
static_assert(sizeof(context->thread.__rip) == sizeof(void*),
"stored IP should be compile-time pointer-sized");
return reinterpret_cast<uint8_t**>(&state.uts.ts64.__rip);
return reinterpret_cast<uint8_t**>(&context->thread.__rip);
# else
static_assert(sizeof(state.uts.ts32.__eip) == sizeof(void*),
static_assert(sizeof(context->thread.uts.ts32.__eip) == sizeof(void*),
"stored IP should be compile-time pointer-sized");
return reinterpret_cast<uint8_t**>(&state.uts.ts32.__eip);
# endif
return reinterpret_cast<uint8_t**>(&context->thread.uts.ts32.__eip);
#endif
}
# if defined(JS_CODEGEN_X64)
static bool
SetRegisterToCoercedUndefined(mach_port_t rtThread, x86_thread_state64_t &state,
const AsmJSHeapAccess &heapAccess)
{
if (heapAccess.loadedReg().isFloat()) {
kern_return_t kret;
x86_float_state64_t fstate;
unsigned int count = x86_FLOAT_STATE64_COUNT;
kret = thread_get_state(rtThread, x86_FLOAT_STATE64, (thread_state_t) &fstate, &count);
if (kret != KERN_SUCCESS)
return false;
Scalar::Type viewType = heapAccess.type();
switch (heapAccess.loadedReg().fpu().code()) {
case X86Encoding::xmm0: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm0); break;
case X86Encoding::xmm1: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm1); break;
case X86Encoding::xmm2: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm2); break;
case X86Encoding::xmm3: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm3); break;
case X86Encoding::xmm4: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm4); break;
case X86Encoding::xmm5: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm5); break;
case X86Encoding::xmm6: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm6); break;
case X86Encoding::xmm7: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm7); break;
case X86Encoding::xmm8: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm8); break;
case X86Encoding::xmm9: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm9); break;
case X86Encoding::xmm10: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm10); break;
case X86Encoding::xmm11: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm11); break;
case X86Encoding::xmm12: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm12); break;
case X86Encoding::xmm13: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm13); break;
case X86Encoding::xmm14: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm14); break;
case X86Encoding::xmm15: SetXMMRegToNaN(viewType, &fstate.__fpu_xmm15); break;
default: MOZ_CRASH();
}
kret = thread_set_state(rtThread, x86_FLOAT_STATE64, (thread_state_t)&fstate, x86_FLOAT_STATE64_COUNT);
if (kret != KERN_SUCCESS)
return false;
} else {
switch (heapAccess.loadedReg().gpr().code()) {
case X86Encoding::rax: state.__rax = 0; break;
case X86Encoding::rcx: state.__rcx = 0; break;
case X86Encoding::rdx: state.__rdx = 0; break;
case X86Encoding::rbx: state.__rbx = 0; break;
case X86Encoding::rsp: state.__rsp = 0; break;
case X86Encoding::rbp: state.__rbp = 0; break;
case X86Encoding::rsi: state.__rsi = 0; break;
case X86Encoding::rdi: state.__rdi = 0; break;
case X86Encoding::r8: state.__r8 = 0; break;
case X86Encoding::r9: state.__r9 = 0; break;
case X86Encoding::r10: state.__r10 = 0; break;
case X86Encoding::r11: state.__r11 = 0; break;
case X86Encoding::r12: state.__r12 = 0; break;
case X86Encoding::r13: state.__r13 = 0; break;
case X86Encoding::r14: state.__r14 = 0; break;
case X86Encoding::r15: state.__r15 = 0; break;
default: MOZ_CRASH();
}
}
return true;
}
# endif
// This definition was generated by mig (the Mach Interface Generator) for the
// routine 'exception_raise' (exc.defs).
#pragma pack(4)
@ -637,14 +850,29 @@ HandleMachException(JSRuntime *rt, const ExceptionRequest &request)
mach_port_t rtThread = request.body.thread.name;
// Read out the JSRuntime thread's register state.
x86_thread_state_t state;
unsigned int count = x86_THREAD_STATE_COUNT;
EMULATOR_CONTEXT context;
# if defined(JS_CODEGEN_X64)
unsigned int thread_state_count = x86_THREAD_STATE64_COUNT;
unsigned int float_state_count = x86_FLOAT_STATE64_COUNT;
int thread_state = x86_THREAD_STATE64;
int float_state = x86_FLOAT_STATE64;
# else
unsigned int thread_state_count = x86_THREAD_STATE_COUNT;
unsigned int float_state_count = x86_FLOAT_STATE_COUNT;
int thread_state = x86_THREAD_STATE;
int float_state = x86_FLOAT_STATE;
# endif
kern_return_t kret;
kret = thread_get_state(rtThread, x86_THREAD_STATE, (thread_state_t)&state, &count);
kret = thread_get_state(rtThread, thread_state,
(thread_state_t)&context.thread, &thread_state_count);
if (kret != KERN_SUCCESS)
return false;
kret = thread_get_state(rtThread, float_state,
(thread_state_t)&context.float_, &float_state_count);
if (kret != KERN_SUCCESS)
return false;
uint8_t **ppc = ContextToPC(state);
uint8_t **ppc = ContextToPC(&context);
uint8_t *pc = *ppc;
if (request.body.exception != EXC_BAD_ACCESS || request.body.codeCnt != 2)
@ -658,10 +886,10 @@ HandleMachException(JSRuntime *rt, const ExceptionRequest &request)
if (!module.containsFunctionPC(pc))
return false;
# if defined(JS_CPU_X64)
# if defined(JS_CODEGEN_X64)
// These checks aren't necessary, but, since we can, check anyway to make
// sure we aren't covering up a real bug.
void *faultingAddress = (void*)request.body.code[1];
uint8_t *faultingAddress = reinterpret_cast<uint8_t *>(request.body.code[1]);
if (!module.maybeHeap() ||
faultingAddress < module.maybeHeap() ||
faultingAddress >= module.maybeHeap() + AsmJSMappedSize)
@ -673,26 +901,13 @@ HandleMachException(JSRuntime *rt, const ExceptionRequest &request)
if (!heapAccess)
return false;
// We now know that this is an out-of-bounds access made by an asm.js
// load/store that we should handle.
*ppc = EmulateHeapAccess(&context, pc, faultingAddress, heapAccess, module);
if (Scalar::isSimdType(heapAccess->type())) {
// SIMD out-of-bounds loads and stores just need to throw.
RedirectToOutOfBoundsLabel(ppc, module);
} else {
// If this is a load, assign the JS-defined result value to the destination
// register (ToInt32(undefined) or ToNumber(undefined), determined by the
// type of the destination register) and set the PC to the next op. Upon
// return from the handler, execution will resume at this next PC.
if (heapAccess->isLoad()) {
if (!SetRegisterToCoercedUndefined(rtThread, state.uts.ts64, *heapAccess))
return false;
}
*ppc += heapAccess->opLength();
}
// Update the thread state with the new pc.
kret = thread_set_state(rtThread, x86_THREAD_STATE, (thread_state_t)&state, x86_THREAD_STATE_COUNT);
// Update the thread state with the new pc and register values.
kret = thread_set_state(rtThread, float_state, (thread_state_t)&context.float_, float_state_count);
if (kret != KERN_SUCCESS)
return false;
kret = thread_set_state(rtThread, thread_state, (thread_state_t)&context.thread, thread_state_count);
if (kret != KERN_SUCCESS)
return false;
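// The sequence above is the standard Mach read-modify-write pattern: fetch
// the faulting thread's register state, rewrite pc (and any registers the
// emulation touched), then write everything back before replying to the
// exception message. A minimal x86-64 sketch, error handling elided and the
// helper name illustrative; newPC stands in for the address returned by
// EmulateHeapAccess:
static void
RedirectMachThread(mach_port_t rtThread, uint8_t *newPC)
{
    x86_thread_state64_t thread;
    unsigned int count = x86_THREAD_STATE64_COUNT;
    thread_get_state(rtThread, x86_THREAD_STATE64, (thread_state_t)&thread, &count);
    thread.__rip = reinterpret_cast<uint64_t>(newPC);  // resume at the patched pc
    thread_set_state(rtThread, x86_THREAD_STATE64, (thread_state_t)&thread,
                     x86_THREAD_STATE64_COUNT);
}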
@ -860,6 +1075,13 @@ AsmJSMachExceptionHandler::install(JSRuntime *rt)
static bool
HandleFault(int signum, siginfo_t *info, void *ctx)
{
// The signals we're expecting come from access violations when touching
// mprotected memory. If the signal originates anywhere else, don't try
// to handle it.
MOZ_RELEASE_ASSERT(signum == SIGSEGV);
if (info->si_code != SEGV_ACCERR)
return false;
CONTEXT *context = (CONTEXT *)ctx;
uint8_t **ppc = ContextToPC(context);
uint8_t *pc = *ppc;
@ -881,7 +1103,7 @@ HandleFault(int signum, siginfo_t *info, void *ctx)
# if defined(JS_CODEGEN_X64)
// These checks aren't necessary, but, since we can, check anyway to make
// sure we aren't covering up a real bug.
void *faultingAddress = info->si_addr;
uint8_t *faultingAddress = static_cast<uint8_t *>(info->si_addr);
if (!module.maybeHeap() ||
faultingAddress < module.maybeHeap() ||
faultingAddress >= module.maybeHeap() + AsmJSMappedSize)
@ -893,22 +1115,7 @@ HandleFault(int signum, siginfo_t *info, void *ctx)
if (!heapAccess)
return false;
// We now know that this is an out-of-bounds access made by an asm.js
// load/store that we should handle.
// SIMD out-of-bounds loads and stores just need to throw.
if (Scalar::isSimdType(heapAccess->type())) {
RedirectToOutOfBoundsLabel(ppc, module);
return true;
}
// If this is a load, assign the JS-defined result value to the destination
// register (ToInt32(undefined) or ToNumber(undefined), determined by the
// type of the destination register) and set the PC to the next op. Upon
// return from the handler, execution will resume at this next PC.
if (heapAccess->isLoad())
SetRegisterToCoercedUndefined(context, heapAccess->type(), heapAccess->loadedReg());
*ppc += heapAccess->opLength();
*ppc = EmulateHeapAccess(context, pc, faultingAddress, heapAccess, module);
return true;
# else

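// For context, a handler with the signature above only sees si_code and
// si_addr when installed with SA_SIGINFO; a minimal sketch of the setup,
// assuming an AsmJSFaultHandler wrapper that forwards to HandleFault:
#include <signal.h>
#include <string.h>
static void
InstallAsmJSFaultHandler()
{
    struct sigaction act;
    memset(&act, 0, sizeof(act));
    act.sa_flags = SA_SIGINFO | SA_NODEFER;
    act.sa_sigaction = AsmJSFaultHandler;  // assumed: void(int, siginfo_t *, void *)
    sigaction(SIGSEGV, &act, nullptr);
}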
View File

@ -2519,7 +2519,9 @@ class FunctionCompiler
const JitCompileOptions options;
mirGen_ = lifo_.new_<MIRGenerator>(CompileCompartment::get(cx()->compartment()),
options, alloc_,
graph_, info_, optimizationInfo);
graph_, info_, optimizationInfo,
&m().onOutOfBoundsLabel(),
m().usesSignalHandlersForOOB());
if (!newBlock(/* pred = */ nullptr, &curBlock_, fn_))
return false;
@ -2871,7 +2873,7 @@ class FunctionCompiler
if (inDeadCode())
return nullptr;
bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK && !m().usesSignalHandlersForOOB();
bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD loads should use loadSimdHeap");
MAsmJSLoadHeap *load = MAsmJSLoadHeap::New(alloc(), accessType, ptr, needsBoundsCheck);
curBlock_->add(load);
@ -2884,11 +2886,10 @@ class FunctionCompiler
if (inDeadCode())
return nullptr;
bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK && !m().usesSignalHandlersForOOB();
bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
MOZ_ASSERT(Scalar::isSimdType(accessType), "loadSimdHeap can only load from a SIMD view");
Label *outOfBoundsLabel = &m().onOutOfBoundsLabel();
MAsmJSLoadHeap *load = MAsmJSLoadHeap::New(alloc(), accessType, ptr, needsBoundsCheck,
outOfBoundsLabel, numElems);
numElems);
curBlock_->add(load);
return load;
}
@ -2898,7 +2899,7 @@ class FunctionCompiler
if (inDeadCode())
return;
bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK && !m().usesSignalHandlersForOOB();
bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
MOZ_ASSERT(!Scalar::isSimdType(accessType), "SIMD stores should use loadSimdHeap");
MAsmJSStoreHeap *store = MAsmJSStoreHeap::New(alloc(), accessType, ptr, v, needsBoundsCheck);
curBlock_->add(store);
@ -2910,11 +2911,10 @@ class FunctionCompiler
if (inDeadCode())
return;
bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK && !m().usesSignalHandlersForOOB();
bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
MOZ_ASSERT(Scalar::isSimdType(accessType), "storeSimdHeap can only load from a SIMD view");
Label *outOfBoundsLabel = &m().onOutOfBoundsLabel();
MAsmJSStoreHeap *store = MAsmJSStoreHeap::New(alloc(), accessType, ptr, v, needsBoundsCheck,
outOfBoundsLabel, numElems);
numElems);
curBlock_->add(store);
}
@ -2931,9 +2931,8 @@ class FunctionCompiler
if (inDeadCode())
return nullptr;
bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK && !m().usesSignalHandlersForOOB();
bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
MAsmJSLoadHeap *load = MAsmJSLoadHeap::New(alloc(), accessType, ptr, needsBoundsCheck,
/* outOfBoundsLabel = */ nullptr,
/* numElems */ 0,
MembarBeforeLoad, MembarAfterLoad);
curBlock_->add(load);
@ -2945,9 +2944,8 @@ class FunctionCompiler
if (inDeadCode())
return;
bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK && !m().usesSignalHandlersForOOB();
bool needsBoundsCheck = chk == NEEDS_BOUNDS_CHECK;
MAsmJSStoreHeap *store = MAsmJSStoreHeap::New(alloc(), accessType, ptr, v, needsBoundsCheck,
/* outOfBoundsLabel = */ nullptr,
/* numElems = */ 0,
MembarBeforeStore, MembarAfterStore);
curBlock_->add(store);

View File

@ -25,6 +25,7 @@
#include "jsutil.h"
#include "jit/Registers.h"
#include "js/TypeDecls.h"
#include "vm/NativeObject.h"
@ -53,13 +54,25 @@ ValidateAsmJS(ExclusiveContext *cx, AsmJSParser &parser, frontend::ParseNode *st
// The assumed page size; dynamically checked in ValidateAsmJS.
const size_t AsmJSPageSize = 4096;
// Targets define AsmJSImmediateRange to be the size of an address immediate,
// and AsmJSCheckedImmediateRange to be the size of an address immediate that
// can be supported by signal-handler OOB handling.
static_assert(jit::AsmJSCheckedImmediateRange <= jit::AsmJSImmediateRange,
"AsmJSImmediateRange should be the size of an unconstrained "
"address immediate");
#ifdef JS_CPU_X64
// On x64, the internal ArrayBuffer data array is inflated to 4GiB (only the
// byteLength portion of which is accessible) so that out-of-bounds accesses
// (made using a uint32 index) are guaranteed to raise a SIGSEGV.
// Unaligned accesses and mask optimizations might also try to access a few
// bytes after this limit, so just inflate it by AsmJSPageSize.
static const size_t AsmJSMappedSize = 4 * 1024ULL * 1024ULL * 1024ULL + AsmJSPageSize;
// Then, an additional extent is added to permit folding of small immediate
// values into addresses. And finally, unaligned accesses and mask optimizations
// might also try to access a few bytes after this limit, so just inflate it by
// AsmJSPageSize.
static const size_t AsmJSMappedSize = 4 * 1024ULL * 1024ULL * 1024ULL +
jit::AsmJSCheckedImmediateRange +
AsmJSPageSize;
#endif
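// The arithmetic this buys: every asm.js heap access computes
// heapBase + uint32(index) + imm with imm <= AsmJSCheckedImmediateRange, and
// no access is wider than one SIMD vector, so the furthest byte touched
// stays inside the mprotected mapping. A consistency check one could write
// under these assumptions (MaxAccessBytes is a stand-in for the widest
// access):
#ifdef JS_CPU_X64
static const size_t MaxAccessBytes = 16;
static_assert(AsmJSMappedSize >= 4 * 1024ULL * 1024ULL * 1024ULL +
                                 jit::AsmJSCheckedImmediateRange + MaxAccessBytes,
              "guard region must cover the widest folded access");
#endif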
// From the asm.js spec Linking section:

View File

@ -908,7 +908,7 @@ class GCRuntime
void sweepBackgroundThings(ZoneList &zones, LifoAlloc &freeBlocks, ThreadType threadType);
void assertBackgroundSweepingFinished();
bool shouldCompact();
IncrementalProgress compactPhase(bool lastGC, JS::gcreason::Reason reason);
IncrementalProgress compactPhase(JS::gcreason::Reason reason);
void sweepTypesAfterCompacting(Zone *zone);
void sweepZoneAfterCompacting(Zone *zone);
ArenaHeader *relocateArenas(JS::gcreason::Reason reason);

View File

@ -691,14 +691,18 @@ MarkIdInternal(JSTracer *trc, jsid *id)
{
if (JSID_IS_STRING(*id)) {
JSString *str = JSID_TO_STRING(*id);
JSString *prior = str;
trc->setTracingLocation((void *)id);
MarkInternal(trc, &str);
*id = NON_INTEGER_ATOM_TO_JSID(reinterpret_cast<JSAtom *>(str));
if (str != prior)
*id = NON_INTEGER_ATOM_TO_JSID(reinterpret_cast<JSAtom *>(str));
} else if (JSID_IS_SYMBOL(*id)) {
JS::Symbol *sym = JSID_TO_SYMBOL(*id);
JS::Symbol *prior = sym;
trc->setTracingLocation((void *)id);
MarkInternal(trc, &sym);
*id = SYMBOL_TO_JSID(sym);
if (sym != prior)
*id = SYMBOL_TO_JSID(sym);
} else {
/* Unset realLocation manually if we do not call MarkInternal. */
trc->unsetTracingLocation();
@ -755,14 +759,22 @@ MarkValueInternal(JSTracer *trc, Value *v)
MOZ_ASSERT(v->toGCThing());
void *thing = v->toGCThing();
trc->setTracingLocation((void *)v);
MarkKind(trc, &thing, v->gcKind());
if (v->isString()) {
v->setString((JSString *)thing);
JSString *str = static_cast<JSString*>(thing);
MarkInternal(trc, &str);
if (str != thing)
v->setString(str);
} else if (v->isObject()) {
v->setObjectOrNull((JSObject *)thing);
JSObject *obj = static_cast<JSObject*>(thing);
MarkInternal(trc, &obj);
if (obj != thing)
v->setObjectOrNull(obj);
} else {
MOZ_ASSERT(v->isSymbol());
v->setSymbol((JS::Symbol *)thing);
JS::Symbol *sym = static_cast<JS::Symbol*>(thing);
MarkInternal(trc, &sym);
if (sym != thing)
v->setSymbol(sym);
}
} else {
/* Unset realLocation manually if we do not call MarkInternal. */
@ -995,24 +1007,6 @@ gc::MarkValueUnbarriered(JSTracer *trc, Value *v, const char *name)
MarkValueInternal(trc, v);
}
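// The store-only-if-moved idiom used above, in isolation; a sketch assuming
// a MarkInternal-style tracer that may update its argument in place:
template <typename T>
static void
MarkAndUpdate(JSTracer *trc, T **slot)
{
    T *prior = *slot;
    T *thing = prior;
    MarkInternal(trc, &thing);
    if (thing != prior)  // skip the write when tracing didn't move the cell
        *slot = thing;
}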
bool
gc::IsCellMarked(Cell **thingp)
{
return IsMarked<Cell>(thingp);
}
bool
gc::IsCellAboutToBeFinalized(Cell **thingp)
{
return IsAboutToBeFinalized<Cell>(thingp);
}
bool
gc::IsCellAboutToBeFinalizedFromAnyThread(Cell **thingp)
{
return IsAboutToBeFinalizedFromAnyThread<Cell>(thingp);
}
/*** Push Mark Stack ***/
#define JS_COMPARTMENT_ASSERT(rt, thing) \

View File

@ -320,15 +320,6 @@ Mark(JSTracer *trc, ScopeObject **obj, const char *name)
MarkObjectUnbarriered(trc, obj, name);
}
bool
IsCellMarked(Cell **thingp);
bool
IsCellAboutToBeFinalized(Cell **thing);
bool
IsCellAboutToBeFinalizedFromAnyThread(Cell **thing);
inline bool
IsMarked(BarrieredBase<Value> *v)
{

View File

@ -0,0 +1,48 @@
// |jit-test| test-also-noasmjs
load(libdir + "asm.js");
setIonCheckGraphCoherency(false);
setCachingEnabled(false);
// constants
var buf = new ArrayBuffer(BUF_MIN);
// An unshifted literal constant byte index in the range 0 to 2^31-1 inclusive should give a link failure.
assertAsmLinkFail(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {return arr[0x7fffffff]|0 } return f'), this, null, buf);
assertAsmLinkFail(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0x1fffffff]|0 } return f'), this, null, buf);
// An unshifted literal constant byte index outside the range 0 to 2^31-1 inclusive should cause an error compiling.
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0x20000000]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0x3fffffff]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0x40000000]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0x7fffffff]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0x80000000]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0x8fffffff]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0xffffffff]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0x100000000]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {return arr[0x80000000]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {return arr[0xffffffff]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {return arr[0x100000000]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int16Array(b); function f() {return arr[-1]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[-2]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[10-12]|0 } return f');
// An intish shifted literal constant index should not fail to compile or link.
assertEq(asmLink(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {return arr[0x3fffffff>>0]|0 } return f'), this, null, buf)(), 0);
assertEq(asmLink(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0x3fffffff>>2]|0 } return f'), this, null, buf)(), 0);
assertEq(asmLink(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {return arr[0xffffffff>>0]|0 } return f'), this, null, buf)(), 0);
assertEq(asmLink(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0xffffffff>>2]|0 } return f'), this, null, buf)(), 0);
assertEq(asmLink(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {return arr[-1>>0]|0 } return f'), this, null, buf)(), 0);
assertEq(asmLink(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[-1>>2]|0 } return f'), this, null, buf)(), 0);
// Unsigned (intish) folded constant index.
assertEq(asmLink(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {return arr[0xffffffff>>>0]|0 } return f'), this, null, buf)(), 0);
assertEq(asmLink(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {arr[0] = 1; return arr[(0xffffffff+1)>>>0]|0 } return f'), this, null, buf)(), 1);
// A non-intish shifted literal constant index should cause an error compiling.
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {return arr[0x100000000>>0]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0x100000000>>2]|0 } return f');
// Folded non-intish constant expressions should cause an error compiling.
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {return arr[0xffffffff+1]|0 } return f');

View File

@ -1,116 +1,83 @@
// |jit-test| test-also-noasmjs
load(libdir + "asm.js");
load(libdir + "asserts.js");
setIonCheckGraphCoherency(false);
setCachingEnabled(false);
// constants
var buf = new ArrayBuffer(BUF_MIN);
// An unshifted literal constant byte index in the range 0 to 2^31-1 inclusive should give a link failure.
assertAsmLinkFail(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {return arr[0x7fffffff]|0 } return f'), this, null, buf);
assertAsmLinkFail(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0x1fffffff]|0 } return f'), this, null, buf);
// An unshifted literal constant byte index outside the range 0 to 2^31-1 inclusive should cause an error compiling.
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0x20000000]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0x3fffffff]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0x40000000]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0x7fffffff]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0x80000000]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0x8fffffff]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0xffffffff]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0x100000000]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {return arr[0x80000000]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {return arr[0xffffffff]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {return arr[0x100000000]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int16Array(b); function f() {return arr[-1]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[-2]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[10-12]|0 } return f');
// An intish shifted literal constant index should not fail to compile or link.
assertEq(asmLink(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {return arr[0x3fffffff>>0]|0 } return f'), this, null, buf)(), 0);
assertEq(asmLink(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0x3fffffff>>2]|0 } return f'), this, null, buf)(), 0);
assertEq(asmLink(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {return arr[0xffffffff>>0]|0 } return f'), this, null, buf)(), 0);
assertEq(asmLink(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0xffffffff>>2]|0 } return f'), this, null, buf)(), 0);
assertEq(asmLink(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {return arr[-1>>0]|0 } return f'), this, null, buf)(), 0);
assertEq(asmLink(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[-1>>2]|0 } return f'), this, null, buf)(), 0);
// Unsigned (intish) folded constant index.
assertEq(asmLink(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {return arr[0xffffffff>>>0]|0 } return f'), this, null, buf)(), 0);
assertEq(asmLink(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {arr[0] = 1; return arr[(0xffffffff+1)>>>0]|0 } return f'), this, null, buf)(), 1);
// A non-intish shifted literal constant index should cause an error compiling.
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {return arr[0x100000000>>0]|0 } return f');
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int32Array(b); function f() {return arr[0x100000000>>2]|0 } return f');
// Folded non-intish constant expressions should cause an error compiling.
assertAsmTypeFail('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.Int8Array(b); function f() {return arr[0xffffffff+1]|0 } return f');
var ab = new ArrayBuffer(BUF_MIN);
var arr = new Int32Array(BUF_MIN);
for (var i = 0; i < arr.length; i++)
arr[i] = i;
// Compute a set of interesting indices.
var indices = [0];
for (var i of [4,1024,BUF_MIN,Math.pow(2,30),Math.pow(2,31),Math.pow(2,32),Math.pow(2,33)]) {
for (var j of [-2,-1,0,1,2]) {
for (var k of [1,-1])
indices.push((i+j)*k);
}
}
function testInt(ctor, shift, scale, disp) {
var arr = new ctor(ab);
var f = asmLink(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.' + ctor.name + '(b); function f(i) {i=i|0; return arr[((i<<' + scale + ')+' + disp + ')>>' + shift + ']|0 } return f'), this, null, ab);
for (var i of [0,1,2,3,4,1023,1024,1025,BUF_MIN-2,BUF_MIN-1,BUF_MIN,BUF_MIN+1])
assertEq(f(i), arr[((i<<scale)+disp)>>shift]|0);
for (var i of [-Math.pow(2,28),Math.pow(2,28),-Math.pow(2,29),Math.pow(2,29),-Math.pow(2,30),Math.pow(2,30),-Math.pow(2,31),Math.pow(2,31),-Math.pow(2,32),Math.pow(2,32)]) {
for (var j of [-8,-4,-1,0,1,4,8])
assertEq(f(i+j), arr[(((i+j)<<scale)+disp)>>shift]|0);
}
var c = asmCompile('glob', 'imp', 'b',
USE_ASM +
'var arr=new glob.' + ctor.name + '(b); ' +
'function load(i) {i=i|0; return arr[((i<<' + scale + ')+' + disp + ')>>' + shift + ']|0 } ' +
'function store(i,j) {i=i|0;j=j|0; arr[((i<<' + scale + ')+' + disp + ')>>' + shift + '] = j } ' +
'function storeZero(i) {i=i|0; arr[((i<<' + scale + ')+' + disp + ')>>' + shift + '] = 0 } ' +
'function storeNegOne(i) {i=i|0; arr[((i<<' + scale + ')+' + disp + ')>>' + shift + '] = -1 } ' +
'return { load: load, store: store, storeZero: storeZero, storeNegOne: storeNegOne }');
var f = asmLink(c, this, null, ab);
var f = asmLink(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.' + ctor.name + '(b); function f(i,j) {i=i|0;j=j|0; arr[((i<<' + scale + ')+' + disp + ')>>' + shift + '] = j } return f'), this, null, ab);
for (var i of [0,1,2,3,4,1023,1024,1025,4095,4096,4097]) {
var v = arr[0];
arr[0] = -1;
var negOne = arr[0]|0;
arr[0] = v;
for (var i of indices) {
var index = ((i<<scale)+disp)>>shift;
var v = arr[index]|0;
arr[index] = 0;
f(i, v);
assertEq(arr[index]|0, v);
}
v = arr[index]|0;
for (var i of [-Math.pow(2,31), Math.pow(2,31)-1, Math.pow(2,32)]) {
for (var j of [-8,-4,-1,0,1,4,8]) {
var index = (((i+j)<<scale)+disp)>>shift;
var v = arr[index]|0;
arr[index] = 0;
f(i+j, v);
assertEq(arr[index]|0, v);
}
// Loads
assertEq(f.load(i), v);
// Stores of immediates
arr[index] = 1;
f.storeZero(i);
assertEq(arr[index]|0, 0);
f.storeNegOne(i);
assertEq(arr[index]|0, index>>>0 < arr.length ? negOne : 0);
// Stores
arr[index] = ~v;
f.store(i, v);
assertEq(arr[index]|0, v);
}
}
function testFloat(ctor, shift, scale, disp, coercion) {
var arr = new ctor(ab);
var f = asmLink(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.' + ctor.name + '(b); var toF = glob.Math.fround; function f(i) {i=i|0; return ' + coercion + '(arr[((i<<' + scale + ')+' + disp + ')>>' + shift + ']) } return f'), this, null, ab);
for (var i of [0,1,2,3,4,1023,1024,1025,BUF_MIN-2,BUF_MIN-1,BUF_MIN,BUF_MIN+1])
assertEq(f(i), +arr[((i<<scale)+disp)>>shift]);
for (var i of [-Math.pow(2,31), Math.pow(2,31)-1, Math.pow(2,32)]) {
for (var j of [-8,-4,-1,0,1,4,8])
assertEq(f(i+j), +arr[(((i+j)<<scale)+disp)>>shift]);
}
var c = asmCompile('glob', 'imp', 'b',
USE_ASM +
'var arr=new glob.' + ctor.name + '(b); ' +
'var toF = glob.Math.fround; ' +
'function load(i) {i=i|0; return ' + coercion + '(arr[((i<<' + scale + ')+' + disp + ')>>' + shift + ']) } ' +
'function store(i,j) {i=i|0;j=+j; arr[((i<<' + scale + ')+' + disp + ')>>' + shift + '] = j } ' +
'return { load: load, store: store }');
var f = asmLink(c, this, null, ab);
var f = asmLink(asmCompile('glob', 'imp', 'b', USE_ASM + 'var arr=new glob.' + ctor.name + '(b); var toF = glob.Math.fround; function f(i,j) {i=i|0;j=+j; arr[((i<<' + scale + ')+' + disp + ')>>' + shift + '] = j } return f'), this, null, ab);
for (var i of [0,1,2,3,4,1023,1024,1025,4095,4096,4097]) {
for (var i of indices) {
var index = ((i<<scale)+disp)>>shift;
var v = +arr[index];
arr[index] = 0;
f(i, v);
assertEq(+arr[index], v);
}
for (var i of [-Math.pow(2,31), Math.pow(2,31)-1, Math.pow(2,32)]) {
for (var j of [-8,-4,-1,0,1,4,8]) {
var index = (((i+j)<<scale)+disp)>>shift;
var v = +arr[index];
arr[index] = 0;
f(i+j, v);
assertEq(+arr[index], v);
}
// Loads
assertEq(f.load(i), v);
// Stores
arr[index] = ~v;
f.store(i, v);
assertEq(+arr[index], v);
}
}
@ -121,11 +88,146 @@ function testFloat64(ctor, shift, scale, disp) {
testFloat(ctor, shift, scale, disp, "+");
}
function assertEqX4(observed, expected) {
assertEq(observed.x, expected.x);
assertEq(observed.y, expected.y);
assertEq(observed.z, expected.z);
assertEq(observed.w, expected.w);
}
function testSimdX4(ctor, shift, scale, disp, simdName, simdCtor) {
var arr = new ctor(ab);
var c = asmCompile('glob', 'imp', 'b',
USE_ASM +
'var arr=new glob.' + ctor.name + '(b); ' +
'var SIMD_' + simdName + ' = glob.SIMD.' + simdName + '; ' +
'var SIMD_' + simdName + '_check = SIMD_' + simdName + '.check; ' +
'var SIMD_' + simdName + '_load = SIMD_' + simdName + '.load; ' +
'var SIMD_' + simdName + '_loadXYZ = SIMD_' + simdName + '.loadXYZ; ' +
'var SIMD_' + simdName + '_loadXY = SIMD_' + simdName + '.loadXY; ' +
'var SIMD_' + simdName + '_loadX = SIMD_' + simdName + '.loadX; ' +
'var SIMD_' + simdName + '_store = SIMD_' + simdName + '.store; ' +
'var SIMD_' + simdName + '_storeXYZ = SIMD_' + simdName + '.storeXYZ; ' +
'var SIMD_' + simdName + '_storeXY = SIMD_' + simdName + '.storeXY; ' +
'var SIMD_' + simdName + '_storeX = SIMD_' + simdName + '.storeX; ' +
'function load(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
'function loadXYZ(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_loadXYZ(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
'function loadXY(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_loadXY(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
'function loadX(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_loadX(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
'function store(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
'function storeXYZ(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_storeXYZ(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
'function storeXY(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_storeXY(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
'function storeX(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_storeX(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
'return { load: load, loadXYZ: loadXYZ, loadXY: loadXY, loadX: loadX, store: store, storeXYZ: storeXYZ, storeXY : storeXY, storeX : storeX }');
var f = asmLink(c, this, null, ab);
for (var i of indices) {
var index = ((i<<scale)+disp)>>shift;
var v, vXYZ, vXY, vX;
var t = false, tXYZ = false, tXY = false, tX = false;
try { v = simdCtor.load(arr, index); }
catch (e) {
assertEq(e instanceof RangeError, true);
t = true;
}
try { vXYZ = simdCtor.loadXYZ(arr, index); }
catch (e) {
assertEq(e instanceof RangeError, true);
tXYZ = true;
}
try { vXY = simdCtor.loadXY(arr, index); }
catch (e) {
assertEq(e instanceof RangeError, true);
tXY = true;
}
try { vX = simdCtor.loadX(arr, index); }
catch (e) {
assertEq(e instanceof RangeError, true);
tX = true;
}
// Loads
var l, lXYZ, lXY, lX;
var r = false, rXYZ = false, rXY = false, rX = false;
try { l = f.load(i); }
catch (e) {
assertEq(e instanceof RangeError, true);
r = true;
}
try { lXYZ = f.loadXYZ(i); }
catch (e) {
assertEq(e instanceof RangeError, true);
rXYZ = true;
}
try { lXY = f.loadXY(i); }
catch (e) {
assertEq(e instanceof RangeError, true);
rXY = true;
}
try { lX = f.loadX(i); }
catch (e) {
assertEq(e instanceof RangeError, true);
rX = true;
}
assertEq(t, r);
assertEq(tXYZ, rXYZ);
assertEq(tXY, rXY);
assertEq(tX, rX);
if (!t) assertEqX4(v, l);
if (!tXYZ) assertEqX4(vXYZ, lXYZ);
if (!tXY) assertEqX4(vXY, lXY);
if (!tX) assertEqX4(vX, lX);
// Stores
if (!t) {
simdCtor.store(arr, index, simdCtor.not(v));
f.store(i, v);
assertEqX4(simdCtor.load(arr, index), v);
} else
assertThrowsInstanceOf(() => f.store(i, simdCtor()), RangeError);
if (!tXYZ) {
simdCtor.storeXYZ(arr, index, simdCtor.not(vXYZ));
f.storeXYZ(i, vXYZ);
assertEqX4(simdCtor.loadXYZ(arr, index), vXYZ);
} else
assertThrowsInstanceOf(() => f.storeXYZ(i, simdCtor()), RangeError);
if (!tXY) {
simdCtor.storeXY(arr, index, simdCtor.not(vXY));
f.storeXY(i, vXY);
assertEqX4(simdCtor.loadXY(arr, index), vXY);
} else
assertThrowsInstanceOf(() => f.storeXY(i, simdCtor()), RangeError);
if (!tX) {
simdCtor.storeX(arr, index, simdCtor.not(vX));
f.storeX(i, vX);
assertEqX4(simdCtor.loadX(arr, index), vX);
} else
assertThrowsInstanceOf(() => f.storeX(i, simdCtor()), RangeError);
}
}
function testFloat32x4(ctor, shift, scale, disp) {
testSimdX4(ctor, shift, scale, disp, 'float32x4', SIMD.float32x4);
}
function testInt32x4(ctor, shift, scale, disp) {
testSimdX4(ctor, shift, scale, disp, 'int32x4', SIMD.int32x4);
}
function test(tester, ctor, shift) {
var arr = new ctor(ab);
for (var i = 0; i < arr.length; i++)
arr[i] = Math.imul(i, Math.imul((i & 1), 2) - 1);
for (scale of [0,1,2,3]) {
for (disp of [0,1,8,Math.pow(2,31)-1,Math.pow(2,31),Math.pow(2,32)-1])
for (disp of [0,1,2,8,Math.pow(2,31)-1,Math.pow(2,31),Math.pow(2,32)-1])
tester(ctor, shift, scale, disp);
}
for (var i = 0; i < arr.length; i++) {
var v = arr[i];
arr[i] = Math.imul(i, Math.imul((i & 1), 2) - 1);
assertEq(arr[i], v);
}
}
test(testInt, Int8Array, 0);
@ -136,3 +238,7 @@ test(testInt, Int32Array, 2);
test(testInt, Uint32Array, 2);
test(testFloat32, Float32Array, 2);
test(testFloat64, Float64Array, 3);
if (typeof SIMD !== 'undefined' && isSimdAvailable()) {
test(testInt32x4, Uint8Array, 0);
test(testFloat32x4, Uint8Array, 0);
}

View File

@ -653,7 +653,7 @@ CodeGenerator::getJumpLabelForBranch(MBasicBlock *block)
// important here as these tests are extremely unlikely to be used in loop
// backedges, so emit inline code for the patchable jump. Heap allocating
// the label allows it to be used by out of line blocks.
Label *res = GetJitContext()->temp->lifoAlloc()->new_<Label>();
Label *res = alloc().lifoAlloc()->new_<Label>();
Label after;
masm.jump(&after);
masm.bind(res);

View File

@ -257,7 +257,6 @@ VerifyHeapAccess(uint8_t *begin, uint8_t *end, const HeapAccess &expected)
MOZ_ASSERT(e == end);
MOZ_ASSERT(disassembled == expected);
}
#endif
} // namespace Disassembler

View File

@ -89,6 +89,68 @@ AnalyzeLsh(TempAllocator &alloc, MLsh *lsh)
last->block()->insertAfter(last, eaddr);
}
static bool
IsAlignmentMask(uint32_t m)
{
// Test whether m is just leading ones and trailing zeros.
return (-m & ~m) == 0;
}
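// Worked examples for the predicate above:
//   m = 0xfffffff8: -m = 0x00000008, ~m = 0x00000007, (-m & ~m) == 0 -> mask
//   m = 0xffffffff: -m = 0x00000001, ~m = 0x00000000, (-m & ~m) == 0 -> mask
//   m = 0x0000fff0: -m = 0xffff0010, ~m = 0xffff000f, (-m & ~m) != 0 -> rejected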
template<typename MAsmJSHeapAccessType>
static void
AnalyzeAsmHeapAccess(MAsmJSHeapAccessType *ins, MIRGraph &graph)
{
MDefinition *ptr = ins->ptr();
if (ptr->isConstantValue()) {
// Look for heap[i] where i is a constant offset, and fold the offset.
// By doing the folding now, we simplify the task of codegen; the offset
// is always the address mode immediate. This also lets codegen avoid
// a situation where the sum of a constant pointer value and a non-zero
// offset doesn't actually fit into the address mode immediate.
int32_t imm = ptr->constantValue().toInt32();
if (imm != 0 && ins->tryAddDisplacement(imm)) {
MInstruction *zero = MConstant::New(graph.alloc(), Int32Value(0));
ins->block()->insertBefore(ins, zero);
ins->replacePtr(zero);
}
} else if (ptr->isAdd()) {
// Look for heap[a+i] where i is a constant offset, and fold the offset.
MDefinition *op0 = ptr->toAdd()->getOperand(0);
MDefinition *op1 = ptr->toAdd()->getOperand(1);
if (op0->isConstantValue())
mozilla::Swap(op0, op1);
if (op1->isConstantValue()) {
int32_t imm = op1->constantValue().toInt32();
if (ins->tryAddDisplacement(imm))
ins->replacePtr(op0);
}
} else if (ptr->isBitAnd() && ptr->hasOneUse()) {
// Transform heap[(a+i)&m] to heap[(a&m)+i] so that we can fold i into
// the access. Since we currently just mutate the BitAnd in place, this
// requires that we are its only user.
MDefinition *lhs = ptr->toBitAnd()->getOperand(0);
MDefinition *rhs = ptr->toBitAnd()->getOperand(1);
int lhsIndex = 0;
if (lhs->isConstantValue()) {
mozilla::Swap(lhs, rhs);
lhsIndex = 1;
}
if (lhs->isAdd() && rhs->isConstantValue()) {
MDefinition *op0 = lhs->toAdd()->getOperand(0);
MDefinition *op1 = lhs->toAdd()->getOperand(1);
if (op0->isConstantValue())
mozilla::Swap(op0, op1);
if (op1->isConstantValue()) {
uint32_t i = op1->constantValue().toInt32();
uint32_t m = rhs->constantValue().toInt32();
if (IsAlignmentMask(m) && ((i & m) == i) && ins->tryAddDisplacement(i))
ptr->toBitAnd()->replaceOperand(lhsIndex, op0);
}
}
}
}
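// A concrete instance of the BitAnd rewrite: with m = ~7 (an alignment mask)
// and i = 8, (i & m) == i holds, so
//   heap[(a + 8) & ~7]  ==>  heap[(a & ~7) + 8]
// is an identity ((a + 8) & ~7 equals (a & ~7) + 8 whenever i survives the
// mask), and the +8 then folds into the access's displacement via
// tryAddDisplacement, leaving only the masked base in the address register.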
// This analysis converts patterns of the form:
// truncate(x + (y << {0,1,2,3}))
// truncate(x + (y << {0,1,2,3}) + imm32)
@ -108,8 +170,15 @@ EffectiveAddressAnalysis::analyze()
{
for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
// Note that we don't check for MAsmJSCompareExchangeHeap
// or MAsmJSAtomicBinopHeap, because the backend and the OOB
// mechanism don't support non-zero offsets for them yet.
if (i->isLsh())
AnalyzeLsh(graph_.alloc(), i->toLsh());
else if (i->isAsmJSLoadHeap())
AnalyzeAsmHeapAccess(i->toAsmJSLoadHeap(), graph_);
else if (i->isAsmJSStoreHeap())
AnalyzeAsmHeapAccess(i->toAsmJSStoreHeap(), graph_);
}
}
return true;

View File

@ -4432,7 +4432,7 @@ IonBuilder::inlineScriptedCall(CallInfo &callInfo, JSFunction *target)
callInfo.pushFormals(current);
MResumePoint *outerResumePoint =
MResumePoint::New(alloc(), current, pc, callerResumePoint_, MResumePoint::Outer);
MResumePoint::New(alloc(), current, pc, MResumePoint::Outer);
if (!outerResumePoint)
return false;
current->setOuterResumePoint(outerResumePoint);
@ -5089,7 +5089,7 @@ IonBuilder::inlineObjectGroupFallback(CallInfo &callInfo, MBasicBlock *dispatchB
// Capture stack prior to the call operation. This captures the function.
MResumePoint *preCallResumePoint =
MResumePoint::New(alloc(), dispatchBlock, pc, callerResumePoint_, MResumePoint::ResumeAt);
MResumePoint::New(alloc(), dispatchBlock, pc, MResumePoint::ResumeAt);
if (!preCallResumePoint)
return false;
@ -6771,7 +6771,7 @@ IonBuilder::resume(MInstruction *ins, jsbytecode *pc, MResumePoint::Mode mode)
{
MOZ_ASSERT(ins->isEffectful() || !ins->isMovable());
MResumePoint *resumePoint = MResumePoint::New(alloc(), ins->block(), pc, callerResumePoint_,
MResumePoint *resumePoint = MResumePoint::New(alloc(), ins->block(), pc,
mode);
if (!resumePoint)
return false;
@ -9589,7 +9589,7 @@ IonBuilder::annotateGetPropertyCache(MDefinition *obj, MGetPropertyCache *getPro
if (inlinePropTable->numEntries() > 0) {
// Push the object back onto the stack temporarily to capture the resume point.
current->push(obj);
MResumePoint *resumePoint = MResumePoint::New(alloc(), current, pc, callerResumePoint_,
MResumePoint *resumePoint = MResumePoint::New(alloc(), current, pc,
MResumePoint::ResumeAt);
if (!resumePoint)
return false;

View File

@ -2931,10 +2931,10 @@ MUrsh::NewAsmJS(TempAllocator &alloc, MDefinition *left, MDefinition *right)
}
MResumePoint *
MResumePoint::New(TempAllocator &alloc, MBasicBlock *block, jsbytecode *pc, MResumePoint *parent,
MResumePoint::New(TempAllocator &alloc, MBasicBlock *block, jsbytecode *pc,
Mode mode)
{
MResumePoint *resume = new(alloc) MResumePoint(block, pc, parent, mode);
MResumePoint *resume = new(alloc) MResumePoint(block, pc, mode);
if (!resume->init(alloc))
return nullptr;
resume->inherit(block);
@ -2945,7 +2945,7 @@ MResumePoint *
MResumePoint::New(TempAllocator &alloc, MBasicBlock *block, MResumePoint *model,
const MDefinitionVector &operands)
{
MResumePoint *resume = new(alloc) MResumePoint(block, model->pc(), model->caller(), model->mode());
MResumePoint *resume = new(alloc) MResumePoint(block, model->pc(), model->mode());
// Allocate the same number of operands as the original resume point, and
// copy operands from the operands vector and not from the current
@ -2964,7 +2964,7 @@ MResumePoint *
MResumePoint::Copy(TempAllocator &alloc, MResumePoint *src)
{
MResumePoint *resume = new(alloc) MResumePoint(src->block(), src->pc(),
src->caller(), src->mode());
src->mode());
// Copy the operands from the original resume point, and not from the
// current block stack.
if (!resume->operands_.init(alloc, src->numAllocatedOperands()))
@ -2976,11 +2976,9 @@ MResumePoint::Copy(TempAllocator &alloc, MResumePoint *src)
return resume;
}
MResumePoint::MResumePoint(MBasicBlock *block, jsbytecode *pc, MResumePoint *caller,
Mode mode)
MResumePoint::MResumePoint(MBasicBlock *block, jsbytecode *pc, Mode mode)
: MNode(block),
pc_(pc),
caller_(caller),
instruction_(nullptr),
mode_(mode)
{
@ -2993,6 +2991,12 @@ MResumePoint::init(TempAllocator &alloc)
return operands_.init(alloc, block()->stackDepth());
}
MResumePoint*
MResumePoint::caller() const
{
return block_->callerResumePoint();
}
void
MResumePoint::inherit(MBasicBlock *block)
{

View File

@ -246,6 +246,7 @@ class MNode : public TempObject
MBasicBlock *block() const {
return block_;
}
MBasicBlock *caller() const;
// Sets an already set operand, updating use information. If you're looking
// for setOperand, this is probably what you want.
@ -11716,11 +11717,10 @@ class MResumePoint MOZ_FINAL :
MStoresToRecoverList stores_;
jsbytecode *pc_;
MResumePoint *caller_;
MInstruction *instruction_;
Mode mode_;
MResumePoint(MBasicBlock *block, jsbytecode *pc, MResumePoint *parent, Mode mode);
MResumePoint(MBasicBlock *block, jsbytecode *pc, Mode mode);
void inherit(MBasicBlock *state);
protected:
@ -11742,7 +11742,7 @@ class MResumePoint MOZ_FINAL :
public:
static MResumePoint *New(TempAllocator &alloc, MBasicBlock *block, jsbytecode *pc,
MResumePoint *parent, Mode mode);
Mode mode);
static MResumePoint *New(TempAllocator &alloc, MBasicBlock *block, MResumePoint *model,
const MDefinitionVector &operands);
static MResumePoint *Copy(TempAllocator &alloc, MResumePoint *src);
@ -11782,15 +11782,10 @@ class MResumePoint MOZ_FINAL :
jsbytecode *pc() const {
return pc_;
}
MResumePoint *caller() const {
return caller_;
}
void setCaller(MResumePoint *caller) {
caller_ = caller;
}
MResumePoint *caller() const;
uint32_t frameCount() const {
uint32_t count = 1;
for (MResumePoint *it = caller_; it; it = it->caller_)
for (MResumePoint *it = caller(); it; it = it->caller())
count++;
return count;
}
@ -12181,25 +12176,57 @@ class MAsmJSNeg
class MAsmJSHeapAccess
{
Scalar::Type accessType_;
int32_t offset_;
Scalar::Type accessType_ : 8;
bool needsBoundsCheck_;
Label *outOfBoundsLabel_;
unsigned numSimdElems_;
public:
MAsmJSHeapAccess(Scalar::Type accessType, bool needsBoundsCheck,
Label *outOfBoundsLabel = nullptr, unsigned numSimdElems = 0)
: accessType_(accessType), needsBoundsCheck_(needsBoundsCheck),
outOfBoundsLabel_(outOfBoundsLabel), numSimdElems_(numSimdElems)
MAsmJSHeapAccess(Scalar::Type accessType, bool needsBoundsCheck, unsigned numSimdElems = 0)
: offset_(0), accessType_(accessType),
needsBoundsCheck_(needsBoundsCheck), numSimdElems_(numSimdElems)
{
MOZ_ASSERT(numSimdElems <= ScalarTypeToLength(accessType));
}
int32_t offset() const { return offset_; }
int32_t endOffset() const { return offset() + byteSize(); }
Scalar::Type accessType() const { return accessType_; }
unsigned byteSize() const {
return Scalar::isSimdType(accessType())
? Scalar::scalarByteSize(accessType()) * numSimdElems()
: TypedArrayElemSize(accessType());
}
bool needsBoundsCheck() const { return needsBoundsCheck_; }
void removeBoundsCheck() { needsBoundsCheck_ = false; }
Label *outOfBoundsLabel() const { return outOfBoundsLabel_; }
unsigned numSimdElems() const { MOZ_ASSERT(Scalar::isSimdType(accessType_)); return numSimdElems_; }
bool tryAddDisplacement(int32_t o) {
// Compute the new offset. Check for overflow and negative. In theory it
// ought to be possible to support negative offsets, but it'd require
// more elaborate bounds checking mechanisms than we currently have.
MOZ_ASSERT(offset_ >= 0);
int32_t newOffset = uint32_t(offset_) + o;
if (newOffset < 0)
return false;
// Compute the new offset to the end of the access. Check for overflow
// and negative here also.
int32_t newEnd = uint32_t(newOffset) + byteSize();
if (newEnd < 0)
return false;
MOZ_ASSERT(uint32_t(newEnd) >= uint32_t(newOffset));
// If we need bounds checking, keep it within the more restrictive
// AsmJSCheckedImmediateRange. Otherwise, just keep it within what
// the instruction set can support.
size_t range = needsBoundsCheck() ? AsmJSCheckedImmediateRange : AsmJSImmediateRange;
if (size_t(newEnd) > range)
return false;
offset_ = newOffset;
return true;
}
};
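// Two hypothetical calls, traced through the checks above:
//   offset_ = 0x7ffffff0, o = 0x20: uint32_t(offset_) + o = 0x80000010 is
//     negative as int32_t, so the fold is refused and the add stays explicit.
//   offset_ = 16, o = 16, byteSize() = 4: newOffset = 32, newEnd = 36, and
//     the fold is accepted iff 36 fits the applicable range
//     (AsmJSCheckedImmediateRange while a bounds check is still needed).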
class MAsmJSLoadHeap
@ -12211,10 +12238,9 @@ class MAsmJSLoadHeap
MemoryBarrierBits barrierAfter_;
MAsmJSLoadHeap(Scalar::Type accessType, MDefinition *ptr, bool needsBoundsCheck,
Label *outOfBoundsLabel, unsigned numSimdElems,
MemoryBarrierBits before, MemoryBarrierBits after)
unsigned numSimdElems, MemoryBarrierBits before, MemoryBarrierBits after)
: MUnaryInstruction(ptr),
MAsmJSHeapAccess(accessType, needsBoundsCheck, outOfBoundsLabel, numSimdElems),
MAsmJSHeapAccess(accessType, needsBoundsCheck, numSimdElems),
barrierBefore_(before),
barrierAfter_(after)
{
@ -12255,16 +12281,16 @@ class MAsmJSLoadHeap
static MAsmJSLoadHeap *New(TempAllocator &alloc, Scalar::Type accessType,
MDefinition *ptr, bool needsBoundsCheck,
Label *outOfBoundsLabel = nullptr,
unsigned numSimdElems = 0,
MemoryBarrierBits barrierBefore = MembarNobits,
MemoryBarrierBits barrierAfter = MembarNobits)
{
return new(alloc) MAsmJSLoadHeap(accessType, ptr, needsBoundsCheck, outOfBoundsLabel,
return new(alloc) MAsmJSLoadHeap(accessType, ptr, needsBoundsCheck,
numSimdElems, barrierBefore, barrierAfter);
}
MDefinition *ptr() const { return getOperand(0); }
void replacePtr(MDefinition *newPtr) { replaceOperand(0, newPtr); }
MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
MemoryBarrierBits barrierAfter() const { return barrierAfter_; }
@ -12284,10 +12310,9 @@ class MAsmJSStoreHeap
MemoryBarrierBits barrierAfter_;
MAsmJSStoreHeap(Scalar::Type accessType, MDefinition *ptr, MDefinition *v, bool needsBoundsCheck,
Label *outOfBoundsLabel, unsigned numSimdElems,
MemoryBarrierBits before, MemoryBarrierBits after)
unsigned numSimdElems, MemoryBarrierBits before, MemoryBarrierBits after)
: MBinaryInstruction(ptr, v),
MAsmJSHeapAccess(accessType, needsBoundsCheck, outOfBoundsLabel, numSimdElems),
MAsmJSHeapAccess(accessType, needsBoundsCheck, numSimdElems),
barrierBefore_(before),
barrierAfter_(after)
{
@ -12300,16 +12325,16 @@ class MAsmJSStoreHeap
static MAsmJSStoreHeap *New(TempAllocator &alloc, Scalar::Type accessType,
MDefinition *ptr, MDefinition *v, bool needsBoundsCheck,
Label *outOfBoundsLabel = nullptr,
unsigned numSimdElems = 0,
MemoryBarrierBits barrierBefore = MembarNobits,
MemoryBarrierBits barrierAfter = MembarNobits)
{
return new(alloc) MAsmJSStoreHeap(accessType, ptr, v, needsBoundsCheck, outOfBoundsLabel,
return new(alloc) MAsmJSStoreHeap(accessType, ptr, v, needsBoundsCheck,
numSimdElems, barrierBefore, barrierAfter);
}
MDefinition *ptr() const { return getOperand(0); }
void replacePtr(MDefinition *newPtr) { replaceOperand(0, newPtr); }
MDefinition *value() const { return getOperand(1); }
MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
MemoryBarrierBits barrierAfter() const { return barrierAfter_; }

View File

@ -38,7 +38,8 @@ class MIRGenerator
public:
MIRGenerator(CompileCompartment *compartment, const JitCompileOptions &options,
TempAllocator *alloc, MIRGraph *graph,
CompileInfo *info, const OptimizationInfo *optimizationInfo);
CompileInfo *info, const OptimizationInfo *optimizationInfo,
Label *outOfBoundsLabel = nullptr, bool usesSignalHandlersForAsmJSOOB = false);
TempAllocator &alloc() {
return *alloc_;
@ -199,6 +200,9 @@ class MIRGenerator
// CodeGenerator::link).
ObjectVector nurseryObjects_;
Label *outOfBoundsLabel_;
bool usesSignalHandlersForAsmJSOOB_;
void addAbortedNewScriptPropertiesGroup(ObjectGroup *type);
void setForceAbort() {
shouldForceAbort_ = true;
@ -222,6 +226,21 @@ class MIRGenerator
const ObjectVector &nurseryObjects() const {
return nurseryObjects_;
}
Label *outOfBoundsLabel() const {
return outOfBoundsLabel_;
}
bool needsAsmJSBoundsCheckBranch(const MAsmJSHeapAccess *access) const {
// A heap access needs a bounds-check branch if we're not relying on signal
// handlers to catch errors, and if it's not proven to be within bounds.
// We use signal handlers on x64, but on x86 there isn't enough address
// space for a guard region.
#ifdef JS_CODEGEN_X64
if (usesSignalHandlersForAsmJSOOB_)
return false;
#endif
return access->needsBoundsCheck();
}
};
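// Sketch of the intended consumer, assuming a lowering hook of roughly this
// shape (names illustrative):
static void
LowerAsmJSLoad(MIRGenerator *gen, MAsmJSLoadHeap *mir)
{
    if (gen->needsAsmJSBoundsCheckBranch(mir)) {
        // Emit the explicit cmp/branch to the out-of-line OOB path.
    } else {
        // Rely on the x64 guard region; the signal handler recovers.
    }
    // ...emit the load itself...
}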
} // namespace jit

View File

@ -19,7 +19,8 @@ using mozilla::Swap;
MIRGenerator::MIRGenerator(CompileCompartment *compartment, const JitCompileOptions &options,
TempAllocator *alloc, MIRGraph *graph, CompileInfo *info,
const OptimizationInfo *optimizationInfo)
const OptimizationInfo *optimizationInfo,
Label *outOfBoundsLabel, bool usesSignalHandlersForAsmJSOOB)
: compartment(compartment),
info_(info),
optimizationInfo_(optimizationInfo),
@ -40,6 +41,8 @@ MIRGenerator::MIRGenerator(CompileCompartment *compartment, const JitCompileOpti
instrumentedProfiling_(false),
instrumentedProfilingIsCached_(false),
nurseryObjects_(*alloc),
outOfBoundsLabel_(outOfBoundsLabel),
usesSignalHandlersForAsmJSOOB_(usesSignalHandlersForAsmJSOOB),
options(options)
{ }
@ -406,11 +409,10 @@ MBasicBlock::inherit(TempAllocator &alloc, BytecodeAnalysis *analysis, MBasicBlo
MOZ_ASSERT(!entryResumePoint_);
// Propagate the caller resume point from the inherited block.
MResumePoint *callerResumePoint = pred ? pred->callerResumePoint() : nullptr;
callerResumePoint_ = pred ? pred->callerResumePoint() : nullptr;
// Create a resume point using our initial stack state.
entryResumePoint_ = new(alloc) MResumePoint(this, pc(), callerResumePoint,
MResumePoint::ResumeAt);
entryResumePoint_ = new(alloc) MResumePoint(this, pc(), MResumePoint::ResumeAt);
if (!entryResumePoint_->init(alloc))
return false;
@ -475,6 +477,8 @@ MBasicBlock::inheritResumePoint(MBasicBlock *pred)
MOZ_ASSERT(kind_ != PENDING_LOOP_HEADER);
MOZ_ASSERT(pred != nullptr);
callerResumePoint_ = pred->callerResumePoint();
if (!predecessors_.append(pred))
return false;
@ -495,8 +499,7 @@ MBasicBlock::initEntrySlots(TempAllocator &alloc)
discardResumePoint(entryResumePoint_);
// Create a resume point using our initial stack state.
entryResumePoint_ = MResumePoint::New(alloc, this, pc(), callerResumePoint(),
MResumePoint::ResumeAt);
entryResumePoint_ = MResumePoint::New(alloc, this, pc(), MResumePoint::ResumeAt);
if (!entryResumePoint_)
return false;
return true;

View File

@ -57,6 +57,8 @@ class MBasicBlock : public TempObject, public InlineListNode<MBasicBlock>
// This block cannot be reached by any means.
bool unreachable_;
MResumePoint *callerResumePoint_;
// Pushes a copy of a local variable or argument.
void pushVariable(uint32_t slot);
@ -545,11 +547,11 @@ class MBasicBlock : public TempObject, public InlineListNode<MBasicBlock>
discardResumePoint(outerResumePoint_);
outerResumePoint_ = nullptr;
}
MResumePoint *callerResumePoint() {
return entryResumePoint() ? entryResumePoint()->caller() : nullptr;
MResumePoint *callerResumePoint() const {
return callerResumePoint_;
}
void setCallerResumePoint(MResumePoint *caller) {
entryResumePoint()->setCaller(caller);
callerResumePoint_ = caller;
}
size_t numEntrySlots() const {
return entryResumePoint()->stackDepth();

View File

@ -583,6 +583,12 @@ static inline bool UseHardFpABI()
}
#endif
// See the comments above AsmJSMappedSize in AsmJSValidate.h for more info.
// TODO: Implement this for ARM. Note that it requires Codegen to respect the
// offset field of AsmJSHeapAccess.
static const size_t AsmJSCheckedImmediateRange = 0;
static const size_t AsmJSImmediateRange = 0;
} // namespace jit
} // namespace js

View File

@ -501,6 +501,12 @@ hasMultiAlias() {
return true;
}
// See the comments above AsmJSMappedSize in AsmJSValidate.h for more info.
// TODO: Implement this for MIPS. Note that it requires Codegen to respect the
// offset field of AsmJSHeapAccess.
static const size_t AsmJSCheckedImmediateRange = 0;
static const size_t AsmJSImmediateRange = 0;
} // namespace jit
} // namespace js

View File

@ -316,6 +316,9 @@ struct PatchedAbsoluteAddress
explicit PatchedAbsoluteAddress(const void *addr)
: addr(const_cast<void*>(addr))
{ }
explicit PatchedAbsoluteAddress(uintptr_t addr)
: addr(reinterpret_cast<void*>(addr))
{ }
};
// Specifies an address computed in the form of a register base and a constant,
@ -764,14 +767,25 @@ static const unsigned AsmJSNaN32GlobalDataOffset = 2 * sizeof(void*) + sizeof(do
// #ifdefery.
class AsmJSHeapAccess
{
#if defined(JS_CODEGEN_X64)
public:
enum WhatToDoOnOOB {
CarryOn, // loads return undefined, stores do nothing.
Throw // throw a RangeError
};
#endif
private:
uint32_t offset_;
uint32_t insnOffset_;
#if defined(JS_CODEGEN_X86)
uint8_t opLength_; // the length of the load/store instruction
#endif
#if defined(JS_CODEGEN_X64)
uint8_t offsetWithinWholeSimdVector_; // e.g., the offset of the Z lane within an XYZ access
bool throwOnOOB_; // should we throw on OOB?
#endif
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
uint8_t cmpDelta_; // the number of bytes from the cmp to the load/store instruction
uint8_t opLength_; // the length of the load/store instruction
uint8_t numSimdElems_; // the number of SIMD lanes to load/store at once
Scalar::Type type_;
AnyRegister::Code loadedReg_ : 8;
#endif
JS_STATIC_ASSERT(AnyRegister::Total < UINT8_MAX);
@ -780,63 +794,50 @@ class AsmJSHeapAccess
AsmJSHeapAccess() {}
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
static const uint32_t NoLengthCheck = UINT32_MAX;
#endif
// If 'cmp' equals 'offset' or if it is not supplied then the
#if defined(JS_CODEGEN_X86)
// If 'cmp' equals 'insnOffset' or if it is not supplied then the
// cmpDelta_ is zero indicating that there is no length to patch.
AsmJSHeapAccess(uint32_t offset, uint32_t after, Scalar::Type type, AnyRegister loadedReg,
uint32_t cmp = NoLengthCheck)
: offset_(offset),
cmpDelta_(cmp == NoLengthCheck ? 0 : offset - cmp),
opLength_(after - offset),
numSimdElems_(UINT8_MAX),
type_(type),
loadedReg_(loadedReg.code())
AsmJSHeapAccess(uint32_t insnOffset, uint32_t after, uint32_t cmp = NoLengthCheck)
: insnOffset_(insnOffset),
opLength_(after - insnOffset),
cmpDelta_(cmp == NoLengthCheck ? 0 : insnOffset - cmp)
{}
#elif defined(JS_CODEGEN_X64)
// If 'cmp' equals 'insnOffset' or if it is not supplied then the
// cmpDelta_ is zero indicating that there is no length to patch.
AsmJSHeapAccess(uint32_t insnOffset, WhatToDoOnOOB oob,
uint32_t cmp = NoLengthCheck,
uint32_t offsetWithinWholeSimdVector = 0)
: insnOffset_(insnOffset),
offsetWithinWholeSimdVector_(offsetWithinWholeSimdVector),
throwOnOOB_(oob == Throw),
cmpDelta_(cmp == NoLengthCheck ? 0 : insnOffset - cmp)
{
MOZ_ASSERT(!Scalar::isSimdType(type));
}
AsmJSHeapAccess(uint32_t offset, uint8_t after, Scalar::Type type, uint32_t cmp = NoLengthCheck)
: offset_(offset),
cmpDelta_(cmp == NoLengthCheck ? 0 : offset - cmp),
opLength_(after - offset),
numSimdElems_(UINT8_MAX),
type_(type),
loadedReg_(UINT8_MAX)
{
MOZ_ASSERT(!Scalar::isSimdType(type));
}
// SIMD loads / stores
AsmJSHeapAccess(uint32_t offset, uint32_t after, unsigned numSimdElems, Scalar::Type type,
uint32_t cmp = NoLengthCheck)
: offset_(offset),
cmpDelta_(cmp == NoLengthCheck ? 0 : offset - cmp),
opLength_(after - offset),
numSimdElems_(numSimdElems),
type_(type),
loadedReg_(UINT8_MAX)
{
MOZ_ASSERT(Scalar::isSimdType(type));
MOZ_ASSERT(offsetWithinWholeSimdVector_ == offsetWithinWholeSimdVector);
}
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS)
explicit AsmJSHeapAccess(uint32_t offset)
: offset_(offset)
explicit AsmJSHeapAccess(uint32_t insnOffset)
: insnOffset_(insnOffset)
{}
#endif
uint32_t offset() const { return offset_; }
void setOffset(uint32_t offset) { offset_ = offset; }
uint32_t insnOffset() const { return insnOffset_; }
void setInsnOffset(uint32_t insnOffset) { insnOffset_ = insnOffset; }
#if defined(JS_CODEGEN_X86)
void *patchOffsetAt(uint8_t *code) const { return code + (offset_ + opLength_); }
void *patchHeapPtrImmAt(uint8_t *code) const { return code + (insnOffset_ + opLength_); }
#endif
#if defined(JS_CODEGEN_X64)
unsigned opLength() const { MOZ_ASSERT(!Scalar::isSimdType(type_)); return opLength_; }
bool isLoad() const { MOZ_ASSERT(!Scalar::isSimdType(type_)); return loadedReg_ != UINT8_MAX; }
bool throwOnOOB() const { return throwOnOOB_; }
uint32_t offsetWithinWholeSimdVector() const { return offsetWithinWholeSimdVector_; }
#endif
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
bool hasLengthCheck() const { return cmpDelta_ > 0; }
void *patchLengthAt(uint8_t *code) const { return code + (offset_ - cmpDelta_); }
unsigned numSimdElems() const { MOZ_ASSERT(Scalar::isSimdType(type_)); return numSimdElems_; }
Scalar::Type type() const { return type_; }
AnyRegister loadedReg() const { return AnyRegister::FromCode(loadedReg_); }
void *patchLengthAt(uint8_t *code) const {
MOZ_ASSERT(hasLengthCheck());
return code + (insnOffset_ - cmpDelta_);
}
#endif
};

View File

@ -70,6 +70,10 @@ class Operand
: kind_(MEM_ADDRESS32),
disp_(X86Encoding::AddressImmediate(address.addr))
{ }
explicit Operand(PatchedAbsoluteAddress address)
: kind_(MEM_ADDRESS32),
disp_(X86Encoding::AddressImmediate(address.addr))
{ }
Address toAddress() const {
MOZ_ASSERT(kind() == MEM_REG_DISP);

View File

@ -186,8 +186,8 @@ CodeGeneratorShared::restoreLiveVolatile(LInstruction *ins)
void
CodeGeneratorShared::verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, bool isLoad,
Scalar::Type type, const Operand &mem,
LAllocation alloc)
Scalar::Type type, unsigned numElems,
const Operand &mem, LAllocation alloc)
{
#ifdef DEBUG
using namespace Disassembler;
@ -229,9 +229,11 @@ CodeGeneratorShared::verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, b
MOZ_CRASH("Unexpected array type");
}
size_t size = Scalar::isSimdType(type)
? Scalar::scalarByteSize(type) * numElems
: TypedArrayElemSize(type);
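// For example: a Scalar::Float32x4 access with numElems == 3 verifies a
// 4 * 3 = 12 byte access, whereas a plain Scalar::Float64 access verifies
// TypedArrayElemSize(Float64) == 8 bytes.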
masm.verifyHeapAccessDisassembly(begin, end,
HeapAccess(kind, TypedArrayElemSize(type),
ComplexAddress(mem), op));
HeapAccess(kind, size, ComplexAddress(mem), op));
#endif
}

View File

@ -549,8 +549,8 @@ class CodeGeneratorShared : public LElementVisitor
}
inline void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, bool isLoad,
Scalar::Type type, const Operand &mem,
LAllocation alloc);
Scalar::Type type, unsigned numElems,
const Operand &mem, LAllocation alloc);
};
// An out-of-line path is generated at the end of the function.

View File

@ -363,6 +363,82 @@ CodeGeneratorX86Shared::visitOutOfLineLoadTypedArrayOutOfBounds(OutOfLineLoadTyp
masm.jmp(ool->rejoin());
}
void
CodeGeneratorX86Shared::visitOffsetBoundsCheck(OffsetBoundsCheck *oolCheck)
{
// The access is heap[ptr + offset]. The inline code checks that
// ptr < heap.length - offset. We get here when that fails. We need to check
// for the case where ptr, taken as a signed value, is negative but
// ptr + offset >= 0 (the 32-bit addition wraps back into the heap), in
// which case the access is still in bounds.
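// Worked example (hypothetical values): with offset == 16 and
// heapLength == 0x10000, the inline check fails for ptr == 0xFFFFFFF8, but
// the unsigned compare against -offset == 0xFFFFFFF0 falls through here
// (0xFFFFFFF8 is not Below 0xFFFFFFF0), and ptr + offset wraps to 8, a
// valid heap index.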
MOZ_ASSERT(oolCheck->offset() != 0,
"An access without a constant offset doesn't need a separate OffsetBoundsCheck");
masm.cmp32(oolCheck->ptrReg(), Imm32(-uint32_t(oolCheck->offset())));
masm.j(Assembler::Below, oolCheck->outOfBounds());
#ifdef JS_CODEGEN_X64
// In order to get the offset to wrap properly, we must sign-extend the
// pointer to 32-bits. We'll zero out the sign extension immediately
// after the access to restore asm.js invariants.
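// E.g. a hypothetical ptr of 0xFFFFFFF8 sign-extends to the 64-bit value
// -8, so HeapReg + (-8) + offset addresses the wrapped location
// HeapReg + (ptr + offset - 2^32), as required.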
masm.movslq(oolCheck->ptrReg(), oolCheck->ptrReg());
#endif
masm.jmp(oolCheck->rejoin());
}
uint32_t
CodeGeneratorX86Shared::emitAsmJSBoundsCheckBranch(const MAsmJSHeapAccess *access,
const MInstruction *mir,
Register ptr, Label *fail)
{
// Emit a bounds-checking branch for |access|.
MOZ_ASSERT(gen->needsAsmJSBoundsCheckBranch(access));
Label *pass = nullptr;
// If we have a non-zero offset, it's possible that |ptr| itself is out of
// bounds, while adding the offset computes an in-bounds address. To catch
// this case, we need a second branch, which we emit out of line since it's
// unlikely to be needed in normal programs.
if (access->offset() != 0) {
OffsetBoundsCheck *oolCheck = new(alloc()) OffsetBoundsCheck(fail, ptr, access->offset());
fail = oolCheck->entry();
pass = oolCheck->rejoin();
addOutOfLineCode(oolCheck, mir);
}
// The bounds check is a comparison with an immediate value. The asm.js
// module linking process will add the length of the heap to the immediate
// field, so -access->endOffset() will turn into
// (heapLength - access->endOffset()), allowing us to test whether the end
// of the access is beyond the end of the heap.
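// Worked example (hypothetical 64 KiB heap): for access->endOffset() == 8
// the immediate is emitted as -8 (0xFFFFFFF8) and patched at link time to
// 0x10000 - 8 = 0xFFF8, so the unsigned 'Above' test sends exactly those
// ptr values whose access would run past the end of the heap to |fail|.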
uint32_t maybeCmpOffset = masm.cmp32WithPatch(ptr, Imm32(-access->endOffset())).offset();
masm.j(Assembler::Above, fail);
if (pass)
masm.bind(pass);
return maybeCmpOffset;
}
void
CodeGeneratorX86Shared::cleanupAfterAsmJSBoundsCheckBranch(const MAsmJSHeapAccess *access,
Register ptr)
{
// Clean up after performing a heap access checked by a branch.
MOZ_ASSERT(gen->needsAsmJSBoundsCheckBranch(access));
#ifdef JS_CODEGEN_X64
// If the offset is 0, we don't use an OffsetBoundsCheck.
if (access->offset() != 0) {
// Zero out the high 32 bits, in case the OffsetBoundsCheck code had to
// sign-extend (movslq) the pointer value to get wraparound to work.
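// E.g. a ptr sign-extended to 0xFFFFFFFFFFFFFFF8 becomes
// 0x00000000FFFFFFF8 again: a 32-bit movl implicitly zero-extends into
// the high bits.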
masm.movl(ptr, ptr);
}
#endif
}
bool
CodeGeneratorX86Shared::generateOutOfLineCode()
{

View File

@ -32,7 +32,6 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
void bailout(const T &t, LSnapshot *snapshot);
protected:
// Load a NaN or zero into a register for an out of bounds AsmJS or static
// typed array load.
class OutOfLineLoadTypedArrayOutOfBounds : public OutOfLineCodeBase<CodeGeneratorX86Shared>
@ -51,6 +50,31 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
}
};
// Additional bounds checking for heap accesses with constant offsets.
class OffsetBoundsCheck : public OutOfLineCodeBase<CodeGeneratorX86Shared>
{
Label *outOfBounds_;
Register ptrReg_;
int32_t offset_;
public:
OffsetBoundsCheck(Label *outOfBounds, Register ptrReg, int32_t offset)
: outOfBounds_(outOfBounds), ptrReg_(ptrReg), offset_(offset)
{}
Label *outOfBounds() const { return outOfBounds_; }
Register ptrReg() const { return ptrReg_; }
int32_t offset() const { return offset_; }
void accept(CodeGeneratorX86Shared *codegen) {
codegen->visitOffsetBoundsCheck(this);
}
};
// Functions for emitting bounds-checking code with branches.
MOZ_WARN_UNUSED_RESULT
uint32_t emitAsmJSBoundsCheckBranch(const MAsmJSHeapAccess *mir, const MInstruction *ins,
Register ptr, Label *fail);
void cleanupAfterAsmJSBoundsCheckBranch(const MAsmJSHeapAccess *mir, Register ptr);
// Label for the common return path.
NonAssertingLabel returnLabel_;
NonAssertingLabel deoptLabel_;
@ -214,6 +238,7 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
virtual void visitMemoryBarrier(LMemoryBarrier *ins);
void visitOutOfLineLoadTypedArrayOutOfBounds(OutOfLineLoadTypedArrayOutOfBounds *ool);
void visitOffsetBoundsCheck(OffsetBoundsCheck *oolCheck);
void visitNegI(LNegI *lir);
void visitNegD(LNegD *lir);

View File

@ -276,10 +276,10 @@ LIRGeneratorShared::useRegisterOrConstantAtStart(MDefinition *mir)
}
LAllocation
LIRGeneratorShared::useRegisterOrNonNegativeConstantAtStart(MDefinition *mir)
LIRGeneratorShared::useRegisterOrZeroAtStart(MDefinition *mir)
{
if (mir->isConstant() && mir->toConstant()->value().toInt32() >= 0)
return LAllocation(mir->toConstant()->vp());
if (mir->isConstant() && mir->toConstant()->value().isInt32(0))
return LAllocation();
return useRegisterAtStart(mir);
}
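// A constant zero pointer thus lowers to a bogus LAllocation. Consumers can
// test for this and drop the register entirely, as the x64 heap-access
// codegen does (sketch, assuming the LAsmJSLoadHeap shape used there):
//   Operand srcAddr = ptr->isBogus()
//                   ? Operand(HeapReg, mir->offset())
//                   : Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());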

View File

@ -108,7 +108,7 @@ class LIRGeneratorShared : public MDefinitionVisitor
inline LAllocation useKeepaliveOrConstant(MDefinition *mir);
inline LAllocation useRegisterOrConstant(MDefinition *mir);
inline LAllocation useRegisterOrConstantAtStart(MDefinition *mir);
inline LAllocation useRegisterOrNonNegativeConstantAtStart(MDefinition *mir);
inline LAllocation useRegisterOrZeroAtStart(MDefinition *mir);
inline LAllocation useRegisterOrNonDoubleConstant(MDefinition *mir);
inline LUse useRegisterForTypedLoad(MDefinition *mir, MIRType type);

View File

@ -262,6 +262,11 @@ hasMultiAlias()
return false;
}
// Support some constant-offset addressing.
// See the comments above AsmJSMappedSize in AsmJSValidate.h for more info.
static const size_t AsmJSCheckedImmediateRange = 4096;
static const size_t AsmJSImmediateRange = UINT32_C(0x80000000);
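// Illustrative reading (assuming the scheme described above AsmJSMappedSize):
// an access heap[i + K] with K < AsmJSCheckedImmediateRange can fold K into
// the patched bounds-check limit, while a K up to AsmJSImmediateRange may
// still be folded into the addressing mode because a faulting access lands
// in the large reserved-but-unmapped region past the heap and is caught by
// the signal handler.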
} // namespace jit
} // namespace js

View File

@ -298,94 +298,86 @@ CodeGeneratorX64::loadSimd(Scalar::Type type, unsigned numElems, const Operand &
void
CodeGeneratorX64::emitSimdLoad(LAsmJSLoadHeap *ins)
{
MAsmJSLoadHeap *mir = ins->mir();
const MAsmJSLoadHeap *mir = ins->mir();
Scalar::Type type = mir->accessType();
const LAllocation *ptr = ins->ptr();
FloatRegister out = ToFloatRegister(ins->output());
Operand srcAddr(HeapReg);
if (ptr->isConstant()) {
int32_t ptrImm = ptr->toConstant()->toInt32();
MOZ_ASSERT(ptrImm >= 0);
srcAddr = Operand(HeapReg, ptrImm);
} else {
srcAddr = Operand(HeapReg, ToRegister(ptr), TimesOne);
}
const LAllocation *ptr = ins->ptr();
Operand srcAddr = ptr->isBogus()
? Operand(HeapReg, mir->offset())
: Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
if (mir->needsBoundsCheck()) {
maybeCmpOffset = masm.cmp32WithPatch(ToRegister(ptr), Imm32(0)).offset();
masm.j(Assembler::AboveOrEqual, mir->outOfBoundsLabel()); // Throws RangeError
}
if (gen->needsAsmJSBoundsCheckBranch(mir))
maybeCmpOffset = emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ptr),
gen->outOfBoundsLabel());
unsigned numElems = mir->numSimdElems();
if (numElems == 3) {
MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4);
Operand shiftedOffset(HeapReg);
if (ptr->isConstant())
shiftedOffset = Operand(HeapReg, ptr->toConstant()->toInt32() + 2 * sizeof(float));
else
shiftedOffset = Operand(HeapReg, ToRegister(ptr), TimesOne, 2 * sizeof(float));
Operand srcAddrZ =
ptr->isBogus()
? Operand(HeapReg, 2 * sizeof(float) + mir->offset())
: Operand(HeapReg, ToRegister(ptr), TimesOne, 2 * sizeof(float) + mir->offset());
// Load XY
uint32_t before = masm.size();
loadSimd(type, 2, srcAddr, out);
uint32_t after = masm.size();
// We're noting a load of 3 elements, so that the bounds check checks
// for 3 elements.
masm.append(AsmJSHeapAccess(before, after, 3, type, maybeCmpOffset));
verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, type, 2, srcAddr,
*ins->output()->output());
masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw, maybeCmpOffset));
// Load Z (W is zeroed)
// This is still in bounds: either the manual bounds check above covered
// all three elements, or the bounds check was elided because there was
// provably enough space.
before = after;
loadSimd(type, 1, shiftedOffset, ScratchSimdReg);
loadSimd(type, 1, srcAddrZ, ScratchSimdReg);
after = masm.size();
masm.append(AsmJSHeapAccess(before, after, 1, type));
verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, type, 1, srcAddrZ, LFloatReg(ScratchSimdReg));
masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw,
AsmJSHeapAccess::NoLengthCheck, 8));
// Move ZW atop XY
masm.vmovlhps(ScratchSimdReg, out, out);
return;
} else {
uint32_t before = masm.size();
loadSimd(type, numElems, srcAddr, out);
uint32_t after = masm.size();
verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, type, numElems, srcAddr, *ins->output()->output());
masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw, maybeCmpOffset));
}
uint32_t before = masm.size();
loadSimd(type, numElems, srcAddr, out);
uint32_t after = masm.size();
masm.append(AsmJSHeapAccess(before, after, numElems, type, maybeCmpOffset));
if (maybeCmpOffset != AsmJSHeapAccess::NoLengthCheck)
cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
}
void
CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
{
MAsmJSLoadHeap *mir = ins->mir();
Scalar::Type vt = mir->accessType();
const LAllocation *ptr = ins->ptr();
const LDefinition *out = ins->output();
Operand srcAddr(HeapReg);
const MAsmJSLoadHeap *mir = ins->mir();
Scalar::Type accessType = mir->accessType();
if (Scalar::isSimdType(vt))
if (Scalar::isSimdType(accessType))
return emitSimdLoad(ins);
if (ptr->isConstant()) {
int32_t ptrImm = ptr->toConstant()->toInt32();
MOZ_ASSERT(ptrImm >= 0);
srcAddr = Operand(HeapReg, ptrImm);
} else {
srcAddr = Operand(HeapReg, ToRegister(ptr), TimesOne);
}
const LAllocation *ptr = ins->ptr();
const LDefinition *out = ins->output();
Operand srcAddr = ptr->isBogus()
? Operand(HeapReg, mir->offset())
: Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
memoryBarrier(ins->mir()->barrierBefore());
memoryBarrier(mir->barrierBefore());
OutOfLineLoadTypedArrayOutOfBounds *ool = nullptr;
uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
if (mir->needsBoundsCheck()) {
CodeOffsetLabel cmp = masm.cmp32WithPatch(ToRegister(ptr), Imm32(0));
ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), vt);
addOutOfLineCode(ool, ins->mir());
masm.j(Assembler::AboveOrEqual, ool->entry());
maybeCmpOffset = cmp.offset();
if (gen->needsAsmJSBoundsCheckBranch(mir)) {
ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), accessType);
addOutOfLineCode(ool, mir);
maybeCmpOffset = emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ptr), ool->entry());
}
uint32_t before = masm.size();
switch (vt) {
switch (accessType) {
case Scalar::Int8: masm.movsbl(srcAddr, ToRegister(out)); break;
case Scalar::Uint8: masm.movzbl(srcAddr, ToRegister(out)); break;
case Scalar::Int16: masm.movswl(srcAddr, ToRegister(out)); break;
@ -401,11 +393,13 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
MOZ_CRASH("unexpected array type");
}
uint32_t after = masm.size();
verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, vt, srcAddr, *out->output());
if (ool)
verifyHeapAccessDisassembly(before, after, /*isLoad=*/true, accessType, 0, srcAddr, *out->output());
if (ool) {
cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
masm.bind(ool->rejoin());
memoryBarrier(ins->mir()->barrierAfter());
masm.append(AsmJSHeapAccess(before, after, vt, ToAnyRegister(out), maybeCmpOffset));
}
memoryBarrier(mir->barrierAfter());
masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::CarryOn, maybeCmpOffset));
}
void
@ -452,98 +446,90 @@ CodeGeneratorX64::storeSimd(Scalar::Type type, unsigned numElems, FloatRegister
void
CodeGeneratorX64::emitSimdStore(LAsmJSStoreHeap *ins)
{
MAsmJSStoreHeap *mir = ins->mir();
const MAsmJSStoreHeap *mir = ins->mir();
Scalar::Type type = mir->accessType();
const LAllocation *ptr = ins->ptr();
FloatRegister in = ToFloatRegister(ins->value());
Operand dstAddr(HeapReg);
if (ptr->isConstant()) {
int32_t ptrImm = ptr->toConstant()->toInt32();
MOZ_ASSERT(ptrImm >= 0);
dstAddr = Operand(HeapReg, ptrImm);
} else {
dstAddr = Operand(HeapReg, ToRegister(ptr), TimesOne);
}
const LAllocation *ptr = ins->ptr();
Operand dstAddr = ptr->isBogus()
? Operand(HeapReg, mir->offset())
: Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
if (mir->needsBoundsCheck()) {
maybeCmpOffset = masm.cmp32WithPatch(ToRegister(ptr), Imm32(0)).offset();
masm.j(Assembler::AboveOrEqual, mir->outOfBoundsLabel()); // Throws RangeError
}
if (gen->needsAsmJSBoundsCheckBranch(mir))
maybeCmpOffset = emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ptr),
gen->outOfBoundsLabel());
unsigned numElems = mir->numSimdElems();
if (numElems == 3) {
MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4);
Operand shiftedOffset(HeapReg);
if (ptr->isConstant())
shiftedOffset = Operand(HeapReg, ptr->toConstant()->toInt32() + 2 * sizeof(float));
else
shiftedOffset = Operand(HeapReg, ToRegister(ptr), TimesOne, 2 * sizeof(float));
Operand dstAddrZ =
ptr->isBogus()
? Operand(HeapReg, 2 * sizeof(float) + mir->offset())
: Operand(HeapReg, ToRegister(ptr), TimesOne, 2 * sizeof(float) + mir->offset());
// Store Z first: it would be observable to store XY first, in the
// case XY can be stored in bounds but Z can't (in this case, we'd throw
// without restoring the values previously stored before XY).
// It's possible that the Z could be out of bounds when the XY is in
// bounds. To avoid storing the XY before the exception is thrown, we
// store the Z first, and record its offset in the AsmJSHeapAccess so
// that the signal handler knows to check the bounds of the full
// access, rather than just the Z.
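// Concretely (hypothetical values): for a 3-element store with
// ptr == heapLength - 8, XY at [heapLength-8, heapLength) is in bounds but
// Z at [heapLength, heapLength+4) faults; recording Z's 8-byte offset
// within the whole vector (the final argument below) lets the handler
// bounds-check the full 12-byte access.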
masm.vmovhlps(in, ScratchSimdReg, ScratchSimdReg);
uint32_t before = masm.size();
storeSimd(type, 1, ScratchSimdReg, shiftedOffset);
storeSimd(type, 1, ScratchSimdReg, dstAddrZ);
uint32_t after = masm.size();
// We're noting a store of 3 elements, so that the bounds check checks
// for 3 elements.
masm.append(AsmJSHeapAccess(before, after, 3, type, maybeCmpOffset));
verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, type, 1, dstAddrZ, LFloatReg(ScratchSimdReg));
masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw, maybeCmpOffset, 8));
// Store XY
before = after;
storeSimd(type, 2, in, dstAddr);
after = masm.size();
masm.append(AsmJSHeapAccess(before, after, 2, type));
return;
verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, type, 2, dstAddr, *ins->value());
masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw));
} else {
uint32_t before = masm.size();
storeSimd(type, numElems, in, dstAddr);
uint32_t after = masm.size();
verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, type, numElems, dstAddr, *ins->value());
masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::Throw, maybeCmpOffset));
}
uint32_t before = masm.size();
storeSimd(type, numElems, in, dstAddr);
uint32_t after = masm.size();
masm.append(AsmJSHeapAccess(before, after, numElems, type, maybeCmpOffset));
if (maybeCmpOffset != AsmJSHeapAccess::NoLengthCheck)
cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
}
void
CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
{
MAsmJSStoreHeap *mir = ins->mir();
Scalar::Type vt = mir->accessType();
const LAllocation *ptr = ins->ptr();
Operand dstAddr(HeapReg);
const MAsmJSStoreHeap *mir = ins->mir();
Scalar::Type accessType = mir->accessType();
if (Scalar::isSimdType(vt))
if (Scalar::isSimdType(accessType))
return emitSimdStore(ins);
if (ptr->isConstant()) {
int32_t ptrImm = ptr->toConstant()->toInt32();
MOZ_ASSERT(ptrImm >= 0);
dstAddr = Operand(HeapReg, ptrImm);
} else {
dstAddr = Operand(HeapReg, ToRegister(ptr), TimesOne);
}
const LAllocation *value = ins->value();
const LAllocation *ptr = ins->ptr();
Operand dstAddr = ptr->isBogus()
? Operand(HeapReg, mir->offset())
: Operand(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
memoryBarrier(ins->mir()->barrierBefore());
Label rejoin;
memoryBarrier(mir->barrierBefore());
Label *rejoin = nullptr;
uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
if (mir->needsBoundsCheck()) {
CodeOffsetLabel cmp = masm.cmp32WithPatch(ToRegister(ptr), Imm32(0));
masm.j(Assembler::AboveOrEqual, &rejoin);
maybeCmpOffset = cmp.offset();
if (gen->needsAsmJSBoundsCheckBranch(mir)) {
rejoin = alloc().lifoAlloc()->new_<Label>();
maybeCmpOffset = emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ptr), rejoin);
}
uint32_t before = masm.size();
if (ins->value()->isConstant()) {
switch (vt) {
if (value->isConstant()) {
switch (accessType) {
case Scalar::Int8:
case Scalar::Uint8: masm.movb(Imm32(ToInt32(ins->value())), dstAddr); break;
case Scalar::Uint8: masm.movb(Imm32(ToInt32(value)), dstAddr); break;
case Scalar::Int16:
case Scalar::Uint16: masm.movw(Imm32(ToInt32(ins->value())), dstAddr); break;
case Scalar::Uint16: masm.movw(Imm32(ToInt32(value)), dstAddr); break;
case Scalar::Int32:
case Scalar::Uint32: masm.movl(Imm32(ToInt32(ins->value())), dstAddr); break;
case Scalar::Uint32: masm.movl(Imm32(ToInt32(value)), dstAddr); break;
case Scalar::Float32:
case Scalar::Float64:
case Scalar::Float32x4:
@ -553,15 +539,15 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
MOZ_CRASH("unexpected array type");
}
} else {
switch (vt) {
switch (accessType) {
case Scalar::Int8:
case Scalar::Uint8: masm.movb(ToRegister(ins->value()), dstAddr); break;
case Scalar::Uint8: masm.movb(ToRegister(value), dstAddr); break;
case Scalar::Int16:
case Scalar::Uint16: masm.movw(ToRegister(ins->value()), dstAddr); break;
case Scalar::Uint16: masm.movw(ToRegister(value), dstAddr); break;
case Scalar::Int32:
case Scalar::Uint32: masm.movl(ToRegister(ins->value()), dstAddr); break;
case Scalar::Float32: masm.storeFloat32(ToFloatRegister(ins->value()), dstAddr); break;
case Scalar::Float64: masm.storeDouble(ToFloatRegister(ins->value()), dstAddr); break;
case Scalar::Uint32: masm.movl(ToRegister(value), dstAddr); break;
case Scalar::Float32: masm.storeFloat32(ToFloatRegister(value), dstAddr); break;
case Scalar::Float64: masm.storeDouble(ToFloatRegister(value), dstAddr); break;
case Scalar::Float32x4:
case Scalar::Int32x4: MOZ_CRASH("SIMD stores must be handled in emitSimdStore");
case Scalar::Uint8Clamped:
@ -570,39 +556,44 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
}
}
uint32_t after = masm.size();
verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, vt, dstAddr, *ins->value());
if (rejoin.used())
masm.bind(&rejoin);
memoryBarrier(ins->mir()->barrierAfter());
masm.append(AsmJSHeapAccess(before, after, vt, maybeCmpOffset));
verifyHeapAccessDisassembly(before, after, /*isLoad=*/false, accessType, 0, dstAddr, *value);
if (rejoin) {
cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
masm.bind(rejoin);
}
memoryBarrier(mir->barrierAfter());
masm.append(AsmJSHeapAccess(before, AsmJSHeapAccess::CarryOn, maybeCmpOffset));
}
void
CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
{
MAsmJSCompareExchangeHeap *mir = ins->mir();
Scalar::Type vt = mir->accessType();
Scalar::Type accessType = mir->accessType();
const LAllocation *ptr = ins->ptr();
MOZ_ASSERT(ptr->isRegister());
BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne);
BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
Register oldval = ToRegister(ins->oldValue());
Register newval = ToRegister(ins->newValue());
// Note that we can't use
// needsAsmJSBoundsCheckBranch/emitAsmJSBoundsCheckBranch/cleanupAfterAsmJSBoundsCheckBranch
// since signal-handler bounds checking is not yet implemented for atomic accesses.
Label rejoin;
uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
if (mir->needsBoundsCheck()) {
maybeCmpOffset = masm.cmp32WithPatch(ToRegister(ptr), Imm32(0)).offset();
maybeCmpOffset = masm.cmp32WithPatch(ToRegister(ptr), Imm32(-mir->endOffset())).offset();
Label goahead;
masm.j(Assembler::Below, &goahead);
masm.j(Assembler::BelowOrEqual, &goahead);
memoryBarrier(MembarFull);
Register out = ToRegister(ins->output());
masm.xorl(out, out);
masm.jmp(&rejoin);
masm.bind(&goahead);
}
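// Sketch of the intended behavior: an out-of-bounds atomic does not fault;
// it runs a full barrier, returns 0 through the xorl above, and jumps past
// the cmpxchg to rejoin.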
masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
srcAddr,
oldval,
newval,
@ -611,28 +602,34 @@ CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
uint32_t after = masm.size();
if (rejoin.used())
masm.bind(&rejoin);
masm.append(AsmJSHeapAccess(after, after, mir->accessType(), maybeCmpOffset));
MOZ_ASSERT(mir->offset() == 0,
"The AsmJS signal handler doesn't yet support emulating "
"atomic accesses in the case of a fault from an unwrapped offset");
masm.append(AsmJSHeapAccess(after, AsmJSHeapAccess::Throw, maybeCmpOffset));
}
void
CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
{
MAsmJSAtomicBinopHeap *mir = ins->mir();
Scalar::Type vt = mir->accessType();
Scalar::Type accessType = mir->accessType();
const LAllocation *ptr = ins->ptr();
Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
const LAllocation* value = ins->value();
AtomicOp op = mir->operation();
MOZ_ASSERT(ptr->isRegister());
BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne);
BaseIndex srcAddr(HeapReg, ToRegister(ptr), TimesOne, mir->offset());
// Note that we can't use
// needsAsmJSBoundsCheckBranch/emitAsmJSBoundsCheckBranch/cleanupAfterAsmJSBoundsCheckBranch
// since signal-handler bounds checking is not yet implemented for atomic accesses.
Label rejoin;
uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
if (mir->needsBoundsCheck()) {
maybeCmpOffset = masm.cmp32WithPatch(ToRegister(ptr), Imm32(0)).offset();
maybeCmpOffset = masm.cmp32WithPatch(ToRegister(ptr), Imm32(-mir->endOffset())).offset();
Label goahead;
masm.j(Assembler::Below, &goahead);
masm.j(Assembler::BelowOrEqual, &goahead);
memoryBarrier(MembarFull);
Register out = ToRegister(ins->output());
masm.xorl(out,out);
@ -640,14 +637,14 @@ CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
masm.bind(&goahead);
}
if (value->isConstant()) {
masm.atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
masm.atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
Imm32(ToInt32(value)),
srcAddr,
temp,
InvalidReg,
ToAnyRegister(ins->output()));
} else {
masm.atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
masm.atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
ToRegister(value),
srcAddr,
temp,
@ -657,7 +654,10 @@ CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
uint32_t after = masm.size();
if (rejoin.used())
masm.bind(&rejoin);
masm.append(AsmJSHeapAccess(after, after, mir->accessType(), maybeCmpOffset));
MOZ_ASSERT(mir->offset() == 0,
"The AsmJS signal handler doesn't yet support emulating "
"atomic accesses in the case of a fault from an unwrapped offset");
masm.append(AsmJSHeapAccess(after, AsmJSHeapAccess::Throw, maybeCmpOffset));
}
void

View File

@ -153,14 +153,11 @@ LIRGeneratorX64::visitAsmJSLoadHeap(MAsmJSLoadHeap *ins)
MDefinition *ptr = ins->ptr();
MOZ_ASSERT(ptr->type() == MIRType_Int32);
// Only a positive index is accepted because a negative offset encoded as an
// offset in the addressing mode would not wrap back into the protected area
// reserved for the heap. For simplicity (and since we don't care about
// getting maximum performance in these cases) only allow constant
// operands when skipping bounds checks.
LAllocation ptrAlloc = ins->needsBoundsCheck()
// For simplicity, require a register if we're going to emit a bounds-check
// branch, so that we don't have special cases for constants.
LAllocation ptrAlloc = gen->needsAsmJSBoundsCheckBranch(ins)
? useRegisterAtStart(ptr)
: useRegisterOrNonNegativeConstantAtStart(ptr);
: useRegisterOrZeroAtStart(ptr);
define(new(alloc()) LAsmJSLoadHeap(ptrAlloc), ins);
}
@ -171,14 +168,11 @@ LIRGeneratorX64::visitAsmJSStoreHeap(MAsmJSStoreHeap *ins)
MDefinition *ptr = ins->ptr();
MOZ_ASSERT(ptr->type() == MIRType_Int32);
// Only a positive index is accepted because a negative offset encoded as an
// offset in the addressing mode would not wrap back into the protected area
// reserved for the heap. For simplicity (and since we don't care about
// getting maximum performance in these cases) only allow constant
// operands when skipping bounds checks.
LAllocation ptrAlloc = ins->needsBoundsCheck()
// For simplicity, require a register if we're going to emit a bounds-check
// branch, so that we don't have special cases for constants.
LAllocation ptrAlloc = gen->needsAsmJSBoundsCheckBranch(ins)
? useRegisterAtStart(ptr)
: useRegisterOrNonNegativeConstantAtStart(ptr);
: useRegisterOrZeroAtStart(ptr);
LAsmJSStoreHeap *lir = nullptr; // initialize to silence GCC warning
switch (ins->accessType()) {
@ -200,7 +194,6 @@ LIRGeneratorX64::visitAsmJSStoreHeap(MAsmJSStoreHeap *ins)
case Scalar::MaxTypedArrayViewType:
MOZ_CRASH("unexpected array type");
}
add(lir, ins);
}

View File

@ -239,6 +239,11 @@ hasMultiAlias()
return false;
}
// Support some constant-offset addressing.
// See the comments above AsmJSMappedSize in AsmJSValidate.h for more info.
static const size_t AsmJSCheckedImmediateRange = 4096;
static const size_t AsmJSImmediateRange = UINT32_C(0x80000000);
} // namespace jit
} // namespace js

View File

@ -442,98 +442,278 @@ class Assembler : public AssemblerX86Shared
}
// Load from *(base + disp32) where disp32 can be patched.
CodeOffsetLabel movsblWithPatch(Address src, Register dest) {
masm.movsbl_mr_disp32(src.offset, src.base.code(), dest.code());
CodeOffsetLabel movsblWithPatch(const Operand &src, Register dest) {
switch (src.kind()) {
case Operand::MEM_REG_DISP:
masm.movsbl_mr_disp32(src.disp(), src.base(), dest.code());
break;
case Operand::MEM_ADDRESS32:
masm.movsbl_mr(src.address(), dest.code());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel movzblWithPatch(Address src, Register dest) {
masm.movzbl_mr_disp32(src.offset, src.base.code(), dest.code());
CodeOffsetLabel movzblWithPatch(const Operand &src, Register dest) {
switch (src.kind()) {
case Operand::MEM_REG_DISP:
masm.movzbl_mr_disp32(src.disp(), src.base(), dest.code());
break;
case Operand::MEM_ADDRESS32:
masm.movzbl_mr(src.address(), dest.code());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel movswlWithPatch(Address src, Register dest) {
masm.movswl_mr_disp32(src.offset, src.base.code(), dest.code());
CodeOffsetLabel movswlWithPatch(const Operand &src, Register dest) {
switch (src.kind()) {
case Operand::MEM_REG_DISP:
masm.movswl_mr_disp32(src.disp(), src.base(), dest.code());
break;
case Operand::MEM_ADDRESS32:
masm.movswl_mr(src.address(), dest.code());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel movzwlWithPatch(Address src, Register dest) {
masm.movzwl_mr_disp32(src.offset, src.base.code(), dest.code());
CodeOffsetLabel movzwlWithPatch(const Operand &src, Register dest) {
switch (src.kind()) {
case Operand::MEM_REG_DISP:
masm.movzwl_mr_disp32(src.disp(), src.base(), dest.code());
break;
case Operand::MEM_ADDRESS32:
masm.movzwl_mr(src.address(), dest.code());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel movlWithPatch(Address src, Register dest) {
masm.movl_mr_disp32(src.offset, src.base.code(), dest.code());
CodeOffsetLabel movlWithPatch(const Operand &src, Register dest) {
switch (src.kind()) {
case Operand::MEM_REG_DISP:
masm.movl_mr_disp32(src.disp(), src.base(), dest.code());
break;
case Operand::MEM_ADDRESS32:
masm.movl_mr(src.address(), dest.code());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel vmovssWithPatch(Address src, FloatRegister dest) {
CodeOffsetLabel vmovssWithPatch(const Operand &src, FloatRegister dest) {
MOZ_ASSERT(HasSSE2());
masm.vmovss_mr_disp32(src.offset, src.base.code(), dest.code());
switch (src.kind()) {
case Operand::MEM_REG_DISP:
masm.vmovss_mr_disp32(src.disp(), src.base(), dest.code());
break;
case Operand::MEM_ADDRESS32:
masm.vmovss_mr(src.address(), dest.code());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel vmovdWithPatch(Address src, FloatRegister dest) {
CodeOffsetLabel vmovdWithPatch(const Operand &src, FloatRegister dest) {
MOZ_ASSERT(HasSSE2());
masm.vmovd_mr_disp32(src.offset, src.base.code(), dest.code());
switch (src.kind()) {
case Operand::MEM_REG_DISP:
masm.vmovd_mr_disp32(src.disp(), src.base(), dest.code());
break;
case Operand::MEM_ADDRESS32:
masm.vmovd_mr(src.address(), dest.code());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel vmovqWithPatch(Address src, FloatRegister dest) {
CodeOffsetLabel vmovqWithPatch(const Operand &src, FloatRegister dest) {
MOZ_ASSERT(HasSSE2());
masm.vmovq_mr_disp32(src.offset, src.base.code(), dest.code());
switch (src.kind()) {
case Operand::MEM_REG_DISP:
masm.vmovq_mr_disp32(src.disp(), src.base(), dest.code());
break;
case Operand::MEM_ADDRESS32:
masm.vmovq_mr(src.address(), dest.code());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel vmovsdWithPatch(Address src, FloatRegister dest) {
CodeOffsetLabel vmovsdWithPatch(const Operand &src, FloatRegister dest) {
MOZ_ASSERT(HasSSE2());
masm.vmovsd_mr_disp32(src.offset, src.base.code(), dest.code());
switch (src.kind()) {
case Operand::MEM_REG_DISP:
masm.vmovsd_mr_disp32(src.disp(), src.base(), dest.code());
break;
case Operand::MEM_ADDRESS32:
masm.vmovsd_mr(src.address(), dest.code());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel vmovupsWithPatch(Address src, FloatRegister dest) {
CodeOffsetLabel vmovupsWithPatch(const Operand &src, FloatRegister dest) {
MOZ_ASSERT(HasSSE2());
masm.vmovups_mr_disp32(src.offset, src.base.code(), dest.code());
switch (src.kind()) {
case Operand::MEM_REG_DISP:
masm.vmovups_mr_disp32(src.disp(), src.base(), dest.code());
break;
case Operand::MEM_ADDRESS32:
masm.vmovups_mr(src.address(), dest.code());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel vmovdquWithPatch(Address src, FloatRegister dest) {
CodeOffsetLabel vmovdquWithPatch(const Operand &src, FloatRegister dest) {
MOZ_ASSERT(HasSSE2());
masm.vmovdqu_mr_disp32(src.offset, src.base.code(), dest.code());
switch (src.kind()) {
case Operand::MEM_REG_DISP:
masm.vmovdqu_mr_disp32(src.disp(), src.base(), dest.code());
break;
case Operand::MEM_ADDRESS32:
masm.vmovdqu_mr(src.address(), dest.code());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
return CodeOffsetLabel(masm.currentOffset());
}
// Store to *(base + disp32) where disp32 can be patched.
CodeOffsetLabel movbWithPatch(Register src, Address dest) {
masm.movb_rm_disp32(src.code(), dest.offset, dest.base.code());
CodeOffsetLabel movbWithPatch(Register src, const Operand &dest) {
switch (dest.kind()) {
case Operand::MEM_REG_DISP:
masm.movb_rm_disp32(src.code(), dest.disp(), dest.base());
break;
case Operand::MEM_ADDRESS32:
masm.movb_rm(src.code(), dest.address());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel movwWithPatch(Register src, Address dest) {
masm.movw_rm_disp32(src.code(), dest.offset, dest.base.code());
CodeOffsetLabel movwWithPatch(Register src, const Operand &dest) {
switch (dest.kind()) {
case Operand::MEM_REG_DISP:
masm.movw_rm_disp32(src.code(), dest.disp(), dest.base());
break;
case Operand::MEM_ADDRESS32:
masm.movw_rm(src.code(), dest.address());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel movlWithPatch(Register src, Address dest) {
masm.movl_rm_disp32(src.code(), dest.offset, dest.base.code());
CodeOffsetLabel movlWithPatch(Register src, const Operand &dest) {
switch (dest.kind()) {
case Operand::MEM_REG_DISP:
masm.movl_rm_disp32(src.code(), dest.disp(), dest.base());
break;
case Operand::MEM_ADDRESS32:
masm.movl_rm(src.code(), dest.address());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel vmovdWithPatch(FloatRegister src, Address dest) {
CodeOffsetLabel vmovdWithPatch(FloatRegister src, const Operand &dest) {
MOZ_ASSERT(HasSSE2());
masm.vmovd_rm_disp32(src.code(), dest.offset, dest.base.code());
switch (dest.kind()) {
case Operand::MEM_REG_DISP:
masm.vmovd_rm_disp32(src.code(), dest.disp(), dest.base());
break;
case Operand::MEM_ADDRESS32:
masm.vmovd_rm(src.code(), dest.address());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel vmovqWithPatch(FloatRegister src, Address dest) {
CodeOffsetLabel vmovqWithPatch(FloatRegister src, const Operand &dest) {
MOZ_ASSERT(HasSSE2());
masm.vmovq_rm_disp32(src.code(), dest.offset, dest.base.code());
switch (dest.kind()) {
case Operand::MEM_REG_DISP:
masm.vmovq_rm_disp32(src.code(), dest.disp(), dest.base());
break;
case Operand::MEM_ADDRESS32:
masm.vmovq_rm(src.code(), dest.address());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel vmovssWithPatch(FloatRegister src, Address dest) {
CodeOffsetLabel vmovssWithPatch(FloatRegister src, const Operand &dest) {
MOZ_ASSERT(HasSSE2());
masm.vmovss_rm_disp32(src.code(), dest.offset, dest.base.code());
switch (dest.kind()) {
case Operand::MEM_REG_DISP:
masm.vmovss_rm_disp32(src.code(), dest.disp(), dest.base());
break;
case Operand::MEM_ADDRESS32:
masm.vmovss_rm(src.code(), dest.address());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel vmovsdWithPatch(FloatRegister src, Address dest) {
CodeOffsetLabel vmovsdWithPatch(FloatRegister src, const Operand &dest) {
MOZ_ASSERT(HasSSE2());
masm.vmovsd_rm_disp32(src.code(), dest.offset, dest.base.code());
switch (dest.kind()) {
case Operand::MEM_REG_DISP:
masm.vmovsd_rm_disp32(src.code(), dest.disp(), dest.base());
break;
case Operand::MEM_ADDRESS32:
masm.vmovsd_rm(src.code(), dest.address());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel vmovupsWithPatch(FloatRegister src, Address dest) {
CodeOffsetLabel vmovupsWithPatch(FloatRegister src, const Operand &dest) {
MOZ_ASSERT(HasSSE2());
masm.vmovups_rm_disp32(src.code(), dest.offset, dest.base.code());
switch (dest.kind()) {
case Operand::MEM_REG_DISP:
masm.vmovups_rm_disp32(src.code(), dest.disp(), dest.base());
break;
case Operand::MEM_ADDRESS32:
masm.vmovups_rm(src.code(), dest.address());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
return CodeOffsetLabel(masm.currentOffset());
}
CodeOffsetLabel vmovdquWithPatch(FloatRegister src, Address dest) {
CodeOffsetLabel vmovdquWithPatch(FloatRegister src, const Operand &dest) {
MOZ_ASSERT(HasSSE2());
masm.vmovdqu_rm_disp32(src.code(), dest.offset, dest.base.code());
switch (dest.kind()) {
case Operand::MEM_REG_DISP:
masm.vmovdqu_rm_disp32(src.code(), dest.disp(), dest.base());
break;
case Operand::MEM_ADDRESS32:
masm.vmovdqu_rm(src.code(), dest.address());
break;
default:
MOZ_CRASH("unexpected operand kind");
}
return CodeOffsetLabel(masm.currentOffset());
}

View File

@ -255,11 +255,10 @@ CodeGeneratorX86::visitAsmJSUInt32ToFloat32(LAsmJSUInt32ToFloat32 *lir)
masm.convertUInt32ToFloat32(temp, output);
}
template<typename T>
void
CodeGeneratorX86::load(Scalar::Type vt, const T &srcAddr, const LDefinition *out)
CodeGeneratorX86::load(Scalar::Type accessType, const Operand &srcAddr, const LDefinition *out)
{
switch (vt) {
switch (accessType) {
case Scalar::Int8: masm.movsblWithPatch(srcAddr, ToRegister(out)); break;
case Scalar::Uint8Clamped:
case Scalar::Uint8: masm.movzblWithPatch(srcAddr, ToRegister(out)); break;
@ -275,23 +274,12 @@ CodeGeneratorX86::load(Scalar::Type vt, const T &srcAddr, const LDefinition *out
}
}
template<typename T>
void
CodeGeneratorX86::loadAndNoteViewTypeElement(Scalar::Type vt, const T &srcAddr,
const LDefinition *out)
{
uint32_t before = masm.size();
load(vt, srcAddr, out);
uint32_t after = masm.size();
masm.append(AsmJSHeapAccess(before, after, vt, ToAnyRegister(out)));
}
void
CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic *ins)
{
const MLoadTypedArrayElementStatic *mir = ins->mir();
Scalar::Type vt = mir->accessType();
MOZ_ASSERT_IF(vt == Scalar::Float32, mir->type() == MIRType_Float32);
Scalar::Type accessType = mir->accessType();
MOZ_ASSERT_IF(accessType == Scalar::Float32, mir->type() == MIRType_Float32);
Register ptr = ToRegister(ins->ptr());
const LDefinition *out = ins->output();
@ -301,7 +289,7 @@ CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic
if (mir->needsBoundsCheck()) {
MOZ_ASSERT(offset == 0);
if (!mir->fallible()) {
ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), vt);
ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), accessType);
addOutOfLineCode(ool, ins->mir());
}
@ -312,11 +300,11 @@ CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic
bailoutIf(Assembler::AboveOrEqual, ins->snapshot());
}
Address srcAddr(ptr, int32_t(mir->base()) + int32_t(offset));
load(vt, srcAddr, out);
if (vt == Scalar::Float64)
Operand srcAddr(ptr, int32_t(mir->base()) + int32_t(offset));
load(accessType, srcAddr, out);
if (accessType == Scalar::Float64)
masm.canonicalizeDouble(ToFloatRegister(out));
if (vt == Scalar::Float32)
if (accessType == Scalar::Float32)
masm.canonicalizeFloat(ToFloatRegister(out));
if (ool)
masm.bind(ool->rejoin());
@ -354,9 +342,9 @@ CodeGeneratorX86::memoryBarrier(MemoryBarrierBits barrier)
masm.storeLoadFence();
}
template<typename T>
void
CodeGeneratorX86::loadSimd(Scalar::Type type, unsigned numElems, T srcAddr, FloatRegister out)
CodeGeneratorX86::loadSimd(Scalar::Type type, unsigned numElems, const Operand &srcAddr,
FloatRegister out)
{
switch (type) {
case Scalar::Float32x4: {
@ -374,7 +362,7 @@ CodeGeneratorX86::loadSimd(Scalar::Type type, unsigned numElems, T srcAddr, Floa
switch (numElems) {
// In memory-to-register mode, movd zeroes out the high lanes.
case 1: masm.vmovdWithPatch(srcAddr, out); break;
// See comment above, which also applies to movsd.
// See comment above, which also applies to movq.
case 2: masm.vmovqWithPatch(srcAddr, out); break;
case 4: masm.vmovdquWithPatch(srcAddr, out); break;
default: MOZ_CRASH("unexpected size for partial load");
@ -396,77 +384,55 @@ CodeGeneratorX86::loadSimd(Scalar::Type type, unsigned numElems, T srcAddr, Floa
}
void
CodeGeneratorX86::emitSimdLoad(Scalar::Type type, unsigned numElems, const LAllocation *ptr,
FloatRegister out, bool needsBoundsCheck /* = false */,
Label *oobLabel /* = nullptr */)
CodeGeneratorX86::emitSimdLoad(LAsmJSLoadHeap *ins)
{
if (ptr->isConstant()) {
MOZ_ASSERT(!needsBoundsCheck);
const MAsmJSLoadHeap *mir = ins->mir();
Scalar::Type type = mir->accessType();
FloatRegister out = ToFloatRegister(ins->output());
const LAllocation *ptr = ins->ptr();
Operand srcAddr = ptr->isBogus()
? Operand(PatchedAbsoluteAddress(mir->offset()))
: Operand(ToRegister(ptr), mir->offset());
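// For a bogus (constant-zero) pointer the displacement starts out as just
// mir->offset(); dynamic linking later adds the heap base into the patched
// absolute address, e.g. a hypothetical base of 0x20000000 with offset 16
// yields a final disp32 of 0x20000010.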
if (numElems == 3) {
MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4);
// Load XY
emitSimdLoad(type, 2, ptr, out);
// Load Z (W is zeroed)
// This add won't overflow, as we've checked that we have at least
// room for loading 4 elements during asm.js validation.
PatchedAbsoluteAddress srcAddr((void *) (ptr->toConstant()->toInt32() + 2 * sizeof(float)));
uint32_t before = masm.size();
loadSimd(type, 1, srcAddr, ScratchSimdReg);
uint32_t after = masm.size();
masm.append(AsmJSHeapAccess(before, after, 1, type));
// Move ZW atop XY
masm.vmovlhps(ScratchSimdReg, out, out);
return;
}
PatchedAbsoluteAddress srcAddr((void *) ptr->toConstant()->toInt32());
uint32_t before = masm.size();
loadSimd(type, numElems, srcAddr, out);
uint32_t after = masm.size();
masm.append(AsmJSHeapAccess(before, after, numElems, type));
return;
}
Register ptrReg = ToRegister(ptr);
uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
if (needsBoundsCheck) {
maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(0)).offset();
masm.j(Assembler::AboveOrEqual, oobLabel); // Throws RangeError
}
if (gen->needsAsmJSBoundsCheckBranch(mir))
maybeCmpOffset = emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ptr),
gen->outOfBoundsLabel());
uint32_t before = masm.size();
unsigned numElems = mir->numSimdElems();
if (numElems == 3) {
MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4);
Operand srcAddrZ =
ptr->isBogus()
? Operand(PatchedAbsoluteAddress(2 * sizeof(float) + mir->offset()))
: Operand(ToRegister(ptr), 2 * sizeof(float) + mir->offset());
// Load XY
Address addr(ptrReg, 0);
before = masm.size();
loadSimd(type, 2, addr, out);
uint32_t before = masm.size();
loadSimd(type, 2, srcAddr, out);
uint32_t after = masm.size();
masm.append(AsmJSHeapAccess(before, after, 3, type, maybeCmpOffset));
masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
// Load Z (W is zeroed)
// This is still in bounds: either the manual bounds check above covered
// all three elements, or the bounds check was elided because there was
// provably enough space.
Address shiftedAddr(ptrReg, 2 * sizeof(float));
before = after;
loadSimd(type, 1, shiftedAddr, ScratchSimdReg);
loadSimd(type, 1, srcAddrZ, ScratchSimdReg);
after = masm.size();
masm.append(AsmJSHeapAccess(before, after, 1, type));
masm.append(AsmJSHeapAccess(before, after));
// Move ZW atop XY
masm.vmovlhps(ScratchSimdReg, out, out);
return;
} else {
uint32_t before = masm.size();
loadSimd(type, numElems, srcAddr, out);
uint32_t after = masm.size();
masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
}
Address addr(ptrReg, 0);
loadSimd(type, numElems, addr, out);
uint32_t after = masm.size();
masm.append(AsmJSHeapAccess(before, after, numElems, type, maybeCmpOffset));
if (maybeCmpOffset != AsmJSHeapAccess::NoLengthCheck)
cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
}
void
@ -474,56 +440,40 @@ CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
{
const MAsmJSLoadHeap *mir = ins->mir();
Scalar::Type accessType = mir->accessType();
if (Scalar::isSimdType(accessType))
return emitSimdLoad(ins);
const LAllocation *ptr = ins->ptr();
const LDefinition *out = ins->output();
Operand srcAddr = ptr->isBogus()
? Operand(PatchedAbsoluteAddress(mir->offset()))
: Operand(ToRegister(ptr), mir->offset());
if (Scalar::isSimdType(accessType)) {
return emitSimdLoad(accessType, mir->numSimdElems(), ptr, ToFloatRegister(out),
mir->needsBoundsCheck(), mir->outOfBoundsLabel());
memoryBarrier(mir->barrierBefore());
OutOfLineLoadTypedArrayOutOfBounds *ool = nullptr;
uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
if (gen->needsAsmJSBoundsCheckBranch(mir)) {
ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), accessType);
addOutOfLineCode(ool, mir);
maybeCmpOffset = emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ptr), ool->entry());
}
memoryBarrier(ins->mir()->barrierBefore());
if (ptr->isConstant()) {
// The constant displacement still needs to be added to the as-yet-unknown
// base address of the heap. For now, embed the displacement as an
// immediate in the instruction. This displacement will be fixed up when the
// base address is known during dynamic linking (AsmJSModule::initHeap).
PatchedAbsoluteAddress srcAddr((void *) ptr->toConstant()->toInt32());
loadAndNoteViewTypeElement(accessType, srcAddr, out);
memoryBarrier(ins->mir()->barrierAfter());
return;
}
Register ptrReg = ToRegister(ptr);
Address srcAddr(ptrReg, 0);
if (!mir->needsBoundsCheck()) {
loadAndNoteViewTypeElement(accessType, srcAddr, out);
memoryBarrier(ins->mir()->barrierAfter());
return;
}
OutOfLineLoadTypedArrayOutOfBounds *ool =
new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(ToAnyRegister(out), accessType);
CodeOffsetLabel cmp = masm.cmp32WithPatch(ptrReg, Imm32(0));
addOutOfLineCode(ool, mir);
masm.j(Assembler::AboveOrEqual, ool->entry());
uint32_t before = masm.size();
load(accessType, srcAddr, out);
uint32_t after = masm.size();
if (ool)
if (ool) {
cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
masm.bind(ool->rejoin());
memoryBarrier(ins->mir()->barrierAfter());
masm.append(AsmJSHeapAccess(before, after, accessType, ToAnyRegister(out), cmp.offset()));
}
memoryBarrier(mir->barrierAfter());
masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
}
template<typename T>
void
CodeGeneratorX86::store(Scalar::Type vt, const LAllocation *value, const T &dstAddr)
CodeGeneratorX86::store(Scalar::Type accessType, const LAllocation *value, const Operand &dstAddr)
{
switch (vt) {
switch (accessType) {
case Scalar::Int8:
case Scalar::Uint8Clamped:
case Scalar::Uint8: masm.movbWithPatch(ToRegister(value), dstAddr); break;
@ -539,29 +489,18 @@ CodeGeneratorX86::store(Scalar::Type vt, const LAllocation *value, const T &dstA
}
}
template<typename T>
void
CodeGeneratorX86::storeAndNoteViewTypeElement(Scalar::Type vt, const LAllocation *value,
const T &dstAddr)
{
uint32_t before = masm.size();
store(vt, value, dstAddr);
uint32_t after = masm.size();
masm.append(AsmJSHeapAccess(before, after, vt));
}
void
CodeGeneratorX86::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic *ins)
{
MStoreTypedArrayElementStatic *mir = ins->mir();
Scalar::Type vt = mir->accessType();
Scalar::Type accessType = mir->accessType();
Register ptr = ToRegister(ins->ptr());
const LAllocation *value = ins->value();
uint32_t offset = mir->offset();
if (!mir->needsBoundsCheck()) {
Address dstAddr(ptr, int32_t(mir->base()) + int32_t(offset));
store(vt, value, dstAddr);
Operand dstAddr(ptr, int32_t(mir->base()) + int32_t(offset));
store(accessType, value, dstAddr);
return;
}
@ -570,34 +509,34 @@ CodeGeneratorX86::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStati
Label rejoin;
masm.j(Assembler::AboveOrEqual, &rejoin);
Address dstAddr(ptr, (int32_t) mir->base());
store(vt, value, dstAddr);
Operand dstAddr(ptr, (int32_t) mir->base());
store(accessType, value, dstAddr);
masm.bind(&rejoin);
}
template<typename T>
void
CodeGeneratorX86::storeSimd(Scalar::Type type, unsigned numElems, FloatRegister in, T destAddr)
CodeGeneratorX86::storeSimd(Scalar::Type type, unsigned numElems, FloatRegister in,
const Operand &dstAddr)
{
switch (type) {
case Scalar::Float32x4: {
switch (numElems) {
// In memory-to-register mode, movss zeroes out the high lanes.
case 1: masm.vmovssWithPatch(in, destAddr); break;
case 1: masm.vmovssWithPatch(in, dstAddr); break;
// See comment above, which also applies to movsd.
case 2: masm.vmovsdWithPatch(in, destAddr); break;
case 4: masm.vmovupsWithPatch(in, destAddr); break;
case 2: masm.vmovsdWithPatch(in, dstAddr); break;
case 4: masm.vmovupsWithPatch(in, dstAddr); break;
default: MOZ_CRASH("unexpected size for partial load");
}
break;
}
case Scalar::Int32x4: {
switch (numElems) {
// In memory-to-register mode, movd zeroes destAddr the high lanes.
case 1: masm.vmovdWithPatch(in, destAddr); break;
// In memory-to-register mode, movd zeroes out the high lanes.
case 1: masm.vmovdWithPatch(in, dstAddr); break;
// See comment above, which also applies to movq.
case 2: masm.vmovqWithPatch(in, destAddr); break;
case 4: masm.vmovdquWithPatch(in, destAddr); break;
case 2: masm.vmovqWithPatch(in, dstAddr); break;
case 4: masm.vmovdquWithPatch(in, dstAddr); break;
default: MOZ_CRASH("unexpected size for partial load");
}
break;
@ -617,129 +556,95 @@ CodeGeneratorX86::storeSimd(Scalar::Type type, unsigned numElems, FloatRegister
}
void
CodeGeneratorX86::emitSimdStore(Scalar::Type type, unsigned numElems, FloatRegister in,
const LAllocation *ptr, bool needsBoundsCheck /* = false */,
Label *oobLabel /* = nullptr */)
CodeGeneratorX86::emitSimdStore(LAsmJSStoreHeap *ins)
{
if (ptr->isConstant()) {
MOZ_ASSERT(!needsBoundsCheck);
const MAsmJSStoreHeap *mir = ins->mir();
Scalar::Type type = mir->accessType();
FloatRegister in = ToFloatRegister(ins->value());
const LAllocation *ptr = ins->ptr();
Operand dstAddr = ptr->isBogus()
? Operand(PatchedAbsoluteAddress(mir->offset()))
: Operand(ToRegister(ptr), mir->offset());
if (numElems == 3) {
MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4);
// Store XY
emitSimdStore(type, 2, in, ptr);
masm.vmovhlps(in, ScratchSimdReg, ScratchSimdReg);
// Store Z
// This add won't overflow, as we've checked that we have at least
// room for loading 4 elements during asm.js validation.
PatchedAbsoluteAddress dstAddr((void *) (ptr->toConstant()->toInt32() + 2 * sizeof(float)));
uint32_t before = masm.size();
storeSimd(type, 1, ScratchSimdReg, dstAddr);
uint32_t after = masm.size();
masm.append(AsmJSHeapAccess(before, after, 1, type));
return;
}
PatchedAbsoluteAddress dstAddr((void *) ptr->toConstant()->toInt32());
uint32_t before = masm.size();
storeSimd(type, numElems, in, dstAddr);
uint32_t after = masm.size();
masm.append(AsmJSHeapAccess(before, after, 3, type));
return;
}
Register ptrReg = ToRegister(ptr);
uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
if (needsBoundsCheck) {
maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(0)).offset();
masm.j(Assembler::AboveOrEqual, oobLabel); // Throws RangeError
}
if (gen->needsAsmJSBoundsCheckBranch(mir))
maybeCmpOffset = emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ptr),
gen->outOfBoundsLabel());
uint32_t before = masm.size();
unsigned numElems = mir->numSimdElems();
if (numElems == 3) {
MOZ_ASSERT(type == Scalar::Int32x4 || type == Scalar::Float32x4);
Operand dstAddrZ =
ptr->isBogus()
? Operand(PatchedAbsoluteAddress(2 * sizeof(float) + mir->offset()))
: Operand(ToRegister(ptr), 2 * sizeof(float) + mir->offset());
// Store XY
Address addr(ptrReg, 0);
before = masm.size();
storeSimd(type, 2, in, addr);
uint32_t before = masm.size();
storeSimd(type, 2, in, dstAddr);
uint32_t after = masm.size();
masm.append(AsmJSHeapAccess(before, after, 3, type, maybeCmpOffset));
masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
masm.vmovhlps(in, ScratchSimdReg, ScratchSimdReg);
// Store Z (W is zeroed)
// This is still in bounds: either the manual bounds check above covered
// all three elements, or the bounds check was elided because there was
// provably enough space.
Address shiftedAddr(ptrReg, 2 * sizeof(float));
before = masm.size();
storeSimd(type, 1, ScratchSimdReg, shiftedAddr);
storeSimd(type, 1, ScratchSimdReg, dstAddrZ);
after = masm.size();
masm.append(AsmJSHeapAccess(before, after, 1, type));
return;
masm.append(AsmJSHeapAccess(before, after));
} else {
uint32_t before = masm.size();
storeSimd(type, numElems, in, dstAddr);
uint32_t after = masm.size();
masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
}
Address addr(ptrReg, 0);
storeSimd(type, numElems, in, addr);
uint32_t after = masm.size();
masm.append(AsmJSHeapAccess(before, after, numElems, type, maybeCmpOffset));
if (maybeCmpOffset != AsmJSHeapAccess::NoLengthCheck)
cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
}
void
CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
{
MAsmJSStoreHeap *mir = ins->mir();
Scalar::Type vt = mir->accessType();
const MAsmJSStoreHeap *mir = ins->mir();
Scalar::Type accessType = mir->accessType();
if (Scalar::isSimdType(accessType))
return emitSimdStore(ins);
const LAllocation *value = ins->value();
const LAllocation *ptr = ins->ptr();
Operand dstAddr = ptr->isBogus()
? Operand(PatchedAbsoluteAddress(mir->offset()))
: Operand(ToRegister(ptr), mir->offset());
if (Scalar::isSimdType(vt)) {
return emitSimdStore(vt, mir->numSimdElems(), ToFloatRegister(value), ptr,
mir->needsBoundsCheck(), mir->outOfBoundsLabel());
memoryBarrier(mir->barrierBefore());
Label *rejoin = nullptr;
uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
if (gen->needsAsmJSBoundsCheckBranch(mir)) {
rejoin = alloc().lifoAlloc()->new_<Label>();
maybeCmpOffset = emitAsmJSBoundsCheckBranch(mir, mir, ToRegister(ptr), rejoin);
}
memoryBarrier(ins->mir()->barrierBefore());
if (ptr->isConstant()) {
// The constant displacement still needs to be added to the as-yet-unknown
// base address of the heap. For now, embed the displacement as an
// immediate in the instruction. This displacement will be fixed up when the
// base address is known during dynamic linking (AsmJSModule::initHeap).
PatchedAbsoluteAddress dstAddr((void *) ptr->toConstant()->toInt32());
storeAndNoteViewTypeElement(vt, value, dstAddr);
memoryBarrier(ins->mir()->barrierAfter());
return;
}
Register ptrReg = ToRegister(ptr);
Address dstAddr(ptrReg, 0);
if (!mir->needsBoundsCheck()) {
storeAndNoteViewTypeElement(vt, value, dstAddr);
memoryBarrier(ins->mir()->barrierAfter());
return;
}
CodeOffsetLabel cmp = masm.cmp32WithPatch(ptrReg, Imm32(0));
Label rejoin;
masm.j(Assembler::AboveOrEqual, &rejoin);
uint32_t before = masm.size();
store(vt, value, dstAddr);
store(accessType, value, dstAddr);
uint32_t after = masm.size();
masm.bind(&rejoin);
memoryBarrier(ins->mir()->barrierAfter());
masm.append(AsmJSHeapAccess(before, after, vt, cmp.offset()));
if (rejoin) {
cleanupAfterAsmJSBoundsCheckBranch(mir, ToRegister(ptr));
masm.bind(rejoin);
}
memoryBarrier(mir->barrierAfter());
masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
}
void
CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
{
MAsmJSCompareExchangeHeap *mir = ins->mir();
Scalar::Type vt = mir->accessType();
Scalar::Type accessType = mir->accessType();
const LAllocation *ptr = ins->ptr();
Register oldval = ToRegister(ins->oldValue());
Register newval = ToRegister(ins->newValue());
@@ -752,9 +657,9 @@ CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
if (mir->needsBoundsCheck()) {
maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(0)).offset();
maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-mir->endOffset())).offset();
Label goahead;
masm.j(Assembler::Below, &goahead);
masm.j(Assembler::BelowOrEqual, &goahead);
memoryBarrier(MembarFull);
Register out = ToRegister(ins->output());
masm.xorl(out,out);
@@ -765,12 +670,12 @@ CodeGeneratorX86::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap *ins)
// Add in the actual heap pointer explicitly, to avoid opening up
// the abstraction that is compareExchangeToTypedIntArray at this time.
uint32_t before = masm.size();
masm.addlWithPatch(Imm32(0), ptrReg);
masm.addlWithPatch(Imm32(mir->offset()), ptrReg);
uint32_t after = masm.size();
masm.append(AsmJSHeapAccess(before, after, mir->accessType(), maybeCmpOffset));
masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
Address memAddr(ToRegister(ptr), 0);
masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
Address memAddr(ToRegister(ptr), mir->offset());
masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
memAddr,
oldval,
newval,
@@ -784,7 +689,7 @@ void
CodeGeneratorX86::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
{
MAsmJSAtomicBinopHeap *mir = ins->mir();
Scalar::Type vt = mir->accessType();
Scalar::Type accessType = mir->accessType();
const LAllocation *ptr = ins->ptr();
Register temp = ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
const LAllocation* value = ins->value();
@@ -798,9 +703,9 @@ CodeGeneratorX86::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
uint32_t maybeCmpOffset = AsmJSHeapAccess::NoLengthCheck;
if (mir->needsBoundsCheck()) {
maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(0)).offset();
maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-mir->endOffset())).offset();
Label goahead;
masm.j(Assembler::Below, &goahead);
masm.j(Assembler::BelowOrEqual, &goahead);
memoryBarrier(MembarFull);
Register out = ToRegister(ins->output());
masm.xorl(out,out);
@@ -811,20 +716,20 @@ CodeGeneratorX86::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap *ins)
// Add in the actual heap pointer explicitly, to avoid opening up
// the abstraction that is atomicBinopToTypedIntArray at this time.
uint32_t before = masm.size();
masm.addlWithPatch(Imm32(0), ptrReg);
masm.addlWithPatch(Imm32(mir->offset()), ptrReg);
uint32_t after = masm.size();
masm.append(AsmJSHeapAccess(before, after, mir->accessType(), maybeCmpOffset));
masm.append(AsmJSHeapAccess(before, after, maybeCmpOffset));
Address memAddr(ptrReg, 0);
Address memAddr(ptrReg, mir->offset());
if (value->isConstant()) {
masm.atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
masm.atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
Imm32(ToInt32(value)),
memAddr,
temp,
InvalidReg,
ToAnyRegister(ins->output()));
} else {
masm.atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
masm.atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
ToRegister(value),
memAddr,
temp,


@@ -28,25 +28,14 @@ class CodeGeneratorX86 : public CodeGeneratorX86Shared
ValueOperand ToOutValue(LInstruction *ins);
ValueOperand ToTempValue(LInstruction *ins, size_t pos);
template<typename T>
void loadAndNoteViewTypeElement(Scalar::Type vt, const T &srcAddr, const LDefinition *out);
template<typename T>
void load(Scalar::Type vt, const T &srcAddr, const LDefinition *out);
template<typename T>
void storeAndNoteViewTypeElement(Scalar::Type vt, const LAllocation *value, const T &dstAddr);
template<typename T>
void store(Scalar::Type vt, const LAllocation *value, const T &dstAddr);
void load(Scalar::Type vt, const Operand &srcAddr, const LDefinition *out);
void store(Scalar::Type vt, const LAllocation *value, const Operand &dstAddr);
template<typename T>
void loadSimd(Scalar::Type type, unsigned numElems, T srcAddr, FloatRegister out);
void emitSimdLoad(Scalar::Type type, unsigned numElems, const LAllocation *ptr,
FloatRegister out, bool needsBoundsCheck = false, Label *oobLabel = nullptr);
void loadSimd(Scalar::Type type, unsigned numElems, const Operand &srcAddr, FloatRegister out);
void emitSimdLoad(LAsmJSLoadHeap *ins);
template<typename T>
void storeSimd(Scalar::Type type, unsigned numElems, FloatRegister in, T destAddr);
void emitSimdStore(Scalar::Type type, unsigned numElems, FloatRegister in,
const LAllocation *ptr, bool needsBoundsCheck = false,
Label *oobLabel = nullptr);
void storeSimd(Scalar::Type type, unsigned numElems, FloatRegister in, const Operand &dstAddr);
void emitSimdStore(LAsmJSStoreHeap *ins);
void memoryBarrier(MemoryBarrierBits barrier);


@@ -202,55 +202,34 @@ void
LIRGeneratorX86::visitAsmJSLoadHeap(MAsmJSLoadHeap *ins)
{
MDefinition *ptr = ins->ptr();
LAllocation ptrAlloc;
MOZ_ASSERT(ptr->type() == MIRType_Int32);
// For the x86 it is best to keep the 'ptr' in a register if a bounds check is needed.
if (ptr->isConstant() && !ins->needsBoundsCheck()) {
// A bounds check is only skipped for a positive index.
MOZ_ASSERT(ptr->toConstant()->value().toInt32() >= 0);
ptrAlloc = LAllocation(ptr->toConstant()->vp());
} else {
ptrAlloc = useRegisterAtStart(ptr);
}
LAsmJSLoadHeap *lir = new(alloc()) LAsmJSLoadHeap(ptrAlloc);
define(lir, ins);
// For simplicity, require a register if we're going to emit a bounds-check
// branch, so that we don't have special cases for constants.
LAllocation ptrAlloc = gen->needsAsmJSBoundsCheckBranch(ins)
? useRegisterAtStart(ptr)
: useRegisterOrZeroAtStart(ptr);
define(new(alloc()) LAsmJSLoadHeap(ptrAlloc), ins);
}
void
LIRGeneratorX86::visitAsmJSStoreHeap(MAsmJSStoreHeap *ins)
{
MDefinition *ptr = ins->ptr();
LAsmJSStoreHeap *lir;
MOZ_ASSERT(ptr->type() == MIRType_Int32);
if (ptr->isConstant() && !ins->needsBoundsCheck()) {
MOZ_ASSERT(ptr->toConstant()->value().toInt32() >= 0);
LAllocation ptrAlloc = LAllocation(ptr->toConstant()->vp());
switch (ins->accessType()) {
case Scalar::Int8: case Scalar::Uint8:
// See comment below.
lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useFixed(ins->value(), eax));
break;
case Scalar::Int16: case Scalar::Uint16:
case Scalar::Int32: case Scalar::Uint32:
case Scalar::Float32: case Scalar::Float64:
case Scalar::Float32x4: case Scalar::Int32x4:
// See comment below.
lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterAtStart(ins->value()));
break;
case Scalar::Uint8Clamped:
case Scalar::MaxTypedArrayViewType:
MOZ_CRASH("unexpected array type");
}
add(lir, ins);
return;
}
// For simplicity, require a register if we're going to emit a bounds-check
// branch, so that we don't have special cases for constants.
LAllocation ptrAlloc = gen->needsAsmJSBoundsCheckBranch(ins)
? useRegisterAtStart(ptr)
: useRegisterOrZeroAtStart(ptr);
LAsmJSStoreHeap *lir = nullptr;
switch (ins->accessType()) {
case Scalar::Int8: case Scalar::Uint8:
// See comment for LIRGeneratorX86::useByteOpRegister.
lir = new(alloc()) LAsmJSStoreHeap(useRegister(ins->ptr()), useFixed(ins->value(), eax));
lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useFixed(ins->value(), eax));
break;
case Scalar::Int16: case Scalar::Uint16:
case Scalar::Int32: case Scalar::Uint32:
@@ -258,13 +237,12 @@ LIRGeneratorX86::visitAsmJSStoreHeap(MAsmJSStoreHeap *ins)
case Scalar::Float32x4: case Scalar::Int32x4:
// For now, don't allow constant values. The immediate operand
// affects instruction layout which affects patching.
lir = new(alloc()) LAsmJSStoreHeap(useRegisterAtStart(ptr), useRegisterAtStart(ins->value()));
lir = new(alloc()) LAsmJSStoreHeap(ptrAlloc, useRegisterAtStart(ins->value()));
break;
case Scalar::Uint8Clamped:
case Scalar::MaxTypedArrayViewType:
MOZ_CRASH("unexpected array type");
}
add(lir, ins);
}


@@ -76,20 +76,23 @@ bool JSAPITest::definePrint()
JSObject * JSAPITest::createGlobal(JSPrincipals *principals)
{
/* Create the global object. */
JS::RootedObject newGlobal(cx);
JS::CompartmentOptions options;
options.setVersion(JSVERSION_LATEST);
global = JS_NewGlobalObject(cx, getGlobalClass(), principals, JS::FireOnNewGlobalHook, options);
if (!global)
newGlobal = JS_NewGlobalObject(cx, getGlobalClass(), principals, JS::FireOnNewGlobalHook,
options);
if (!newGlobal)
return nullptr;
JSAutoCompartment ac(cx, global);
JSAutoCompartment ac(cx, newGlobal);
/* Populate the global object with the standard globals, like Object and
Array. */
if (!JS_InitStandardClasses(cx, global))
global = nullptr;
if (!JS_InitStandardClasses(cx, newGlobal))
return nullptr;
return global;
global = newGlobal;
return newGlobal;
}
int main(int argc, char *argv[])


@@ -203,8 +203,15 @@ class WrapperMapRef : public BufferableRef
CrossCompartmentKey prior = key;
if (key.debugger)
Mark(trc, &key.debugger, "CCW debugger");
if (key.kind != CrossCompartmentKey::StringWrapper)
if (key.kind == CrossCompartmentKey::ObjectWrapper ||
key.kind == CrossCompartmentKey::DebuggerObject ||
key.kind == CrossCompartmentKey::DebuggerEnvironment ||
key.kind == CrossCompartmentKey::DebuggerSource)
{
MOZ_ASSERT(IsInsideNursery(key.wrapped) ||
key.wrapped->asTenured().getTraceKind() == JSTRACE_OBJECT);
Mark(trc, reinterpret_cast<JSObject**>(&key.wrapped), "CCW wrapped object");
}
if (key.debugger == prior.debugger && key.wrapped == prior.wrapped)
return;
@@ -630,7 +637,30 @@ JSCompartment::sweepCrossCompartmentWrappers()
/* Remove dead wrappers from the table. */
for (WrapperMap::Enum e(crossCompartmentWrappers); !e.empty(); e.popFront()) {
CrossCompartmentKey key = e.front().key();
bool keyDying = IsCellAboutToBeFinalizedFromAnyThread(&key.wrapped);
bool keyDying;
switch (key.kind) {
case CrossCompartmentKey::ObjectWrapper:
case CrossCompartmentKey::DebuggerObject:
case CrossCompartmentKey::DebuggerEnvironment:
case CrossCompartmentKey::DebuggerSource:
MOZ_ASSERT(IsInsideNursery(key.wrapped) ||
key.wrapped->asTenured().getTraceKind() == JSTRACE_OBJECT);
keyDying = IsObjectAboutToBeFinalizedFromAnyThread(
reinterpret_cast<JSObject**>(&key.wrapped));
break;
case CrossCompartmentKey::StringWrapper:
MOZ_ASSERT(key.wrapped->asTenured().getTraceKind() == JSTRACE_STRING);
keyDying = IsStringAboutToBeFinalizedFromAnyThread(
reinterpret_cast<JSString**>(&key.wrapped));
break;
case CrossCompartmentKey::DebuggerScript:
MOZ_ASSERT(key.wrapped->asTenured().getTraceKind() == JSTRACE_SCRIPT);
keyDying = IsScriptAboutToBeFinalizedFromAnyThread(
reinterpret_cast<JSScript**>(&key.wrapped));
break;
default:
MOZ_CRASH("Unknown key kind");
}
bool valDying = IsValueAboutToBeFinalizedFromAnyThread(e.front().value().unsafeGet());
bool dbgDying = key.debugger && IsObjectAboutToBeFinalizedFromAnyThread(&key.debugger);
if (keyDying || valDying || dbgDying) {


@@ -1935,7 +1935,6 @@ CanRelocateAllocKind(AllocKind kind)
return kind <= FINALIZE_OBJECT_LAST;
}
size_t ArenaHeader::countFreeCells()
{
size_t count = 0;
@@ -2249,16 +2248,16 @@ void
MovingTracer::Visit(JSTracer *jstrc, void **thingp, JSGCTraceKind kind)
{
TenuredCell *thing = TenuredCell::fromPointer(*thingp);
Zone *zone = thing->zoneFromAnyThread();
if (!zone->isGCCompacting()) {
MOZ_ASSERT(!IsForwarded(thing));
// Currently we only relocate objects.
if (kind != JSTRACE_OBJECT) {
MOZ_ASSERT(!RelocationOverlay::isCellForwarded(thing));
return;
}
if (IsForwarded(thing)) {
Cell *dst = Forwarded(thing);
*thingp = dst;
}
JSObject *obj = reinterpret_cast<JSObject*>(thing);
if (IsForwarded(obj))
*thingp = Forwarded(obj);
}
void
@@ -5471,7 +5470,7 @@ GCRuntime::endSweepPhase(bool lastGC)
}
GCRuntime::IncrementalProgress
GCRuntime::compactPhase(bool lastGC, JS::gcreason::Reason reason)
GCRuntime::compactPhase(JS::gcreason::Reason reason)
{
gcstats::AutoPhase ap(stats, gcstats::PHASE_COMPACT);
@@ -5495,7 +5494,7 @@ GCRuntime::compactPhase(bool lastGC, JS::gcreason::Reason reason)
for (ArenaHeader *arena = relocatedList; arena; arena = arena->next) {
for (ArenaCellIterUnderFinalize i(arena); !i.done(); i.next()) {
TenuredCell *src = i.getCell();
MOZ_ASSERT(IsForwarded(src));
MOZ_ASSERT(RelocationOverlay::isCellForwarded(src));
TenuredCell *dest = Forwarded(src);
MOZ_ASSERT(src->isMarked(BLACK) == dest->isMarked(BLACK));
MOZ_ASSERT(src->isMarked(GRAY) == dest->isMarked(GRAY));
@@ -5504,16 +5503,18 @@
#endif
// Release the relocated arenas, or in debug builds queue them to be
// released until the start of the next GC unless this is the last GC.
// released until the start of the next GC unless this is the last GC or we
// are doing a last ditch GC.
#ifndef DEBUG
releaseRelocatedArenas(relocatedList);
#else
protectRelocatedArenas(relocatedList);
MOZ_ASSERT(!relocatedArenasToRelease);
if (!lastGC)
relocatedArenasToRelease = relocatedList;
else
if (reason == JS::gcreason::DESTROY_RUNTIME || reason == JS::gcreason::LAST_DITCH) {
releaseRelocatedArenas(relocatedList);
} else {
MOZ_ASSERT(!relocatedArenasToRelease);
protectRelocatedArenas(relocatedList);
relocatedArenasToRelease = relocatedList;
}
#endif
// Ensure excess chunks are returned to the system and free arenas
@@ -5937,7 +5938,7 @@ GCRuntime::incrementalCollectSlice(SliceBudget &budget, JS::gcreason::Reason rea
break;
case COMPACT:
if (isCompacting && compactPhase(lastGC, reason) == NotFinished)
if (isCompacting && compactPhase(reason) == NotFinished)
break;
finishCollection(reason);


@@ -1182,15 +1182,43 @@ class RelocationOverlay
RelocationOverlay *next() const {
return next_;
}
static bool isCellForwarded(Cell *cell) {
return fromCell(cell)->isForwarded();
}
};
/* Functions for checking and updating things that might be moved by compacting GC. */
#define TYPE_MIGHT_BE_FORWARDED(T, value) \
inline bool \
TypeMightBeForwarded(T *thing) \
{ \
return value; \
} \
TYPE_MIGHT_BE_FORWARDED(JSObject, true)
TYPE_MIGHT_BE_FORWARDED(JSString, false)
TYPE_MIGHT_BE_FORWARDED(JS::Symbol, false)
TYPE_MIGHT_BE_FORWARDED(JSScript, false)
TYPE_MIGHT_BE_FORWARDED(Shape, false)
TYPE_MIGHT_BE_FORWARDED(BaseShape, false)
TYPE_MIGHT_BE_FORWARDED(jit::JitCode, false)
TYPE_MIGHT_BE_FORWARDED(LazyScript, false)
TYPE_MIGHT_BE_FORWARDED(ObjectGroup, false)
#undef TYPE_MIGHT_BE_FORWARDED
template <typename T>
inline bool
IsForwarded(T *t)
{
RelocationOverlay *overlay = RelocationOverlay::fromCell(t);
if (!TypeMightBeForwarded(t)) {
MOZ_ASSERT(!overlay->isForwarded());
return false;
}
return overlay->isForwarded();
}
@@ -1247,7 +1275,7 @@ inline void
CheckGCThingAfterMovingGC(T *t)
{
MOZ_ASSERT_IF(t, !IsInsideNursery(t));
MOZ_ASSERT_IF(t, !IsForwarded(t));
MOZ_ASSERT_IF(t, !RelocationOverlay::isCellForwarded(t));
}
inline void


@@ -7,6 +7,7 @@
#include "vm/SavedStacks.h"
#include "mozilla/Attributes.h"
#include "mozilla/DebugOnly.h"
#include <math.h>
@@ -30,6 +31,7 @@
#include "vm/NativeObject-inl.h"
using mozilla::AddToHash;
using mozilla::DebugOnly;
using mozilla::HashString;
namespace js {


@@ -3073,11 +3073,20 @@ js::TypeMonitorCallSlow(JSContext *cx, JSObject *callee, const CallArgs &args, b
static inline bool
IsAboutToBeFinalized(TypeSet::ObjectKey **keyp)
{
// Mask out the low bit indicating whether this is a group or JS object.
uintptr_t flagBit = uintptr_t(*keyp) & 1;
gc::Cell *tmp = reinterpret_cast<gc::Cell *>(uintptr_t(*keyp) & ~1);
bool isAboutToBeFinalized = IsCellAboutToBeFinalized(&tmp);
*keyp = reinterpret_cast<TypeSet::ObjectKey *>(uintptr_t(tmp) | flagBit);
TypeSet::ObjectKey *key = *keyp;
bool isAboutToBeFinalized;
if (key->isGroup()) {
ObjectGroup *group = key->groupNoBarrier();
isAboutToBeFinalized = IsObjectGroupAboutToBeFinalized(&group);
if (!isAboutToBeFinalized)
*keyp = TypeSet::ObjectKey::get(group);
} else {
MOZ_ASSERT(key->isSingleton());
JSObject *singleton = key->singletonNoBarrier();
isAboutToBeFinalized = IsObjectAboutToBeFinalized(&singleton);
if (!isAboutToBeFinalized)
*keyp = TypeSet::ObjectKey::get(singleton);
}
return isAboutToBeFinalized;
}
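For reference, the low-bit tagging scheme that the removed masking code relied on can be sketched in isolation. This is an illustrative standalone example, not SpiderMonkey code; the type and member names are invented, and it assumes allocations are at least 2-byte aligned so the low pointer bit is free to use as a tag:

#include <cassert>
#include <cstdint>

struct Group {};      // stands in for ObjectGroup
struct Singleton {};  // stands in for a singleton JSObject

class TaggedKey {
    uintptr_t mBits;  // low bit: 1 = group, 0 = singleton
  public:
    explicit TaggedKey(Group *aGroup)
      : mBits(reinterpret_cast<uintptr_t>(aGroup) | 1) {}
    explicit TaggedKey(Singleton *aSingleton)
      : mBits(reinterpret_cast<uintptr_t>(aSingleton)) {
        assert((mBits & 1) == 0 && "requires 2-byte-aligned allocations");
    }
    bool isGroup() const { return mBits & 1; }
    Group *group() const {
        assert(isGroup());
        return reinterpret_cast<Group *>(mBits & ~uintptr_t(1));
    }
    Singleton *singleton() const {
        assert(!isGroup());
        return reinterpret_cast<Singleton *>(mBits);
    }
};

The new code above stops poking at the raw bits during sweeping and instead unwraps the key, runs the type-specific finalization check, and rewraps the possibly-moved pointer, so the tag bit never leaks into the GC check.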


@@ -4348,59 +4348,56 @@ nsCSSRendering::PaintDecorationLine(nsIFrame* aFrame,
}
}
void
nsCSSRendering::DecorationLineToPath(nsIFrame* aFrame,
gfxContext* aGfxContext,
const gfxRect& aDirtyRect,
const nscolor aColor,
const gfxPoint& aPt,
const gfxFloat aICoordInFrame,
const gfxSize& aLineSize,
const gfxFloat aAscent,
const gfxFloat aOffset,
Rect
nsCSSRendering::DecorationLineToPath(const Rect& aDirtyRect,
const Point& aPt,
const Size& aLineSize,
const Float aAscent,
const Float aOffset,
const uint8_t aDecoration,
const uint8_t aStyle,
bool aVertical,
const gfxFloat aDescentLimit)
const Float aDescentLimit)
{
NS_ASSERTION(aStyle != NS_STYLE_TEXT_DECORATION_STYLE_NONE, "aStyle is none");
aGfxContext->NewPath();
Rect path; // To benefit from RVO, we return this from all return points
gfxRect rect =
GetTextDecorationRectInternal(aPt, aLineSize, aAscent, aOffset,
Rect rect = ToRect(
GetTextDecorationRectInternal(ThebesPoint(aPt), ThebesSize(aLineSize),
aAscent, aOffset,
aDecoration, aStyle, aVertical,
aDescentLimit);
aDescentLimit));
if (rect.IsEmpty() || !rect.Intersects(aDirtyRect)) {
return;
return path;
}
if (aDecoration != NS_STYLE_TEXT_DECORATION_LINE_UNDERLINE &&
aDecoration != NS_STYLE_TEXT_DECORATION_LINE_OVERLINE &&
aDecoration != NS_STYLE_TEXT_DECORATION_LINE_LINE_THROUGH) {
NS_ERROR("Invalid decoration value!");
return;
return path;
}
if (aStyle != NS_STYLE_TEXT_DECORATION_STYLE_SOLID) {
// For the moment, we support only solid text decorations.
return;
return path;
}
gfxFloat lineThickness = std::max(NS_round(aLineSize.height), 1.0);
Float lineThickness = std::max(NS_round(aLineSize.height), 1.0);
// The block-direction position should be set to the middle of the line.
if (aVertical) {
rect.x += lineThickness / 2;
aGfxContext->Rectangle
(gfxRect(gfxPoint(rect.TopLeft() - gfxPoint(lineThickness / 2, 0.0)),
gfxSize(lineThickness, rect.Height())));
path = Rect(rect.TopLeft() - Point(lineThickness / 2, 0.0),
Size(lineThickness, rect.Height()));
} else {
rect.y += lineThickness / 2;
aGfxContext->Rectangle
(gfxRect(gfxPoint(rect.TopLeft() - gfxPoint(0.0, lineThickness / 2)),
gfxSize(rect.Width(), lineThickness)));
path = Rect(rect.TopLeft() - Point(0.0, lineThickness / 2),
Size(rect.Width(), lineThickness));
}
return path;
}
nsRect


@@ -321,7 +321,9 @@ struct nsBackgroundLayerState {
struct nsCSSRendering {
typedef mozilla::gfx::DrawTarget DrawTarget;
typedef mozilla::gfx::Float Float;
typedef mozilla::gfx::Point Point;
typedef mozilla::gfx::Rect Rect;
typedef mozilla::gfx::Size Size;
typedef mozilla::gfx::RectCornerRadii RectCornerRadii;
typedef mozilla::image::DrawResult DrawResult;
typedef nsIFrame::Sides Sides;
@@ -684,25 +686,20 @@ struct nsCSSRendering {
const gfxFloat aDescentLimit = -1.0);
/**
* Adds a path corresponding to the outline of the decoration line to
* the specified context. Arguments have the same meaning as for
* Returns a Rect corresponding to the outline of the decoration line for the
* given text metrics. Arguments have the same meaning as for
* PaintDecorationLine. Currently this only works for solid
* decorations; for other decoration styles, an empty path is added
* to the context.
* decorations; for other decoration styles the returned Rect will be empty.
*/
static void DecorationLineToPath(nsIFrame* aFrame,
gfxContext* aGfxContext,
const gfxRect& aDirtyRect,
const nscolor aColor,
const gfxPoint& aPt,
const gfxFloat aICoordInFrame,
const gfxSize& aLineSize,
const gfxFloat aAscent,
const gfxFloat aOffset,
static Rect DecorationLineToPath(const Rect& aDirtyRect,
const Point& aPt,
const Size& aLineSize,
const Float aAscent,
const Float aOffset,
const uint8_t aDecoration,
const uint8_t aStyle,
bool aVertical,
const gfxFloat aDescentLimit = -1.0);
const Float aDescentLimit = -1.0);
/**
* Function for getting the decoration line rect for the text.


@@ -590,7 +590,8 @@ nsDisplayListBuilder::nsDisplayListBuilder(nsIFrame* aReferenceFrame,
mAncestorHasApzAwareEventHandler(false),
mHaveScrollableDisplayPort(false),
mWindowDraggingAllowed(false),
mIsBuildingForPopup(nsLayoutUtils::IsPopup(aReferenceFrame))
mIsBuildingForPopup(nsLayoutUtils::IsPopup(aReferenceFrame)),
mForceLayerForScrollParent(false)
{
MOZ_COUNT_CTOR(nsDisplayListBuilder);
PL_InitArenaPool(&mPool, "displayListArena", 1024,


@@ -253,6 +253,13 @@ public:
* Get the ViewID of the nearest scrolling ancestor frame.
*/
ViewID GetCurrentScrollParentId() const { return mCurrentScrollParentId; }
/**
* Get and set the flag that indicates if scroll parents should have layers
* forcibly created. This flag is set when a deeply nested scrollframe has
* a displayport, and for scroll handoff to work properly the ancestor
* scrollframes should also get their own scrollable layers.
*/
void ForceLayerForScrollParent() { mForceLayerForScrollParent = true; }
/**
* Get the ViewID and the scrollbar flags corresponding to the scrollbar for
* which we are building display items at the moment.
@@ -660,15 +667,41 @@ public:
class AutoCurrentScrollParentIdSetter {
public:
AutoCurrentScrollParentIdSetter(nsDisplayListBuilder* aBuilder, ViewID aScrollId)
: mBuilder(aBuilder), mOldValue(aBuilder->mCurrentScrollParentId) {
: mBuilder(aBuilder)
, mOldValue(aBuilder->mCurrentScrollParentId)
, mOldForceLayer(aBuilder->mForceLayerForScrollParent) {
// If this AutoCurrentScrollParentIdSetter has the same scrollId as the
// previous one on the stack, then that means the scrollframe that
// created this isn't actually scrollable and cannot participate in
// scroll handoff. We set mCanBeScrollParent to false to indicate this.
mCanBeScrollParent = (mOldValue != aScrollId);
aBuilder->mCurrentScrollParentId = aScrollId;
aBuilder->mForceLayerForScrollParent = false;
}
bool ShouldForceLayerForScrollParent() const {
// Only scrollframes participating in scroll handoff can be forced to
// layerize
return mCanBeScrollParent && mBuilder->mForceLayerForScrollParent;
};
~AutoCurrentScrollParentIdSetter() {
mBuilder->mCurrentScrollParentId = mOldValue;
if (mCanBeScrollParent) {
// If this flag is set, caller code is responsible for having dealt
// with the current value of mBuilder->mForceLayerForScrollParent, so
// we can just restore the old value.
mBuilder->mForceLayerForScrollParent = mOldForceLayer;
} else {
// Otherwise we need to keep propagating the force-layerization flag
// upwards to the next ancestor scrollframe that does participate in
// scroll handoff.
mBuilder->mForceLayerForScrollParent |= mOldForceLayer;
}
}
private:
nsDisplayListBuilder* mBuilder;
ViewID mOldValue;
bool mOldForceLayer;
bool mCanBeScrollParent;
};
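The destructor above either restores the flag or keeps propagating it upwards; since that restore-or-merge step is the subtle part, here is a minimal standalone sketch of the same pattern with invented names (not the actual Gecko types):

struct Builder { int scrollId = -1; bool forceLayer = false; };

class ScopedScrollParent {
    Builder &mBuilder;
    int mOldId;
    bool mOldForce;
    bool mCanBeScrollParent;
  public:
    ScopedScrollParent(Builder &aBuilder, int aScrollId)
      : mBuilder(aBuilder),
        mOldId(aBuilder.scrollId),
        mOldForce(aBuilder.forceLayer),
        mCanBeScrollParent(aBuilder.scrollId != aScrollId) {
        mBuilder.scrollId = aScrollId;
        mBuilder.forceLayer = false;  // descendants report into a clean flag
    }
    ~ScopedScrollParent() {
        mBuilder.scrollId = mOldId;
        if (mCanBeScrollParent)
            mBuilder.forceLayer = mOldForce;   // caller consumed the flag
        else
            mBuilder.forceLayer |= mOldForce;  // hand it on to the next ancestor
    }
};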
/**
@@ -919,6 +952,7 @@ private:
bool mHaveScrollableDisplayPort;
bool mWindowDraggingAllowed;
bool mIsBuildingForPopup;
bool mForceLayerForScrollParent;
};
class nsDisplayItem;


@@ -8,6 +8,7 @@
#include "mozilla/ArrayUtils.h"
#include "mozilla/BasicEvents.h"
#include "mozilla/EventDispatcher.h"
#include "mozilla/gfx/PathHelpers.h"
#include "mozilla/Likely.h"
#include "mozilla/Maybe.h"


@@ -3072,6 +3072,22 @@ ScrollFrameHelper::BuildDisplayList(nsDisplayListBuilder* aBuilder,
}
mOuter->BuildDisplayListForChild(aBuilder, mScrolledFrame, dirtyRect, scrolledContent);
if (idSetter.ShouldForceLayerForScrollParent() &&
!gfxPrefs::LayoutUseContainersForRootFrames())
{
// Note that forcing layerization of scroll parents follows the scroll
// handoff chain which is subject to the out-of-flow-frames caveat noted
// above (where the idSetter variable is created).
//
// This is not compatible when using containers for root scrollframes.
MOZ_ASSERT(shouldBuildLayer && mScrolledFrame->GetContent());
mShouldBuildScrollableLayer = true;
}
}
if (mShouldBuildScrollableLayer && !gfxPrefs::LayoutUseContainersForRootFrames()) {
aBuilder->ForceLayerForScrollParent();
}
if (MOZ_UNLIKELY(mOuter->StyleDisplay()->mOverflowClipBox ==


@@ -5229,18 +5229,13 @@ PaintDecorationLine(nsIFrame* aFrame,
{
nscolor lineColor = aOverrideColor ? *aOverrideColor : aColor;
if (aCallbacks) {
if (aDecorationType == eNormalDecoration) {
aCallbacks->NotifyBeforeDecorationLine(lineColor);
} else {
aCallbacks->NotifyBeforeSelectionDecorationLine(lineColor);
}
nsCSSRendering::DecorationLineToPath(aFrame, aCtx, aDirtyRect, lineColor,
aPt, aICoordInFrame, aLineSize, aAscent, aOffset, aDecoration, aStyle,
Rect path = nsCSSRendering::DecorationLineToPath(ToRect(aDirtyRect),
ToPoint(aPt), ToSize(aLineSize), aAscent, aOffset, aDecoration, aStyle,
aVertical, aDescentLimit);
if (aDecorationType == eNormalDecoration) {
aCallbacks->NotifyDecorationLinePathEmitted();
aCallbacks->PaintDecorationLine(path, lineColor);
} else {
aCallbacks->NotifySelectionDecorationLinePathEmitted();
aCallbacks->PaintSelectionDecorationLine(path, lineColor);
}
} else {
nsCSSRendering::PaintDecorationLine(aFrame, *aCtx->GetDrawTarget(),


@@ -305,13 +305,13 @@ public:
* Callbacks are invoked in the following order:
*
* (NotifySelectionBackgroundNeedsFill)?
* (NotifyBeforeDecorationLine NotifyDecorationLinePathEmitted)*
* PaintDecorationLine*
* NotifyBeforeText
* (NotifyGlyphPathEmitted |
* (NotifyBeforeSVGGlyphPainted NotifyAfterSVGGlyphPainted))*
* NotifyAfterText
* (NotifyBeforeDecorationLine NotifyDecorationLinePathEmitted)*
* (NotifyBeforeSelectionDecorationLine NotifySelectionDecorationLinePathEmitted)*
* PaintDecorationLine*
* PaintSelectionDecorationLine*
*
* The color of each part of the frame's text rendering is passed as an argument
* to the NotifyBefore* callback for that part. The nscolor can take on one of
@@ -337,6 +337,19 @@ public:
nscolor aColor,
DrawTarget& aDrawTarget) { }
/**
* Called before (for under/over-line) or after (for line-through) the text
* is drawn to have a text decoration line drawn.
*/
virtual void PaintDecorationLine(Rect aPath, nscolor aColor) { }
/**
* Called after selected text is drawn to have a decoration line drawn over
* the text. (All types of text decoration are drawn after the text when
* text is selected.)
*/
virtual void PaintSelectionDecorationLine(Rect aPath, nscolor aColor) { }
/**
* Called just before any paths have been emitted to the gfxContext
* for the glyphs of the frame's text.
@@ -349,18 +362,6 @@ public:
*/
virtual void NotifyAfterText() { }
/**
* Called just before a path corresponding to a text decoration line
* has been emitted to the gfxContext.
*/
virtual void NotifyBeforeDecorationLine(nscolor aColor) { }
/**
* Called just after a path corresponding to a text decoration line
* has been emitted to the gfxContext.
*/
virtual void NotifyDecorationLinePathEmitted() { }
/**
* Called just before a path corresponding to a selection decoration line
* has been emitted to the gfxContext.


@@ -1356,8 +1356,8 @@ skip-if(B2G&&browserIsRemote) == 486848-1.xul 486848-1-ref.xul # bug 974780
== 490177-1.svg 490177-1-ref.svg
== 490182-1a.html 490182-1-ref.html
== 490182-1b.html 490182-1-ref.html
== 491180-1.html 491180-1-ref.html
== 491180-2.html 491180-2-ref.html
pref(browser.display.focus_ring_width,1) == 491180-1.html 491180-1-ref.html
pref(browser.display.focus_ring_width,1) == 491180-2.html 491180-2-ref.html
skip-if(B2G&&browserIsRemote) == 491323-1.xul 491323-1-ref.xul # bug 974780
skip-if(B2G&&browserIsRemote) == 492239-1.xul 492239-1-ref.xul # bug 974780
== 492661-1.html 492661-1-ref.html


@@ -7,9 +7,9 @@
# on B2G, despite their "-moz-appearance: none; background: gray", so they
# don't quite match the reference case's normal <div>. That's why they're fuzzy.
fuzzy-if(B2G||Android,125,20) == percent-height-child-1.html percent-height-child-1-ref.html
fuzzy-if(B2G||Android,125,80) == percent-height-child-2.html percent-height-child-2-ref.html
pref(browser.display.focus_ring_width,1) fuzzy-if(B2G||Android,125,80) == percent-height-child-2.html percent-height-child-2-ref.html
fuzzy-if(B2G||Android,125,20) == percent-width-child-1.html percent-width-child-1-ref.html
fuzzy-if(B2G||Android,125,80) == percent-width-child-2.html percent-width-child-2-ref.html
pref(browser.display.focus_ring_width,1) fuzzy-if(B2G||Android,125,80) == percent-width-child-2.html percent-width-child-2-ref.html
== vertical-centering.html vertical-centering-ref.html


@@ -0,0 +1,45 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Test for bug 1135361</title>
<style>
body {
font: 48px sans-serif;
}
div {
display: inline-block;
width: 3em;
border: 1px solid silver;
padding: .5em;
}
p {
writing-mode: vertical-rl;
-webkit-writing-mode: vertical-rl;
-ms-writing-mode: tb-rl; /* old syntax. IE */
text-orientation: upright;
-webkit-text-orientation: upright;
height: 4ch;
}
rt {
font-size: 20%; /* ensure ruby is small enough that it won't affect inline spacing */
}
span {
display: inline-block;
height: .5ch; /* shim for fake justification */
}
</style>
</head>
<body>
<div>
<p>
<span></span><ruby><rt>to</rt></ruby>
</p>
</div>
<div>
<p style="text-align:right;">
<ruby><rt>kyo</rt></ruby><span></span>
</p>
</div>
</body>
</html>


@@ -0,0 +1,46 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Test for bug 1135361</title>
<style>
body {
font: 48px sans-serif;
}
div {
display: inline-block;
width: 3em;
border: 1px solid silver;
padding: .5em;
}
p {
writing-mode: vertical-rl;
-webkit-writing-mode: vertical-rl;
-ms-writing-mode: tb-rl; /* old syntax. IE */
text-orientation: upright;
-webkit-text-orientation: upright;
height: 4ch;
text-align: justify;
-moz-text-align-last: justify;
}
rt {
font-size: 20%; /* ensure ruby is small enough that it won't affect inline spacing */
}
.t {
color: transparent;
}
</style>
</head>
<body>
<div>
<p>
<ruby><rt>to</rt></ruby><ruby class="t"><rt>kyo</rt></ruby>
</p>
</div>
<div>
<p>
<ruby class="t"><rt>to</rt></ruby><ruby><rt>kyo</rt></ruby>
</p>
</div>
</body>
</html>


@@ -70,7 +70,8 @@ skip-if(!winWidget||/^Windows\x20NT\x205\.1/.test(http.oscpu)) == ua-style-sheet
skip-if(Android||B2G) == ua-style-sheet-button-1.html ua-style-sheet-button-1a-ref.html
skip-if(!(Android||B2G)) == ua-style-sheet-button-1.html ua-style-sheet-button-1b-ref.html
== ua-style-sheet-input-color-1.html ua-style-sheet-input-color-1-ref.html
fails == ua-style-sheet-input-number-1.html ua-style-sheet-input-number-1-ref.html # bug 1123299
== ua-style-sheet-input-number-1.html ua-style-sheet-input-number-1-ref.html
HTTP(..) == 1127488-align-default-horizontal-tb-ltr.html 1127488-align-top-left-ref.html
HTTP(..) == 1127488-align-start-horizontal-tb-ltr.html 1127488-align-top-left-ref.html
HTTP(..) == 1127488-align-end-horizontal-tb-ltr.html 1127488-align-top-right-ref.html
@@ -102,3 +103,4 @@ HTTP(..) == 1127488-align-end-vertical-lr-ltr.html 1127488-align-bottom-left-ref
HTTP(..) == 1127488-align-left-vertical-lr-ltr.html 1127488-align-top-left-ref.html
HTTP(..) == 1127488-align-right-vertical-lr-ltr.html 1127488-align-bottom-left-ref.html
== 1131013-vertical-bidi.html 1131013-vertical-bidi-ref.html
fails-if(B2G) == 1135361-ruby-justify-1.html 1135361-ruby-justify-1-ref.html # bug 1136067


@@ -95,67 +95,84 @@ CSSAnimationPlayer::QueueEvents(EventArray& aEventsToDispatch)
ComputedTiming computedTiming = mSource->GetComputedTiming();
if (computedTiming.mPhase == ComputedTiming::AnimationPhase_Null) {
return; // do nothing
}
// Note that script can change the start time, so we have to handle moving
// backwards through the animation as well as forwards. An 'animationstart'
// is dispatched if we enter the active phase (regardless if that is from
// before or after the animation's active phase). An 'animationend' is
// dispatched if we leave the active phase (regardless if that is to before
// or after the animation's active phase).
bool wasActive = mPreviousPhaseOrIteration != PREVIOUS_PHASE_BEFORE &&
mPreviousPhaseOrIteration != PREVIOUS_PHASE_AFTER;
bool isActive =
computedTiming.mPhase == ComputedTiming::AnimationPhase_Active;
bool isSameIteration =
computedTiming.mCurrentIteration == mPreviousPhaseOrIteration;
bool skippedActivePhase =
(mPreviousPhaseOrIteration == PREVIOUS_PHASE_BEFORE &&
computedTiming.mPhase == ComputedTiming::AnimationPhase_After) ||
(mPreviousPhaseOrIteration == PREVIOUS_PHASE_AFTER &&
computedTiming.mPhase == ComputedTiming::AnimationPhase_Before);
MOZ_ASSERT(!skippedActivePhase || (!isActive && !wasActive),
"skippedActivePhase only makes sense if we were & are inactive");
if (computedTiming.mPhase == ComputedTiming::AnimationPhase_Before) {
mPreviousPhaseOrIteration = PREVIOUS_PHASE_BEFORE;
} else if (computedTiming.mPhase == ComputedTiming::AnimationPhase_Active) {
mPreviousPhaseOrIteration = computedTiming.mCurrentIteration;
} else if (computedTiming.mPhase == ComputedTiming::AnimationPhase_After) {
mPreviousPhaseOrIteration = PREVIOUS_PHASE_AFTER;
}
dom::Element* target;
nsCSSPseudoElements::Type targetPseudoType;
mSource->GetTarget(target, targetPseudoType);
switch (computedTiming.mPhase) {
case ComputedTiming::AnimationPhase_Null:
case ComputedTiming::AnimationPhase_Before:
// Do nothing
break;
uint32_t message;
case ComputedTiming::AnimationPhase_Active:
// Dispatch 'animationstart' or 'animationiteration' when needed.
if (computedTiming.mCurrentIteration != mLastNotification) {
// Notify 'animationstart' even if a negative delay puts us
// past the first iteration.
// Note that when somebody changes the animation-duration
// dynamically, this will fire an extra iteration event
// immediately in many cases. It's not clear to me if that's the
// right thing to do.
uint32_t message = mLastNotification == LAST_NOTIFICATION_NONE
? NS_ANIMATION_START
: NS_ANIMATION_ITERATION;
mLastNotification = computedTiming.mCurrentIteration;
TimeDuration iterationStart =
mSource->Timing().mIterationDuration *
computedTiming.mCurrentIteration;
TimeDuration elapsedTime =
std::max(iterationStart, mSource->InitialAdvance());
AnimationEventInfo ei(target, Name(), message,
StickyTimeDuration(elapsedTime),
PseudoTypeAsString(targetPseudoType));
aEventsToDispatch.AppendElement(ei);
}
break;
case ComputedTiming::AnimationPhase_After:
// If we skipped the animation interval entirely, dispatch
// 'animationstart' first
if (mLastNotification == LAST_NOTIFICATION_NONE) {
// Notifying for start of 0th iteration.
// (This is overwritten below but we set it here to maintain
// internal consistency.)
mLastNotification = 0;
StickyTimeDuration elapsedTime =
std::min(StickyTimeDuration(mSource->InitialAdvance()),
computedTiming.mActiveDuration);
AnimationEventInfo ei(target, Name(), NS_ANIMATION_START,
elapsedTime,
PseudoTypeAsString(targetPseudoType));
aEventsToDispatch.AppendElement(ei);
}
// Dispatch 'animationend' when needed.
if (mLastNotification != LAST_NOTIFICATION_END) {
mLastNotification = LAST_NOTIFICATION_END;
AnimationEventInfo ei(target, Name(), NS_ANIMATION_END,
computedTiming.mActiveDuration,
PseudoTypeAsString(targetPseudoType));
aEventsToDispatch.AppendElement(ei);
}
break;
if (!wasActive && isActive) {
message = NS_ANIMATION_START;
} else if (wasActive && !isActive) {
message = NS_ANIMATION_END;
} else if (wasActive && isActive && !isSameIteration) {
message = NS_ANIMATION_ITERATION;
} else if (skippedActivePhase) {
// First notifying for start of 0th iteration by appending an
// 'animationstart':
StickyTimeDuration elapsedTime =
std::min(StickyTimeDuration(mSource->InitialAdvance()),
computedTiming.mActiveDuration);
AnimationEventInfo ei(target, Name(), NS_ANIMATION_START,
elapsedTime,
PseudoTypeAsString(targetPseudoType));
aEventsToDispatch.AppendElement(ei);
// Then have the shared code below append an 'animationend':
message = NS_ANIMATION_END;
} else {
return; // No events need to be sent
}
StickyTimeDuration elapsedTime;
if (message == NS_ANIMATION_START ||
message == NS_ANIMATION_ITERATION) {
TimeDuration iterationStart = mSource->Timing().mIterationDuration *
computedTiming.mCurrentIteration;
elapsedTime = StickyTimeDuration(std::max(iterationStart,
mSource->InitialAdvance()));
} else {
MOZ_ASSERT(message == NS_ANIMATION_END);
elapsedTime = computedTiming.mActiveDuration;
}
AnimationEventInfo ei(target, Name(), message, elapsedTime,
PseudoTypeAsString(targetPseudoType));
aEventsToDispatch.AppendElement(ei);
}
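Stripped of the iteration bookkeeping, the dispatch decision above reduces to a small phase-transition table. A hedged sketch follows (the Null phase is elided and the names are invented; this is not the Gecko API):

#include <cstdio>

enum class Phase { Before, Active, After };

// Which event(s) a transition produces, mirroring the
// wasActive/isActive/skippedActivePhase tests above.
static const char *EventsFor(Phase aPrev, Phase aNow, bool aSameIteration) {
    bool wasActive = aPrev == Phase::Active;
    bool isActive = aNow == Phase::Active;
    if (!wasActive && isActive)
        return "animationstart";
    if (wasActive && !isActive)
        return "animationend";
    if (wasActive && isActive && !aSameIteration)
        return "animationiteration";
    if (aPrev != aNow)  // jumped straight across the active phase
        return "animationstart, then animationend";
    return "";          // nothing to dispatch
}

int main() {
    // Seeking past the whole active interval fires both events.
    std::printf("%s\n", EventsFor(Phase::Before, Phase::After, false));
    return 0;
}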
CommonAnimationManager*


@@ -59,7 +59,7 @@ public:
: dom::AnimationPlayer(aTimeline)
, mIsStylePaused(false)
, mPauseShouldStick(false)
, mLastNotification(LAST_NOTIFICATION_NONE)
, mPreviousPhaseOrIteration(PREVIOUS_PHASE_BEFORE)
{
}
@@ -139,12 +139,12 @@ protected:
bool mPauseShouldStick;
enum {
LAST_NOTIFICATION_NONE = uint64_t(-1),
LAST_NOTIFICATION_END = uint64_t(-2)
PREVIOUS_PHASE_BEFORE = uint64_t(-1),
PREVIOUS_PHASE_AFTER = uint64_t(-2)
};
// One of the LAST_NOTIFICATION_* constants, or an integer for the iteration
// One of the PREVIOUS_PHASE_* constants, or an integer for the iteration
// whose start we last notified on.
uint64_t mLastNotification;
uint64_t mPreviousPhaseOrIteration;
};
} /* namespace mozilla */


@@ -90,7 +90,7 @@ nsLayoutStylesheetCache::NumberControlSheet()
if (!gStyleCache->mNumberControlSheet) {
LoadSheetURL("resource://gre-resources/number-control.css",
gStyleCache->mNumberControlSheet, false);
gStyleCache->mNumberControlSheet, true);
}
return gStyleCache->mNumberControlSheet;


@@ -2722,15 +2722,13 @@ public:
void NotifySelectionBackgroundNeedsFill(const Rect& aBackgroundRect,
nscolor aColor,
DrawTarget& aDrawTarget) MOZ_OVERRIDE;
void PaintDecorationLine(Rect aPath, nscolor aColor) MOZ_OVERRIDE;
void PaintSelectionDecorationLine(Rect aPath, nscolor aColor) MOZ_OVERRIDE;
void NotifyBeforeText(nscolor aColor) MOZ_OVERRIDE;
void NotifyGlyphPathEmitted() MOZ_OVERRIDE;
void NotifyBeforeSVGGlyphPainted() MOZ_OVERRIDE;
void NotifyAfterSVGGlyphPainted() MOZ_OVERRIDE;
void NotifyAfterText() MOZ_OVERRIDE;
void NotifyBeforeDecorationLine(nscolor aColor) MOZ_OVERRIDE;
void NotifyDecorationLinePathEmitted() MOZ_OVERRIDE;
void NotifyBeforeSelectionDecorationLine(nscolor aColor) MOZ_OVERRIDE;
void NotifySelectionDecorationLinePathEmitted() MOZ_OVERRIDE;
private:
void SetupContext();
@@ -2838,22 +2836,24 @@ SVGTextDrawPathCallbacks::NotifyAfterText()
}
void
SVGTextDrawPathCallbacks::NotifyBeforeDecorationLine(nscolor aColor)
SVGTextDrawPathCallbacks::PaintDecorationLine(Rect aPath, nscolor aColor)
{
mColor = aColor;
SetupContext();
}
AntialiasMode aaMode =
nsSVGUtils::ToAntialiasMode(mFrame->StyleSVG()->mTextRendering);
void
SVGTextDrawPathCallbacks::NotifyDecorationLinePathEmitted()
{
gfx->Save();
gfx->NewPath();
gfx->SetAntialiasMode(aaMode);
gfx->Rectangle(ThebesRect(aPath));
HandleTextGeometry();
gfx->NewPath();
gfx->Restore();
}
void
SVGTextDrawPathCallbacks::NotifyBeforeSelectionDecorationLine(nscolor aColor)
SVGTextDrawPathCallbacks::PaintSelectionDecorationLine(Rect aPath,
nscolor aColor)
{
if (IsClipPathChild()) {
// Don't paint selection decorations when in a clip path.
@@ -2861,17 +2861,10 @@ SVGTextDrawPathCallbacks::NotifyBeforeSelectionDecorationLine(nscolor aColor)
}
mColor = aColor;
gfx->Save();
}
void
SVGTextDrawPathCallbacks::NotifySelectionDecorationLinePathEmitted()
{
if (IsClipPathChild()) {
// Don't paint selection decorations when in a clip path.
return;
}
gfx->NewPath();
gfx->Rectangle(ThebesRect(aPath));
FillAndStrokeGeometry();
gfx->Restore();
}


@@ -5,4 +5,4 @@ Makefile.in build files for the Mozilla build system.
The cubeb git repository is: git://github.com/kinetiknz/cubeb.git
The git commit ID used was cacaae79dd8b7220202d0dfe3f889d55e23a77a5.
The git commit ID used was 6de5d3e488d808dd925ae0885a7552fc0a25b449.


@@ -520,7 +520,14 @@ wasapi_stream_render_loop(LPVOID stream)
close_wasapi_stream(stm);
/* Reopen a stream and start it immediately. This will automatically pick the
* new default device for this role. */
setup_wasapi_stream(stm);
int r = setup_wasapi_stream(stm);
if (r != CUBEB_OK) {
/* Don't destroy the stream here, since we expect the caller to do
so after the error has propagated via the state callback. */
is_playing = false;
hr = -1;
continue;
}
}
stm->client->Start();
break;
@@ -578,7 +585,7 @@ wasapi_stream_render_loop(LPVOID stream)
}
if (FAILED(hr)) {
stm->state_callback(stm, stm->user_ptr, CUBEB_STATE_STOPPED);
stm->state_callback(stm, stm->user_ptr, CUBEB_STATE_ERROR);
}
stm->context->revert_mm_thread_characteristics(mmcss_handle);
@@ -967,8 +974,6 @@ int setup_wasapi_stream(cubeb_stream * stm)
hr = get_default_endpoint(&device);
if (FAILED(hr)) {
LOG("Could not get default endpoint, error: %x\n", hr);
stm->stream_reset_lock->leave();
wasapi_stream_destroy(stm);
return CUBEB_ERROR;
}
@@ -980,8 +985,6 @@ int setup_wasapi_stream(cubeb_stream * stm)
SafeRelease(device);
if (FAILED(hr)) {
LOG("Could not activate the device to get an audio client: error: %x\n", hr);
stm->stream_reset_lock->leave();
wasapi_stream_destroy(stm);
return CUBEB_ERROR;
}
@@ -990,8 +993,6 @@ int setup_wasapi_stream(cubeb_stream * stm)
hr = stm->client->GetMixFormat(&mix_format);
if (FAILED(hr)) {
LOG("Could not fetch current mix format from the audio client: error: %x\n", hr);
stm->stream_reset_lock->leave();
wasapi_stream_destroy(stm);
return CUBEB_ERROR;
}
@@ -1015,16 +1016,12 @@ int setup_wasapi_stream(cubeb_stream * stm)
if (FAILED(hr)) {
LOG("Unable to initialize audio client: %x.\n", hr);
stm->stream_reset_lock->leave();
wasapi_stream_destroy(stm);
return CUBEB_ERROR;
}
hr = stm->client->GetBufferSize(&stm->buffer_frame_count);
if (FAILED(hr)) {
LOG("Could not get the buffer size from the client %x.\n", hr);
stm->stream_reset_lock->leave();
wasapi_stream_destroy(stm);
return CUBEB_ERROR;
}
@@ -1035,8 +1032,6 @@ int setup_wasapi_stream(cubeb_stream * stm)
hr = stm->client->SetEventHandle(stm->refill_event);
if (FAILED(hr)) {
LOG("Could set the event handle for the client %x.\n", hr);
stm->stream_reset_lock->leave();
wasapi_stream_destroy(stm);
return CUBEB_ERROR;
}
@@ -1044,8 +1039,6 @@ int setup_wasapi_stream(cubeb_stream * stm)
(void **)&stm->render_client);
if (FAILED(hr)) {
LOG("Could not get the render client %x.\n", hr);
stm->stream_reset_lock->leave();
wasapi_stream_destroy(stm);
return CUBEB_ERROR;
}
@@ -1053,8 +1046,6 @@ int setup_wasapi_stream(cubeb_stream * stm)
(void **)&stm->audio_stream_volume);
if (FAILED(hr)) {
LOG("Could not get the IAudioStreamVolume %x.\n", hr);
stm->stream_reset_lock->leave();
wasapi_stream_destroy(stm);
return CUBEB_ERROR;
}
@@ -1070,8 +1061,6 @@ int setup_wasapi_stream(cubeb_stream * stm)
CUBEB_RESAMPLER_QUALITY_DESKTOP);
if (!stm->resampler) {
LOG("Could not get a resampler\n");
stm->stream_reset_lock->leave();
wasapi_stream_destroy(stm);
return CUBEB_ERROR;
}
@@ -1131,14 +1120,15 @@ wasapi_stream_init(cubeb * context, cubeb_stream ** stream,
}
{
/* Locking here is not stricly necessary, because we don't have a
/* Locking here is not strictly necessary, because we don't have a
notification client that can reset the stream yet, but it lets us
assert that the lock is held in the function. */
auto_lock lock(stm->stream_reset_lock);
rv = setup_wasapi_stream(stm);
if (rv != CUBEB_OK) {
return rv;
}
}
if (rv != CUBEB_OK) {
wasapi_stream_destroy(stm);
return rv;
}
hr = register_notification_client(stm);
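The cubeb hunks above delete the per-error-path leave()/wasapi_stream_destroy() calls and make the caller the single owner of teardown. That ownership pattern, sketched with invented names (this is not the cubeb API):

struct stream { bool ready = false; };

// On failure this only reports the error; it never frees its argument.
static int setup_stream(stream *aStm, bool aFail) {
    if (aFail)
        return -1;
    aStm->ready = true;
    return 0;
}

static void stream_destroy(stream *aStm) { delete aStm; }

// Exactly one cleanup path, owned by the caller.
static int stream_init(stream **aOut, bool aFail) {
    stream *stm = new stream();
    int rv = setup_stream(stm, aFail);
    if (rv != 0) {
        stream_destroy(stm);
        return rv;
    }
    *aOut = stm;
    return 0;
}

This also matches the render-loop change above: on a failed reopen, the loop merely flags the error and lets the state callback report it, leaving destruction to the API consumer.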


@@ -199,7 +199,7 @@ public:
NR_CLOSED,
};
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(NrSocketIpc)
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(NrSocketIpc, MOZ_OVERRIDE)
NS_IMETHODIMP CallListenerError(const nsACString &message,
const nsACString &filename,


@@ -288,7 +288,7 @@ public:
NS_IMETHOD OnAddIceCandidateSuccess(ER&) MOZ_OVERRIDE;
NS_IMETHOD OnAddIceCandidateError(uint32_t code, const char *msg, ER&) MOZ_OVERRIDE;
NS_IMETHOD OnIceCandidate(uint16_t level, const char *mid, const char *cand, ER&) MOZ_OVERRIDE;
NS_IMETHODIMP OnNegotiationNeeded(ER&);
NS_IMETHOD OnNegotiationNeeded(ER&) MOZ_OVERRIDE;
// Hack because add_ice_candidates can happen asynchronously with respect
// to the API calls. The whole test suite needs a refactor.


@@ -1416,11 +1416,10 @@ bool AudioCodingModuleImpl::REDStatus() const {
}
// Configure RED status i.e on/off.
int AudioCodingModuleImpl::SetREDStatus(
#ifdef WEBRTC_CODEC_RED
bool enable_red) {
int AudioCodingModuleImpl::SetREDStatus(bool enable_red) {
CriticalSectionScoped lock(acm_crit_sect_);
#ifdef WEBRTC_CODEC_RED
if (enable_red == true && codec_fec_enabled_ == true) {
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
"Codec internal FEC and RED cannot be co-enabled.");
@@ -1439,7 +1438,6 @@ int AudioCodingModuleImpl::SetREDStatus(
is_first_red_ = true; // Make sure we restart RED.
return 0;
#else
bool /* enable_red */) {
red_enabled_ = false;
WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceAudioCoding, id_,
" WEBRTC_CODEC_RED is undefined => red_enabled_ = %d",


@@ -82,6 +82,7 @@ CacheEntry::Callback::Callback(CacheEntry* aEntry,
, mCallback(aCallback)
, mTargetThread(do_GetCurrentThread())
, mReadOnly(aReadOnly)
, mRevalidating(false)
, mCheckOnAnyThread(aCheckOnAnyThread)
, mRecheckAfterWrite(false)
, mNotWanted(false)
@@ -100,6 +101,7 @@ CacheEntry::Callback::Callback(CacheEntry::Callback const &aThat)
, mCallback(aThat.mCallback)
, mTargetThread(aThat.mTargetThread)
, mReadOnly(aThat.mReadOnly)
, mRevalidating(aThat.mRevalidating)
, mCheckOnAnyThread(aThat.mCheckOnAnyThread)
, mRecheckAfterWrite(aThat.mRecheckAfterWrite)
, mNotWanted(aThat.mNotWanted)
@@ -674,6 +676,8 @@ bool CacheEntry::InvokeCallback(Callback & aCallback)
checkResult = ENTRY_NOT_WANTED;
}
aCallback.mRevalidating = checkResult == ENTRY_NEEDS_REVALIDATION;
switch (checkResult) {
case ENTRY_WANTED:
// Nothing more to do here, the consumer is responsible to handle
@@ -788,7 +792,8 @@ void CacheEntry::InvokeAvailableCallback(Callback const & aCallback)
return;
}
if (aCallback.mReadOnly) {
// R/O callbacks may do revalidation, let them fall through
if (aCallback.mReadOnly && !aCallback.mRevalidating) {
LOG((" r/o and not ready, notifying OCEA with NS_ERROR_CACHE_KEY_NOT_FOUND"));
aCallback.mCallback->OnCacheEntryAvailable(
nullptr, false, nullptr, NS_ERROR_CACHE_KEY_NOT_FOUND);


@@ -152,6 +152,7 @@ private:
nsCOMPtr<nsICacheEntryOpenCallback> mCallback;
nsCOMPtr<nsIThread> mTargetThread;
bool mReadOnly : 1;
bool mRevalidating : 1;
bool mCheckOnAnyThread : 1;
bool mRecheckAfterWrite : 1;
bool mNotWanted : 1;


@@ -551,7 +551,7 @@ CacheFileContextEvictor::EvictEntries()
mEntries[0]->mIterator.get(), mEntries[0]->mInfo.get()));
nsRefPtr<CacheFileHandle> handle;
CacheFileIOManager::gInstance->mHandles.GetHandle(&hash, false,
CacheFileIOManager::gInstance->mHandles.GetHandle(&hash,
getter_AddRefs(handle));
if (handle) {
// We doom any active handle in CacheFileIOManager::EvictByContext(), so


@@ -310,7 +310,6 @@ CacheFileHandles::~CacheFileHandles()
nsresult
CacheFileHandles::GetHandle(const SHA1Sum::Hash *aHash,
bool aReturnDoomed,
CacheFileHandle **_retval)
{
MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
@@ -344,16 +343,12 @@ CacheFileHandles::GetHandle(const SHA1Sum::Hash *aHash,
if (handle->IsDoomed()) {
LOG(("CacheFileHandles::GetHandle() hash=%08x%08x%08x%08x%08x "
"found doomed handle %p, entry %p", LOGSHA1(aHash), handle.get(), entry));
// If the consumer doesn't want doomed handles, exit with NOT_AVAIL.
if (!aReturnDoomed) {
return NS_ERROR_NOT_AVAILABLE;
}
} else {
LOG(("CacheFileHandles::GetHandle() hash=%08x%08x%08x%08x%08x "
"found handle %p, entry %p", LOGSHA1(aHash), handle.get(), entry));
return NS_ERROR_NOT_AVAILABLE;
}
LOG(("CacheFileHandles::GetHandle() hash=%08x%08x%08x%08x%08x "
"found handle %p, entry %p", LOGSHA1(aHash), handle.get(), entry));
handle.forget(_retval);
return NS_OK;
}
@@ -810,7 +805,7 @@ public:
if (!mIOMan) {
rv = NS_ERROR_NOT_INITIALIZED;
} else {
rv = mIOMan->DoomFileByKeyInternal(&mHash, false);
rv = mIOMan->DoomFileByKeyInternal(&mHash);
mIOMan = nullptr;
}
@@ -1575,7 +1570,7 @@ CacheFileIOManager::OpenFileInternal(const SHA1Sum::Hash *aHash,
NS_ENSURE_SUCCESS(rv, rv);
nsRefPtr<CacheFileHandle> handle;
mHandles.GetHandle(aHash, false, getter_AddRefs(handle));
mHandles.GetHandle(aHash, getter_AddRefs(handle));
if ((aFlags & (OPEN | CREATE | CREATE_NEW)) == CREATE_NEW) {
if (handle) {
@@ -2074,11 +2069,10 @@ CacheFileIOManager::DoomFileByKey(const nsACString &aKey,
}
nsresult
CacheFileIOManager::DoomFileByKeyInternal(const SHA1Sum::Hash *aHash,
bool aFailIfAlreadyDoomed)
CacheFileIOManager::DoomFileByKeyInternal(const SHA1Sum::Hash *aHash)
{
LOG(("CacheFileIOManager::DoomFileByKeyInternal() [hash=%08x%08x%08x%08x%08x,"
" failIfAlreadyDoomed=%d]", LOGSHA1(aHash), aFailIfAlreadyDoomed));
LOG(("CacheFileIOManager::DoomFileByKeyInternal() [hash=%08x%08x%08x%08x%08x]"
, LOGSHA1(aHash)));
MOZ_ASSERT(CacheFileIOManager::IsOnIOThreadOrCeased());
@@ -2094,15 +2088,11 @@ CacheFileIOManager::DoomFileByKeyInternal(const SHA1Sum::Hash *aHash,
// Find active handle
nsRefPtr<CacheFileHandle> handle;
mHandles.GetHandle(aHash, true, getter_AddRefs(handle));
mHandles.GetHandle(aHash, getter_AddRefs(handle));
if (handle) {
handle->Log();
if (handle->IsDoomed()) {
return aFailIfAlreadyDoomed ? NS_ERROR_NOT_AVAILABLE : NS_OK;
}
return DoomFileInternal(handle);
}
@@ -2244,7 +2234,7 @@ CacheFileIOManager::GetEntryInfo(const SHA1Sum::Hash *aHash,
nsAutoCString uriSpec;
nsRefPtr<CacheFileHandle> handle;
ioMan->mHandles.GetHandle(aHash, false, getter_AddRefs(handle));
ioMan->mHandles.GetHandle(aHash, getter_AddRefs(handle));
if (handle) {
nsRefPtr<nsILoadContextInfo> info =
CacheFileUtils::ParseKey(handle->Key(), &enhanceId, &uriSpec);
@@ -2615,7 +2605,7 @@ CacheFileIOManager::OverLimitEvictionInternal()
rv = CacheIndex::GetEntryForEviction(false, &hash, &cnt);
NS_ENSURE_SUCCESS(rv, rv);
rv = DoomFileByKeyInternal(&hash, true);
rv = DoomFileByKeyInternal(&hash);
if (NS_SUCCEEDED(rv)) {
consecutiveFailures = 0;
} else if (rv == NS_ERROR_NOT_AVAILABLE) {
@@ -2623,16 +2613,6 @@
"DoomFileByKeyInternal() failed. [rv=0x%08x]", rv));
// TODO index is outdated, start update
#ifdef DEBUG
// Dooming should never fail due to already doomed handle, but bug 1028415
// shows that this unexpected state can happen. Assert in debug build so
// we can find the cause if we ever find a way to reproduce it with NSPR
// logging enabled.
nsRefPtr<CacheFileHandle> handle;
mHandles.GetHandle(&hash, true, getter_AddRefs(handle));
MOZ_ASSERT(!handle || !handle->IsDoomed());
#endif
// Make sure index won't return the same entry again
CacheIndex::RemoveEntry(&hash);
consecutiveFailures = 0;
@@ -3650,7 +3630,7 @@ CacheFileIOManager::OpenNSPRHandle(CacheFileHandle *aHandle, bool aCreate)
rv = CacheIndex::GetEntryForEviction(true, &hash, &cnt);
if (NS_SUCCEEDED(rv)) {
rv = DoomFileByKeyInternal(&hash, true);
rv = DoomFileByKeyInternal(&hash);
}
if (NS_SUCCEEDED(rv)) {
rv = aHandle->mFile->OpenNSPRFileDesc(


@@ -90,7 +90,7 @@ public:
CacheFileHandles();
~CacheFileHandles();
nsresult GetHandle(const SHA1Sum::Hash *aHash, bool aReturnDoomed, CacheFileHandle **_retval);
nsresult GetHandle(const SHA1Sum::Hash *aHash, CacheFileHandle **_retval);
nsresult NewHandle(const SHA1Sum::Hash *aHash, bool aPriority, CacheFileHandle **_retval);
void RemoveHandle(CacheFileHandle *aHandlle);
void GetAllHandles(nsTArray<nsRefPtr<CacheFileHandle> > *_retval);
@@ -330,8 +330,7 @@ private:
nsresult WriteInternal(CacheFileHandle *aHandle, int64_t aOffset,
const char *aBuf, int32_t aCount, bool aValidate);
nsresult DoomFileInternal(CacheFileHandle *aHandle);
nsresult DoomFileByKeyInternal(const SHA1Sum::Hash *aHash,
bool aFailIfAlreadyDoomed);
nsresult DoomFileByKeyInternal(const SHA1Sum::Hash *aHash);
nsresult ReleaseNSPRHandleInternal(CacheFileHandle *aHandle);
nsresult TruncateSeekSetEOFInternal(CacheFileHandle *aHandle,
int64_t aTruncatePos, int64_t aEOFPos);


@@ -1277,7 +1277,7 @@ bool CacheIndex::IsForcedValidEntry(const SHA1Sum::Hash *aHash)
nsRefPtr<CacheFileHandle> handle;
CacheFileIOManager::gInstance->mHandles.GetHandle(
aHash, false, getter_AddRefs(handle));
aHash, getter_AddRefs(handle));
if (!handle)
return false;
@@ -2775,7 +2775,7 @@ CacheIndex::BuildIndex()
#ifdef DEBUG
nsRefPtr<CacheFileHandle> handle;
CacheFileIOManager::gInstance->mHandles.GetHandle(&hash, false,
CacheFileIOManager::gInstance->mHandles.GetHandle(&hash,
getter_AddRefs(handle));
#endif
@@ -2988,7 +2988,7 @@ CacheIndex::UpdateIndex()
#ifdef DEBUG
nsRefPtr<CacheFileHandle> handle;
CacheFileIOManager::gInstance->mHandles.GetHandle(&hash, false,
CacheFileIOManager::gInstance->mHandles.GetHandle(&hash,
getter_AddRefs(handle));
#endif


@@ -4237,12 +4237,14 @@ nsHttpChannel::InstallCacheListener(int64_t offset)
do_CreateInstance(kStreamListenerTeeCID, &rv);
if (NS_FAILED(rv)) return rv;
nsCOMPtr<nsICacheStorageService> serv =
do_GetService("@mozilla.org/netwerk/cache-storage-service;1", &rv);
NS_ENSURE_SUCCESS(rv, rv);
nsCOMPtr<nsIEventTarget> cacheIOTarget;
serv->GetIoTarget(getter_AddRefs(cacheIOTarget));
if (!CacheObserver::UseNewCache()) {
nsCOMPtr<nsICacheStorageService> serv =
do_GetService("@mozilla.org/netwerk/cache-storage-service;1", &rv);
NS_ENSURE_SUCCESS(rv, rv);
serv->GetIoTarget(getter_AddRefs(cacheIOTarget));
}
if (!cacheIOTarget) {
LOG(("nsHttpChannel::InstallCacheListener sync tee %p rv=%x "


@@ -2811,7 +2811,7 @@ nsHttpConnectionMgr::ActivateTimeoutTick()
{
MOZ_ASSERT(PR_GetCurrentThread() == gSocketThread);
LOG(("nsHttpConnectionMgr::ActivateTimeoutTick() "
"this=%p mTimeoutTick=%p\n"));
"this=%p mTimeoutTick=%p\n", this, mTimeoutTick.get()));
// The timer tick should be enabled if it is not already pending.
// Upon running the tick will rearm itself if there are active


@@ -204,6 +204,7 @@ SandboxFilterImplContent::Build() {
Allow(SYSCALL_LARGEFILE(fstat, fstat64));
Allow(SYSCALL_LARGEFILE(stat, stat64));
Allow(SYSCALL_LARGEFILE(lstat, lstat64));
Allow(SYSCALL_LARGEFILE(newfstatat, fstatat64));
Allow(SOCKETCALL(socketpair, SOCKETPAIR));
Deny(EACCES, SOCKETCALL(socket, SOCKET));
Allow(SYSCALL(open));
@@ -211,6 +212,7 @@ SandboxFilterImplContent::Build() {
Allow(SYSCALL(prctl));
Allow(SYSCALL(access));
Allow(SYSCALL(unlink));
Allow(SYSCALL(unlinkat));
Allow(SYSCALL(fsync));
Allow(SYSCALL(msync));
