Backed out changesets bd9cb02d4da8 and 9f2ad189edc9 (bug 1150337) for WinXP debug jit-test OOMs.

CLOSED TREE
Ryan VanderMeulen 2015-04-15 13:38:11 -04:00
parent 9b1a298d46
commit 7f69890e20
12 changed files with 59 additions and 132 deletions


@@ -97,10 +97,6 @@ AsmJSModule::AsmJSModule(ScriptSource* scriptSource, uint32_t srcStart, uint32_t
pod.strict_ = strict;
pod.usesSignalHandlers_ = canUseSignalHandlers;
// AsmJSCheckedImmediateRange should be defined to be at most the minimum
// heap length so that offsets can be folded into bounds checks.
MOZ_ASSERT(pod.minHeapLength_ - AsmJSCheckedImmediateRange <= pod.minHeapLength_);
scriptSource_->incref();
}


@@ -570,14 +570,14 @@ ComputeAccessAddress(EMULATOR_CONTEXT* context, const Disassembler::ComplexAddre
uintptr_t result = address.disp();
if (address.hasBase()) {
if (address.base() != Registers::Invalid) {
uintptr_t base;
StoreValueFromGPReg(&base, sizeof(uintptr_t),
AddressOfGPRegisterSlot(context, address.base()));
result += base;
}
if (address.hasIndex()) {
if (address.index() != Registers::Invalid) {
uintptr_t index;
StoreValueFromGPReg(&index, sizeof(uintptr_t),
AddressOfGPRegisterSlot(context, address.index()));
@@ -608,15 +608,15 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre
// Check x64 asm.js heap access invariants.
MOZ_RELEASE_ASSERT(address.disp() >= 0);
MOZ_RELEASE_ASSERT(address.base() == HeapReg.code());
MOZ_RELEASE_ASSERT(!address.hasIndex() || address.index() != HeapReg.code());
MOZ_RELEASE_ASSERT(address.index() != HeapReg.code());
MOZ_RELEASE_ASSERT(address.scale() == 0);
if (address.hasBase()) {
if (address.base() != Registers::Invalid) {
uintptr_t base;
StoreValueFromGPReg(&base, sizeof(uintptr_t),
AddressOfGPRegisterSlot(context, address.base()));
MOZ_RELEASE_ASSERT(reinterpret_cast<uint8_t*>(base) == module.maybeHeap());
}
if (address.hasIndex()) {
if (address.index() != Registers::Invalid) {
uintptr_t index;
StoreValueFromGPReg(&index, sizeof(uintptr_t),
AddressOfGPRegisterSlot(context, address.index()));


@@ -67,12 +67,12 @@ static_assert(jit::AsmJSCheckedImmediateRange <= jit::AsmJSImmediateRange,
// the internal ArrayBuffer data array is inflated to 4GiB (only the
// byteLength portion of which is accessible) so that out-of-bounds accesses
// (made using a uint32 index) are guaranteed to raise a SIGSEGV.
// Then, an additional extent is added to permit folding of immediate
// Then, an additional extent is added to permit folding of small immediate
// values into addresses. And finally, unaligned accesses and mask optimizations
// might also try to access a few bytes after this limit, so just inflate it by
// AsmJSPageSize.
static const size_t AsmJSMappedSize = 4 * 1024ULL * 1024ULL * 1024ULL +
jit::AsmJSImmediateRange +
jit::AsmJSCheckedImmediateRange +
AsmJSPageSize;
#endif // defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
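As the restored comment explains, the mapping is sized so that any uint32 index lands inside it, with extra room for folded immediates and a page of slop for unaligned or masked accesses. A rough sketch of that sizing follows; kPageSize and kCheckedImmediateRange are placeholder values standing in for AsmJSPageSize and AsmJSCheckedImmediateRange, not the real constants.

#include <cstdint>
#include <cstdio>

// Placeholder stand-ins for the SpiderMonkey constants (assumed values).
static const uint64_t kPageSize = 4096;
static const uint64_t kCheckedImmediateRange = 4096;

int main() {
    // 4 GiB covers every possible uint32 index, the immediate range covers
    // offsets folded into addresses, and one extra page absorbs unaligned
    // and mask-optimized accesses that run slightly past the limit.
    uint64_t mapped = 4ULL * 1024 * 1024 * 1024 + kCheckedImmediateRange + kPageSize;
    printf("reserved mapping: %llu bytes\n", (unsigned long long)mapped);
    return 0;
}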


@@ -505,20 +505,3 @@ assertEq(f(0x424242),0xAA);
assertEq(f(0x1000000),0);
assertEq(asmLink(m, this, null, new ArrayBuffer(0x2000000))(0),0);
assertEq(asmLink(m, this, null, new ArrayBuffer(0x3000000))(0),0);
// Heap offsets
var asmMod = function test (glob, env, b) {
'use asm';
var i8 = new glob.Int8Array(b);
function f(i) {
i = i | 0;
i = i & 1;
i = (i - 0x40000000)|0;
i8[0x3fffffff] = 0;
return i8[(i + 0x7ffffffe) >> 0] | 0;
}
return f;
};
var buffer = new ArrayBuffer(0x40000000);
var asm = asmMod(this, {}, buffer);
assertEq(asm(-1),0);
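For context (my own re-derivation, not part of the changeset): calling the removed test with -1 gives i & 1 == 1, then (1 - 0x40000000)|0 == -0x3fffffff, and the final index (i + 0x7ffffffe) wraps in 32-bit arithmetic to 0x3fffffff, which is in bounds for the 0x40000000-byte buffer and reads the byte the test previously set to 0. A minimal C++ check of that wrap-around:

#include <cstdint>
#include <cstdio>

int main() {
    // Mirror the asm.js int32 arithmetic from the removed test, using
    // unsigned types so the 32-bit wrap-around is well defined.
    uint32_t i = uint32_t(-1) & 1;          // 1
    uint32_t biased = i - 0x40000000u;      // 0xc0000001, i.e. -0x3fffffff as int32
    uint32_t index = biased + 0x7ffffffeu;  // wraps to 0x3fffffff
    printf("index = 0x%x\n", (unsigned)index);  // in bounds of a 0x40000000-byte heap
    return 0;
}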


@@ -97,21 +97,13 @@ class ComplexAddress {
return disp_;
}
bool hasBase() const {
return base_ != Registers::Invalid;
}
Register::Encoding base() const {
MOZ_ASSERT(hasBase());
MOZ_ASSERT(base_ != Registers::Invalid);
return base_;
}
bool hasIndex() const {
return index_ != Registers::Invalid;
}
Register::Encoding index() const {
MOZ_ASSERT(hasIndex());
MOZ_ASSERT(index_ != Registers::Invalid);
return index_;
}


@@ -101,38 +101,8 @@ AnalyzeLsh(TempAllocator& alloc, MLsh* lsh)
}
template<typename MAsmJSHeapAccessType>
bool
EffectiveAddressAnalysis::tryAddDisplacement(MAsmJSHeapAccessType *ins, int32_t o)
{
// Compute the new offset. Check for overflow and negative. In theory it
// ought to be possible to support negative offsets, but it'd require
// more elaborate bounds checking mechanisms than we currently have.
MOZ_ASSERT(ins->offset() >= 0);
int32_t newOffset = uint32_t(ins->offset()) + o;
if (newOffset < 0)
return false;
// Compute the new offset to the end of the access. Check for overflow
// and negative here also.
int32_t newEnd = uint32_t(newOffset) + ins->byteSize();
if (newEnd < 0)
return false;
MOZ_ASSERT(uint32_t(newEnd) >= uint32_t(newOffset));
// Determine the range of valid offsets which can be folded into this
// instruction and check whether our computed offset is within that range.
size_t range = mir_->foldableOffsetRange(ins);
if (size_t(newEnd) > range)
return false;
// Everything checks out. This is the new offset.
ins->setOffset(newOffset);
return true;
}
template<typename MAsmJSHeapAccessType>
void
EffectiveAddressAnalysis::analyzeAsmHeapAccess(MAsmJSHeapAccessType* ins)
static void
AnalyzeAsmHeapAccess(MAsmJSHeapAccessType* ins, MIRGraph& graph)
{
MDefinition* ptr = ins->ptr();
@@ -143,8 +113,8 @@ EffectiveAddressAnalysis::analyzeAsmHeapAccess(MAsmJSHeapAccessType* ins)
// a situation where the sum of a constant pointer value and a non-zero
// offset doesn't actually fit into the address mode immediate.
int32_t imm = ptr->constantValue().toInt32();
if (imm != 0 && tryAddDisplacement(ins, imm)) {
MInstruction* zero = MConstant::New(graph_.alloc(), Int32Value(0));
if (imm != 0 && ins->tryAddDisplacement(imm)) {
MInstruction* zero = MConstant::New(graph.alloc(), Int32Value(0));
ins->block()->insertBefore(ins, zero);
ins->replacePtr(zero);
}
@@ -158,7 +128,7 @@ EffectiveAddressAnalysis::analyzeAsmHeapAccess(MAsmJSHeapAccessType* ins)
mozilla::Swap(op0, op1);
if (op1->isConstantValue()) {
int32_t imm = op1->constantValue().toInt32();
if (tryAddDisplacement(ins, imm))
if (ins->tryAddDisplacement(imm))
ins->replacePtr(op0);
}
}
@@ -189,9 +159,9 @@ EffectiveAddressAnalysis::analyze()
if (i->isLsh())
AnalyzeLsh(graph_.alloc(), i->toLsh());
else if (i->isAsmJSLoadHeap())
analyzeAsmHeapAccess(i->toAsmJSLoadHeap());
AnalyzeAsmHeapAccess(i->toAsmJSLoadHeap(), graph_);
else if (i->isAsmJSStoreHeap())
analyzeAsmHeapAccess(i->toAsmJSStoreHeap());
AnalyzeAsmHeapAccess(i->toAsmJSStoreHeap(), graph_);
}
}
return true;


@@ -14,18 +14,11 @@ class MIRGraph;
class EffectiveAddressAnalysis
{
MIRGenerator* mir_;
MIRGraph& graph_;
template<typename MAsmJSHeapAccessType>
bool tryAddDisplacement(MAsmJSHeapAccessType *ins, int32_t o);
template<typename MAsmJSHeapAccessType>
void analyzeAsmHeapAccess(MAsmJSHeapAccessType* ins);
public:
EffectiveAddressAnalysis(MIRGenerator *mir, MIRGraph& graph)
: mir_(mir), graph_(graph)
explicit EffectiveAddressAnalysis(MIRGraph& graph)
: graph_(graph)
{}
bool analyze();


@@ -1411,7 +1411,7 @@ OptimizeMIR(MIRGenerator* mir)
if (mir->optimizationInfo().eaaEnabled()) {
AutoTraceLog log(logger, TraceLogger_EffectiveAddressAnalysis);
EffectiveAddressAnalysis eaa(mir, graph);
EffectiveAddressAnalysis eaa(graph);
if (!eaa.analyze())
return false;
IonSpewPass("Effective Address Analysis");


@@ -12611,9 +12611,32 @@ class MAsmJSHeapAccess
bool needsBoundsCheck() const { return needsBoundsCheck_; }
void removeBoundsCheck() { needsBoundsCheck_ = false; }
unsigned numSimdElems() const { MOZ_ASSERT(Scalar::isSimdType(accessType_)); return numSimdElems_; }
void setOffset(int32_t o) {
MOZ_ASSERT(o >= 0);
offset_ = o;
bool tryAddDisplacement(int32_t o) {
// Compute the new offset. Check for overflow and negative. In theory it
// ought to be possible to support negative offsets, but it'd require
// more elaborate bounds checking mechanisms than we currently have.
MOZ_ASSERT(offset_ >= 0);
int32_t newOffset = uint32_t(offset_) + o;
if (newOffset < 0)
return false;
// Compute the new offset to the end of the access. Check for overflow
// and negative here also.
int32_t newEnd = uint32_t(newOffset) + byteSize();
if (newEnd < 0)
return false;
MOZ_ASSERT(uint32_t(newEnd) >= uint32_t(newOffset));
// If we need bounds checking, keep it within the more restrictive
// AsmJSCheckedImmediateRange. Otherwise, just keep it within what
// the instruction set can support.
size_t range = needsBoundsCheck() ? AsmJSCheckedImmediateRange : AsmJSImmediateRange;
if (size_t(newEnd) > range)
return false;
offset_ = newOffset;
return true;
}
};
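The tryAddDisplacement member re-added above only folds a displacement when both the new start offset and the new end of the access stay non-negative and within the permitted immediate range. A standalone sketch of that check is below; the free function, its byteSize and range parameters, and the pointer-style out parameter are illustrative rather than the class's real interface.

#include <cstddef>
#include <cstdint>

// Fold `o` into `*offset` if the whole [offset, offset + byteSize) access
// still fits inside `range`; mirrors the overflow/negativity checks above.
bool tryFoldDisplacement(int32_t* offset, int32_t byteSize, int32_t o, size_t range) {
    int32_t newOffset = int32_t(uint32_t(*offset) + uint32_t(o));
    if (newOffset < 0)          // start of access wrapped or went negative
        return false;
    int32_t newEnd = int32_t(uint32_t(newOffset) + uint32_t(byteSize));
    if (newEnd < 0)             // end of access overflowed
        return false;
    if (size_t(newEnd) > range) // must stay inside the foldable immediate range
        return false;
    *offset = newOffset;
    return true;
}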


@@ -234,8 +234,17 @@ class MIRGenerator
Label* outOfBoundsLabel() const {
return outOfBoundsLabel_;
}
bool needsAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* access) const;
size_t foldableOffsetRange(const MAsmJSHeapAccess* access) const;
bool needsAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* access) const {
// A heap access needs a bounds-check branch if we're not relying on signal
// handlers to catch errors, and if it's not proven to be within bounds.
// We use signal-handlers on x64, but on x86 there isn't enough address
// space for a guard region.
#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
if (usesSignalHandlersForAsmJSOOB_)
return false;
#endif
return access->needsBoundsCheck();
}
};
} // namespace jit


@@ -108,45 +108,6 @@ MIRGenerator::addAbortedPreliminaryGroup(ObjectGroup* group)
CrashAtUnhandlableOOM("addAbortedPreliminaryGroup");
}
bool
MIRGenerator::needsAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* access) const
{
// A heap access needs a bounds-check branch if we're not relying on signal
// handlers to catch errors, and if it's not proven to be within bounds.
// We use signal-handlers on x64, but on x86 there isn't enough address
// space for a guard region.
#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
if (usesSignalHandlersForAsmJSOOB_)
return false;
#endif
return access->needsBoundsCheck();
}
size_t
MIRGenerator::foldableOffsetRange(const MAsmJSHeapAccess* access) const
{
// This determines whether it's ok to fold up to AsmJSImmediateSize
// offsets, instead of just AsmJSCheckedImmediateSize.
#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
// With signal-handler OOB handling, we reserve guard space for the full
// immediate size.
if (usesSignalHandlersForAsmJSOOB_)
return AsmJSImmediateRange;
#endif
// On 32-bit platforms, if we've proven the access is in bounds after
// 32-bit wrapping, we can fold full offsets because they're added with
// 32-bit arithmetic.
if (sizeof(intptr_t) == sizeof(int32_t) && !access->needsBoundsCheck())
return AsmJSImmediateRange;
// Otherwise, only allow the checked size. This is always less than the
// minimum heap length, and allows explicit bounds checks to fold in the
// offset without overflow.
return AsmJSCheckedImmediateRange;
}
void
MIRGraph::addBlock(MBasicBlock* block)
{
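The removed foldableOffsetRange encodes a three-way policy: the full immediate range is usable when signal handlers provide a guard region, or when a proven-in-bounds access on a 32-bit platform is added with wrapping 32-bit arithmetic; otherwise only the checked range is safe to fold. A compact restatement of that policy, using placeholder constants rather than the real AsmJSCheckedImmediateRange/AsmJSImmediateRange values:

#include <cstddef>
#include <cstdint>

// Placeholder values for illustration only.
static const size_t kCheckedRange = 4096;
static const size_t kFullRange = 64 * 1024;

size_t foldableRange(bool usesSignalHandlersForOOB, bool needsBoundsCheck) {
    // A signal-handler guard region covers the full immediate range.
    if (usesSignalHandlersForOOB)
        return kFullRange;
    // On 32-bit targets, an access already proven in bounds wraps harmlessly,
    // so the full range can be folded there too.
    if (sizeof(intptr_t) == sizeof(int32_t) && !needsBoundsCheck)
        return kFullRange;
    // Otherwise keep the offset small enough for explicit bounds checks.
    return kCheckedRange;
}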


@@ -531,8 +531,8 @@ js::jit::Disassembler::DumpHeapAccess(const HeapAccess& access)
if (access.address().isPCRelative()) {
fprintf(stderr, MEM_o32r " ", ADDR_o32r(access.address().disp()));
} else if (access.address().hasIndex()) {
if (access.address().hasBase()) {
} else if (access.address().index() != X86Encoding::invalid_reg) {
if (access.address().base() != X86Encoding::invalid_reg) {
fprintf(stderr, MEM_obs " ",
ADDR_obs(access.address().disp(), access.address().base(),
access.address().index(), access.address().scale()));
@@ -541,7 +541,7 @@ js::jit::Disassembler::DumpHeapAccess(const HeapAccess& access)
ADDR_os(access.address().disp(),
access.address().index(), access.address().scale()));
}
} else if (access.address().hasBase()) {
} else if (access.address().base() != X86Encoding::invalid_reg) {
fprintf(stderr, MEM_ob " ", ADDR_ob(access.address().disp(), access.address().base()));
} else {
fprintf(stderr, MEM_o " ", ADDR_o(access.address().disp()));