Brian Hackett 2012-01-13 17:40:14 -08:00
commit e494be423b
27 changed files with 4471 additions and 185 deletions

View File

@ -2,5 +2,6 @@
global:
NSModule;
NSGetModule;
__RLD_MAP;
local: *;
};

View File

@ -2,5 +2,6 @@ EXPORTED {
global:
NSModule;
NSGetModule;
__RLD_MAP;
local: *;
};

View File

@ -319,10 +319,12 @@ endif # }
ifneq (86,$(findstring 86,$(OS_TEST))) # {
ifneq (arm,$(findstring arm,$(OS_TEST))) # {
ifneq (mips,$(findstring mips,$(OS_TEST))) # {
# Use mutex-backed atomics
CPPSRCS += atomicops_internals_mutex.cc
endif # }
endif # }
endif # }
OS_CXXFLAGS += $(TK_CFLAGS)

View File

@ -136,6 +136,8 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#include "base/atomicops_internals_x86_gcc.h"
#elif defined(COMPILER_GCC) && defined(ARCH_CPU_ARM_FAMILY)
#include "base/atomicops_internals_arm_gcc.h"
#elif defined(COMPILER_GCC) && defined(ARCH_CPU_MIPS)
#include "base/atomicops_internals_mips_gcc.h"
#else
#include "base/atomicops_internals_mutex.h"
#endif
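
Architectures with no hand-written atomics implementation (x86, ARM and now MIPS each get a dedicated header) fall back to the mutex-backed variant added above. Purely as an illustration of that fallback strategy, and not the actual contents of atomicops_internals_mutex, a lock-serialized compare-and-swap can look like the sketch below (the POSIX mutex and the single global lock are assumptions):

#include <pthread.h>
#include <stdint.h>
#include <assert.h>

typedef int32_t Atomic32;

// One process-wide lock serializes every "atomic" operation (an assumption;
// a real implementation may shard locks differently).
static pthread_mutex_t gAtomicsLock = PTHREAD_MUTEX_INITIALIZER;

static Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
    pthread_mutex_lock(&gAtomicsLock);
    Atomic32 prev = *ptr;          // read under the lock
    if (prev == old_value)
        *ptr = new_value;          // write only if the value is unchanged
    pthread_mutex_unlock(&gAtomicsLock);
    return prev;                   // caller compares this against old_value
}

int main() {
    volatile Atomic32 v = 1;
    assert(NoBarrier_CompareAndSwap(&v, 1, 2) == 1 && v == 2);  // swap happens
    assert(NoBarrier_CompareAndSwap(&v, 1, 3) == 2 && v == 2);  // no swap
    return 0;
}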

View File

@ -333,6 +333,9 @@ endif
ifeq (sparc, $(findstring sparc,$(TARGET_CPU)))
ASFILES += TrampolineSparc.s
endif
ifeq (mips, $(findstring mips,$(TARGET_CPU)))
CPPSRCS += TrampolineMIPS.cpp
endif
#
# END include sources for the method JIT
#############################################
@ -361,7 +364,7 @@ CPPSRCS += checks.cc \
# For architectures without YARR JIT, PCRE is faster than the YARR
# interpreter (bug 684559).
ifeq (,$(filter arm% sparc %86 x86_64,$(TARGET_CPU)))
ifeq (,$(filter arm% sparc %86 x86_64 mips%,$(TARGET_CPU)))
VPATH += $(srcdir)/yarr/pcre \
$(NULL)

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1028,7 +1028,7 @@
/* The JIT is enabled by default on all x86, x86-64, ARM platforms. */
#if !defined(ENABLE_JIT) \
&& (WTF_CPU_X86 || WTF_CPU_X86_64 || WTF_CPU_ARM || WTF_CPU_SPARC32) \
&& (WTF_CPU_X86 || WTF_CPU_X86_64 || WTF_CPU_ARM || WTF_CPU_SPARC32 || WTF_CPU_MIPS) \
&& (WTF_OS_DARWIN || !WTF_COMPILER_GCC || GCC_VERSION_AT_LEAST(4, 1, 0)) \
&& !WTF_OS_WINCE
#define ENABLE_JIT 1

View File

@ -2811,6 +2811,14 @@ sparc*-*)
AC_DEFINE(JS_CPU_SPARC)
AC_DEFINE(JS_NUNBOX32)
;;
mips*-*)
ENABLE_METHODJIT=1
ENABLE_MONOIC=1
ENABLE_POLYIC=1
ENABLE_METHODJIT_TYPED_ARRAY=1
AC_DEFINE(JS_CPU_MIPS)
AC_DEFINE(JS_NUNBOX32)
;;
esac
MOZ_ARG_DISABLE_BOOL(methodjit,

View File

@ -85,6 +85,7 @@ JITSTAT(archIs64BIT)
JITSTAT(archIsARM)
JITSTAT(archIsSPARC)
JITSTAT(archIsPPC)
JITSTAT(archIsMIPS)
#ifdef DEFINED_MONITOR_JITSTAT
#undef DEFINED_MONITOR_JITSTAT

View File

@ -92,7 +92,11 @@ JSDOUBLE_IS_NaN(jsdouble d)
{
jsdpun u;
u.d = d;
#if defined(mips) || defined(__mips__) || defined(MIPS) || defined(_MIPS_)
return (u.u64 & ~JSDOUBLE_SIGNBIT) > JSDOUBLE_EXPMASK;
#else
return (u.s.hi & JSDOUBLE_HI32_NAN) == JSDOUBLE_HI32_NAN;
#endif
}
static inline int
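
The MIPS branch is needed because legacy MIPS FPUs use the opposite quiet-NaN convention: the hardware default NaN has high word 0x7FF7FFFF, which the generic high-word mask test (written for the usual quiet-bit encoding) does not recognize. Comparing all 64 magnitude bits against the exponent mask classifies any NaN regardless of convention. A standalone sketch of that arithmetic follows; the mask constants are the conventional IEEE-754 values and are assumed to match the JSDOUBLE_* macros:

#include <stdint.h>
#include <assert.h>

static const uint64_t kSignBit = 0x8000000000000000ULL;  // JSDOUBLE_SIGNBIT analogue
static const uint64_t kExpMask = 0x7ff0000000000000ULL;  // JSDOUBLE_EXPMASK analogue

// NaN <=> exponent all ones and a nonzero mantissa, i.e. the magnitude bits
// are strictly greater than the exponent mask.
static bool IsNaNBits(uint64_t bits) {
    return (bits & ~kSignBit) > kExpMask;
}

int main() {
    assert(IsNaNBits(0x7ff8000000000000ULL));   // canonical quiet NaN
    assert(IsNaNBits(0x7ff7ffffffffffffULL));   // legacy-MIPS hardware NaN
    assert(!IsNaNBits(0x7ff0000000000000ULL));  // +Infinity
    assert(!IsNaNBits(0xfff0000000000000ULL));  // -Infinity
    assert(!IsNaNBits(0x3ff0000000000000ULL));  // 1.0
    return 0;
}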

View File

@ -174,6 +174,10 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::ARMRegiste
static const JSC::MacroAssembler::RegisterID JSReturnReg_Type = JSC::SparcRegisters::l2;
static const JSC::MacroAssembler::RegisterID JSReturnReg_Data = JSC::SparcRegisters::l3;
static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::SparcRegisters::l4;
#elif defined(JS_CPU_MIPS)
static const JSC::MacroAssembler::RegisterID JSReturnReg_Type = JSC::MIPSRegisters::a0;
static const JSC::MacroAssembler::RegisterID JSReturnReg_Data = JSC::MIPSRegisters::a2;
static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::MIPSRegisters::a1;
#endif
size_t distanceOf(Label l) {
@ -283,7 +287,7 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::SparcRegist
static const uint64_t DoubleNegMask = 0x8000000000000000ULL;
loadDouble(&DoubleNegMask, Registers::FPConversionTemp);
xorDouble(Registers::FPConversionTemp, fpreg);
#elif defined JS_CPU_ARM || defined JS_CPU_SPARC
#elif defined JS_CPU_ARM || defined JS_CPU_SPARC || defined JS_CPU_MIPS
negDouble(fpreg, fpreg);
#endif
}
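
x86/x64 negate a double by XOR-ing in DoubleNegMask, while ARM, SPARC and now MIPS use a dedicated negate instruction; the two are interchangeable because IEEE-754 negation is exactly a sign-bit flip. A tiny standalone illustration (the helper name is hypothetical):

#include <stdint.h>
#include <string.h>
#include <assert.h>

static double NegateViaSignBit(double d) {
    uint64_t bits;
    memcpy(&bits, &d, sizeof bits);      // avoid strict-aliasing issues
    bits ^= 0x8000000000000000ULL;       // DoubleNegMask: flip only the sign bit
    memcpy(&d, &bits, sizeof bits);
    return d;
}

int main() {
    assert(NegateViaSignBit(2.5) == -2.5);
    assert(NegateViaSignBit(-3.0) == 3.0);
    return 0;
}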
@ -318,6 +322,13 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::SparcRegist
*/
moveWithPatch(Imm32(intptr_t(fun)), JSC::SparcRegisters::i0);
return JS_FUNC_TO_DATA_PTR(void *, JaegerStubVeneer);
#elif defined(JS_CPU_MIPS)
/*
* For MIPS, we need to call JaegerStubVeneer by passing
* the real target address in v0.
*/
moveWithPatch(Imm32(intptr_t(fun)), JSC::MIPSRegisters::v0);
return JS_FUNC_TO_DATA_PTR(void *, JaegerStubVeneer);
#else
/*
* Architectures that push the return address to an easily-determined
@ -358,10 +369,14 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::SparcRegist
pop(reg);
}
#if defined JS_CPU_MIPS
static const uint32_t StackAlignment = 8;
#else
static const uint32_t StackAlignment = 16;
#endif
static inline uint32_t alignForCall(uint32_t stackBytes) {
#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
#if defined(JS_CPU_X86) || defined(JS_CPU_X64) || defined(JS_CPU_MIPS)
// If StackAlignment is a power of two, % is just two shifts.
// 16 - (x % 16) gives alignment, extra % 16 handles total == 0.
return align(stackBytes, StackAlignment);
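
StackAlignment drops to 8 on MIPS because the O32 calling convention only requires 8-byte stack alignment at call sites. The align() helper used here is assumed to round up to the next multiple of the alignment; the arithmetic the comment describes is sketched below:

#include <stdint.h>
#include <assert.h>

static uint32_t align(uint32_t bytes, uint32_t alignment) {
    // (alignment - bytes % alignment) % alignment is the padding needed;
    // the outer % makes the padding 0 when bytes is already aligned.
    return bytes + (alignment - bytes % alignment) % alignment;
}

int main() {
    assert(align(0, 16) == 0);     // already aligned: no padding
    assert(align(20, 16) == 32);   // x86/x64: round up to a multiple of 16
    assert(align(20, 8) == 24);    // MIPS O32: round up to a multiple of 8
    return 0;
}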

View File

@ -944,8 +944,14 @@ mjit::Compiler::finishThisUp(JITScript **jitp)
JaegerSpew(JSpew_Insns, "## Fast code (masm) size = %lu, Slow code (stubcc) size = %lu.\n",
(unsigned long) masm.size(), (unsigned long) stubcc.size());
/* To keep inlineDoubles and oolDoubles aligned to sizeof(double) bytes,
MIPS adds an extra sizeof(double) bytes to codeSize. */
size_t codeSize = masm.size() +
#if defined(JS_CPU_MIPS)
stubcc.size() + sizeof(double) +
#else
stubcc.size() +
#endif
(masm.numDoubles() * sizeof(double)) +
(stubcc.masm.numDoubles() * sizeof(double)) +
jumpTableOffsets.length() * sizeof(void *);
@ -1367,7 +1373,16 @@ mjit::Compiler::finishThisUp(JITScript **jitp)
/* Link fast and slow paths together. */
stubcc.fixCrossJumps(result, masm.size(), masm.size() + stubcc.size());
#if defined(JS_CPU_MIPS)
/* Make sure doubleOffset is aligned to sizeof(double) bytes. */
size_t doubleOffset = (((size_t)result + masm.size() + stubcc.size() +
sizeof(double) - 1) & (~(sizeof(double) - 1))) -
(size_t)result;
JS_ASSERT((((size_t)result + doubleOffset) & 7) == 0);
#else
size_t doubleOffset = masm.size() + stubcc.size();
#endif
double *inlineDoubles = (double *) (result + doubleOffset);
double *oolDoubles = (double*) (result + doubleOffset +
masm.numDoubles() * sizeof(double));
@ -3398,7 +3413,7 @@ mjit::Compiler::interruptCheckHelper()
void *interrupt = (void*) &JS_THREAD_DATA(cx)->interruptFlags;
#endif
#if defined(JS_CPU_X86) || defined(JS_CPU_ARM)
#if defined(JS_CPU_X86) || defined(JS_CPU_ARM) || defined(JS_CPU_MIPS)
jump = masm.branch32(Assembler::NotEqual, AbsoluteAddress(interrupt), Imm32(0));
#else
/* Handle processors that can't load from absolute addresses. */

View File

@ -2324,6 +2324,12 @@ mjit::Compiler::jsop_stricteq(JSOp op)
masm.or32(result1, result);
}
frame.freeReg(result1);
#elif defined(JS_CPU_MIPS)
/* On MIPS the high 32 bits of the NaN produced by 0.0/0.0 are 0x7FF7FFFF;
we need to manually canonicalize them to 0x7FF80000. */
static const int ShiftedCanonicalNaNType = 0x7FF80000 << 1;
masm.setShiftedCanonicalNaN(treg, treg);
masm.setPtr(oppositeCond, treg, Imm32(ShiftedCanonicalNaNType), result);
#elif !defined(JS_CPU_X64)
static const int ShiftedCanonicalNaNType = 0x7FF80000 << 1;
masm.setPtr(oppositeCond, treg, Imm32(ShiftedCanonicalNaNType), result);
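
Strict equality compares the shifted type word against a single immediate, so every NaN first has to be folded onto one canonical bit pattern; on MIPS the hardware-produced NaN differs, hence the extra setShiftedCanonicalNaN step. The helper below only illustrates what that canonicalization achieves on the high word (the name and the hi-word-only check are simplifications, not the jit's code):

#include <stdint.h>
#include <assert.h>

static uint32_t CanonicalizeNaNHi(uint32_t hi) {
    const uint32_t kExpMask = 0x7ff00000;
    // Treat "exponent all ones, high mantissa bits nonzero" as NaN; a complete
    // check would also consult the low mantissa word, omitted for brevity.
    if ((hi & kExpMask) == kExpMask && (hi & 0x000fffff) != 0)
        return 0x7ff80000;   // canonical quiet-NaN high word
    return hi;
}

int main() {
    assert(CanonicalizeNaNHi(0x7ff7ffffU) == 0x7ff80000U);  // MIPS 0.0/0.0 result
    assert(CanonicalizeNaNHi(0x7ff80000U) == 0x7ff80000U);  // already canonical
    assert(CanonicalizeNaNHi(0x7ff00000U) == 0x7ff00000U);  // +Infinity untouched
    assert(CanonicalizeNaNHi(0x40080000U) == 0x40080000U);  // 3.0 untouched
    return 0;
}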

View File

@ -88,6 +88,23 @@ class Repatcher : public JSC::RepatchBuffer
*/
CheckIsStubCall(call.labelAtOffset(0));
JSC::RepatchBuffer::relink(call.callAtOffset(-4), stub);
#elif defined JS_CPU_MIPS
/*
* Stub calls on MIPS look like this:
*
* lui v0, hi(stub)
* ori v0, v0, lo(stub)
* lui t9, hi(JaegerStubVeneer)
* ori t9, t9, lo(JaegerStubVeneer)
* jalr t9
* nop
* call label -> xxx
*
* MIPS has to run stub calls through a veneer in order for THROW to
* work properly. The address that must be patched is the load into
* 'v0', not the load into 't9'.
*/
JSC::RepatchBuffer::relink(call.callAtOffset(-8), stub);
#else
# error
#endif
@ -95,7 +112,7 @@ class Repatcher : public JSC::RepatchBuffer
/* Patch the offset of a Value load emitted by loadValueWithAddressOffsetPatch. */
void patchAddressOffsetForValueLoad(CodeLocationLabel label, uint32_t offset) {
#if defined JS_CPU_X64 || defined JS_CPU_ARM || defined JS_CPU_SPARC
#if defined JS_CPU_X64 || defined JS_CPU_ARM || defined JS_CPU_SPARC || defined JS_CPU_MIPS
repatch(label.dataLabel32AtOffset(0), offset);
#elif defined JS_CPU_X86
static const unsigned LOAD_TYPE_OFFSET = 6;
@ -115,7 +132,7 @@ class Repatcher : public JSC::RepatchBuffer
}
void patchAddressOffsetForValueStore(CodeLocationLabel label, uint32_t offset, bool typeConst) {
#if defined JS_CPU_ARM || defined JS_CPU_X64 || defined JS_CPU_SPARC
#if defined JS_CPU_ARM || defined JS_CPU_X64 || defined JS_CPU_SPARC || defined JS_CPU_MIPS
(void) typeConst;
repatch(label.dataLabel32AtOffset(0), offset);
#elif defined JS_CPU_X86

View File

@ -122,6 +122,8 @@ struct Registers {
static const RegisterID JSFrameReg = JSC::ARMRegisters::r10;
#elif defined(JS_CPU_SPARC)
static const RegisterID JSFrameReg = JSC::SparcRegisters::l0;
#elif defined(JS_CPU_MIPS)
static const RegisterID JSFrameReg = JSC::MIPSRegisters::s0;
#endif
#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
@ -152,6 +154,12 @@ struct Registers {
static const RegisterID ArgReg3 = JSC::SparcRegisters::o3;
static const RegisterID ArgReg4 = JSC::SparcRegisters::o4;
static const RegisterID ArgReg5 = JSC::SparcRegisters::o5;
#elif JS_CPU_MIPS
static const RegisterID ReturnReg = JSC::MIPSRegisters::v0;
static const RegisterID ArgReg0 = JSC::MIPSRegisters::a0;
static const RegisterID ArgReg1 = JSC::MIPSRegisters::a1;
static const RegisterID ArgReg2 = JSC::MIPSRegisters::a2;
static const RegisterID ArgReg3 = JSC::MIPSRegisters::a3;
#endif
static const RegisterID StackPointer = JSC::MacroAssembler::stackPointerRegister;
@ -252,6 +260,33 @@ struct Registers {
| (1 << JSC::SparcRegisters::l6)
| (1 << JSC::SparcRegisters::l7);
static const uint32_t SingleByteRegs = TempRegs | SavedRegs;
#elif defined(JS_CPU_MIPS)
static const uint32_t TempRegs =
(1 << JSC::MIPSRegisters::at)
| (1 << JSC::MIPSRegisters::v0)
| (1 << JSC::MIPSRegisters::v1)
| (1 << JSC::MIPSRegisters::a0)
| (1 << JSC::MIPSRegisters::a1)
| (1 << JSC::MIPSRegisters::a2)
| (1 << JSC::MIPSRegisters::a3)
| (1 << JSC::MIPSRegisters::t5)
| (1 << JSC::MIPSRegisters::t6)
| (1 << JSC::MIPSRegisters::t7);
/* t0-t4 and t9 are reserved as scratch registers for the assembler.
We don't use t8 ($24), since we limit ourselves to $0-$23 to leave
bitmask space for the 8 FP registers. */
static const uint32_t SavedRegs =
(1 << JSC::MIPSRegisters::s1)
| (1 << JSC::MIPSRegisters::s2)
| (1 << JSC::MIPSRegisters::s3)
| (1 << JSC::MIPSRegisters::s4)
| (1 << JSC::MIPSRegisters::s5)
| (1 << JSC::MIPSRegisters::s6)
| (1 << JSC::MIPSRegisters::s7);
// s0 is reserved for JSFrameReg.
static const uint32_t SingleByteRegs = TempRegs | SavedRegs;
#else
# error "Unsupported platform"
@ -287,6 +322,8 @@ struct Registers {
return 4;
#elif defined(JS_CPU_SPARC)
return 6;
#elif defined(JS_CPU_MIPS)
return 4;
#endif
}
@ -337,6 +374,13 @@ struct Registers {
JSC::SparcRegisters::o4,
JSC::SparcRegisters::o5
};
#elif defined(JS_CPU_MIPS)
static const RegisterID regs[] = {
JSC::MIPSRegisters::a0,
JSC::MIPSRegisters::a1,
JSC::MIPSRegisters::a2,
JSC::MIPSRegisters::a3,
};
#endif
JS_ASSERT(numArgRegs(conv) == mozilla::ArrayLength(regs));
if (i > mozilla::ArrayLength(regs))
@ -386,6 +430,19 @@ struct Registers {
| (1 << JSC::SparcRegisters::f6)
) << TotalRegisters;
static const FPRegisterID FPConversionTemp = JSC::SparcRegisters::f8;
#elif defined(JS_CPU_MIPS)
/* TotalRegisters is 24, so TotalFPRegisters can be 8 and the combined
mask still fits in 32 bits.
Note that the O32 ABI can access only the even-numbered FP registers. */
static const uint32_t TotalFPRegisters = 8;
static const uint32_t TempFPRegs = (uint32_t)(
(1 << JSC::MIPSRegisters::f0)
| (1 << JSC::MIPSRegisters::f2)
| (1 << JSC::MIPSRegisters::f4)
| (1 << JSC::MIPSRegisters::f6)
) << TotalRegisters;
// f16 is reserved as a scratch register for the assembler.
static const FPRegisterID FPConversionTemp = JSC::MIPSRegisters::f18;
#else
# error "Unsupported platform"
#endif
@ -397,6 +454,8 @@ struct Registers {
static const RegisterID ClobberInCall = JSC::ARMRegisters::r2;
#elif defined(JS_CPU_SPARC)
static const RegisterID ClobberInCall = JSC::SparcRegisters::l1;
#elif defined(JS_CPU_MIPS)
static const RegisterID ClobberInCall = JSC::MIPSRegisters::at;
#endif
static const uint32_t AvailFPRegs = TempFPRegs;

View File

@ -253,7 +253,7 @@ JS_STATIC_ASSERT(offsetof(FrameRegs, sp) == 0);
#if defined(__GNUC__) && !defined(_WIN64)
/* If this assert fails, you need to realign VMFrame to 16 bytes. */
#ifdef JS_CPU_ARM
#if defined(JS_CPU_ARM) || defined(JS_CPU_MIPS)
JS_STATIC_ASSERT(sizeof(VMFrame) % 8 == 0);
#else
JS_STATIC_ASSERT(sizeof(VMFrame) % 16 == 0);
@ -822,6 +822,7 @@ SYMBOL_STRING(JaegerStubVeneer) ":" "\n"
);
# elif defined(JS_CPU_SPARC)
# elif defined(JS_CPU_MIPS)
# else
# error "Unsupported CPU!"
# endif

View File

@ -52,7 +52,8 @@
#if !defined JS_CPU_X64 && \
!defined JS_CPU_X86 && \
!defined JS_CPU_SPARC && \
!defined JS_CPU_ARM
!defined JS_CPU_ARM && \
!defined JS_CPU_MIPS
# error "Oh no, you should define a platform so this compiles."
#endif
@ -97,6 +98,13 @@ struct VMFrame
void *reserve_0;
void *reserve_1;
#elif defined(JS_CPU_MIPS)
/* Reserved 16 bytes for a0-a3 space in MIPS O32 ABI */
void *unused0;
void *unused1;
void *unused2;
void *unused3;
#endif
union Arguments {
@ -204,6 +212,22 @@ struct VMFrame
inline void** returnAddressLocation() {
return reinterpret_cast<void**>(&this->veneerReturn);
}
#elif defined(JS_CPU_MIPS)
void *savedS0;
void *savedS1;
void *savedS2;
void *savedS3;
void *savedS4;
void *savedS5;
void *savedS6;
void *savedS7;
void *savedGP;
void *savedRA;
void *unused4; // For alignment.
inline void** returnAddressLocation() {
return reinterpret_cast<void**>(this) - 1;
}
#else
# error "The VMFrame layout isn't defined for your processor architecture!"
#endif
@ -226,6 +250,9 @@ struct VMFrame
#if defined(JS_CPU_SPARC)
static const size_t offsetOfFp = 30 * sizeof(void *) + FrameRegs::offsetOfFp;
static const size_t offsetOfInlined = 30 * sizeof(void *) + FrameRegs::offsetOfInlined;
#elif defined(JS_CPU_MIPS)
static const size_t offsetOfFp = 8 * sizeof(void *) + FrameRegs::offsetOfFp;
static const size_t offsetOfInlined = 8 * sizeof(void *) + FrameRegs::offsetOfInlined;
#else
static const size_t offsetOfFp = 4 * sizeof(void *) + FrameRegs::offsetOfFp;
static const size_t offsetOfInlined = 4 * sizeof(void *) + FrameRegs::offsetOfInlined;
@ -237,7 +264,7 @@ struct VMFrame
}
};
#if defined(JS_CPU_ARM) || defined(JS_CPU_SPARC)
#if defined(JS_CPU_ARM) || defined(JS_CPU_SPARC) || defined(JS_CPU_MIPS)
// WARNING: Do not call this function directly from C(++) code because it is not ABI-compliant.
extern "C" void JaegerStubVeneer(void);
#endif

View File

@ -207,6 +207,12 @@ class NunboxAssembler : public JSC::MacroAssembler
JS_ASSERT(differenceBetween(start, load) == 0);
(void) load;
return start;
#elif defined JS_CPU_MIPS
/*
* On MIPS there are LUI/ORI instructions to patch.
*/
load64WithPatch(address, treg, dreg, TAG_OFFSET, PAYLOAD_OFFSET);
return start;
#endif
}
@ -232,6 +238,12 @@ class NunboxAssembler : public JSC::MacroAssembler
return start;
#elif defined JS_CPU_ARM || defined JS_CPU_SPARC
return store64WithAddressOffsetPatch(treg, dreg, address);
#elif defined JS_CPU_MIPS
/*
* On MIPS there are LUI/ORI instructions to patch.
*/
store64WithPatch(address, treg, dreg, TAG_OFFSET, PAYLOAD_OFFSET);
return start;
#endif
}
@ -248,6 +260,12 @@ class NunboxAssembler : public JSC::MacroAssembler
return start;
#elif defined JS_CPU_ARM || defined JS_CPU_SPARC
return store64WithAddressOffsetPatch(type, dreg, address);
#elif defined JS_CPU_MIPS
/*
* On MIPS there are LUI/ORI instructions to patch.
*/
store64WithPatch(address, type, dreg, TAG_OFFSET, PAYLOAD_OFFSET);
return start;
#endif
}
@ -267,6 +285,12 @@ class NunboxAssembler : public JSC::MacroAssembler
return start;
#elif defined JS_CPU_ARM || defined JS_CPU_SPARC
return store64WithAddressOffsetPatch(type, payload, address);
#elif defined JS_CPU_MIPS
/*
* On MIPS there are LUI/ORI instructions to patch.
*/
store64WithPatch(address, type, payload, TAG_OFFSET, PAYLOAD_OFFSET);
return start;
#endif
}
@ -470,6 +494,12 @@ class NunboxAssembler : public JSC::MacroAssembler
#elif defined JS_CPU_ARM
// Yes, we are backwards from SPARC.
fastStoreDouble(srcDest, dataReg, typeReg);
#elif defined JS_CPU_MIPS
#if defined(IS_LITTLE_ENDIAN)
fastStoreDouble(srcDest, dataReg, typeReg);
#else
fastStoreDouble(srcDest, typeReg, dataReg);
#endif
#else
JS_NOT_REACHED("implement this - push double, pop pop is easiest");
#endif

View File

@ -0,0 +1,344 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=4 sw=4 et tw=99:
*
* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Mozilla Jaegermonkey.
*
* The Initial Developer of the Original Code is the Mozilla Foundation.
*
* Portions created by the Initial Developer are Copyright (C) 2010
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Chao-ying Fu <fu@mips.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#include "jstypes.h"
/*
* The MIPS VMFrame is 112 bytes as follows.
*
* 108 [ unused4 ] For alignment.
* 104 [ ra ]
* 100 [ gp ] If PIC code is generated, we will save gp.
* 96 [ s7 ]
* 92 [ s6 ]
* 88 [ s5 ]
* 84 [ s4 ]
* 80 [ s3 ]
* 76 [ s2 ]
* 72 [ s1 ]
* 68 [ s0 ]
* 64 [ stubRejoin ]
* 60 [ entrycode ]
* 56 [ entryfp ]
* 52 [ stkLimit ]
* 48 [ cx ]
* 44 [ regs.fp_ ]
* 40 [ regs.inlined_]
* 36 [ regs.pc ]
* 32 [ regs.sp ]
* 28 [ scratch ]
* 24 [ previous ]
* 20 [ args.ptr2 ] [ dynamicArgc ] (union)
* 16 [ args.ptr ] [ lazyArgsObj ] (union)
* 12 [ unused3 ] O32 ABI, space for a3 (used in callee)
* 8 [ unused2 ] O32 ABI, space for a2 (used in callee)
* 4 [ unused1 ] O32 ABI, space for a1 (used in callee)
* 0 [ unused0 ] O32 ABI, space for a0 (used in callee)
*/
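
One way to sanity-check a hand-maintained layout like the one above is to mirror it in a plain struct and assert the offsets. The struct below is an illustrative stand-in, not the real VMFrame (names flattened, every slot a 32-bit word as on O32); it also shows why MethodJIT.h sets offsetOfFp to 8 * sizeof(void *) + FrameRegs::offsetOfFp: the register file begins eight words into the frame.

#include <stdint.h>
#include <stddef.h>

// Illustrative mirror of the 112-byte O32 frame documented above; every slot
// is a 32-bit word so the offsets hold on any host compiler.
struct MipsVMFrameSketch {
    uint32_t unused0, unused1, unused2, unused3;       //   0..15  a0-a3 home space
    uint32_t args_ptr, args_ptr2;                      //  16, 20  argument unions
    uint32_t previous, scratch;                        //  24, 28
    uint32_t regs_sp, regs_pc, regs_inlined, regs_fp;  //  32..47  FrameRegs
    uint32_t cx, stkLimit, entryfp, entrycode;         //  48..63
    uint32_t stubRejoin;                               //  64
    uint32_t s0, s1, s2, s3, s4, s5, s6, s7;           //  68..99  callee-saved
    uint32_t gp, ra, unused4;                          // 100, 104, 108
};

static_assert(offsetof(MipsVMFrameSketch, regs_sp) == 32, "FrameRegs begins 8 words in");
static_assert(offsetof(MipsVMFrameSketch, ra) == 104, "ra is reloaded from 104($sp)");
static_assert(sizeof(MipsVMFrameSketch) == 112, "frame is 112 bytes under O32");

int main() { return 0; }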
asm (
".text" "\n"
".align 2" "\n"
".set noreorder" "\n"
".set nomacro" "\n"
".set nomips16" "\n"
".globl JaegerThrowpoline" "\n"
".ent JaegerThrowpoline" "\n"
".type JaegerThrowpoline,@function" "\n"
"JaegerThrowpoline:" "\n"
#if defined(__PIC__)
"lw $28,100($29)" "\n"
"la $25,js_InternalThrow" "\n"
".reloc 1f,R_MIPS_JALR,js_InternalThrow" "\n"
"1: jalr $25" "\n"
"move $4,$29 # set up a0" "\n"
#else
"jal js_InternalThrow" "\n"
"move $4,$29 # set up a0" "\n"
#endif
"beq $2,$0,1f" "\n"
"nop" "\n"
"jr $2 # jump to a scripted handler" "\n"
"nop" "\n"
"1:" "\n"
#if defined(__PIC__)
"lw $28,100($29)" "\n"
"la $25,PopActiveVMFrame" "\n"
".reloc 1f,R_MIPS_JALR,PopActiveVMFrame" "\n"
"1: jalr $25" "\n"
"move $4,$29 # set up a0" "\n"
#else
"jal PopActiveVMFrame" "\n"
"move $4,$29 # set up a0" "\n"
#endif
"lw $31,104($29)" "\n"
#if defined(__PIC__)
"lw $28,100($29)" "\n"
#endif
"lw $23,96($29)" "\n"
"lw $22,92($29)" "\n"
"lw $21,88($29)" "\n"
"lw $20,84($29)" "\n"
"lw $19,80($29)" "\n"
"lw $18,76($29)" "\n"
"lw $17,72($29)" "\n"
"lw $16,68($29)" "\n"
"li $2,0 # return 0 to represent an unhandled exception." "\n"
"jr $31" "\n"
"addiu $29,$29,112" "\n"
".set reorder" "\n"
".set macro" "\n"
".end JaegerThrowpoline" "\n"
".size JaegerThrowpoline,.-JaegerThrowpoline" "\n"
);
asm (
".text" "\n"
".align 2" "\n"
".set noreorder" "\n"
".set nomacro" "\n"
".set nomips16" "\n"
".globl JaegerTrampoline" "\n"
".ent JaegerTrampoline" "\n"
".type JaegerTrampoline,@function" "\n"
"JaegerTrampoline:" "\n"
#if defined(__PIC__)
"lui $28,%hi(_gp_disp)" "\n"
"addiu $28,$28,%lo(_gp_disp)" "\n"
"addu $28,$28,$25" "\n"
#endif
"addiu $29,$29,-112" "\n"
"sw $31,104($29)" "\n"
#if defined(__PIC__)
"sw $28,100($29)" "\n"
#endif
"sw $23,96($29)" "\n"
"sw $22,92($29)" "\n"
"sw $21,88($29)" "\n"
"sw $20,84($29)" "\n"
"sw $19,80($29)" "\n"
"sw $18,76($29)" "\n"
"sw $17,72($29)" "\n"
"sw $16,68($29)" "\n"
"sw $0,64($29) # stubRejoin" "\n"
"sw $5,60($29) # entrycode" "\n"
"sw $5,56($29) # entryfp" "\n"
"sw $7,52($29) # stackLimit" "\n"
"sw $4,48($29) # cx" "\n"
"sw $5,44($29) # regs.fp" "\n"
"move $16,$5 # preserve fp to s0" "\n"
"move $17,$6 # preserve code to s1" "\n"
#if defined(__PIC__)
"la $25,PushActiveVMFrame" "\n"
".reloc 1f,R_MIPS_JALR,PushActiveVMFrame" "\n"
"1: jalr $25" "\n"
"move $4,$29 # set up a0" "\n"
#else
"jal PushActiveVMFrame" "\n"
"move $4,$29 # set up a0" "\n"
#endif
"move $25,$17 # move code to $25" "\n"
"jr $25 # jump to the compiled JavaScript Function" "\n"
"nop" "\n"
".set reorder" "\n"
".set macro" "\n"
".end JaegerTrampoline" "\n"
".size JaegerTrampoline,.-JaegerTrampoline" "\n"
);
asm (
".text" "\n"
".align 2" "\n"
".set noreorder" "\n"
".set nomacro" "\n"
".set nomips16" "\n"
".globl JaegerTrampolineReturn" "\n"
".ent JaegerTrampolineReturn" "\n"
".type JaegerTrampolineReturn,@function" "\n"
"JaegerTrampolineReturn:" "\n"
#if defined(IS_LITTLE_ENDIAN)
"sw $4,28($16) # a0: fp->rval type for LITTLE-ENDIAN" "\n"
"sw $6,24($16) # a2: fp->rval data for LITTLE-ENDIAN" "\n"
#else
"sw $4,24($16) # a0: fp->rval type for BIG-ENDIAN" "\n"
"sw $6,28($16) # a2: fp->rval data for BIG-ENDIAN" "\n"
#endif
#if defined(__PIC__)
"lw $28,100($29)" "\n"
"la $25,PopActiveVMFrame" "\n"
".reloc 1f,R_MIPS_JALR,PopActiveVMFrame" "\n"
"1: jalr $25" "\n"
"move $4,$29 # set up a0" "\n"
#else
"jal PopActiveVMFrame" "\n"
"move $4,$29 # set up a0" "\n"
#endif
"lw $31,104($29)" "\n"
#if defined(__PIC__)
"lw $28,100($29)" "\n"
#endif
"lw $23,96($29)" "\n"
"lw $22,92($29)" "\n"
"lw $21,88($29)" "\n"
"lw $20,84($29)" "\n"
"lw $19,80($29)" "\n"
"lw $18,76($29)" "\n"
"lw $17,72($29)" "\n"
"lw $16,68($29)" "\n"
"li $2,1 # return ture to indicate successful completion" "\n"
"jr $31" "\n"
"addiu $29,$29,112" "\n"
".set reorder" "\n"
".set macro" "\n"
".end JaegerTrampolineReturn" "\n"
".size JaegerTrampolineReturn,.-JaegerTrampolineReturn" "\n"
);
asm (
".text" "\n"
".align 2" "\n"
".set noreorder" "\n"
".set nomacro" "\n"
".set nomips16" "\n"
".globl JaegerStubVeneer" "\n"
".ent JaegerStubVeneer" "\n"
".type JaegerStubVeneer,@function" "\n"
"JaegerStubVeneer:" "\n"
"addiu $29,$29,-24 # Need 16 (a0-a3) + 4 (align) + 4 ($31) bytes" "\n"
"sw $31,20($29) # Store $31 to 20($29)" "\n"
"move $25,$2 # the target address is passed from $2" "\n"
"jalr $25" "\n"
"nop" "\n"
"lw $31,20($29)" "\n"
"jr $31" "\n"
"addiu $29,$29,24" "\n"
".set reorder" "\n"
".set macro" "\n"
".end JaegerStubVeneer" "\n"
".size JaegerStubVeneer,.-JaegerStubVeneer" "\n"
);
asm (
".text" "\n"
".align 2" "\n"
".set noreorder" "\n"
".set nomacro" "\n"
".set nomips16" "\n"
".globl JaegerInterpolineScripted" "\n"
".ent JaegerInterpolineScripted" "\n"
".type JaegerInterpolineScripted,@function" "\n"
"JaegerInterpolineScripted:" "\n"
"lw $16,16($16) # Load f->prev_" "\n"
"b JaegerInterpoline" "\n"
"sw $16,44($29) # Update f->regs->fp_" "\n"
".set reorder" "\n"
".set macro" "\n"
".end JaegerInterpolineScripted" "\n"
".size JaegerInterpolineScripted,.-JaegerInterpolineScripted" "\n"
);
asm (
".text" "\n"
".align 2" "\n"
".set noreorder" "\n"
".set nomacro" "\n"
".set nomips16" "\n"
".globl JaegerInterpoline" "\n"
".ent JaegerInterpoline" "\n"
".type JaegerInterpoline,@function" "\n"
"JaegerInterpoline:" "\n"
"move $5,$4 # returntype" "\n"
"move $4,$6 # returnData" "\n"
"move $6,$2 # returnReg" "\n"
#if defined(__PIC__)
"lw $28,100($29)" "\n"
"la $25,js_InternalInterpret" "\n"
".reloc 1f,R_MIPS_JALR,js_InternalInterpret" "\n"
"1: jalr $25" "\n"
"move $7,$29 # f" "\n"
#else
"jal js_InternalInterpret" "\n"
"move $7,$29 # f" "\n"
#endif
"lw $16,44($29) # Load f->regs->fp_ to s0" "\n"
#if defined(IS_LITTLE_ENDIAN)
"lw $4,28($16) # a0: fp->rval type for LITTLE-ENDIAN" "\n"
"lw $6,24($16) # a2: fp->rval data for LITTLE-ENDIAN" "\n"
#else
"lw $4,24($16) # a0: fp->rval type for BIG-ENDIAN" "\n"
"lw $6,28($16) # a2: fp->rval data for BIG-ENDIAN" "\n"
#endif
"lw $5,28($29) # Load sctrach -> argc" "\n"
"beq $2,$0,1f" "\n"
"nop" "\n"
"jr $2" "\n"
"nop" "\n"
"1:" "\n"
#if defined(__PIC__)
"lw $28,100($29)" "\n"
"la $25,PopActiveVMFrame" "\n"
".reloc 1f,R_MIPS_JALR,PopActiveVMFrame" "\n"
"1: jalr $25" "\n"
"move $4,$29 # set up a0" "\n"
#else
"jal PopActiveVMFrame" "\n"
"move $4,$29 # set up a0" "\n"
#endif
"lw $31,104($29)" "\n"
#if defined(__PIC__)
"lw $28,100($29)" "\n"
#endif
"lw $23,96($29)" "\n"
"lw $22,92($29)" "\n"
"lw $21,88($29)" "\n"
"lw $20,84($29)" "\n"
"lw $19,80($29)" "\n"
"lw $18,76($29)" "\n"
"lw $17,72($29)" "\n"
"lw $16,68($29)" "\n"
"li $2,0 # return 0" "\n"
"jr $31" "\n"
"addiu $29,$29,112" "\n"
".set reorder" "\n"
".set macro" "\n"
".end JaegerInterpoline" "\n"
".size JaegerInterpoline,.-JaegerInterpoline" "\n"
);

View File

@ -573,9 +573,8 @@ public class GeckoAppShell
GeckoApp.mAppContext.getSystemService(Context.LOCATION_SERVICE);
if (enable) {
Criteria crit = new Criteria();
crit.setAccuracy(Criteria.ACCURACY_FINE);
String provider = lm.getBestProvider(crit, true);
Criteria criteria = new Criteria();
String provider = lm.getBestProvider(criteria, true);
if (provider == null)
return;

View File

@ -75,7 +75,6 @@ public:
static void RecordSlowStatement(const nsACString &statement,
const nsACString &dbName,
PRUint32 delay);
static nsresult GetHistogramEnumId(const char *name, Telemetry::ID *id);
struct StmtStats {
PRUint32 hitCount;
PRUint32 totalTime;
@ -87,16 +86,12 @@ private:
// Like GetHistogramById, but returns the underlying C++ object, not the JS one.
nsresult GetHistogramByName(const nsACString &name, Histogram **ret);
bool ShouldReflectHistogram(Histogram *h);
void IdentifyCorruptHistograms(StatisticsRecorder::Histograms &hs);
typedef StatisticsRecorder::Histograms::iterator HistogramIterator;
// This is used for speedy string->Telemetry::ID conversions
// This is used for speedy JS string->Telemetry::ID conversions
typedef nsBaseHashtableET<nsCharPtrHashKey, Telemetry::ID> CharPtrEntryType;
typedef nsTHashtable<CharPtrEntryType> HistogramMapType;
HistogramMapType mHistogramMap;
bool mCanRecord;
static TelemetryImpl *sTelemetry;
static bool gCorruptHistograms[Telemetry::HistogramCount];
nsTHashtable<SlowSQLEntryType> mSlowSQLOnMainThread;
nsTHashtable<SlowSQLEntryType> mSlowSQLOnOtherThread;
nsTHashtable<nsCStringHashKey> mTrackedDBs;
@ -220,23 +215,11 @@ FillRanges(JSContext *cx, JSObject *array, Histogram *h)
return true;
}
enum reflectStatus {
REFLECT_OK,
REFLECT_CORRUPT,
REFLECT_FAILURE
};
enum reflectStatus
JSBool
ReflectHistogramSnapshot(JSContext *cx, JSObject *obj, Histogram *h)
{
Histogram::SampleSet ss;
h->SnapshotSample(&ss);
// We don't want to reflect corrupt histograms.
if (h->FindCorruption(ss) != Histogram::NO_INCONSISTENCIES) {
return REFLECT_CORRUPT;
}
JSObject *counts_array;
JSObject *rarray;
const size_t count = h->bucket_count();
@ -250,14 +233,14 @@ ReflectHistogramSnapshot(JSContext *cx, JSObject *obj, Histogram *h)
&& (counts_array = JS_NewArrayObject(cx, count, NULL))
&& JS_DefineProperty(cx, obj, "counts", OBJECT_TO_JSVAL(counts_array), NULL, NULL, JSPROP_ENUMERATE)
)) {
return REFLECT_FAILURE;
return JS_FALSE;
}
for (size_t i = 0; i < count; i++) {
if (!JS_DefineElement(cx, counts_array, i, INT_TO_JSVAL(ss.counts(i)), NULL, NULL, JSPROP_ENUMERATE)) {
return REFLECT_FAILURE;
return JS_FALSE;
}
}
return REFLECT_OK;
return JS_TRUE;
}
JSBool
@ -307,20 +290,8 @@ JSHistogram_Snapshot(JSContext *cx, uintN argc, jsval *vp)
JSObject *snapshot = JS_NewObject(cx, NULL, NULL, NULL);
if (!snapshot)
return JS_FALSE;
switch (ReflectHistogramSnapshot(cx, snapshot, h)) {
case REFLECT_FAILURE:
return JS_FALSE;
case REFLECT_CORRUPT:
JS_ReportError(cx, "Histogram is corrupt");
return JS_FALSE;
case REFLECT_OK:
JS_SET_RVAL(cx, vp, OBJECT_TO_JSVAL(snapshot));
return JS_TRUE;
default:
MOZ_NOT_REACHED("unhandled reflection status");
return JS_FALSE;
}
JS_SET_RVAL(cx, vp, OBJECT_TO_JSVAL(snapshot));
return ReflectHistogramSnapshot(cx, snapshot, h);
}
nsresult
@ -441,45 +412,28 @@ TelemetryImpl::AddSQLInfo(JSContext *cx, JSObject *rootObj, bool mainThread)
return true;
}
nsresult
TelemetryImpl::GetHistogramEnumId(const char *name, Telemetry::ID *id)
{
if (!sTelemetry) {
return NS_ERROR_FAILURE;
}
nsresult
TelemetryImpl::GetHistogramByName(const nsACString &name, Histogram **ret)
{
// Cache names
// Note the histogram names are statically allocated
TelemetryImpl::HistogramMapType *map = &sTelemetry->mHistogramMap;
if (!map->Count()) {
if (!mHistogramMap.Count()) {
for (PRUint32 i = 0; i < Telemetry::HistogramCount; i++) {
CharPtrEntryType *entry = map->PutEntry(gHistograms[i].id);
CharPtrEntryType *entry = mHistogramMap.PutEntry(gHistograms[i].id);
if (NS_UNLIKELY(!entry)) {
map->Clear();
mHistogramMap.Clear();
return NS_ERROR_OUT_OF_MEMORY;
}
entry->mData = (Telemetry::ID) i;
}
}
CharPtrEntryType *entry = map->GetEntry(name);
if (!entry) {
return NS_ERROR_INVALID_ARG;
}
*id = entry->mData;
return NS_OK;
}
CharPtrEntryType *entry = mHistogramMap.GetEntry(PromiseFlatCString(name).get());
if (!entry)
return NS_ERROR_FAILURE;
nsresult
TelemetryImpl::GetHistogramByName(const nsACString &name, Histogram **ret)
{
Telemetry::ID id;
nsresult rv = GetHistogramEnumId(PromiseFlatCString(name).get(), &id);
if (NS_FAILED(rv)) {
return rv;
}
rv = GetHistogramByEnumId(id, ret);
nsresult rv = GetHistogramByEnumId(entry->mData, ret);
if (NS_FAILED(rv))
return rv;
@ -513,69 +467,6 @@ TelemetryImpl::HistogramFrom(const nsACString &name, const nsACString &existing_
return WrapAndReturnHistogram(clone, cx, ret);
}
void
TelemetryImpl::IdentifyCorruptHistograms(StatisticsRecorder::Histograms &hs)
{
for (HistogramIterator it = hs.begin(); it != hs.end(); ++it) {
Histogram *h = *it;
Telemetry::ID id;
nsresult rv = GetHistogramEnumId(h->histogram_name().c_str(), &id);
// This histogram isn't a static histogram, just ignore it.
if (NS_FAILED(rv)) {
continue;
}
if (gCorruptHistograms[id]) {
continue;
}
Histogram::SampleSet ss;
h->SnapshotSample(&ss);
Histogram::Inconsistencies check = h->FindCorruption(ss);
bool corrupt = (check != Histogram::NO_INCONSISTENCIES);
if (corrupt) {
Telemetry::ID corruptID = Telemetry::HistogramCount;
if (check & Histogram::RANGE_CHECKSUM_ERROR) {
corruptID = Telemetry::RANGE_CHECKSUM_ERRORS;
} else if (check & Histogram::BUCKET_ORDER_ERROR) {
corruptID = Telemetry::BUCKET_ORDER_ERRORS;
} else if (check & Histogram::COUNT_HIGH_ERROR) {
corruptID = Telemetry::TOTAL_COUNT_HIGH_ERRORS;
} else if (check & Histogram::COUNT_LOW_ERROR) {
corruptID = Telemetry::TOTAL_COUNT_LOW_ERRORS;
}
Telemetry::Accumulate(corruptID, 1);
}
gCorruptHistograms[id] = corrupt;
}
}
bool
TelemetryImpl::ShouldReflectHistogram(Histogram *h)
{
const char *name = h->histogram_name().c_str();
Telemetry::ID id;
nsresult rv = GetHistogramEnumId(name, &id);
if (NS_FAILED(rv)) {
// GetHistogramEnumId generally should not fail. But a lookup
// failure shouldn't prevent us from reflecting histograms into JS.
//
// However, these two histograms are created by Histogram itself for
// tracking corruption. We have our own histograms for that, so
// ignore these two.
if (strcmp(name, "Histogram.InconsistentCountHigh") == 0
|| strcmp(name, "Histogram.InconsistentCountLow") == 0) {
return false;
}
return true;
} else {
return !gCorruptHistograms[id];
}
}
NS_IMETHODIMP
TelemetryImpl::GetHistogramSnapshots(JSContext *cx, jsval *ret)
{
@ -584,42 +475,17 @@ TelemetryImpl::GetHistogramSnapshots(JSContext *cx, jsval *ret)
return NS_ERROR_FAILURE;
*ret = OBJECT_TO_JSVAL(root_obj);
StatisticsRecorder::Histograms hs;
StatisticsRecorder::GetHistograms(&hs);
// We identify corrupt histograms first, rather than interspersing it
// in the loop below, to ensure that our corruption statistics don't
// depend on histogram enumeration order.
//
// Of course, we hope that all of these corruption-statistics
// histograms are not themselves corrupt...
IdentifyCorruptHistograms(hs);
// OK, now we can actually reflect things.
for (HistogramIterator it = hs.begin(); it != hs.end(); ++it) {
StatisticsRecorder::Histograms h;
StatisticsRecorder::GetHistograms(&h);
for (StatisticsRecorder::Histograms::iterator it = h.begin(); it != h.end();++it) {
Histogram *h = *it;
if (!ShouldReflectHistogram(h)) {
continue;
}
JSObject *hobj = JS_NewObject(cx, NULL, NULL, NULL);
if (!hobj) {
if (!(hobj
&& JS_DefineProperty(cx, root_obj, h->histogram_name().c_str(),
OBJECT_TO_JSVAL(hobj), NULL, NULL, JSPROP_ENUMERATE)
&& ReflectHistogramSnapshot(cx, hobj, h))) {
return NS_ERROR_FAILURE;
}
switch (ReflectHistogramSnapshot(cx, hobj, h)) {
case REFLECT_CORRUPT:
// We can still hit this case even if ShouldReflectHistograms
// returns true. The histogram lies outside of our control
// somehow; just skip it.
continue;
case REFLECT_FAILURE:
return NS_ERROR_FAILURE;
case REFLECT_OK:
if (!JS_DefineProperty(cx, root_obj, h->histogram_name().c_str(),
OBJECT_TO_JSVAL(hobj), NULL, NULL, JSPROP_ENUMERATE)) {
return NS_ERROR_FAILURE;
}
}
}
return NS_OK;
}

View File

@ -341,12 +341,5 @@ DOMSTORAGE_KEY_VAL_SIZE(SESSION, "session")
#undef DOMSTORAGE_KEY_VAL_SIZE
#undef DOMSTORAGE_HISTOGRAM
/**
* Telemetry telemetry.
*/
HISTOGRAM(RANGE_CHECKSUM_ERRORS, 1, 3000, 10, EXPONENTIAL, "Number of histograms with range checksum errors")
HISTOGRAM(BUCKET_ORDER_ERRORS, 1, 3000, 10, EXPONENTIAL, "Number of histograms with bucket order errors")
HISTOGRAM(TOTAL_COUNT_HIGH_ERRORS, 1, 3000, 10, EXPONENTIAL, "Number of histograms with total count high errors")
HISTOGRAM(TOTAL_COUNT_LOW_ERRORS, 1, 3000, 10, EXPONENTIAL, "Number of histograms with total count low errors")
#undef HISTOGRAM_BOOLEAN

View File

@ -220,8 +220,9 @@ var DownloadTaskbarProgressUpdater =
* 1. If the active window is the download window, then we always update
* the taskbar indicator.
* 2. If the active window isn't the download window, then we update only if
* the status is Normal, i.e. one or more downloads are currently
* progressing. If we aren't, then we clear the indicator.
the status is normal or indeterminate, i.e. one or more downloads are
* currently progressing or in scan mode. If we aren't, then we clear the
* indicator.
*/
_updateTaskbar: function DTPU_updateTaskbar()
{
@ -230,9 +231,10 @@ var DownloadTaskbarProgressUpdater =
}
// If the active window is not the download manager window, set the state
// only if it is Normal
// only if it is normal or indeterminate.
if (this._activeWindowIsDownloadWindow ||
(this._taskbarState == Ci.nsITaskbarProgress.STATE_NORMAL)) {
(this._taskbarState == Ci.nsITaskbarProgress.STATE_NORMAL ||
this._taskbarState == Ci.nsITaskbarProgress.STATE_INDETERMINATE)) {
this._activeTaskbarProgress.setProgressState(this._taskbarState,
this._totalTransferred,
this._totalSize);

View File

@ -3757,9 +3757,11 @@ void nsWindow::DispatchPendingEvents()
--recursionBlocker;
}
// Quickly check to see if there are any
// paint events pending.
if (::GetQueueStatus(QS_PAINT)) {
// Quickly check to see if there are any paint events pending,
// but only dispatch them if it has been long enough since the
// last paint completed.
if (::GetQueueStatus(QS_PAINT) &&
((TimeStamp::Now() - mLastPaintEndTime).ToMilliseconds() >= 50)) {
// Find the top level window.
HWND topWnd = WinUtils::GetTopLevelHWND(mWnd);
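
The throttle is just a timestamp comparison against a 50 ms gap since the last completed paint. A minimal standalone sketch of the same pattern, using std::chrono instead of mozilla::TimeStamp so it compiles on its own (class and method names are hypothetical):

#include <chrono>

class PaintThrottle {
    std::chrono::steady_clock::time_point mLastPaintEnd{};
public:
    // Returns true once at least minGapMs have elapsed since the last recorded
    // paint, mirroring the check added to DispatchPendingEvents().
    bool ShouldPaint(double minGapMs = 50.0) const {
        using namespace std::chrono;
        auto gap = duration_cast<duration<double, std::milli>>(
            steady_clock::now() - mLastPaintEnd);
        return gap.count() >= minGapMs;
    }
    // Call when a paint finishes, mirroring mLastPaintEndTime = TimeStamp::Now().
    void NotePaintEnd() { mLastPaintEnd = std::chrono::steady_clock::now(); }
};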

View File

@ -61,6 +61,8 @@
#include "nsWindowDbg.h"
#include "cairo.h"
#include "nsITimer.h"
#include "mozilla/TimeStamp.h"
#ifdef CAIRO_HAS_D2D_SURFACE
#include "gfxD2DSurface.h"
#endif
@ -94,6 +96,8 @@ class imgIContainer;
class nsWindow : public nsBaseWidget
{
typedef mozilla::TimeStamp TimeStamp;
typedef mozilla::TimeDuration TimeDuration;
typedef mozilla::widget::WindowHook WindowHook;
#if MOZ_WINSDK_TARGETVER >= MOZ_NTDDI_WIN7
typedef mozilla::widget::TaskbarWindowPreview TaskbarWindowPreview;
@ -611,6 +615,10 @@ protected:
bool mHasTaskbarIconBeenCreated;
#endif
// The point in time at which the last paint completed. We use this to avoid
// painting too rapidly in response to frequent input events.
TimeStamp mLastPaintEndTime;
#ifdef ACCESSIBILITY
static BOOL sIsAccessibilityOn;
static HINSTANCE sAccLib;

View File

@ -605,6 +605,7 @@ bool nsWindow::OnPaint(HDC aDC, PRUint32 aNestingLevel)
}
mPaintDC = nsnull;
mLastPaintEndTime = TimeStamp::Now();
#if defined(WIDGET_DEBUG_OUTPUT)
if (debug_WantPaintFlashing())