2009-06-10 18:29:44 -07:00
|
|
|
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
|
2009-06-25 17:14:54 -07:00
|
|
|
* vim: set ts=4 sw=4 et tw=99 ft=cpp:
|
2008-05-30 18:58:43 -07:00
|
|
|
*
|
|
|
|
* ***** BEGIN LICENSE BLOCK *****
|
|
|
|
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
|
|
|
|
*
|
|
|
|
* The contents of this file are subject to the Mozilla Public License Version
|
|
|
|
* 1.1 (the "License"); you may not use this file except in compliance with
|
|
|
|
* the License. You may obtain a copy of the License at
|
|
|
|
* http://www.mozilla.org/MPL/
|
|
|
|
*
|
|
|
|
* Software distributed under the License is distributed on an "AS IS" basis,
|
|
|
|
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
|
|
|
* for the specific language governing rights and limitations under the
|
|
|
|
* License.
|
|
|
|
*
|
|
|
|
* The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
|
|
|
|
* May 28, 2008.
|
|
|
|
*
|
|
|
|
* The Initial Developer of the Original Code is
|
2008-06-28 09:58:15 -07:00
|
|
|
* Brendan Eich <brendan@mozilla.org>
|
2008-05-30 18:58:43 -07:00
|
|
|
*
|
|
|
|
* Contributor(s):
|
2008-06-28 09:58:15 -07:00
|
|
|
* Andreas Gal <gal@mozilla.com>
|
2008-07-15 15:05:16 -07:00
|
|
|
* Mike Shaver <shaver@mozilla.org>
|
|
|
|
* David Anderson <danderson@mozilla.com>
|
2008-05-30 18:58:43 -07:00
|
|
|
*
|
|
|
|
* Alternatively, the contents of this file may be used under the terms of
|
|
|
|
* either of the GNU General Public License Version 2 or later (the "GPL"),
|
|
|
|
* or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
|
|
|
|
* in which case the provisions of the GPL or the LGPL are applicable instead
|
|
|
|
* of those above. If you wish to allow use of your version of this file only
|
|
|
|
* under the terms of either the GPL or the LGPL, and not to allow others to
|
|
|
|
* use your version of this file under the terms of the MPL, indicate your
|
|
|
|
* decision by deleting the provisions above and replace them with the notice
|
|
|
|
* and other provisions required by the GPL or the LGPL. If you do not delete
|
|
|
|
* the provisions above, a recipient may use your version of this file under
|
|
|
|
* the terms of any one of the MPL, the GPL or the LGPL.
|
|
|
|
*
|
|
|
|
* ***** END LICENSE BLOCK ***** */
|
|
|
|
|
2008-05-31 15:29:54 -07:00
|
|
|
#ifndef jstracer_h___
|
|
|
|
#define jstracer_h___
|
2008-05-30 18:58:43 -07:00
|
|
|
|
2008-09-11 15:53:00 -07:00
|
|
|
#ifdef JS_TRACER
|
2008-08-12 08:36:29 -07:00
|
|
|
|
2008-07-23 02:57:56 -07:00
|
|
|
#include "jstypes.h"
|
2009-09-14 17:29:46 -07:00
|
|
|
#include "jsbuiltins.h"
|
|
|
|
#include "jscntxt.h"
|
2009-09-29 19:05:19 -07:00
|
|
|
#include "jsdhash.h"
|
2009-09-14 17:29:46 -07:00
|
|
|
#include "jsinterp.h"
|
2008-05-30 18:58:43 -07:00
|
|
|
#include "jslock.h"
|
2008-07-02 14:38:12 -07:00
|
|
|
#include "jsnum.h"
|
2009-10-12 22:41:08 -07:00
|
|
|
#include "jsvector.h"
|
2008-07-05 16:28:03 -07:00
|
|
|
|
2010-01-22 14:49:18 -08:00
|
|
|
namespace js {
|
|
|
|
|
2008-10-29 21:56:35 -07:00
|
|
|
#if defined(DEBUG) && !defined(JS_JIT_SPEW)
|
|
|
|
#define JS_JIT_SPEW
|
|
|
|
#endif
|
|
|
|
|
2008-07-29 23:48:39 -07:00
|
|
|
template <typename T>
|
2009-08-20 17:22:47 -07:00
|
|
|
class Queue {
|
2008-07-29 23:48:39 -07:00
|
|
|
T* _data;
|
|
|
|
unsigned _len;
|
|
|
|
unsigned _max;
|
2009-08-20 17:22:47 -07:00
|
|
|
nanojit::Allocator* alloc;
|
2008-11-13 00:30:20 -08:00
|
|
|
|
2009-08-20 13:06:27 -07:00
|
|
|
public:
|
2008-07-30 04:17:22 -07:00
|
|
|
void ensure(unsigned size) {
|
2009-12-11 16:12:48 -08:00
|
|
|
if (_max > size)
|
|
|
|
return;
|
2009-11-24 19:35:17 -08:00
|
|
|
if (!_max)
|
2009-12-11 16:12:48 -08:00
|
|
|
_max = 8;
|
|
|
|
_max = JS_MAX(_max * 2, size);
|
2009-08-20 17:22:47 -07:00
|
|
|
if (alloc) {
|
|
|
|
T* tmp = new (*alloc) T[_max];
|
|
|
|
memcpy(tmp, _data, _len * sizeof(T));
|
|
|
|
_data = tmp;
|
|
|
|
} else {
|
2010-04-27 14:17:11 -07:00
|
|
|
_data = (T*)js_realloc(_data, _max * sizeof(T));
|
2009-08-20 17:22:47 -07:00
|
|
|
}
|
2009-01-21 22:45:19 -08:00
|
|
|
#if defined(DEBUG)
|
|
|
|
memset(&_data[_len], 0xcd, _max - _len);
|
|
|
|
#endif
|
2008-07-30 04:17:22 -07:00
|
|
|
}
|
2009-08-20 13:06:27 -07:00
|
|
|
|
2009-08-29 18:57:57 -07:00
|
|
|
Queue(nanojit::Allocator* alloc)
|
2009-08-20 17:22:47 -07:00
|
|
|
: alloc(alloc)
|
|
|
|
{
|
2009-11-24 19:35:17 -08:00
|
|
|
this->_max =
|
2008-07-29 23:48:39 -07:00
|
|
|
this->_len = 0;
|
2009-11-24 19:35:17 -08:00
|
|
|
this->_data = NULL;
|
2008-07-29 23:48:39 -07:00
|
|
|
}
|
2008-11-13 00:30:20 -08:00
|
|
|
|
2008-07-29 23:48:39 -07:00
|
|
|
~Queue() {
|
2009-11-24 19:35:17 -08:00
|
|
|
if (!alloc)
|
2010-04-27 14:17:11 -07:00
|
|
|
js_free(_data);
|
2008-07-29 23:48:39 -07:00
|
|
|
}
|
2008-11-13 00:30:20 -08:00
|
|
|
|
2008-08-06 19:06:37 -07:00
|
|
|
bool contains(T a) {
|
2009-01-10 15:25:03 -08:00
|
|
|
for (unsigned n = 0; n < _len; ++n) {
|
2008-08-06 19:06:37 -07:00
|
|
|
if (_data[n] == a)
|
|
|
|
return true;
|
2009-01-10 15:25:03 -08:00
|
|
|
}
|
2008-08-06 19:06:37 -07:00
|
|
|
return false;
|
|
|
|
}
|
2008-11-13 00:30:20 -08:00
|
|
|
|
2008-07-29 23:48:39 -07:00
|
|
|
void add(T a) {
|
2008-07-30 04:17:22 -07:00
|
|
|
ensure(_len + 1);
|
2008-08-26 01:00:53 -07:00
|
|
|
JS_ASSERT(_len <= _max);
|
2008-07-29 23:48:39 -07:00
|
|
|
_data[_len++] = a;
|
|
|
|
}
|
2008-11-13 00:30:20 -08:00
|
|
|
|
2008-08-26 01:00:53 -07:00
|
|
|
void add(T* chunk, unsigned size) {
|
|
|
|
ensure(_len + size);
|
|
|
|
JS_ASSERT(_len <= _max);
|
|
|
|
memcpy(&_data[_len], chunk, size * sizeof(T));
|
|
|
|
_len += size;
|
|
|
|
}
|
2008-11-13 00:30:20 -08:00
|
|
|
|
2008-08-11 15:16:34 -07:00
|
|
|
void addUnique(T a) {
|
|
|
|
if (!contains(a))
|
|
|
|
add(a);
|
|
|
|
}
|
2008-11-13 00:30:20 -08:00
|
|
|
|
2008-07-30 04:17:22 -07:00
|
|
|
void setLength(unsigned len) {
|
|
|
|
ensure(len + 1);
|
|
|
|
_len = len;
|
|
|
|
}
|
2008-11-13 00:30:20 -08:00
|
|
|
|
2008-07-29 23:48:39 -07:00
|
|
|
void clear() {
|
|
|
|
_len = 0;
|
|
|
|
}
|
2008-11-13 00:30:20 -08:00
|
|
|
|
2009-08-07 14:08:23 -07:00
|
|
|
T & get(unsigned i) {
|
|
|
|
JS_ASSERT(i < length());
|
|
|
|
return _data[i];
|
|
|
|
}
|
|
|
|
|
2008-11-21 15:31:59 -08:00
|
|
|
const T & get(unsigned i) const {
|
2009-06-25 17:14:54 -07:00
|
|
|
JS_ASSERT(i < length());
|
2008-11-21 15:31:59 -08:00
|
|
|
return _data[i];
|
|
|
|
}
|
|
|
|
|
2009-08-07 14:08:23 -07:00
|
|
|
T & operator [](unsigned i) {
|
|
|
|
return get(i);
|
|
|
|
}
|
|
|
|
|
2009-06-25 17:14:54 -07:00
|
|
|
const T & operator [](unsigned i) const {
|
|
|
|
return get(i);
|
|
|
|
}
|
|
|
|
|
2008-07-29 23:48:39 -07:00
|
|
|
unsigned length() const {
|
|
|
|
return _len;
|
|
|
|
}
|
|
|
|
|
|
|
|
T* data() const {
|
|
|
|
return _data;
|
|
|
|
}
|
2009-12-11 19:10:36 -08:00
|
|
|
|
|
|
|
int offsetOf(T slot) {
|
|
|
|
T* p = _data;
|
|
|
|
unsigned n = 0;
|
|
|
|
for (n = 0; n < _len; ++n)
|
|
|
|
if (*p++ == slot)
|
|
|
|
return n;
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2008-07-29 23:48:39 -07:00
|
|
|
};
|
|
|
|
|
2008-06-21 14:55:32 -07:00
|
|
|
/*
|
2008-07-07 02:55:03 -07:00
|
|
|
* Tracker is used to keep track of values being manipulated by the interpreter
|
2009-09-28 18:12:46 -07:00
|
|
|
 * during trace recording. It maps opaque, 4-byte-aligned addresses to LIns
|
|
|
|
* pointers. To do this efficiently, we observe that the addresses of jsvals
|
|
|
|
* living in the interpreter tend to be aggregated close to each other -
|
|
|
|
* usually on the same page (where a tracker page doesn't have to be the same
|
|
|
|
* size as the OS page size, but it's typically similar). The Tracker
|
|
|
|
* consists of a linked-list of structures representing a memory page, which
|
|
|
|
* are created on-demand as memory locations are used.
|
|
|
|
*
|
|
|
|
* For every address, first we split it into two parts: upper bits which
|
|
|
|
* represent the "base", and lower bits which represent an offset against the
|
|
|
|
* base. For the offset, we then right-shift it by two because the bottom two
|
|
|
|
* bits of a 4-byte aligned address are always zero. The mapping then
|
|
|
|
* becomes:
|
|
|
|
*
|
|
|
|
* page = page in pagelist such that Base(address) == page->base,
|
|
|
|
* page->map[Offset(address)]
|
2008-06-21 14:55:32 -07:00
|
|
|
*/
|
2008-07-07 02:55:03 -07:00
|
|
|
class Tracker {
/*
 * A "tracker page" covers TRACKER_PAGE_SZB bytes of tracked address space
 * (not necessarily the OS page size), one map slot per 4-byte-aligned
 * address within the page.
 */
#define TRACKER_PAGE_SZB 4096
#define TRACKER_PAGE_ENTRIES (TRACKER_PAGE_SZB >> 2) // each slot is 4 bytes
#define TRACKER_PAGE_MASK jsuword(TRACKER_PAGE_SZB - 1)

    /* One node of the on-demand page list. */
    struct TrackerPage {
        struct TrackerPage* next;   // next page in pagelist
        jsuword base;               // base address this page covers
        nanojit::LIns* map[TRACKER_PAGE_ENTRIES];
    };
    struct TrackerPage* pagelist;   // head of the linked page list

    jsuword getTrackerPageBase(const void* v) const;
    jsuword getTrackerPageOffset(const void* v) const;
    struct TrackerPage* findTrackerPage(const void* v) const;
    struct TrackerPage* addTrackerPage(const void* v);
public:
    Tracker();
    ~Tracker();

    /* Whether |v| currently has a mapping. */
    bool has(const void* v) const;
    /* Return the LIns mapped to |v|. */
    nanojit::LIns* get(const void* v) const;
    /* Map |v| to |ins|, creating the covering page on demand. */
    void set(const void* v, nanojit::LIns* ins);
    /* Drop all mappings. */
    void clear();
};
|
|
|
|
|
2009-11-05 15:04:54 -08:00
|
|
|
/* A compiled trace fragment, keyed by the interpreter ip it starts at. */
class VMFragment : public nanojit::Fragment {
public:
    VMFragment(const void* _ip verbose_only(, uint32_t profFragID))
      : Fragment(_ip verbose_only(, profFragID))
    {}

    /*
     * If this is anchored off a TreeFragment, this points to that tree fragment.
     * Otherwise, it is |this|.
     */
    TreeFragment* root;

    /* Downcast to TreeFragment; asserts root == this (see definition below). */
    TreeFragment* toTreeFragment();
};
|
|
|
|
|
2009-10-15 16:04:55 -07:00
|
|
|
#if defined(JS_JIT_SPEW) || defined(NJ_NO_VARIADIC_MACROS)
|
2009-06-24 20:32:00 -07:00
|
|
|
|
|
|
|
/* Logging-control bits for tracer spew, tested against LogController.lcbits. */
enum LC_TMBits {
    /*
     * Output control bits for all non-Nanojit code. Only use bits 16 and
     * above, since Nanojit uses 0 .. 15 itself.
     */
    LC_TMMinimal = 1<<16,
    LC_TMTracer = 1<<17,
    LC_TMRecorder = 1<<18,
    LC_TMAbort = 1<<19,
    LC_TMStats = 1<<20,
    LC_TMRegexp = 1<<21,
    LC_TMTreeVis = 1<<22
};
|
|
|
|
|
2009-07-15 12:31:26 -07:00
|
|
|
#endif
|
|
|
|
|
2009-10-15 16:04:55 -07:00
|
|
|
#ifdef NJ_NO_VARIADIC_MACROS
|
2009-07-15 12:31:26 -07:00
|
|
|
|
|
|
|
#define debug_only_stmt(action) /* */
|
|
|
|
static void debug_only_printf(int mask, const char *fmt, ...) {}
|
|
|
|
#define debug_only_print0(mask, str) /* */
|
|
|
|
|
|
|
|
#elif defined(JS_JIT_SPEW)
|
|
|
|
|
2009-06-24 20:32:00 -07:00
|
|
|
// Top level logging controller object.
|
2010-01-22 14:49:18 -08:00
|
|
|
extern nanojit::LogControl LogController;
|
2009-06-24 20:32:00 -07:00
|
|
|
|
2009-09-15 15:05:53 -07:00
|
|
|
// Top level profiling hook, needed to harvest profile info from Fragments
|
|
|
|
// whose logical lifetime is about to finish
|
2010-01-22 14:49:18 -08:00
|
|
|
extern void FragProfiling_FragFinalizer(nanojit::Fragment* f, TraceMonitor*);
|
2009-09-15 15:05:53 -07:00
|
|
|
|
2009-06-24 20:32:00 -07:00
|
|
|
#define debug_only_stmt(stmt) \
|
|
|
|
stmt
|
2009-07-29 09:58:19 -07:00
|
|
|
|
|
|
|
#define debug_only_printf(mask, fmt, ...) \
|
|
|
|
JS_BEGIN_MACRO \
|
2010-01-22 14:49:18 -08:00
|
|
|
if ((LogController.lcbits & (mask)) > 0) { \
|
|
|
|
LogController.printf(fmt, __VA_ARGS__); \
|
2009-07-29 09:58:19 -07:00
|
|
|
fflush(stdout); \
|
|
|
|
} \
|
|
|
|
JS_END_MACRO
|
|
|
|
|
|
|
|
#define debug_only_print0(mask, str) \
|
|
|
|
JS_BEGIN_MACRO \
|
2010-01-22 14:49:18 -08:00
|
|
|
if ((LogController.lcbits & (mask)) > 0) { \
|
|
|
|
LogController.printf("%s", str); \
|
2009-07-29 09:58:19 -07:00
|
|
|
fflush(stdout); \
|
|
|
|
} \
|
|
|
|
JS_END_MACRO
|
2009-06-24 20:32:00 -07:00
|
|
|
|
2009-01-06 14:51:32 -08:00
|
|
|
#else
|
2009-06-24 20:32:00 -07:00
|
|
|
|
|
|
|
#define debug_only_stmt(action) /* */
|
|
|
|
#define debug_only_printf(mask, fmt, ...) /* */
|
|
|
|
#define debug_only_print0(mask, str) /* */
|
|
|
|
|
2009-01-06 14:51:32 -08:00
|
|
|
#endif
|
|
|
|
|
2008-08-07 15:28:43 -07:00
|
|
|
/*
|
2009-01-23 14:24:50 -08:00
|
|
|
* The oracle keeps track of hit counts for program counter locations, as
|
|
|
|
* well as slots that should not be demoted to int because we know them to
|
|
|
|
* overflow or they result in type-unstable traces. We are using simple
|
|
|
|
* hash tables. Collisions lead to loss of optimization (demotable slots
|
|
|
|
* are not demoted, etc.) but have no correctness implications.
|
2008-08-07 15:28:43 -07:00
|
|
|
*/
|
|
|
|
#define ORACLE_SIZE 4096
|
|
|
|
|
|
|
|
class Oracle {
    avmplus::BitSet _stackDontDemote;   // per-(cx,slot) hash: stack slots not to demote
    avmplus::BitSet _globalDontDemote;  // per-(cx,slot) hash: global slots not to demote
    avmplus::BitSet _pcDontDemote;      // per-pc hash: instructions whose result must not be demoted
    avmplus::BitSet _pcSlowZeroTest;    // per-pc hash: instructions needing a slow zero test
public:
    Oracle();

    /*
     * Mark/query demotability. These are hash-based, so collisions can
     * only cost optimization, never correctness (see comment above class).
     */
    JS_REQUIRES_STACK void markGlobalSlotUndemotable(JSContext* cx, unsigned slot);
    JS_REQUIRES_STACK bool isGlobalSlotUndemotable(JSContext* cx, unsigned slot) const;
    JS_REQUIRES_STACK void markStackSlotUndemotable(JSContext* cx, unsigned slot);
    JS_REQUIRES_STACK void markStackSlotUndemotable(JSContext* cx, unsigned slot, const void* pc);
    JS_REQUIRES_STACK bool isStackSlotUndemotable(JSContext* cx, unsigned slot) const;
    JS_REQUIRES_STACK bool isStackSlotUndemotable(JSContext* cx, unsigned slot, const void* pc) const;
    void markInstructionUndemotable(jsbytecode* pc);
    bool isInstructionUndemotable(jsbytecode* pc) const;
    void markInstructionSlowZeroTest(jsbytecode* pc);
    bool isInstructionSlowZeroTest(jsbytecode* pc) const;

    /* Forget all recorded demotability information. */
    void clearDemotability();
    void clear() {
        clearDemotability();
    }
};
|
|
|
|
|
2008-08-07 17:22:21 -07:00
|
|
|
typedef Queue<uint16> SlotList;
|
|
|
|
|
2010-06-21 12:28:56 -07:00
|
|
|
/*
 * A sequence of JSValueType entries describing trace entry/exit types:
 * stack-slot types first, then global-slot types (see globalTypeMap()
 * accessors on TreeFragment/VMSideExit).
 */
class TypeMap : public Queue<JSValueType> {
    Oracle *oracle;   // consulted when capturing speculative types -- TODO confirm
public:
    TypeMap(nanojit::Allocator* alloc) : Queue<JSValueType>(alloc) {}
    /* Overwrite contents: stackSlots stack types followed by ngslots global types. */
    void set(unsigned stackSlots, unsigned ngslots,
             const JSValueType* stackTypeMap, const JSValueType* globalTypeMap);
    JS_REQUIRES_STACK void captureTypes(JSContext* cx, JSObject* globalObj, SlotList& slots, unsigned callDepth,
                                        bool speculate);
    JS_REQUIRES_STACK void captureMissingGlobalTypes(JSContext* cx, JSObject* globalObj, SlotList& slots,
                                                     unsigned stackSlots, bool speculate);
    /* Whether two maps have identical contents. */
    bool matches(TypeMap& other) const;
    /* Replace contents with a raw array of numSlots types. */
    void fromRaw(JSValueType* other, unsigned numSlots);
};
|
|
|
|
|
2009-06-25 15:32:50 -07:00
|
|
|
#define JS_TM_EXITCODES(_) \
|
|
|
|
/* \
|
|
|
|
* An exit at a possible branch-point in the trace at which to attach a \
|
|
|
|
* future secondary trace. Therefore the recorder must generate different \
|
|
|
|
* code to handle the other outcome of the branch condition from the \
|
|
|
|
* primary trace's outcome. \
|
|
|
|
*/ \
|
|
|
|
_(BRANCH) \
|
|
|
|
/* \
|
|
|
|
* Exit at a tableswitch via a numbered case. \
|
|
|
|
*/ \
|
|
|
|
_(CASE) \
|
|
|
|
/* \
|
|
|
|
* Exit at a tableswitch via the default case. \
|
|
|
|
*/ \
|
|
|
|
_(DEFAULT) \
|
|
|
|
_(LOOP) \
|
|
|
|
_(NESTED) \
|
|
|
|
/* \
|
|
|
|
* An exit from a trace because a condition relied upon at recording time \
|
|
|
|
* no longer holds, where the alternate path of execution is so rare or \
|
|
|
|
* difficult to address in native code that it is not traced at all, e.g. \
|
|
|
|
* negative array index accesses, which differ from positive indexes in \
|
|
|
|
* that they require a string-based property lookup rather than a simple \
|
|
|
|
* memory access. \
|
|
|
|
*/ \
|
|
|
|
_(MISMATCH) \
|
|
|
|
/* \
|
|
|
|
* A specialization of MISMATCH_EXIT to handle allocation failures. \
|
|
|
|
*/ \
|
|
|
|
_(OOM) \
|
|
|
|
_(OVERFLOW) \
|
2010-10-18 15:13:55 -07:00
|
|
|
_(MUL_ZERO) \
|
2009-06-25 15:32:50 -07:00
|
|
|
_(UNSTABLE_LOOP) \
|
|
|
|
_(TIMEOUT) \
|
|
|
|
_(DEEP_BAIL) \
|
2010-08-27 21:18:58 -07:00
|
|
|
_(STATUS)
|
2009-07-27 18:40:12 -07:00
|
|
|
|
2008-11-07 15:23:43 -08:00
|
|
|
/*
 * Expand JS_TM_EXITCODES into FOO_EXIT enumerators (BRANCH_EXIT,
 * CASE_EXIT, ...), with TOTAL_EXIT_TYPES as the count.
 */
enum ExitType {
#define MAKE_EXIT_CODE(x) x##_EXIT,
    JS_TM_EXITCODES(MAKE_EXIT_CODE)
#undef MAKE_EXIT_CODE
    TOTAL_EXIT_TYPES
};
|
|
|
|
|
2009-09-30 15:28:00 -07:00
|
|
|
struct FrameInfo;
|
|
|
|
|
2008-11-07 15:23:43 -08:00
|
|
|
/*
 * A side exit from a trace. The type maps are stored inline, immediately
 * after this struct: numStackSlots stack types first, then numGlobalSlots
 * global types (see stackTypeMap()/globalTypeMap()).
 */
struct VMSideExit : public nanojit::SideExit
{
    jsbytecode* pc;        // bytecode pc at the exit
    jsbytecode* imacpc;    // imacro pc at the exit, if inside an imacro
    intptr_t sp_adj;       // stack-pointer adjustment -- presumably relative to entry; TODO confirm
    intptr_t rp_adj;       // call-stack (rp) adjustment -- presumably relative to entry; TODO confirm
    int32_t calldepth;     // depth of inlined call frames at this exit
    uint32 numGlobalSlots;
    uint32 numStackSlots;
    uint32 numStackSlotsBelowCurrentFrame;
    ExitType exitType;     // one of the JS_TM_EXITCODES-derived values
    uintN lookupFlags;
    unsigned hitcount;     // number of times this exit has been hit -- TODO confirm

    /* Stack-slot types, stored directly after this struct. */
    inline JSValueType* stackTypeMap() {
        return (JSValueType*)(this + 1);
    }

    inline JSValueType& stackType(unsigned i) {
        JS_ASSERT(i < numStackSlots);
        return stackTypeMap()[i];
    }

    /* Global-slot types follow the stack-slot types. */
    inline JSValueType* globalTypeMap() {
        return (JSValueType*)(this + 1) + this->numStackSlots;
    }

    /* Whole inline map: stack types followed by global types. */
    inline JSValueType* fullTypeMap() {
        return stackTypeMap();
    }

    /* The fragment this exit leaves (|from| is inherited from SideExit). */
    inline VMFragment* fromFrag() {
        return (VMFragment*)from;
    }

    /* The tree the exiting fragment belongs to. */
    inline TreeFragment* root() {
        return fromFrag()->root;
    }
};
|
2008-11-13 00:30:20 -08:00
|
|
|
|
2009-08-18 15:32:54 -07:00
|
|
|
/*
 * Arena allocator for LIR and trace metadata. Tracks an out-of-memory
 * flag and a running size, and supports checkpoint/rewind via Mark.
 */
class VMAllocator : public nanojit::Allocator
{
public:
    VMAllocator() : mOutOfMemory(false), mSize(0)
    {}

    /* Running allocation size in bytes (mSize; maintained elsewhere). */
    size_t size() {
        return mSize;
    }

    /* True once an allocation has failed; check before trusting results. */
    bool outOfMemory() {
        return mOutOfMemory;
    }

    /*
     * RAII checkpoint: captures the allocator cursor on construction and,
     * unless commit() is called, rewinds to it on destruction.
     */
    struct Mark
    {
        VMAllocator& vma;       // allocator being checkpointed
        bool committed;         // set by commit(); suppresses the rewind
        nanojit::Allocator::Chunk* saved_chunk;
        char* saved_top;
        char* saved_limit;
        size_t saved_size;

        Mark(VMAllocator& vma) :
            vma(vma),
            committed(false),
            saved_chunk(vma.current_chunk),
            saved_top(vma.current_top),
            saved_limit(vma.current_limit),
            saved_size(vma.mSize)
        {}

        ~Mark()
        {
            if (!committed)
                vma.rewind(*this);
        }

        /* Keep everything allocated since this mark. */
        void commit() { committed = true; }
    };

    /* Free all chunks allocated after |m| and restore the saved cursor. */
    void rewind(const Mark& m) {
        while (current_chunk != m.saved_chunk) {
            Chunk *prev = current_chunk->prev;
            freeChunk(current_chunk);
            current_chunk = prev;
        }
        current_top = m.saved_top;
        current_limit = m.saved_limit;
        mSize = m.saved_size;
        /* Zero the reclaimed span so stale data cannot leak into reuse. */
        memset(current_top, 0, current_limit - current_top);
    }

    bool mOutOfMemory;
    size_t mSize;

    /*
     * FIXME: Area the LIR spills into if we encounter an OOM mid-way
     * through compilation; we must check mOutOfMemory before we run out
     * of mReserve, otherwise we're in undefined territory. This area
     * used to be one page, now 16 to be "safer". This is a temporary
     * and quite unsatisfactory approach to handling OOM in Nanojit.
     */
    uintptr_t mReserve[0x10000];
};
|
|
|
|
|
2009-08-18 15:32:54 -07:00
|
|
|
struct REHashKey {
|
|
|
|
size_t re_length;
|
|
|
|
uint16 re_flags;
|
|
|
|
const jschar* re_chars;
|
|
|
|
|
|
|
|
REHashKey(size_t re_length, uint16 re_flags, const jschar *re_chars)
|
|
|
|
: re_length(re_length)
|
|
|
|
, re_flags(re_flags)
|
|
|
|
, re_chars(re_chars)
|
|
|
|
{}
|
|
|
|
|
|
|
|
bool operator==(const REHashKey& other) const
|
|
|
|
{
|
|
|
|
return ((this->re_length == other.re_length) &&
|
|
|
|
(this->re_flags == other.re_flags) &&
|
|
|
|
!memcmp(this->re_chars, other.re_chars,
|
|
|
|
this->re_length * sizeof(jschar)));
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
struct REHashFn {
|
|
|
|
static size_t hash(const REHashKey& k) {
|
|
|
|
return
|
|
|
|
k.re_length +
|
|
|
|
k.re_flags +
|
|
|
|
nanojit::murmurhash(k.re_chars, k.re_length * sizeof(jschar));
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2009-02-03 16:25:12 -08:00
|
|
|
/* Per-inlined-call-frame metadata recorded on trace; typemap stored inline after the struct. */
struct FrameInfo {
    JSObject* block;    // caller block chain head
    jsbytecode* pc;     // caller fp->regs->pc
    jsbytecode* imacpc; // caller fp->imacpc
    uint32 spdist;      // distance from fp->slots to fp->regs->sp at JSOP_CALL

    /*
     * Bit 16 (0x10000, CONSTRUCTING_FLAG) is set if constructing (called
     * through new). Bits 0-15 are the actual argument count. This may be
     * less than fun->nargs.
     * NB: This is argc for the callee, not the caller.
     */
    uint32 argc;

    /*
     * Number of stack slots in the caller, not counting slots pushed when
     * invoking the callee. That is, slots after JSOP_CALL completes but
     * without the return value. This is also equal to the number of slots
     * between fp->prev->argv[-2] (calleR fp->callee) and fp->argv[-2]
     * (calleE fp->callee).
     */
    uint32 callerHeight;

    /* argc of the caller */
    uint32 callerArgc;

    // Safer accessors for argc.
    enum { CONSTRUCTING_FLAG = 0x10000 };
    void set_argc(uint16 argc, bool constructing) {
        this->argc = uint32(argc) | (constructing ? CONSTRUCTING_FLAG: 0);
    }
    uint16 get_argc() const { return uint16(argc & ~CONSTRUCTING_FLAG); }
    bool is_constructing() const { return (argc & CONSTRUCTING_FLAG) != 0; }

    // The typemap just before the callee is called.
    JSValueType* get_typemap() { return (JSValueType*) (this+1); }
    const JSValueType* get_typemap() const { return (JSValueType*) (this+1); }
};
|
2008-11-07 15:23:43 -08:00
|
|
|
|
2008-10-27 20:42:49 -07:00
|
|
|
/* Linked-list record of a side exit whose typemap is type-unstable (see TreeFragment::unstableExits). */
struct UnstableExit
{
    VMFragment* fragment;  // fragment the exit leaves from
    VMSideExit* exit;      // the unstable side exit itself
    UnstableExit* next;    // next record in the tree's list
};
|
|
|
|
|
2009-12-04 19:59:55 -08:00
|
|
|
/* A VMFragment carrying the entry type map other fragments link against. */
struct LinkableFragment : public VMFragment
{
    LinkableFragment(const void* _ip, nanojit::Allocator* alloc
                     verbose_only(, uint32_t profFragID))
      : VMFragment(_ip verbose_only(, profFragID)), typeMap(alloc), nStackTypes(0)
    { }

    uint32 branchCount;        // number of branches recorded off this fragment -- TODO confirm
    TypeMap typeMap;           // entry types: stack types first, then global types
    unsigned nStackTypes;      // count of stack entries at the front of typeMap
    unsigned spOffsetAtEntry;  // stack-pointer offset at fragment entry -- TODO confirm
    SlotList* globalSlots;     // global slot numbers the global types describe
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* argc is cx->fp->argc at the trace loop header, i.e., the number of arguments
|
|
|
|
* pushed for the innermost JS frame. This is required as part of the fragment
|
|
|
|
* key because the fragment will write those arguments back to the interpreter
|
|
|
|
* stack when it exits, using its typemap, which implicitly incorporates a
|
|
|
|
* given value of argc. Without this feature, a fragment could be called as an
|
|
|
|
* inner tree with two different values of argc, and entry type checking or
|
|
|
|
* exit frame synthesis could crash.
|
|
|
|
*/
|
|
|
|
/*
 * The root fragment of a compiled trace tree. Keyed by (ip, globalObj,
 * globalShape, argc); see the comment above for why argc is in the key.
 */
struct TreeFragment : public LinkableFragment
{
    TreeFragment(const void* _ip, nanojit::Allocator* alloc, JSObject* _globalObj,
                 uint32 _globalShape, uint32 _argc verbose_only(, uint32_t profFragID)):
        LinkableFragment(_ip, alloc verbose_only(, profFragID)),
        first(NULL),
        next(NULL),
        peer(NULL),
        globalObj(_globalObj),
        globalShape(_globalShape),
        argc(_argc),
        dependentTrees(alloc),
        linkedTrees(alloc),
        sideExits(alloc),
        gcthings(alloc),
        shapes(alloc)
    { }

    TreeFragment* first;   // first fragment in this tree's list -- TODO confirm ordering semantics
    TreeFragment* next;    // next fragment in the list
    TreeFragment* peer;    // next peer tree (same ip, different key) -- TODO confirm
    JSObject* globalObj;   // global object the tree was recorded against (part of the key)
    uint32 globalShape;    // shape of globalObj at record time (part of the key)
    uint32 argc;           // cx->fp->argc at the loop header (part of the key; see above)
    /* Dependent trees must be trashed if this tree dies, and updated on missing global types */
    Queue<TreeFragment*> dependentTrees;
    /* Linked trees must be updated on missing global types, but are not dependent */
    Queue<TreeFragment*> linkedTrees;
#ifdef DEBUG
    const char* treeFileName;   // source location of the tree, for debug output
    uintN treeLineNumber;
    uintN treePCOffset;
#endif
    JSScript* script;
    UnstableExit* unstableExits;     // exits whose typemaps are type-unstable
    Queue<VMSideExit*> sideExits;    // all side exits of this tree
    ptrdiff_t nativeStackBase;
    unsigned maxCallDepth;
    /* All embedded GC things are registered here so the GC can scan them. */
    Queue<Value> gcthings;
    Queue<const js::Shape*> shapes;
    unsigned maxNativeStackSlots;
    uintN execs;                     // execution count -- TODO confirm

    /* Number of global-slot types (everything in typeMap after the stack types). */
    inline unsigned nGlobalTypes() {
        return typeMap.length() - nStackTypes;
    }
    /* Global-slot types follow the stack types in typeMap. */
    inline JSValueType* globalTypeMap() {
        return typeMap.data() + nStackTypes;
    }
    /* Stack-slot types sit at the front of typeMap. */
    inline JSValueType* stackTypeMap() {
        return typeMap.data();
    }

    JS_REQUIRES_STACK void initialize(JSContext* cx, SlotList *globalSlots, bool speculate);
    /* Unlink and return the record for |exit| from unstableExits. */
    UnstableExit* removeUnstableExit(VMSideExit* exit);
};
|
2008-10-12 15:39:32 -07:00
|
|
|
|
2009-12-04 19:59:55 -08:00
|
|
|
/* Downcast helper: only valid on fragments that are tree roots (root == this). */
inline TreeFragment*
VMFragment::toTreeFragment()
{
    JS_ASSERT(root == this);
    return static_cast<TreeFragment*>(this);
}
|
|
|
|
|
2010-08-31 11:33:30 -07:00
|
|
|
/*
|
|
|
|
* BUILTIN_NO_FIXUP_NEEDED indicates that after the initial LeaveTree of a deep
|
|
|
|
* bail, the builtin call needs no further fixup when the trace exits and calls
|
|
|
|
* LeaveTree the second time.
|
|
|
|
*/
|
2010-01-22 14:49:18 -08:00
|
|
|
/* Bitmask describing the outcome of a builtin call made from trace (see comment above). */
typedef enum BuiltinStatus {
    BUILTIN_BAILED = 1,
    BUILTIN_ERROR = 2,
    BUILTIN_NO_FIXUP_NEEDED = 4,

    /* Error, but no second-LeaveTree fixup required. */
    BUILTIN_ERROR_NO_FIXUP_NEEDED = BUILTIN_ERROR | BUILTIN_NO_FIXUP_NEEDED
} BuiltinStatus;
|
2009-04-09 16:07:00 -07:00
|
|
|
|
2010-03-11 12:19:36 -08:00
|
|
|
/* OR |status| (default BUILTIN_ERROR) into the tracer state's builtinStatus bitmask. */
static JS_INLINE void
SetBuiltinError(JSContext *cx, BuiltinStatus status = BUILTIN_ERROR)
{
    cx->tracerState->builtinStatus |= status;
}
|
2009-04-09 16:07:00 -07:00
|
|
|
|
2009-09-28 09:03:21 -07:00
|
|
|
#ifdef DEBUG_RECORDING_STATUS_NOT_BOOL
|
|
|
|
/* #define DEBUG_RECORDING_STATUS_NOT_BOOL to detect misuses of RecordingStatus */
|
|
|
|
/*
 * Debug-only struct wrapper around a status code so that RecordingStatus
 * cannot be silently used as a bool (see DEBUG_RECORDING_STATUS_NOT_BOOL).
 */
struct RecordingStatus {
    int code;  // one of RecordingStatusCodes
    bool operator==(RecordingStatus &s) { return this->code == s.code; };
    bool operator!=(RecordingStatus &s) { return this->code != s.code; };
};
|
2009-09-28 09:03:21 -07:00
|
|
|
/*
 * Numeric codes backing RecordingStatus. Code 2 is deliberately skipped:
 * it is ARECORD_ABORTED, which exists only in the abortable set.
 */
enum RecordingStatusCodes {
    RECORD_ERROR_code = 0,
    RECORD_STOP_code = 1,

    RECORD_CONTINUE_code = 3,
    RECORD_IMACRO_code = 4
};
/* The canonical status values (debug-only struct flavor). */
RecordingStatus RECORD_CONTINUE = { RECORD_CONTINUE_code };
RecordingStatus RECORD_STOP = { RECORD_STOP_code };
RecordingStatus RECORD_IMACRO = { RECORD_IMACRO_code };
RecordingStatus RECORD_ERROR = { RECORD_ERROR_code };
|
|
|
|
|
|
|
|
/* Debug-only struct wrapper for the abortable status set (superset of RecordingStatus). */
struct AbortableRecordingStatus {
    int code;  // one of AbortableRecordingStatusCodes
    bool operator==(AbortableRecordingStatus &s) { return this->code == s.code; };
    bool operator!=(AbortableRecordingStatus &s) { return this->code != s.code; };
};
|
2009-09-28 09:03:21 -07:00
|
|
|
/* Codes backing AbortableRecordingStatus; shared values match RecordingStatusCodes. */
enum AbortableRecordingStatusCodes {
    ARECORD_ERROR_code = 0,
    ARECORD_STOP_code = 1,
    ARECORD_ABORTED_code = 2,
    ARECORD_CONTINUE_code = 3,
    ARECORD_IMACRO_code = 4,
    ARECORD_IMACRO_ABORTED_code = 5,
    ARECORD_COMPLETED_code = 6
};
/* The canonical abortable status values (debug-only struct flavor). */
AbortableRecordingStatus ARECORD_ERROR = { ARECORD_ERROR_code };
AbortableRecordingStatus ARECORD_STOP = { ARECORD_STOP_code };
AbortableRecordingStatus ARECORD_CONTINUE = { ARECORD_CONTINUE_code };
AbortableRecordingStatus ARECORD_IMACRO = { ARECORD_IMACRO_code };
AbortableRecordingStatus ARECORD_IMACRO_ABORTED = { ARECORD_IMACRO_ABORTED_code };
AbortableRecordingStatus ARECORD_ABORTED = { ARECORD_ABORTED_code };
AbortableRecordingStatus ARECORD_COMPLETED = { ARECORD_COMPLETED_code };
|
2009-09-28 09:03:21 -07:00
|
|
|
|
|
|
|
/*
 * Widen a RecordingStatus into the AbortableRecordingStatus domain. The two
 * code spaces deliberately share numeric values, so the code is carried over
 * verbatim.
 */
static inline AbortableRecordingStatus
InjectStatus(RecordingStatus rs)
{
    AbortableRecordingStatus widened;
    widened.code = rs.code;
    return widened;
}
|
|
|
|
/*
 * Identity overload: a status that is already abortable needs no widening.
 * Present so callers can apply InjectStatus uniformly to either status type.
 */
static inline AbortableRecordingStatus
InjectStatus(AbortableRecordingStatus ars)
{
    return ars;
}
|
|
|
|
|
|
|
|
/*
 * Return whether the recording status requires the current recording session
 * to be aborted if it is still active (debug-build counterpart; compares
 * against the named ERROR/STOP constants via the struct's operator==).
 */
static inline bool
StatusAbortsRecorderIfActive(AbortableRecordingStatus ars)
{
    return ars == ARECORD_ERROR || ars == ARECORD_STOP;
}
|
2009-04-30 17:30:46 -07:00
|
|
|
#else
|
2009-09-28 09:03:21 -07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Normally, during recording, when the recorder cannot continue, it returns
|
|
|
|
* ARECORD_STOP to indicate that recording should be aborted by the top-level
|
|
|
|
* recording function. However, if the recorder reenters the interpreter (e.g.,
|
|
|
|
* when executing an inner loop), there will be an immediate abort. This
|
|
|
|
* condition must be carefully detected and propagated out of all nested
|
|
|
|
* recorder calls lest the now-invalid TraceRecorder object be accessed
|
|
|
|
* accidentally. This condition is indicated by the ARECORD_ABORTED value.
|
|
|
|
*
|
|
|
|
* The AbortableRecordingStatus enumeration represents the general set of
|
|
|
|
* possible results of calling a recorder function. Functions that cannot
|
|
|
|
* possibly return ARECORD_ABORTED may statically guarantee this to the caller
|
|
|
|
* using the RecordingStatus enumeration. Ideally, C++ would allow subtyping
|
|
|
|
* of enumerations, but it doesn't. To simulate subtype conversion manually,
|
|
|
|
* code should call InjectStatus to inject a value of the restricted set into a
|
|
|
|
* value of the general set.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
 * Release-build RecordingStatus: a plain enum (the debug build wraps the code
 * in a struct for type safety). Values line up with AbortableRecordingStatus
 * so InjectStatus can widen by static_cast.
 */
enum RecordingStatus {
    RECORD_STOP     = 0,  // Recording should be aborted at the top-level
                          // call to the recorder.
    RECORD_ERROR    = 1,  // Recording should be aborted at the top-level
                          // call to the recorder and the interpreter should
                          // goto error
    RECORD_CONTINUE = 2,  // Continue recording.
    RECORD_IMACRO   = 3   // Entered imacro; continue recording.
                          // Only JSOP_IS_IMACOP opcodes may return this.
};
|
2009-09-28 09:03:21 -07:00
|
|
|
|
|
|
|
/*
 * Release-build AbortableRecordingStatus: superset of RecordingStatus that
 * can additionally express "recorder already gone" outcomes (ABORTED,
 * IMACRO_ABORTED, COMPLETED). Shared values match RecordingStatus.
 */
enum AbortableRecordingStatus {
    ARECORD_STOP           = 0,  // see RECORD_STOP
    ARECORD_ERROR          = 1,  // Recording may or may not have been aborted.
                                 // Recording should be aborted at the top-level
                                 // if it has not already been and the interpreter
                                 // should goto error
    ARECORD_CONTINUE       = 2,  // see RECORD_CONTINUE
    ARECORD_IMACRO         = 3,  // see RECORD_IMACRO
    ARECORD_IMACRO_ABORTED = 4,  // see comment in TR::monitorRecording.
    ARECORD_ABORTED        = 5,  // Recording has already been aborted; the
                                 // interpreter should continue executing
    ARECORD_COMPLETED      = 6   // Recording completed successfully, the
                                 // trace recorder has been deleted
};
|
|
|
|
|
|
|
|
/*
 * Widen a RecordingStatus into the AbortableRecordingStatus domain. The two
 * enums share values for the common codes, so a static_cast suffices.
 */
static JS_ALWAYS_INLINE AbortableRecordingStatus
InjectStatus(RecordingStatus rs)
{
    return static_cast<AbortableRecordingStatus>(rs);
}
|
|
|
|
|
|
|
|
/*
 * Identity overload: a status that is already abortable needs no widening.
 * Present so callers can apply InjectStatus uniformly to either status type.
 */
static JS_ALWAYS_INLINE AbortableRecordingStatus
InjectStatus(AbortableRecordingStatus ars)
{
    return ars;
}
|
|
|
|
|
2009-11-12 18:34:24 -08:00
|
|
|
/*
|
|
|
|
* Return whether the recording status requires the current recording session
|
2010-04-26 09:30:06 -07:00
|
|
|
* to be deleted. ERROR means the recording session should be deleted if it
|
|
|
|
* hasn't already. ABORTED and COMPLETED indicate the recording session is
|
2009-11-12 18:34:24 -08:00
|
|
|
* already deleted, so they return 'false'.
|
|
|
|
*/
|
2009-09-28 09:03:21 -07:00
|
|
|
/*
 * Return whether the recording status requires the current recording session
 * to be deleted (see the comment above for the full contract). Spelled as
 * explicit comparisons rather than the previous numeric range test
 * (ars <= ARECORD_ERROR) so it stays correct if the enumerator values are
 * ever reordered, and so it matches the debug-build implementation.
 */
static JS_ALWAYS_INLINE bool
StatusAbortsRecorderIfActive(AbortableRecordingStatus ars)
{
    return ars == ARECORD_STOP || ars == ARECORD_ERROR;
}
|
2009-04-30 17:30:46 -07:00
|
|
|
#endif
|
|
|
|
|
2009-08-07 14:08:23 -07:00
|
|
|
class SlotMap;
|
2009-09-30 15:28:00 -07:00
|
|
|
class SlurpInfo;
|
2009-04-30 17:30:46 -07:00
|
|
|
|
2009-08-07 14:08:23 -07:00
|
|
|
/* Results of trying to compare two typemaps together. */
enum TypeConsensus
{
    TypeConsensus_Okay,      /* Two typemaps are compatible */
    TypeConsensus_Undemotes, /* Not compatible now, but would be with pending undemotes. */
    TypeConsensus_Bad        /* Typemaps are not compatible */
};
|
2008-12-21 03:55:09 -08:00
|
|
|
|
2010-04-26 09:30:06 -07:00
|
|
|
/*
 * Tri-state outcome reported by trace-monitoring entry points: recording,
 * not recording, or error. (Exact per-value semantics are determined by the
 * call sites, which are outside this view.)
 */
enum MonitorResult {
    MONITOR_RECORDING,
    MONITOR_NOT_RECORDING,
    MONITOR_ERROR
};
|
|
|
|
|
2010-07-20 18:01:16 -07:00
|
|
|
/*
 * Outcome of handling a trace point. NOTE(review): per-value semantics
 * inferred from names only — confirm against the (unseen) call sites.
 */
enum TracePointAction {
    TPA_Nothing,
    TPA_RanStuff,
    TPA_Recorded,
    TPA_Error
};
|
|
|
|
|
2010-02-06 10:14:05 -08:00
|
|
|
/* Map from a shape-guard LIR instruction to the guarded object; used by the
   recorder to avoid re-guarding shapes it has already guarded. */
typedef HashMap<nanojit::LIns*, JSObject*> GuardedShapeTable;
|
|
|
|
|
2009-11-12 18:34:24 -08:00
|
|
|
/*
 * AbortRecording forwards to AbortRecordingImpl. In DEBUG builds the textual
 * abort reason is passed through; in release builds the macro drops the
 * reason argument, so reason expressions must be side-effect free.
 */
#ifdef DEBUG
# define AbortRecording(cx, reason) AbortRecordingImpl(cx, reason)
#else
# define AbortRecording(cx, reason) AbortRecordingImpl(cx)
#endif
|
|
|
|
|
2009-10-26 13:06:17 -07:00
|
|
|
class TraceRecorder
|
|
|
|
{
|
|
|
|
/*************************************************************** Recording session constants */
|
|
|
|
|
|
|
|
/* The context in which recording started. */
|
|
|
|
JSContext* const cx;
|
|
|
|
|
|
|
|
/* Cached value of JS_TRACE_MONITOR(cx). */
|
2010-01-22 14:49:18 -08:00
|
|
|
TraceMonitor* const traceMonitor;
|
2009-10-26 13:06:17 -07:00
|
|
|
|
2010-04-27 14:17:11 -07:00
|
|
|
/* Cached oracle keeps track of hit counts for program counter locations */
|
|
|
|
Oracle* oracle;
|
|
|
|
|
2009-10-26 13:06:17 -07:00
|
|
|
/* The Fragment being recorded by this recording session. */
|
2009-11-05 15:04:54 -08:00
|
|
|
VMFragment* const fragment;
|
2009-10-26 13:06:17 -07:00
|
|
|
|
2009-12-04 19:59:55 -08:00
|
|
|
/* The root fragment representing the tree. */
|
|
|
|
TreeFragment* const tree;
|
2009-10-26 13:06:17 -07:00
|
|
|
|
|
|
|
/* The global object from the start of recording until now. */
|
|
|
|
JSObject* const globalObj;
|
|
|
|
|
2010-10-19 11:08:25 -07:00
|
|
|
/* If non-null, the script of outer loop aborted to start recording this loop. */
|
|
|
|
JSScript* const outerScript;
|
|
|
|
|
|
|
|
/* If non-null, the pc of the outer loop aborted to start recording this loop. */
|
|
|
|
jsbytecode* const outerPC;
|
2009-10-26 13:06:17 -07:00
|
|
|
|
2010-10-19 11:08:25 -07:00
|
|
|
/* If |outerPC|, the argc to use when looking up |outerPC| in the fragments table. */
|
2009-10-26 13:06:17 -07:00
|
|
|
uint32 const outerArgc;
|
|
|
|
|
|
|
|
/* If non-null, the side exit from which we are growing. */
|
|
|
|
VMSideExit* const anchor;
|
|
|
|
|
|
|
|
/* The LIR-generation pipeline used to build |fragment|. */
|
|
|
|
nanojit::LirWriter* const lir;
|
2010-10-18 12:55:56 -07:00
|
|
|
nanojit::CseFilter* const cse_filter;
|
2009-10-26 13:06:17 -07:00
|
|
|
|
2010-04-22 11:54:11 -07:00
|
|
|
/* Instructions yielding the corresponding trace-const members of TracerState. */
|
2009-10-26 13:06:17 -07:00
|
|
|
nanojit::LIns* const cx_ins;
|
|
|
|
nanojit::LIns* const eos_ins;
|
|
|
|
nanojit::LIns* const eor_ins;
|
|
|
|
nanojit::LIns* const loopLabel;
|
|
|
|
|
2009-12-11 19:10:36 -08:00
|
|
|
/* Lazy slot import state. */
|
|
|
|
unsigned importStackSlots;
|
|
|
|
unsigned importGlobalSlots;
|
|
|
|
TypeMap importTypeMap;
|
|
|
|
|
2009-10-26 13:06:17 -07:00
|
|
|
/*
|
|
|
|
* The LirBuffer used to supply memory to our LirWriter pipeline. Also contains the most recent
|
|
|
|
* instruction for {sp, rp, state}. Also contains names for debug JIT spew. Should be split.
|
|
|
|
*/
|
|
|
|
nanojit::LirBuffer* const lirbuf;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Remembers traceAlloc state before recording started; automatically rewinds when mark is
|
|
|
|
* destroyed on a failed compilation.
|
|
|
|
*/
|
|
|
|
VMAllocator::Mark mark;
|
|
|
|
|
2009-11-13 15:47:08 -08:00
|
|
|
/* Remembers the number of sideExits in treeInfo before recording started. */
|
|
|
|
const unsigned numSideExitsBefore;
|
|
|
|
|
2009-10-26 13:06:17 -07:00
|
|
|
/*********************************************************** Recording session mutable state */
|
|
|
|
|
|
|
|
/* Maps interpreter stack values to the instruction generating that value. */
|
|
|
|
Tracker tracker;
|
|
|
|
|
|
|
|
/* Maps interpreter stack values to the instruction writing back to the native stack. */
|
|
|
|
Tracker nativeFrameTracker;
|
|
|
|
|
2010-10-18 12:55:56 -07:00
|
|
|
/* The start of the global object's slots we assume for the trackers. */
|
|
|
|
Value* global_slots;
|
2009-10-26 13:06:17 -07:00
|
|
|
|
|
|
|
/* The number of interpreted calls entered (and not yet left) since recording began. */
|
|
|
|
unsigned callDepth;
|
|
|
|
|
|
|
|
/* The current atom table, mirroring the interpreter loop's variable of the same name. */
|
|
|
|
JSAtom** atoms;
|
2010-06-10 17:20:26 -07:00
|
|
|
Value* consts;
|
2009-10-26 13:06:17 -07:00
|
|
|
|
2010-09-15 13:43:55 -07:00
|
|
|
/* An instruction yielding the current script's strict mode code flag. */
|
|
|
|
nanojit::LIns* strictModeCode_ins;
|
|
|
|
|
2009-10-26 13:06:17 -07:00
|
|
|
/* FIXME: Dead, but soon to be used for something or other. */
|
|
|
|
Queue<jsbytecode*> cfgMerges;
|
|
|
|
|
|
|
|
/* Indicates whether the current tree should be trashed when the recording session ends. */
|
|
|
|
bool trashSelf;
|
|
|
|
|
|
|
|
/* A list of trees to trash at the end of the recording session. */
|
2009-11-05 15:04:54 -08:00
|
|
|
Queue<TreeFragment*> whichTreesToTrash;
|
2009-10-26 13:06:17 -07:00
|
|
|
|
2010-02-06 10:14:05 -08:00
|
|
|
/* The set of objects whose shapes already have been guarded. */
|
|
|
|
GuardedShapeTable guardedShapeTable;
|
|
|
|
|
2009-10-26 13:06:17 -07:00
|
|
|
/***************************************** Temporal state hoisted into the recording session */
|
|
|
|
|
|
|
|
/* Carry the return value from a STOP/RETURN to the subsequent record_LeaveFrame. */
|
|
|
|
nanojit::LIns* rval_ins;
|
|
|
|
|
|
|
|
/* Carry the return value from a native call to the record_NativeCallComplete. */
|
|
|
|
nanojit::LIns* native_rval_ins;
|
|
|
|
|
2010-10-04 14:13:33 -07:00
|
|
|
/* Carry the return value of js_CreateThis to record_NativeCallComplete. */
|
2009-10-26 13:06:17 -07:00
|
|
|
nanojit::LIns* newobj_ins;
|
|
|
|
|
|
|
|
/* Carry the JSSpecializedNative used to generate a call to record_NativeCallComplete. */
|
|
|
|
JSSpecializedNative* pendingSpecializedNative;
|
|
|
|
|
|
|
|
/* Carry whether this is a jsval on the native stack from finishGetProp to monitorRecording. */
|
2010-06-10 17:20:26 -07:00
|
|
|
Value* pendingUnboxSlot;
|
2009-10-26 13:06:17 -07:00
|
|
|
|
|
|
|
/* Carry a guard condition to the beginning of the next monitorRecording. */
|
|
|
|
nanojit::LIns* pendingGuardCondition;
|
|
|
|
|
|
|
|
/* Carry whether we have an always-exit from emitIf to checkTraceEnd. */
|
|
|
|
bool pendingLoop;
|
|
|
|
|
|
|
|
/* Temporary JSSpecializedNative used to describe non-specialized fast natives. */
|
|
|
|
JSSpecializedNative generatedSpecializedNative;
|
|
|
|
|
2010-06-21 12:28:56 -07:00
|
|
|
/* Temporary JSValueType array used to construct temporary typemaps. */
|
|
|
|
js::Vector<JSValueType, 256> tempTypeMap;
|
2009-10-26 13:06:17 -07:00
|
|
|
|
|
|
|
/************************************************************* 10 bajillion member functions */
|
2008-08-29 18:59:21 -07:00
|
|
|
|
2010-10-18 12:55:56 -07:00
|
|
|
/*
 * These can be put around a control-flow diamond if it's important that
 * CSE work across the diamond. Duplicated expressions within the diamond
 * will be CSE'd, but expressions defined within the diamond won't be
 * added to the tables of CSEable expressions. Loads are still
 * invalidated if they alias any stores that occur within diamonds.
 */
/* Pause CSE; a no-op when no CSE filter is in the LIR pipeline. */
void suspendCSE() { if (cse_filter) cse_filter->suspend(); }
/* Resume CSE after a matching suspendCSE(); no-op without a filter. */
void resumeCSE() { if (cse_filter) cse_filter->resume(); }
|
|
|
|
|
2010-06-10 17:20:26 -07:00
|
|
|
nanojit::LIns* insImmVal(const Value& val);
|
2009-08-17 14:50:57 -07:00
|
|
|
nanojit::LIns* insImmObj(JSObject* obj);
|
|
|
|
nanojit::LIns* insImmFun(JSFunction* fun);
|
|
|
|
nanojit::LIns* insImmStr(JSString* str);
|
2010-08-29 11:57:08 -07:00
|
|
|
nanojit::LIns* insImmShape(const js::Shape* shape);
|
2010-06-16 18:21:34 -07:00
|
|
|
nanojit::LIns* insImmId(jsid id);
|
2009-09-10 15:38:18 -07:00
|
|
|
nanojit::LIns* p2i(nanojit::LIns* ins);
|
2009-08-17 14:50:57 -07:00
|
|
|
|
2009-10-26 13:06:17 -07:00
|
|
|
/*
|
|
|
|
* Examines current interpreter state to record information suitable for returning to the
|
|
|
|
* interpreter through a side exit of the given type.
|
|
|
|
*/
|
|
|
|
JS_REQUIRES_STACK VMSideExit* snapshot(ExitType exitType);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Creates a separate but identical copy of the given side exit, allowing the guards associated
|
|
|
|
* with each to be entirely separate even after subsequent patching.
|
|
|
|
*/
|
|
|
|
JS_REQUIRES_STACK VMSideExit* copy(VMSideExit* exit);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Creates an instruction whose payload is a GuardRecord for the given exit. The instruction
|
|
|
|
* is suitable for use as the final argument of a single call to LirBuffer::insGuard; do not
|
|
|
|
* reuse the returned value.
|
|
|
|
*/
|
|
|
|
JS_REQUIRES_STACK nanojit::GuardRecord* createGuardRecord(VMSideExit* exit);
|
|
|
|
|
2010-04-27 14:17:11 -07:00
|
|
|
JS_REQUIRES_STACK JS_INLINE void markSlotUndemotable(LinkableFragment* f, unsigned slot);
|
|
|
|
|
|
|
|
JS_REQUIRES_STACK JS_INLINE void markSlotUndemotable(LinkableFragment* f, unsigned slot, const void* pc);
|
|
|
|
|
|
|
|
JS_REQUIRES_STACK unsigned findUndemotesInTypemaps(const TypeMap& typeMap, LinkableFragment* f,
|
|
|
|
Queue<unsigned>& undemotes);
|
|
|
|
|
|
|
|
JS_REQUIRES_STACK void assertDownFrameIsConsistent(VMSideExit* anchor, FrameInfo* fi);
|
|
|
|
|
2010-06-21 12:28:56 -07:00
|
|
|
JS_REQUIRES_STACK void captureStackTypes(unsigned callDepth, JSValueType* typeMap);
|
2010-04-27 14:17:11 -07:00
|
|
|
|
2010-07-04 00:12:06 -07:00
|
|
|
bool isVoidPtrGlobal(const void* p) const;
|
2010-06-10 17:20:26 -07:00
|
|
|
bool isGlobal(const Value* p) const;
|
|
|
|
ptrdiff_t nativeGlobalSlot(const Value *p) const;
|
2010-07-04 00:12:06 -07:00
|
|
|
ptrdiff_t nativeGlobalOffset(const Value* p) const;
|
|
|
|
JS_REQUIRES_STACK ptrdiff_t nativeStackOffsetImpl(const void* p) const;
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK ptrdiff_t nativeStackOffset(const Value* p) const;
|
2010-07-04 00:12:06 -07:00
|
|
|
JS_REQUIRES_STACK ptrdiff_t nativeStackSlotImpl(const void* p) const;
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK ptrdiff_t nativeStackSlot(const Value* p) const;
|
2010-07-04 00:12:06 -07:00
|
|
|
JS_REQUIRES_STACK ptrdiff_t nativespOffsetImpl(const void* p) const;
|
|
|
|
JS_REQUIRES_STACK ptrdiff_t nativespOffset(const Value* p) const;
|
2010-10-18 12:55:56 -07:00
|
|
|
JS_REQUIRES_STACK void importImpl(nanojit::LIns* base, ptrdiff_t offset, nanojit::AccSet accSet,
|
|
|
|
const void* p, JSValueType t,
|
2010-07-04 00:12:06 -07:00
|
|
|
const char *prefix, uintN index, JSStackFrame *fp);
|
2010-10-18 12:55:56 -07:00
|
|
|
JS_REQUIRES_STACK void import(nanojit::LIns* base, ptrdiff_t offset, nanojit::AccSet accSet,
|
|
|
|
const Value* p, JSValueType t,
|
2008-12-09 08:38:32 -08:00
|
|
|
const char *prefix, uintN index, JSStackFrame *fp);
|
2009-12-04 19:59:55 -08:00
|
|
|
JS_REQUIRES_STACK void import(TreeFragment* tree, nanojit::LIns* sp, unsigned stackSlots,
|
2010-06-21 12:28:56 -07:00
|
|
|
unsigned callDepth, unsigned ngslots, JSValueType* typeMap);
|
2008-07-23 23:18:02 -07:00
|
|
|
void trackNativeStackUse(unsigned slots);
|
2008-07-07 02:21:04 -07:00
|
|
|
|
2010-08-29 11:57:08 -07:00
|
|
|
JS_REQUIRES_STACK bool isValidSlot(JSObject *obj, const js::Shape* shape);
|
2008-12-09 08:38:32 -08:00
|
|
|
JS_REQUIRES_STACK bool lazilyImportGlobalSlot(unsigned slot);
|
2009-12-11 19:10:36 -08:00
|
|
|
JS_REQUIRES_STACK void importGlobalSlot(unsigned slot);
|
2008-08-29 18:59:21 -07:00
|
|
|
|
2010-10-07 15:51:57 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus guard(bool expected, nanojit::LIns* cond, ExitType exitType,
|
|
|
|
bool abortIfAlwaysExits = false);
|
|
|
|
JS_REQUIRES_STACK RecordingStatus guard(bool expected, nanojit::LIns* cond, VMSideExit* exit,
|
|
|
|
bool abortIfAlwaysExits = false);
|
2010-02-18 20:12:37 -08:00
|
|
|
JS_REQUIRES_STACK nanojit::LIns* guard_xov(nanojit::LOpcode op, nanojit::LIns* d0,
|
|
|
|
nanojit::LIns* d1, VMSideExit* exit);
|
2008-12-21 03:55:09 -08:00
|
|
|
|
2008-07-17 02:00:23 -07:00
|
|
|
nanojit::LIns* addName(nanojit::LIns* ins, const char* name);
|
2008-06-30 17:12:52 -07:00
|
|
|
|
2009-09-08 16:47:55 -07:00
|
|
|
nanojit::LIns* writeBack(nanojit::LIns* i, nanojit::LIns* base, ptrdiff_t offset,
|
|
|
|
bool demote);
|
2010-07-04 00:12:06 -07:00
|
|
|
|
|
|
|
#ifdef DEBUG
|
2010-08-09 22:43:33 -07:00
|
|
|
bool isValidFrameObjPtr(void *obj);
|
2010-07-04 00:12:06 -07:00
|
|
|
#endif
|
|
|
|
|
|
|
|
JS_REQUIRES_STACK void setImpl(void* p, nanojit::LIns* l, bool demote = true);
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK void set(Value* p, nanojit::LIns* l, bool demote = true);
|
2010-08-09 22:43:33 -07:00
|
|
|
JS_REQUIRES_STACK void setFrameObjPtr(void* p, nanojit::LIns* l, bool demote = true);
|
2010-07-04 00:12:06 -07:00
|
|
|
nanojit::LIns* getFromTrackerImpl(const void *p);
|
2010-06-10 17:20:26 -07:00
|
|
|
nanojit::LIns* getFromTracker(const Value* p);
|
2010-07-04 00:12:06 -07:00
|
|
|
JS_REQUIRES_STACK nanojit::LIns* getImpl(const void* p);
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK nanojit::LIns* get(const Value* p);
|
2010-08-09 22:43:33 -07:00
|
|
|
JS_REQUIRES_STACK nanojit::LIns* getFrameObjPtr(void* p);
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK nanojit::LIns* attemptImport(const Value* p);
|
|
|
|
JS_REQUIRES_STACK nanojit::LIns* addr(Value* p);
|
2009-08-03 09:38:28 -07:00
|
|
|
|
2010-07-04 00:12:06 -07:00
|
|
|
JS_REQUIRES_STACK bool knownImpl(const void* p);
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK bool known(const Value* p);
|
2010-07-04 00:12:06 -07:00
|
|
|
JS_REQUIRES_STACK bool known(JSObject** p);
|
2010-09-01 17:11:19 -07:00
|
|
|
/*
 * The slots of the global object are sometimes reallocated by the
 * interpreter. This function checks for that condition and re-maps the
 * entries of the tracker accordingly.
 */
JS_REQUIRES_STACK void checkForGlobalObjectReallocation() {
    /* Fast path: fall into the helper only if the slot array has moved. */
    if (global_slots != globalObj->getSlots())
        checkForGlobalObjectReallocationHelper();
}
/* Slow path: re-maps tracker entries to the relocated slot array. */
JS_REQUIRES_STACK void checkForGlobalObjectReallocationHelper();
|
2008-07-06 11:48:41 -07:00
|
|
|
|
2009-08-07 14:08:23 -07:00
|
|
|
JS_REQUIRES_STACK TypeConsensus selfTypeStability(SlotMap& smap);
|
2009-09-30 15:28:00 -07:00
|
|
|
JS_REQUIRES_STACK TypeConsensus peerTypeStability(SlotMap& smap, const void* ip,
|
2009-11-05 15:04:54 -08:00
|
|
|
TreeFragment** peer);
|
2008-07-07 02:21:04 -07:00
|
|
|
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK Value& argval(unsigned n) const;
|
|
|
|
JS_REQUIRES_STACK Value& varval(unsigned n) const;
|
|
|
|
JS_REQUIRES_STACK Value& stackval(int n) const;
|
|
|
|
|
|
|
|
JS_REQUIRES_STACK void updateAtoms();
|
|
|
|
JS_REQUIRES_STACK void updateAtoms(JSScript *script);
|
2008-07-07 02:21:04 -07:00
|
|
|
|
2009-07-31 17:52:30 -07:00
|
|
|
/* Result of a name lookup performed during recording. */
struct NameResult {
    // |tracked| is true iff the result of the name lookup is a variable that
    // is already in the tracker. The rest of the fields are set only if
    // |tracked| is false.
    bool tracked;
    Value v;                 // current property value
    JSObject *obj;           // Call object where name was found
    nanojit::LIns *obj_ins;  // LIR value for obj
    js::Shape *shape;        // shape name was resolved to
};
|
|
|
|
|
2010-01-15 11:32:14 -08:00
|
|
|
JS_REQUIRES_STACK nanojit::LIns* scopeChain();
|
|
|
|
JS_REQUIRES_STACK nanojit::LIns* entryScopeChain() const;
|
2010-08-22 16:00:20 -07:00
|
|
|
JS_REQUIRES_STACK nanojit::LIns* entryFrameIns() const;
|
2009-07-08 11:16:41 -07:00
|
|
|
JS_REQUIRES_STACK JSStackFrame* frameIfInRange(JSObject* obj, unsigned* depthp = NULL) const;
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus traverseScopeChain(JSObject *obj, nanojit::LIns *obj_ins, JSObject *obj2, nanojit::LIns *&obj2_ins);
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus scopeChainProp(JSObject* obj, Value*& vp, nanojit::LIns*& ins, NameResult& nr);
|
2010-08-29 11:57:08 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus callProp(JSObject* obj, JSProperty* shape, jsid id, Value*& vp, nanojit::LIns*& ins, NameResult& nr);
|
2008-08-20 22:40:39 -07:00
|
|
|
|
2008-12-09 08:38:32 -08:00
|
|
|
JS_REQUIRES_STACK nanojit::LIns* arg(unsigned n);
|
|
|
|
JS_REQUIRES_STACK void arg(unsigned n, nanojit::LIns* i);
|
|
|
|
JS_REQUIRES_STACK nanojit::LIns* var(unsigned n);
|
|
|
|
JS_REQUIRES_STACK void var(unsigned n, nanojit::LIns* i);
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK nanojit::LIns* upvar(JSScript* script, JSUpvarArray* uva, uintN index, Value& v);
|
2010-03-23 16:07:19 -07:00
|
|
|
nanojit::LIns* stackLoad(nanojit::LIns* addr, nanojit::AccSet accSet, uint8 type);
|
2008-12-09 08:38:32 -08:00
|
|
|
JS_REQUIRES_STACK nanojit::LIns* stack(int n);
|
|
|
|
JS_REQUIRES_STACK void stack(int n, nanojit::LIns* i);
|
2008-07-07 02:21:04 -07:00
|
|
|
|
2010-10-18 15:13:55 -07:00
|
|
|
JS_REQUIRES_STACK void guardNonNeg(nanojit::LIns* d0, nanojit::LIns* d1, VMSideExit* exit);
|
2009-02-04 22:57:25 -08:00
|
|
|
JS_REQUIRES_STACK nanojit::LIns* alu(nanojit::LOpcode op, jsdouble v0, jsdouble v1,
|
2009-01-07 14:14:51 -08:00
|
|
|
nanojit::LIns* s0, nanojit::LIns* s1);
|
2010-03-18 12:12:06 -07:00
|
|
|
|
2010-10-13 16:39:20 -07:00
|
|
|
bool condBranch(nanojit::LOpcode op, nanojit::LIns* cond, nanojit::LIns** brOut);
|
|
|
|
nanojit::LIns* unoptimizableCondBranch(nanojit::LOpcode op, nanojit::LIns* cond);
|
|
|
|
void labelForBranch(nanojit::LIns* br);
|
|
|
|
void labelForBranches(nanojit::LIns* br1, nanojit::LIns* br2);
|
|
|
|
|
2010-04-21 21:42:19 -07:00
|
|
|
nanojit::LIns* i2d(nanojit::LIns* i);
|
2010-05-05 16:50:50 -07:00
|
|
|
nanojit::LIns* d2i(nanojit::LIns* f, bool resultCanBeImpreciseIfFractional = false);
|
2009-12-11 19:48:14 -08:00
|
|
|
nanojit::LIns* f2u(nanojit::LIns* f);
|
2010-10-07 15:51:57 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus makeNumberInt32(nanojit::LIns* d, nanojit::LIns** num_ins);
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK nanojit::LIns* stringify(const Value& v);
|
2008-11-13 00:30:20 -08:00
|
|
|
|
2010-08-11 23:27:33 -07:00
|
|
|
JS_REQUIRES_STACK nanojit::LIns* newArguments(nanojit::LIns* callee_ins, bool strict);
|
2009-09-09 11:40:21 -07:00
|
|
|
|
2010-04-22 15:10:38 -07:00
|
|
|
JS_REQUIRES_STACK bool canCallImacro() const;
|
|
|
|
JS_REQUIRES_STACK RecordingStatus callImacro(jsbytecode* imacro);
|
|
|
|
JS_REQUIRES_STACK RecordingStatus callImacroInfallibly(jsbytecode* imacro);
|
2008-11-13 00:30:20 -08:00
|
|
|
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus ifop();
|
|
|
|
JS_REQUIRES_STACK RecordingStatus switchop();
|
2009-02-11 17:40:27 -08:00
|
|
|
#ifdef NANOJIT_IA32
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus tableswitch();
|
2009-02-11 17:40:27 -08:00
|
|
|
#endif
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus inc(Value& v, jsint incr, bool pre = true);
|
|
|
|
JS_REQUIRES_STACK RecordingStatus inc(const Value &v, nanojit::LIns*& v_ins, jsint incr,
|
2009-08-10 13:03:50 -07:00
|
|
|
bool pre = true);
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus incHelper(const Value &v, nanojit::LIns* v_ins,
|
2009-08-10 13:03:50 -07:00
|
|
|
nanojit::LIns*& v_after, jsint incr);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus incProp(jsint incr, bool pre = true);
|
|
|
|
JS_REQUIRES_STACK RecordingStatus incElem(jsint incr, bool pre = true);
|
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus incName(jsint incr, bool pre = true);
|
2008-08-28 23:50:48 -07:00
|
|
|
|
2008-12-18 10:35:09 -08:00
|
|
|
JS_REQUIRES_STACK void strictEquality(bool equal, bool cmpCase);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus equality(bool negate, bool tryBranchAfterCond);
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus equalityHelper(Value& l, Value& r,
|
2009-09-28 09:03:21 -07:00
|
|
|
nanojit::LIns* l_ins, nanojit::LIns* r_ins,
|
|
|
|
bool negate, bool tryBranchAfterCond,
|
2010-06-10 17:20:26 -07:00
|
|
|
Value& rval);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus relational(nanojit::LOpcode op, bool tryBranchAfterCond);
|
2008-07-06 13:16:34 -07:00
|
|
|
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus unary(nanojit::LOpcode op);
|
|
|
|
JS_REQUIRES_STACK RecordingStatus binary(nanojit::LOpcode op);
|
2008-07-07 02:21:04 -07:00
|
|
|
|
2009-09-29 19:05:19 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus guardShape(nanojit::LIns* obj_ins, JSObject* obj,
|
2010-03-29 08:35:38 -07:00
|
|
|
uint32 shape, const char* name, VMSideExit* exit);
|
2009-09-29 19:05:19 -07:00
|
|
|
|
|
|
|
#if defined DEBUG_notme && defined XP_UNIX
|
|
|
|
void dumpGuardedShapes(const char* prefix);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
void forgetGuardedShapes();
|
2008-07-07 02:21:04 -07:00
|
|
|
|
2010-08-29 11:57:08 -07:00
|
|
|
inline nanojit::LIns* shape_ins(nanojit::LIns *obj_ins);
|
2010-10-18 12:55:56 -07:00
|
|
|
inline nanojit::LIns* slots(nanojit::LIns *obj_ins);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus test_property_cache(JSObject* obj, nanojit::LIns* obj_ins,
|
2010-03-26 07:01:01 -07:00
|
|
|
JSObject*& obj2, PCVal& pcval);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus guardPropertyCacheHit(nanojit::LIns* obj_ins,
|
2010-03-24 19:20:44 -07:00
|
|
|
JSObject* aobj,
|
|
|
|
JSObject* obj2,
|
|
|
|
PropertyCacheEntry* entry,
|
2010-03-26 07:01:01 -07:00
|
|
|
PCVal& pcval);
|
2009-07-27 14:47:34 -07:00
|
|
|
|
2010-06-28 18:08:28 -07:00
|
|
|
void stobj_set_fslot(nanojit::LIns *obj_ins, unsigned slot, const Value &v,
|
2009-08-17 16:27:48 -07:00
|
|
|
nanojit::LIns* v_ins);
|
2010-06-10 17:20:26 -07:00
|
|
|
void stobj_set_dslot(nanojit::LIns *obj_ins, unsigned slot,
|
2010-10-18 12:55:56 -07:00
|
|
|
nanojit::LIns*& slots_ins, const Value &v, nanojit::LIns* v_ins);
|
2010-10-13 11:49:22 -07:00
|
|
|
void stobj_set_slot(JSObject *obj, nanojit::LIns* obj_ins, unsigned slot,
|
2010-10-18 12:55:56 -07:00
|
|
|
nanojit::LIns*& slots_ins, const Value &v, nanojit::LIns* v_ins);
|
2010-06-28 18:08:28 -07:00
|
|
|
|
2010-10-13 11:49:22 -07:00
|
|
|
nanojit::LIns* stobj_get_slot_uint32(nanojit::LIns* obj_ins, unsigned slot);
|
2010-06-28 18:08:28 -07:00
|
|
|
nanojit::LIns* unbox_slot(JSObject *obj, nanojit::LIns *obj_ins, uint32 slot,
|
|
|
|
VMSideExit *exit);
|
2010-06-17 15:23:17 -07:00
|
|
|
nanojit::LIns* stobj_get_parent(nanojit::LIns* obj_ins);
|
|
|
|
nanojit::LIns* stobj_get_private(nanojit::LIns* obj_ins);
|
2010-10-13 11:49:22 -07:00
|
|
|
nanojit::LIns* stobj_get_private_uint32(nanojit::LIns* obj_ins);
|
2010-06-17 15:23:17 -07:00
|
|
|
nanojit::LIns* stobj_get_proto(nanojit::LIns* obj_ins);
|
2009-07-30 11:34:35 -07:00
|
|
|
|
2010-10-13 11:49:22 -07:00
|
|
|
/* For slots holding private pointers. */
|
|
|
|
nanojit::LIns* stobj_get_const_private_ptr(nanojit::LIns *obj_ins, unsigned slot);
|
|
|
|
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus name(Value*& vp, nanojit::LIns*& ins, NameResult& nr);
|
2010-01-14 09:33:14 -08:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus prop(JSObject* obj, nanojit::LIns* obj_ins,
|
|
|
|
uint32 *slotp, nanojit::LIns** v_insp,
|
2010-06-10 17:20:26 -07:00
|
|
|
Value* outp);
|
2010-04-22 15:10:38 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus propTail(JSObject* obj, nanojit::LIns* obj_ins,
|
|
|
|
JSObject* obj2, PCVal pcval,
|
|
|
|
uint32 *slotp, nanojit::LIns** v_insp,
|
2010-06-10 17:20:26 -07:00
|
|
|
Value* outp);
|
|
|
|
JS_REQUIRES_STACK RecordingStatus denseArrayElement(Value& oval, Value& idx, Value*& vp,
|
2010-01-25 17:06:36 -08:00
|
|
|
nanojit::LIns*& v_ins,
|
2010-09-08 19:44:47 -07:00
|
|
|
nanojit::LIns*& addr_ins,
|
|
|
|
VMSideExit* exit);
|
2010-08-03 22:06:44 -07:00
|
|
|
JS_REQUIRES_STACK nanojit::LIns *canonicalizeNaNs(nanojit::LIns *dval_ins);
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus typedArrayElement(Value& oval, Value& idx, Value*& vp,
|
2010-01-25 17:06:36 -08:00
|
|
|
nanojit::LIns*& v_ins,
|
|
|
|
nanojit::LIns*& addr_ins);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus getProp(JSObject* obj, nanojit::LIns* obj_ins);
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus getProp(Value& v);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus getThis(nanojit::LIns*& this_ins);
|
2008-12-09 08:38:32 -08:00
|
|
|
|
2010-06-28 18:08:28 -07:00
|
|
|
JS_REQUIRES_STACK void storeMagic(JSWhyMagic why, nanojit::LIns *addr_ins, ptrdiff_t offset,
|
|
|
|
nanojit::AccSet accSet);
|
2010-05-07 17:52:52 -07:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus unboxNextValue(nanojit::LIns* &v_ins);
|
|
|
|
|
2009-09-11 15:44:30 -07:00
|
|
|
JS_REQUIRES_STACK VMSideExit* enterDeepBailCall();
|
2009-08-03 09:38:28 -07:00
|
|
|
JS_REQUIRES_STACK void leaveDeepBailCall();
|
|
|
|
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus primitiveToStringInPlace(Value* vp);
|
2009-08-03 09:38:28 -07:00
|
|
|
JS_REQUIRES_STACK void finishGetProp(nanojit::LIns* obj_ins, nanojit::LIns* vp_ins,
|
2010-06-10 17:20:26 -07:00
|
|
|
nanojit::LIns* ok_ins, Value* outp);
|
|
|
|
JS_REQUIRES_STACK RecordingStatus getPropertyByName(nanojit::LIns* obj_ins, Value* idvalp,
|
|
|
|
Value* outp);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus getPropertyByIndex(nanojit::LIns* obj_ins,
|
2010-06-10 17:20:26 -07:00
|
|
|
nanojit::LIns* index_ins, Value* outp);
|
|
|
|
JS_REQUIRES_STACK RecordingStatus getPropertyById(nanojit::LIns* obj_ins, Value* outp);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus getPropertyWithNativeGetter(nanojit::LIns* obj_ins,
|
2010-08-29 11:57:08 -07:00
|
|
|
const js::Shape* shape,
|
2010-06-10 17:20:26 -07:00
|
|
|
Value* outp);
|
2010-04-22 15:10:38 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus getPropertyWithScriptGetter(JSObject *obj,
|
|
|
|
nanojit::LIns* obj_ins,
|
2010-08-29 11:57:08 -07:00
|
|
|
const js::Shape* shape);
|
2009-08-03 09:38:28 -07:00
|
|
|
|
2010-10-18 12:55:56 -07:00
|
|
|
JS_REQUIRES_STACK nanojit::LIns* getStringLengthAndFlags(nanojit::LIns* str_ins);
|
2010-08-06 03:29:14 -07:00
|
|
|
JS_REQUIRES_STACK nanojit::LIns* getStringLength(nanojit::LIns* str_ins);
|
|
|
|
JS_REQUIRES_STACK nanojit::LIns* getStringChars(nanojit::LIns* str_ins);
|
2010-10-07 15:51:57 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus getCharCodeAt(JSString *str,
|
|
|
|
nanojit::LIns* str_ins, nanojit::LIns* idx_ins,
|
|
|
|
nanojit::LIns** out_ins);
|
2010-10-04 19:57:15 -07:00
|
|
|
JS_REQUIRES_STACK nanojit::LIns* getUnitString(nanojit::LIns* str_ins, nanojit::LIns* idx_ins);
|
2010-10-07 15:51:57 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus getCharAt(JSString *str,
|
|
|
|
nanojit::LIns* str_ins, nanojit::LIns* idx_ins,
|
|
|
|
JSOp mode, nanojit::LIns** out_ins);
|
2010-08-05 22:54:34 -07:00
|
|
|
|
2010-06-02 16:01:36 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus nativeSet(JSObject* obj, nanojit::LIns* obj_ins,
|
2010-08-29 11:57:08 -07:00
|
|
|
const js::Shape* shape,
|
|
|
|
const Value &v, nanojit::LIns* v_ins);
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus setProp(Value &l, PropertyCacheEntry* entry,
|
2010-08-29 11:57:08 -07:00
|
|
|
const js::Shape* shape,
|
|
|
|
Value &v, nanojit::LIns*& v_ins,
|
|
|
|
bool isDefinitelyAtom);
|
2010-06-02 16:01:36 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus setCallProp(JSObject *callobj, nanojit::LIns *callobj_ins,
|
2010-08-29 11:57:08 -07:00
|
|
|
const js::Shape *shape, nanojit::LIns *v_ins,
|
|
|
|
const Value &v);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus initOrSetPropertyByName(nanojit::LIns* obj_ins,
|
2010-08-29 11:57:08 -07:00
|
|
|
Value* idvalp, Value* rvalp,
|
|
|
|
bool init);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus initOrSetPropertyByIndex(nanojit::LIns* obj_ins,
|
2010-08-29 11:57:08 -07:00
|
|
|
nanojit::LIns* index_ins,
|
|
|
|
Value* rvalp, bool init);
|
2009-12-02 21:50:04 -08:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus setElem(int lval_spindex, int idx_spindex,
|
|
|
|
int v_spindex);
|
2009-07-27 14:47:34 -07:00
|
|
|
|
2010-07-27 22:33:42 -07:00
|
|
|
void box_undefined_into(nanojit::LIns *dstaddr_ins, ptrdiff_t offset, nanojit::AccSet accSet);
|
2010-06-28 18:08:28 -07:00
|
|
|
#if JS_BITS_PER_WORD == 32
|
2010-07-27 22:33:42 -07:00
|
|
|
void box_null_into(nanojit::LIns *dstaddr_ins, ptrdiff_t offset, nanojit::AccSet accSet);
|
2010-06-28 18:08:28 -07:00
|
|
|
nanojit::LIns* unbox_number_as_double(nanojit::LIns* vaddr_ins, ptrdiff_t offset,
|
|
|
|
nanojit::LIns* tag_ins, VMSideExit* exit,
|
2010-07-27 22:33:42 -07:00
|
|
|
nanojit::AccSet accSet);
|
2010-06-29 22:33:52 -07:00
|
|
|
nanojit::LIns* unbox_object(nanojit::LIns* vaddr_ins, ptrdiff_t offset,
|
|
|
|
nanojit::LIns* tag_ins, JSValueType type, VMSideExit* exit,
|
2010-07-27 22:33:42 -07:00
|
|
|
nanojit::AccSet accSet);
|
2010-06-29 22:33:52 -07:00
|
|
|
nanojit::LIns* unbox_non_double_object(nanojit::LIns* vaddr_ins, ptrdiff_t offset,
|
|
|
|
nanojit::LIns* tag_ins, JSValueType type,
|
2010-07-27 22:33:42 -07:00
|
|
|
VMSideExit* exit, nanojit::AccSet accSet);
|
2010-06-28 18:08:28 -07:00
|
|
|
#elif JS_BITS_PER_WORD == 64
|
2010-06-29 22:33:52 -07:00
|
|
|
nanojit::LIns* non_double_object_value_has_type(nanojit::LIns* v_ins, JSValueType type);
|
2010-06-28 18:08:28 -07:00
|
|
|
nanojit::LIns* unpack_ptr(nanojit::LIns* v_ins);
|
|
|
|
nanojit::LIns* unbox_number_as_double(nanojit::LIns* v_ins, VMSideExit* exit);
|
2010-06-29 22:33:52 -07:00
|
|
|
nanojit::LIns* unbox_object(nanojit::LIns* v_ins, JSValueType type, VMSideExit* exit);
|
|
|
|
nanojit::LIns* unbox_non_double_object(nanojit::LIns* v_ins, JSValueType type, VMSideExit* exit);
|
2010-06-28 18:08:28 -07:00
|
|
|
#endif
|
|
|
|
|
|
|
|
nanojit::LIns* unbox_value(const Value& v, nanojit::LIns* vaddr_ins,
|
2010-10-18 12:55:56 -07:00
|
|
|
ptrdiff_t offset, nanojit::AccSet accSet, VMSideExit* exit,
|
2010-06-28 18:08:28 -07:00
|
|
|
bool force_double=false);
|
2010-06-29 22:33:52 -07:00
|
|
|
void unbox_any_object(nanojit::LIns* vaddr_ins, nanojit::LIns** obj_ins,
|
2010-07-27 22:33:42 -07:00
|
|
|
nanojit::LIns** is_obj_ins, nanojit::AccSet accSet);
|
|
|
|
nanojit::LIns* is_boxed_true(nanojit::LIns* vaddr_ins, nanojit::AccSet accSet);
|
2010-10-06 10:09:40 -07:00
|
|
|
nanojit::LIns* is_boxed_magic(nanojit::LIns* vaddr_ins, JSWhyMagic why, nanojit::AccSet accSet);
|
2010-06-28 18:08:28 -07:00
|
|
|
|
|
|
|
nanojit::LIns* is_string_id(nanojit::LIns* id_ins);
|
|
|
|
nanojit::LIns* unbox_string_id(nanojit::LIns* id_ins);
|
|
|
|
nanojit::LIns* unbox_int_id(nanojit::LIns* id_ins);
|
|
|
|
|
|
|
|
/* Box a slot on trace into the given address at the given offset. */
|
|
|
|
void box_value_into(const Value& v, nanojit::LIns* v_ins,
|
|
|
|
nanojit::LIns* dstaddr_ins, ptrdiff_t offset,
|
2010-07-27 22:33:42 -07:00
|
|
|
nanojit::AccSet accSet);
|
2010-06-28 18:08:28 -07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Box a slot so that it may be passed with value semantics to a native. On
|
|
|
|
* 32-bit, this currently means boxing the value into insAlloc'd memory and
|
|
|
|
* returning the address which is passed as a Value*. On 64-bit, this
|
|
|
|
* currently means returning the boxed value which is passed as a jsval.
|
|
|
|
*/
|
|
|
|
nanojit::LIns* box_value_for_native_call(const Value& v, nanojit::LIns* v_ins);
|
|
|
|
|
|
|
|
/* Box a slot into insAlloc'd memory. */
|
|
|
|
nanojit::LIns* box_value_into_alloc(const Value& v, nanojit::LIns* v_ins);
|
2010-06-10 17:20:26 -07:00
|
|
|
|
|
|
|
JS_REQUIRES_STACK void guardClassHelper(bool cond, nanojit::LIns* obj_ins, Class* clasp,
|
2010-07-27 22:33:42 -07:00
|
|
|
VMSideExit* exit, nanojit::LoadQual loadQual);
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK void guardClass(nanojit::LIns* obj_ins, Class* clasp,
|
2010-07-27 22:33:42 -07:00
|
|
|
VMSideExit* exit, nanojit::LoadQual loadQual);
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK void guardNotClass(nanojit::LIns* obj_ins, Class* clasp,
|
2010-07-27 22:33:42 -07:00
|
|
|
VMSideExit* exit, nanojit::LoadQual loadQual);
|
2010-05-19 18:09:54 -07:00
|
|
|
JS_REQUIRES_STACK void guardDenseArray(nanojit::LIns* obj_ins, ExitType exitType);
|
|
|
|
JS_REQUIRES_STACK void guardDenseArray(nanojit::LIns* obj_ins, VMSideExit* exit);
|
2009-06-23 19:22:54 -07:00
|
|
|
JS_REQUIRES_STACK bool guardHasPrototype(JSObject* obj, nanojit::LIns* obj_ins,
|
|
|
|
JSObject** pobj, nanojit::LIns** pobj_ins,
|
|
|
|
VMSideExit* exit);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus guardPrototypeHasNoIndexedProperties(JSObject* obj,
|
2010-09-08 18:31:47 -07:00
|
|
|
nanojit::LIns* obj_ins,
|
|
|
|
VMSideExit *exit);
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus guardNativeConversion(Value& v);
|
2010-01-04 11:31:10 -08:00
|
|
|
JS_REQUIRES_STACK void clearCurrentFrameSlotsFromTracker(Tracker& which);
|
2010-01-15 11:32:14 -08:00
|
|
|
JS_REQUIRES_STACK void putActivationObjects();
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus guardCallee(Value& callee);
|
2009-08-05 12:39:10 -07:00
|
|
|
JS_REQUIRES_STACK JSStackFrame *guardArguments(JSObject *obj, nanojit::LIns* obj_ins,
|
|
|
|
unsigned *depthp);
|
2010-01-14 16:23:05 -08:00
|
|
|
JS_REQUIRES_STACK nanojit::LIns* guardArgsLengthNotAssigned(nanojit::LIns* argsobj_ins);
|
2010-10-06 10:09:40 -07:00
|
|
|
JS_REQUIRES_STACK void guardNotHole(nanojit::LIns *argsobj_ins, nanojit::LIns *ids_ins);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus getClassPrototype(JSObject* ctor,
|
2009-04-30 17:30:46 -07:00
|
|
|
nanojit::LIns*& proto_ins);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus getClassPrototype(JSProtoKey key,
|
2009-04-30 17:30:46 -07:00
|
|
|
nanojit::LIns*& proto_ins);
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus newArray(JSObject* ctor, uint32 argc, Value* argv,
|
|
|
|
Value* rval);
|
|
|
|
JS_REQUIRES_STACK RecordingStatus newString(JSObject* ctor, uint32 argc, Value* argv,
|
|
|
|
Value* rval);
|
|
|
|
JS_REQUIRES_STACK RecordingStatus interpretedFunctionCall(Value& fval, JSFunction* fun,
|
2009-04-30 17:30:46 -07:00
|
|
|
uintN argc, bool constructing);
|
2009-07-27 14:47:34 -07:00
|
|
|
JS_REQUIRES_STACK void propagateFailureToBuiltinStatus(nanojit::LIns *ok_ins,
|
|
|
|
nanojit::LIns *&status_ins);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus emitNativeCall(JSSpecializedNative* sn, uintN argc,
|
2009-09-04 17:00:08 -07:00
|
|
|
nanojit::LIns* args[], bool rooted);
|
2010-08-29 11:57:08 -07:00
|
|
|
JS_REQUIRES_STACK void emitNativePropertyOp(const js::Shape* shape,
|
2009-07-27 14:47:34 -07:00
|
|
|
nanojit::LIns* obj_ins,
|
|
|
|
bool setflag,
|
2010-06-28 18:08:28 -07:00
|
|
|
nanojit::LIns* addr_boxed_val_ins);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus callSpecializedNative(JSNativeTraceInfo* trcinfo, uintN argc,
|
2009-09-04 13:44:31 -07:00
|
|
|
bool constructing);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus callNative(uintN argc, JSOp mode);
|
2010-10-18 15:10:52 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus callFloatReturningInt(uintN argc,
|
|
|
|
const nanojit::CallInfo *ci);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus functionCall(uintN argc, JSOp mode);
|
2008-07-31 16:30:00 -07:00
|
|
|
|
2008-12-09 08:38:32 -08:00
|
|
|
JS_REQUIRES_STACK void trackCfgMerges(jsbytecode* pc);
|
2009-03-21 01:07:51 -07:00
|
|
|
JS_REQUIRES_STACK void emitIf(jsbytecode* pc, bool cond, nanojit::LIns* x);
|
2008-12-09 08:38:32 -08:00
|
|
|
JS_REQUIRES_STACK void fuseIf(jsbytecode* pc, bool cond, nanojit::LIns* x);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus checkTraceEnd(jsbytecode* pc);
|
2008-09-05 18:29:08 -07:00
|
|
|
|
2010-04-26 09:30:06 -07:00
|
|
|
AbortableRecordingStatus hasMethod(JSObject* obj, jsid id, bool& found);
|
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus hasIteratorMethod(JSObject* obj, bool& found);
|
2008-11-13 00:30:20 -08:00
|
|
|
|
2009-04-05 21:51:42 -07:00
|
|
|
JS_REQUIRES_STACK jsatomid getFullIndex(ptrdiff_t pcoff = 0);
|
2009-04-05 21:17:22 -07:00
|
|
|
|
2010-06-21 12:28:56 -07:00
|
|
|
JS_REQUIRES_STACK JSValueType determineSlotType(Value* vp);
|
2009-04-18 16:47:23 -07:00
|
|
|
|
2010-06-10 17:20:26 -07:00
|
|
|
JS_REQUIRES_STACK RecordingStatus setUpwardTrackedVar(Value* stackVp, const Value& v,
|
2010-04-20 22:42:00 -07:00
|
|
|
nanojit::LIns* v_ins);
|
|
|
|
|
2009-11-12 18:34:24 -08:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus compile();
|
2009-09-30 15:28:00 -07:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus closeLoop();
|
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus closeLoop(VMSideExit* exit);
|
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus closeLoop(SlotMap& slotMap, VMSideExit* exit);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus endLoop();
|
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus endLoop(VMSideExit* exit);
|
2009-11-05 15:04:54 -08:00
|
|
|
JS_REQUIRES_STACK void joinEdgesToEntry(TreeFragment* peer_root);
|
|
|
|
JS_REQUIRES_STACK void adjustCallerTypes(TreeFragment* f);
|
2010-03-07 13:09:35 -08:00
|
|
|
JS_REQUIRES_STACK void prepareTreeCall(TreeFragment* inner);
|
|
|
|
JS_REQUIRES_STACK void emitTreeCall(TreeFragment* inner, VMSideExit* exit);
|
2010-06-21 12:28:56 -07:00
|
|
|
JS_REQUIRES_STACK void determineGlobalTypes(JSValueType* typeMap);
|
2009-10-26 13:06:17 -07:00
|
|
|
JS_REQUIRES_STACK VMSideExit* downSnapshot(FrameInfo* downFrame);
|
2009-11-05 15:04:54 -08:00
|
|
|
JS_REQUIRES_STACK TreeFragment* findNestedCompatiblePeer(TreeFragment* f);
|
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus attemptTreeCall(TreeFragment* inner,
|
2010-07-20 23:41:46 -07:00
|
|
|
uintN& inlineCallCount);
|
2008-11-13 00:30:20 -08:00
|
|
|
|
2010-04-26 09:30:06 -07:00
|
|
|
static JS_REQUIRES_STACK MonitorResult recordLoopEdge(JSContext* cx, TraceRecorder* r,
|
2010-07-20 23:41:46 -07:00
|
|
|
uintN& inlineCallCount);
|
2009-09-30 15:28:00 -07:00
|
|
|
|
2009-10-26 13:06:17 -07:00
|
|
|
/* Allocators associated with this recording session. */
|
|
|
|
/* Scratch allocator for this recording session; storage is owned by traceMonitor. */
VMAllocator& tempAlloc() const { return *traceMonitor->tempAlloc; }
|
|
|
|
/* Trace allocator owned by traceMonitor (see the session-allocator note above). */
VMAllocator& traceAlloc() const { return *traceMonitor->traceAlloc; }
|
|
|
|
/* Data allocator owned by traceMonitor (see the session-allocator note above). */
VMAllocator& dataAlloc() const { return *traceMonitor->dataAlloc; }
|
|
|
|
|
|
|
|
/* Member declarations for each opcode, to be called before interpreting the opcode. */
|
|
|
|
#define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format) \
|
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus record_##op();
|
|
|
|
# include "jsopcode.tbl"
|
|
|
|
#undef OPDEF
|
|
|
|
|
2010-04-27 14:17:11 -07:00
|
|
|
/* Allocate recorder instances with js_calloc, so all members start zero-filled. */
inline void* operator new(size_t size) { return js_calloc(size); }
|
|
|
|
/* Matching deallocation via js_free. */
inline void operator delete(void *p) { js_free(p); }
|
2009-11-12 18:34:24 -08:00
|
|
|
|
|
|
|
JS_REQUIRES_STACK
|
2009-12-04 19:59:55 -08:00
|
|
|
TraceRecorder(JSContext* cx, VMSideExit*, VMFragment*,
|
2010-06-21 12:28:56 -07:00
|
|
|
unsigned stackSlots, unsigned ngslots, JSValueType* typeMap,
|
2010-10-19 11:08:25 -07:00
|
|
|
VMSideExit* expectedInnerExit, JSScript* outerScript, jsbytecode* outerPC,
|
2010-08-27 21:18:58 -07:00
|
|
|
uint32 outerArgc, bool speculate);
|
2009-11-12 18:34:24 -08:00
|
|
|
|
|
|
|
/* The destructor should only be called through finish*, not directly. */
|
|
|
|
~TraceRecorder();
|
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus finishSuccessfully();
|
2010-09-29 11:31:52 -07:00
|
|
|
|
|
|
|
enum AbortResult { NORMAL_ABORT, JIT_RESET };
|
|
|
|
JS_REQUIRES_STACK AbortResult finishAbort(const char* reason);
|
2009-11-12 18:34:24 -08:00
|
|
|
|
2009-10-26 13:06:17 -07:00
|
|
|
friend class ImportBoxedStackSlotVisitor;
|
|
|
|
friend class ImportUnboxedStackSlotVisitor;
|
|
|
|
friend class ImportGlobalSlotVisitor;
|
|
|
|
friend class AdjustCallerGlobalTypesVisitor;
|
|
|
|
friend class AdjustCallerStackTypesVisitor;
|
|
|
|
friend class TypeCompatibilityVisitor;
|
2009-12-16 22:06:07 -08:00
|
|
|
friend class ImportFrameSlotsVisitor;
|
2009-10-26 13:06:17 -07:00
|
|
|
friend class SlotMap;
|
|
|
|
friend class DefaultSlotMap;
|
|
|
|
friend class DetermineTypesVisitor;
|
|
|
|
friend class RecursiveSlotMap;
|
|
|
|
friend class UpRecursiveSlotMap;
|
2010-08-27 21:18:58 -07:00
|
|
|
friend MonitorResult MonitorLoopEdge(JSContext*, uintN&);
|
2010-07-20 23:41:46 -07:00
|
|
|
friend TracePointAction MonitorTracePoint(JSContext*, uintN &inlineCallCount,
|
2010-07-20 18:01:16 -07:00
|
|
|
bool &blacklist);
|
2010-09-29 11:31:52 -07:00
|
|
|
friend AbortResult AbortRecording(JSContext*, const char*);
|
2010-08-09 22:43:33 -07:00
|
|
|
friend class BoxArg;
|
2010-09-21 05:58:19 -07:00
|
|
|
friend void TraceMonitor::sweep();
|
2009-10-26 13:06:17 -07:00
|
|
|
|
2010-09-21 05:58:19 -07:00
|
|
|
public:
|
2009-11-12 18:34:24 -08:00
|
|
|
static bool JS_REQUIRES_STACK
|
2009-12-04 19:59:55 -08:00
|
|
|
startRecorder(JSContext*, VMSideExit*, VMFragment*,
|
2010-06-21 12:28:56 -07:00
|
|
|
unsigned stackSlots, unsigned ngslots, JSValueType* typeMap,
|
2010-10-19 11:08:25 -07:00
|
|
|
VMSideExit* expectedInnerExit, JSScript* outerScript, jsbytecode* outerPC,
|
2010-08-27 21:18:58 -07:00
|
|
|
uint32 outerArgc, bool speculate);
|
2009-10-26 13:06:17 -07:00
|
|
|
|
|
|
|
/* Accessors. */
|
2009-11-12 18:34:24 -08:00
|
|
|
/* Accessor for the recorder's fragment. */
VMFragment* getFragment() const { return fragment; }
|
2009-12-04 19:59:55 -08:00
|
|
|
/* Accessor for the recorder's tree fragment. */
TreeFragment* getTree() const { return tree; }
|
2009-11-12 18:34:24 -08:00
|
|
|
/* True when the trace monitor reports it is out of memory (delegates to traceMonitor). */
bool outOfMemory() const { return traceMonitor->outOfMemory(); }
|
2010-06-24 15:18:07 -07:00
|
|
|
/* Accessor for the oracle used by this recorder. */
Oracle* getOracle() const { return oracle; }
|
2009-10-26 13:06:17 -07:00
|
|
|
|
|
|
|
/* Entry points / callbacks from the interpreter. */
|
2009-11-12 18:34:24 -08:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus monitorRecording(JSOp op);
|
2010-08-27 21:18:58 -07:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus record_EnterFrame();
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus record_LeaveFrame();
|
2010-06-02 16:01:36 -07:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus record_SetPropHit(PropertyCacheEntry* entry,
|
2010-08-29 11:57:08 -07:00
|
|
|
const js::Shape* shape);
|
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus record_DefLocalFunSetSlot(uint32 slot,
|
|
|
|
JSObject* obj);
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_REQUIRES_STACK AbortableRecordingStatus record_NativeCallComplete();
|
2009-09-29 19:05:19 -07:00
|
|
|
void forgetGuardedShapesForObject(JSObject* obj);
|
2008-11-13 00:30:20 -08:00
|
|
|
|
2009-09-22 15:02:50 -07:00
|
|
|
#ifdef DEBUG
|
2009-10-26 13:06:17 -07:00
|
|
|
/* Debug printing functionality to emit printf() on trace. */
|
2009-09-29 10:24:37 -07:00
|
|
|
JS_REQUIRES_STACK void tprint(const char *format, int count, nanojit::LIns *insa[]);
|
|
|
|
JS_REQUIRES_STACK void tprint(const char *format);
|
|
|
|
JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins);
|
|
|
|
JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins1,
|
|
|
|
nanojit::LIns *ins2);
|
|
|
|
JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins1,
|
|
|
|
nanojit::LIns *ins2, nanojit::LIns *ins3);
|
|
|
|
JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins1,
|
|
|
|
nanojit::LIns *ins2, nanojit::LIns *ins3,
|
|
|
|
nanojit::LIns *ins4);
|
|
|
|
JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins1,
|
|
|
|
nanojit::LIns *ins2, nanojit::LIns *ins3,
|
|
|
|
nanojit::LIns *ins4, nanojit::LIns *ins5);
|
|
|
|
JS_REQUIRES_STACK void tprint(const char *format, nanojit::LIns *ins1,
|
|
|
|
nanojit::LIns *ins2, nanojit::LIns *ins3,
|
|
|
|
nanojit::LIns *ins4, nanojit::LIns *ins5,
|
|
|
|
nanojit::LIns *ins6);
|
2009-09-22 15:02:50 -07:00
|
|
|
#endif
|
2008-06-22 09:30:04 -07:00
|
|
|
};
|
2009-10-26 13:06:17 -07:00
|
|
|
|
2010-09-15 22:27:17 -07:00
|
|
|
#define TRACING_ENABLED(cx) ((cx)->traceJitEnabled)
|
|
|
|
#define REGEX_JIT_ENABLED(cx) ((cx)->traceJitEnabled || (cx)->methodJitEnabled)
|
2008-09-09 22:22:52 -07:00
|
|
|
#define TRACE_RECORDER(cx) (JS_TRACE_MONITOR(cx).recorder)
|
|
|
|
#define SET_TRACE_RECORDER(cx,tr) (JS_TRACE_MONITOR(cx).recorder = (tr))
|
2008-05-30 18:58:43 -07:00
|
|
|
|
2009-04-05 21:17:22 -07:00
|
|
|
#define JSOP_IN_RANGE(op,lo,hi) (uintN((op) - (lo)) <= uintN((hi) - (lo)))
|
|
|
|
#define JSOP_IS_BINARY(op) JSOP_IN_RANGE(op, JSOP_BITOR, JSOP_MOD)
|
|
|
|
#define JSOP_IS_UNARY(op) JSOP_IN_RANGE(op, JSOP_NEG, JSOP_POS)
|
|
|
|
#define JSOP_IS_EQUALITY(op) JSOP_IN_RANGE(op, JSOP_EQ, JSOP_NE)
|
2008-11-13 00:30:20 -08:00
|
|
|
|
2008-12-21 03:55:09 -08:00
|
|
|
#define TRACE_ARGS_(x,args) \
|
2008-09-09 22:22:52 -07:00
|
|
|
JS_BEGIN_MACRO \
|
2009-09-23 18:21:41 -07:00
|
|
|
if (TraceRecorder* tr_ = TRACE_RECORDER(cx)) { \
|
2009-09-28 09:03:21 -07:00
|
|
|
AbortableRecordingStatus status = tr_->record_##x args; \
|
2010-04-26 09:30:06 -07:00
|
|
|
if (StatusAbortsRecorderIfActive(status)) { \
|
|
|
|
if (TRACE_RECORDER(cx)) { \
|
|
|
|
JS_ASSERT(TRACE_RECORDER(cx) == tr_); \
|
|
|
|
AbortRecording(cx, #x); \
|
|
|
|
} \
|
2009-09-28 09:03:21 -07:00
|
|
|
if (status == ARECORD_ERROR) \
|
2009-04-30 17:30:46 -07:00
|
|
|
goto error; \
|
|
|
|
} \
|
2009-09-28 09:03:21 -07:00
|
|
|
JS_ASSERT(status != ARECORD_IMACRO); \
|
2009-04-30 17:30:46 -07:00
|
|
|
} \
|
2008-07-16 22:58:06 -07:00
|
|
|
JS_END_MACRO
|
|
|
|
|
2008-12-21 03:55:09 -08:00
|
|
|
#define TRACE_ARGS(x,args) TRACE_ARGS_(x, args)
|
2008-09-18 16:53:09 -07:00
|
|
|
#define TRACE_0(x) TRACE_ARGS(x, ())
|
2008-09-09 22:22:52 -07:00
|
|
|
#define TRACE_1(x,a) TRACE_ARGS(x, (a))
|
|
|
|
#define TRACE_2(x,a,b) TRACE_ARGS(x, (a, b))
|
|
|
|
|
2010-04-26 09:30:06 -07:00
|
|
|
extern JS_REQUIRES_STACK MonitorResult
|
2010-08-27 21:18:58 -07:00
|
|
|
MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount);
|
2010-07-20 18:01:16 -07:00
|
|
|
|
|
|
|
extern JS_REQUIRES_STACK TracePointAction
|
2010-07-20 23:41:46 -07:00
|
|
|
MonitorTracePoint(JSContext*, uintN& inlineCallCount, bool& blacklist);
|
2008-08-28 22:33:22 -07:00
|
|
|
|
2010-09-29 11:31:52 -07:00
|
|
|
extern JS_REQUIRES_STACK TraceRecorder::AbortResult
|
2010-01-22 14:49:18 -08:00
|
|
|
AbortRecording(JSContext* cx, const char* reason);
|
2008-06-28 18:19:21 -07:00
|
|
|
|
2008-07-05 19:15:00 -07:00
|
|
|
extern void
|
2010-01-22 14:49:18 -08:00
|
|
|
InitJIT(TraceMonitor *tm);
|
2008-07-05 19:15:00 -07:00
|
|
|
|
2008-07-17 10:22:40 -07:00
|
|
|
extern void
|
2010-01-22 14:49:18 -08:00
|
|
|
FinishJIT(TraceMonitor *tm);
|
2008-07-17 10:22:40 -07:00
|
|
|
|
2009-02-09 16:58:39 -08:00
|
|
|
extern void
|
2010-01-22 14:49:18 -08:00
|
|
|
PurgeScriptFragments(JSContext* cx, JSScript* script);
|
2009-02-09 16:58:39 -08:00
|
|
|
|
2009-03-31 20:51:01 -07:00
|
|
|
extern bool
|
2010-01-22 14:49:18 -08:00
|
|
|
OverfullJITCache(TraceMonitor* tm);
|
2009-09-12 23:04:27 -07:00
|
|
|
|
|
|
|
extern void
|
2010-01-22 14:49:18 -08:00
|
|
|
FlushJITCache(JSContext* cx);
|
2009-03-31 20:51:01 -07:00
|
|
|
|
2009-01-28 07:24:35 -08:00
|
|
|
extern JSObject *
|
2010-01-22 14:49:18 -08:00
|
|
|
GetBuiltinFunction(JSContext *cx, uintN index);
|
2009-01-28 07:24:35 -08:00
|
|
|
|
2009-04-22 16:52:59 -07:00
|
|
|
extern void
|
2010-01-22 14:49:18 -08:00
|
|
|
SetMaxCodeCacheBytes(JSContext* cx, uint32 bytes);
|
2009-04-22 16:52:59 -07:00
|
|
|
|
2010-06-22 09:43:21 -07:00
|
|
|
extern void
|
|
|
|
ExternNativeToValue(JSContext* cx, Value& v, JSValueType type, double* slot);
|
2009-09-09 11:40:21 -07:00
|
|
|
|
2009-06-16 01:30:01 -07:00
|
|
|
#ifdef MOZ_TRACEVIS
|
|
|
|
|
|
|
|
extern JS_FRIEND_API(bool)
|
2010-02-16 13:41:40 -08:00
|
|
|
StartTraceVis(const char* filename);
|
2009-06-16 01:30:01 -07:00
|
|
|
|
|
|
|
extern JS_FRIEND_API(JSBool)
|
2010-09-20 12:43:52 -07:00
|
|
|
StartTraceVisNative(JSContext *cx, uintN argc, jsval *vp);
|
2009-06-16 01:30:01 -07:00
|
|
|
|
|
|
|
extern JS_FRIEND_API(bool)
|
2010-02-16 13:41:40 -08:00
|
|
|
StopTraceVis();
|
2009-06-16 01:30:01 -07:00
|
|
|
|
|
|
|
extern JS_FRIEND_API(JSBool)
|
2010-09-20 12:43:52 -07:00
|
|
|
StopTraceVisNative(JSContext *cx, uintN argc, jsval *vp);
|
2009-06-16 01:30:01 -07:00
|
|
|
|
|
|
|
/* Must contain no more than 16 items. */
|
|
|
|
/*
 * TraceVis activity/event codes.  Each value is packed into the top 4 bits of
 * a log record (see MS64_MASK and LogTraceVisState below), hence the 16-item
 * limit noted above.
 */
enum TraceVisState {
|
2009-09-10 16:06:34 -07:00
|
|
|
// Special: means we returned from current activity to last
|
2009-06-16 01:30:01 -07:00
|
|
|
S_EXITLAST,
|
2009-09-10 16:06:34 -07:00
|
|
|
// Activities
|
2009-06-16 01:30:01 -07:00
|
|
|
S_INTERP,
|
|
|
|
S_MONITOR,
|
|
|
|
S_RECORD,
|
|
|
|
S_COMPILE,
|
|
|
|
S_EXECUTE,
|
2009-09-10 16:06:34 -07:00
|
|
|
S_NATIVE,
|
|
|
|
// Events: these all have (bit 3) == 1.
|
|
|
|
S_RESET = 8
|
2009-06-16 01:30:01 -07:00
|
|
|
};
|
|
|
|
|
|
|
|
/* Reason for an exit to the interpreter. */
|
|
|
|
/*
 * Reason an activity was exited.  Packed into bits 55-59 of each log record
 * (see MR64_MASK below), so there may be at most 32 values.
 */
enum TraceVisExitReason {
|
|
|
|
R_NONE,
|
|
|
|
R_ABORT,
|
2010-01-22 14:49:18 -08:00
|
|
|
/* Reasons in MonitorLoopEdge */
|
2009-06-16 01:30:01 -07:00
|
|
|
R_INNER_SIDE_EXIT,
|
|
|
|
R_DOUBLES,
|
|
|
|
R_CALLBACK_PENDING,
|
|
|
|
R_OOM_GETANCHOR,
|
|
|
|
R_BACKED_OFF,
|
|
|
|
R_COLD,
|
|
|
|
R_FAIL_RECORD_TREE,
|
|
|
|
R_MAX_PEERS,
|
|
|
|
R_FAIL_EXECUTE_TREE,
|
|
|
|
R_FAIL_STABILIZE,
|
|
|
|
R_FAIL_EXTEND_FLUSH,
|
|
|
|
R_FAIL_EXTEND_MAX_BRANCHES,
|
|
|
|
R_FAIL_EXTEND_START,
|
|
|
|
R_FAIL_EXTEND_COLD,
|
2010-04-19 07:21:10 -07:00
|
|
|
R_FAIL_SCOPE_CHAIN_CHECK,
|
2009-06-16 01:30:01 -07:00
|
|
|
R_NO_EXTEND_OUTER,
|
|
|
|
// NOTE(review): the *_EXIT values below appear to describe side exits taken
// while executing a tree -- confirm against the exit paths before relying on this.
R_MISMATCH_EXIT,
|
|
|
|
R_OOM_EXIT,
|
|
|
|
R_TIMEOUT_EXIT,
|
|
|
|
R_DEEP_BAIL_EXIT,
|
|
|
|
R_STATUS_EXIT,
|
|
|
|
R_OTHER_EXIT
|
|
|
|
};
|
|
|
|
|
2009-09-10 16:06:34 -07:00
|
|
|
/*
 * Reason codes for flush events, logged through LogTraceVisEvent below
 * (stored in the same record field as TraceVisExitReason).
 */
enum TraceVisFlushReason {
|
|
|
|
FR_DEEP_BAIL,
|
|
|
|
FR_OOM,
|
|
|
|
FR_GLOBAL_SHAPE_MISMATCH,
|
|
|
|
FR_GLOBALS_FULL
|
|
|
|
};
|
|
|
|
|
2009-07-30 11:48:02 -07:00
|
|
|
const unsigned long long MS64_MASK = 0xfull << 60;
|
|
|
|
const unsigned long long MR64_MASK = 0x1full << 55;
|
2009-06-16 01:30:01 -07:00
|
|
|
const unsigned long long MT64_MASK = ~(MS64_MASK | MR64_MASK);
|
|
|
|
|
|
|
|
extern FILE* traceVisLogFile;
|
2009-07-30 11:48:02 -07:00
|
|
|
extern JSHashTable *traceVisScriptTable;
|
|
|
|
|
|
|
|
extern JS_FRIEND_API(void)
|
2010-01-22 14:49:18 -08:00
|
|
|
StoreTraceVisState(JSContext *cx, TraceVisState s, TraceVisExitReason r);
|
2009-06-16 01:30:01 -07:00
|
|
|
|
|
|
|
/*
 * Append one TraceVis record to the binary log: state s, reason r and a
 * timestamp are packed into a single 64-bit word (layout below).  When
 * traceVisScriptTable is active, the transition is also handed to
 * StoreTraceVisState.
 */
static inline void
|
2010-01-22 14:49:18 -08:00
|
|
|
LogTraceVisState(JSContext *cx, TraceVisState s, TraceVisExitReason r)
|
2009-06-16 01:30:01 -07:00
|
|
|
{
|
|
|
|
if (traceVisLogFile) {
|
|
|
|
unsigned long long sllu = s;
|
|
|
|
unsigned long long rllu = r;
|
|
|
|
// Bits 60-63: state (MS64_MASK); bits 55-59: reason (MR64_MASK);
// bits 0-54: rdtsc() timestamp (MT64_MASK).
unsigned long long d = (sllu << 60) | (rllu << 55) | (rdtsc() & MT64_MASK);
|
|
|
|
fwrite(&d, sizeof(d), 1, traceVisLogFile);
|
|
|
|
}
|
2009-07-30 11:48:02 -07:00
|
|
|
if (traceVisScriptTable) {
|
2010-01-22 14:49:18 -08:00
|
|
|
StoreTraceVisState(cx, s, r);
|
2009-07-30 11:48:02 -07:00
|
|
|
}
|
2009-06-16 01:30:01 -07:00
|
|
|
}
|
|
|
|
|
2009-09-10 16:06:34 -07:00
|
|
|
/*
|
2010-01-22 14:49:18 -08:00
|
|
|
* Although this runs the same code as LogTraceVisState, it is a separate
|
2009-09-10 16:06:34 -07:00
|
|
|
* function because the meaning of the log entry is different. Also, the entry
|
|
|
|
* formats may diverge someday.
|
|
|
|
*/
|
|
|
|
/*
 * Log a one-shot event (a flush): the TraceVisFlushReason is stored in the
 * record's reason field via a cast.  See the comment above for why this is a
 * separate function from LogTraceVisState.
 */
static inline void
|
2010-01-22 14:49:18 -08:00
|
|
|
LogTraceVisEvent(JSContext *cx, TraceVisState s, TraceVisFlushReason r)
|
2009-09-10 16:06:34 -07:00
|
|
|
{
|
2010-01-22 14:49:18 -08:00
|
|
|
LogTraceVisState(cx, s, (TraceVisExitReason) r);
|
2009-09-10 16:06:34 -07:00
|
|
|
}
|
|
|
|
|
2009-07-27 18:40:12 -07:00
|
|
|
/* Log the transition into activity state s with reason r. */
static inline void
|
2010-01-22 14:49:18 -08:00
|
|
|
EnterTraceVisState(JSContext *cx, TraceVisState s, TraceVisExitReason r)
|
2009-06-16 01:30:01 -07:00
|
|
|
{
|
2010-01-22 14:49:18 -08:00
|
|
|
LogTraceVisState(cx, s, r);
|
2009-06-16 01:30:01 -07:00
|
|
|
}
|
|
|
|
|
2009-07-27 18:40:12 -07:00
|
|
|
/* Log a return to the previous activity (S_EXITLAST) with exit reason r. */
static inline void
|
2010-01-22 14:49:18 -08:00
|
|
|
ExitTraceVisState(JSContext *cx, TraceVisExitReason r)
|
2009-06-16 01:30:01 -07:00
|
|
|
{
|
2010-01-22 14:49:18 -08:00
|
|
|
LogTraceVisState(cx, S_EXITLAST, r);
|
2009-06-16 01:30:01 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * RAII helper: construction logs entry into state s (reason R_NONE); the
 * destructor logs the exit with whatever reason is then stored in r.
 */
struct TraceVisStateObj {
|
|
|
|
// Exit reason passed to ExitTraceVisState by the destructor; starts as R_NONE.
TraceVisExitReason r;
|
2009-07-30 11:48:02 -07:00
|
|
|
// Context captured at construction for use in the destructor.
JSContext *mCx;
|
2009-06-16 01:30:01 -07:00
|
|
|
|
2009-07-30 11:48:02 -07:00
|
|
|
inline TraceVisStateObj(JSContext *cx, TraceVisState s) : r(R_NONE)
|
2009-06-16 01:30:01 -07:00
|
|
|
{
|
2010-01-22 14:49:18 -08:00
|
|
|
EnterTraceVisState(cx, s, R_NONE);
|
2009-07-30 11:48:02 -07:00
|
|
|
mCx = cx;
|
2009-06-16 01:30:01 -07:00
|
|
|
}
|
|
|
|
inline ~TraceVisStateObj()
|
|
|
|
{
|
2010-01-22 14:49:18 -08:00
|
|
|
ExitTraceVisState(mCx, r);
|
2009-06-16 01:30:01 -07:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
#endif /* MOZ_TRACEVIS */
|
|
|
|
|
2010-01-22 14:49:18 -08:00
|
|
|
} /* namespace js */
|
2009-08-18 15:43:17 -07:00
|
|
|
|
2008-09-11 15:53:00 -07:00
|
|
|
#else /* !JS_TRACER */
|
|
|
|
|
2008-09-22 13:04:47 -07:00
|
|
|
#define TRACE_0(x) ((void)0)
|
2008-09-11 15:53:00 -07:00
|
|
|
#define TRACE_1(x,a) ((void)0)
|
|
|
|
#define TRACE_2(x,a,b) ((void)0)
|
|
|
|
|
|
|
|
#endif /* !JS_TRACER */
|
2008-08-12 08:36:29 -07:00
|
|
|
|
2008-05-31 15:29:54 -07:00
|
|
|
#endif /* jstracer_h___ */
|