/* gecko/js/src/nanojit/LIR.h */
/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
/* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is [Open Source Virtual Machine].
*
* The Initial Developer of the Original Code is
* Adobe System Incorporated.
* Portions created by the Initial Developer are Copyright (C) 2004-2007
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
* Adobe AS3 Team
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#ifndef __nanojit_LIR__
#define __nanojit_LIR__
/**
* Fundamentally, the operands of the various opcodes can be grouped along
* two dimensions. One dimension is size: can the operand fit into a 32-bit
* register, or not? The other dimension is whether the operand is an integer
* (including pointers) or a floating-point value. In all comments below,
* "integer" means integer of any size, including 64-bit, unless otherwise
* specified. All floating-point values are always 64-bit. Below, "quad" is
* used for a 64-bit value that might be either integer or floating-point.
*/
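/*
* For example (illustrative): LIR_add operates on 32-bit integers,
* LIR_qiadd on 64-bit integers, and LIR_fadd on 64-bit floats; the
* latter two are both "quads" in the sense above.
*/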
namespace nanojit
{
enum LOpcode
#if defined(_MSC_VER) && _MSC_VER >= 1400
: unsigned
#endif
{
// flags; upper bits reserved
LIR64 = 0x40, // result is double or quad
#define OPDEF(op, number, args, repkind) \
LIR_##op = (number),
#define OPDEF64(op, number, args, repkind) \
LIR_##op = ((number) | LIR64),
#include "LIRopcode.tbl"
LIR_sentinel
#undef OPDEF
#undef OPDEF64
};
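// For example, a LIRopcode.tbl line such as (hypothetical opcode number)
//     OPDEF(add, 27, 2, Op2)
// expands above to
//     LIR_add = (27),
// while an OPDEF64 line additionally sets the LIR64 flag bit in the
// opcode value.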
#if defined NANOJIT_64BIT
#define LIR_ldp LIR_ldq
#define LIR_piadd LIR_qiadd
#define LIR_piand LIR_qiand
#define LIR_pilsh LIR_qilsh
#define LIR_pcmov LIR_qcmov
#define LIR_pior LIR_qior
#else
#define LIR_ldp LIR_ld
#define LIR_piadd LIR_add
#define LIR_piand LIR_and
#define LIR_pilsh LIR_lsh
#define LIR_pcmov LIR_cmov
#define LIR_pior LIR_or
#endif
struct GuardRecord;
struct SideExit;
struct Page;
enum AbiKind {
ABI_FASTCALL,
ABI_THISCALL,
ABI_STDCALL,
ABI_CDECL
};
enum ArgSize {
ARGSIZE_NONE = 0,
ARGSIZE_F = 1,
ARGSIZE_LO = 2,
ARGSIZE_Q = 3,
_ARGSIZE_MASK_INT = 2,
_ARGSIZE_MASK_ANY = 3
};
struct CallInfo
{
uintptr_t _address;
uint32_t _argtypes:18; // 9 2-bit ARGSIZE fields: the return type plus up to 8 argument types
uint8_t _cse:1; // true if no side effects; such calls can be CSE'd
uint8_t _fold:1; // true if no side effects; such calls can be constant-folded
AbiKind _abi:3;
verbose_only ( const char* _name; )
uint32_t FASTCALL _count_args(uint32_t mask) const;
uint32_t get_sizes(ArgSize*) const;
inline uint32_t FASTCALL count_args() const {
return _count_args(_ARGSIZE_MASK_ANY);
}
inline uint32_t FASTCALL count_iargs() const {
return _count_args(_ARGSIZE_MASK_INT);
}
// fargs = args - iargs
};
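// A sketch of the _argtypes packing; that the return type occupies the
// two lowest bits is an assumption drawn from _count_args(), which
// shifts it off before counting. A call like "double f(int32_t)" would
// be described as:
//     _argtypes = (ARGSIZE_LO << 2) | ARGSIZE_F;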
/*
* Record for extra data used to compile switches as jump tables.
*/
struct SwitchInfo
{
NIns** table; // Jump table; a jump address is NIns*
uint32_t count; // Number of table entries
// Index value at last execution of the switch. The index value
// is the offset into the jump table. Thus it is computed as
// (switch expression) - (lowest case value).
uint32_t index;
};
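// For example, if the lowest case value is 10 and the switch expression
// evaluates to 12, then index == 2 and table[2] holds the jump address.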
inline bool isCseOpcode(LOpcode op) {
op = LOpcode(op & ~LIR64);
return op >= LIR_int && op <= LIR_uge;
}
inline bool isRetOpcode(LOpcode op) {
return (op & ~LIR64) == LIR_ret;
}
// The opcode is not logically part of the Reservation, but we include it
// in this struct to ensure that opcode plus the Reservation fits in a
// single word. Yuk.
struct Reservation
{
uint32_t arIndex:16; // index into stack frame. displ is -4*arIndex
Register reg:7; // register; UnknownReg means not in a register
uint32_t used:1; // when set, the reservation is active
LOpcode opcode:8;
inline void init() {
reg = UnknownReg;
arIndex = 0;
used = 1;
}
inline void clear() {
used = 0;
}
};
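// Note that the field widths above sum to 32 bits (16+7+1+8), so the
// opcode plus the reservation state pack into a single 32-bit word.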
//-----------------------------------------------------------------------
// Low-level instructions. This is a bit complicated, because we have a
// variable-width representation to minimise space usage.
//
// - Instruction size is always an integral multiple of word size.
//
// - Every instruction has at least one word, holding the opcode and the
// reservation info. That word is in class LIns.
//
// - Beyond that, most instructions have 1, 2 or 3 extra words. These
// extra words are in classes LInsOp1, LInsOp2, etc (collectively called
// "LInsXYZ" in what follows). Each LInsXYZ class also contains a word,
// accessible by the 'ins' member, which holds the LIns data; its type
// is void* (which is the same size as LIns) rather than LIns to avoid a
// recursive dependency between LIns and LInsXYZ.
//
// - LIR is written forward, but read backwards. When reading backwards,
// the opcode must be at a predictable offset from the end of the
// instruction, one that isn't affected by instruction width. Therefore,
// the LIns word (which contains the opcode) is always the *last* word in
// an instruction.
//
// - Each instruction is created by casting pre-allocated bytes from a
// LirBuffer to the LInsXYZ type. Therefore there are no constructors
// for LIns or LInsXYZ.
//
// - The standard handle for an instruction is a LIns*. This actually
// points to the LIns word, ie. to the final word in the instruction.
// This is a bit odd, but it allows the instruction's opcode to be
// easily accessed. Once you've looked at the opcode and know what kind
// of instruction it is, if you want to access any of the other words,
// you need to use toLInsXYZ(), which takes the LIns* and gives you an
// LInsXYZ*, ie. the pointer to the actual start of the instruction's
// bytes. From there you can access the instruction-specific extra
// words.
//
// - However, from outside class LIns, LInsXYZ isn't visible, nor is
// toLInsXYZ() -- from outside LIns, all LIR instructions are handled
// via LIns pointers and get/set methods are used for all LIns/LInsXYZ
// accesses. In fact, all data members in LInsXYZ are private and can
// only be accessed by LIns, which is a friend class. The only thing
// anyone outside LIns can do with a LInsXYZ is call getLIns().
//
// - An example Op2 instruction and the likely pointers to it (each line
// represents a word, and pointers to a line point to the start of the
// word on that line):
//
// [ oprnd_2 <-- LInsOp2* insOp2 == toLInsOp2(ins)
// oprnd_1
// opcode + resv ] <-- LIns* ins
//
// - LIR_skip instructions are more complicated. They allow an arbitrary
// blob of data (the "payload") to be placed in the LIR stream. The
// size of the payload is always a multiple of the word size. A skip
// instruction's operand points to the previous instruction, which lets
// the payload be skipped over when reading backwards. Here's an
// example of a skip instruction with a 3-word payload preceded by an
// LInsOp1:
//
// [ oprnd_1
// +-> opcode + resv ]
// | [ data
// | data
// | data
// +---- prevLIns <-- LInsSk* insSk == toLInsSk(ins)
// opcode==LIR_skip + resv ] <-- LIns* ins
//
// Skips are also used to link code pages. If the first instruction on
// a page isn't a LIR_start, it will be a skip, and the skip's operand
// will point to the last LIns on the previous page. In this case there
// isn't a payload as such; in fact, the previous page might be at a
// higher address, ie. the operand might point forward rather than
// backward.
//
// LInsSk has the same layout as LInsOp1, but we represent it as a
// different class because there are some places where we treat
// skips specially and so having it separate seems like a good idea.
//
// - Call instructions (LIR_call, LIR_fcall, LIR_calli, LIR_fcalli) are
// also more complicated. They are preceded by the arguments to the
// call, which are laid out in reverse order. For example, a call with
// 3 args will look like this:
//
// [ arg #2
// arg #1
// arg #0
// argc <-- LInsC insC == toLInsC(ins)
// ci
// opcode + resv ] <-- LIns* ins
//
// - Various things about the size and layout of LIns and LInsXYZ are
// statically checked in staticSanityCheck(). In particular, this is
// worthwhile because there's nothing that guarantees that all the
// LInsXYZ classes have a size that is a multiple of word size (but in
// practice all sane compilers use a layout that results in this). We
// also check that every LInsXYZ is word-aligned in
// LirBuffer::makeRoom(); this seems sensible to avoid potential
// slowdowns due to misalignment. It relies on pages themselves being
// word-aligned, which is extremely likely.
//
// - There is an enum, LInsRepKind, with one member for each of the
// LInsXYZ kinds. Each opcode is categorised with its LInsRepKind value
// in LIRopcode.tbl, and this is used in various places.
//-----------------------------------------------------------------------
enum LInsRepKind {
// LRK_XYZ corresponds to class LInsXYZ.
LRK_Op0,
LRK_Op1,
LRK_Op2,
LRK_Sti,
LRK_Sk,
LRK_C,
LRK_P,
LRK_I,
LRK_I64,
LRK_None // this one is used for unused opcode numbers
};
// 0-operand form. Used for LIR_start and LIR_label.
class LInsOp0
{
private:
friend class LIns;
void* ins;
public:
LIns* getLIns() { return (LIns*)&ins; };
};
// 1-operand form. Used for LIR_ret, LIR_ov, unary arithmetic/logic ops,
// etc.
class LInsOp1
{
private:
friend class LIns;
// Nb: oprnd_1 position relative to 'ins' must match that in
// LIns{Op2,Sti}. Checked in LirBufWriter::LirBufWriter().
LIns* oprnd_1;
void* ins;
public:
LIns* getLIns() { return (LIns*)&ins; };
};
// 2-operand form. Used for loads, guards, branches, comparisons, binary
// arithmetic/logic ops, etc.
class LInsOp2
{
private:
friend class LIns;
// Nb: oprnd_{1,2} position relative to 'ins' must match that in
// LIns{Op1,Sti}. Checked in LirBufWriter::LirBufWriter().
LIns* oprnd_2;
LIns* oprnd_1;
void* ins;
public:
LIns* getLIns() { return (LIns*)&ins; };
};
// Used for LIR_sti and LIR_stqi.
class LInsSti
{
private:
friend class LIns;
int32_t disp;
// Nb: oprnd_{1,2} position relative to 'ins' must match that in
// LIns{Op1,Op2}. Checked in LIns::staticSanityCheck().
LIns* oprnd_2;
LIns* oprnd_1;
void* ins;
public:
LIns* getLIns() { return (LIns*)&ins; };
};
// Used for LIR_skip.
class LInsSk
{
private:
friend class LIns;
LIns* prevLIns;
void* ins;
public:
LIns* getLIns() { return (LIns*)&ins; };
};
// Used for all variants of LIR_call.
class LInsC
{
private:
friend class LIns;
uintptr_t argc:8;
const CallInfo* ci;
void* ins;
public:
LIns* getLIns() { return (LIns*)&ins; };
};
// Used for LIR_param.
class LInsP
{
private:
friend class LIns;
uintptr_t arg:8;
uintptr_t kind:8;
void* ins;
public:
LIns* getLIns() { return (LIns*)&ins; };
};
// Used for LIR_int and LIR_alloc.
class LInsI
{
private:
friend class LIns;
int32_t imm32;
void* ins;
public:
LIns* getLIns() { return (LIns*)&ins; };
};
// Used for LIR_quad.
class LInsI64
{
private:
friend class LIns;
int32_t imm64_0;
int32_t imm64_1;
void* ins;
public:
LIns* getLIns() { return (LIns*)&ins; };
};
// Used only as a placeholder for OPDEF macros for unused opcodes in
// LIRopcode.tbl.
class LInsNone
{
};
class LIns
{
private:
// Last word: fields shared by all LIns kinds. The reservation fields
// are read/written during assembly.
Reservation lastWord;
// LIns-to-LInsXYZ converters.
LInsOp0* toLInsOp0() const { return (LInsOp0*)( uintptr_t(this+1) - sizeof(LInsOp0) ); }
LInsOp1* toLInsOp1() const { return (LInsOp1*)( uintptr_t(this+1) - sizeof(LInsOp1) ); }
LInsOp2* toLInsOp2() const { return (LInsOp2*)( uintptr_t(this+1) - sizeof(LInsOp2) ); }
LInsSti* toLInsSti() const { return (LInsSti*)( uintptr_t(this+1) - sizeof(LInsSti) ); }
LInsSk* toLInsSk() const { return (LInsSk* )( uintptr_t(this+1) - sizeof(LInsSk ) ); }
LInsC* toLInsC() const { return (LInsC* )( uintptr_t(this+1) - sizeof(LInsC ) ); }
LInsP* toLInsP() const { return (LInsP* )( uintptr_t(this+1) - sizeof(LInsP ) ); }
LInsI* toLInsI() const { return (LInsI* )( uintptr_t(this+1) - sizeof(LInsI ) ); }
LInsI64* toLInsI64() const { return (LInsI64*)( uintptr_t(this+1) - sizeof(LInsI64) ); }
// This is never called, but that's ok because it contains only static
// assertions.
void staticSanityCheck()
{
// LIns must be word-sized.
NanoStaticAssert(sizeof(LIns) == 1*sizeof(void*));
// LInsXYZ have expected sizes too.
NanoStaticAssert(sizeof(LInsOp0) == 1*sizeof(void*));
NanoStaticAssert(sizeof(LInsOp1) == 2*sizeof(void*));
NanoStaticAssert(sizeof(LInsOp2) == 3*sizeof(void*));
NanoStaticAssert(sizeof(LInsSti) == 4*sizeof(void*));
NanoStaticAssert(sizeof(LInsSk) == 2*sizeof(void*));
NanoStaticAssert(sizeof(LInsC) == 3*sizeof(void*));
NanoStaticAssert(sizeof(LInsP) == 2*sizeof(void*));
NanoStaticAssert(sizeof(LInsI) == 2*sizeof(void*));
#if defined NANOJIT_64BIT
NanoStaticAssert(sizeof(LInsI64) == 2*sizeof(void*));
#else
NanoStaticAssert(sizeof(LInsI64) == 3*sizeof(void*));
#endif
// oprnd_1 must be in the same position in LIns{Op1,Op2,Sti}
// because oprnd1() is used for all of them.
NanoStaticAssert( (offsetof(LInsOp1, ins) - offsetof(LInsOp1, oprnd_1)) ==
(offsetof(LInsOp2, ins) - offsetof(LInsOp2, oprnd_1)) );
NanoStaticAssert( (offsetof(LInsOp2, ins) - offsetof(LInsOp2, oprnd_1)) ==
(offsetof(LInsSti, ins) - offsetof(LInsSti, oprnd_1)) );
// oprnd_2 must be in the same position in LIns{Op2,Sti}
// because oprnd2() is used for both of them.
NanoStaticAssert( (offsetof(LInsOp2, ins) - offsetof(LInsOp2, oprnd_2)) ==
(offsetof(LInsSti, ins) - offsetof(LInsSti, oprnd_2)) );
}
public:
void initLInsOp0(LOpcode opcode) {
lastWord.clear();
lastWord.opcode = opcode;
NanoAssert(isLInsOp0());
}
void initLInsOp1(LOpcode opcode, LIns* oprnd1) {
lastWord.clear();
lastWord.opcode = opcode;
toLInsOp1()->oprnd_1 = oprnd1;
NanoAssert(isLInsOp1());
}
void initLInsOp2(LOpcode opcode, LIns* oprnd1, LIns* oprnd2) {
lastWord.clear();
lastWord.opcode = opcode;
toLInsOp2()->oprnd_1 = oprnd1;
toLInsOp2()->oprnd_2 = oprnd2;
NanoAssert(isLInsOp2());
}
void initLInsSti(LOpcode opcode, LIns* val, LIns* base, int32_t d) {
lastWord.clear();
lastWord.opcode = opcode;
toLInsSti()->oprnd_1 = val;
toLInsSti()->oprnd_2 = base;
toLInsSti()->disp = d;
NanoAssert(isLInsSti());
}
void initLInsSk(LIns* prevLIns) {
lastWord.clear();
lastWord.opcode = LIR_skip;
toLInsSk()->prevLIns = prevLIns;
NanoAssert(isLInsSk());
}
// Nb: this does NOT initialise the arguments. That must be done
// separately.
void initLInsC(LOpcode opcode, int32_t argc, const CallInfo* ci) {
NanoAssert(isU8(argc));
lastWord.clear();
lastWord.opcode = opcode;
toLInsC()->argc = argc;
toLInsC()->ci = ci;
NanoAssert(isLInsC());
}
void initLInsP(int32_t arg, int32_t kind) {
lastWord.clear();
lastWord.opcode = LIR_param;
NanoAssert(isU8(arg) && isU8(kind));
toLInsP()->arg = arg;
toLInsP()->kind = kind;
NanoAssert(isLInsP());
}
void initLInsI(LOpcode opcode, int32_t imm32) {
lastWord.clear();
lastWord.opcode = opcode;
toLInsI()->imm32 = imm32;
NanoAssert(isLInsI());
}
void initLInsI64(LOpcode opcode, int64_t imm64) {
lastWord.clear();
lastWord.opcode = opcode;
toLInsI64()->imm64_0 = int32_t(imm64);
toLInsI64()->imm64_1 = int32_t(imm64 >> 32);
NanoAssert(isLInsI64());
}
LIns* oprnd1() const {
NanoAssert(isLInsOp1() || isLInsOp2() || isStore());
return toLInsOp2()->oprnd_1;
}
LIns* oprnd2() const {
NanoAssert(isLInsOp2() || isStore());
return toLInsOp2()->oprnd_2;
}
LIns* prevLIns() const {
NanoAssert(isop(LIR_skip));
return toLInsSk()->prevLIns;
}
inline LOpcode opcode() const { return lastWord.opcode; }
inline uint8_t paramArg() const { NanoAssert(isop(LIR_param)); return toLInsP()->arg; }
inline uint8_t paramKind() const { NanoAssert(isop(LIR_param)); return toLInsP()->kind; }
inline int32_t imm32() const { NanoAssert(isconst()); return toLInsI()->imm32; }
inline int32_t imm64_0() const { NanoAssert(isconstq()); return toLInsI64()->imm64_0; }
inline int32_t imm64_1() const { NanoAssert(isconstq()); return toLInsI64()->imm64_1; }
uint64_t imm64() const;
double imm64f() const;
Reservation* resv() { return &lastWord; }
void* payload() const;
inline Page* page() { return (Page*) alignTo(this,NJ_PAGE_SIZE); }
inline int32_t size() const {
NanoAssert(isop(LIR_alloc));
return toLInsI()->imm32 << 2;
}
LIns* arg(uint32_t i);
inline int32_t immdisp() const
{
NanoAssert(isStore());
return toLInsSti()->disp;
}
inline void* constvalp() const
{
#ifdef AVMPLUS_64BIT
return (void*)imm64();
#else
return (void*)imm32();
#endif
}
bool isCse() const;
bool isRet() const { return nanojit::isRetOpcode(opcode()); }
bool isop(LOpcode o) const { return opcode() == o; }
#if defined(_DEBUG)
// isLInsXYZ() returns true if the instruction has the LInsXYZ form.
// Note that there is some overlap with other predicates, eg.
// isStore()==isLInsSti(), isCall()==isLInsC(), but that's ok; these
// ones are used only to check that opcodes are appropriate for
// instruction layouts, the others are used for non-debugging
// purposes.
bool isLInsOp0() const;
bool isLInsOp1() const;
bool isLInsOp2() const;
bool isLInsSti() const;
bool isLInsSk() const;
bool isLInsC() const;
bool isLInsP() const;
bool isLInsI() const;
bool isLInsI64() const;
#endif
bool isQuad() const;
bool isCond() const;
bool isFloat() const;
bool isCmp() const;
bool isCall() const {
LOpcode op = LOpcode(opcode() & ~LIR64);
return op == LIR_call;
}
bool isStore() const {
LOpcode op = LOpcode(opcode() & ~LIR64);
return op == LIR_sti;
}
bool isLoad() const {
LOpcode op = opcode();
return op == LIR_ldq || op == LIR_ld || op == LIR_ldc ||
op == LIR_ldqc || op == LIR_ldcs || op == LIR_ldcb;
}
bool isGuard() const {
LOpcode op = opcode();
return op == LIR_x || op == LIR_xf || op == LIR_xt ||
op == LIR_loop || op == LIR_xbarrier || op == LIR_xtbl;
}
// True if the instruction is a 32-bit or smaller constant integer.
bool isconst() const { return opcode() == LIR_int; }
// True if the instruction is a 32-bit or smaller constant integer and
// has the value val when treated as a 32-bit signed integer.
bool isconstval(int32_t val) const;
// True if the instruction is a constant quad value.
bool isconstq() const;
// True if the instruction is a constant pointer value.
bool isconstp() const;
bool isBranch() const {
return isop(LIR_jt) || isop(LIR_jf) || isop(LIR_j);
}
void setTarget(LIns* t);
LIns* getTarget();
GuardRecord *record();
inline uint32_t argc() const {
NanoAssert(isCall());
return toLInsC()->argc;
}
const CallInfo *callInfo() const;
};
typedef LIns* LInsp;
LIns* FASTCALL callArgN(LInsp i, uint32_t n);
extern const uint8_t operandCount[];
class Fragmento; // @todo remove this ; needed for minbuild for some reason?!? Should not be compiling this code at all
// make it a GCObject so we can explicitly delete it early
class LirWriter : public avmplus::GCObject
{
public:
LirWriter *out;
virtual ~LirWriter() {}
LirWriter(LirWriter* out)
: out(out) {}
virtual LInsp ins0(LOpcode v) {
return out->ins0(v);
}
virtual LInsp ins1(LOpcode v, LIns* a) {
return out->ins1(v, a);
}
virtual LInsp ins2(LOpcode v, LIns* a, LIns* b) {
return out->ins2(v, a, b);
}
virtual LInsp insGuard(LOpcode v, LIns *c, LIns *x) {
return out->insGuard(v, c, x);
}
virtual LInsp insBranch(LOpcode v, LInsp condition, LInsp to) {
return out->insBranch(v, condition, to);
}
// arg: 0=first, 1=second, ...
// kind: 0=arg 1=saved-reg
virtual LInsp insParam(int32_t arg, int32_t kind) {
return out->insParam(arg, kind);
}
virtual LInsp insImm(int32_t imm) {
return out->insImm(imm);
}
virtual LInsp insImmq(uint64_t imm) {
return out->insImmq(imm);
}
virtual LInsp insLoad(LOpcode op, LIns* base, LIns* d) {
return out->insLoad(op, base, d);
}
virtual LInsp insStorei(LIns* value, LIns* base, int32_t d) {
return out->insStorei(value, base, d);
}
virtual LInsp insCall(const CallInfo *call, LInsp args[]) {
return out->insCall(call, args);
}
virtual LInsp insAlloc(int32_t size) {
return out->insAlloc(size);
}
virtual LInsp insSkip(size_t size) {
return out->insSkip(size);
}
// convenience
LIns* insLoadi(LIns *base, int disp);
LIns* insLoad(LOpcode op, LIns *base, int disp);
// Inserts a conditional: the result is iftrue when cond is true and
// iffalse otherwise.
LIns* ins_choose(LIns* cond, LIns* iftrue, LIns* iffalse);
// Inserts an integer comparison against zero.
LIns* ins_eq0(LIns* oprnd1);
// Inserts a binary operation where the second operand is an
// integer immediate.
LIns* ins2i(LOpcode op, LIns *oprnd1, int32_t);
LIns* qjoin(LInsp lo, LInsp hi);
LIns* insImmPtr(const void *ptr);
LIns* insImmf(double f);
};
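// LirWriter instances chain into a pipeline: each writer performs its
// own transformation and delegates the rest to 'out'. A minimal sketch
// of a typical stack (setup of lirbuf/gc elided):
//
//     LirBufWriter bufWriter(lirbuf);          // innermost: writes LIR
//     CseFilter    cseFilter(&bufWriter, gc);  // common-subexpression elim.
//     ExprFilter   exprFilter(&cseFilter);     // algebraic simplification
//     // the front end emits through the outermost writer:
//     LIns* sum = exprFilter.ins2(LIR_add, a, b);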
// Each page has a header; the rest of it holds code.
#define NJ_PAGE_CODE_AREA_SZB (NJ_PAGE_SIZE - sizeof(PageHeader))
// The first instruction on a page is always a start instruction, or a
// payload-less skip instruction linking to the previous page. The
// biggest possible instruction would take up the entire rest of the page.
#define NJ_MAX_LINS_SZB (NJ_PAGE_CODE_AREA_SZB - sizeof(LInsSk))
// The maximum skip payload size is determined by the maximum instruction
// size. We require that a skip's payload be adjacent to the skip LIns
// itself.
#define NJ_MAX_SKIP_PAYLOAD_SZB (NJ_MAX_LINS_SZB - sizeof(LInsSk))
#ifdef NJ_VERBOSE
extern const char* lirNames[];
/**
* map address ranges to meaningful names.
*/
class LabelMap MMGC_SUBCLASS_DECL
{
class Entry MMGC_SUBCLASS_DECL
{
public:
Entry(int) : name(0), size(0), align(0) {}
Entry(avmplus::String *n, size_t s, size_t a) : name(n),size(s),align(a) {}
~Entry();
DRCWB(avmplus::String*) name;
size_t size:29, align:3;
};
avmplus::SortedMap<const void*, Entry*, avmplus::LIST_GCObjects> names;
bool addrs, pad[3];
char buf[1000], *end;
void formatAddr(const void *p, char *buf);
public:
avmplus::AvmCore *core;
LabelMap(avmplus::AvmCore *);
~LabelMap();
void add(const void *p, size_t size, size_t align, const char *name);
void add(const void *p, size_t size, size_t align, avmplus::String*);
const char *dup(const char *);
const char *format(const void *p);
void clear();
};
class LirNameMap MMGC_SUBCLASS_DECL
{
template <class Key>
class CountMap: public avmplus::SortedMap<Key, int, avmplus::LIST_NonGCObjects> {
public:
CountMap(avmplus::GC*gc) : avmplus::SortedMap<Key, int, avmplus::LIST_NonGCObjects>(gc) {}
int add(Key k) {
int c = 1;
if (containsKey(k)) {
c = 1+get(k);
}
put(k,c);
return c;
}
};
CountMap<int> lircounts;
CountMap<const CallInfo *> funccounts;
class Entry MMGC_SUBCLASS_DECL
{
public:
Entry(int) : name(0) {}
Entry(avmplus::String *n) : name(n) {}
~Entry();
DRCWB(avmplus::String*) name;
};
avmplus::SortedMap<LInsp, Entry*, avmplus::LIST_GCObjects> names;
LabelMap *labels;
void formatImm(int32_t c, char *buf);
public:
LirNameMap(avmplus::GC *gc, LabelMap *r)
: lircounts(gc),
funccounts(gc),
names(gc),
labels(r)
{}
~LirNameMap();
void addName(LInsp i, const char *s);
bool addName(LInsp i, avmplus::String *s);
void copyName(LInsp i, const char *s, int suffix);
const char *formatRef(LIns *ref);
const char *formatIns(LInsp i);
void formatGuard(LInsp i, char *buf);
};
class VerboseWriter : public LirWriter
{
InsList code;
DWB(LirNameMap*) names;
LogControl* logc;
public:
VerboseWriter(avmplus::GC *gc, LirWriter *out,
LirNameMap* names, LogControl* logc)
: LirWriter(out), code(gc), names(names), logc(logc)
{}
LInsp add(LInsp i) {
if (i)
code.add(i);
return i;
}
LInsp add_flush(LInsp i) {
if ((i = add(i)) != 0)
flush();
return i;
}
void flush()
{
int n = code.size();
if (n) {
for (int i=0; i < n; i++)
logc->printf(" %s\n",names->formatIns(code[i]));
code.clear();
if (n > 1)
logc->printf("\n");
}
}
LIns* insGuard(LOpcode op, LInsp cond, LIns *x) {
return add_flush(out->insGuard(op,cond,x));
}
LIns* insBranch(LOpcode v, LInsp condition, LInsp to) {
return add_flush(out->insBranch(v, condition, to));
}
LIns* ins0(LOpcode v) {
if (v == LIR_label || v == LIR_start) {
flush();
}
return add(out->ins0(v));
}
LIns* ins1(LOpcode v, LInsp a) {
return isRetOpcode(v) ? add_flush(out->ins1(v, a)) : add(out->ins1(v, a));
}
LIns* ins2(LOpcode v, LInsp a, LInsp b) {
return v == LIR_2 ? out->ins2(v,a,b) : add(out->ins2(v, a, b));
}
LIns* insCall(const CallInfo *call, LInsp args[]) {
return add_flush(out->insCall(call, args));
}
LIns* insParam(int32_t i, int32_t kind) {
return add(out->insParam(i, kind));
}
LIns* insLoad(LOpcode v, LInsp base, LInsp disp) {
return add(out->insLoad(v, base, disp));
}
LIns* insStorei(LInsp v, LInsp b, int32_t d) {
return add(out->insStorei(v, b, d));
}
LIns* insAlloc(int32_t size) {
return add(out->insAlloc(size));
}
LIns* insImm(int32_t imm) {
return add(out->insImm(imm));
}
LIns* insImmq(uint64_t imm) {
return add(out->insImmq(imm));
}
};
#endif
class ExprFilter: public LirWriter
{
public:
ExprFilter(LirWriter *out) : LirWriter(out) {}
LIns* ins1(LOpcode v, LIns* a);
LIns* ins2(LOpcode v, LIns* a, LIns* b);
LIns* insGuard(LOpcode, LIns *cond, LIns *);
LIns* insBranch(LOpcode, LIns *cond, LIns *target);
};
// @todo, this could be replaced by a generic HashMap or HashSet, if we had one
class LInsHashSet
{
// must be a power of 2.
// don't start too small, or we'll waste time growing and rehashing.
// don't start too large, or we'll waste memory.
static const uint32_t kInitialCap = 64;
LInsp *m_list; // explicit WB's are used, no DWB needed.
uint32_t m_used, m_cap;
avmplus::GC* m_gc;
static uint32_t FASTCALL hashcode(LInsp i);
uint32_t FASTCALL find(LInsp name, uint32_t hash, const LInsp *list, uint32_t cap);
static bool FASTCALL equals(LInsp a, LInsp b);
void FASTCALL grow();
public:
LInsHashSet(avmplus::GC* gc);
~LInsHashSet();
LInsp find32(int32_t a, uint32_t &i);
LInsp find64(uint64_t a, uint32_t &i);
LInsp find1(LOpcode v, LInsp a, uint32_t &i);
LInsp find2(LOpcode v, LInsp a, LInsp b, uint32_t &i);
LInsp findcall(const CallInfo *call, uint32_t argc, LInsp args[], uint32_t &i);
LInsp add(LInsp i, uint32_t k);
void replace(LInsp i);
void clear();
static uint32_t FASTCALL hashimm(int32_t);
static uint32_t FASTCALL hashimmq(uint64_t);
static uint32_t FASTCALL hash1(LOpcode v, LInsp);
static uint32_t FASTCALL hash2(LOpcode v, LInsp, LInsp);
static uint32_t FASTCALL hashcall(const CallInfo *call, uint32_t argc, LInsp args[]);
};
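// A sketch of the lookup-then-insert pattern this class supports (this
// is roughly how CseFilter::ins2 uses it):
//
//     uint32_t k;
//     LInsp found = exprs.find2(v, a, b, k);   // k receives the slot index
//     if (found)
//         return found;                        // reuse existing instruction
//     return exprs.add(out->ins2(v, a, b), k); // record the new one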
class CseFilter: public LirWriter
{
public:
LInsHashSet exprs;
CseFilter(LirWriter *out, avmplus::GC *gc);
LIns* insImm(int32_t imm);
LIns* insImmq(uint64_t q);
LIns* ins0(LOpcode v);
LIns* ins1(LOpcode v, LInsp);
LIns* ins2(LOpcode v, LInsp, LInsp);
LIns* insLoad(LOpcode v, LInsp b, LInsp d);
LIns* insCall(const CallInfo *call, LInsp args[]);
LIns* insGuard(LOpcode op, LInsp cond, LIns *x);
};
class LirBuffer : public avmplus::GCFinalizedObject
{
public:
DWB(Fragmento*) _frago;
LirBuffer(Fragmento* frago);
virtual ~LirBuffer();
void clear();
void rewind();
uintptr_t makeRoom(size_t szB); // make room for an instruction
// OOM handling: a new page is allocated LIR_BUF_THRESHOLD instructions
// before the end of the current page; if that allocation fails, outOMem()
// returns true. The buffer can still be written to briefly after that,
// but clients are expected to notice the condition and stop writing.
bool outOMem() { return _noMem != 0; }
debug_only (void validate() const;)
verbose_only(DWB(LirNameMap*) names;)
int32_t insCount();
size_t byteCount();
// stats
struct
{
uint32_t lir; // # instructions
}
_stats;
AbiKind abi;
LInsp state,param1,sp,rp;
LInsp savedRegs[NumSavedRegs];
bool explicitSavedRegs;
protected:
Page* pageAlloc();
void moveToNewPage(uintptr_t addrOfLastLInsOnCurrentPage);
PageList _pages;
Page* _nextPage; // allocated in preparation for growing the buffer
uintptr_t _unused; // next unused instruction slot
int _noMem; // set if ran out of memory when writing to buffer
};
class LirBufWriter : public LirWriter
{
DWB(LirBuffer*) _buf; // underlying buffer housing the instructions
public:
LirBufWriter(LirBuffer* buf)
: LirWriter(0), _buf(buf) {
}
// LirWriter interface
LInsp insLoad(LOpcode op, LInsp base, LInsp off);
LInsp insStorei(LInsp o1, LInsp o2, int32_t imm);
LInsp ins0(LOpcode op);
LInsp ins1(LOpcode op, LInsp o1);
LInsp ins2(LOpcode op, LInsp o1, LInsp o2);
LInsp insParam(int32_t i, int32_t kind);
LInsp insImm(int32_t imm);
LInsp insImmq(uint64_t imm);
LInsp insCall(const CallInfo *call, LInsp args[]);
LInsp insGuard(LOpcode op, LInsp cond, LIns *x);
LInsp insBranch(LOpcode v, LInsp condition, LInsp to);
LInsp insAlloc(int32_t size);
LInsp insSkip(size_t);
};
class LirFilter
{
public:
LirFilter *in;
LirFilter(LirFilter *in) : in(in) {}
virtual ~LirFilter(){}
virtual LInsp read() {
return in->read();
}
virtual LInsp pos() {
return in->pos();
}
};
// concrete
class LirReader : public LirFilter
{
LInsp _i; // current instruction that this decoder is operating on.
public:
LirReader(LInsp i) : LirFilter(0), _i(i) { }
virtual ~LirReader() {}
// LirReader i/f
LInsp read(); // advance to the prior instruction
LInsp pos() {
return _i;
}
void setpos(LIns *i) {
_i = i;
}
};
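// LirFilters are the backward-reading counterpart of the LirWriter
// pipeline. A minimal sketch of a read loop, assuming 'lastIns' is the
// most recently written instruction:
//
//     LirReader   reader(lastIns);
//     StackFilter sf(&reader, gc, lirbuf, lirbuf->sp);
//     for (LInsp i = sf.read(); !i->isop(LIR_start); i = sf.read()) {
//         // ... visit instructions from last to first
//     }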
class Assembler;
void compile(Assembler *assm, Fragment *frag);
verbose_only(void live(avmplus::GC *gc, LirBuffer *lirbuf);)
class StackFilter: public LirFilter
2008-06-18 21:11:15 -07:00
{
avmplus::GC *gc;
LirBuffer *lirbuf;
LInsp sp;
avmplus::BitSet stk;
int top;
int getTop(LInsp br);
public:
StackFilter(LirFilter *in, avmplus::GC *gc, LirBuffer *lirbuf, LInsp sp);
virtual ~StackFilter() {}
LInsp read();
};
class CseReader: public LirFilter
{
LInsHashSet *exprs;
public:
CseReader(LirFilter *in, LInsHashSet *exprs);
LInsp read();
};
// eliminate redundant loads by watching for stores & mutator calls
class LoadFilter: public LirWriter
{
public:
LInsp sp, rp;
LInsHashSet exprs;
void clear(LInsp p);
public:
LoadFilter(LirWriter *out, avmplus::GC *gc)
: LirWriter(out), exprs(gc) { }
LInsp ins0(LOpcode);
LInsp insLoad(LOpcode, LInsp base, LInsp disp);
LInsp insStorei(LInsp v, LInsp b, int32_t d);
LInsp insCall(const CallInfo *call, LInsp args[]);
};
}
#endif // __nanojit_LIR__