2009-06-23 14:51:15 -07:00
|
|
|
/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
|
2009-07-10 12:58:34 -07:00
|
|
|
/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
|
2008-06-18 21:11:15 -07:00
|
|
|
/* ***** BEGIN LICENSE BLOCK *****
|
|
|
|
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
|
|
|
|
*
|
|
|
|
* The contents of this file are subject to the Mozilla Public License Version
|
|
|
|
* 1.1 (the "License"); you may not use this file except in compliance with
|
|
|
|
* the License. You may obtain a copy of the License at
|
|
|
|
* http://www.mozilla.org/MPL/
|
|
|
|
*
|
|
|
|
* Software distributed under the License is distributed on an "AS IS" basis,
|
|
|
|
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
|
|
|
|
* for the specific language governing rights and limitations under the
|
|
|
|
* License.
|
|
|
|
*
|
|
|
|
* The Original Code is [Open Source Virtual Machine].
|
|
|
|
*
|
|
|
|
* The Initial Developer of the Original Code is
|
|
|
|
* Adobe System Incorporated.
|
|
|
|
* Portions created by the Initial Developer are Copyright (C) 2004-2007
|
|
|
|
* the Initial Developer. All Rights Reserved.
|
|
|
|
*
|
|
|
|
* Contributor(s):
|
|
|
|
* Adobe AS3 Team
|
|
|
|
*
|
|
|
|
* Alternatively, the contents of this file may be used under the terms of
|
|
|
|
* either the GNU General Public License Version 2 or later (the "GPL"), or
|
|
|
|
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
|
|
|
|
* in which case the provisions of the GPL or the LGPL are applicable instead
|
|
|
|
* of those above. If you wish to allow use of your version of this file only
|
|
|
|
* under the terms of either the GPL or the LGPL, and not to allow others to
|
|
|
|
* use your version of this file under the terms of the MPL, indicate your
|
|
|
|
* decision by deleting the provisions above and replace them with the notice
|
|
|
|
* and other provisions required by the GPL or the LGPL. If you do not delete
|
|
|
|
* the provisions above, a recipient may use your version of this file under
|
|
|
|
* the terms of any one of the MPL, the GPL or the LGPL.
|
|
|
|
*
|
|
|
|
* ***** END LICENSE BLOCK ***** */
|
|
|
|
|
|
|
|
#ifndef __nanojit_LIR__
|
|
|
|
#define __nanojit_LIR__
|
|
|
|
|
|
|
|
namespace nanojit
|
|
|
|
{
|
2009-12-17 13:24:39 -08:00
|
|
|
// The full LIR opcode set.  The concrete opcodes and their numeric values
// come from LIRopcode.tbl via the OP___ macro; everything after LIR_sentinel
// is a synonym (an alias for an existing value), not a new opcode.
enum LOpcode
#if defined(_MSC_VER) && _MSC_VER >= 1400
#pragma warning(disable:4480) // nonstandard extension used: specifying underlying type for enum
: unsigned
#endif
{
#define OP___(op, number, repKind, retType, isCse) \
    LIR_##op = (number),
#include "LIRopcode.tbl"
    LIR_sentinel,   // one past the last real opcode; not a valid instruction
#undef OP___

    // PTR_SIZE(a,b) selects the 32-bit variant 'a' or the 64-bit variant 'b'
    // depending on the target word size.
#ifdef NANOJIT_64BIT
#  define PTR_SIZE(a,b)  b
#else
#  define PTR_SIZE(a,b)  a
#endif

    // Pointer-sized synonyms.

    LIR_paramp  = PTR_SIZE(LIR_parami, LIR_paramq),

    LIR_allocp  = PTR_SIZE(LIR_alloci, LIR_allocq),

    LIR_retp    = PTR_SIZE(LIR_reti,   LIR_retq),

    LIR_livep   = PTR_SIZE(LIR_livei,  LIR_liveq),

    LIR_ldp     = PTR_SIZE(LIR_ldi,    LIR_ldq),

    LIR_stp     = PTR_SIZE(LIR_sti,    LIR_stq),

    LIR_callp   = PTR_SIZE(LIR_calli,  LIR_callq),

    LIR_eqp     = PTR_SIZE(LIR_eqi,    LIR_eqq),
    LIR_ltp     = PTR_SIZE(LIR_lti,    LIR_ltq),
    LIR_gtp     = PTR_SIZE(LIR_gti,    LIR_gtq),
    LIR_lep     = PTR_SIZE(LIR_lei,    LIR_leq),
    LIR_gep     = PTR_SIZE(LIR_gei,    LIR_geq),
    LIR_ltup    = PTR_SIZE(LIR_ltui,   LIR_ltuq),
    LIR_gtup    = PTR_SIZE(LIR_gtui,   LIR_gtuq),
    LIR_leup    = PTR_SIZE(LIR_leui,   LIR_leuq),
    LIR_geup    = PTR_SIZE(LIR_geui,   LIR_geuq),

    LIR_addp    = PTR_SIZE(LIR_addi,   LIR_addq),

    LIR_andp    = PTR_SIZE(LIR_andi,   LIR_andq),
    LIR_orp     = PTR_SIZE(LIR_ori,    LIR_orq),
    LIR_xorp    = PTR_SIZE(LIR_xori,   LIR_xorq),

    LIR_lshp    = PTR_SIZE(LIR_lshi,   LIR_lshq),
    LIR_rshp    = PTR_SIZE(LIR_rshi,   LIR_rshq),
    LIR_rshup   = PTR_SIZE(LIR_rshui,  LIR_rshuq),

    LIR_cmovp   = PTR_SIZE(LIR_cmovi,  LIR_cmovq),

    // XXX: temporary synonyms for old opcode names and old pointer-sized
    // synonyms, for the Great Opcode Renaming transition period (bug
    // 504506).  Those in comments have not changed and so don't need a
    // temporary synonym.

    // LIR_start

    // LIR_regfence

    // LIR_skip

#ifndef NANOJIT_64BIT
    LIR_iparam  = LIR_parami,
#else
    LIR_qparam  = LIR_paramq,
#endif

#ifndef NANOJIT_64BIT
    LIR_ialloc  = LIR_alloci,
#else
    LIR_qalloc  = LIR_allocq,
#endif

    LIR_ret     = LIR_reti,
#ifdef NANOJIT_64BIT
    LIR_qret    = LIR_retq,
#endif
    LIR_fret    = LIR_retd,

    LIR_live    = LIR_livei,
#ifdef NANOJIT_64BIT
    LIR_qlive   = LIR_liveq,
#endif
    LIR_flive   = LIR_lived,

    // file
    // line

    LIR_ldsb    = LIR_ldc2i,
    LIR_ldss    = LIR_lds2i,
    LIR_ldzb    = LIR_lduc2ui,
    LIR_ldzs    = LIR_ldus2ui,
    LIR_ld      = LIR_ldi,
    // LIR_ldq
    LIR_ldf     = LIR_ldd,
    LIR_ld32f   = LIR_ldf2d,

    LIR_stb     = LIR_sti2c,
    LIR_sts     = LIR_sti2s,
#ifdef NANOJIT_64BIT
    LIR_stqi    = LIR_stq,
#endif
    LIR_stfi    = LIR_std,
    LIR_st32f   = LIR_std2f,

    LIR_icall   = LIR_calli,
#ifdef NANOJIT_64BIT
    LIR_qcall   = LIR_callq,
#endif
    LIR_fcall   = LIR_calld,

    // LIR_j
    // LIR_jt
    // LIR_jf
    // LIR_jtbl

    // LIR_label = LIR_label

    // LIR_x
    // LIR_xt
    // LIR_xf
    // LIR_xtbl
    // LIR_xbarrier

    LIR_int     = LIR_immi,
#ifdef NANOJIT_64BIT
    LIR_quad    = LIR_immq,
#endif
    LIR_float   = LIR_immd,

    LIR_eq      = LIR_eqi,
    LIR_lt      = LIR_lti,
    LIR_gt      = LIR_gti,
    LIR_le      = LIR_lei,
    LIR_ge      = LIR_gei,
    LIR_ult     = LIR_ltui,
    LIR_ugt     = LIR_gtui,
    LIR_ule     = LIR_leui,
    LIR_uge     = LIR_geui,

#ifdef NANOJIT_64BIT
    LIR_qeq     = LIR_eqq,
    LIR_qlt     = LIR_ltq,
    LIR_qgt     = LIR_gtq,
    LIR_qle     = LIR_leq,
    LIR_qge     = LIR_geq,
    LIR_qult    = LIR_ltuq,
    LIR_qugt    = LIR_gtuq,
    LIR_qule    = LIR_leuq,
    LIR_quge    = LIR_geuq,
#endif

    LIR_feq     = LIR_eqd,
    LIR_flt     = LIR_ltd,
    LIR_fgt     = LIR_gtd,
    LIR_fle     = LIR_led,
    LIR_fge     = LIR_ged,

    LIR_neg     = LIR_negi,
    LIR_add     = LIR_addi,
    LIR_sub     = LIR_subi,
    LIR_mul     = LIR_muli,
#if defined NANOJIT_IA32 || defined NANOJIT_X64
    LIR_div     = LIR_divi,
    LIR_mod     = LIR_modi,
#endif

    LIR_not     = LIR_noti,
    LIR_and     = LIR_andi,
    LIR_or      = LIR_ori,
    LIR_xor     = LIR_xori,

    LIR_lsh     = LIR_lshi,
    LIR_rsh     = LIR_rshi,
    LIR_ush     = LIR_rshui,

#ifdef NANOJIT_64BIT
    LIR_qiadd   = LIR_addq,

    LIR_qiand   = LIR_andq,
    LIR_qior    = LIR_orq,
    LIR_qxor    = LIR_xorq,

    LIR_qilsh   = LIR_lshq,
    LIR_qirsh   = LIR_rshq,
    LIR_qursh   = LIR_rshuq,
#endif

    LIR_fneg    = LIR_negd,
    LIR_fadd    = LIR_addd,
    LIR_fsub    = LIR_subd,
    LIR_fmul    = LIR_muld,
    LIR_fdiv    = LIR_divd,
    LIR_fmod    = LIR_modd,

    LIR_cmov    = LIR_cmovi,
#ifdef NANOJIT_64BIT
    LIR_qcmov   = LIR_cmovq,
#endif

#ifdef NANOJIT_64BIT
    LIR_u2q     = LIR_ui2uq,
#endif

    LIR_i2f     = LIR_i2d,
    LIR_u2f     = LIR_ui2d,
    LIR_f2i     = LIR_d2i,

    LIR_addxov  = LIR_addxovi,
    LIR_subxov  = LIR_subxovi,
    LIR_mulxov  = LIR_mulxovi,

#if NJ_SOFTFLOAT_SUPPORTED
    LIR_qlo     = LIR_dlo2i,
    LIR_qhi     = LIR_dhi2i,
    LIR_qjoin   = LIR_ii2d,
    LIR_callh   = LIR_hcalli,
#endif

    LIR_param   = LIR_paramp,

    LIR_alloc   = LIR_allocp,

    LIR_pret    = LIR_retp,

    LIR_plive   = LIR_livep,

    LIR_stpi    = LIR_stp,

    LIR_pcall   = LIR_callp,

    LIR_peq     = LIR_eqp,
    LIR_plt     = LIR_ltp,
    LIR_pgt     = LIR_gtp,
    LIR_ple     = LIR_lep,
    LIR_pge     = LIR_gep,
    LIR_pult    = LIR_ltup,
    LIR_pugt    = LIR_gtup,
    LIR_pule    = LIR_leup,
    LIR_puge    = LIR_geup,
    LIR_piadd   = LIR_addp,

    LIR_piand   = LIR_andp,
    LIR_pior    = LIR_orp,
    LIR_pxor    = LIR_xorp,

    LIR_pilsh   = LIR_lshp,
    LIR_pirsh   = LIR_rshp,
    LIR_pursh   = LIR_rshup,

    LIR_pcmov   = LIR_cmovp,
};
|
|
|
|
|
2010-02-28 17:02:01 -08:00
|
|
|
// 32-bit integer comparisons must be contiguous, as must 64-bit integer
// comparisons and 64-bit float comparisons.  (isICmpOpcode() and friends
// below rely on these contiguous ranges.)
NanoStaticAssert(LIR_eqi + 1 == LIR_lti  &&
                 LIR_eqi + 2 == LIR_gti  &&
                 LIR_eqi + 3 == LIR_lei  &&
                 LIR_eqi + 4 == LIR_gei  &&
                 LIR_eqi + 5 == LIR_ltui &&
                 LIR_eqi + 6 == LIR_gtui &&
                 LIR_eqi + 7 == LIR_leui &&
                 LIR_eqi + 8 == LIR_geui);
#ifdef NANOJIT_64BIT
NanoStaticAssert(LIR_eqq + 1 == LIR_ltq  &&
                 LIR_eqq + 2 == LIR_gtq  &&
                 LIR_eqq + 3 == LIR_leq  &&
                 LIR_eqq + 4 == LIR_geq  &&
                 LIR_eqq + 5 == LIR_ltuq &&
                 LIR_eqq + 6 == LIR_gtuq &&
                 LIR_eqq + 7 == LIR_leuq &&
                 LIR_eqq + 8 == LIR_geuq);
#endif
NanoStaticAssert(LIR_eqd + 1 == LIR_ltd &&
                 LIR_eqd + 2 == LIR_gtd &&
                 LIR_eqd + 3 == LIR_led &&
                 LIR_eqd + 4 == LIR_ged);

// Various opcodes must be changeable to their opposite with op^1
// (although we use invertXyz() when possible, ie. outside static
// assertions).
NanoStaticAssert((LIR_jt^1) == LIR_jf && (LIR_jf^1) == LIR_jt);

NanoStaticAssert((LIR_xt^1) == LIR_xf && (LIR_xf^1) == LIR_xt);

NanoStaticAssert((LIR_lti^1)  == LIR_gti  && (LIR_gti^1)  == LIR_lti);
NanoStaticAssert((LIR_lei^1)  == LIR_gei  && (LIR_gei^1)  == LIR_lei);
NanoStaticAssert((LIR_ltui^1) == LIR_gtui && (LIR_gtui^1) == LIR_ltui);
NanoStaticAssert((LIR_leui^1) == LIR_geui && (LIR_geui^1) == LIR_leui);

#ifdef NANOJIT_64BIT
NanoStaticAssert((LIR_ltq^1)  == LIR_gtq  && (LIR_gtq^1)  == LIR_ltq);
NanoStaticAssert((LIR_leq^1)  == LIR_geq  && (LIR_geq^1)  == LIR_leq);
NanoStaticAssert((LIR_ltuq^1) == LIR_gtuq && (LIR_gtuq^1) == LIR_ltuq);
NanoStaticAssert((LIR_leuq^1) == LIR_geuq && (LIR_geuq^1) == LIR_leuq);
#endif

NanoStaticAssert((LIR_ltd^1) == LIR_gtd && (LIR_gtd^1) == LIR_ltd);
NanoStaticAssert((LIR_led^1) == LIR_ged && (LIR_ged^1) == LIR_led);
|
2010-02-28 17:02:01 -08:00
|
|
|
|
|
|
|
|
2009-07-10 12:58:34 -07:00
|
|
|
struct GuardRecord;
|
2008-06-18 21:11:15 -07:00
|
|
|
struct SideExit;
|
2008-10-13 13:29:18 -07:00
|
|
|
|
|
|
|
// Calling convention of a call target; stored in CallInfo::_abi (3 bits).
enum AbiKind {
    ABI_FASTCALL,
    ABI_THISCALL,
    ABI_STDCALL,
    ABI_CDECL
};
|
|
|
|
|
2010-03-21 19:47:02 -07:00
|
|
|
// All values must fit into three bits.  See CallInfo for details.
enum ArgType {
    ARGTYPE_V = 0,      // void
    ARGTYPE_F = 1,      // double (64bit)
    ARGTYPE_I = 2,      // int32_t
    ARGTYPE_U = 3,      // uint32_t
#ifdef NANOJIT_64BIT
    ARGTYPE_Q = 4,      // uint64_t
#endif

    // aliases
    ARGTYPE_P  = PTR_SIZE(ARGTYPE_I, ARGTYPE_Q),    // pointer
    ARGTYPE_LO = ARGTYPE_I,                         // int32_t
    ARGTYPE_B  = ARGTYPE_I                          // bool
};

// In _typesig, each entry is three bits.
static const int ARGTYPE_SHIFT = 3;
static const int ARGTYPE_MASK  = 0x7;
|
|
|
|
|
2009-08-02 15:54:02 -07:00
|
|
|
// Pseudo-address marking an indirect call; cf. CallInfo::isIndirect(),
// which treats any _address below 256 as non-direct.
enum IndirectCall {
    CALL_INDIRECT = 0
};
|
|
|
|
|
2010-03-01 17:43:20 -08:00
|
|
|
//-----------------------------------------------------------------------
|
|
|
|
// Aliasing
|
|
|
|
// --------
|
|
|
|
// *Aliasing* occurs when a single memory location can be accessed through
|
|
|
|
// multiple names. For example, consider this code:
|
|
|
|
//
|
|
|
|
// ld a[0]
|
|
|
|
// sti b[0]
|
|
|
|
// ld a[0]
|
|
|
|
//
|
|
|
|
// In general, it's possible that a[0] and b[0] may refer to the same
|
|
|
|
// memory location. This means, for example, that you cannot safely
|
|
|
|
// perform CSE on the two loads. However, if you know that 'a' cannot be
|
|
|
|
// an alias of 'b' (ie. the two loads do not alias with the store) then
|
|
|
|
// you can safely perform CSE.
|
|
|
|
//
|
|
|
|
// Access regions
|
|
|
|
// --------------
|
|
|
|
// Doing alias analysis precisely is difficult. But it turns out that
|
|
|
|
// keeping track of aliasing at a very coarse level is enough to help with
|
|
|
|
// many optimisations. So we conceptually divide the memory that is
|
|
|
|
// accessible from LIR into a small number of "access regions". An access
|
|
|
|
// region may be non-contiguous. No two access regions can overlap. The
|
|
|
|
// union of all access regions covers all memory accessible from LIR.
|
|
|
|
//
|
|
|
|
// In general a (static) load or store may be executed more than once, and
|
|
|
|
// thus may access multiple regions; however, in practice almost all
|
|
|
|
// loads and stores will obviously access only a single region. A
|
|
|
|
// function called from LIR may load and/or store multiple access regions
|
|
|
|
// (even if executed only once).
|
|
|
|
//
|
|
|
|
// If two loads/stores/calls are known to not access the same region(s),
|
|
|
|
// then they do not alias.
|
|
|
|
//
|
|
|
|
// The access regions used are as follows:
|
|
|
|
//
|
|
|
|
// - READONLY: all memory that is read-only, ie. never stored to.
|
|
|
|
// A load from a READONLY region will never alias with any stores.
|
|
|
|
//
|
|
|
|
// - STACK: the stack. Stack loads/stores can usually be easily
|
2010-03-23 15:05:47 -07:00
|
|
|
// identified because they use SP as the base pointer.
|
|
|
|
//
|
|
|
|
// - RSTACK: the return stack. Return stack loads/stores can usually be
|
|
|
|
// easily identified because they use RP as the base pointer.
|
2010-03-01 17:43:20 -08:00
|
|
|
//
|
|
|
|
// - OTHER: all other regions of memory.
|
|
|
|
//
|
|
|
|
// It makes sense to add new access regions when doing so will help with
|
|
|
|
// one or more optimisations.
|
|
|
|
//
|
|
|
|
// One subtlety is that the meanings of the access region markings only
|
|
|
|
// apply to the LIR fragment that they are in. For example, if a memory
|
|
|
|
// location M is read-only in a particular LIR fragment, all loads
|
|
|
|
// involving M in that fragment can be safely marked READONLY, even if M
|
|
|
|
// is modified elsewhere. This is safe because the a LIR fragment is the
|
|
|
|
// unit of analysis in which the markings are used. In other words alias
|
|
|
|
// region markings are only used for intra-fragment optimisations.
|
|
|
|
//
|
|
|
|
// Access region sets and instruction markings
|
|
|
|
// -------------------------------------------
|
|
|
|
// The LIR generator must mark each load/store with an "access region
|
|
|
|
// set", which is a set of one or more access regions. This indicates
|
2010-03-24 17:41:39 -07:00
|
|
|
// which parts of LIR-accessible memory the load/store may touch.
|
2010-03-01 17:43:20 -08:00
|
|
|
//
|
|
|
|
// The LIR generator must also mark each function called from LIR with an
|
|
|
|
// access region set for memory stored to by the function. (We could also
|
|
|
|
// have a marking for memory loads, but there's no need at the moment.)
|
|
|
|
// These markings apply to the function itself, not the call site (ie.
|
|
|
|
// they're not context-sensitive).
|
|
|
|
//
|
|
|
|
// These load/store/call markings MUST BE ACCURATE -- if they are wrong
|
|
|
|
// then invalid optimisations might occur that change the meaning of the
|
|
|
|
// code. However, they can safely be imprecise (ie. conservative), in the
|
|
|
|
// following ways:
|
|
|
|
//
|
|
|
|
// - A load that accesses a READONLY region can be safely marked instead
|
|
|
|
// as loading from OTHER. In other words, it's safe to underestimate
|
|
|
|
// the size of the READONLY region. (This would also apply to the load
|
|
|
|
// set of a function, if we recorded that.)
|
|
|
|
//
|
|
|
|
// - A load/store can safely be marked as accessing regions that it
|
|
|
|
// doesn't, so long as the regions it does access are also included (one
|
|
|
|
// exception: marking a store with READONLY is nonsense and will cause
|
|
|
|
// assertions).
|
|
|
|
//
|
|
|
|
// In other words, a load/store can be marked with an access region set
|
|
|
|
// that is a superset of its actual access region set. Taking this to
|
|
|
|
// its logical conclusion, any load can be safely marked with LOAD_ANY and
|
|
|
|
// any store can be safely marked with with STORE_ANY (and the latter is
|
|
|
|
// true for the store set of a function.)
|
|
|
|
//
|
|
|
|
// Such imprecision is safe but may reduce optimisation opportunities.
|
2010-03-23 15:05:47 -07:00
|
|
|
//
|
|
|
|
// Optimisations that use access region info
|
|
|
|
// -----------------------------------------
|
|
|
|
// Currently only CseFilter uses this, and only for determining whether
|
|
|
|
// loads can be CSE'd. Note that CseFilter treats loads that are marked
|
|
|
|
// with a single access region precisely, but all loads marked with
|
|
|
|
// multiple access regions get lumped together. So if you can't mark a
|
|
|
|
// load with a single access region, you might as well use ACC_LOAD_ANY.
|
2010-03-01 17:43:20 -08:00
|
|
|
//-----------------------------------------------------------------------
|
|
|
|
|
|
|
|
// An access region set is represented as a bitset.  Nb: this restricts us
// to at most eight alias regions for the moment.
typedef uint8_t AccSet;

// The access regions.  Note that because of the bitset representation
// these constants are also valid (singleton) AccSet values.  If you add
// new ones please update ACC_ALL_STORABLE and formatAccSet() and
// CseFilter.
//
static const AccSet ACC_READONLY = 1 << 0;      // 0000_0001b
static const AccSet ACC_STACK    = 1 << 1;      // 0000_0010b
static const AccSet ACC_RSTACK   = 1 << 2;      // 0000_0100b
static const AccSet ACC_OTHER    = 1 << 3;      // 0000_1000b

// Some common (non-singleton) access region sets.  ACC_NONE does not make
// sense for loads or stores (which must access at least one region), it
// only makes sense for calls.
//
// A convention that's worth using: use ACC_LOAD_ANY/ACC_STORE_ANY for
// cases that you're unsure about or haven't considered carefully.  Use
// ACC_ALL/ACC_ALL_STORABLE for cases that you have considered carefully.
// That way it's easy to tell which ones have been considered and which
// haven't.
static const AccSet ACC_NONE         = 0x0;
static const AccSet ACC_ALL_STORABLE = ACC_STACK | ACC_RSTACK | ACC_OTHER;
// ACC_ALL additionally includes READONLY, which is only meaningful for loads.
static const AccSet ACC_ALL          = ACC_READONLY | ACC_ALL_STORABLE;
static const AccSet ACC_LOAD_ANY     = ACC_ALL;             // synonym
static const AccSet ACC_STORE_ANY    = ACC_ALL_STORABLE;    // synonym
|
2010-03-01 17:43:20 -08:00
|
|
|
|
2008-10-13 13:29:18 -07:00
|
|
|
// Describes a function callable from LIR: its address, signature, ABI,
// purity and the access regions it may store to.
struct CallInfo
{
private:
public:
    uintptr_t   _address;
    uint32_t    _typesig:27;    // 9 3-bit fields indicating arg type, by ARGTYPE above (including ret type): a1 a2 a3 a4 a5 ret
    AbiKind     _abi:3;
    uint8_t     _isPure:1;      // _isPure=1 means no side-effects, result only depends on args
    AccSet      _storeAccSet;   // access regions stored by the function
    verbose_only ( const char* _name; )

    uint32_t count_args() const;
    uint32_t count_int32_args() const;
    // Nb: uses right-to-left order, eg. sizes[0] is the size of the right-most arg.
    uint32_t getArgTypes(ArgType* types) const;

    // The return type occupies the lowest 3-bit field of _typesig.
    inline ArgType returnType() const {
        return ArgType(_typesig & ARGTYPE_MASK);
    }

    // Small "addresses" are not real code addresses; they mark indirect
    // calls (cf. CALL_INDIRECT above).
    inline bool isIndirect() const {
        return _address < 256;
    }
};
|
|
|
|
|
|
|
|
/*
 * Record for extra data used to compile switches as jump tables.
 */
struct SwitchInfo
{
    NIns**      table;      // Jump table; a jump address is NIns*
    uint32_t    count;      // Number of table entries
    // Index value at last execution of the switch.  The index value
    // is the offset into the jump table.  Thus it is computed as
    // (switch expression) - (lowest case value).
    uint32_t    index;
};
|
2009-02-11 17:40:27 -08:00
|
|
|
|
2010-02-28 17:02:01 -08:00
|
|
|
// Array holding the 'isCse' field from LIRopcode.tbl.
extern const int8_t isCses[];   // cannot be uint8_t, some values are negative

// True iff instructions with opcode 'op' are unconditionally CSE-able.
// Opcodes whose table entry is -1 must not be queried here.
inline bool isCseOpcode(LOpcode op) {
    NanoAssert(isCses[op] != -1);   // see LIRopcode.tbl to understand this
    return isCses[op] == 1;
}
|
2009-05-17 22:38:00 -07:00
|
|
|
// True iff 'op' is one of the return opcodes (int, double, and on 64-bit
// builds also quad).
inline bool isRetOpcode(LOpcode op) {
    return
#if defined NANOJIT_64BIT
        op == LIR_retq ||
#endif
        op == LIR_reti || op == LIR_retd;
}
|
|
|
|
// True iff 'op' is a conditional-move opcode (int, and on 64-bit builds
// also quad).
inline bool isCmovOpcode(LOpcode op) {
    return
#if defined NANOJIT_64BIT
        op == LIR_cmovq ||
#endif
        op == LIR_cmovi;
}
|
2010-02-28 17:02:01 -08:00
|
|
|
// True iff 'op' is any 32-bit integer comparison (signed or unsigned);
// the range LIR_eqi..LIR_geui is contiguous (statically asserted above).
inline bool isICmpOpcode(LOpcode op) {
    return op >= LIR_eqi && op <= LIR_geui;
}
|
|
|
|
// True iff 'op' is a signed 32-bit integer comparison (LIR_eqi..LIR_gei).
inline bool isSICmpOpcode(LOpcode op) {
    return op >= LIR_eqi && op <= LIR_gei;
}
|
|
|
|
// True iff 'op' is an unsigned 32-bit integer comparison.  LIR_eqi counts
// as both signed and unsigned equality.
inline bool isUICmpOpcode(LOpcode op) {
    if (op == LIR_eqi)
        return true;
    return op >= LIR_ltui && op <= LIR_geui;
}
|
|
|
|
#ifdef NANOJIT_64BIT
|
|
|
|
inline bool isQCmpOpcode(LOpcode op) {
|
2010-04-16 16:20:24 -07:00
|
|
|
return LIR_eqq <= op && op <= LIR_geuq;
|
2010-02-28 17:02:01 -08:00
|
|
|
}
|
|
|
|
inline bool isSQCmpOpcode(LOpcode op) {
|
2010-04-16 16:20:24 -07:00
|
|
|
return LIR_eqq <= op && op <= LIR_geq;
|
2010-02-28 17:02:01 -08:00
|
|
|
}
|
|
|
|
inline bool isUQCmpOpcode(LOpcode op) {
|
2010-04-16 16:20:24 -07:00
|
|
|
return LIR_eqq == op || (LIR_ltuq <= op && op <= LIR_geuq);
|
2010-02-28 17:02:01 -08:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
// True iff 'op' is a 64-bit float comparison (LIR_eqd..LIR_ged, a
// contiguous range statically asserted above).
inline bool isFCmpOpcode(LOpcode op) {
    return op >= LIR_eqd && op <= LIR_ged;
}
|
|
|
|
|
|
|
|
// Flips a conditional jump: LIR_jt <-> LIR_jf.  The two opcodes differ
// only in their low bit (statically asserted above).
inline LOpcode invertCondJmpOpcode(LOpcode op) {
    NanoAssert(op == LIR_jt || op == LIR_jf);
    LOpcode flipped = LOpcode(op ^ 1);
    return flipped;
}
|
|
|
|
// Flips a conditional guard: LIR_xt <-> LIR_xf.  The two opcodes differ
// only in their low bit (statically asserted above).
inline LOpcode invertCondGuardOpcode(LOpcode op) {
    NanoAssert(op == LIR_xt || op == LIR_xf);
    LOpcode flipped = LOpcode(op ^ 1);
    return flipped;
}
|
|
|
|
// Swaps a 32-bit comparison with its mirror (lt <-> gt, le <-> ge, and
// the unsigned variants) by toggling the low bit; the enum layout for
// this is statically asserted above.
inline LOpcode invertICmpOpcode(LOpcode op) {
    NanoAssert(isICmpOpcode(op));
    LOpcode mirrored = LOpcode(op ^ 1);
    return mirrored;
}
|
|
|
|
#ifdef NANOJIT_64BIT
|
|
|
|
inline LOpcode invertQCmpOpcode(LOpcode op) {
|
|
|
|
NanoAssert(isQCmpOpcode(op));
|
|
|
|
return LOpcode(op ^ 1);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
// Swaps a float comparison with its mirror (ltd <-> gtd, led <-> ged) by
// toggling the low bit; the enum layout for this is asserted above.
inline LOpcode invertFCmpOpcode(LOpcode op) {
    NanoAssert(isFCmpOpcode(op));
    LOpcode mirrored = LOpcode(op ^ 1);
    return mirrored;
}
|
|
|
|
|
2010-02-11 17:30:16 -08:00
|
|
|
// Maps a call's return type (from its CallInfo signature) to the call
// opcode that produces a value of that type.  Void calls use the
// pointer-sized call opcode.
inline LOpcode getCallOpcode(const CallInfo* ci) {
    LOpcode op = LIR_callp;
    switch (ci->returnType()) {
    case ARGTYPE_V: op = LIR_callp; break;
    case ARGTYPE_I:
    case ARGTYPE_U: op = LIR_calli; break;
    case ARGTYPE_F: op = LIR_calld; break;
#ifdef NANOJIT_64BIT
    case ARGTYPE_Q: op = LIR_callq; break;
#endif
    default: NanoAssert(0); break;
    }
    return op;
}
|
2010-02-11 17:30:16 -08:00
|
|
|
|
2009-12-09 15:49:15 -08:00
|
|
|
LOpcode f64arith_to_i32arith(LOpcode op);
|
2010-02-11 17:30:16 -08:00
|
|
|
#ifdef NANOJIT_64BIT
|
2009-12-09 15:49:15 -08:00
|
|
|
LOpcode i32cmp_to_i64cmp(LOpcode op);
|
2010-02-11 17:30:16 -08:00
|
|
|
#endif
|
2010-03-31 15:07:50 -07:00
|
|
|
LOpcode f64cmp_to_i32cmp(LOpcode op);
|
|
|
|
LOpcode f64cmp_to_u32cmp(LOpcode op);
|
2008-06-18 21:11:15 -07:00
|
|
|
|
2009-12-09 15:49:15 -08:00
|
|
|
// Array holding the 'repKind' field from LIRopcode.tbl.
|
2009-09-09 21:02:59 -07:00
|
|
|
extern const uint8_t repKinds[];
|
|
|
|
|
2010-02-11 17:30:16 -08:00
|
|
|
// The type of the value (if any) produced by a LIR instruction; cf. the
// 'retType' column of LIRopcode.tbl (see retTypes[] below).
enum LTy {
    LTy_Void,   // no value/no type
    LTy_I32,    // 32-bit integer
#ifdef NANOJIT_64BIT
    LTy_I64,    // 64-bit integer
#endif
    LTy_F64,    // 64-bit float

    LTy_Ptr = PTR_SIZE(LTy_I32, LTy_I64)    // word-sized integer
};
|
|
|
|
|
|
|
|
// Array holding the 'retType' field from LIRopcode.tbl.
|
|
|
|
extern const LTy retTypes[];
|
|
|
|
|
2010-01-24 15:39:36 -08:00
|
|
|
// Builds the single-bit RegisterMask selecting register 'r'.
inline RegisterMask rmask(Register r)
{
    const RegisterMask bit = 1;
    return bit << r;
}
|
|
|
|
|
2009-06-30 21:18:55 -07:00
|
|
|
//-----------------------------------------------------------------------
|
|
|
|
// Low-level instructions. This is a bit complicated, because we have a
|
|
|
|
// variable-width representation to minimise space usage.
|
|
|
|
//
|
|
|
|
// - Instruction size is always an integral multiple of word size.
|
|
|
|
//
|
|
|
|
// - Every instruction has at least one word, holding the opcode and the
|
2010-04-20 17:51:50 -07:00
|
|
|
// reservation info ("SharedFields"). That word is in class LIns.
|
2009-06-30 21:18:55 -07:00
|
|
|
//
|
|
|
|
// - Beyond that, most instructions have 1, 2 or 3 extra words. These
|
|
|
|
// extra words are in classes LInsOp1, LInsOp2, etc (collectively called
|
2009-09-08 16:18:41 -07:00
|
|
|
// "LInsXYZ" in what follows). Each LInsXYZ class also contains an LIns,
|
|
|
|
// accessible by the 'ins' member, which holds the LIns data.
|
2009-06-30 21:18:55 -07:00
|
|
|
//
|
|
|
|
// - LIR is written forward, but read backwards. When reading backwards,
|
|
|
|
// in order to find the opcode, it must be in a predictable place in the
|
|
|
|
// LInsXYZ isn't affected by instruction width. Therefore, the LIns
|
|
|
|
// word (which contains the opcode) is always the *last* word in an
|
|
|
|
// instruction.
|
|
|
|
//
|
|
|
|
// - Each instruction is created by casting pre-allocated bytes from a
|
|
|
|
// LirBuffer to the LInsXYZ type. Therefore there are no constructors
|
|
|
|
// for LIns or LInsXYZ.
|
|
|
|
//
|
|
|
|
// - The standard handle for an instruction is a LIns*. This actually
|
|
|
|
// points to the LIns word, ie. to the final word in the instruction.
|
|
|
|
// This is a bit odd, but it allows the instruction's opcode to be
|
|
|
|
// easily accessed. Once you've looked at the opcode and know what kind
|
|
|
|
// of instruction it is, if you want to access any of the other words,
|
|
|
|
// you need to use toLInsXYZ(), which takes the LIns* and gives you an
|
|
|
|
// LInsXYZ*, ie. the pointer to the actual start of the instruction's
|
|
|
|
// bytes. From there you can access the instruction-specific extra
|
|
|
|
// words.
|
|
|
|
//
|
|
|
|
// - However, from outside class LIns, LInsXYZ isn't visible, nor is
|
|
|
|
// toLInsXYZ() -- from outside LIns, all LIR instructions are handled
|
|
|
|
// via LIns pointers and get/set methods are used for all LIns/LInsXYZ
|
|
|
|
// accesses. In fact, all data members in LInsXYZ are private and can
|
|
|
|
// only be accessed by LIns, which is a friend class. The only thing
|
|
|
|
// anyone outside LIns can do with a LInsXYZ is call getLIns().
|
|
|
|
//
|
|
|
|
// - An example Op2 instruction and the likely pointers to it (each line
|
|
|
|
// represents a word, and pointers to a line point to the start of the
|
|
|
|
// word on that line):
|
|
|
|
//
|
|
|
|
// [ oprnd_2 <-- LInsOp2* insOp2 == toLInsOp2(ins)
|
|
|
|
// oprnd_1
|
|
|
|
// opcode + resv ] <-- LIns* ins
|
|
|
|
//
|
2009-12-21 12:05:48 -08:00
|
|
|
// - LIR_skip instructions are used to link code chunks. If the first
|
2009-11-12 21:18:40 -08:00
|
|
|
// instruction on a chunk isn't a LIR_start, it will be a skip, and the
|
2009-12-21 12:05:48 -08:00
|
|
|
// skip's operand will point to the last LIns on the preceding chunk.
|
2009-06-30 21:18:55 -07:00
|
|
|
// LInsSk has the same layout as LInsOp1, but we represent it as a
|
|
|
|
// different class because there are some places where we treat
|
|
|
|
// skips specially and so having it separate seems like a good idea.
|
|
|
|
//
|
|
|
|
// - Various things about the size and layout of LIns and LInsXYZ are
|
|
|
|
// statically checked in staticSanityCheck(). In particular, this is
|
|
|
|
// worthwhile because there's nothing that guarantees that all the
|
|
|
|
// LInsXYZ classes have a size that is a multiple of word size (but in
|
|
|
|
// practice all sane compilers use a layout that results in this). We
|
|
|
|
// also check that every LInsXYZ is word-aligned in
|
|
|
|
// LirBuffer::makeRoom(); this seems sensible to avoid potential
|
2009-11-12 21:18:40 -08:00
|
|
|
// slowdowns due to misalignment. It relies on chunks themselves being
|
2009-06-30 21:18:55 -07:00
|
|
|
// word-aligned, which is extremely likely.
|
|
|
|
//
|
|
|
|
// - There is an enum, LInsRepKind, with one member for each of the
|
|
|
|
// LInsXYZ kinds. Each opcode is categorised with its LInsRepKind value
|
|
|
|
// in LIRopcode.tbl, and this is used in various places.
|
|
|
|
//-----------------------------------------------------------------------
|
|
|
|
|
|
|
|
// One member per instruction layout class; every opcode in LIRopcode.tbl is
// categorised with its LInsRepKind value (see the repKinds[] table, used by
// the LIns::isLInsXYZ() predicates below).
enum LInsRepKind {
    // LRK_XYZ corresponds to class LInsXYZ.
    LRK_Op0,
    LRK_Op1,
    LRK_Op2,
    LRK_Op3,
    LRK_Ld,
    LRK_Sti,
    LRK_Sk,
    LRK_C,
    LRK_P,
    LRK_I,
    LRK_N64,
    LRK_Jtbl,
    LRK_None    // this one is used for unused opcode numbers
};
|
2009-06-30 18:13:22 -07:00
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
// Forward declarations of the variable-width instruction layout classes,
// defined further below.  LIns refers to them via the toLInsXYZ() converters.
class LInsOp0;
class LInsOp1;
class LInsOp2;
class LInsOp3;
class LInsLd;
class LInsSti;
class LInsSk;
class LInsC;
class LInsP;
class LInsI;
class LInsN64;
class LInsJtbl;
|
2009-09-08 16:18:41 -07:00
|
|
|
|
|
|
|
// The core instruction word.  Every instruction ends with one of these (see
// the long layout comment above); the LIns* returned to clients points here.
class LIns
{
private:
    // SharedFields: fields shared by all LIns kinds.  The .inReg, .reg,
    // .inAr and .arIndex fields form a "reservation" that is used
    // temporarily during assembly to record information relating to
    // register allocation.  See class RegAlloc for more details.
    //
    // Note: all combinations of .inReg/.inAr are possible, ie. 0/0, 0/1,
    // 1/0, 1/1.
    struct SharedFields {
        uint32_t inReg:1;       // if 1, 'reg' is active
        Register reg:7;         // register assigned during assembly (valid iff inReg)
        uint32_t inAr:1;        // if 1, 'arIndex' is active
        uint32_t arIndex:15;    // index into stack frame; displ is -4*arIndex

        LOpcode opcode:8;       // instruction's opcode
    };

    union {
        SharedFields sharedFields;
        // Force sizeof(LIns)==8 and 8-byte alignment on 64-bit machines.
        // This is necessary because sizeof(SharedFields)==4 and we want all
        // instances of LIns to be pointer-aligned.
        void* wholeWord;
    };

    // Sets the opcode and clears the whole reservation in one go.
    inline void initSharedFields(LOpcode opcode)
    {
        // We must zero .inReg and .inAR, but zeroing the whole word is
        // easier.  Then we set the opcode.
        wholeWord = 0;
        sharedFields.opcode = opcode;
    }

    // LIns-to-LInsXYZ converters.  'this' is the *last* word of the
    // instruction, so these step backwards by sizeof(LInsXYZ) to reach the
    // start of the enclosing layout struct.
    inline LInsOp0*  toLInsOp0()  const;
    inline LInsOp1*  toLInsOp1()  const;
    inline LInsOp2*  toLInsOp2()  const;
    inline LInsOp3*  toLInsOp3()  const;
    inline LInsLd*   toLInsLd()   const;
    inline LInsSti*  toLInsSti()  const;
    inline LInsSk*   toLInsSk()   const;
    inline LInsC*    toLInsC()    const;
    inline LInsP*    toLInsP()    const;
    inline LInsI*    toLInsI()    const;
    inline LInsN64*  toLInsN64()  const;
    inline LInsJtbl* toLInsJtbl() const;

    // Static checks of LIns/LInsXYZ sizes and layouts (see the layout
    // comment above for what is checked and why).
    void staticSanityCheck();

public:
    // LIns initializers.
    inline void initLInsOp0(LOpcode opcode);
    inline void initLInsOp1(LOpcode opcode, LIns* oprnd1);
    inline void initLInsOp2(LOpcode opcode, LIns* oprnd1, LIns* oprnd2);
    inline void initLInsOp3(LOpcode opcode, LIns* oprnd1, LIns* oprnd2, LIns* oprnd3);
    inline void initLInsLd(LOpcode opcode, LIns* val, int32_t d, AccSet accSet);
    inline void initLInsSti(LOpcode opcode, LIns* val, LIns* base, int32_t d, AccSet accSet);
    inline void initLInsSk(LIns* prevLIns);
    // Nb: args[] must be allocated and initialised before being passed in;
    // initLInsC() just copies the pointer into the LInsC.
    inline void initLInsC(LOpcode opcode, LIns** args, const CallInfo* ci);
    inline void initLInsP(int32_t arg, int32_t kind);
    inline void initLInsI(LOpcode opcode, int32_t imm32);
    inline void initLInsN64(LOpcode opcode, int64_t imm64);
    inline void initLInsJtbl(LIns* index, uint32_t size, LIns** table);

    LOpcode opcode() const { return sharedFields.opcode; }

    // XXX: old reservation manipulating functions.  See bug 538924.
    // Replacement strategy:
    // - deprecated_markAsClear() --> clearReg() and/or clearArIndex()
    // - deprecated_hasKnownReg() --> isInReg()
    // - deprecated_getReg() --> getReg() after checking isInReg()
    //
    void deprecated_markAsClear() {
        sharedFields.inReg = 0;
        sharedFields.inAr = 0;
    }
    bool deprecated_hasKnownReg() {
        NanoAssert(isUsed());
        return isInReg();
    }
    Register deprecated_getReg() {
        NanoAssert(isUsed());
        // Falls back to the sentinel when no register is assigned.
        return ( isInReg() ? sharedFields.reg : deprecated_UnknownReg );
    }
    uint32_t deprecated_getArIndex() {
        NanoAssert(isUsed());
        return ( isInAr() ? sharedFields.arIndex : 0 );
    }

    // Reservation manipulation.
    // "Used" means the value currently has a register and/or stack slot.
    bool isUsed() {
        return isInReg() || isInAr();
    }
    bool isInReg() {
        return sharedFields.inReg;
    }
    // True if a register is assigned and it is one of those in 'allow'.
    bool isInRegMask(RegisterMask allow) {
        return isInReg() && (rmask(getReg()) & allow);
    }
    Register getReg() {
        NanoAssert(isInReg());
        return sharedFields.reg;
    }
    void setReg(Register r) {
        sharedFields.inReg = 1;
        sharedFields.reg = r;
    }
    void clearReg() {
        sharedFields.inReg = 0;
    }
    bool isInAr() {
        return sharedFields.inAr;
    }
    uint32_t getArIndex() {
        NanoAssert(isInAr());
        return sharedFields.arIndex;
    }
    void setArIndex(uint32_t arIndex) {
        sharedFields.inAr = 1;
        sharedFields.arIndex = arIndex;
    }
    void clearArIndex() {
        sharedFields.inAr = 0;
    }

    // For various instruction kinds.
    inline LIns* oprnd1() const;
    inline LIns* oprnd2() const;
    inline LIns* oprnd3() const;

    // For branches.
    inline LIns* getTarget() const;
    inline void setTarget(LIns* label);

    // For guards.
    inline GuardRecord* record() const;

    // For loads/stores.
    inline int32_t disp() const;
    inline AccSet accSet() const;

    // For LInsSk.
    inline LIns* prevLIns() const;

    // For LInsP.
    inline uint8_t paramArg() const;
    inline uint8_t paramKind() const;

    // For LInsI.
    inline int32_t imm32() const;

    // For LInsN64.
    inline int32_t imm64_0() const;
    inline int32_t imm64_1() const;
    inline uint64_t imm64() const;
    inline double imm64f() const;

    // For LIR_allocp.
    inline int32_t size() const;
    inline void setSize(int32_t nbytes);

    // For LInsC.
    inline LIns* arg(uint32_t i) const;     // right-to-left-order: arg(0) is rightmost
    inline uint32_t argc() const;
    inline LIns* callArgN(uint32_t n) const;
    inline const CallInfo* callInfo() const;

    // For LIR_jtbl.
    inline uint32_t getTableSize() const;
    inline LIns* getTarget(uint32_t index) const;
    inline void setTarget(uint32_t index, LIns* label) const;

    // isLInsXYZ() returns true if the instruction has the LInsXYZ form.
    // Note that there is some overlap with other predicates, eg.
    // isStore()==isLInsSti(), isCall()==isLInsC(), but that's ok; these
    // ones are used mostly to check that opcodes are appropriate for
    // instruction layouts, the others are used for non-debugging
    // purposes.
    bool isLInsOp0() const {
        NanoAssert(LRK_None != repKinds[opcode()]);
        return LRK_Op0 == repKinds[opcode()];
    }
    bool isLInsOp1() const {
        NanoAssert(LRK_None != repKinds[opcode()]);
        return LRK_Op1 == repKinds[opcode()];
    }
    bool isLInsOp2() const {
        NanoAssert(LRK_None != repKinds[opcode()]);
        return LRK_Op2 == repKinds[opcode()];
    }
    bool isLInsOp3() const {
        NanoAssert(LRK_None != repKinds[opcode()]);
        return LRK_Op3 == repKinds[opcode()];
    }
    bool isLInsLd() const {
        NanoAssert(LRK_None != repKinds[opcode()]);
        return LRK_Ld == repKinds[opcode()];
    }
    bool isLInsSti() const {
        NanoAssert(LRK_None != repKinds[opcode()]);
        return LRK_Sti == repKinds[opcode()];
    }
    bool isLInsSk() const {
        NanoAssert(LRK_None != repKinds[opcode()]);
        return LRK_Sk == repKinds[opcode()];
    }
    bool isLInsC() const {
        NanoAssert(LRK_None != repKinds[opcode()]);
        return LRK_C == repKinds[opcode()];
    }
    bool isLInsP() const {
        NanoAssert(LRK_None != repKinds[opcode()]);
        return LRK_P == repKinds[opcode()];
    }
    bool isLInsI() const {
        NanoAssert(LRK_None != repKinds[opcode()]);
        return LRK_I == repKinds[opcode()];
    }
    bool isLInsN64() const {
        NanoAssert(LRK_None != repKinds[opcode()]);
        return LRK_N64 == repKinds[opcode()];
    }
    bool isLInsJtbl() const {
        NanoAssert(LRK_None != repKinds[opcode()]);
        return LRK_Jtbl == repKinds[opcode()];
    }

    // LIns predicates.
    bool isop(LOpcode o) const {
        return opcode() == o;
    }
    bool isRet() const {
        return isRetOpcode(opcode());
    }
    bool isLive() const {
        return isop(LIR_livei) ||
#if defined NANOJIT_64BIT
               isop(LIR_liveq) ||
#endif
               isop(LIR_lived);
    }
    bool isCmp() const {
        LOpcode op = opcode();
        return isICmpOpcode(op) ||
#if defined NANOJIT_64BIT
               isQCmpOpcode(op) ||
#endif
               isFCmpOpcode(op);
    }
    bool isCall() const {
        return isop(LIR_calli) ||
#if defined NANOJIT_64BIT
               isop(LIR_callq) ||
#endif
               isop(LIR_calld);
    }
    bool isCmov() const {
        return isCmovOpcode(opcode());
    }
    bool isStore() const {
        return isLInsSti();
    }
    bool isLoad() const {
        return isLInsLd();
    }
    // Guards: exits and the overflow-checking arithmetic ops.
    bool isGuard() const {
        return isop(LIR_x) || isop(LIR_xf) || isop(LIR_xt) ||
               isop(LIR_xbarrier) || isop(LIR_xtbl) ||
               isop(LIR_addxovi) || isop(LIR_subxovi) || isop(LIR_mulxovi);
    }
    // True if the instruction is a 32-bit integer immediate.
    bool isconst() const {
        return isop(LIR_immi);
    }
    // True if the instruction is a 32-bit integer immediate and
    // has the value 'val' when treated as a 32-bit signed integer.
    bool isconstval(int32_t val) const {
        return isconst() && imm32()==val;
    }
#ifdef NANOJIT_64BIT
    // True if the instruction is a 64-bit integer immediate.
    bool isconstq() const {
        return isop(LIR_immq);
    }
#endif
    // True if the instruction is a pointer-sized integer immediate.
    bool isconstp() const
    {
#ifdef NANOJIT_64BIT
        return isconstq();
#else
        return isconst();
#endif
    }
    // True if the instruction is a 64-bit float immediate.
    bool isconstf() const {
        return isop(LIR_immd);
    }
    // True if the instruction is a 64-bit integer or float immediate.
    bool isconstqf() const {
        return
#ifdef NANOJIT_64BIT
            isconstq() ||
#endif
            isconstf();
    }
    // True if the instruction is any type of immediate.
    bool isImmAny() const {
        return isconst() || isconstqf();
    }

    bool isBranch() const {
        return isop(LIR_jt) || isop(LIR_jf) || isop(LIR_j) || isop(LIR_jtbl);
    }

    // Type of the value this instruction produces (from the retTypes[] table).
    LTy retType() const {
        return retTypes[opcode()];
    }
    bool isVoid() const {
        return retType() == LTy_Void;
    }
    bool isI32() const {
        return retType() == LTy_I32;
    }
#ifdef NANOJIT_64BIT
    bool isI64() const {
        return retType() == LTy_I64;
    }
#endif
    bool isF64() const {
        return retType() == LTy_F64;
    }
    // True if the result occupies 64 bits (integer where supported, or float).
    bool isN64() const {
        return
#ifdef NANOJIT_64BIT
            isI64() ||
#endif
            isF64();
    }
    bool isPtr() const {
#ifdef NANOJIT_64BIT
        return isI64();
#else
        return isI32();
#endif
    }

    // Return true if removal of 'ins' from a LIR fragment could
    // possibly change the behaviour of that fragment, even if any
    // value computed by 'ins' is not used later in the fragment.
    // In other words, can 'ins' possibly alter control flow or memory?
    // Note, this assumes that loads will never fault and hence cannot
    // affect the control flow.
    bool isStmt() {
        NanoAssert(!isop(LIR_skip));
        // All instructions with Void retType are statements, as are calls
        // to impure functions.
        if (isCall())
            return !callInfo()->_isPure;
        else
            return isVoid();
    }

    // The value of a pointer-sized immediate, as a pointer.
    inline void* constvalp() const
    {
#ifdef NANOJIT_64BIT
        return (void*)imm64();
#else
        return (void*)imm32();
#endif
    }
};
|
|
|
|
|
|
|
|
// Convenience typedefs for instruction handles and handle sequences.
typedef LIns* LInsp;
typedef SeqBuilder<LIns*> InsList;
typedef SeqBuilder<char*> StringList;
|
2009-09-08 16:18:41 -07:00
|
|
|
|
2009-08-04 08:33:14 -07:00
|
|
|
|
2009-06-30 21:18:55 -07:00
|
|
|
// 0-operand form. Used for LIR_start and LIR_label.
|
|
|
|
class LInsOp0
|
|
|
|
{
|
|
|
|
private:
|
|
|
|
friend class LIns;
|
2009-06-30 18:13:22 -07:00
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns ins;
|
2009-06-30 18:13:22 -07:00
|
|
|
|
2009-06-30 21:18:55 -07:00
|
|
|
public:
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns* getLIns() { return &ins; };
|
2009-06-30 21:18:55 -07:00
|
|
|
};
|
2009-06-30 18:13:22 -07:00
|
|
|
|
2010-04-21 18:13:17 -07:00
|
|
|
// 1-operand form. Used for LIR_reti, unary arithmetic/logic ops, etc.
|
2009-06-30 21:18:55 -07:00
|
|
|
class LInsOp1
|
|
|
|
{
|
|
|
|
private:
|
|
|
|
friend class LIns;
|
2009-06-30 18:13:22 -07:00
|
|
|
|
2009-06-30 21:18:55 -07:00
|
|
|
LIns* oprnd_1;
|
2009-06-30 18:13:22 -07:00
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns ins;
|
2009-06-30 18:13:22 -07:00
|
|
|
|
2009-06-30 21:18:55 -07:00
|
|
|
public:
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns* getLIns() { return &ins; };
|
2009-06-30 21:18:55 -07:00
|
|
|
};
|
2009-06-30 18:13:22 -07:00
|
|
|
|
2009-06-30 21:18:55 -07:00
|
|
|
// 2-operand form. Used for loads, guards, branches, comparisons, binary
|
|
|
|
// arithmetic/logic ops, etc.
|
|
|
|
class LInsOp2
|
|
|
|
{
|
|
|
|
private:
|
|
|
|
friend class LIns;
|
2009-06-30 18:13:22 -07:00
|
|
|
|
2009-06-30 21:18:55 -07:00
|
|
|
LIns* oprnd_2;
|
|
|
|
|
|
|
|
LIns* oprnd_1;
|
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns ins;
|
2009-06-30 21:18:55 -07:00
|
|
|
|
|
|
|
public:
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns* getLIns() { return &ins; };
|
2009-06-30 21:18:55 -07:00
|
|
|
};
|
|
|
|
|
2010-02-18 20:04:55 -08:00
|
|
|
// 3-operand form. Used for conditional moves and xov guards.
|
2009-07-21 17:28:25 -07:00
|
|
|
class LInsOp3
|
|
|
|
{
|
|
|
|
private:
|
|
|
|
friend class LIns;
|
|
|
|
|
|
|
|
LIns* oprnd_3;
|
|
|
|
|
|
|
|
LIns* oprnd_2;
|
|
|
|
|
|
|
|
LIns* oprnd_1;
|
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns ins;
|
2009-07-21 17:28:25 -07:00
|
|
|
|
|
|
|
public:
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns* getLIns() { return &ins; };
|
2009-07-21 17:28:25 -07:00
|
|
|
};
|
|
|
|
|
2009-07-06 16:26:12 -07:00
|
|
|
// Used for all loads.
|
|
|
|
class LInsLd
|
|
|
|
{
|
|
|
|
private:
|
|
|
|
friend class LIns;
|
|
|
|
|
2010-03-01 17:43:20 -08:00
|
|
|
// Nb: the LIR writer pipeline handles things if a displacement
|
|
|
|
// exceeds 16 bits. This is rare, but does happen occasionally. We
|
|
|
|
// could go to 24 bits but then it would happen so rarely that the
|
|
|
|
// handler code would be difficult to test and thus untrustworthy.
|
|
|
|
int16_t disp;
|
|
|
|
AccSet accSet;
|
2009-07-06 16:26:12 -07:00
|
|
|
|
|
|
|
LIns* oprnd_1;
|
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns ins;
|
2009-07-06 16:26:12 -07:00
|
|
|
|
|
|
|
public:
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns* getLIns() { return &ins; };
|
2009-07-06 16:26:12 -07:00
|
|
|
};
|
|
|
|
|
2010-04-21 18:13:17 -07:00
|
|
|
// Used for LIR_sti and LIR_stq.
|
2009-06-30 21:18:55 -07:00
|
|
|
class LInsSti
|
|
|
|
{
|
|
|
|
private:
|
|
|
|
friend class LIns;
|
|
|
|
|
2010-03-01 17:43:20 -08:00
|
|
|
int16_t disp;
|
|
|
|
AccSet accSet;
|
2009-06-30 21:18:55 -07:00
|
|
|
|
|
|
|
LIns* oprnd_2;
|
|
|
|
|
|
|
|
LIns* oprnd_1;
|
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns ins;
|
2009-06-30 21:18:55 -07:00
|
|
|
|
|
|
|
public:
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns* getLIns() { return &ins; };
|
2009-06-30 21:18:55 -07:00
|
|
|
};
|
|
|
|
|
|
|
|
// Used for LIR_skip.
|
|
|
|
class LInsSk
|
|
|
|
{
|
|
|
|
private:
|
|
|
|
friend class LIns;
|
|
|
|
|
|
|
|
LIns* prevLIns;
|
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns ins;
|
2009-06-30 21:18:55 -07:00
|
|
|
|
|
|
|
public:
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns* getLIns() { return &ins; };
|
2009-06-30 21:18:55 -07:00
|
|
|
};
|
|
|
|
|
|
|
|
// Used for all variants of LIR_call.
|
|
|
|
class LInsC
|
|
|
|
{
|
|
|
|
private:
|
|
|
|
friend class LIns;
|
|
|
|
|
2009-11-09 19:12:18 -08:00
|
|
|
// Arguments in reverse order, just like insCall() (ie. args[0] holds
|
|
|
|
// the rightmost arg). The array should be allocated by the same
|
|
|
|
// allocator as the LIR buffers, because it has the same lifetime.
|
|
|
|
LIns** args;
|
2009-06-30 21:18:55 -07:00
|
|
|
|
|
|
|
const CallInfo* ci;
|
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns ins;
|
2009-06-30 21:18:55 -07:00
|
|
|
|
|
|
|
public:
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns* getLIns() { return &ins; };
|
2009-06-30 21:18:55 -07:00
|
|
|
};
|
|
|
|
|
2010-04-16 16:20:24 -07:00
|
|
|
// Used for LIR_paramp.
|
2009-06-30 21:18:55 -07:00
|
|
|
class LInsP
|
|
|
|
{
|
|
|
|
private:
|
|
|
|
friend class LIns;
|
|
|
|
|
|
|
|
uintptr_t arg:8;
|
|
|
|
uintptr_t kind:8;
|
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns ins;
|
2009-06-30 21:18:55 -07:00
|
|
|
|
|
|
|
public:
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns* getLIns() { return &ins; };
|
2009-06-30 21:18:55 -07:00
|
|
|
};
|
|
|
|
|
2010-04-21 18:13:17 -07:00
|
|
|
// Used for LIR_immi and LIR_allocp.
|
2009-06-30 21:18:55 -07:00
|
|
|
class LInsI
|
|
|
|
{
|
|
|
|
private:
|
|
|
|
friend class LIns;
|
|
|
|
|
|
|
|
int32_t imm32;
|
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns ins;
|
2009-06-30 21:18:55 -07:00
|
|
|
|
|
|
|
public:
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns* getLIns() { return &ins; };
|
2009-06-30 21:18:55 -07:00
|
|
|
};
|
|
|
|
|
2010-04-16 16:20:24 -07:00
|
|
|
// Used for LIR_immq and LIR_immd.
|
2009-12-09 15:49:15 -08:00
|
|
|
class LInsN64
|
2009-06-30 21:18:55 -07:00
|
|
|
{
|
|
|
|
private:
|
|
|
|
friend class LIns;
|
|
|
|
|
|
|
|
int32_t imm64_0;
|
|
|
|
|
|
|
|
int32_t imm64_1;
|
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns ins;
|
2009-06-30 21:18:55 -07:00
|
|
|
|
|
|
|
public:
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns* getLIns() { return &ins; };
|
2009-06-30 21:18:55 -07:00
|
|
|
};
|
|
|
|
|
2010-01-21 15:10:59 -08:00
|
|
|
// Used for LIR_jtbl.  'oprnd_1' must be a uint32_t index in
// the range 0 <= index < size; no range check is performed.
// 'table' is an array of labels.
class LInsJtbl
{
private:
    friend class LIns;

    uint32_t    size;       // number of entries in table
    LIns**      table;      // pointer to table[size] with same lifetime as this LInsJtbl
    LIns*       oprnd_1;    // uint32_t index expression

    // Must be last (see the layout comment above).
    LIns        ins;

public:
    LIns* getLIns() { return &ins; }
};
|
|
|
|
|
2010-02-11 17:30:16 -08:00
|
|
|
// Used only as a placeholder for OP___ macros for unused opcodes in
// LIRopcode.tbl.
class LInsNone
{
};
|
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
// LIns-to-LInsXYZ converters: the LIns word is the final word of every
// instruction, so the layout struct starts at the address just past the LIns
// (this+1) minus the layout struct's size.
LInsOp0* LIns::toLInsOp0() const { return (LInsOp0*)( uintptr_t(this+1) - sizeof(LInsOp0) ); }
LInsOp1* LIns::toLInsOp1() const { return (LInsOp1*)( uintptr_t(this+1) - sizeof(LInsOp1) ); }
LInsOp2* LIns::toLInsOp2() const { return (LInsOp2*)( uintptr_t(this+1) - sizeof(LInsOp2) ); }
LInsOp3* LIns::toLInsOp3() const { return (LInsOp3*)( uintptr_t(this+1) - sizeof(LInsOp3) ); }
LInsLd*  LIns::toLInsLd()  const { return (LInsLd* )( uintptr_t(this+1) - sizeof(LInsLd ) ); }
LInsSti* LIns::toLInsSti() const { return (LInsSti*)( uintptr_t(this+1) - sizeof(LInsSti) ); }
LInsSk*  LIns::toLInsSk()  const { return (LInsSk* )( uintptr_t(this+1) - sizeof(LInsSk ) ); }
LInsC*   LIns::toLInsC()   const { return (LInsC*  )( uintptr_t(this+1) - sizeof(LInsC  ) ); }
LInsP*   LIns::toLInsP()   const { return (LInsP*  )( uintptr_t(this+1) - sizeof(LInsP  ) ); }
LInsI*   LIns::toLInsI()   const { return (LInsI*  )( uintptr_t(this+1) - sizeof(LInsI  ) ); }
LInsN64* LIns::toLInsN64() const { return (LInsN64*)( uintptr_t(this+1) - sizeof(LInsN64) ); }
LInsJtbl* LIns::toLInsJtbl() const { return (LInsJtbl*)( uintptr_t(this+1) - sizeof(LInsJtbl) ); }
|
2009-09-08 16:18:41 -07:00
|
|
|
|
|
|
|
// Initializes this word as a 0-operand instruction (LInsOp0 layout).
void LIns::initLInsOp0(LOpcode opcode) {
    initSharedFields(opcode);
    NanoAssert(isLInsOp0());
}
|
|
|
|
// Initializes this word as a 1-operand instruction (LInsOp1 layout).
void LIns::initLInsOp1(LOpcode opcode, LIns* oprnd1) {
    initSharedFields(opcode);
    LInsOp1* rep = toLInsOp1();
    rep->oprnd_1 = oprnd1;
    NanoAssert(isLInsOp1());
}
|
|
|
|
// Initializes this word as a 2-operand instruction (LInsOp2 layout).
void LIns::initLInsOp2(LOpcode opcode, LIns* oprnd1, LIns* oprnd2) {
    initSharedFields(opcode);
    LInsOp2* rep = toLInsOp2();
    rep->oprnd_1 = oprnd1;
    rep->oprnd_2 = oprnd2;
    NanoAssert(isLInsOp2());
}
|
|
|
|
// Initializes this word as a 3-operand instruction (LInsOp3 layout).
void LIns::initLInsOp3(LOpcode opcode, LIns* oprnd1, LIns* oprnd2, LIns* oprnd3) {
    initSharedFields(opcode);
    LInsOp3* rep = toLInsOp3();
    rep->oprnd_1 = oprnd1;
    rep->oprnd_2 = oprnd2;
    rep->oprnd_3 = oprnd3;
    NanoAssert(isLInsOp3());
}
|
2010-03-01 17:43:20 -08:00
|
|
|
// Initializes this word as a load (LInsLd layout).  'd' must fit in 16 bits;
// the writer pipeline guarantees this.
void LIns::initLInsLd(LOpcode opcode, LIns* val, int32_t d, AccSet accSet) {
    initSharedFields(opcode);
    LInsLd* rep = toLInsLd();
    rep->oprnd_1 = val;
    NanoAssert(d == int16_t(d));
    rep->disp = int16_t(d);
    rep->accSet = accSet;
    NanoAssert(isLInsLd());
}
|
2010-03-01 17:43:20 -08:00
|
|
|
// Initializes this word as a store (LInsSti layout).  'd' must fit in 16
// bits; the writer pipeline guarantees this.
void LIns::initLInsSti(LOpcode opcode, LIns* val, LIns* base, int32_t d, AccSet accSet) {
    initSharedFields(opcode);
    LInsSti* rep = toLInsSti();
    rep->oprnd_1 = val;
    rep->oprnd_2 = base;
    NanoAssert(d == int16_t(d));
    rep->disp = int16_t(d);
    rep->accSet = accSet;
    NanoAssert(isLInsSti());
}
|
|
|
|
// Initializes this word as a LIR_skip linking back to 'prevLIns'.
void LIns::initLInsSk(LIns* prevLIns) {
    initSharedFields(LIR_skip);
    LInsSk* rep = toLInsSk();
    rep->prevLIns = prevLIns;
    NanoAssert(isLInsSk());
}
|
2009-11-09 19:12:18 -08:00
|
|
|
// Initializes this word as a call (LInsC layout).  'args' must already be
// allocated and filled in (reverse order); only the pointer is stored.
void LIns::initLInsC(LOpcode opcode, LIns** args, const CallInfo* ci) {
    initSharedFields(opcode);
    LInsC* rep = toLInsC();
    rep->args = args;
    rep->ci = ci;
    NanoAssert(isLInsC());
}
|
|
|
|
void LIns::initLInsP(int32_t arg, int32_t kind) {
|
2010-04-20 17:51:50 -07:00
|
|
|
initSharedFields(LIR_paramp);
|
2009-09-08 16:18:41 -07:00
|
|
|
NanoAssert(isU8(arg) && isU8(kind));
|
|
|
|
toLInsP()->arg = arg;
|
|
|
|
toLInsP()->kind = kind;
|
|
|
|
NanoAssert(isLInsP());
|
|
|
|
}
|
|
|
|
void LIns::initLInsI(LOpcode opcode, int32_t imm32) {
|
2010-04-20 17:51:50 -07:00
|
|
|
initSharedFields(opcode);
|
2009-09-08 16:18:41 -07:00
|
|
|
toLInsI()->imm32 = imm32;
|
|
|
|
NanoAssert(isLInsI());
|
|
|
|
}
|
2009-12-09 15:49:15 -08:00
|
|
|
void LIns::initLInsN64(LOpcode opcode, int64_t imm64) {
|
2010-04-20 17:51:50 -07:00
|
|
|
initSharedFields(opcode);
|
2009-12-09 15:49:15 -08:00
|
|
|
toLInsN64()->imm64_0 = int32_t(imm64);
|
|
|
|
toLInsN64()->imm64_1 = int32_t(imm64 >> 32);
|
|
|
|
NanoAssert(isLInsN64());
|
2009-09-08 16:18:41 -07:00
|
|
|
}
|
2009-11-11 11:38:12 -08:00
|
|
|
void LIns::initLInsJtbl(LIns* index, uint32_t size, LIns** table) {
|
2010-04-20 17:51:50 -07:00
|
|
|
initSharedFields(LIR_jtbl);
|
2009-11-11 11:38:12 -08:00
|
|
|
toLInsJtbl()->oprnd_1 = index;
|
|
|
|
toLInsJtbl()->table = table;
|
|
|
|
toLInsJtbl()->size = size;
|
|
|
|
NanoAssert(isLInsJtbl());
|
|
|
|
}
|
2009-05-17 23:15:24 -07:00
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
// First operand: valid for every instruction form that carries operands.
// All such forms place oprnd_1 at a common offset, so the Op2 view is used
// to read it regardless of the actual kind.
LIns* LIns::oprnd1() const {
    NanoAssert(isLInsOp1() || isLInsOp2() || isLInsOp3() || isLInsLd() || isLInsSti() || isLInsJtbl());
    return toLInsOp2()->oprnd_1;
}

// Second operand: valid for two/three-operand forms and stores.
LIns* LIns::oprnd2() const {
    NanoAssert(isLInsOp2() || isLInsOp3() || isLInsSti());
    return toLInsOp2()->oprnd_2;
}

// Third operand: only three-operand instructions have one.
LIns* LIns::oprnd3() const {
    NanoAssert(isLInsOp3());
    return toLInsOp3()->oprnd_3;
}

// Branch target for ordinary (non-jump-table) branches; it lives in oprnd_2.
LIns* LIns::getTarget() const {
    NanoAssert(isBranch() && !isop(LIR_jtbl));
    return oprnd2();
}
|
2008-10-08 15:08:33 -07:00
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
void LIns::setTarget(LIns* label) {
|
|
|
|
NanoAssert(label && label->isop(LIR_label));
|
2009-11-11 11:38:12 -08:00
|
|
|
NanoAssert(isBranch() && !isop(LIR_jtbl));
|
2009-09-08 16:18:41 -07:00
|
|
|
toLInsOp2()->oprnd_2 = label;
|
|
|
|
}
|
2009-05-09 11:38:34 -07:00
|
|
|
|
2009-11-11 11:38:12 -08:00
|
|
|
LIns* LIns::getTarget(uint32_t index) const {
|
|
|
|
NanoAssert(isop(LIR_jtbl));
|
|
|
|
NanoAssert(index < toLInsJtbl()->size);
|
|
|
|
return toLInsJtbl()->table[index];
|
|
|
|
}
|
|
|
|
|
|
|
|
void LIns::setTarget(uint32_t index, LIns* label) const {
|
|
|
|
NanoAssert(label && label->isop(LIR_label));
|
|
|
|
NanoAssert(isop(LIR_jtbl));
|
|
|
|
NanoAssert(index < toLInsJtbl()->size);
|
|
|
|
toLInsJtbl()->table[index] = label;
|
|
|
|
}
|
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
// Return the GuardRecord attached to a guard instruction. Plain exit guards
// keep it in their second operand; overflow guards (which consume two value
// operands) keep it in their third.
GuardRecord *LIns::record() const {
    NanoAssert(isGuard());
    LOpcode op = opcode();
    if (op == LIR_x || op == LIR_xt || op == LIR_xf ||
        op == LIR_xtbl || op == LIR_xbarrier)
    {
        return (GuardRecord*)oprnd2();
    }
    if (op == LIR_addxovi || op == LIR_subxovi || op == LIR_mulxovi) {
        return (GuardRecord*)oprnd3();
    }
    // isGuard() should have excluded everything else.
    NanoAssert(0);
    return NULL;
}
|
2009-06-30 21:18:55 -07:00
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
int32_t LIns::disp() const {
|
|
|
|
if (isLInsSti()) {
|
|
|
|
return toLInsSti()->disp;
|
|
|
|
} else {
|
|
|
|
NanoAssert(isLInsLd());
|
|
|
|
return toLInsLd()->disp;
|
2008-10-13 13:29:18 -07:00
|
|
|
}
|
2009-09-08 16:18:41 -07:00
|
|
|
}
|
2008-06-18 21:11:15 -07:00
|
|
|
|
2010-03-05 14:14:31 -08:00
|
|
|
AccSet LIns::accSet() const {
|
2010-03-01 17:43:20 -08:00
|
|
|
if (isLInsSti()) {
|
|
|
|
return toLInsSti()->accSet;
|
|
|
|
} else {
|
|
|
|
NanoAssert(isLInsLd());
|
|
|
|
return toLInsLd()->accSet;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns* LIns::prevLIns() const {
|
|
|
|
NanoAssert(isLInsSk());
|
|
|
|
return toLInsSk()->prevLIns;
|
|
|
|
}
|
2009-07-10 12:58:34 -07:00
|
|
|
|
2010-04-16 16:20:24 -07:00
|
|
|
inline uint8_t LIns::paramArg() const { NanoAssert(isop(LIR_paramp)); return toLInsP()->arg; }
|
|
|
|
inline uint8_t LIns::paramKind() const { NanoAssert(isop(LIR_paramp)); return toLInsP()->kind; }
|
2009-07-10 12:58:34 -07:00
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
inline int32_t LIns::imm32() const { NanoAssert(isconst()); return toLInsI()->imm32; }
|
2009-07-14 02:22:41 -07:00
|
|
|
|
2010-03-28 16:49:42 -07:00
|
|
|
inline int32_t LIns::imm64_0() const { NanoAssert(isconstqf()); return toLInsN64()->imm64_0; }
|
|
|
|
inline int32_t LIns::imm64_1() const { NanoAssert(isconstqf()); return toLInsN64()->imm64_1; }
|
2009-09-08 16:18:41 -07:00
|
|
|
uint64_t LIns::imm64() const {
|
2010-03-28 16:49:42 -07:00
|
|
|
NanoAssert(isconstqf());
|
2009-12-09 15:49:15 -08:00
|
|
|
return (uint64_t(toLInsN64()->imm64_1) << 32) | uint32_t(toLInsN64()->imm64_0);
|
2009-09-08 16:18:41 -07:00
|
|
|
}
|
|
|
|
double LIns::imm64f() const {
|
2010-03-28 16:49:42 -07:00
|
|
|
NanoAssert(isconstf());
|
2009-09-08 16:18:41 -07:00
|
|
|
union {
|
|
|
|
double f;
|
|
|
|
uint64_t q;
|
|
|
|
} u;
|
|
|
|
u.q = imm64();
|
|
|
|
return u.f;
|
|
|
|
}
|
2009-08-27 17:52:46 -07:00
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
int32_t LIns::size() const {
|
2010-04-16 16:20:24 -07:00
|
|
|
NanoAssert(isop(LIR_allocp));
|
2009-09-08 16:18:41 -07:00
|
|
|
return toLInsI()->imm32 << 2;
|
|
|
|
}
|
2009-07-13 11:50:42 -07:00
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
void LIns::setSize(int32_t nbytes) {
|
2010-04-16 16:20:24 -07:00
|
|
|
NanoAssert(isop(LIR_allocp));
|
2009-09-08 16:18:41 -07:00
|
|
|
NanoAssert(nbytes > 0);
|
|
|
|
toLInsI()->imm32 = (nbytes+3)>>2; // # of required 32bit words
|
|
|
|
}
|
2008-06-18 21:11:15 -07:00
|
|
|
|
2009-11-09 19:12:18 -08:00
|
|
|
// Index args in reverse order, i.e. arg(0) returns the rightmost arg.
|
2009-09-08 16:18:41 -07:00
|
|
|
// Nb: this must be kept in sync with insCall().
|
|
|
|
LIns* LIns::arg(uint32_t i) const
|
|
|
|
{
|
|
|
|
NanoAssert(isCall());
|
2009-11-09 19:12:18 -08:00
|
|
|
NanoAssert(i < callInfo()->count_args());
|
|
|
|
return toLInsC()->args[i]; // args[] is in right-to-left order as well
|
2009-09-08 16:18:41 -07:00
|
|
|
}
|
2008-07-16 14:21:31 -07:00
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
uint32_t LIns::argc() const {
|
2009-11-09 19:12:18 -08:00
|
|
|
return callInfo()->count_args();
|
2009-09-08 16:18:41 -07:00
|
|
|
}
|
2009-06-30 21:18:55 -07:00
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
LIns* LIns::callArgN(uint32_t n) const
|
|
|
|
{
|
|
|
|
return arg(argc()-n-1);
|
|
|
|
}
|
2008-06-18 21:11:15 -07:00
|
|
|
|
2009-09-08 16:18:41 -07:00
|
|
|
const CallInfo* LIns::callInfo() const
|
|
|
|
{
|
|
|
|
NanoAssert(isCall());
|
|
|
|
return toLInsC()->ci;
|
2009-08-30 00:11:12 -07:00
|
|
|
}
|
|
|
|
|
2009-11-11 11:38:12 -08:00
|
|
|
uint32_t LIns::getTableSize() const
|
|
|
|
{
|
|
|
|
NanoAssert(isLInsJtbl());
|
|
|
|
return toLInsJtbl()->size;
|
|
|
|
}
|
|
|
|
|
2009-09-10 16:29:36 -07:00
|
|
|
// Base class of the LIR writer pipeline. Each virtual insXyz() simply
// forwards to the next writer in the chain ('out'); subclasses override
// individual methods to filter or transform instructions as they pass
// through. The non-virtual helpers at the bottom are convenience wrappers
// built on the virtual interface.
class LirWriter
{
public:
    // Next writer in the pipeline; every default implementation delegates here.
    LirWriter *out;

    LirWriter(LirWriter* out)
        : out(out) {}
    virtual ~LirWriter() {}

    // 0/1/2/3-operand instruction insertion; default behavior is pass-through.
    virtual LInsp ins0(LOpcode v) {
        return out->ins0(v);
    }
    virtual LInsp ins1(LOpcode v, LIns* a) {
        return out->ins1(v, a);
    }
    virtual LInsp ins2(LOpcode v, LIns* a, LIns* b) {
        return out->ins2(v, a, b);
    }
    virtual LInsp ins3(LOpcode v, LIns* a, LIns* b, LIns* c) {
        return out->ins3(v, a, b, c);
    }
    // Guard insertion: a conditional exit carrying a GuardRecord.
    virtual LInsp insGuard(LOpcode v, LIns *c, GuardRecord *gr) {
        return out->insGuard(v, c, gr);
    }
    // Overflow-checking arithmetic guard (two value operands + GuardRecord).
    virtual LInsp insGuardXov(LOpcode v, LIns *a, LIns* b, GuardRecord *gr) {
        return out->insGuardXov(v, a, b, gr);
    }
    virtual LInsp insBranch(LOpcode v, LInsp condition, LInsp to) {
        return out->insBranch(v, condition, to);
    }
    // arg: 0=first, 1=second, ...
    // kind: 0=arg 1=saved-reg
    virtual LInsp insParam(int32_t arg, int32_t kind) {
        return out->insParam(arg, kind);
    }
    virtual LInsp insImm(int32_t imm) {
        return out->insImm(imm);
    }
#ifdef NANOJIT_64BIT
    virtual LInsp insImmq(uint64_t imm) {
        return out->insImmq(imm);
    }
#endif
    virtual LInsp insImmf(double d) {
        return out->insImmf(d);
    }
    virtual LInsp insLoad(LOpcode op, LIns* base, int32_t d, AccSet accSet) {
        return out->insLoad(op, base, d, accSet);
    }
    virtual LInsp insStore(LOpcode op, LIns* value, LIns* base, int32_t d, AccSet accSet) {
        return out->insStore(op, value, base, d, accSet);
    }
    // args[] is in reverse order, ie. args[0] holds the rightmost arg.
    virtual LInsp insCall(const CallInfo *call, LInsp args[]) {
        return out->insCall(call, args);
    }
    virtual LInsp insAlloc(int32_t size) {
        // A zero-sized allocation is always a caller bug.
        NanoAssert(size != 0);
        return out->insAlloc(size);
    }
    virtual LInsp insJtbl(LIns* index, uint32_t size) {
        return out->insJtbl(index, size);
    }

    // convenience functions

    // Inserts a conditional to execute and branches to execute if
    // the condition is true and false respectively.
    LIns* ins_choose(LIns* cond, LIns* iftrue, LIns* iffalse, bool use_cmov);

    // Inserts an integer comparison to 0
    LIns* ins_eq0(LIns* oprnd1) {
        return ins2i(LIR_eqi, oprnd1, 0);
    }

    // Inserts a pointer comparison to 0
    LIns* ins_peq0(LIns* oprnd1) {
        return ins2(LIR_eqp, oprnd1, insImmWord(0));
    }

    // Inserts a binary operation where the second operand is an
    // integer immediate.
    LIns* ins2i(LOpcode v, LIns* oprnd1, int32_t imm) {
        return ins2(v, oprnd1, insImm(imm));
    }

#if NJ_SOFTFLOAT_SUPPORTED
    // Joins two 32-bit halves into a 64-bit double (softfloat builds only).
    LIns* qjoin(LInsp lo, LInsp hi) {
        return ins2(LIR_ii2d, lo, hi);
    }
#endif

    // Emit a pointer-sized immediate holding 'ptr'.
    LIns* insImmPtr(const void *ptr) {
#ifdef NANOJIT_64BIT
        return insImmq((uint64_t)ptr);
#else
        return insImm((int32_t)ptr);
#endif
    }

    // Emit a word-sized (pointer-sized) immediate holding 'value'.
    LIns* insImmWord(intptr_t value) {
#ifdef NANOJIT_64BIT
        return insImmq(value);
#else
        return insImm(value);
#endif
    }

    // Sign-extend integers to native integers. On 32-bit this is a no-op.
    LIns* ins_i2p(LIns* intIns) {
#ifdef NANOJIT_64BIT
        return ins1(LIR_i2q, intIns);
#else
        return intIns;
#endif
    }

    // Zero-extend integers to native integers. On 32-bit this is a no-op.
    LIns* ins_u2p(LIns* uintIns) {
#ifdef NANOJIT_64BIT
        return ins1(LIR_ui2uq, uintIns);
#else
        return uintIns;
#endif
    }

    // Chooses LIR_sti or LIR_stq based on size of value.
    LIns* insStorei(LIns* value, LIns* base, int32_t d, AccSet accSet);
};
|
2008-06-18 21:11:15 -07:00
|
|
|
|
2009-05-21 18:44:37 -07:00
|
|
|
|
2008-06-18 21:11:15 -07:00
|
|
|
#ifdef NJ_VERBOSE
|
2009-07-10 12:58:34 -07:00
|
|
|
extern const char* lirNames[];
|
2008-06-18 21:11:15 -07:00
|
|
|
|
2010-03-17 16:42:30 -07:00
|
|
|
// Maps address ranges to meaningful names.
|
|
|
|
// Maps address ranges to human-readable names, for verbose/debug output.
class AddrNameMap
{
    Allocator& allocator;

    // One named address range. 'size' and 'align' are packed into bit-fields;
    // 'align' is presumably a log2/shift amount -- confirm against addAddrRange().
    class Entry
    {
    public:
        Entry(int) : name(0), size(0), align(0) {}
        Entry(char *n, size_t s, size_t a) : name(n), size(s), align(a) {}
        char* name;
        size_t size:29, align:3;
    };

    TreeMap<const void*, Entry*> names;     // maps code regions to names

public:
    AddrNameMap(Allocator& allocator);

    // Register the range starting at 'p' under 'name'.
    void addAddrRange(const void *p, size_t size, size_t align, const char *name);

    // Look up 'p'; on success yields the range's name and p's offset within it.
    void lookupAddr(void *p, char*& name, int32_t& offset);
};
|
|
|
|
|
2010-03-17 16:42:30 -07:00
|
|
|
// Maps LIR instructions to meaningful names.
|
2009-08-04 11:18:38 -07:00
|
|
|
// Maps LIR instructions to meaningful names for verbose output. Generated
// names get numeric suffixes from the per-key counters below.
class LirNameMap
{
private:
    Allocator& alloc;

    // A HashMap specialized as an occurrence counter: add(k) bumps and
    // returns the count for key k.
    template <class Key>
    class CountMap: public HashMap<Key, int> {
    public:
        CountMap(Allocator& alloc) : HashMap<Key, int>(alloc) {}
        // Increment k's count (starting at 1) and return the new value.
        int add(Key k) {
            int c = 1;
            if (containsKey(k)) {
                c = 1+get(k);
            }
            put(k,c);
            return c;
        }
    };

    CountMap<int> lircounts;                // per-opcode counters
    CountMap<const CallInfo *> funccounts;  // per-call-target counters
    CountMap<const char *> namecounts;      // per-base-name counters

    void addNameWithSuffix(LInsp i, const char *s, int suffix, bool ignoreOneSuffix);

    // Wrapper for a name string; the int constructor is the "empty" sentinel form.
    class Entry
    {
    public:
        Entry(int) : name(0) {}
        Entry(char* n) : name(n) {}
        char* name;
    };

    HashMap<LInsp, Entry*> names;   // instruction -> assigned name

public:
    LirNameMap(Allocator& alloc)
        : alloc(alloc),
        lircounts(alloc),
        funccounts(alloc),
        namecounts(alloc),
        names(alloc)
    {}

    void addName(LInsp ins, const char *s);  // gives 'ins' a special name
    const char* createName(LInsp ins);       // gives 'ins' a generic name
    const char* lookupName(LInsp ins);
};
|
|
|
|
|
|
|
|
// We use big buffers for cases where we need to fit a whole instruction,
|
|
|
|
// and smaller buffers for all the others. These should easily be long
|
|
|
|
// enough, but for safety the formatXyz() functions check and won't exceed
|
|
|
|
// those limits.
|
|
|
|
// Fixed-size scratch buffer large enough for a whole formatted instruction.
class InsBuf {
public:
    static const size_t len = 1000;
    char buf[len];
};

// Smaller fixed-size scratch buffer for formatted operand references.
class RefBuf {
public:
    static const size_t len = 200;
    char buf[len];
};
|
|
|
|
|
|
|
|
// Formats LIR instructions, operand references, and addresses as text for
// verbose output, using the name maps it owns to produce readable names.
class LInsPrinter
{
private:
    Allocator& alloc;

    void formatImm(RefBuf* buf, int32_t c);
    void formatImmq(RefBuf* buf, uint64_t c);
    void formatGuard(InsBuf* buf, LInsp ins);
    void formatGuardXov(InsBuf* buf, LInsp ins);

public:
    LInsPrinter(Allocator& alloc)
        : alloc(alloc)
    {
        // Both maps are placement-allocated from the same arena allocator,
        // so they live as long as the allocator does.
        addrNameMap = new (alloc) AddrNameMap(alloc);
        lirNameMap = new (alloc) LirNameMap(alloc);
    }

    char *formatAddr(RefBuf* buf, void* p);
    char *formatRef(RefBuf* buf, LInsp ref);
    char *formatIns(InsBuf* buf, LInsp ins);
    char *formatAccSet(RefBuf* buf, AccSet accSet);

    AddrNameMap* addrNameMap;
    LirNameMap* lirNameMap;
};
|
2008-06-18 21:11:15 -07:00
|
|
|
|
2008-07-08 17:09:53 -07:00
|
|
|
|
2009-07-10 12:58:34 -07:00
|
|
|
// Pipeline stage that records each instruction passing through it and
// prints accumulated instructions (via flush()) to the log. Control-flow
// and call instructions force a flush so output stays in program order.
class VerboseWriter : public LirWriter
{
    InsList code;               // instructions buffered since the last flush
    LInsPrinter* printer;
    LogControl* logc;
    const char* const prefix;   // printed at the start of every output line
    bool const always_flush;    // if set, print every instruction immediately
public:
    VerboseWriter(Allocator& alloc, LirWriter *out, LInsPrinter* printer, LogControl* logc,
                  const char* prefix = "", bool always_flush = false)
        : LirWriter(out), code(alloc), printer(printer), logc(logc), prefix(prefix), always_flush(always_flush)
    {}

    // Buffer 'i' (if non-null) for later printing; returns it unchanged.
    LInsp add(LInsp i) {
        if (i) {
            code.add(i);
            if (always_flush)
                flush();
        }
        return i;
    }

    // Buffer 'i' and immediately flush if it was accepted.
    LInsp add_flush(LInsp i) {
        if ((i = add(i)) != 0)
            flush();
        return i;
    }

    // Print and clear all buffered instructions.
    void flush()
    {
        if (!code.isEmpty()) {
            InsBuf b;
            int32_t count = 0;
            for (Seq<LIns*>* p = code.get(); p != NULL; p = p->tail) {
                logc->printf("%s %s\n", prefix, printer->formatIns(&b, p->head));
                count++;
            }
            code.clear();
            // Blank separator line after multi-instruction batches.
            if (count > 1)
                logc->printf("\n");
        }
    }

    LIns* insGuard(LOpcode op, LInsp cond, GuardRecord *gr) {
        return add_flush(out->insGuard(op,cond,gr));
    }

    LIns* insGuardXov(LOpcode op, LInsp a, LInsp b, GuardRecord *gr) {
        return add_flush(out->insGuardXov(op,a,b,gr));
    }

    LIns* insBranch(LOpcode v, LInsp condition, LInsp to) {
        return add_flush(out->insBranch(v, condition, to));
    }

    LIns* insJtbl(LIns* index, uint32_t size) {
        return add_flush(out->insJtbl(index, size));
    }

    LIns* ins0(LOpcode v) {
        // Flush before labels/starts so the buffered code stays attached to
        // the block it belongs to.
        if (v == LIR_label || v == LIR_start) {
            flush();
        }
        return add(out->ins0(v));
    }

    LIns* ins1(LOpcode v, LInsp a) {
        // Returns end a fragment, so flush eagerly.
        return isRetOpcode(v) ? add_flush(out->ins1(v, a)) : add(out->ins1(v, a));
    }
    LIns* ins2(LOpcode v, LInsp a, LInsp b) {
        return add(out->ins2(v, a, b));
    }
    LIns* ins3(LOpcode v, LInsp a, LInsp b, LInsp c) {
        return add(out->ins3(v, a, b, c));
    }
    LIns* insCall(const CallInfo *call, LInsp args[]) {
        return add_flush(out->insCall(call, args));
    }
    LIns* insParam(int32_t i, int32_t kind) {
        return add(out->insParam(i, kind));
    }
    LIns* insLoad(LOpcode v, LInsp base, int32_t disp, AccSet accSet) {
        return add(out->insLoad(v, base, disp, accSet));
    }
    LIns* insStore(LOpcode op, LInsp v, LInsp b, int32_t d, AccSet accSet) {
        return add(out->insStore(op, v, b, d, accSet));
    }
    LIns* insAlloc(int32_t size) {
        return add(out->insAlloc(size));
    }
    LIns* insImm(int32_t imm) {
        return add(out->insImm(imm));
    }
#ifdef NANOJIT_64BIT
    LIns* insImmq(uint64_t imm) {
        return add(out->insImmq(imm));
    }
#endif
    LIns* insImmf(double d) {
        return add(out->insImmf(d));
    }
};
|
|
|
|
|
2008-06-18 21:11:15 -07:00
|
|
|
#endif
|
|
|
|
|
2009-07-10 12:58:34 -07:00
|
|
|
// Pipeline stage that rewrites expressions as they are emitted; overrides
// the instruction-forming methods, presumably to fold/simplify them --
// definitions live elsewhere.
class ExprFilter: public LirWriter
{
public:
    ExprFilter(LirWriter *out) : LirWriter(out) {}
    LIns* ins1(LOpcode v, LIns* a);
    LIns* ins2(LOpcode v, LIns* a, LIns* b);
    LIns* ins3(LOpcode v, LIns* a, LIns* b, LIns* c);
    LIns* insGuard(LOpcode, LIns *cond, GuardRecord *);
    LIns* insGuardXov(LOpcode, LIns* a, LIns* b, GuardRecord *);
    LIns* insBranch(LOpcode, LIns *cond, LIns *target);
    LIns* insLoad(LOpcode op, LInsp base, int32_t off, AccSet accSet);
};
|
|
|
|
|
2009-11-04 14:40:33 -08:00
|
|
|
// Hash-table bucket categories used by LInsHashSet; one table per kind.
enum LInsHashKind {
    // We divide instruction kinds into groups for the use of LInsHashSet.
    // LIns0 isn't present because we don't need to record any 0-ary
    // instructions.
    LInsImm  = 0,
    LInsImmq = 1,   // only occurs on 64-bit platforms
    LInsImmf = 2,
    LIns1    = 3,
    LIns2    = 4,
    LIns3    = 5,
    LInsCall = 6,

    // Loads are special. We group them by access region: one table for
    // each region, and then a catch-all table for any loads marked with
    // multiple regions. This arrangement makes the removal of
    // invalidated loads fast -- eg. we can invalidate all STACK loads by
    // just clearing the LInsLoadStack table. The disadvantage is that
    // loads marked with multiple regions must be invalidated
    // conservatively, eg. if any intervening stores occur. But loads
    // marked with multiple regions should be rare.
    LInsLoadReadOnly = 7,
    LInsLoadStack    = 8,
    LInsLoadRStack   = 9,
    LInsLoadOther    = 10,
    LInsLoadMultiple = 11,

    LInsFirst = 0,
    LInsLast = 11,
    // need a value after "last" to outsmart compilers that will insist last+1 is impossible
    LInsInvalid = 12
};
// Advance to the next hash kind (used when iterating over all tables).
#define nextKind(kind) LInsHashKind(kind+1)
|
|
|
|
|
2009-07-10 12:58:34 -07:00
|
|
|
// Hash set of previously-emitted instructions, partitioned by LInsHashKind.
// Used for common-subexpression elimination: find an existing equivalent
// instruction before emitting a new one.
class LInsHashSet
{
    // Must be a power of 2.
    // Don't start too small, or we'll waste time growing and rehashing.
    // Don't start too large, will waste memory.
    static const uint32_t kInitialCap[LInsLast + 1];

    // There is one list for each instruction kind. This lets us size the
    // lists appropriately (some instructions are more common than others).
    // It also lets us have kind-specific find/add/grow functions, which
    // are faster than generic versions.
    LInsp *m_list[LInsLast + 1];
    uint32_t m_cap[LInsLast + 1];
    uint32_t m_used[LInsLast + 1];
    // Per-kind member-function pointer to the matching private findXyz(),
    // used when rehashing.
    typedef uint32_t (LInsHashSet::*find_t)(LInsp);
    find_t m_find[LInsLast + 1];

    Allocator& alloc;

    // Hash functions, one per payload shape.
    static uint32_t hashImm(int32_t);
    static uint32_t hashImmq(uint64_t);  // not NANOJIT_64BIT-only -- used by findImmf()
    static uint32_t hash1(LOpcode op, LInsp);
    static uint32_t hash2(LOpcode op, LInsp, LInsp);
    static uint32_t hash3(LOpcode op, LInsp, LInsp, LInsp);
    static uint32_t hashLoad(LOpcode op, LInsp, int32_t, AccSet);
    static uint32_t hashCall(const CallInfo *call, uint32_t argc, LInsp args[]);

    // These private versions are used after an LIns has been created;
    // they are used for rehashing after growing.
    uint32_t findImm(LInsp ins);
#ifdef NANOJIT_64BIT
    uint32_t findImmq(LInsp ins);
#endif
    uint32_t findImmf(LInsp ins);
    uint32_t find1(LInsp ins);
    uint32_t find2(LInsp ins);
    uint32_t find3(LInsp ins);
    uint32_t findCall(LInsp ins);
    uint32_t findLoadReadOnly(LInsp ins);
    uint32_t findLoadStack(LInsp ins);
    uint32_t findLoadRStack(LInsp ins);
    uint32_t findLoadOther(LInsp ins);
    uint32_t findLoadMultiple(LInsp ins);

    // Double the table for 'kind' and rehash its entries.
    void grow(LInsHashKind kind);

public:
    // kInitialCaps[i] holds the initial size for m_list[i].
    LInsHashSet(Allocator&, uint32_t kInitialCaps[]);

    // These public versions are used before an LIns has been created.
    // On miss they yield (via 'k') the slot where the new instruction
    // should be add()ed.
    LInsp findImm(int32_t a, uint32_t &k);
#ifdef NANOJIT_64BIT
    LInsp findImmq(uint64_t a, uint32_t &k);
#endif
    LInsp findImmf(uint64_t d, uint32_t &k);
    LInsp find1(LOpcode v, LInsp a, uint32_t &k);
    LInsp find2(LOpcode v, LInsp a, LInsp b, uint32_t &k);
    LInsp find3(LOpcode v, LInsp a, LInsp b, LInsp c, uint32_t &k);
    LInsp findLoad(LOpcode v, LInsp a, int32_t b, AccSet accSet, LInsHashKind kind,
                   uint32_t &k);
    LInsp findCall(const CallInfo *call, uint32_t argc, LInsp args[], uint32_t &k);

    // 'k' is the index found by findXYZ().
    void add(LInsHashKind kind, LInsp ins, uint32_t k);

    void clear();               // clears all tables
    void clear(LInsHashKind);   // clears one table
};
|
|
|
|
|
|
|
|
// Pipeline stage performing common-subexpression elimination: consults
// 'exprs' before emitting, reusing an equivalent prior instruction when
// one exists. Method definitions live elsewhere.
class CseFilter: public LirWriter
{
private:
    LInsHashSet* exprs;
    AccSet storesSinceLastLoad;    // regions stored to since the last load

public:
    CseFilter(LirWriter *out, Allocator&);

    LIns* insImm(int32_t imm);
#ifdef NANOJIT_64BIT
    LIns* insImmq(uint64_t q);
#endif
    LIns* insImmf(double d);
    LIns* ins0(LOpcode v);
    LIns* ins1(LOpcode v, LInsp);
    LIns* ins2(LOpcode v, LInsp, LInsp);
    LIns* ins3(LOpcode v, LInsp, LInsp, LInsp);
    LIns* insLoad(LOpcode op, LInsp base, int32_t d, AccSet accSet);
    LIns* insStore(LOpcode op, LInsp value, LInsp base, int32_t d, AccSet accSet);
    LIns* insCall(const CallInfo *call, LInsp args[]);
    LIns* insGuard(LOpcode op, LInsp cond, GuardRecord *gr);
    LIns* insGuardXov(LOpcode op, LInsp a, LInsp b, GuardRecord *gr);
};
|
|
|
|
|
2009-08-04 12:03:34 -07:00
|
|
|
// Growable arena of LIR instructions, stored in a chain of fixed-size
// chunks obtained from the allocator. LirBufWriter appends into it via
// makeRoom().
class LirBuffer
{
public:
    LirBuffer(Allocator& alloc);
    void clear();
    uintptr_t makeRoom(size_t szB);     // make room for an instruction

    debug_only (void validate() const;)
    verbose_only(LInsPrinter* printer;)

    int32_t insCount();
    size_t byteCount();

    // stats
    struct
    {
        uint32_t lir;   // # instructions
    }
    _stats;

    AbiKind abi;
    // Well-known instructions of the fragment being built.
    LInsp state,param1,sp,rp;
    LInsp savedRegs[NumSavedRegs];

protected:
    friend class LirBufWriter;

    /** Each chunk is just a raw area of LIns instances, with no header
        and no more than 8-byte alignment. The chunk size is somewhat arbitrary. */
    static const size_t CHUNK_SZB = 8000;

    /** Get CHUNK_SZB more memory for LIR instructions. */
    void chunkAlloc();
    void moveToNewChunk(uintptr_t addrOfLastLInsOnCurrentChunk);

    Allocator& _allocator;
    uintptr_t _unused;   // next unused instruction slot in the current LIR chunk
    uintptr_t _limit;    // one past the last usable byte of the current LIR chunk
    size_t _bytesAllocated;
};
|
|
|
|
|
|
|
|
// Terminal writer of the pipeline: materializes instructions into a
// LirBuffer instead of forwarding (note the null 'out'). Definitions
// live elsewhere.
class LirBufWriter : public LirWriter
{
    LirBuffer* _buf;        // underlying buffer housing the instructions
    const Config& _config;

public:
    LirBufWriter(LirBuffer* buf, const Config& config)
        : LirWriter(0), _buf(buf), _config(config) {
    }

    // LirWriter interface
    LInsp insLoad(LOpcode op, LInsp base, int32_t disp, AccSet accSet);
    LInsp insStore(LOpcode op, LInsp o1, LInsp o2, int32_t disp, AccSet accSet);
    LInsp ins0(LOpcode op);
    LInsp ins1(LOpcode op, LInsp o1);
    LInsp ins2(LOpcode op, LInsp o1, LInsp o2);
    LInsp ins3(LOpcode op, LInsp o1, LInsp o2, LInsp o3);
    LInsp insParam(int32_t i, int32_t kind);
    LInsp insImm(int32_t imm);
#ifdef NANOJIT_64BIT
    LInsp insImmq(uint64_t imm);
#endif
    LInsp insImmf(double d);
    LInsp insCall(const CallInfo *call, LInsp args[]);
    LInsp insGuard(LOpcode op, LInsp cond, GuardRecord *gr);
    LInsp insGuardXov(LOpcode op, LInsp a, LInsp b, GuardRecord *gr);
    LInsp insBranch(LOpcode v, LInsp condition, LInsp to);
    LInsp insAlloc(int32_t size);
    LInsp insJtbl(LIns* index, uint32_t size);
};
|
2008-06-18 21:11:15 -07:00
|
|
|
|
2009-07-10 12:58:34 -07:00
|
|
|
class LirFilter
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
LirFilter *in;
|
|
|
|
LirFilter(LirFilter *in) : in(in) {}
|
2008-12-10 17:25:46 -08:00
|
|
|
virtual ~LirFilter(){}
|
2008-06-18 21:11:15 -07:00
|
|
|
|
2010-03-24 15:34:34 -07:00
|
|
|
// It's crucial that once this reaches the LIR_start at the beginning
|
|
|
|
// of the buffer, that it just keeps returning that LIR_start LIns on
|
|
|
|
// any subsequent calls.
|
2009-07-10 12:58:34 -07:00
|
|
|
virtual LInsp read() {
|
|
|
|
return in->read();
|
|
|
|
}
|
2010-03-24 15:34:34 -07:00
|
|
|
virtual LInsp finalIns() {
|
|
|
|
return in->finalIns();
|
2009-07-10 12:58:34 -07:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// concrete
|
|
|
|
class LirReader : public LirFilter
|
|
|
|
{
|
2010-03-24 15:34:34 -07:00
|
|
|
LInsp _ins; // next instruction to be read; invariant: is never a skip
|
|
|
|
LInsp _finalIns; // final instruction in the stream; ie. the first one to be read
|
2009-07-10 12:58:34 -07:00
|
|
|
|
|
|
|
public:
|
2010-03-24 15:34:34 -07:00
|
|
|
LirReader(LInsp ins) : LirFilter(0), _ins(ins), _finalIns(ins)
|
2009-11-12 14:26:26 -08:00
|
|
|
{
|
|
|
|
// The last instruction for a fragment shouldn't be a skip.
|
|
|
|
// (Actually, if the last *inserted* instruction exactly fills up
|
|
|
|
// a chunk, a new chunk will be created, and thus the last *written*
|
|
|
|
// instruction will be a skip -- the one needed for the
|
|
|
|
// cross-chunk link. But the last *inserted* instruction is what
|
|
|
|
// is recorded and used to initialise each LirReader, and that is
|
|
|
|
// what is seen here, and therefore this assertion holds.)
|
2010-03-24 15:34:34 -07:00
|
|
|
NanoAssert(ins && !ins->isop(LIR_skip));
|
2009-08-06 06:41:07 -07:00
|
|
|
}
|
2009-07-10 12:58:34 -07:00
|
|
|
virtual ~LirReader() {}
|
|
|
|
|
2009-11-12 14:26:26 -08:00
|
|
|
// Returns next instruction and advances to the prior instruction.
|
|
|
|
// Invariant: never returns a skip.
|
2009-12-21 12:05:48 -08:00
|
|
|
LInsp read();
|
2009-11-12 14:26:26 -08:00
|
|
|
|
2010-03-24 15:34:34 -07:00
|
|
|
LInsp finalIns() {
|
|
|
|
return _finalIns;
|
2009-07-10 12:58:34 -07:00
|
|
|
}
|
|
|
|
};
|
2008-06-18 21:11:15 -07:00
|
|
|
|
2010-01-14 15:05:44 -08:00
|
|
|
verbose_only(void live(LirFilter* in, Allocator& alloc, Fragment* frag, LogControl*);)
|
2009-07-10 12:58:34 -07:00
|
|
|
|
2010-03-11 14:59:45 -08:00
|
|
|
// WARNING: StackFilter assumes that all stack entries are eight bytes.
|
|
|
|
// Some of its optimisations aren't valid if that isn't true. See
|
|
|
|
// StackFilter::read() for more details.
|
2009-07-10 12:58:34 -07:00
|
|
|
class StackFilter: public LirFilter
|
|
|
|
{
|
|
|
|
LInsp sp;
|
2010-03-11 14:59:45 -08:00
|
|
|
BitSet stk;
|
|
|
|
int top;
|
|
|
|
int getTop(LInsp br);
|
2009-09-08 18:48:30 -07:00
|
|
|
|
2009-07-10 12:58:34 -07:00
|
|
|
public:
|
2010-03-11 14:59:45 -08:00
|
|
|
StackFilter(LirFilter *in, Allocator& alloc, LInsp sp);
|
2009-07-10 12:58:34 -07:00
|
|
|
LInsp read();
|
|
|
|
};
|
|
|
|
|
2010-02-07 13:35:35 -08:00
|
|
|
struct SoftFloatOps
|
|
|
|
{
|
|
|
|
const CallInfo* opmap[LIR_sentinel];
|
|
|
|
SoftFloatOps();
|
|
|
|
};
|
|
|
|
|
|
|
|
extern const SoftFloatOps softFloatOps;
|
|
|
|
|
|
|
|
// Replaces fpu ops with function calls, for platforms lacking float
|
|
|
|
// hardware (eg. some ARM machines).
|
|
|
|
class SoftFloatFilter: public LirWriter
|
|
|
|
{
|
|
|
|
public:
|
|
|
|
static const CallInfo* opmap[LIR_sentinel];
|
|
|
|
|
|
|
|
SoftFloatFilter(LirWriter *out);
|
|
|
|
LIns *split(LIns *a);
|
|
|
|
LIns *split(const CallInfo *call, LInsp args[]);
|
|
|
|
LIns *fcall1(const CallInfo *call, LIns *a);
|
|
|
|
LIns *fcall2(const CallInfo *call, LIns *a, LIns *b);
|
|
|
|
LIns *fcmp(const CallInfo *call, LIns *a, LIns *b);
|
|
|
|
LIns *ins1(LOpcode op, LIns *a);
|
|
|
|
LIns *ins2(LOpcode op, LIns *a, LIns *b);
|
|
|
|
LIns *insCall(const CallInfo *ci, LInsp args[]);
|
|
|
|
};
|
|
|
|
|
|
|
|
|
2009-09-04 17:56:22 -07:00
|
|
|
#ifdef DEBUG
|
2010-01-21 15:10:59 -08:00
|
|
|
// This class does thorough checking of LIR. It checks *implicit* LIR
|
|
|
|
// instructions, ie. LIR instructions specified via arguments -- to
|
|
|
|
// methods like insLoad() -- that have not yet been converted into
|
|
|
|
// *explicit* LIns objects in a LirBuffer. The reason for this is that if
|
|
|
|
// we wait until the LIR instructions are explicit, they will have gone
|
|
|
|
// through the entire writer pipeline and been optimised. By checking
|
|
|
|
// implicit LIR instructions we can check the LIR code at the start of the
|
|
|
|
// writer pipeline, exactly as it is generated by the compiler front-end.
|
|
|
|
//
|
|
|
|
// A general note about the errors produced by this class: for
|
|
|
|
// TraceMonkey, they won't include special names for instructions that
|
|
|
|
// have them unless TMFLAGS is specified.
|
|
|
|
class ValidateWriter : public LirWriter
|
2009-09-04 17:56:22 -07:00
|
|
|
{
|
2010-01-21 15:10:59 -08:00
|
|
|
private:
|
2010-03-23 15:05:47 -07:00
|
|
|
LInsPrinter* printer;
|
|
|
|
const char* whereInPipeline;
|
2010-01-21 15:10:59 -08:00
|
|
|
|
|
|
|
const char* type2string(LTy type);
|
|
|
|
void typeCheckArgs(LOpcode op, int nArgs, LTy formals[], LIns* args[]);
|
|
|
|
void errorStructureShouldBe(LOpcode op, const char* argDesc, int argN, LIns* arg,
|
|
|
|
const char* shouldBeDesc);
|
2010-03-23 15:05:47 -07:00
|
|
|
void errorAccSet(const char* what, AccSet accSet, const char* shouldDesc);
|
2010-01-21 15:10:59 -08:00
|
|
|
void checkLInsHasOpcode(LOpcode op, int argN, LIns* ins, LOpcode op2);
|
|
|
|
void checkLInsIsACondOrConst(LOpcode op, int argN, LIns* ins);
|
|
|
|
void checkLInsIsNull(LOpcode op, int argN, LIns* ins);
|
2010-03-23 15:05:47 -07:00
|
|
|
void checkAccSet(LOpcode op, LInsp base, AccSet accSet, AccSet maxAccSet);
|
|
|
|
|
|
|
|
LInsp sp, rp;
|
2010-01-21 15:10:59 -08:00
|
|
|
|
2009-09-04 17:56:22 -07:00
|
|
|
public:
|
2010-03-23 15:05:47 -07:00
|
|
|
ValidateWriter(LirWriter* out, LInsPrinter* printer, const char* where);
|
|
|
|
void setSp(LInsp ins) { sp = ins; }
|
|
|
|
void setRp(LInsp ins) { rp = ins; }
|
|
|
|
|
2010-03-01 17:43:20 -08:00
|
|
|
LIns* insLoad(LOpcode op, LIns* base, int32_t d, AccSet accSet);
|
|
|
|
LIns* insStore(LOpcode op, LIns* value, LIns* base, int32_t d, AccSet accSet);
|
2010-01-21 15:10:59 -08:00
|
|
|
LIns* ins0(LOpcode v);
|
|
|
|
LIns* ins1(LOpcode v, LIns* a);
|
|
|
|
LIns* ins2(LOpcode v, LIns* a, LIns* b);
|
|
|
|
LIns* ins3(LOpcode v, LIns* a, LIns* b, LIns* c);
|
|
|
|
LIns* insParam(int32_t arg, int32_t kind);
|
|
|
|
LIns* insImm(int32_t imm);
|
2010-02-11 17:30:16 -08:00
|
|
|
#ifdef NANOJIT_64BIT
|
2010-01-21 15:10:59 -08:00
|
|
|
LIns* insImmq(uint64_t imm);
|
2010-02-11 17:30:16 -08:00
|
|
|
#endif
|
2010-01-21 15:10:59 -08:00
|
|
|
LIns* insImmf(double d);
|
|
|
|
LIns* insCall(const CallInfo *call, LIns* args[]);
|
|
|
|
LIns* insGuard(LOpcode v, LIns *c, GuardRecord *gr);
|
2010-02-18 20:04:55 -08:00
|
|
|
LIns* insGuardXov(LOpcode v, LIns* a, LIns* b, GuardRecord* gr);
|
2010-01-21 15:10:59 -08:00
|
|
|
LIns* insBranch(LOpcode v, LIns* condition, LIns* to);
|
|
|
|
LIns* insAlloc(int32_t size);
|
|
|
|
LIns* insJtbl(LIns* index, uint32_t size);
|
|
|
|
};
|
|
|
|
|
|
|
|
// This just checks things that aren't possible to check in
|
|
|
|
// ValidateWriter, eg. whether all branch targets are set and are labels.
|
|
|
|
class ValidateReader: public LirFilter {
|
2009-09-04 17:56:22 -07:00
|
|
|
public:
|
2010-01-21 15:10:59 -08:00
|
|
|
ValidateReader(LirFilter* in);
|
|
|
|
LIns* read();
|
2009-09-04 17:56:22 -07:00
|
|
|
};
|
|
|
|
#endif
|
2010-01-07 16:41:42 -08:00
|
|
|
|
|
|
|
#ifdef NJ_VERBOSE
|
|
|
|
/* A listing filter for LIR, going through backwards. It merely
|
|
|
|
passes its input to its output, but notes it down too. When
|
|
|
|
finish() is called, prints out what went through. Is intended to be
|
|
|
|
used to print arbitrary intermediate transformation stages of
|
|
|
|
LIR. */
|
|
|
|
class ReverseLister : public LirFilter
|
|
|
|
{
|
|
|
|
Allocator& _alloc;
|
2010-03-17 16:42:30 -07:00
|
|
|
LInsPrinter* _printer;
|
2010-01-07 16:41:42 -08:00
|
|
|
const char* _title;
|
|
|
|
StringList _strs;
|
|
|
|
LogControl* _logc;
|
2010-03-24 15:34:34 -07:00
|
|
|
LIns* _prevIns;
|
2010-01-07 16:41:42 -08:00
|
|
|
public:
|
|
|
|
ReverseLister(LirFilter* in, Allocator& alloc,
|
2010-03-17 16:42:30 -07:00
|
|
|
LInsPrinter* printer, LogControl* logc, const char* title)
|
2010-01-07 16:41:42 -08:00
|
|
|
: LirFilter(in)
|
|
|
|
, _alloc(alloc)
|
2010-03-17 16:42:30 -07:00
|
|
|
, _printer(printer)
|
2010-01-07 16:41:42 -08:00
|
|
|
, _title(title)
|
|
|
|
, _strs(alloc)
|
|
|
|
, _logc(logc)
|
2010-03-24 15:34:34 -07:00
|
|
|
, _prevIns(NULL)
|
2010-01-07 16:41:42 -08:00
|
|
|
{ }
|
|
|
|
|
|
|
|
void finish();
|
|
|
|
LInsp read();
|
|
|
|
};
|
|
|
|
#endif
|
|
|
|
|
2008-06-18 21:11:15 -07:00
|
|
|
}
|
|
|
|
#endif // __nanojit_LIR__
|