Merge tracemonkey to mozilla-central.

Commit 0a1c8346ac by Robert Sayre, 2009-07-06 17:17:08 -04:00
37 changed files with 1650 additions and 870 deletions


@ -42,7 +42,7 @@
#ifndef nsContentUtils_h___
#define nsContentUtils_h___
#include "jspubtd.h"
#include "jsprvtd.h"
#include "jsnum.h"
#include "nsAString.h"
#include "nsIStatefulFrame.h"


@ -39,6 +39,7 @@
#include "jsapi.h"
#include "jsdtoa.h"
#include "jsprvtd.h"
#include "jsnum.h"
#include "jsbool.h"
#include "jsarena.h"


@ -2,4 +2,4 @@
X="var d = Date.now();";
for i in t/*.js; do X="$X load(\"$i\");"; done
X="$X print(Date.now() - d);"
echo $X | (./Darwin_OPT.OBJ/js -j || ./Linux_All_OPT.OBJ/js -j)
echo $X | $1 -j


@ -2617,6 +2617,7 @@ JS_SetErrorReporter(JSContext *cx, JSErrorReporter er);
#define JSREG_MULTILINE 0x04 /* treat ^ and $ as begin and end of line */
#define JSREG_STICKY 0x08 /* only match starting at lastIndex */
#define JSREG_FLAT 0x10 /* parse as a flat regexp */
#define JSREG_NOCOMPILE 0x20 /* do not try to compile to native code */
extern JS_PUBLIC_API(JSObject *)
JS_NewRegExpObject(JSContext *cx, char *bytes, size_t length, uintN flags);
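The new JSREG_NOCOMPILE bit records that native compilation should not be
attempted (again) for a regexp. A minimal sketch of how a caller consults it,
modeled on the MatchRegExp change later in this patch; the helper name
MaybeGetNative is hypothetical, while TRACING_ENABLED and GetNativeRegExp are
the names the patch itself uses:

static NativeRegExp
MaybeGetNative(JSContext *cx, JSRegExp *re)
{
    if (!TRACING_ENABLED(cx))
        return NULL;                  /* jit off: fall back to the interpreter */
    if (re->flags & JSREG_NOCOMPILE)
        return NULL;                  /* an earlier attempt opted out */
    /* On failure, the compiler sets JSREG_NOCOMPILE so we never retry. */
    return GetNativeRegExp(cx, re);
}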


@ -99,6 +99,7 @@
#include "jsscope.h"
#include "jsstr.h"
#include "jsstaticcheck.h"
#include "jsvector.h"
/* 2^32 - 1 as a number and a string */
#define MAXINDEX 4294967295u
@ -1301,225 +1302,15 @@ js_MakeArraySlow(JSContext *cx, JSObject *obj)
return JS_FALSE;
}
enum ArrayToStringOp {
TO_STRING,
TO_LOCALE_STRING,
TO_SOURCE
};
/*
* When op is TO_STRING or TO_LOCALE_STRING sep indicates a separator to use
* or "," when sep is NULL.
* When op is TO_SOURCE sep must be NULL.
*/
/* Transfer ownership of buffer to returned string. */
static JSBool
array_join_sub(JSContext *cx, JSObject *obj, enum ArrayToStringOp op,
JSString *sep, jsval *rval)
BufferToString(JSContext *cx, JSTempVector<jschar> &buf, jsval *rval)
{
JSBool ok, hole;
jsuint length, index;
jschar *chars, *ochars;
size_t nchars, growth, seplen, tmplen, extratail;
const jschar *sepstr;
JSString *str;
JSHashEntry *he;
JSAtom *atom;
JS_CHECK_RECURSION(cx, return JS_FALSE);
ok = js_GetLengthProperty(cx, obj, &length);
if (!ok)
return JS_FALSE;
he = js_EnterSharpObject(cx, obj, NULL, &chars);
if (!he)
return JS_FALSE;
#ifdef DEBUG
growth = (size_t) -1;
#endif
/*
* We must check for the sharp bit and skip js_LeaveSharpObject when it is
* set even when op is not TO_SOURCE. A script can overwrite the default
* toSource implementation and trigger a call, for example, to the
* toString method during serialization of the object graph (bug 369696).
*/
if (IS_SHARP(he)) {
#if JS_HAS_SHARP_VARS
nchars = js_strlen(chars);
#else
chars[0] = '[';
chars[1] = ']';
chars[2] = 0;
nchars = 2;
#endif
goto make_string;
}
if (op == TO_SOURCE) {
/*
* Always allocate 2 extra chars for closing ']' and terminating 0
* and then preallocate 1 + extratail to include starting '['.
*/
extratail = 2;
growth = (1 + extratail) * sizeof(jschar);
if (!chars) {
nchars = 0;
chars = (jschar *) malloc(growth);
if (!chars)
goto done;
} else {
MAKE_SHARP(he);
nchars = js_strlen(chars);
growth += nchars * sizeof(jschar);
chars = (jschar *)realloc((ochars = chars), growth);
if (!chars) {
free(ochars);
goto done;
}
}
chars[nchars++] = '[';
JS_ASSERT(sep == NULL);
sepstr = NULL; /* indicates to use ", " as separator */
seplen = 2;
} else {
/*
* Free any sharp variable definition in chars. Normally, we would
* MAKE_SHARP(he) so that only the first sharp variable annotation is
* a definition, and all the rest are references, but in the current
* case of (op != TO_SOURCE), we don't need chars at all.
*/
if (chars)
JS_free(cx, chars);
chars = NULL;
nchars = 0;
extratail = 1; /* allocate extra char for terminating 0 */
/* Return the empty string on a cycle as well as on empty join. */
if (IS_BUSY(he) || length == 0) {
js_LeaveSharpObject(cx, NULL);
*rval = JS_GetEmptyStringValue(cx);
return ok;
}
/* Flag he as BUSY so we can distinguish a cycle from a join-point. */
MAKE_BUSY(he);
if (sep) {
sep->getCharsAndLength(sepstr, seplen);
} else {
sepstr = NULL; /* indicates to use "," as separator */
seplen = 1;
}
}
/* Use rval to locally root each element value as we loop and convert. */
for (index = 0; index < length; index++) {
ok = (JS_CHECK_OPERATION_LIMIT(cx) &&
GetArrayElement(cx, obj, index, &hole, rval));
if (!ok)
goto done;
if (hole ||
(op != TO_SOURCE &&
(JSVAL_IS_VOID(*rval) || JSVAL_IS_NULL(*rval)))) {
str = cx->runtime->emptyString;
} else {
if (op == TO_LOCALE_STRING) {
JSObject *robj;
atom = cx->runtime->atomState.toLocaleStringAtom;
ok = js_ValueToObject(cx, *rval, &robj);
if (ok) {
/* Re-use *rval to protect robj temporarily. */
*rval = OBJECT_TO_JSVAL(robj);
ok = js_TryMethod(cx, robj, atom, 0, NULL, rval);
}
if (!ok)
goto done;
str = js_ValueToString(cx, *rval);
} else if (op == TO_STRING) {
str = js_ValueToString(cx, *rval);
} else {
JS_ASSERT(op == TO_SOURCE);
str = js_ValueToSource(cx, *rval);
}
if (!str) {
ok = JS_FALSE;
goto done;
}
}
/*
* Do not append separator after the last element unless it is a hole
* and we are in toSource. In that case we append single ",".
*/
if (index + 1 == length)
seplen = (hole && op == TO_SOURCE) ? 1 : 0;
/* Allocate 1 at end for closing bracket and zero. */
tmplen = str->length();
growth = nchars + tmplen + seplen + extratail;
if (nchars > growth || tmplen > growth ||
growth > (size_t)-1 / sizeof(jschar)) {
if (chars) {
free(chars);
chars = NULL;
}
goto done;
}
growth *= sizeof(jschar);
if (!chars) {
chars = (jschar *) malloc(growth);
if (!chars)
goto done;
} else {
chars = (jschar *) realloc((ochars = chars), growth);
if (!chars) {
free(ochars);
goto done;
}
}
js_strncpy(&chars[nchars], str->chars(), tmplen);
nchars += tmplen;
if (seplen) {
if (sepstr) {
js_strncpy(&chars[nchars], sepstr, seplen);
} else {
JS_ASSERT(seplen == 1 || seplen == 2);
chars[nchars] = ',';
if (seplen == 2)
chars[nchars + 1] = ' ';
}
nchars += seplen;
}
}
done:
if (op == TO_SOURCE) {
if (chars)
chars[nchars++] = ']';
} else {
CLEAR_BUSY(he);
}
js_LeaveSharpObject(cx, NULL);
if (!ok) {
if (chars)
free(chars);
return ok;
}
make_string:
if (!chars) {
JS_ReportOutOfMemory(cx);
return JS_FALSE;
}
chars[nchars] = 0;
JS_ASSERT(growth == (size_t)-1 || (nchars + 1) * sizeof(jschar) == growth);
str = js_NewString(cx, chars, nchars);
size_t length = buf.size() - 1;
jschar *chars = buf.extractRawBuffer();
JSString *str = js_NewString(cx, chars, length);
if (!str) {
free(chars);
JS_free(cx, chars);
return JS_FALSE;
}
*rval = STRING_TO_JSVAL(str);
@ -1530,28 +1321,258 @@ array_join_sub(JSContext *cx, JSObject *obj, enum ArrayToStringOp op,
static JSBool
array_toSource(JSContext *cx, uintN argc, jsval *vp)
{
JSObject *obj;
JS_CHECK_RECURSION(cx, return JS_FALSE);
obj = JS_THIS_OBJECT(cx, vp);
if (OBJ_GET_CLASS(cx, obj) != &js_SlowArrayClass &&
!JS_InstanceOf(cx, obj, &js_ArrayClass, vp + 2)) {
JSObject *obj = JS_THIS_OBJECT(cx, vp);
if (!obj ||
(OBJ_GET_CLASS(cx, obj) != &js_SlowArrayClass &&
!JS_InstanceOf(cx, obj, &js_ArrayClass, vp + 2))) {
return JS_FALSE;
}
return array_join_sub(cx, obj, TO_SOURCE, NULL, vp);
/* Find joins or cycles in the reachable object graph. */
jschar *sharpchars;
JSHashEntry *he = js_EnterSharpObject(cx, obj, NULL, &sharpchars);
if (!he)
return JS_FALSE;
JSBool initiallySharp = IS_SHARP(he) ? JS_TRUE : JS_FALSE;
/* After this point, all paths exit through the 'done' label. */
MUST_FLOW_THROUGH("done");
JSBool ok = JS_TRUE;
/*
* This object will take responsibility for the jschar buffer until the
* buffer is transferred to the returned JSString.
*/
JSTempVector<jschar> buf(cx);
if (!(ok = buf.reserve(3)))
goto done;
/* Cycles/joins are indicated by sharp objects. */
#if JS_HAS_SHARP_VARS
if (IS_SHARP(he)) {
JS_ASSERT(sharpchars != 0);
/* +1 to include the trailing '\0' */
buf.replaceRawBuffer(sharpchars, js_strlen(sharpchars) + 1);
goto make_string;
} else if (sharpchars) {
MAKE_SHARP(he);
buf.replaceRawBuffer(sharpchars, js_strlen(sharpchars));
}
#else
if (IS_SHARP(he)) {
static const jschar arr[] = { '[', ']', 0 };
if (!(ok = buf.pushBack(arr, arr + 3)))
goto done;
if (sharpchars)
JS_free(cx, sharpchars);
goto make_string;
}
#endif
if (!(ok = buf.pushBack('[')))
goto done;
jsuint length;
ok = js_GetLengthProperty(cx, obj, &length);
if (!ok)
goto done;
for (jsuint index = 0; index < length; index++) {
/* Use vp to locally root each element value. */
JSBool hole;
ok = (JS_CHECK_OPERATION_LIMIT(cx) &&
GetArrayElement(cx, obj, index, &hole, vp));
if (!ok)
goto done;
/* Get element's character string. */
JSString *str;
if (hole) {
str = cx->runtime->emptyString;
} else {
str = js_ValueToSource(cx, *vp);
if (!str) {
ok = JS_FALSE;
goto done;
}
}
*vp = STRING_TO_JSVAL(str);
const jschar *chars;
size_t charlen;
str->getCharsAndLength(chars, charlen);
/* Append element to buffer. */
if (!(ok = buf.pushBack(chars, chars + charlen)))
goto done;
if (index + 1 != length) {
if (!(ok = buf.pushBack(',')) || !(ok = buf.pushBack(' ')))
goto done;
} else if (hole) {
if (!(ok = buf.pushBack(',')))
goto done;
}
}
/* Finalize the buffer. */
if (!(ok = buf.pushBack(']')) || !(ok = buf.pushBack(0)))
goto done;
make_string:
if (!(ok = BufferToString(cx, buf, vp)))
goto done;
done:
if (!initiallySharp)
js_LeaveSharpObject(cx, NULL);
return ok;
}
#endif
static JSHashNumber
js_hash_array(const void *key)
{
return (JSHashNumber)JS_PTR_TO_UINT32(key) >> JSVAL_TAGBITS;
}
JSBool
js_InitContextBusyArrayTable(JSContext *cx)
{
cx->busyArrayTable = JS_NewHashTable(4, js_hash_array, JS_CompareValues,
JS_CompareValues, NULL, NULL);
return cx->busyArrayTable != NULL;
}
static JSBool
array_toString_sub(JSContext *cx, JSObject *obj, JSBool locale,
JSString *sepstr, jsval *rval)
{
JS_CHECK_RECURSION(cx, return JS_FALSE);
/*
* This hash table is shared between toString invocations and must be empty
* after the root invocation completes.
*/
JSHashTable *table = cx->busyArrayTable;
/*
* Use HashTable entry as the cycle indicator. On first visit, create the
* entry, and, when leaving, remove the entry.
*/
JSHashNumber hash = js_hash_array(obj);
JSHashEntry **hep = JS_HashTableRawLookup(table, hash, obj);
JSHashEntry *he = *hep;
if (!he) {
/* Not in hash table, so not a cycle. */
he = JS_HashTableRawAdd(table, hep, hash, obj, NULL);
if (!he) {
JS_ReportOutOfMemory(cx);
return JS_FALSE;
}
} else {
/* Cycle, so return empty string. */
*rval = ATOM_KEY(cx->runtime->atomState.emptyAtom);
return JS_TRUE;
}
JSAutoTempValueRooter tvr(cx, obj);
/* After this point, all paths exit through the 'done' label. */
MUST_FLOW_THROUGH("done");
JSBool ok = JS_TRUE;
/* Get characters to use for the separator. */
static const jschar comma = ',';
const jschar *sep;
size_t seplen;
if (sepstr) {
sepstr->getCharsAndLength(sep, seplen);
} else {
sep = &comma;
seplen = 1;
}
/*
* This object will take responsibility for the jschar buffer until the
* buffer is transferred to the returned JSString.
*/
JSTempVector<jschar> buf(cx);
jsuint length;
ok = js_GetLengthProperty(cx, obj, &length);
if (!ok)
goto done;
for (jsuint index = 0; index < length; index++) {
/* Use rval to locally root each element value. */
JSBool hole;
ok = JS_CHECK_OPERATION_LIMIT(cx) &&
GetArrayElement(cx, obj, index, &hole, rval);
if (!ok)
goto done;
/* Get element's character string. */
if (!(hole || JSVAL_IS_VOID(*rval) || JSVAL_IS_NULL(*rval))) {
if (locale) {
JSObject *robj;
JSAtom *atom = cx->runtime->atomState.toLocaleStringAtom;
ok = js_ValueToObject(cx, *rval, &robj);
if (ok) {
/* Re-use *rval to protect robj temporarily. */
*rval = OBJECT_TO_JSVAL(robj);
ok = js_TryMethod(cx, robj, atom, 0, NULL, rval);
}
if (!ok)
goto done;
}
ok = js_ValueToStringBuffer(cx, *rval, buf);
if (!ok)
goto done;
}
/* Append the separator. */
if (index + 1 != length) {
if (!(ok = buf.pushBack(sep, sep + seplen)))
goto done;
}
}
/* Finalize the buffer. */
if (buf.empty()) {
*rval = ATOM_KEY(cx->runtime->atomState.emptyAtom);
goto done;
}
ok = buf.pushBack(0) &&
BufferToString(cx, buf, rval);
if (!ok)
goto done;
done:
/*
* It is possible that 'hep' may have been invalidated by subsequent
* RawAdd/Remove. Hence, 'RawRemove' must not be used.
*/
JS_HashTableRemove(table, obj);
return ok;
}
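For readers skimming the diff, the busyArrayTable protocol above reduces to the
following pattern (a sketch under this patch's names, with the per-element
conversion loop elided):

static JSBool
VisitArrayOnce(JSContext *cx, JSObject *obj, JSBool *cycle)
{
    JSHashTable *table = cx->busyArrayTable;
    JSHashNumber hash = js_hash_array(obj);
    JSHashEntry **hep = JS_HashTableRawLookup(table, hash, obj);
    if (*hep) {
        *cycle = JS_TRUE;             /* obj is already being visited */
        return JS_TRUE;
    }
    if (!JS_HashTableRawAdd(table, hep, hash, obj, NULL)) {
        JS_ReportOutOfMemory(cx);
        return JS_FALSE;
    }
    *cycle = JS_FALSE;
    /* ... convert obj's elements here; nested arrays recurse ... */
    /* Nested adds may rehash and invalidate hep, so remove by key. */
    JS_HashTableRemove(table, obj);
    return JS_TRUE;
}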
static JSBool
array_toString(JSContext *cx, uintN argc, jsval *vp)
{
JSObject *obj;
obj = JS_THIS_OBJECT(cx, vp);
if (OBJ_GET_CLASS(cx, obj) != &js_SlowArrayClass &&
!JS_InstanceOf(cx, obj, &js_ArrayClass, vp + 2)) {
if (!obj ||
(OBJ_GET_CLASS(cx, obj) != &js_SlowArrayClass &&
!JS_InstanceOf(cx, obj, &js_ArrayClass, vp + 2))) {
return JS_FALSE;
}
return array_join_sub(cx, obj, TO_STRING, NULL, vp);
return array_toString_sub(cx, obj, JS_FALSE, NULL, vp);
}
static JSBool
@ -1560,8 +1581,9 @@ array_toLocaleString(JSContext *cx, uintN argc, jsval *vp)
JSObject *obj;
obj = JS_THIS_OBJECT(cx, vp);
if (OBJ_GET_CLASS(cx, obj) != &js_SlowArrayClass &&
!JS_InstanceOf(cx, obj, &js_ArrayClass, vp + 2)) {
if (!obj ||
(OBJ_GET_CLASS(cx, obj) != &js_SlowArrayClass &&
!JS_InstanceOf(cx, obj, &js_ArrayClass, vp + 2))) {
return JS_FALSE;
}
@ -1569,7 +1591,7 @@ array_toLocaleString(JSContext *cx, uintN argc, jsval *vp)
* Passing comma here as the separator. Need a way to get a
* locale-specific version.
*/
return array_join_sub(cx, obj, TO_LOCALE_STRING, NULL, vp);
return array_toString_sub(cx, obj, JS_TRUE, NULL, vp);
}
enum TargetElementsType {
@ -1716,7 +1738,7 @@ static JSString* FASTCALL
Array_p_join(JSContext* cx, JSObject* obj, JSString *str)
{
JSAutoTempValueRooter tvr(cx);
if (!array_join_sub(cx, obj, TO_STRING, str, tvr.addr())) {
if (!array_toString_sub(cx, obj, JS_FALSE, str, tvr.addr())) {
js_SetBuiltinError(cx);
return NULL;
}
@ -1727,7 +1749,7 @@ static JSString* FASTCALL
Array_p_toString(JSContext* cx, JSObject* obj)
{
JSAutoTempValueRooter tvr(cx);
if (!array_join_sub(cx, obj, TO_STRING, NULL, tvr.addr())) {
if (!array_toString_sub(cx, obj, JS_FALSE, NULL, tvr.addr())) {
js_SetBuiltinError(cx);
return NULL;
}
@ -1753,7 +1775,7 @@ array_join(JSContext *cx, uintN argc, jsval *vp)
vp[2] = STRING_TO_JSVAL(str);
}
obj = JS_THIS_OBJECT(cx, vp);
return obj && array_join_sub(cx, obj, TO_STRING, str, vp);
return obj && array_toString_sub(cx, obj, JS_FALSE, str, vp);
}
static JSBool


@ -97,6 +97,9 @@ js_GetProtoIfDenseArray(JSContext *cx, JSObject *obj)
extern JSObject *
js_InitArrayClass(JSContext *cx, JSObject *obj);
extern JSBool
js_InitContextBusyArrayTable(JSContext *);
extern JSObject *
js_NewArrayObject(JSContext *cx, jsuint length, jsval *vector,
JSBool holey = JS_FALSE);


@ -52,6 +52,7 @@
#include "jsnum.h"
#include "jsobj.h"
#include "jsstr.h"
#include "jsvector.h"
/* Check pseudo-booleans values. */
JS_STATIC_ASSERT(!(JSVAL_TRUE & JSVAL_HOLE_FLAG));
@ -162,6 +163,16 @@ js_BooleanToString(JSContext *cx, JSBool b)
return ATOM_TO_STRING(cx->runtime->atomState.booleanAtoms[b ? 1 : 0]);
}
/* This function implements E-262-3 section 9.8, toString. */
JSBool
js_BooleanToStringBuffer(JSContext *cx, JSBool b, JSTempVector<jschar> &buf)
{
static const jschar trueChars[] = { 't', 'r', 'u', 'e' },
falseChars[] = { 'f', 'a', 'l', 's', 'e' };
return b ? buf.pushBack(trueChars, trueChars + JS_ARRAY_LENGTH(trueChars))
: buf.pushBack(falseChars, falseChars + JS_ARRAY_LENGTH(falseChars));
}
JSBool
js_ValueToBoolean(jsval v)
{


@ -80,6 +80,9 @@ js_InitBooleanClass(JSContext *cx, JSObject *obj);
extern JSString *
js_BooleanToString(JSContext *cx, JSBool b);
extern JSBool
js_BooleanToStringBuffer(JSContext *cx, JSBool b, JSTempVector<jschar> &buf);
extern JSBool
js_ValueToBoolean(jsval v);


@ -387,6 +387,11 @@ js_NewContext(JSRuntime *rt, size_t stackChunkSize)
js_InitRegExpStatics(cx);
JS_ASSERT(cx->resolveFlags == 0);
if (!js_InitContextBusyArrayTable(cx)) {
FreeContext(cx);
return NULL;
}
#ifdef JS_THREADSAFE
if (!js_InitContextThread(cx)) {
FreeContext(cx);
@ -743,6 +748,12 @@ FreeContext(JSContext *cx)
JS_free(cx, temp);
}
/* Destroy the busy array table. */
if (cx->busyArrayTable) {
JS_HashTableDestroy(cx->busyArrayTable);
cx->busyArrayTable = NULL;
}
/* Destroy the resolve recursion damper. */
if (cx->resolvingTable) {
JS_DHashTableDestroy(cx->resolvingTable);


@ -56,6 +56,7 @@
#include "jspubtd.h"
#include "jsregexp.h"
#include "jsutil.h"
#include "jsarray.h"
JS_BEGIN_EXTERN_C
@ -954,6 +955,7 @@ struct JSContext {
/* State for object and array toSource conversion. */
JSSharpObjectMap sharpObjectMap;
JSHashTable *busyArrayTable;
/* Argument formatter support for JS_{Convert,Push}Arguments{,VA}. */
JSArgumentFormatMap *argumentFormatMap;


@ -631,18 +631,27 @@ js_watch_set(JSContext *cx, JSObject *obj, jsval id, jsval *vp)
memset(&frame, 0, sizeof(frame));
frame.script = script;
frame.regs = NULL;
frame.callee = closure;
frame.fun = fun;
frame.argv = argv + 2;
frame.down = js_GetTopStackFrame(cx);
frame.scopeChain = OBJ_GET_PARENT(cx, closure);
if (script) {
JS_ASSERT(script->length >= JSOP_STOP_LENGTH);
regs.pc = script->code + script->length
- JSOP_STOP_LENGTH;
regs.sp = NULL;
frame.regs = &regs;
if (fun &&
JSFUN_HEAVYWEIGHT_TEST(fun->flags) &&
!js_GetCallObject(cx, &frame)) {
if (argv != smallv)
JS_free(cx, argv);
DBG_LOCK(rt);
DropWatchPointAndUnlock(cx, wp, JSWP_HELD);
return JS_FALSE;
}
}
frame.callee = closure;
frame.fun = fun;
frame.argv = argv + 2;
frame.down = js_GetTopStackFrame(cx);
frame.scopeChain = OBJ_GET_PARENT(cx, closure);
cx->fp = &frame;
}


@ -46,7 +46,7 @@
#include "jsdtoa.h"
#include "jsprf.h"
#include "jsutil.h" /* Added by JSIFY */
#include "jspubtd.h"
#include "jsprvtd.h"
#include "jsnum.h"
#include "jsbit.h"


@ -71,6 +71,7 @@
#include "jsprf.h"
#include "jsscope.h"
#include "jsstr.h"
#include "jsvector.h"
static JSBool
num_isNaN(JSContext *cx, uintN argc, jsval *vp)
@ -861,6 +862,41 @@ js_NumberToString(JSContext *cx, jsdouble d)
return NumberToStringWithBase(cx, d, 10);
}
JSBool JS_FASTCALL
js_NumberValueToStringBuffer(JSContext *cx, jsval v, JSTempVector<jschar> &buf)
{
/* Convert to C-string. */
static const size_t arrSize = DTOSTR_STANDARD_BUFFER_SIZE;
char arr[arrSize];
const char *cstr;
if (JSVAL_IS_INT(v)) {
cstr = IntToCString(JSVAL_TO_INT(v), 10, arr, arrSize);
} else {
JS_ASSERT(JSVAL_IS_DOUBLE(v));
cstr = JS_dtostr(arr, arrSize, DTOSTR_STANDARD, 0, *JSVAL_TO_DOUBLE(v));
}
if (!cstr)
return JS_FALSE;
/*
* Inflate to jschar string. The input C-string characters are < 127, so
* even when the C string is interpreted as UTF-8, each char maps to
* exactly one jschar.
*/
size_t cstrlen = strlen(cstr);
JS_ASSERT(cstrlen < arrSize);
size_t sizeBefore = buf.size();
if (!buf.growBy(cstrlen))
return JS_FALSE;
jschar *appendBegin = buf.begin() + sizeBefore;
#ifdef DEBUG
size_t oldcstrlen = cstrlen;
JSBool ok =
#endif
js_InflateStringToBuffer(cx, cstr, cstrlen, appendBegin, &cstrlen);
JS_ASSERT(ok && cstrlen == oldcstrlen);
return JS_TRUE;
}
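As a quick illustration (not part of the patch), the integer fast path appends
digits straight into a caller-owned buffer, with no intermediate JSString:

static JSBool
AppendFortyTwo(JSContext *cx, JSTempVector<jschar> &buf)
{
    /* Hypothetical helper: on failure, OOM has already been reported. */
    return js_NumberValueToStringBuffer(cx, INT_TO_JSVAL(42), buf);
}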
jsdouble
js_ValueToNumber(JSContext *cx, jsval *vp)
{


@ -189,6 +189,13 @@ js_NewWeaklyRootedNumber(JSContext *cx, jsdouble d, jsval *vp);
extern JSString * JS_FASTCALL
js_NumberToString(JSContext *cx, jsdouble d);
/*
* Convert an integer or double (contained in the given jsval) to a string and
* append to the given buffer.
*/
extern JSBool JS_FASTCALL
js_NumberValueToStringBuffer(JSContext *, jsval, JSTempVector<jschar> &);
/*
* Convert a value to a number. On exit JSVAL_IS_NULL(*vp) iff there was an
* error. If on exit JSVAL_IS_NUMBER(*vp), then *vp holds the jsval that


@ -2168,7 +2168,7 @@ JS_DEFINE_CALLINFO_3(extern, CONSTRUCTOR_RETRY, js_NewInstance, CONTEXT, CLASS,
* access is "object-detecting" in the sense used by web scripts, e.g., when
* checking whether document.all is defined.
*/
static JS_REQUIRES_STACK JSBool
JS_REQUIRES_STACK JSBool
Detecting(JSContext *cx, jsbytecode *pc)
{
JSScript *script;
@ -2231,9 +2231,16 @@ Detecting(JSContext *cx, jsbytecode *pc)
* does not indicate whether we are in a with statement. Return defaultFlags
* if a currently executing bytecode cannot be determined.
*/
static uintN
InferFlags(JSContext *cx, uintN defaultFlags)
uintN
js_InferFlags(JSContext *cx, uintN defaultFlags)
{
#ifdef JS_TRACER
if (JS_ON_TRACE(cx))
return cx->bailExit->lookupFlags;
#endif
JS_ASSERT_NOT_ON_TRACE(cx);
JSStackFrame *fp;
jsbytecode *pc;
const JSCodeSpec *cs;
@ -2271,7 +2278,7 @@ with_LookupProperty(JSContext *cx, JSObject *obj, jsid id, JSObject **objp,
/* Fixes bug 463997 */
uintN flags = cx->resolveFlags;
if (flags == JSRESOLVE_INFER)
flags = InferFlags(cx, flags);
flags = js_InferFlags(cx, flags);
flags |= JSRESOLVE_WITH;
JSAutoResolveFlags rf(cx, flags);
JSObject *proto = OBJ_GET_PROTO(cx, obj);
@ -3837,7 +3844,7 @@ js_LookupPropertyWithFlags(JSContext *cx, JSObject *obj, jsid id, uintN flags,
if (clasp->flags & JSCLASS_NEW_RESOLVE) {
newresolve = (JSNewResolveOp)resolve;
if (flags == JSRESOLVE_INFER)
flags = InferFlags(cx, flags);
flags = js_InferFlags(cx, flags);
obj2 = (clasp->flags & JSCLASS_NEW_RESOLVE_GETS_START)
? start
: NULL;


@ -861,6 +861,9 @@ JS_FRIEND_API(void) js_DumpObject(JSObject *obj);
JS_FRIEND_API(void) js_DumpStackFrame(JSStackFrame *fp);
#endif
extern uintN
js_InferFlags(JSContext *cx, uintN defaultFlags);
JS_END_EXTERN_C
#endif /* jsobj_h___ */


@ -5360,10 +5360,10 @@ SimulateImacroCFG(JSContext *cx, JSScript *script,
ptrdiff_t jmpoff = (type == JOF_JUMP) ? GET_JUMP_OFFSET(pc)
: GET_JUMPX_OFFSET(pc);
LOCAL_ASSERT(jmpoff >= 0);
uintN tmp_pcdepth = SimulateImacroCFG(cx, script, pcdepth, pc + jmpoff,
target, tmp_pcstack);
intN tmp_pcdepth = SimulateImacroCFG(cx, script, pcdepth, pc + jmpoff,
target, tmp_pcstack);
if (tmp_pcdepth >= 0) {
pcdepth = tmp_pcdepth;
pcdepth = uintN(tmp_pcdepth);
goto success;
}
@ -5378,7 +5378,6 @@ SimulateImacroCFG(JSContext *cx, JSScript *script,
LOCAL_ASSERT(pc == target);
success:
LOCAL_ASSERT(pcdepth >= 0);
memcpy(pcstack, tmp_pcstack, nbytes);
JS_free(cx, tmp_pcstack);
return pcdepth;
@ -5403,9 +5402,9 @@ ReconstructImacroPCStack(JSContext *cx, JSScript *script,
*/
JSStackFrame *fp = js_GetScriptedCaller(cx, NULL);
JS_ASSERT(fp->imacpc);
uintN pcdepth = ReconstructPCStack(cx, script, fp->imacpc, pcstack);
intN pcdepth = ReconstructPCStack(cx, script, fp->imacpc, pcstack);
if (pcdepth < 0)
return pcdepth;
return uintN(pcdepth);
return SimulateImacroCFG(cx, script, pcdepth, imacstart, target, pcstack);
}


@ -134,6 +134,21 @@ typedef struct JSXML JSXML;
typedef struct JSXMLArray JSXMLArray;
typedef struct JSXMLArrayCursor JSXMLArrayCursor;
/*
* Template declarations.
*
* jsprvtd.h can be included in both C and C++ translation units. For C++, it
* may possibly be wrapped in an extern "C" block which does not agree with
* templates.
*/
#ifdef __cplusplus
extern "C++" {
template <class T> class JSTempVector;
}
#endif /* __cplusplus */
/* "Friend" types used by jscntxt.h and jsdbgapi.h. */
typedef enum JSTrapStatus {
JSTRAP_ERROR,


@ -2388,8 +2388,10 @@ class RegExpNativeCompiler {
* If the regexp is too long nanojit will assert when we
* try to insert the guard record.
*/
if (re_length > 1024)
if (re_length > 1024) {
re->flags |= JSREG_NOCOMPILE;
return JS_FALSE;
}
this->cx = cx;
/* At this point we have an empty fragment. */
@ -2446,7 +2448,7 @@ class RegExpNativeCompiler {
lirbuf->rewind();
} else {
if (!guard) insertGuard(re_chars, re_length);
fragment->blacklist();
re->flags |= JSREG_NOCOMPILE;
}
delete lirBufWriter;
#ifdef NJ_VERBOSE
@ -2469,8 +2471,6 @@ CompileRegExpToNative(JSContext* cx, JSRegExp* re, Fragment* fragment)
RegExpNativeCompiler rc(re, &state, fragment);
JS_ASSERT(!fragment->code());
JS_ASSERT(!fragment->isBlacklisted());
mark = JS_ARENA_MARK(&cx->tempPool);
if (!CompileRegExpToAST(cx, NULL, re->source, re->flags, state)) {
goto out;
@ -2499,19 +2499,15 @@ GetNativeRegExp(JSContext* cx, JSRegExp* re)
re->source->getCharsAndLength(re_chars, re_length);
void* hash = HashRegExp(re->flags, re_chars, re_length);
fragment = LookupNativeRegExp(cx, hash, re->flags, re_chars, re_length);
if (fragment) {
if (fragment->code())
goto ok;
if (fragment->isBlacklisted())
return NULL;
} else {
if (!fragment) {
fragment = fragmento->getAnchor(hash);
fragment->lirbuf = JS_TRACE_MONITOR(cx).reLirBuf;
fragment->root = fragment;
}
if (!fragment->code()) {
if (!CompileRegExpToNative(cx, re, fragment))
return NULL;
}
if (!CompileRegExpToNative(cx, re, fragment))
return NULL;
ok:
union { NIns *code; NativeRegExp func; } u;
u.code = fragment->code();
@ -3922,6 +3918,7 @@ MatchRegExp(REGlobalData *gData, REMatchState *x)
/* Run with native regexp if possible. */
if (TRACING_ENABLED(gData->cx) &&
!(gData->regexp->flags & JSREG_NOCOMPILE) &&
(native = GetNativeRegExp(gData->cx, gData->regexp))) {
gData->skipped = (ptrdiff_t) x->cp;


@ -73,6 +73,7 @@
#include "jsstaticcheck.h"
#include "jsstr.h"
#include "jsbit.h"
#include "jsvector.h"
#define JSSTRDEP_RECURSION_LIMIT 100
@ -246,35 +247,36 @@ js_MakeStringImmutable(JSContext *cx, JSString *str)
static JSString *
ArgToRootedString(JSContext *cx, uintN argc, jsval *vp, uintN arg)
{
JSObject *obj;
JSString *str;
if (arg >= argc)
return ATOM_TO_STRING(cx->runtime->atomState.typeAtoms[JSTYPE_VOID]);
vp += 2 + arg;
if (JSVAL_IS_OBJECT(*vp)) {
obj = JSVAL_TO_OBJECT(*vp);
if (!obj)
return ATOM_TO_STRING(cx->runtime->atomState.nullAtom);
if (!OBJ_DEFAULT_VALUE(cx, obj, JSTYPE_STRING, vp))
return NULL;
if (!JSVAL_IS_PRIMITIVE(*vp) &&
!OBJ_DEFAULT_VALUE(cx, JSVAL_TO_OBJECT(*vp), JSTYPE_STRING, vp)) {
return NULL;
}
if (JSVAL_IS_STRING(*vp))
return JSVAL_TO_STRING(*vp);
if (JSVAL_IS_INT(*vp)) {
str = js_NumberToString(cx, JSVAL_TO_INT(*vp));
} else if (JSVAL_IS_DOUBLE(*vp)) {
str = js_NumberToString(cx, *JSVAL_TO_DOUBLE(*vp));
JSString *str;
if (JSVAL_IS_STRING(*vp)) {
str = JSVAL_TO_STRING(*vp);
} else if (JSVAL_IS_BOOLEAN(*vp)) {
return ATOM_TO_STRING(cx->runtime->atomState.booleanAtoms[
str = ATOM_TO_STRING(cx->runtime->atomState.booleanAtoms[
JSVAL_TO_BOOLEAN(*vp)? 1 : 0]);
} else {
JS_ASSERT(JSVAL_IS_VOID(*vp));
return ATOM_TO_STRING(cx->runtime->atomState.typeAtoms[JSTYPE_VOID]);
} else if (JSVAL_IS_NULL(*vp)) {
str = ATOM_TO_STRING(cx->runtime->atomState.nullAtom);
} else if (JSVAL_IS_VOID(*vp)) {
str = ATOM_TO_STRING(cx->runtime->atomState.typeAtoms[JSTYPE_VOID]);
}
else {
if (JSVAL_IS_INT(*vp)) {
str = js_NumberToString(cx, JSVAL_TO_INT(*vp));
} else {
JS_ASSERT(JSVAL_IS_DOUBLE(*vp));
str = js_NumberToString(cx, *JSVAL_TO_DOUBLE(*vp));
}
if (str)
*vp = STRING_TO_JSVAL(str);
}
if (str)
*vp = STRING_TO_JSVAL(str);
return str;
}
@ -2966,16 +2968,13 @@ js_ValueToPrintable(JSContext *cx, jsval v, JSValueToStringFun v2sfun)
JS_FRIEND_API(JSString *)
js_ValueToString(JSContext *cx, jsval v)
{
JSObject *obj;
JSString *str;
if (JSVAL_IS_OBJECT(v)) {
obj = JSVAL_TO_OBJECT(v);
if (!obj)
return ATOM_TO_STRING(cx->runtime->atomState.nullAtom);
if (!OBJ_DEFAULT_VALUE(cx, obj, JSTYPE_STRING, &v))
return NULL;
if (!JSVAL_IS_PRIMITIVE(v) &&
!OBJ_DEFAULT_VALUE(cx, JSVAL_TO_OBJECT(v), JSTYPE_STRING, &v)) {
return NULL;
}
if (JSVAL_IS_STRING(v)) {
str = JSVAL_TO_STRING(v);
} else if (JSVAL_IS_INT(v)) {
@ -2984,12 +2983,50 @@ js_ValueToString(JSContext *cx, jsval v)
str = js_NumberToString(cx, *JSVAL_TO_DOUBLE(v));
} else if (JSVAL_IS_BOOLEAN(v)) {
str = js_BooleanToString(cx, JSVAL_TO_BOOLEAN(v));
} else if (JSVAL_IS_NULL(v)) {
str = ATOM_TO_STRING(cx->runtime->atomState.nullAtom);
} else {
str = ATOM_TO_STRING(cx->runtime->atomState.typeAtoms[JSTYPE_VOID]);
}
return str;
}
static inline JSBool
pushAtom(JSAtom *atom, JSTempVector<jschar> &buf)
{
JSString *str = ATOM_TO_STRING(atom);
const jschar *chars;
size_t length;
str->getCharsAndLength(chars, length);
return buf.pushBack(chars, chars + length);
}
/* This function implements E-262-3 section 9.8, toString. */
JS_FRIEND_API(JSBool)
js_ValueToStringBuffer(JSContext *cx, jsval v, JSTempVector<jschar> &buf)
{
if (!JSVAL_IS_PRIMITIVE(v) &&
!OBJ_DEFAULT_VALUE(cx, JSVAL_TO_OBJECT(v), JSTYPE_STRING, &v)) {
return JS_FALSE;
}
if (JSVAL_IS_STRING(v)) {
JSString *str = JSVAL_TO_STRING(v);
const jschar *chars;
size_t length;
str->getCharsAndLength(chars, length);
return buf.pushBack(chars, chars + length);
}
if (JSVAL_IS_NUMBER(v))
return js_NumberValueToStringBuffer(cx, v, buf);
if (JSVAL_IS_BOOLEAN(v))
return js_BooleanToStringBuffer(cx, JSVAL_TO_BOOLEAN(v), buf);
if (JSVAL_IS_NULL(v))
return pushAtom(cx->runtime->atomState.nullAtom, buf);
JS_ASSERT(JSVAL_IS_VOID(v));
return pushAtom(cx->runtime->atomState.typeAtoms[JSTYPE_VOID], buf);
}
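Taken together, the buffer-based conversions enable joins without per-element
JSString allocation. A hypothetical helper (ConcatValues is not in the tree; it
mirrors the finalize-and-extract steps of BufferToString in jsarray.cpp above):

static JSBool
ConcatValues(JSContext *cx, jsval *vals, size_t n, jsval *rval)
{
    JSTempVector<jschar> buf(cx);
    for (size_t i = 0; i < n; i++) {
        if (i != 0 && !buf.pushBack(','))
            return JS_FALSE;
        if (!js_ValueToStringBuffer(cx, vals[i], buf))
            return JS_FALSE;          /* buf may hold partial results */
    }
    if (!buf.pushBack(0))             /* trailing NUL, as in the patch */
        return JS_FALSE;
    size_t length = buf.size() - 1;   /* exclude the NUL */
    jschar *chars = buf.extractRawBuffer();
    JSString *str = js_NewString(cx, chars, length);
    if (!str) {
        JS_free(cx, chars);           /* ownership stayed with us on failure */
        return JS_FALSE;
    }
    *rval = STRING_TO_JSVAL(str);
    return JS_TRUE;
}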
JS_FRIEND_API(JSString *)
js_ValueToSource(JSContext *cx, jsval v)
{


@ -602,6 +602,14 @@ js_ValueToPrintable(JSContext *cx, jsval v, JSValueToStringFun v2sfun);
extern JS_FRIEND_API(JSString *)
js_ValueToString(JSContext *cx, jsval v);
/*
* This function implements E-262-3 section 9.8, toString. Convert the given
* value to a string of jschars appended to the given buffer. On error, the
* passed buffer may have partial results appended.
*/
extern JS_FRIEND_API(JSBool)
js_ValueToStringBuffer(JSContext *, jsval, JSTempVector<jschar> &);
/*
* Convert a value to its source expression, returning null after reporting
* an error, otherwise returning a new string reference.


@ -2944,6 +2944,7 @@ TraceRecorder::snapshot(ExitType exitType)
exit->sp_adj = (stackSlots * sizeof(double)) - treeInfo->nativeStackBase;
exit->rp_adj = exit->calldepth * sizeof(FrameInfo*);
exit->nativeCalleeWord = 0;
exit->lookupFlags = js_InferFlags(cx, 0);
memcpy(getFullTypeMap(exit), typemap, typemap_size);
return exit;
}


@ -338,6 +338,7 @@ struct VMSideExit : public nanojit::SideExit
uint32 numStackSlots;
uint32 numStackSlotsBelowCurrentFrame;
ExitType exitType;
uintN lookupFlags;
/*
* Ordinarily 0. If a slow native function is atop the stack, the 1 bit is

js/src/jsvector.h (new file, 410 lines)

@ -0,0 +1,410 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sw=4 et tw=99 ft=cpp:
*
* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
* June 12, 2009.
*
* The Initial Developer of the Original Code is
* the Mozilla Corporation.
*
* Contributor(s):
* Luke Wagner <lw@mozilla.com>
*
* Alternatively, the contents of this file may be used under the terms of
* either of the GNU General Public License Version 2 or later (the "GPL"),
* or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#ifndef jsvector_h_
#define jsvector_h_
#include "jscntxt.h"
#include <string.h>
#include <new>
/*
* Traits class for identifying POD types. Until C++0x, there is no automatic
* way to detect PODs, so for the moment it is done manually.
*/
template <class T> struct IsPodType { static const bool result = false; };
template <> struct IsPodType<char> { static const bool result = true; };
template <> struct IsPodType<int> { static const bool result = true; };
template <> struct IsPodType<short> { static const bool result = true; };
template <> struct IsPodType<long> { static const bool result = true; };
template <> struct IsPodType<float> { static const bool result = true; };
template <> struct IsPodType<double> { static const bool result = true; };
template <> struct IsPodType<jschar> { static const bool result = true; };
/*
* This template class provides a default implementation for vector operations
* when the element type is not known to be a POD, as judged by IsPodType.
*/
template <class T, bool IsPod>
struct JSTempVectorImpl
{
/* Destroys constructed objects in the range [begin, end). */
static inline void destroy(T *begin, T *end) {
for (T *p = begin; p != end; ++p)
p->~T();
}
/* Constructs objects in the uninitialized range [begin, end). */
static inline void initialize(T *begin, T *end) {
for (T *p = begin; p != end; ++p)
new(p) T();
}
/*
* Copy-constructs objects in the uninitialized range
* [dst, dst+(srcend-srcbeg)) from the range [srcbeg, srcend).
*/
template <class U>
static inline void copyInitialize(T *dst, const U *srcbeg, const U *srcend) {
for (const U *p = srcbeg; p != srcend; ++p, ++dst)
new(dst) T(*p);
}
/*
* Grows the given buffer to have capacity newcap, preserving the objects
* constructed in the range [begin, end) and updating vec.
*/
static inline bool growTo(JSTempVector<T> &vec, size_t newcap) {
size_t bytes = sizeof(T) * newcap;
T *newbuf = reinterpret_cast<T *>(malloc(bytes));
if (!newbuf) {
js_ReportOutOfMemory(vec.mCx);
return false;
}
for (T *dst = newbuf, *src = vec.mBegin; src != vec.mEnd; ++dst, ++src)
new(dst) T(*src);
JSTempVectorImpl::destroy(vec.mBegin, vec.mEnd);
free(vec.mBegin);
vec.mEnd = newbuf + (vec.mEnd - vec.mBegin);
vec.mBegin = newbuf;
vec.mCapacity = newbuf + newcap;
return true;
}
};
/*
* This partial template specialization provides a default implementation for
* vector operations when the element type is known to be a POD, as judged by
* IsPodType.
*/
template <class T>
struct JSTempVectorImpl<T, true>
{
static inline void destroy(T *, T *) {}
static inline void initialize(T *begin, T *end) {
//memset(begin, 0, sizeof(T) * (end-begin)); //SLOWER
for (T *p = begin; p != end; ++p)
*p = 0;
}
static inline void copyInitialize(T *dst, const T *srcbeg, const T *srcend) {
//memcpy(dst, srcbeg, sizeof(T) * (srcend-srcbeg)); //SLOWER
for (const T *p = srcbeg; p != srcend; ++p, ++dst)
*dst = *p;
}
static inline bool growTo(JSTempVector<T> &vec, size_t newcap) {
size_t bytes = sizeof(T) * newcap;
T *newbuf = reinterpret_cast<T *>(realloc(vec.mBegin, bytes));
if (!newbuf) {
js_ReportOutOfMemory(vec.mCx);
free(vec.mBegin);
return false;
}
vec.mEnd = newbuf + (vec.mEnd - vec.mBegin);
vec.mBegin = newbuf;
vec.mCapacity = newbuf + newcap;
return true;
}
};
/*
* JS-friendly, STL-like container providing a short-lived, dynamic buffer.
* JSTempVector calls the constructors/destructors of all elements stored in
* its internal buffer, so non-PODs may be safely used.
*
* T requirements:
* - default and copy constructible, assignable, destructible
* - operations do not throw
*
* N.B: JSTempVector is not reentrant: T member functions called during
* JSTempVector member functions must not call back into the same
* JSTempVector.
*/
template <class T>
class JSTempVector
{
#ifdef DEBUG
bool mInProgress;
#endif
class ReentrancyGuard {
JSTempVector &mVec;
public:
ReentrancyGuard(JSTempVector &v)
: mVec(v)
{
#ifdef DEBUG
JS_ASSERT(!mVec.mInProgress);
mVec.mInProgress = true;
#endif
}
~ReentrancyGuard()
{
#ifdef DEBUG
mVec.mInProgress = false;
#endif
}
};
public:
JSTempVector(JSContext *cx)
:
#ifdef DEBUG
mInProgress(false),
#endif
mCx(cx), mBegin(0), mEnd(0), mCapacity(0)
{}
~JSTempVector();
JSTempVector(const JSTempVector &);
JSTempVector &operator=(const JSTempVector &);
/* accessors */
size_t size() const { return mEnd - mBegin; }
size_t capacity() const { return mCapacity - mBegin; }
bool empty() const { return mBegin == mEnd; }
T &operator[](int i) {
JS_ASSERT(!mInProgress && i < size());
return mBegin[i];
}
const T &operator[](int i) const {
JS_ASSERT(!mInProgress && i < size());
return mBegin[i];
}
T *begin() {
JS_ASSERT(!mInProgress);
return mBegin;
}
const T *begin() const {
JS_ASSERT(!mInProgress);
return mBegin;
}
T *end() {
JS_ASSERT(!mInProgress);
return mEnd;
}
const T *end() const {
JS_ASSERT(!mInProgress);
return mEnd;
}
T &back() {
JS_ASSERT(!mInProgress);
return *(mEnd - 1);
}
const T &back() const {
JS_ASSERT(!mInProgress && !empty());
return *(mEnd - 1);
}
/* mutators */
bool reserve(size_t);
bool growBy(size_t);
void clear();
bool pushBack(const T &);
template <class U> bool pushBack(const U *begin, const U *end);
/*
* Transfers ownership of the internal buffer used by JSTempVector to the
* caller. After this call, the JSTempVector is empty.
* N.B. Although a T*, only the range [0, size()) is constructed.
*/
T *extractRawBuffer();
/*
* Transfer ownership of an array of objects into the JSTempVector.
* N.B. This call assumes that there are no uninitialized elements in the
* passed array.
*/
void replaceRawBuffer(T *, size_t length);
private:
typedef JSTempVectorImpl<T, IsPodType<T>::result> Impl;
friend class JSTempVectorImpl<T, IsPodType<T>::result>;
static const int sGrowthFactor = 3;
bool checkOverflow(size_t newval, size_t oldval, size_t diff) const;
JSContext *mCx;
T *mBegin, *mEnd, *mCapacity;
};
template <class T>
inline
JSTempVector<T>::~JSTempVector()
{
ReentrancyGuard g(*this);
Impl::destroy(mBegin, mEnd);
free(mBegin);
}
template <class T>
inline bool
JSTempVector<T>::reserve(size_t newsz)
{
ReentrancyGuard g(*this);
size_t oldcap = capacity();
if (newsz > oldcap) {
size_t diff = newsz - oldcap;
size_t newcap = diff + oldcap * sGrowthFactor;
return checkOverflow(newcap, oldcap, diff) &&
Impl::growTo(*this, newcap);
}
return true;
}
template <class T>
inline bool
JSTempVector<T>::growBy(size_t amount)
{
/* grow if needed */
size_t oldsize = size(), newsize = oldsize + amount;
if (!checkOverflow(newsize, oldsize, amount) ||
(newsize > capacity() && !reserve(newsize)))
return false;
/* initialize new elements */
ReentrancyGuard g(*this);
JS_ASSERT(mCapacity - (mBegin + newsize) >= 0);
T *newend = mBegin + newsize;
Impl::initialize(mEnd, newend);
mEnd = newend;
return true;
}
template <class T>
inline void
JSTempVector<T>::clear()
{
ReentrancyGuard g(*this);
Impl::destroy(mBegin, mEnd);
mEnd = mBegin;
}
/*
* Check for overflow of an increased size or capacity (generically, 'value').
* 'diff' is how much greater newval should be compared to oldval.
*/
template <class T>
inline bool
JSTempVector<T>::checkOverflow(size_t newval, size_t oldval, size_t diff) const
{
size_t newbytes = newval * sizeof(T),
oldbytes = oldval * sizeof(T),
diffbytes = diff * sizeof(T);
bool ok = newbytes >= oldbytes && (newbytes - oldbytes) >= diffbytes;
if (!ok)
js_ReportAllocationOverflow(mCx);
return ok;
}
template <class T>
inline bool
JSTempVector<T>::pushBack(const T &t)
{
ReentrancyGuard g(*this);
if (mEnd == mCapacity) {
/* reallocate, growing by sGrowthFactor */
size_t oldcap = capacity();
size_t newcap = empty() ? 1 : oldcap * sGrowthFactor;
if (!checkOverflow(newcap, oldcap, 1) ||
!Impl::growTo(*this, newcap))
return false;
}
JS_ASSERT(mEnd != mCapacity);
new(mEnd++) T(t);
return true;
}
template <class T>
template <class U>
inline bool
JSTempVector<T>::pushBack(const U *begin, const U *end)
{
ReentrancyGuard g(*this);
size_t space = mCapacity - mEnd, needed = end - begin;
if (space < needed) {
/* reallocate, growing by sGrowthFactor */
size_t oldcap = capacity();
size_t newcap = empty() ? needed : (needed + oldcap * sGrowthFactor);
if (!checkOverflow(newcap, oldcap, needed) ||
!Impl::growTo(*this, newcap))
return false;
}
JS_ASSERT((mCapacity - mEnd) >= (end - begin));
Impl::copyInitialize(mEnd, begin, end);
mEnd += needed;
return true;
}
template <class T>
inline T *
JSTempVector<T>::extractRawBuffer()
{
T *ret = mBegin;
mBegin = mEnd = mCapacity = 0;
return ret;
}
template <class T>
inline void
JSTempVector<T>::replaceRawBuffer(T *p, size_t length)
{
ReentrancyGuard g(*this);
Impl::destroy(mBegin, mEnd);
free(mBegin);
mBegin = p;
mCapacity = mEnd = mBegin + length;
}
#endif /* jsvector_h_ */
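A usage sketch for the non-POD path (Cell is invented for illustration;
IsPodType<Cell>::result defaults to false, selecting the primary
JSTempVectorImpl, which copy-constructs and destroys elements):

struct Cell {
    Cell() : v(0) {}
    Cell(const Cell &other) : v(other.v) {}
    ~Cell() {}
    int v;
};

static JSBool
FillCells(JSContext *cx)
{
    JSTempVector<Cell> cells(cx);
    if (!cells.reserve(8))            /* one grow up front; reports OOM */
        return JS_FALSE;
    Cell c;
    for (int i = 0; i < 8; i++) {
        c.v = i;
        if (!cells.pushBack(c))       /* copy-constructs into the buffer */
            return JS_FALSE;
    }
    cells.clear();                    /* runs ~Cell per element, keeps capacity */
    return JS_TRUE;                   /* ~JSTempVector frees the buffer */
}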


@ -493,8 +493,7 @@ XDRDoubleValue(JSXDRState *xdr, jsdouble *dp)
{
jsdpun u;
if (xdr->mode == JSXDR_ENCODE)
u.d = *dp;
u.d = (xdr->mode == JSXDR_ENCODE) ? *dp : 0.0;
if (!JS_XDRUint32(xdr, &u.s.lo) || !JS_XDRUint32(xdr, &u.s.hi))
return JS_FALSE;
if (xdr->mode == JSXDR_DECODE)
@ -505,10 +504,7 @@ XDRDoubleValue(JSXDRState *xdr, jsdouble *dp)
JS_PUBLIC_API(JSBool)
JS_XDRDouble(JSXDRState *xdr, jsdouble **dpp)
{
jsdouble d;
if (xdr->mode == JSXDR_ENCODE)
d = **dpp;
jsdouble d = (xdr->mode == JSXDR_ENCODE) ? **dpp : 0.0;
if (!XDRDoubleValue(xdr, &d))
return JS_FALSE;
if (xdr->mode == JSXDR_DECODE) {
@ -544,9 +540,7 @@ XDRValueBody(JSXDRState *xdr, uint32 type, jsval *vp)
break;
}
case JSVAL_DOUBLE: {
jsdouble *dp;
if (xdr->mode == JSXDR_ENCODE)
dp = JSVAL_TO_DOUBLE(*vp);
jsdouble *dp = (xdr->mode == JSXDR_ENCODE) ? JSVAL_TO_DOUBLE(*vp) : NULL;
if (!JS_XDRDouble(xdr, &dp))
return JS_FALSE;
if (xdr->mode == JSXDR_DECODE)


@ -643,9 +643,9 @@ assemble(istream &in,
map<string,LIns*> labels;
map<string,pair<LOpcode,size_t> > op_map;
#define OPDEF(op, number, args) \
#define OPDEF(op, number, args, repkind) \
op_map[#op] = make_pair(LIR_##op, args);
#define OPDEF64(op, number, args) \
#define OPDEF64(op, number, args, repkind) \
op_map[#op] = make_pair(LIR_##op, args);
#include "nanojit/LIRopcode.tbl"
#undef OPDEF


@ -1482,7 +1482,6 @@ namespace nanojit
}
case LIR_eq:
case LIR_ov:
case LIR_cs:
case LIR_le:
case LIR_lt:
case LIR_gt:
@ -1543,13 +1542,7 @@ namespace nanojit
void Assembler::emitJumpTable(SwitchInfo* si, NIns* target)
{
underrunProtect(si->count * sizeof(NIns*) + 20);
// Align for platform. The branch should be optimized away and is
// required to select the compatible int type.
if (sizeof(NIns*) == 8) {
_nIns = (NIns*) (uint64(_nIns) & ~7);
} else if (sizeof(NIns*) == 4) {
_nIns = (NIns*) (uint32(_nIns) & ~3);
}
_nIns = reinterpret_cast<NIns*>(uintptr_t(_nIns) & ~(sizeof(NIns*) - 1));
for (uint32_t i = 0; i < si->count; ++i) {
_nIns = (NIns*) (((uint8*) _nIns) - sizeof(NIns*));
*(NIns**) _nIns = target;
@ -1565,7 +1558,7 @@ namespace nanojit
for (int i=0, n = NumSavedRegs; i < n; i++) {
LIns *p = b->savedRegs[i];
if (p)
findSpecificRegFor(p, savedRegs[p->imm8()]);
findSpecificRegFor(p, savedRegs[p->paramArg()]);
}
}
@ -1584,10 +1577,10 @@ namespace nanojit
{
LInsp state = _thisfrag->lirbuf->state;
if (state)
findSpecificRegFor(state, argRegs[state->imm8()]);
findSpecificRegFor(state, argRegs[state->paramArg()]);
LInsp param1 = _thisfrag->lirbuf->param1;
if (param1)
findSpecificRegFor(param1, argRegs[param1->imm8()]);
findSpecificRegFor(param1, argRegs[param1->paramArg()]);
}
void Assembler::handleLoopCarriedExprs()


@ -84,12 +84,13 @@ namespace nanojit
/* Opcodes must be strictly increasing without holes. */
uint32_t count = 0;
#define OPDEF(op, number, operands) \
NanoAssertMsg(LIR_##op == count++, "misnumbered opcode");
#define OPDEF64(op, number, operands) OPDEF(op, number, operands)
#include "LIRopcode.tbl"
#undef OPDEF
#undef OPDEF64
#define OPDEF(op, number, operands, repkind) \
NanoAssertMsg(LIR_##op == count++, "misnumbered opcode");
#define OPDEF64(op, number, operands, repkind) \
OPDEF(op, number, operands, repkind)
#include "LIRopcode.tbl"
#undef OPDEF
#undef OPDEF64
}
#endif


@ -51,9 +51,9 @@ namespace nanojit
#ifdef FEATURE_NANOJIT
const uint8_t operandCount[] = {
#define OPDEF(op, number, operands) \
#define OPDEF(op, number, operands, repkind) \
operands,
#define OPDEF64(op, number, operands) \
#define OPDEF64(op, number, operands, repkind) \
operands,
#include "LIRopcode.tbl"
#undef OPDEF
@ -61,13 +61,35 @@ namespace nanojit
0
};
const uint8_t repKinds[] = {
#define OPDEF(op, number, operands, repkind) \
LRK_##repkind,
#define OPDEF64(op, number, operands, repkind) \
OPDEF(op, number, operands, repkind)
#include "LIRopcode.tbl"
#undef OPDEF
#undef OPDEF64
0
};
const uint8_t insSizes[] = {
#define OPDEF(op, number, operands, repkind) \
sizeof(LIns##repkind),
#define OPDEF64(op, number, operands, repkind) \
OPDEF(op, number, operands, repkind)
#include "LIRopcode.tbl"
#undef OPDEF
#undef OPDEF64
0
};
// LIR verbose specific
#ifdef NJ_VERBOSE
const char* lirNames[] = {
#define OPDEF(op, number, operands) \
#define OPDEF(op, number, operands, repkind) \
#op,
#define OPDEF64(op, number, operands) \
#define OPDEF64(op, number, operands, repkind) \
#op,
#include "LIRopcode.tbl"
#undef OPDEF
@ -131,7 +153,8 @@ namespace nanojit
int32_t LirBuffer::insCount()
{
// Doesn't include LIR_skip payload or LIR_call arg slots.
// A LIR_skip payload is considered part of the LIR_skip, and LIR_call
// arg slots are considered part of the LIR_call.
return _stats.lir;
}
@ -165,10 +188,10 @@ namespace nanojit
// Unlike all the ins*() functions, we don't call makeRoom() here
// because we know we have enough space, having just started a new
// page.
LInsp l = (LInsp)_unused;
l->setIns1(LIR_skip, (LInsp)addrOfLastLInsOnCurrentPage);
l->resv()->clear();
_unused += sizeof(LIns);
LInsSk* insSk = (LInsSk*)_unused;
LIns* ins = insSk->getLIns();
ins->initLInsSk((LInsp)addrOfLastLInsOnCurrentPage);
_unused += sizeof(LInsSk);
_stats.lir++;
}
@ -208,40 +231,42 @@ namespace nanojit
moveToNewPage(addrOfLastLInsOnPage);
}
// Make sure it's word-aligned.
NanoAssert(0 == startOfRoom % sizeof(void*));
return startOfRoom;
}
LInsp LirBufWriter::insStorei(LInsp val, LInsp base, int32_t d)
{
LOpcode op = val->isQuad() ? LIR_stqi : LIR_sti;
LInsp l = (LInsp)_buf->makeRoom(sizeof(LIns));
l->setStorei(op, val, base, d);
l->resv()->clear();
return l;
LInsSti* insSti = (LInsSti*)_buf->makeRoom(sizeof(LInsSti));
LIns* ins = insSti->getLIns();
ins->initLInsSti(op, val, base, d);
return ins;
}
LInsp LirBufWriter::ins0(LOpcode op)
{
LInsp l = (LInsp)_buf->makeRoom(sizeof(LIns));
l->setIns0(op);
l->resv()->clear();
return l;
LInsOp0* insOp0 = (LInsOp0*)_buf->makeRoom(sizeof(LInsOp0));
LIns* ins = insOp0->getLIns();
ins->initLInsOp0(op);
return ins;
}
LInsp LirBufWriter::ins1(LOpcode op, LInsp o1)
{
LInsp l = (LInsp)_buf->makeRoom(sizeof(LIns));
l->setIns1(op, o1);
l->resv()->clear();
return l;
LInsOp1* insOp1 = (LInsOp1*)_buf->makeRoom(sizeof(LInsOp1));
LIns* ins = insOp1->getLIns();
ins->initLInsOp1(op, o1);
return ins;
}
LInsp LirBufWriter::ins2(LOpcode op, LInsp o1, LInsp o2)
{
LInsp l = (LInsp)_buf->makeRoom(sizeof(LIns));
l->setIns2(op, o1, o2);
l->resv()->clear();
return l;
LInsOp2* insOp2 = (LInsOp2*)_buf->makeRoom(sizeof(LInsOp2));
LIns* ins = insOp2->getLIns();
ins->initLInsOp2(op, o1, o2);
return ins;
}
LInsp LirBufWriter::insLoad(LOpcode op, LInsp base, LInsp d)
@ -263,39 +288,39 @@ namespace nanojit
LInsp LirBufWriter::insAlloc(int32_t size)
{
size = (size+3)>>2; // # of required 32bit words
LInsp l = (LInsp)_buf->makeRoom(sizeof(LIns));
l->setAlloc(LIR_alloc, size);
l->resv()->clear();
return l;
LInsI* insI = (LInsI*)_buf->makeRoom(sizeof(LInsI));
LIns* ins = insI->getLIns();
ins->initLInsI(LIR_alloc, size);
return ins;
}
LInsp LirBufWriter::insParam(int32_t arg, int32_t kind)
{
LInsp l = (LInsp)_buf->makeRoom(sizeof(LIns));
l->setParam(LIR_param, arg, kind);
l->resv()->clear();
LInsP* insP = (LInsP*)_buf->makeRoom(sizeof(LInsP));
LIns* ins = insP->getLIns();
ins->initLInsP(arg, kind);
if (kind) {
NanoAssert(arg < NumSavedRegs);
_buf->savedRegs[arg] = l;
_buf->savedRegs[arg] = ins;
_buf->explicitSavedRegs = true;
}
return l;
return ins;
}
LInsp LirBufWriter::insImm(int32_t imm)
{
LInsp l = (LInsp)_buf->makeRoom(sizeof(LIns));
l->setImm(LIR_int, imm);
l->resv()->clear();
return l;
LInsI* insI = (LInsI*)_buf->makeRoom(sizeof(LInsI));
LIns* ins = insI->getLIns();
ins->initLInsI(LIR_int, imm);
return ins;
}
LInsp LirBufWriter::insImmq(uint64_t imm)
{
LInsp l = (LInsp)_buf->makeRoom(sizeof(LIns));
l->setImmq(LIR_quad, imm);
l->resv()->clear();
return l;
LInsI64* insI64 = (LInsI64*)_buf->makeRoom(sizeof(LInsI64));
LIns* ins = insI64->getLIns();
ins->initLInsI64(LIR_quad, imm);
return ins;
}
LInsp LirBufWriter::insSkip(size_t payload_szB)
@ -308,14 +333,14 @@ namespace nanojit
NanoAssert(0 == NJ_MAX_SKIP_PAYLOAD_SZB % sizeof(void*));
NanoAssert(sizeof(void*) <= payload_szB && payload_szB <= NJ_MAX_SKIP_PAYLOAD_SZB);
uintptr_t payload = _buf->makeRoom(payload_szB + sizeof(LIns)); // payload + skip
uintptr_t payload = _buf->makeRoom(payload_szB + sizeof(LInsSk));
uintptr_t prevLInsAddr = payload - sizeof(LIns);
LInsp l = (LInsp)(payload + payload_szB);
LInsSk* insSk = (LInsSk*)(payload + payload_szB);
LIns* ins = insSk->getLIns();
NanoAssert(prevLInsAddr >= pageDataStart(prevLInsAddr));
NanoAssert(samepage(prevLInsAddr, l));
l->setIns1(LIR_skip, (LInsp)prevLInsAddr);
l->resv()->clear();
return l;
NanoAssert(samepage(prevLInsAddr, insSk));
ins->initLInsSk((LInsp)prevLInsAddr);
return ins;
}
// Reads the next non-skip instruction.
@ -341,33 +366,38 @@ namespace nanojit
do
{
switch (iop)
{
default:
i -= sizeof(LIns);
break;
// Nb: this switch is table-driven (because sizeof_LInsXYZ() is
// table-driven) in most cases to avoid branch mispredictions --
// if we do a vanilla switch on the iop or LInsRepKind the extra
// branch mispredictions cause a small but noticeable slowdown.
switch (iop)
{
default:
i -= insSizes[((LInsp)i)->opcode()];
break;
#if defined NANOJIT_64BIT
case LIR_callh:
case LIR_callh:
#endif
case LIR_call:
case LIR_fcall: {
case LIR_call:
case LIR_fcall: {
int argc = ((LInsp)i)->argc();
uintptr_t prev = i - sizeof(LIns) - argc*sizeof(LInsp);
NanoAssert( samepage(i, prev) );
i = prev;
i -= sizeof(LInsC); // step over the instruction
i -= argc*sizeof(LInsp); // step over the arguments
NanoAssert( samepage(i, _i) );
break;
}
case LIR_skip:
NanoAssert(((LInsp)i)->oprnd1() != (LInsp)i);
i = uintptr_t(((LInsp)i)->oprnd1());
break;
case LIR_skip:
// Ignore the skip, move onto its predecessor.
NanoAssert(((LInsp)i)->prevLIns() != (LInsp)i);
i = uintptr_t(((LInsp)i)->prevLIns());
break;
case LIR_start:
_i = 0; // start of trace
return cur;
}
case LIR_start:
_i = 0; // this means the next call to this method will return 0
return cur;
}
iop = ((LInsp)i)->opcode();
}
while (iop==LIR_skip || iop==LIR_2);
@ -376,7 +406,7 @@ namespace nanojit
}
bool LIns::isFloat() const {
switch (firstWord.code) {
switch (opcode()) {
default:
return false;
case LIR_fadd:
@ -392,107 +422,69 @@ namespace nanojit
}
#if defined(_DEBUG)
bool LIns::isOp1() const {
switch (firstWord.code) {
case LIR_skip:
case LIR_ret:
case LIR_live:
case LIR_neg:
#if !defined NANOJIT_64BIT
case LIR_callh:
#endif
case LIR_not:
case LIR_qlo:
case LIR_qhi:
case LIR_ov:
case LIR_cs:
case LIR_file:
case LIR_line:
case LIR_fret:
case LIR_fneg:
case LIR_i2f:
case LIR_u2f:
case LIR_mod:
return true;
default:
return false;
}
bool LIns::isLInsOp0() const {
NanoAssert(LRK_None != repKinds[opcode()]);
return LRK_Op0 == repKinds[opcode()];
}
// Nb: this excludes loads and stores, which are covered by isLoad() and
// isStore().
bool LIns::isOp2() const {
switch (firstWord.code) {
case LIR_loop:
case LIR_x:
case LIR_jt:
case LIR_jf:
case LIR_feq:
case LIR_flt:
case LIR_fgt:
case LIR_fle:
case LIR_fge:
case LIR_cmov:
case LIR_add:
case LIR_sub:
case LIR_mul:
case LIR_div:
case LIR_and:
case LIR_or:
case LIR_xor:
case LIR_lsh:
case LIR_rsh:
case LIR_ush:
case LIR_xt:
case LIR_xf:
case LIR_eq:
case LIR_lt:
case LIR_gt:
case LIR_le:
case LIR_ge:
case LIR_ult:
case LIR_ugt:
case LIR_ule:
case LIR_uge:
case LIR_2:
case LIR_xbarrier:
case LIR_xtbl:
case LIR_qiand:
case LIR_qiadd:
case LIR_qjoin:
case LIR_qcmov:
case LIR_fadd:
case LIR_fsub:
case LIR_fmul:
case LIR_fdiv:
case LIR_qior:
case LIR_qilsh:
return true;
bool LIns::isLInsOp1() const {
NanoAssert(LRK_None != repKinds[opcode()]);
return LRK_Op1 == repKinds[opcode()];
}
default:
return false;
}
bool LIns::isLInsOp2() const {
NanoAssert(LRK_None != repKinds[opcode()]);
return LRK_Op2 == repKinds[opcode()];
}
bool LIns::isLInsSti() const {
NanoAssert(LRK_None != repKinds[opcode()]);
return LRK_Sti == repKinds[opcode()];
}
bool LIns::isLInsSk() const {
NanoAssert(LRK_None != repKinds[opcode()]);
return LRK_Sk == repKinds[opcode()];
}
bool LIns::isLInsC() const {
NanoAssert(LRK_None != repKinds[opcode()]);
return LRK_C == repKinds[opcode()];
}
bool LIns::isLInsP() const {
NanoAssert(LRK_None != repKinds[opcode()]);
return LRK_P == repKinds[opcode()];
}
bool LIns::isLInsI() const {
NanoAssert(LRK_None != repKinds[opcode()]);
return LRK_I == repKinds[opcode()];
}
bool LIns::isLInsI64() const {
NanoAssert(LRK_None != repKinds[opcode()]);
return LRK_I64 == repKinds[opcode()];
}
#endif // defined(_DEBUG)
bool LIns::isCmp() const {
LOpcode op = firstWord.code;
LOpcode op = opcode();
return (op >= LIR_eq && op <= LIR_uge) || (op >= LIR_feq && op <= LIR_fge);
}
bool LIns::isCond() const {
LOpcode op = firstWord.code;
return (op == LIR_ov) || (op == LIR_cs) || isCmp();
LOpcode op = opcode();
return (op == LIR_ov) || isCmp();
}
bool LIns::isQuad() const {
#ifdef AVMPLUS_64BIT
// callh in 64bit cpu's means a call that returns an int64 in a single register
return (firstWord.code & LIR64) != 0 || firstWord.code == LIR_callh;
return (opcode() & LIR64) != 0 || opcode() == LIR_callh;
#else
// callh in 32bit cpu's means the 32bit MSW of an int64 result in 2 registers
return (firstWord.code & LIR64) != 0;
return (opcode() & LIR64) != 0;
#endif
}
@ -503,7 +495,7 @@ namespace nanojit
bool LIns::isconstq() const
{
return firstWord.code == LIR_quad;
return opcode() == LIR_quad;
}
bool LIns::isconstp() const
@ -517,14 +509,14 @@ namespace nanojit
bool LIns::isCse() const
{
return nanojit::isCseOpcode(firstWord.code) || (isCall() && callInfo()->_cse);
return nanojit::isCseOpcode(opcode()) || (isCall() && callInfo()->_cse);
}
void LIns::setTarget(LInsp label)
{
NanoAssert(label && label->isop(LIR_label));
NanoAssert(isBranch());
u.oprnd_2 = label;
toLInsOp2()->oprnd_2 = label;
}
LInsp LIns::getTarget()
@ -536,15 +528,15 @@ namespace nanojit
void *LIns::payload() const
{
NanoAssert(isop(LIR_skip));
// Operand 1 points to the previous instruction; we move one
// instruction past it to get to the payload.
return (void*) (intptr_t(oprnd1()) + sizeof(LIns));
// Operand 1 points to the previous LIns; we move past it to get to
// the payload.
return (void*) (uintptr_t(prevLIns()) + sizeof(LIns));
}
uint64_t LIns::imm64() const
{
NanoAssert(isconstq());
return (uint64_t(i64.imm64_1) << 32) | uint32_t(i64.imm64_0);
return (uint64_t(toLInsI64()->imm64_1) << 32) | uint32_t(toLInsI64()->imm64_0);
}
double LIns::imm64f() const
@ -560,7 +552,7 @@ namespace nanojit
const CallInfo* LIns::callInfo() const
{
NanoAssert(isCall());
return c.ci;
return toLInsC()->ci;
}
// Index args in r-l order. arg(0) is rightmost arg.
@ -569,8 +561,9 @@ namespace nanojit
{
NanoAssert(isCall());
NanoAssert(i < argc());
LInsp* offs = (LInsp*)this - (i+1);
return *offs;
// Move to the start of the LInsC, then move back one word per argument.
LInsp* argSlot = (LInsp*)(uintptr_t(toLInsC()) - (i+1)*sizeof(void*));
return *argSlot;
}
LIns* LirWriter::ins2i(LOpcode v, LIns* oprnd1, int32_t imm)
@ -695,8 +688,6 @@ namespace nanojit
return insImm(c1 == c2);
case LIR_ov:
return insImm((c2 != 0) && ((c1 + c2) <= c1));
case LIR_cs:
return insImm((c2 != 0) && ((uint32_t(c1) + uint32_t(c2)) <= uint32_t(c1)));
case LIR_lt:
return insImm(c1 < c2);
case LIR_gt:
@ -1011,38 +1002,23 @@ namespace nanojit
op = LIR_callh;
}
// An example of what we're trying to serialize (for a 32-bit machine):
//
// byte
// ----
// N+0 [ arg operand #2 ----------------------
// N+4 arg operand #1 ----------------------
// N+8 arg operand #0 ---------------------- ]
// N+12 [ resv + code=LIR_call
// N+16 imm8a | imm8b | (pad16) -------------
// N+20 ci ----------------------------------
// N+24 (pad32) ----------------------------- ]
//
// In this example:
// 'argc' = 3
NanoAssert(argc <= (int)MAXARGS);
// Lay the call parameters out (in reverse order).
// Nb: this must be kept in sync with arg().
LInsp* newargs = (LInsp*)_buf->makeRoom(argc*sizeof(LInsp) + sizeof(LIns)); // args + call
LInsp* newargs = (LInsp*)_buf->makeRoom(argc*sizeof(LInsp) + sizeof(LInsC)); // args + call
for (int32_t i = 0; i < argc; i++)
newargs[argc - i - 1] = args[i];
// Write the call instruction itself.
LInsp l = (LInsp)(uintptr_t(newargs) + argc*sizeof(LInsp));
LInsC* insC = (LInsC*)(uintptr_t(newargs) + argc*sizeof(LInsp));
LIns* ins = insC->getLIns();
#ifndef NANOJIT_64BIT
l->setCall(op==LIR_callh ? LIR_call : op, argc, ci);
ins->initLInsC(op==LIR_callh ? LIR_call : op, argc, ci);
#else
l->setCall(op, argc, ci);
ins->initLInsC(op, argc, ci);
#endif
l->resv()->clear();
return l;
return ins;
}
using namespace avmplus;
@ -1458,9 +1434,9 @@ namespace nanojit
RetiredEntry *e = NJ_NEW(gc, RetiredEntry)(gc);
e->i = i;
for (int j=0, n=live.size(); j < n; j++) {
LInsp l = live.keyAt(j);
if (!l->isStore() && !l->isGuard())
e->live.add(l);
LInsp ins = live.keyAt(j);
if (!ins->isStore() && !ins->isGuard())
e->live.add(ins);
}
int size=0;
if ((size = e->live.size()) > maxlive)
@ -1707,8 +1683,8 @@ namespace nanojit
}
case LIR_param: {
uint32_t arg = i->imm8();
if (!i->imm8b()) {
uint32_t arg = i->paramArg();
if (!i->paramKind()) {
if (arg < sizeof(Assembler::argRegs)/sizeof(Assembler::argRegs[0])) {
sprintf(s, "%s = %s %d %s", formatRef(i), lirNames[op],
arg, gpn(Assembler::argRegs[arg]));
@ -1751,7 +1727,6 @@ namespace nanojit
case LIR_qlo:
case LIR_qhi:
case LIR_ov:
case LIR_cs:
case LIR_not:
case LIR_mod:
sprintf(s, "%s = %s %s", formatRef(i), lirNames[op], formatRef(i->oprnd1()));

View File

@ -58,9 +58,9 @@ namespace nanojit
// flags; upper bits reserved
LIR64 = 0x40, // result is double or quad
#define OPDEF(op, number, args) \
#define OPDEF(op, number, args, repkind) \
LIR_##op = (number),
#define OPDEF64(op, number, args) \
#define OPDEF64(op, number, args, repkind) \
LIR_##op = ((number) | LIR64),
#include "LIRopcode.tbl"
LIR_sentinel
@ -70,7 +70,6 @@ namespace nanojit
#if defined NANOJIT_64BIT
#define LIR_ldp LIR_ldq
#define LIR_stp LIR_stq
#define LIR_piadd LIR_qiadd
#define LIR_piand LIR_qiand
#define LIR_pilsh LIR_qilsh
@ -78,7 +77,6 @@ namespace nanojit
#define LIR_pior LIR_qior
#else
#define LIR_ldp LIR_ld
#define LIR_stp LIR_st
#define LIR_piadd LIR_add
#define LIR_piand LIR_and
#define LIR_pilsh LIR_lsh
@ -148,13 +146,6 @@ namespace nanojit
return (op & ~LIR64) == LIR_ret;
}
// Sun Studio requires explicitly declaring signed int bit-field
#if defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#define _sign_int signed int
#else
#define _sign_int int32_t
#endif
// The opcode is not logically part of the Reservation, but we include it
// in this struct to ensure that opcode plus the Reservation fits in a
// single word. Yuk.
@ -163,7 +154,7 @@ namespace nanojit
uint32_t arIndex:16; // index into stack frame. displ is -4*arIndex
Register reg:7; // register UnknownReg implies not in register
uint32_t used:1; // when set, the reservation is active
LOpcode code:8;
LOpcode opcode:8;
inline void init() {
reg = UnknownReg;
@ -171,107 +162,425 @@ namespace nanojit
used = 1;
}
inline void clear()
{
inline void clear() {
used = 0;
}
};
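A quick standalone check of that single-word claim, using a mock struct of ours rather than the real Reservation (which uses the Register enum for 'reg'):

#include <cstdint>
// 16 + 7 + 1 bits of reservation state plus the 8-bit opcode: 32 bits total.
struct MockResv {
    uint32_t arIndex:16;
    uint32_t reg:7;
    uint32_t used:1;
    uint32_t opcode:8;
};
static_assert(sizeof(MockResv) == sizeof(uint32_t), "packs into one 32-bit word");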
// Low-level Instruction. 4 words per instruction -- it's important this
// doesn't change unintentionally, so it is checked in LIR.cpp by an
// assertion in initOpcodeAndClearResv().
// The first word is the same for all LIns kinds; the last three differ.
//-----------------------------------------------------------------------
// Low-level instructions. This is a bit complicated, because we have a
// variable-width representation to minimise space usage.
//
// - Instruction size is always an integral multiple of word size.
//
// - Every instruction has at least one word, holding the opcode and the
// reservation info. That word is in class LIns.
//
// - Beyond that, most instructions have 1, 2 or 3 extra words. These
// extra words are in classes LInsOp1, LInsOp2, etc (collectively called
// "LInsXYZ" in what follows). Each LInsXYZ class also contains a word,
// accessible by the 'ins' member, which holds the LIns data; its type
// is void* (which is the same size as LIns) rather than LIns to avoid a
// recursive dependency between LIns and LInsXYZ.
//
// - LIR is written forward, but read backwards. When reading backwards,
// in order to find the opcode, it must be in a predictable place whose
// position isn't affected by instruction width. Therefore, the LIns
// word (which contains the opcode) is always the *last* word in an
// instruction.
//
// - Each instruction is created by casting pre-allocated bytes from a
// LirBuffer to the LInsXYZ type. Therefore there are no constructors
// for LIns or LInsXYZ.
//
// - The standard handle for an instruction is a LIns*. This actually
// points to the LIns word, ie. to the final word in the instruction.
// This is a bit odd, but it allows the instruction's opcode to be
// easily accessed. Once you've looked at the opcode and know what kind
// of instruction it is, if you want to access any of the other words,
// you need to use toLInsXYZ(), which takes the LIns* and gives you an
// LInsXYZ*, ie. the pointer to the actual start of the instruction's
// bytes. From there you can access the instruction-specific extra
// words.
//
// - However, from outside class LIns, LInsXYZ isn't visible, nor is
// toLInsXYZ() -- from outside LIns, all LIR instructions are handled
// via LIns pointers and get/set methods are used for all LIns/LInsXYZ
// accesses. In fact, all data members in LInsXYZ are private and can
// only be accessed by LIns, which is a friend class. The only thing
// anyone outside LIns can do with a LInsXYZ is call getLIns().
//
// - An example Op2 instruction and the likely pointers to it (each line
// represents a word, and pointers to a line point to the start of the
// word on that line):
//
// [ oprnd_2 <-- LInsOp2* insOp2 == toLInsOp2(ins)
// oprnd_1
// opcode + resv ] <-- LIns* ins
//
// - LIR_skip instructions are more complicated. They allow an arbitrary
// blob of data (the "payload") to be placed in the LIR stream. The
// size of the payload is always a multiple of the word size. A skip
// instruction's operand points to the previous instruction, which lets
// the payload be skipped over when reading backwards. Here's an
// example of a skip instruction with a 3-word payload preceded by an
// LInsOp1:
//
// [ oprnd_1
// +-> opcode + resv ]
// | [ data
// | data
// | data
// +---- prevLIns <-- LInsSk* insSk == toLInsSk(ins)
// opcode==LIR_skip + resv ] <-- LIns* ins
//
// Skips are also used to link code pages. If the first instruction on
// a page isn't a LIR_start, it will be a skip, and the skip's operand
// will point to the last LIns on the previous page. In this case there
// isn't a payload as such; in fact, the previous page might be at a
// higher address, ie. the operand might point forward rather than
// backward.
//
// LInsSk has the same layout as LInsOp1, but we represent it as a
// different class because there are some places where we treat
// skips specially and so having it separate seems like a good idea.
//
// - Call instructions (LIR_call, LIR_fcall, LIR_calli, LIR_fcalli) are
// also more complicated. They are preceded by the arguments to the
// call, which are laid out in reverse order. For example, a call with
// 3 args will look like this:
//
// [ arg #2
// arg #1
// arg #0
// argc <-- LInsC* insC == toLInsC(ins)
// ci
// opcode + resv ] <-- LIns* ins
//
// - Various things about the size and layout of LIns and LInsXYZ are
// statically checked in staticSanityCheck(). In particular, this is
// worthwhile because there's nothing that guarantees that all the
// LInsXYZ classes have a size that is a multiple of word size (but in
// practice all sane compilers use a layout that results in this). We
// also check that every LInsXYZ is word-aligned in
// LirBuffer::makeRoom(); this seems sensible to avoid potential
// slowdowns due to misalignment. It relies on pages themselves being
// word-aligned, which is extremely likely.
//
// - There is an enum, LInsRepKind, with one member for each of the
// LInsXYZ kinds. Each opcode is categorised with its LInsRepKind value
// in LIRopcode.tbl, and this is used in various places.
//-----------------------------------------------------------------------
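To make the "last word" discipline concrete, here is a standalone mock (our own MockOp2, not the real classes) of the pointer arithmetic that the toLInsXYZ() converters below perform:

#include <cassert>
#include <cstdint>
// Layout mirrors LInsOp2: two operand words, then the "LIns" word last.
struct MockOp2 { void* oprnd_2; void* oprnd_1; void* ins; };

int main() {
    MockOp2 rec = { (void*)2, (void*)1, (void*)0 };
    void** insWord = &rec.ins;   // the LIns*-style handle: points at the last word
    // Step one word past the handle, then back up by the full record size.
    MockOp2* start = (MockOp2*)(uintptr_t(insWord + 1) - sizeof(MockOp2));
    assert(start == &rec && start->oprnd_1 == (void*)1);
    return 0;
}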
enum LInsRepKind {
// LRK_XYZ corresponds to class LInsXYZ.
LRK_Op0,
LRK_Op1,
LRK_Op2,
LRK_Sti,
LRK_Sk,
LRK_C,
LRK_P,
LRK_I,
LRK_I64,
LRK_None // this one is used for unused opcode numbers
};
// 0-operand form. Used for LIR_start and LIR_label.
class LInsOp0
{
private:
friend class LIns;
void* ins;
public:
LIns* getLIns() { return (LIns*)&ins; };
};
// 1-operand form. Used for LIR_ret, LIR_ov, unary arithmetic/logic ops,
// etc.
class LInsOp1
{
private:
friend class LIns;
// Nb: oprnd_1 position relative to 'ins' must match that in
// LIns{Op2,Sti}. Checked in LirBufWriter::LirBufWriter().
LIns* oprnd_1;
void* ins;
public:
LIns* getLIns() { return (LIns*)&ins; };
};
// 2-operand form. Used for loads, guards, branches, comparisons, binary
// arithmetic/logic ops, etc.
class LInsOp2
{
private:
friend class LIns;
// Nb: oprnd_{1,2} position relative to 'ins' must match that in
// LIns{Op1,Sti}. Checked in LirBufWriter::LirBufWriter().
LIns* oprnd_2;
LIns* oprnd_1;
void* ins;
public:
LIns* getLIns() { return (LIns*)&ins; };
};
// Used for LIR_sti and LIR_stqi.
class LInsSti
{
private:
friend class LIns;
int32_t disp;
// Nb: oprnd_{1,2} position relative to 'ins' must match that in
// LIns{Op1,Op2}. Checked in LIns::staticSanityCheck().
LIns* oprnd_2;
LIns* oprnd_1;
void* ins;
public:
LIns* getLIns() { return (LIns*)&ins; };
};
// Used for LIR_skip.
class LInsSk
{
private:
friend class LIns;
LIns* prevLIns;
void* ins;
public:
LIns* getLIns() { return (LIns*)&ins; };
};
// Used for all variants of LIR_call.
class LInsC
{
private:
friend class LIns;
uintptr_t argc:8;
const CallInfo* ci;
void* ins;
public:
LIns* getLIns() { return (LIns*)&ins; };
};
// Used for LIR_param.
class LInsP
{
private:
friend class LIns;
uintptr_t arg:8;
uintptr_t kind:8;
void* ins;
public:
LIns* getLIns() { return (LIns*)&ins; };
};
// Used for LIR_int and LIR_alloc.
class LInsI
{
private:
friend class LIns;
int32_t imm32;
void* ins;
public:
LIns* getLIns() { return (LIns*)&ins; };
};
// Used for LIR_quad.
class LInsI64
{
private:
friend class LIns;
int32_t imm64_0;
int32_t imm64_1;
void* ins;
public:
LIns* getLIns() { return (LIns*)&ins; };
};
// Used only as a placeholder for OPDEF macros for unused opcodes in
// LIRopcode.tbl.
class LInsNone
{
};
class LIns
{
// 2-operand form. Used for most LIns kinds, including LIR_skip (for
// which oprnd_1 is the target).
struct u_type
{
// Nb: oprnd_1 and oprnd_2 layout must match that in sti_type
// because oprnd1() and oprnd2() are used for both.
LIns* oprnd_1;
LIns* oprnd_2;
};
// Used for LIR_sti and LIR_stqi.
struct sti_type
{
// Nb: oprnd_1 and oprnd_2 layout must match that in u_type
// because oprnd1() and oprnd2() are used for both.
LIns* oprnd_1;
LIns* oprnd_2;
int32_t disp;
};
// Used for LIR_call and LIR_param.
struct c_type
{
uintptr_t imm8a:8; // call: 0 (not used); param: arg
uintptr_t imm8b:8; // call: argc; param: kind
const CallInfo* ci; // call: callInfo; param: NULL (not used)
};
// Used for LIR_int.
struct i_type
{
int32_t imm32;
};
// Used for LIR_quad.
struct i64_type
{
int32_t imm64_0;
int32_t imm64_1;
};
#undef _sign_int
// 1st word: fields shared by all LIns kinds. The reservation fields
private:
// Last word: fields shared by all LIns kinds. The reservation fields
// are read/written during assembly.
Reservation firstWord;
Reservation lastWord;
// 2nd, 3rd and 4th words: differ depending on the LIns kind.
union
{
u_type u;
c_type c;
i_type i;
i64_type i64;
sti_type sti;
};
// LIns-to-LInsXYZ converters.
LInsOp0* toLInsOp0() const { return (LInsOp0*)( uintptr_t(this+1) - sizeof(LInsOp0) ); }
LInsOp1* toLInsOp1() const { return (LInsOp1*)( uintptr_t(this+1) - sizeof(LInsOp1) ); }
LInsOp2* toLInsOp2() const { return (LInsOp2*)( uintptr_t(this+1) - sizeof(LInsOp2) ); }
LInsSti* toLInsSti() const { return (LInsSti*)( uintptr_t(this+1) - sizeof(LInsSti) ); }
LInsSk* toLInsSk() const { return (LInsSk* )( uintptr_t(this+1) - sizeof(LInsSk ) ); }
LInsC* toLInsC() const { return (LInsC* )( uintptr_t(this+1) - sizeof(LInsC ) ); }
LInsP* toLInsP() const { return (LInsP* )( uintptr_t(this+1) - sizeof(LInsP ) ); }
LInsI* toLInsI() const { return (LInsI* )( uintptr_t(this+1) - sizeof(LInsI ) ); }
LInsI64* toLInsI64() const { return (LInsI64*)( uintptr_t(this+1) - sizeof(LInsI64) ); }
// This is never called, but that's ok because it contains only static
// assertions.
void staticSanityCheck()
{
// LIns must be word-sized.
NanoStaticAssert(sizeof(LIns) == 1*sizeof(void*));
// LInsXYZ have expected sizes too.
NanoStaticAssert(sizeof(LInsOp0) == 1*sizeof(void*));
NanoStaticAssert(sizeof(LInsOp1) == 2*sizeof(void*));
NanoStaticAssert(sizeof(LInsOp2) == 3*sizeof(void*));
NanoStaticAssert(sizeof(LInsSti) == 4*sizeof(void*));
NanoStaticAssert(sizeof(LInsSk) == 2*sizeof(void*));
NanoStaticAssert(sizeof(LInsC) == 3*sizeof(void*));
NanoStaticAssert(sizeof(LInsP) == 2*sizeof(void*));
NanoStaticAssert(sizeof(LInsI) == 2*sizeof(void*));
#if defined NANOJIT_64BIT
NanoStaticAssert(sizeof(LInsI64) == 2*sizeof(void*));
#else
NanoStaticAssert(sizeof(LInsI64) == 3*sizeof(void*));
#endif
// oprnd_1 must be in the same position in LIns{Op1,Op2,Sti}
// because oprnd1() is used for all of them.
NanoStaticAssert( (offsetof(LInsOp1, ins) - offsetof(LInsOp1, oprnd_1)) ==
(offsetof(LInsOp2, ins) - offsetof(LInsOp2, oprnd_1)) );
NanoStaticAssert( (offsetof(LInsOp2, ins) - offsetof(LInsOp2, oprnd_1)) ==
(offsetof(LInsSti, ins) - offsetof(LInsSti, oprnd_1)) );
// oprnd_2 must be in the same position in LIns{Op2,Sti}
// because oprnd2() is used for both of them.
NanoStaticAssert( (offsetof(LInsOp2, ins) - offsetof(LInsOp2, oprnd_2)) ==
(offsetof(LInsSti, ins) - offsetof(LInsSti, oprnd_2)) );
}
public:
void initLInsOp0(LOpcode opcode) {
lastWord.clear();
lastWord.opcode = opcode;
NanoAssert(isLInsOp0());
}
void initLInsOp1(LOpcode opcode, LIns* oprnd1) {
lastWord.clear();
lastWord.opcode = opcode;
toLInsOp1()->oprnd_1 = oprnd1;
NanoAssert(isLInsOp1());
}
void initLInsOp2(LOpcode opcode, LIns* oprnd1, LIns* oprnd2) {
lastWord.clear();
lastWord.opcode = opcode;
toLInsOp2()->oprnd_1 = oprnd1;
toLInsOp2()->oprnd_2 = oprnd2;
NanoAssert(isLInsOp2());
}
void initLInsSti(LOpcode opcode, LIns* val, LIns* base, int32_t d) {
lastWord.clear();
lastWord.opcode = opcode;
toLInsSti()->oprnd_1 = val;
toLInsSti()->oprnd_2 = base;
toLInsSti()->disp = d;
NanoAssert(isLInsSti());
}
void initLInsSk(LIns* prevLIns) {
lastWord.clear();
lastWord.opcode = LIR_skip;
toLInsSk()->prevLIns = prevLIns;
NanoAssert(isLInsSk());
}
// Nb: this does NOT initialise the arguments. That must be done
// separately.
void initLInsC(LOpcode opcode, int32_t argc, const CallInfo* ci) {
NanoAssert(isU8(argc));
lastWord.clear();
lastWord.opcode = opcode;
toLInsC()->argc = argc;
toLInsC()->ci = ci;
NanoAssert(isLInsC());
}
void initLInsP(int32_t arg, int32_t kind) {
lastWord.clear();
lastWord.opcode = LIR_param;
NanoAssert(isU8(arg) && isU8(kind));
toLInsP()->arg = arg;
toLInsP()->kind = kind;
NanoAssert(isLInsP());
}
void initLInsI(LOpcode opcode, int32_t imm32) {
lastWord.clear();
lastWord.opcode = opcode;
toLInsI()->imm32 = imm32;
NanoAssert(isLInsI());
}
void initLInsI64(LOpcode opcode, int64_t imm64) {
lastWord.clear();
lastWord.opcode = opcode;
toLInsI64()->imm64_0 = int32_t(imm64);
toLInsI64()->imm64_1 = int32_t(imm64 >> 32);
NanoAssert(isLInsI64());
}
public:
LIns* oprnd1() const {
NanoAssert(isOp1() || isOp2() || isLoad() || isStore());
return u.oprnd_1;
NanoAssert(isLInsOp1() || isLInsOp2() || isStore());
return toLInsOp2()->oprnd_1;
}
LIns* oprnd2() const {
NanoAssert(isOp2() || isLoad() || isStore());
return u.oprnd_2;
NanoAssert(isLInsOp2() || isStore());
return toLInsOp2()->oprnd_2;
}
inline LOpcode opcode() const { return firstWord.code; }
inline uint8_t imm8() const { NanoAssert(isop(LIR_param)); return c.imm8a; }
inline uint8_t imm8b() const { NanoAssert(isop(LIR_param)); return c.imm8b; }
inline int32_t imm32() const { NanoAssert(isconst()); return i.imm32; }
inline int32_t imm64_0() const { NanoAssert(isconstq()); return i64.imm64_0; }
inline int32_t imm64_1() const { NanoAssert(isconstq()); return i64.imm64_1; }
uint64_t imm64() const;
double imm64f() const;
Reservation* resv() { return &firstWord; }
void* payload() const;
inline Page* page() { return (Page*) alignTo(this,NJ_PAGE_SIZE); }
inline int32_t size() const {
NanoAssert(isop(LIR_alloc));
return i.imm32<<2;
LIns* prevLIns() const {
NanoAssert(isop(LIR_skip));
return toLInsSk()->prevLIns;
}
inline void setSize(int32_t bytes) {
NanoAssert(isop(LIR_alloc) && (bytes&3)==0 && isU16(bytes>>2));
i.imm32 = bytes>>2;
inline LOpcode opcode() const { return lastWord.opcode; }
inline uint8_t paramArg() const { NanoAssert(isop(LIR_param)); return toLInsP()->arg; }
inline uint8_t paramKind() const { NanoAssert(isop(LIR_param)); return toLInsP()->kind; }
inline int32_t imm32() const { NanoAssert(isconst()); return toLInsI()->imm32; }
inline int32_t imm64_0() const { NanoAssert(isconstq()); return toLInsI64()->imm64_0; }
inline int32_t imm64_1() const { NanoAssert(isconstq()); return toLInsI64()->imm64_1; }
uint64_t imm64() const;
double imm64f() const;
Reservation* resv() { return &lastWord; }
void* payload() const;
inline Page* page() { return (Page*) alignTo(this,NJ_PAGE_SIZE); }
inline int32_t size() const {
NanoAssert(isop(LIR_alloc));
return toLInsI()->imm32 << 2;
}
LIns* arg(uint32_t i);
@ -279,7 +588,7 @@ namespace nanojit
inline int32_t immdisp() const
{
NanoAssert(isStore());
return sti.disp;
return toLInsSti()->disp;
}
inline void* constvalp() const
@ -292,36 +601,49 @@ namespace nanojit
}
bool isCse() const;
bool isRet() const { return nanojit::isRetOpcode(firstWord.code); }
bool isop(LOpcode o) const { return firstWord.code == o; }
bool isRet() const { return nanojit::isRetOpcode(opcode()); }
bool isop(LOpcode o) const { return opcode() == o; }
#if defined(_DEBUG)
bool isOp1() const; // true for unary ops
bool isOp2() const; // true for binary ops
// isLInsXYZ() returns true if the instruction has the LInsXYZ form.
// Note that there is some overlap with other predicates, eg.
// isStore()==isLInsSti(), isCall()==isLInsC(), but that's ok; these
// ones are used only to check that opcodes are appropriate for
// instruction layouts; the others are used for non-debugging
// purposes.
bool isLInsOp0() const;
bool isLInsOp1() const;
bool isLInsOp2() const;
bool isLInsSti() const;
bool isLInsSk() const;
bool isLInsC() const;
bool isLInsP() const;
bool isLInsI() const;
bool isLInsI64() const;
#endif
bool isQuad() const;
bool isCond() const;
bool isFloat() const;
bool isCmp() const;
bool isCall() const {
LOpcode op = LOpcode(firstWord.code & ~LIR64);
LOpcode op = LOpcode(opcode() & ~LIR64);
return op == LIR_call;
}
bool isStore() const {
LOpcode op = LOpcode(firstWord.code & ~LIR64);
LOpcode op = LOpcode(opcode() & ~LIR64);
return op == LIR_sti;
}
bool isLoad() const {
LOpcode op = firstWord.code;
LOpcode op = opcode();
return op == LIR_ldq || op == LIR_ld || op == LIR_ldc ||
op == LIR_ldqc || op == LIR_ldcs || op == LIR_ldcb;
}
bool isGuard() const {
LOpcode op = firstWord.code;
LOpcode op = opcode();
return op == LIR_x || op == LIR_xf || op == LIR_xt ||
op == LIR_loop || op == LIR_xbarrier || op == LIR_xtbl;
}
// True if the instruction is a 32-bit or smaller constant integer.
bool isconst() const { return firstWord.code == LIR_int; }
bool isconst() const { return opcode() == LIR_int; }
// True if the instruction is a 32-bit or smaller constant integer and
// has the value val when treated as a 32-bit signed integer.
bool isconstval(int32_t val) const;
@ -333,69 +655,6 @@ namespace nanojit
return isop(LIR_jt) || isop(LIR_jf) || isop(LIR_j);
}
void setIns0(LOpcode op) {
firstWord.code = op;
}
void setIns1(LOpcode op, LIns* oprnd1) {
firstWord.code = op;
u.oprnd_1 = oprnd1;
NanoAssert(isOp1());
}
void setIns2(LOpcode op, LIns* oprnd1, LIns* oprnd2) {
firstWord.code = op;
u.oprnd_1 = oprnd1;
u.oprnd_2 = oprnd2;
NanoAssert(isOp2() || isLoad() || isGuard() || isBranch());
}
void setLoad(LOpcode op, LIns* base, LIns* d) {
setIns2(op, base, d);
}
void setGuard(LOpcode op, LIns* cond, LIns* data) {
setIns2(op, cond, data);
}
void setBranch(LOpcode op, LIns* cond, LIns* target) {
setIns2(op, cond, target);
}
void setStorei(LOpcode op, LIns* val, LIns* base, int32_t d) {
firstWord.code = op;
u.oprnd_1 = val;
u.oprnd_2 = base;
sti.disp = d;
NanoAssert(isStore());
}
void setImm(LOpcode op, int32_t imm32) {
firstWord.code = op;
i.imm32 = imm32;
NanoAssert(op == LIR_alloc || op == LIR_int);
}
void setAlloc(LOpcode op, int32_t size) {
setImm(op, size);
}
void setParam(LOpcode op, int32_t arg, int32_t kind)
{
firstWord.code = op;
NanoAssert(isU8(arg) && isU8(kind));
c.imm8a = arg;
c.imm8b = kind;
c.ci = NULL;
NanoAssert(op == LIR_param);
}
void setCall(LOpcode op, int32_t argc, const CallInfo* ci)
{
firstWord.code = op;
NanoAssert(isU8(argc));
c.imm8a = 0;
c.imm8b = argc;
c.ci = ci;
NanoAssert(op == LIR_call || op == LIR_fcall);
}
void setImmq(LOpcode op, int64_t imm64) {
firstWord.code = op;
i64.imm64_0 = int32_t(imm64);
i64.imm64_1 = int32_t(imm64>>32);
NanoAssert(op == LIR_quad);
}
void setTarget(LIns* t);
LIns* getTarget();
@ -403,17 +662,17 @@ namespace nanojit
inline uint32_t argc() const {
NanoAssert(isCall());
return c.imm8b;
return toLInsC()->argc;
}
const CallInfo *callInfo() const;
};
typedef LIns* LInsp;
LIns* FASTCALL callArgN(LInsp i, uint32_t n);
extern const uint8_t operandCount[];
class Fragmento; // @todo remove this; needed for minbuild for some reason?!? Should not be compiling this code at all
class LirFilter;
// make it a GCObject so we can explicitly delete it early
class LirWriter : public avmplus::GCObject
@ -490,12 +749,12 @@ namespace nanojit
// The first instruction on a page is always a start instruction, or a
// payload-less skip instruction linking to the previous page. The
// biggest possible instruction would take up the entire rest of the page.
#define NJ_MAX_LINS_SZB (NJ_PAGE_CODE_AREA_SZB - sizeof(LIns))
#define NJ_MAX_LINS_SZB (NJ_PAGE_CODE_AREA_SZB - sizeof(LInsSk))
// The maximum skip payload size is determined by the maximum instruction
// size. We require that a skip's payload be adjacent to the skip LIns
// itself.
#define NJ_MAX_SKIP_PAYLOAD_SZB (NJ_MAX_LINS_SZB - sizeof(LIns))
#define NJ_MAX_SKIP_PAYLOAD_SZB (NJ_MAX_LINS_SZB - sizeof(LInsSk))
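Worked example with hypothetical numbers: on a 32-bit build LInsSk is two words (8 bytes, per the staticSanityCheck above), so with, say, a 4096-byte code area:

// NJ_MAX_LINS_SZB         == 4096 - 8 == 4088  (reserve the page-linking skip)
// NJ_MAX_SKIP_PAYLOAD_SZB == 4088 - 8 == 4080  (the payload's own skip LIns also costs one LInsSk)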
#ifdef NJ_VERBOSE

View File

@ -44,15 +44,22 @@
*
* Includers must define OPDEF and OPDEF64 macros of the following forms:
*
* #define OPDEF(op,val,operands) ...
* #define OPDEF64(op,val,operands) ...
* #define OPDEF(op,val,operands,repkind) ...
* #define OPDEF64(op,val,operands,repkind) ...
*
* Selected arguments can then be used within the macro expansions.
*
* Field Description
* op Bytecode name, token-pasted after "LIR_" to form an LOpcode
* val Bytecode value, which is the LOpcode enumerator value
* operands Number of operands for this instruction
* op Bytecode name, token-pasted after "LIR_" to form an LOpcode.
* val Bytecode value, which is the LOpcode enumerator value.
* operands Number of operands for this instruction, where an "operand" is
* a LIns* argument. Eg. LIR_sti has 3 fields, but the last is an
* immediate, so it only has two operands. Call instructions are
* considered to have 0 operands -- the call args aren't counted.
* The value is set to -1 for unused opcodes to make it obvious
* that it needs changing if the opcode becomes used.
* repkind Indicates how the instruction is represented in memory; XYZ
* corresponds to LInsXYZ and LRK_XYZ.
*
* This file is best viewed with 128 columns:
12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678
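The LIR.h hunk earlier shows the main consumer of these macros (token-pasting the LOpcode enum). As a sketch of the same X-macro pattern, here is a consumer that builds a name table; it assumes the rows stay densely ordered, as they are in this file, and the real lirNames table in LIR.cpp is built along these lines:

#define OPDEF(op, number, args, repkind)    #op,
#define OPDEF64(op, number, args, repkind)  #op,
// Rows 0..63 are the base opcodes and rows 64..127 the LIR64 variants,
// so the array can be indexed directly by a full LOpcode value.
static const char* opNames[] = {
#include "LIRopcode.tbl"
};
#undef OPDEF
#undef OPDEF64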
@ -61,37 +68,36 @@
/* op val name operands */
/* special operations (must be 0..N) */
OPDEF(start, 0, 0)
OPDEF(unused1, 1, 0)
OPDEF(skip, 2, 0)
OPDEF(unused3, 3, 0)
OPDEF(unused4, 4, 0)
OPDEF(unused5, 5, 2)
OPDEF(unused6, 6, 2)
OPDEF(start, 0, 0, Op0) // start of a fragment
OPDEF(unused1, 1,-1, None)
OPDEF(skip, 2, 1, Sk) // holds blobs ("payloads") of data; also links pages
OPDEF(unused3, 3,-1, None)
OPDEF(unused4, 4,-1, None)
OPDEF(unused5, 5,-1, None)
OPDEF(unused6, 6,-1, None)
/* non-pure operations */
OPDEF(addp, 7, 2)
OPDEF(param, 8, 0)
OPDEF(unused9, 9, 2)
OPDEF(ld, 10, 2) // 32-bit load
OPDEF(alloc, 11, 0) // alloca some stack space
OPDEF(sti, 12, 2) // 32-bit store
OPDEF(ret, 13, 1)
OPDEF(live, 14, 1) // extend live range of reference
OPDEF(unused15, 15, 0) // indirect call
OPDEF(call, 16, 0) // subroutine call returning a 32-bit value
OPDEF(addp, 7, 2, Op2) // integer addition for temporary pointer calculations
OPDEF(param, 8, 0, P) // load a parameter
OPDEF(unused9, 9,-1, None)
OPDEF(ld, 10, 2, Op2) // 32-bit load
OPDEF(alloc, 11, 0, I) // alloca some stack space
OPDEF(sti, 12, 2, Sti) // 32-bit store
OPDEF(ret, 13, 1, Op1) // return a word-sized value
OPDEF(live, 14, 1, Op1) // extend live range of reference
OPDEF(unused15, 15, 0, C)
OPDEF(call, 16, 0, C) // subroutine call returning a 32-bit value
/* guards */
OPDEF(loop, 17, 0) // loop fragment
OPDEF(x, 18, 0) // exit always
OPDEF(loop, 17, 0, Op2) // loop fragment
OPDEF(x, 18, 0, Op2) // exit always
/* branches */
OPDEF(j, 19, 0) // jump always
OPDEF(jt, 20, 1) // jump true
OPDEF(jf, 21, 1) // jump false
OPDEF(label, 22, 0) // a jump target
OPDEF(ji, 23, 2) // jump indirect
OPDEF(j, 19, 0, Op2) // jump always
OPDEF(jt, 20, 1, Op2) // jump if true
OPDEF(jf, 21, 1, Op2) // jump if false
OPDEF(label, 22, 0, Op0) // a jump target (no machine code is emitted for this)
OPDEF(ji, 23,-1, None) // indirect jump (currently not implemented)
/* operators */
@ -100,12 +106,12 @@ OPDEF(ji, 23, 2) // jump indirect
* common-subexpression-elimination detection code.
*/
OPDEF(int, 24, 0) // constant 32-bit integer
OPDEF(cmov, 25, 2) // conditional move (op1=cond, op2=cond(iftrue,iffalse))
OPDEF(int, 24, 0, I) // constant 32-bit integer
OPDEF(cmov, 25, 2, Op2) // conditional move (op1=cond, op2=LIR_2(iftrue,iffalse))
#if defined(NANOJIT_64BIT)
OPDEF(callh, 26, 0)
OPDEF(callh, 26,-1, None) // unused on 64-bit machines
#else
OPDEF(callh, 26, 1)
OPDEF(callh, 26, 1, Op1) // get the high 32 bits of a call returning a 64-bit value
#endif
/*
@ -117,46 +123,44 @@ OPDEF(callh, 26, 1)
* with 3. NB: These opcodes must remain continuous so that comparison-opcode
* detection works correctly.
*/
OPDEF(feq, 27, 2) // floating-point equality [2 float inputs]
OPDEF(flt, 28, 2) // floating-point less than: arg1 < arg2
OPDEF(fgt, 29, 2) // floating-point greater than: arg1 > arg2
OPDEF(fle, 30, 2) // arg1 <= arg2, both floating-point
OPDEF(fge, 31, 2) // arg1 >= arg2, both floating-point
OPDEF(feq, 27, 2, Op2) // floating-point equality
OPDEF(flt, 28, 2, Op2) // floating-point less-than
OPDEF(fgt, 29, 2, Op2) // floating-point greater-than
OPDEF(fle, 30, 2, Op2) // floating-point less-than-or-equal
OPDEF(fge, 31, 2, Op2) // floating-point greater-than-or-equal
OPDEF(ldcb, 32, 2) // non-volatile 8-bit load
OPDEF(ldcs, 33, 2) // non-volatile 16-bit load
OPDEF(ldc, 34, 2) // non-volatile 32-bit load
OPDEF(ldcb, 32, 2, Op2) // non-volatile 8-bit load
OPDEF(ldcs, 33, 2, Op2) // non-volatile 16-bit load
OPDEF(ldc, 34, 2, Op2) // non-volatile 32-bit load
// neg through ush are all integer operations
OPDEF(neg, 35, 1) // numeric negation [ 1 integer input / integer output ]
OPDEF(add, 36, 2) // integer addition [ 2 operand integer intputs / integer output ]
OPDEF(sub, 37, 2) // integer subtraction
OPDEF(mul, 38, 2) // integer multiplication
OPDEF(div, 39, 2)
OPDEF(mod, 40, 1)
OPDEF(neg, 35, 1, Op1) // integer negation
OPDEF(add, 36, 2, Op2) // integer addition
OPDEF(sub, 37, 2, Op2) // integer subtraction
OPDEF(mul, 38, 2, Op2) // integer multiplication
OPDEF(div, 39, 2, Op2) // integer division
OPDEF(mod, 40, 1, Op1) // hack: get the modulus from a LIR_div result, for x86 only
OPDEF(and, 41, 2)
OPDEF(or, 42, 2)
OPDEF(xor, 43, 2)
OPDEF(not, 44, 1)
OPDEF(lsh, 45, 2)
OPDEF(rsh, 46, 2) // >>
OPDEF(ush, 47, 2) // >>>
OPDEF(and, 41, 2, Op2) // 32-bit bitwise AND
OPDEF(or, 42, 2, Op2) // 32-bit bitwise OR
OPDEF(xor, 43, 2, Op2) // 32-bit bitwise XOR
OPDEF(not, 44, 1, Op1) // 32-bit bitwise NOT
OPDEF(lsh, 45, 2, Op2) // 32-bit left shift
OPDEF(rsh, 46, 2, Op2) // 32-bit right shift with sign-extend (>>)
OPDEF(ush, 47, 2, Op2) // 32-bit unsigned right shift (>>>)
// conditional guards, op^1 to complement. Only things that are
// isCond() can be passed to these.
OPDEF(xt, 48, 1) // exit if true 0x30 0011 0000
OPDEF(xf, 49, 1) // exit if false 0x31 0011 0001
OPDEF(xt, 48, 1, Op2) // exit if true (0x30 0011 0000)
OPDEF(xf, 49, 1, Op2) // exit if false (0x31 0011 0001)
// qlo and qhi take a single quad argument and return its low and high
// 32 bits respectively as 32-bit integers.
OPDEF(qlo, 50, 1)
OPDEF(qhi, 51, 1)
OPDEF(qlo, 50, 1, Op1) // get the low 32 bits of a 64-bit value
OPDEF(qhi, 51, 1, Op1) // get the high 32 bits of a 64-bit value
OPDEF(unused52, 52, 0)
OPDEF(unused52, 52,-1, None)
OPDEF(ov, 53, 1)
OPDEF(cs, 54, 1)
OPDEF(ov, 53, 1, Op1) // test for overflow; value must have just been computed
OPDEF(unused53, 54,-1, None)
// Integer (all sizes) relational operators. (op ^ 1) is the op which flips the
// left and right sides of the comparison, so (lt ^ 1) == gt, or the operator
@ -165,96 +169,96 @@ OPDEF(cs, 54, 1)
// with 3. 'u' prefix indicates the unsigned integer variant.
// NB: These opcodes must remain continuous so that comparison-opcode detection
// works correctly.
OPDEF(eq, 55, 2) // integer equality
OPDEF(lt, 56, 2) // 0x38 0011 1000
OPDEF(gt, 57, 2) // 0x39 0011 1001
OPDEF(le, 58, 2) // 0x3A 0011 1010
OPDEF(ge, 59, 2) // 0x3B 0011 1011
OPDEF(ult, 60, 2) // 0x3C 0011 1100
OPDEF(ugt, 61, 2) // 0x3D 0011 1101
OPDEF(ule, 62, 2) // 0x3E 0011 1110
OPDEF(uge, 63, 2) // 0x3F 0011 1111
OPDEF(eq, 55, 2, Op2) // integer equality
OPDEF(lt, 56, 2, Op2) // signed integer less-than (0x38 0011 1000)
OPDEF(gt, 57, 2, Op2) // signed integer greater-than (0x39 0011 1001)
OPDEF(le, 58, 2, Op2) // signed integer less-than-or-equal (0x3A 0011 1010)
OPDEF(ge, 59, 2, Op2) // signed integer greater-than-or-equal (0x3B 0011 1011)
OPDEF(ult, 60, 2, Op2) // unsigned integer less-than (0x3C 0011 1100)
OPDEF(ugt, 61, 2, Op2) // unsigned integer greater-than (0x3D 0011 1101)
OPDEF(ule, 62, 2, Op2) // unsigned integer less-than-or-equal (0x3E 0011 1110)
OPDEF(uge, 63, 2, Op2) // unsigned integer greater-than-or-equal (0x3F 0011 1111)
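The xor identities the comment above relies on can be checked directly against the opcode numbers just listed (a standalone sketch of ours; the constants are copied from this table):

#include <cassert>
enum { LT = 56, GT = 57, LE = 58, GE = 59, ULT = 60, UGT = 61, ULE = 62, UGE = 63 };
int main() {
    assert((LT ^ 1) == GT && (LE ^ 1) == GE);     // op^1 swaps the sides: a<b == b>a
    assert((ULT ^ 1) == UGT && (ULE ^ 1) == UGE); // same pattern for the unsigned variants
    assert((LT ^ 3) == GE && (GT ^ 3) == LE);     // op^3 gives the negation: !(a<b) == a>=b
    return 0;
}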
OPDEF64(2, 0, 2) // wraps a pair of refs
OPDEF64(file, 1, 2)
OPDEF64(line, 2, 2)
OPDEF64(xbarrier, 3, 1) // memory barrier (dummy guard)
OPDEF64(xtbl, 4, 1) // exit via indirect jump
OPDEF64(2, 0, 2, Op2) // wraps a pair of refs, for LIR_cmov or LIR_qcmov
OPDEF64(file, 1, 2, Op1) // source filename for debug symbols
OPDEF64(line, 2, 2, Op1) // source line number for debug symbols
OPDEF64(xbarrier, 3, 1, Op2) // memory barrier; doesn't exit, but flushes all values to the stack
OPDEF64(xtbl, 4, 1, Op2) // exit via indirect jump
OPDEF64(unused5_64, 5, 2)
OPDEF64(unused6_64, 6, 2)
OPDEF64(unused7_64, 7, 2)
OPDEF64(unused8_64, 8, 2)
OPDEF64(unused5_64, 5,-1, None)
OPDEF64(unused6_64, 6,-1, None)
OPDEF64(unused7_64, 7,-1, None)
OPDEF64(unused8_64, 8,-1, None)
OPDEF64(unused9_64, 9,-1, None)
OPDEF64(unused9_64, 9, 2)
OPDEF64(ldq, LIR_ld, 2) // quad load
OPDEF64(ldq, LIR_ld, 2, Op2) // 64-bit (quad) load
OPDEF64(unused11_64, 11, 2)
OPDEF64(unused11_64, 11,-1, None)
OPDEF64(stqi, LIR_sti, 2) // quad store
OPDEF64(fret, LIR_ret, 1)
OPDEF64(stqi, LIR_sti, 2, Sti) // 64-bit (quad) store
OPDEF64(fret, LIR_ret, 1, Op1)
OPDEF64(unused14_64, 14, 2)
OPDEF64(unused15_64, 15, 2)
OPDEF64(unused14_64, 14,-1, None)
OPDEF64(unused15_64, 15,-1, None)
OPDEF64(fcall, LIR_call, 0) // subroutine call returning quad
OPDEF64(fcall, LIR_call, 0, C) // subroutine call returning 64-bit (quad) value
OPDEF64(unused17_64, 17, 2)
OPDEF64(unused18_64, 18, 2)
OPDEF64(unused19_64, 19, 2)
OPDEF64(unused20_64, 20, 2)
OPDEF64(unused21_64, 21, 2)
OPDEF64(unused22_64, 22, 2)
OPDEF64(unused23_64, 23, 2)
OPDEF64(unused17_64, 17,-1, None)
OPDEF64(unused18_64, 18,-1, None)
OPDEF64(unused19_64, 19,-1, None)
OPDEF64(unused20_64, 20,-1, None)
OPDEF64(unused21_64, 21,-1, None)
OPDEF64(unused22_64, 22,-1, None)
OPDEF64(unused23_64, 23,-1, None)
// We strip of the 64bit flag and compare that the opcode is between LIR_int
// We strip off the 64 bit flag and compare that the opcode is between LIR_int
// and LIR_uge to decide whether we can CSE the opcode. All opcodes below
// this marker are subject to CSE.
OPDEF64(quad, LIR_int, 0) // quad constant value
OPDEF64(qcmov, LIR_cmov, 2)
OPDEF64(unused26_64, 26, 2)
OPDEF64(quad, LIR_int, 0, I64) // 64-bit (quad) constant value
OPDEF64(qcmov, LIR_cmov, 2, Op2) // 64-bit conditional move
OPDEF64(unused27_64, 27, 2)
OPDEF64(unused28_64, 28, 2)
OPDEF64(unused29_64, 29, 2)
OPDEF64(unused30_64, 30, 2)
OPDEF64(unused31_64, 31, 2)
OPDEF64(unused32_64, 32, 2)
OPDEF64(unused33_64, 33, 2)
OPDEF64(unused26_64, 26,-1, None)
OPDEF64(unused27_64, 27,-1, None)
OPDEF64(unused28_64, 28,-1, None)
OPDEF64(unused29_64, 29,-1, None)
OPDEF64(unused30_64, 30,-1, None)
OPDEF64(unused31_64, 31,-1, None)
OPDEF64(unused32_64, 32,-1, None)
OPDEF64(unused33_64, 33,-1, None)
OPDEF64(ldqc, LIR_ldc, 2)
OPDEF64(ldqc, LIR_ldc, 2, Op2) // non-volatile 64-bit load
/* floating-point arithmetic operations */
OPDEF64(fneg, LIR_neg, 1)
OPDEF64(fadd, LIR_add, 2)
OPDEF64(fsub, LIR_sub, 2)
OPDEF64(fmul, LIR_mul, 2)
OPDEF64(fdiv, LIR_div, 2)
OPDEF64(fmod, LIR_mod, 2)
OPDEF64(fneg, LIR_neg, 1, Op1) // floating-point negation
OPDEF64(fadd, LIR_add, 2, Op2) // floating-point addition
OPDEF64(fsub, LIR_sub, 2, Op2) // floating-point subtraction
OPDEF64(fmul, LIR_mul, 2, Op2) // floating-point multiplication
OPDEF64(fdiv, LIR_div, 2, Op2) // floating-point division
OPDEF64(fmod, LIR_mod, 2, Op2) // floating-point modulus(?)
OPDEF64(qiand, 41, 2)
OPDEF64(qiadd, 42, 2)
OPDEF64(qior, 43, 2)
OPDEF64(qilsh, 44, 2)
OPDEF64(qjoin, 45, 2) // 1st arg is low 32 bits, 2nd arg is high 32 bits
OPDEF64(qiand, 41, 2, Op2) // 64-bit bitwise AND
OPDEF64(qiadd, 42, 2, Op2) // 64-bit integer addition
OPDEF64(qior, 43, 2, Op2) // 64-bit bitwise OR
OPDEF64(i2f, 46, 1) // convert an integer to a float
OPDEF64(u2f, 47, 1) // convert an unsigned integer to a float
OPDEF64(qilsh, 44, 2, Op2) // 64-bit left shift
OPDEF64(qjoin, 45, 2, Op2) // join two 32-bit values (1st arg is low bits, 2nd is high)
OPDEF64(unused48_64, 48, 2)
OPDEF64(unused49_64, 49, 2)
OPDEF64(unused50_64, 50, 2)
OPDEF64(unused51_64, 51, 2)
OPDEF64(unused52_64, 52, 2)
OPDEF64(unused53_64, 53, 2)
OPDEF64(unused54_64, 54, 2)
OPDEF64(unused55_64, 55, 2)
OPDEF64(unused56_64, 56, 2)
OPDEF64(unused57_64, 57, 2)
OPDEF64(unused58_64, 58, 2)
OPDEF64(unused59_64, 59, 2)
OPDEF64(unused60_64, 60, 2)
OPDEF64(unused61_64, 61, 2)
OPDEF64(unused62_64, 62, 2)
OPDEF64(unused63_64, 63, 2)
OPDEF64(i2f, 46, 1, Op1) // convert a signed 32-bit integer to a float
OPDEF64(u2f, 47, 1, Op1) // convert an unsigned 32-bit integer to a float
OPDEF64(unused48_64, 48,-1, None)
OPDEF64(unused49_64, 49,-1, None)
OPDEF64(unused50_64, 50,-1, None)
OPDEF64(unused51_64, 51,-1, None)
OPDEF64(unused52_64, 52,-1, None)
OPDEF64(unused53_64, 53,-1, None)
OPDEF64(unused54_64, 54,-1, None)
OPDEF64(unused55_64, 55,-1, None)
OPDEF64(unused56_64, 56,-1, None)
OPDEF64(unused57_64, 57,-1, None)
OPDEF64(unused58_64, 58,-1, None)
OPDEF64(unused59_64, 59,-1, None)
OPDEF64(unused60_64, 60,-1, None)
OPDEF64(unused61_64, 61,-1, None)
OPDEF64(unused62_64, 62,-1, None)
OPDEF64(unused63_64, 63,-1, None)

View File

@ -615,7 +615,7 @@ Assembler::hint(LIns* i, RegisterMask allow /* = ~0 */)
else if (op == LIR_callh)
prefer = rmask(R1);
else if (op == LIR_param)
prefer = rmask(imm2register(i->imm8()));
prefer = rmask(imm2register(i->paramArg()));
if (_allocator.free & allow & prefer)
allow &= prefer;
@ -1564,7 +1564,6 @@ Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ, bool isfar)
// Standard signed and unsigned integer comparisons.
case LIR_eq: cc = EQ; fp_cond = false; break;
case LIR_ov: cc = VS; fp_cond = false; break;
case LIR_cs: cc = CS; fp_cond = false; break;
case LIR_lt: cc = LT; fp_cond = false; break;
case LIR_le: cc = LE; fp_cond = false; break;
case LIR_gt: cc = GT; fp_cond = false; break;
@ -1608,8 +1607,8 @@ Assembler::asm_cmp(LIns *cond)
{
LOpcode condop = cond->opcode();
// LIR_ov and LIR_cs recycle the flags set by arithmetic ops
if ((condop == LIR_ov) || (condop == LIR_cs))
// LIR_ov recycles the flags set by arithmetic ops
if ((condop == LIR_ov))
return;
LInsp lhs = cond->oprnd1();
@ -1700,7 +1699,6 @@ Assembler::asm_cond(LInsp ins)
{
case LIR_eq: SET(r,EQ); break;
case LIR_ov: SET(r,VS); break;
case LIR_cs: SET(r,CS); break;
case LIR_lt: SET(r,LT); break;
case LIR_le: SET(r,LE); break;
case LIR_gt: SET(r,GT); break;
@ -1882,7 +1880,6 @@ Assembler::asm_cmov(LInsp ins)
// note that these are all opposites...
case LIR_eq: MOVNE(rr, iffalsereg); break;
case LIR_ov: MOVVC(rr, iffalsereg); break;
case LIR_cs: MOVNC(rr, iffalsereg); break;
case LIR_lt: MOVGE(rr, iffalsereg); break;
case LIR_le: MOVGT(rr, iffalsereg); break;
case LIR_gt: MOVLE(rr, iffalsereg); break;
@ -1918,8 +1915,8 @@ Assembler::asm_qlo(LInsp ins)
void
Assembler::asm_param(LInsp ins)
{
uint32_t a = ins->imm8();
uint32_t kind = ins->imm8b();
uint32_t a = ins->paramArg();
uint32_t kind = ins->paramKind();
if (kind == 0) {
// ordinary param
AbiKind abi = _thisfrag->lirbuf->abi;

View File

@ -503,7 +503,6 @@ enum {
#define MOVHS(dr,sr) MOV_cond(HS, dr, sr) // Equivalent to MOVCS
#define MOVCS(dr,sr) MOV_cond(CS, dr, sr) // Equivalent to MOVHS
#define MOVVC(dr,sr) MOV_cond(VC, dr, sr) // overflow clear
#define MOVNC(dr,sr) MOV_cond(CC, dr, sr) // carry clear
// _d = [_b+off]
#define LDR(_d,_b,_off) asm_ldr_chk(_d,_b,_off,1)
@ -666,8 +665,6 @@ enum {
#define JNGE(t) B_cond(LT,t)
#define JG(t) B_cond(GT,t)
#define JNG(t) B_cond(LE,t)
#define JC(t) B_cond(CS,t)
#define JNC(t) B_cond(CC,t)
#define JO(t) B_cond(VS,t)
#define JNO(t) B_cond(VC,t)

View File

@ -491,8 +491,6 @@ namespace nanojit
BNE(0, tt);
else if (condop == LIR_ov)
BVC(0, tt);
else if (condop == LIR_cs)
BCC(0, tt);
else if (condop == LIR_lt)
BGE(0, tt);
else if (condop == LIR_le)
@ -516,8 +514,6 @@ namespace nanojit
BE(0, tt);
else if (condop == LIR_ov)
BVS(0, tt);
else if (condop == LIR_cs)
BCS(0, tt);
else if (condop == LIR_lt)
BL(0, tt);
else if (condop == LIR_le)
@ -544,8 +540,8 @@ namespace nanojit
underrunProtect(12);
LOpcode condop = cond->opcode();
// LIR_ov and LIR_cs recycle the flags set by arithmetic ops
if ((condop == LIR_ov) || (condop == LIR_cs))
// LIR_ov recycles the flags set by arithmetic ops
if ((condop == LIR_ov))
return;
LInsp lhs = cond->oprnd1();
@ -591,7 +587,7 @@ namespace nanojit
// restore first parameter, the only one we use
LInsp state = _thisfrag->lirbuf->state;
findSpecificRegFor(state, argRegs[state->imm8()]);
findSpecificRegFor(state, argRegs[state->paramArg()]);
}
void Assembler::asm_fcond(LInsp ins)
@ -612,8 +608,6 @@ namespace nanojit
MOVEI(1, 1, 0, 0, r);
else if (op == LIR_ov)
MOVVSI(1, 1, 0, 0, r);
else if (op == LIR_cs)
MOVCSI(1, 1, 0, 0, r);
else if (op == LIR_lt)
MOVLI(1, 1, 0, 0, r);
else if (op == LIR_le)
@ -788,7 +782,6 @@ namespace nanojit
// note that these are all opposites...
case LIR_eq: MOVNE (iffalsereg, 1, 0, 0, rr); break;
case LIR_ov: MOVVC (iffalsereg, 1, 0, 0, rr); break;
case LIR_cs: MOVCC (iffalsereg, 1, 0, 0, rr); break;
case LIR_lt: MOVGE (iffalsereg, 1, 0, 0, rr); break;
case LIR_le: MOVG (iffalsereg, 1, 0, 0, rr); break;
case LIR_gt: MOVLE (iffalsereg, 1, 0, 0, rr); break;
@ -817,8 +810,8 @@ namespace nanojit
void Assembler::asm_param(LInsp ins)
{
uint32_t a = ins->imm8();
uint32_t kind = ins->imm8b();
uint32_t a = ins->paramArg();
uint32_t kind = ins->paramKind();
// prepResultReg(ins, rmask(argRegs[a]));
if (kind == 0) {
prepResultReg(ins, rmask(argRegs[a]));

View File

@ -685,12 +685,6 @@ namespace nanojit
Format_4_2I(rd, 0x2c, cc2, 0xb, cc1, cc0, simm11); \
} while (0)
#define MOVCSI(simm11, cc2, cc1, cc0, rd) \
do { \
asm_output("movcs %d, %s", simm11, gpn(rd)); \
Format_4_2I(rd, 0x2c, cc2, 5, cc1, cc0, simm11); \
} while (0)
#define MOVLEUI(simm11, cc2, cc1, cc0, rd) \
do { \
asm_output("movleu %d, %s", simm11, gpn(rd)); \

View File

@ -343,8 +343,8 @@ namespace nanojit
}
else if (op == LIR_param) {
uint32_t max_regs = max_abi_regs[_thisfrag->lirbuf->abi];
if (i->imm8() < max_regs)
prefer &= rmask(Register(i->imm8()));
if (i->paramArg() < max_regs)
prefer &= rmask(Register(i->paramArg()));
}
else if (op == LIR_callh || (op == LIR_rsh && i->oprnd1()->opcode()==LIR_callh)) {
prefer &= rmask(retRegs[1]);
@ -657,8 +657,6 @@ namespace nanojit
JNE(targ, isfar);
else if (condop == LIR_ov)
JNO(targ, isfar);
else if (condop == LIR_cs)
JNC(targ, isfar);
else if (condop == LIR_lt)
JNL(targ, isfar);
else if (condop == LIR_le)
@ -682,8 +680,6 @@ namespace nanojit
JE(targ, isfar);
else if (condop == LIR_ov)
JO(targ, isfar);
else if (condop == LIR_cs)
JC(targ, isfar);
else if (condop == LIR_lt)
JL(targ, isfar);
else if (condop == LIR_le)
@ -718,8 +714,8 @@ namespace nanojit
{
LOpcode condop = cond->opcode();
// LIR_ov and LIR_cs recycle the flags set by arithmetic ops
if ((condop == LIR_ov) || (condop == LIR_cs))
// LIR_ov recycles the flags set by arithmetic ops
if ((condop == LIR_ov))
return;
LInsp lhs = cond->oprnd1();
@ -788,8 +784,6 @@ namespace nanojit
SETE(r);
else if (op == LIR_ov)
SETO(r);
else if (op == LIR_cs)
SETC(r);
else if (op == LIR_lt)
SETL(r);
else if (op == LIR_le)
@ -1086,7 +1080,6 @@ namespace nanojit
// note that these are all opposites...
case LIR_eq: MRNE(rr, iffalsereg); break;
case LIR_ov: MRNO(rr, iffalsereg); break;
case LIR_cs: MRNC(rr, iffalsereg); break;
case LIR_lt: MRGE(rr, iffalsereg); break;
case LIR_le: MRG(rr, iffalsereg); break;
case LIR_gt: MRLE(rr, iffalsereg); break;
@ -1114,8 +1107,8 @@ namespace nanojit
void Assembler::asm_param(LInsp ins)
{
uint32_t a = ins->imm8();
uint32_t kind = ins->imm8b();
uint32_t a = ins->paramArg();
uint32_t kind = ins->paramKind();
if (kind == 0) {
// ordinary param
AbiKind abi = _thisfrag->lirbuf->abi;

View File

@ -393,7 +393,6 @@ namespace nanojit
#define SETBE(r) do { count_alu(); ALU2(0x0f96,(r),(r)); asm_output("setbe %s",gpn(r)); } while(0)
#define SETA(r) do { count_alu(); ALU2(0x0f97,(r),(r)); asm_output("seta %s",gpn(r)); } while(0)
#define SETAE(r) do { count_alu(); ALU2(0x0f93,(r),(r)); asm_output("setae %s",gpn(r)); } while(0)
#define SETC(r) do { count_alu(); ALU2(0x0f90,(r),(r)); asm_output("setc %s",gpn(r)); } while(0)
#define SETO(r) do { count_alu(); ALU2(0x0f92,(r),(r)); asm_output("seto %s",gpn(r)); } while(0)
#define MREQ(dr,sr) do { count_alu(); ALU2(0x0f44,dr,sr); asm_output("cmove %s,%s", gpn(dr),gpn(sr)); } while(0)
@ -405,7 +404,6 @@ namespace nanojit
#define MRB(dr,sr) do { count_alu(); ALU2(0x0f42,dr,sr); asm_output("cmovb %s,%s", gpn(dr),gpn(sr)); } while(0)
#define MRBE(dr,sr) do { count_alu(); ALU2(0x0f46,dr,sr); asm_output("cmovbe %s,%s", gpn(dr),gpn(sr)); } while(0)
#define MRA(dr,sr) do { count_alu(); ALU2(0x0f47,dr,sr); asm_output("cmova %s,%s", gpn(dr),gpn(sr)); } while(0)
#define MRNC(dr,sr) do { count_alu(); ALU2(0x0f43,dr,sr); asm_output("cmovnc %s,%s", gpn(dr),gpn(sr)); } while(0)
#define MRAE(dr,sr) do { count_alu(); ALU2(0x0f43,dr,sr); asm_output("cmovae %s,%s", gpn(dr),gpn(sr)); } while(0)
#define MRNO(dr,sr) do { count_alu(); ALU2(0x0f41,dr,sr); asm_output("cmovno %s,%s", gpn(dr),gpn(sr)); } while(0)
@ -610,8 +608,6 @@ namespace nanojit
#define JGE(t, isfar) JCC(0x0D, t, isfar, "jge")
#define JNGE(t, isfar) JCC(0x0C, t, isfar, "jnge")
#define JC(t, isfar) JCC(0x02, t, isfar, "jc")
#define JNC(t, isfar) JCC(0x03, t, isfar, "jnc")
#define JO(t, isfar) JCC(0x00, t, isfar, "jo")
#define JNO(t, isfar) JCC(0x01, t, isfar, "jno")