diff --git a/browser/components/places/tests/unit/test_leftpane_corruption_handling.js b/browser/components/places/tests/unit/test_leftpane_corruption_handling.js
index 014c9aeb673..8b60d87f02d 100644
--- a/browser/components/places/tests/unit/test_leftpane_corruption_handling.js
+++ b/browser/components/places/tests/unit/test_leftpane_corruption_handling.js
@@ -202,7 +202,8 @@ function compareJSON(aNodeJSON_1, aNodeJSON_2) {
   const SKIP_PROPS = ["dateAdded", "lastModified", "id"];
 
   function compareObjects(obj1, obj2) {
-    do_check_eq(obj1.__count__, obj2.__count__);
+    function count(o) { var n = 0; for (let p in o) n++; return n; }
+    do_check_eq(count(obj1), count(obj2));
     for (let prop in obj1) {
       // Skip everchanging values.
       if (SKIP_PROPS.indexOf(prop) != -1)
diff --git a/configure.in b/configure.in
index 81b19ec18f5..728683c4016 100644
--- a/configure.in
+++ b/configure.in
@@ -6997,6 +6997,17 @@ if test -n "$MOZ_TRACEVIS"; then
   AC_DEFINE(MOZ_TRACEVIS)
 fi
 
+dnl ========================================================
+dnl = Use GCTimer
+dnl ========================================================
+MOZ_ARG_ENABLE_BOOL(gctimer,
+[  --enable-gctimer        Enable GC timer (default=no)],
+    MOZ_GCTIMER=1,
+    MOZ_GCTIMER= )
+if test -n "$MOZ_GCTIMER"; then
+    AC_DEFINE(MOZ_GCTIMER)
+fi
+
 dnl ========================================================
 dnl = Use Valgrind
 dnl ========================================================
@@ -8128,9 +8139,6 @@ if test "$BUILD_CTYPES"; then
   if test "$OS_ARCH" = "WINCE" -a `echo $OS_TEST | grep -ic arm` = 1; then
     # Disable ctypes for arm/wince.
     BUILD_CTYPES=
-  elif test "$_MSC_VER" && test -z $AS; then
-    # Disable ctypes if we're on MSVC and MASM is unavailable.
-    AC_MSG_ERROR([No suitable assembler found. An assembler is required to build js-ctypes. You may --disable-ctypes to avoid this. If you are building with MS Visual Studio 8 Express, you may download the MASM 8.0 package, upgrade to Visual Studio 9 Express, or install the Vista SDK.])
   else
     AC_DEFINE(BUILD_CTYPES)
   fi
@@ -8721,6 +8729,10 @@ fi
 dist=$MOZ_BUILD_ROOT/dist
 ac_configure_args="$_SUBDIR_CONFIG_ARGS"
 ac_configure_args="$ac_configure_args --enable-threadsafe"
+if test "$BUILD_CTYPES"; then
+  # Build js-ctypes on the platforms we can.
+  ac_configure_args="$ac_configure_args --enable-ctypes"
+fi
 if test -z "$MOZ_NATIVE_NSPR"; then
   ac_configure_args="$ac_configure_args --with-nspr-cflags='$NSPR_CFLAGS'"
   ac_configure_args="$ac_configure_args --with-nspr-libs='$NSPR_LIBS'"
@@ -8737,54 +8749,6 @@ fi
 AC_OUTPUT_SUBDIRS(js/src)
 ac_configure_args="$_SUBDIR_CONFIG_ARGS"
 
-# Build jsctypes on the platforms we can.
-if test "$BUILD_CTYPES"; then
-  # Run the libffi 'configure' script.
-  ac_configure_args="--disable-shared --enable-static --disable-raw-api"
-  if test "$MOZ_DEBUG"; then
-    ac_configure_args="$ac_configure_args --enable-debug"
-  fi
-  if test "$DSO_PIC_CFLAGS"; then
-    ac_configure_args="$ac_configure_args --with-pic"
-  fi
-  if test "$CROSS_COMPILE"; then
-    case "$target" in
-    *-mingw*)
-      ac_configure_args="$ac_configure_args --build=$build --host=${target_cpu}-${target_os} HOST_CC=\"$HOST_CC\" CC=\"$CC\""
-      ;;
-    *)
-      ac_configure_args="$ac_configure_args --build=$build --host=$target HOST_CC=\"$HOST_CC\" CC=\"$CC\""
-      ;;
-    esac
-  fi
-  if test "$_MSC_VER"; then
-    # Use a wrapper script for cl and ml that looks more like gcc.
-    # autotools can't quite handle an MSVC build environment yet.
-    ac_configure_args="$ac_configure_args LD=link CPP=\"cl -nologo -EP\" SHELL=sh.exe"
-    case "${target_cpu}" in
-    x86_64)
-      # Need target since MSYS tools into mozilla-build may be 32bit
-      ac_configure_args="$ac_configure_args CC=\"$_topsrcdir/js/ctypes/libffi/msvcc.sh -m64\" --build=$build --host=$target"
-      ;;
-    *)
-      ac_configure_args="$ac_configure_args CC=$_topsrcdir/js/ctypes/libffi/msvcc.sh"
-      ;;
-    esac
-  fi
-  if test "$SOLARIS_SUNPRO_CC"; then
-    # Always use gcc for libffi on Solaris
-    ac_configure_args="$ac_configure_args CC=gcc"
-  fi
-
-  # Use a separate cache file for libffi, since it does things differently
-  # from our configure.
-  old_cache_file=$cache_file
-  cache_file=js/ctypes/libffi/config.cache
-  AC_OUTPUT_SUBDIRS(js/ctypes/libffi)
-  cache_file=$old_cache_file
-  ac_configure_args="$_SUBDIR_CONFIG_ARGS"
-fi
-
 fi # COMPILE_ENVIRONMENT && !LIBXUL_SDK_DIR
 
 dnl Prevent the regeneration of autoconf.mk forcing rebuilds of the world
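The new --enable-gctimer switch above does nothing except AC_DEFINE(MOZ_GCTIMER); the same switch is added to js/src/configure.in further down. A minimal sketch of how code typically consumes such a define — the GCTimer class and names here are hypothetical, not part of this patch:

    // Hypothetical consumer of the MOZ_GCTIMER define (illustration only).
    #ifdef MOZ_GCTIMER
    class GCTimer {
        PRIntervalTime mStart;
      public:
        GCTimer() : mStart(PR_IntervalNow()) {}
        ~GCTimer() {
            printf("GC took %u ms\n",
                   PR_IntervalToMilliseconds(PR_IntervalNow() - mStart));
        }
    };
    #endif

    void CollectGarbage(JSContext* cx) {
    #ifdef MOZ_GCTIMER
        GCTimer timer;  // scoped timer; reports when the GC call returns
    #endif
        JS_GC(cx);
    }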
- ac_configure_args="$ac_configure_args LD=link CPP=\"cl -nologo -EP\" SHELL=sh.exe" - case "${target_cpu}" in - x86_64) - # Need target since MSYS tools into mozilla-build may be 32bit - ac_configure_args="$ac_configure_args CC=\"$_topsrcdir/js/ctypes/libffi/msvcc.sh -m64\" --build=$build --host=$target" - ;; - *) - ac_configure_args="$ac_configure_args CC=$_topsrcdir/js/ctypes/libffi/msvcc.sh" - ;; - esac - fi - if test "$SOLARIS_SUNPRO_CC"; then - # Always use gcc for libffi on Solaris - ac_configure_args="$ac_configure_args CC=gcc" - fi - - # Use a separate cache file for libffi, since it does things differently - # from our configure. - old_cache_file=$cache_file - cache_file=js/ctypes/libffi/config.cache - AC_OUTPUT_SUBDIRS(js/ctypes/libffi) - cache_file=$old_cache_file - ac_configure_args="$_SUBDIR_CONFIG_ARGS" -fi - fi # COMPILE_ENVIRONMENT && !LIBXUL_SDK_DIR dnl Prevent the regeneration of autoconf.mk forcing rebuilds of the world diff --git a/content/base/src/nsContentUtils.cpp b/content/base/src/nsContentUtils.cpp index 8d39eca01aa..4ece5dffe7d 100644 --- a/content/base/src/nsContentUtils.cpp +++ b/content/base/src/nsContentUtils.cpp @@ -5407,7 +5407,7 @@ public: jsval source; jsval clone; jsval temp; - JSAutoIdArray ids; + js::AutoIdArray ids; jsuint index; private: @@ -5425,7 +5425,7 @@ private: } CloneStackFrame* prevFrame; - JSAutoTempValueRooter tvrVals; + js::AutoArrayRooter tvrVals; }; class CloneStack @@ -5691,7 +5691,7 @@ nsContentUtils::CreateStructuredClone(JSContext* cx, } jsval output = OBJECT_TO_JSVAL(obj); - JSAutoTempValueRooter tvr(cx, output); + js::AutoValueRooter tvr(cx, output); CloneStack stack(cx); if (!stack.Push(val, OBJECT_TO_JSVAL(obj), diff --git a/content/canvas/src/CustomQS_Canvas2D.h b/content/canvas/src/CustomQS_Canvas2D.h index 136fe996b76..4ca619fef47 100644 --- a/content/canvas/src/CustomQS_Canvas2D.h +++ b/content/canvas/src/CustomQS_Canvas2D.h @@ -50,7 +50,7 @@ Canvas2D_SetStyleHelper(JSContext *cx, JSObject *obj, jsval id, jsval *vp, XPC_QS_ASSERT_CONTEXT_OK(cx); nsIDOMCanvasRenderingContext2D *self; xpc_qsSelfRef selfref; - JSAutoTempValueRooter tvr(cx); + js::AutoValueRooter tvr(cx); if (!xpc_qsUnwrapThis(cx, obj, nsnull, &self, &selfref.ptr, tvr.addr(), nsnull)) return JS_FALSE; @@ -179,14 +179,14 @@ nsIDOMCanvasRenderingContext2D_CreateImageData(JSContext *cx, uintN argc, jsval // create the fast typed array; it's initialized to 0 by default JSObject *darray = js_CreateTypedArray(cx, js::TypedArray::TYPE_UINT8_CLAMPED, len); - JSAutoTempValueRooter rd(cx, darray); + js::AutoValueRooter rd(cx, darray); if (!darray) return JS_FALSE; // Do JS_NewObject after CreateTypedArray, so that gc will get // triggered here if necessary JSObject *result = JS_NewObject(cx, NULL, NULL, NULL); - JSAutoTempValueRooter rr(cx, result); + js::AutoValueRooter rr(cx, result); if (!result) return JS_FALSE; @@ -212,7 +212,7 @@ nsIDOMCanvasRenderingContext2D_GetImageData(JSContext *cx, uintN argc, jsval *vp nsIDOMCanvasRenderingContext2D *self; xpc_qsSelfRef selfref; - JSAutoTempValueRooter tvr(cx); + js::AutoValueRooter tvr(cx); if (!xpc_qsUnwrapThis(cx, obj, nsnull, &self, &selfref.ptr, tvr.addr(), nsnull)) return JS_FALSE; @@ -246,7 +246,7 @@ nsIDOMCanvasRenderingContext2D_GetImageData(JSContext *cx, uintN argc, jsval *vp // create the fast typed array JSObject *darray = js_CreateTypedArray(cx, js::TypedArray::TYPE_UINT8_CLAMPED, len); - JSAutoTempValueRooter rd(cx, darray); + js::AutoValueRooter rd(cx, darray); if (!darray) return JS_FALSE; @@ -260,7 +260,7 
diff --git a/content/canvas/src/CustomQS_Canvas2D.h b/content/canvas/src/CustomQS_Canvas2D.h
index 136fe996b76..4ca619fef47 100644
--- a/content/canvas/src/CustomQS_Canvas2D.h
+++ b/content/canvas/src/CustomQS_Canvas2D.h
@@ -50,7 +50,7 @@ Canvas2D_SetStyleHelper(JSContext *cx, JSObject *obj, jsval id, jsval *vp,
     XPC_QS_ASSERT_CONTEXT_OK(cx);
     nsIDOMCanvasRenderingContext2D *self;
     xpc_qsSelfRef selfref;
-    JSAutoTempValueRooter tvr(cx);
+    js::AutoValueRooter tvr(cx);
     if (!xpc_qsUnwrapThis(cx, obj, nsnull, &self, &selfref.ptr, tvr.addr(), nsnull))
         return JS_FALSE;
 
@@ -179,14 +179,14 @@ nsIDOMCanvasRenderingContext2D_CreateImageData(JSContext *cx, uintN argc, jsval
 
     // create the fast typed array; it's initialized to 0 by default
     JSObject *darray = js_CreateTypedArray(cx, js::TypedArray::TYPE_UINT8_CLAMPED, len);
-    JSAutoTempValueRooter rd(cx, darray);
+    js::AutoValueRooter rd(cx, darray);
     if (!darray)
         return JS_FALSE;
 
     // Do JS_NewObject after CreateTypedArray, so that gc will get
     // triggered here if necessary
     JSObject *result = JS_NewObject(cx, NULL, NULL, NULL);
-    JSAutoTempValueRooter rr(cx, result);
+    js::AutoValueRooter rr(cx, result);
     if (!result)
         return JS_FALSE;
 
@@ -212,7 +212,7 @@ nsIDOMCanvasRenderingContext2D_GetImageData(JSContext *cx, uintN argc, jsval *vp
 
     nsIDOMCanvasRenderingContext2D *self;
     xpc_qsSelfRef selfref;
-    JSAutoTempValueRooter tvr(cx);
+    js::AutoValueRooter tvr(cx);
     if (!xpc_qsUnwrapThis(cx, obj, nsnull, &self, &selfref.ptr, tvr.addr(), nsnull))
         return JS_FALSE;
 
@@ -246,7 +246,7 @@ nsIDOMCanvasRenderingContext2D_GetImageData(JSContext *cx, uintN argc, jsval *vp
 
     // create the fast typed array
     JSObject *darray = js_CreateTypedArray(cx, js::TypedArray::TYPE_UINT8_CLAMPED, len);
-    JSAutoTempValueRooter rd(cx, darray);
+    js::AutoValueRooter rd(cx, darray);
     if (!darray)
         return JS_FALSE;
 
@@ -260,7 +260,7 @@ nsIDOMCanvasRenderingContext2D_GetImageData(JSContext *cx, uintN argc, jsval *vp
     // Do JS_NewObject after CreateTypedArray, so that gc will get
     // triggered here if necessary
     JSObject *result = JS_NewObject(cx, NULL, NULL, NULL);
-    JSAutoTempValueRooter rr(cx, result);
+    js::AutoValueRooter rr(cx, result);
     if (!result)
         return JS_FALSE;
 
@@ -286,7 +286,7 @@ nsIDOMCanvasRenderingContext2D_PutImageData(JSContext *cx, uintN argc, jsval *vp
 
     nsIDOMCanvasRenderingContext2D *self;
     xpc_qsSelfRef selfref;
-    JSAutoTempValueRooter tvr(cx);
+    js::AutoValueRooter tvr(cx);
     if (!xpc_qsUnwrapThis(cx, obj, nsnull, &self, &selfref.ptr, tvr.addr(), nsnull))
         return JS_FALSE;
 
@@ -308,7 +308,7 @@ nsIDOMCanvasRenderingContext2D_PutImageData(JSContext *cx, uintN argc, jsval *vp
     JSObject *darray;
 
     // grab width, height, and the dense array from the dataObject
-    JSAutoTempValueRooter tv(cx);
+    js::AutoValueRooter tv(cx);
 
     if (!JS_GetProperty(cx, dataObject, "width", tv.addr()) ||
         !JS_ValueToECMAInt32(cx, tv.value(), &wi))
@@ -329,7 +329,7 @@ nsIDOMCanvasRenderingContext2D_PutImageData(JSContext *cx, uintN argc, jsval *vp
         return JS_FALSE;
     darray = JSVAL_TO_OBJECT(tv.value());
 
-    JSAutoTempValueRooter tsrc_tvr(cx);
+    js::AutoValueRooter tsrc_tvr(cx);
 
     js::TypedArray *tsrc = NULL;
     if (darray->getClass() == &js::TypedArray::fastClasses[js::TypedArray::TYPE_UINT8] ||
diff --git a/content/canvas/src/CustomQS_WebGL.h b/content/canvas/src/CustomQS_WebGL.h
index c8b8962c532..a9da0ce1a1b 100644
--- a/content/canvas/src/CustomQS_WebGL.h
+++ b/content/canvas/src/CustomQS_WebGL.h
@@ -68,7 +68,7 @@ nsICanvasRenderingContextWebGL_BufferData(JSContext *cx, uintN argc, jsval *vp)
 
     nsICanvasRenderingContextWebGL *self;
     xpc_qsSelfRef selfref;
-    JSAutoTempValueRooter tvr(cx);
+    js::AutoValueRooter tvr(cx);
     if (!xpc_qsUnwrapThis(cx, obj, nsnull, &self, &selfref.ptr, tvr.addr(), nsnull))
         return JS_FALSE;
 
@@ -134,7 +134,7 @@ nsICanvasRenderingContextWebGL_BufferSubData(JSContext *cx, uintN argc, jsval *v
 
     nsICanvasRenderingContextWebGL *self;
     xpc_qsSelfRef selfref;
-    JSAutoTempValueRooter tvr(cx);
+    js::AutoValueRooter tvr(cx);
     if (!xpc_qsUnwrapThis(cx, obj, nsnull, &self, &selfref.ptr, tvr.addr(), nsnull))
         return JS_FALSE;
 
@@ -204,7 +204,7 @@ nsICanvasRenderingContextWebGL_TexImage2D(JSContext *cx, uintN argc, jsval *vp)
 
     nsICanvasRenderingContextWebGL *self;
     xpc_qsSelfRef selfref;
-    JSAutoTempValueRooter tvr(cx);
+    js::AutoValueRooter tvr(cx);
     if (!xpc_qsUnwrapThis(cx, obj, nsnull, &self, &selfref.ptr, tvr.addr(), nsnull))
         return JS_FALSE;
 
@@ -306,7 +306,7 @@ nsICanvasRenderingContextWebGL_TexSubImage2D(JSContext *cx, uintN argc, jsval *v
 
     nsICanvasRenderingContextWebGL *self;
     xpc_qsSelfRef selfref;
-    JSAutoTempValueRooter tvr(cx);
+    js::AutoValueRooter tvr(cx);
     if (!xpc_qsUnwrapThis(cx, obj, nsnull, &self, &selfref.ptr, tvr.addr(), nsnull))
         return JS_FALSE;
 
@@ -407,7 +407,7 @@ helper_nsICanvasRenderingContextWebGL_Uniform_x_iv(JSContext *cx, uintN argc, js
 
     nsICanvasRenderingContextWebGL *self;
     xpc_qsSelfRef selfref;
-    JSAutoTempValueRooter tvr(cx);
+    js::AutoValueRooter tvr(cx);
     if (!xpc_qsUnwrapThis(cx, obj, nsnull, &self, &selfref.ptr, tvr.addr(), nsnull))
         return JS_FALSE;
 
@@ -427,7 +427,7 @@ helper_nsICanvasRenderingContextWebGL_Uniform_x_iv(JSContext *cx, uintN argc, js
 
     JSObject *arg1 = JSVAL_TO_OBJECT(argv[1]);
 
-    JSAutoTempValueRooter obj_tvr(cx);
+    js::AutoValueRooter obj_tvr(cx);
 
     js::TypedArray *wa = 0;
 
@@ -477,7 +477,7 @@ helper_nsICanvasRenderingContextWebGL_Uniform_x_fv(JSContext *cx, uintN argc, js
 
     nsICanvasRenderingContextWebGL *self;
    xpc_qsSelfRef selfref;
-    JSAutoTempValueRooter tvr(cx);
+    js::AutoValueRooter tvr(cx);
     if (!xpc_qsUnwrapThis(cx, obj, nsnull, &self, &selfref.ptr, tvr.addr(), nsnull))
         return JS_FALSE;
 
@@ -497,7 +497,7 @@ helper_nsICanvasRenderingContextWebGL_Uniform_x_fv(JSContext *cx, uintN argc, js
 
     JSObject *arg1 = JSVAL_TO_OBJECT(argv[1]);
 
-    JSAutoTempValueRooter obj_tvr(cx);
+    js::AutoValueRooter obj_tvr(cx);
 
     js::TypedArray *wa = 0;
 
@@ -547,7 +547,7 @@ helper_nsICanvasRenderingContextWebGL_UniformMatrix_x_fv(JSContext *cx, uintN ar
 
     nsICanvasRenderingContextWebGL *self;
     xpc_qsSelfRef selfref;
-    JSAutoTempValueRooter tvr(cx);
+    js::AutoValueRooter tvr(cx);
     if (!xpc_qsUnwrapThis(cx, obj, nsnull, &self, &selfref.ptr, tvr.addr(), nsnull))
         return JS_FALSE;
 
@@ -571,7 +571,7 @@ helper_nsICanvasRenderingContextWebGL_UniformMatrix_x_fv(JSContext *cx, uintN ar
 
     JSObject *arg2 = JSVAL_TO_OBJECT(argv[2]);
 
-    JSAutoTempValueRooter obj_tvr(cx);
+    js::AutoValueRooter obj_tvr(cx);
 
     js::TypedArray *wa = 0;
 
@@ -618,7 +618,7 @@ helper_nsICanvasRenderingContextWebGL_VertexAttrib_x_fv(JSContext *cx, uintN arg
 
     nsICanvasRenderingContextWebGL *self;
     xpc_qsSelfRef selfref;
-    JSAutoTempValueRooter tvr(cx);
+    js::AutoValueRooter tvr(cx);
     if (!xpc_qsUnwrapThis(cx, obj, nsnull, &self, &selfref.ptr, tvr.addr(), nsnull))
         return JS_FALSE;
 
@@ -638,7 +638,7 @@ helper_nsICanvasRenderingContextWebGL_VertexAttrib_x_fv(JSContext *cx, uintN arg
 
     JSObject *arg1 = JSVAL_TO_OBJECT(argv[1]);
 
-    JSAutoTempValueRooter obj_tvr(cx);
+    js::AutoValueRooter obj_tvr(cx);
 
     js::TypedArray *wa = 0;
 
@@ -780,7 +780,7 @@ helper_nsICanvasRenderingContextWebGL_Uniform_x_iv_tn(JSContext *cx, JSObject *o
         return JSVAL_VOID;
     }
 
-    JSAutoTempValueRooter obj_tvr(cx);
+    js::AutoValueRooter obj_tvr(cx);
 
     js::TypedArray *wa = 0;
 
@@ -835,7 +835,7 @@ helper_nsICanvasRenderingContextWebGL_Uniform_x_fv_tn(JSContext *cx, JSObject *o
         return JSVAL_VOID;
     }
 
-    JSAutoTempValueRooter obj_tvr(cx);
+    js::AutoValueRooter obj_tvr(cx);
 
     js::TypedArray *wa = 0;
 
@@ -890,7 +890,7 @@ helper_nsICanvasRenderingContextWebGL_UniformMatrix_x_fv_tn(JSContext *cx, JSObj
         return JSVAL_VOID;
     }
 
-    JSAutoTempValueRooter obj_tvr(cx);
+    js::AutoValueRooter obj_tvr(cx);
 
     js::TypedArray *wa = 0;
 
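Note the order the stubs above keep after the rename: each fresh object is handed to a rooter before the null check. Rooting a null result is harmless, and a later allocation in the same stub can trigger a GC, so the first object must already be rooted by that point. Distilled:

    // Distilled from the CreateImageData/GetImageData stubs (sketch).
    JSBool TwoAllocations(JSContext* cx)
    {
        JSObject* first = JS_NewObject(cx, NULL, NULL, NULL);
        js::AutoValueRooter firstRoot(cx, first);   // root before testing for null
        if (!first)
            return JS_FALSE;

        // May run a GC; 'first' survives because it is already rooted.
        JSObject* second = JS_NewObject(cx, NULL, NULL, NULL);
        if (!second)
            return JS_FALSE;
        return JS_TRUE;
    }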
#include "jscntxt.h" +#include "jsobj.h" #include "jsdbgapi.h" #include "nscore.h" @@ -1469,7 +1470,7 @@ jsval nsDOMClassInfo::sJava_id = JSVAL_VOID; jsval nsDOMClassInfo::sPackages_id = JSVAL_VOID; static const JSClass *sObjectClass = nsnull; -const JSClass *nsDOMClassInfo::sXPCNativeWrapperClass = nsnull; +JSPropertyOp nsDOMClassInfo::sXPCNativeWrapperGetPropertyOp = nsnull; /** * Set our JSClass pointer for the Object class diff --git a/dom/base/nsDOMClassInfo.h b/dom/base/nsDOMClassInfo.h index c2c7b439487..845b4169e3b 100644 --- a/dom/base/nsDOMClassInfo.h +++ b/dom/base/nsDOMClassInfo.h @@ -174,17 +174,17 @@ public: /** * Get our JSClass pointer for the XPCNativeWrapper class */ - static const JSClass* GetXPCNativeWrapperClass() { - return sXPCNativeWrapperClass; + static JSPropertyOp GetXPCNativeWrapperGetPropertyOp() { + return sXPCNativeWrapperGetPropertyOp; } /** * Set our JSClass pointer for the XPCNativeWrapper class */ - static void SetXPCNativeWrapperClass(JSClass* aClass) { - NS_ASSERTION(!sXPCNativeWrapperClass, - "Double set of sXPCNativeWrapperClass"); - sXPCNativeWrapperClass = aClass; + static void SetXPCNativeWrapperGetPropertyOp(JSPropertyOp getPropertyOp) { + NS_ASSERTION(!sXPCNativeWrapperGetPropertyOp, + "Double set of sXPCNativeWrapperGetPropertyOp"); + sXPCNativeWrapperGetPropertyOp = getPropertyOp; } static PRBool ObjectIsNativeWrapper(JSContext* cx, JSObject* obj) @@ -194,13 +194,13 @@ public: nsIScriptContext *scx = GetScriptContextFromJSContext(cx); NS_PRECONDITION(!scx || !scx->IsContextInitialized() || - sXPCNativeWrapperClass, - "Must know what the XPCNativeWrapper class is!"); + sXPCNativeWrapperGetPropertyOp, + "Must know what the XPCNativeWrapper class GetProperty op is!"); } #endif - return sXPCNativeWrapperClass && - ::JS_GET_CLASS(cx, obj) == sXPCNativeWrapperClass; + return sXPCNativeWrapperGetPropertyOp && + ::JS_GET_CLASS(cx, obj)->getProperty == sXPCNativeWrapperGetPropertyOp; } static void PreserveNodeWrapper(nsIXPConnectWrappedNative *aWrapper); @@ -365,7 +365,7 @@ protected: static jsval sJava_id; static jsval sPackages_id; - static const JSClass *sXPCNativeWrapperClass; + static JSPropertyOp sXPCNativeWrapperGetPropertyOp; }; diff --git a/dom/base/nsJSEnvironment.cpp b/dom/base/nsJSEnvironment.cpp index b12700739b3..757148cc46c 100644 --- a/dom/base/nsJSEnvironment.cpp +++ b/dom/base/nsJSEnvironment.cpp @@ -2108,15 +2108,11 @@ nsJSContext::CallEventHandler(nsISupports* aTarget, void *aScope, void *aHandler return NS_OK; } - jsval targetVal = JSVAL_VOID; - JSAutoTempValueRooter tvr(mContext, 1, &targetVal); - JSObject* target = nsnull; nsresult rv = JSObjectFromInterface(aTarget, aScope, &target); NS_ENSURE_SUCCESS(rv, rv); - targetVal = OBJECT_TO_JSVAL(target); - + js::AutoObjectRooter targetVal(mContext, target); jsval rval = JSVAL_VOID; // This one's a lot easier than EvaluateString because we don't have to @@ -2140,7 +2136,7 @@ nsJSContext::CallEventHandler(nsISupports* aTarget, void *aScope, void *aHandler jsval *argv = nsnull; js::LazilyConstructed poolRelease; - js::LazilyConstructed tvr; + js::LazilyConstructed tvr; // Use |target| as the scope for wrapping the arguments, since aScope is // the safe scope in many cases, which isn't very useful. 
Wrapping aTarget @@ -2587,8 +2583,10 @@ nsJSContext::InitContext(nsIScriptGlobalObject *aGlobalObject) // Now check whether we need to grab a pointer to the // XPCNativeWrapper class - if (!nsDOMClassInfo::GetXPCNativeWrapperClass()) { - nsDOMClassInfo::SetXPCNativeWrapperClass(xpc->GetNativeWrapperClass()); + if (!nsDOMClassInfo::GetXPCNativeWrapperGetPropertyOp()) { + JSPropertyOp getProperty; + xpc->GetNativeWrapperGetPropertyOp(&getProperty); + nsDOMClassInfo::SetXPCNativeWrapperGetPropertyOp(getProperty); } } else { // There's already a global object. We are preparing this outer window @@ -2652,7 +2650,7 @@ nsJSContext::SetProperty(void *aTarget, const char *aPropName, nsISupports *aArg JSAutoRequest ar(mContext); js::LazilyConstructed poolRelease; - js::LazilyConstructed tvr; + js::LazilyConstructed tvr; nsresult rv; rv = ConvertSupportsTojsvals(aArgs, GetNativeGlobal(), &argc, @@ -2687,7 +2685,7 @@ nsJSContext::ConvertSupportsTojsvals(nsISupports *aArgs, PRUint32 *aArgc, jsval **aArgv, js::LazilyConstructed &aPoolRelease, - js::LazilyConstructed &aRooter) + js::LazilyConstructed &aRooter) { nsresult rv = NS_OK; diff --git a/dom/base/nsJSEnvironment.h b/dom/base/nsJSEnvironment.h index 785d53c109e..b6dc7888605 100644 --- a/dom/base/nsJSEnvironment.h +++ b/dom/base/nsJSEnvironment.h @@ -49,8 +49,10 @@ class nsIXPConnectJSObjectHolder; class nsAutoPoolRelease; -class JSAutoTempValueRooter; -namespace js { template class LazilyConstructed; } +namespace js { +class AutoArrayRooter; +template class LazilyConstructed; +} class nsJSContext : public nsIScriptContext, public nsIXPCScriptNotify @@ -215,7 +217,7 @@ protected: PRUint32 *aArgc, jsval **aArgv, js::LazilyConstructed &aPoolRelease, - js::LazilyConstructed &aRooter); + js::LazilyConstructed &aRooter); nsresult AddSupportsPrimitiveTojsvals(nsISupports *aArg, jsval *aArgv); diff --git a/dom/src/threads/nsDOMThreadService.cpp b/dom/src/threads/nsDOMThreadService.cpp index ea6334e22ca..eb2015f2a64 100644 --- a/dom/src/threads/nsDOMThreadService.cpp +++ b/dom/src/threads/nsDOMThreadService.cpp @@ -1,4 +1,4 @@ -/* -*- Mode: c++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 40 -*- */ +/* -*- Mode: c++; c-basic-offset: 2; indent-tabs-mode: nil; tab-width: 40 -*- */ /* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 1.1/GPL 2.0/LGPL 2.1 * @@ -504,71 +504,49 @@ DOMWorkerOperationCallback(JSContext* aCx) nsDOMWorker* worker = (nsDOMWorker*)JS_GetContextPrivate(aCx); NS_ASSERTION(worker, "This must never be null!"); - PRBool wasSuspended = PR_FALSE; - PRBool extraThreadAllowed = PR_FALSE; - jsrefcount suspendDepth = 0; + PRBool canceled = worker->IsCanceled(); + if (!canceled && worker->IsSuspended()) { + JSAutoSuspendRequest suspended(aCx); - for (;;) { - // Kill execution if we're canceled. - if (worker->IsCanceled()) { - LOG(("Forcefully killing JS for worker [0x%p]", - static_cast(worker))); + // Since we're going to block this thread we should open up a new thread + // in the thread pool for other workers. Must check the return value to + // make sure we don't decrement when we failed. + PRBool extraThreadAllowed = + NS_SUCCEEDED(gDOMThreadService->ChangeThreadPoolMaxThreads(1)); - if (wasSuspended) { - if (extraThreadAllowed) { - gDOMThreadService->ChangeThreadPoolMaxThreads(-1); - } - JS_ResumeRequest(aCx, suspendDepth); + // Flush JIT caches now before suspending to avoid holding memory that we + // are not going to use. 
diff --git a/dom/src/threads/nsDOMThreadService.cpp b/dom/src/threads/nsDOMThreadService.cpp
index ea6334e22ca..eb2015f2a64 100644
--- a/dom/src/threads/nsDOMThreadService.cpp
+++ b/dom/src/threads/nsDOMThreadService.cpp
@@ -1,4 +1,4 @@
-/* -*- Mode: c++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 40 -*- */
+/* -*- Mode: c++; c-basic-offset: 2; indent-tabs-mode: nil; tab-width: 40 -*- */
 /* ***** BEGIN LICENSE BLOCK *****
  * Version: MPL 1.1/GPL 2.0/LGPL 2.1
  *
@@ -504,71 +504,49 @@ DOMWorkerOperationCallback(JSContext* aCx)
   nsDOMWorker* worker = (nsDOMWorker*)JS_GetContextPrivate(aCx);
   NS_ASSERTION(worker, "This must never be null!");
 
-  PRBool wasSuspended = PR_FALSE;
-  PRBool extraThreadAllowed = PR_FALSE;
-  jsrefcount suspendDepth = 0;
+  PRBool canceled = worker->IsCanceled();
+  if (!canceled && worker->IsSuspended()) {
+    JSAutoSuspendRequest suspended(aCx);
 
-  for (;;) {
-    // Kill execution if we're canceled.
-    if (worker->IsCanceled()) {
-      LOG(("Forcefully killing JS for worker [0x%p]",
-           static_cast<void*>(worker)));
+    // Since we're going to block this thread we should open up a new thread
+    // in the thread pool for other workers. Must check the return value to
+    // make sure we don't decrement when we failed.
+    PRBool extraThreadAllowed =
+      NS_SUCCEEDED(gDOMThreadService->ChangeThreadPoolMaxThreads(1));
 
-      if (wasSuspended) {
-        if (extraThreadAllowed) {
-          gDOMThreadService->ChangeThreadPoolMaxThreads(-1);
-        }
-        JS_ResumeRequest(aCx, suspendDepth);
+    // Flush JIT caches now before suspending to avoid holding memory that we
+    // are not going to use.
+    JS_FlushCaches(aCx);
+
+    for (;;) {
+      nsAutoMonitor mon(worker->Pool()->Monitor());
+
+      // There's a small chance that the worker was canceled after our check
+      // above in which case we shouldn't wait here. We're guaranteed not to
+      // race here because the pool reenters its monitor after canceling each
+      // worker in order to notify its condition variable.
+      canceled = worker->IsCanceled();
+      if (!canceled && worker->IsSuspended()) {
+        mon.Wait();
       }
-
-      // Kill execution of the currently running JS.
-      JS_ClearPendingException(aCx);
-      return JS_FALSE;
-    }
-
-    // Break out if we're not suspended.
-    if (!worker->IsSuspended()) {
-      if (wasSuspended) {
-        if (extraThreadAllowed) {
-          gDOMThreadService->ChangeThreadPoolMaxThreads(-1);
-        }
-        JS_ResumeRequest(aCx, suspendDepth);
+      else {
+        break;
       }
-      return JS_TRUE;
     }
 
-    if (!wasSuspended) {
-      // Make sure to suspend our request while we block like this, otherwise we
-      // prevent GC for everyone.
-      suspendDepth = JS_SuspendRequest(aCx);
-
-      // Since we're going to block this thread we should open up a new thread
-      // in the thread pool for other workers. Must check the return value to
-      // make sure we don't decrement when we failed.
-      extraThreadAllowed =
-        NS_SUCCEEDED(gDOMThreadService->ChangeThreadPoolMaxThreads(1));
-
-      // Flush JIT caches now before suspending to avoid holding memory that we
-      // are not going to use.
-      JS_FlushCaches(aCx);
-
-      // Only do all this setup once.
-      wasSuspended = PR_TRUE;
-    }
-
-    nsAutoMonitor mon(worker->Pool()->Monitor());
-
-    // There's a small chance that the worker was canceled after our check
-    // above in which case we shouldn't wait here. We're guaranteed not to race
-    // here because the pool reenters its monitor after canceling each worker
-    // in order to notify its condition variable.
-    if (worker->IsSuspended() && !worker->IsCanceled()) {
-      mon.Wait();
+    if (extraThreadAllowed) {
+      gDOMThreadService->ChangeThreadPoolMaxThreads(-1);
     }
   }
 
-  NS_NOTREACHED("Should never get here!");
-  return JS_FALSE;
+  if (canceled) {
+    LOG(("Forcefully killing JS for worker [0x%p]",
+         static_cast<void*>(worker)));
+    // Kill execution of the currently running JS.
+    JS_ClearPendingException(aCx);
+    return JS_FALSE;
+  }
+  return JS_TRUE;
 }
 
 void
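The rewritten callback is the standard monitor-wait shape: enter the monitor, test the predicate, wait, and re-test after every wakeup, with cancellation folded into the predicate. Stripped of the thread-pool and request-suspension bookkeeping, the loop is:

    // Distilled shape of the wait loop above (not a drop-in replacement).
    for (;;) {
        nsAutoMonitor mon(worker->Pool()->Monitor());
        if (worker->IsCanceled() || !worker->IsSuspended())
            break;          // predicate settled; stop waiting
        mon.Wait();         // releases the monitor while blocked
    }

A wakeup by itself proves nothing, hence the re-check each iteration; the comment's guarantee — the pool re-enters its monitor after canceling each worker — is what keeps the Wait() from missing its notification.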
diff --git a/js/src/Makefile.in b/js/src/Makefile.in
index 27a86f8822a..4389ce709a5 100644
--- a/js/src/Makefile.in
+++ b/js/src/Makefile.in
@@ -152,6 +152,8 @@ CPPSRCS = \
 		jsopcode.cpp \
 		jsparse.cpp \
 		jsprf.cpp \
+		jspropertycache.cpp \
+		jspropertytree.cpp \
 		jsregexp.cpp \
 		jsscan.cpp \
 		jsscope.cpp \
@@ -206,6 +208,9 @@ INSTALLED_HEADERS = \
 		jsotypes.h \
 		jsparse.h \
 		jsprf.h \
+		jspropertycache.h \
+		jspropertycacheinlines.h \
+		jspropertytree.h \
 		jsproto.tbl \
 		jsprvtd.h \
 		jspubtd.h \
@@ -274,6 +279,27 @@ endif
 endif # ENABLE_TRACEJIT
 
+ifdef JS_HAS_CTYPES
+CPPSRCS += \
+    ctypes/CTypes.cpp \
+    ctypes/Library.cpp \
+    $(NULL)
+
+LOCAL_INCLUDES = \
+    -Ictypes/libffi/include \
+    $(NULL)
+
+ifeq ($(OS_ARCH),OS2)
+# libffi builds an aout lib on OS/2; convert it to an OMF lib.
+ctypes/libffi/.libs/libffi.$(LIB_SUFFIX): ctypes/libffi/.libs/libffi.a
+	emxomf $<
+endif
+
+SHARED_LIBRARY_LIBS = \
+    ctypes/libffi/.libs/libffi.$(LIB_SUFFIX) \
+    $(NULL)
+endif # JS_HAS_CTYPES
+
 ifdef HAVE_DTRACE
 INSTALLED_HEADERS += \
 	jsdtracef.h \
@@ -343,6 +369,16 @@ endif
 
 include $(topsrcdir)/config/rules.mk
 
+ifdef JS_HAS_CTYPES
+# Build libffi proper as part of the 'exports' target, so things get built
+# in the right order.
+export::
+	$(call SUBMAKE,,ctypes/libffi)
+
+clean::
+	$(call SUBMAKE,clean,ctypes/libffi)
+endif
+
 ifdef MOZ_SYNC_BUILD_FILES
 # Because the SpiderMonkey can be distributed and built independently
 # of the Mozilla source tree, it contains its own copies of many of
@@ -405,6 +441,10 @@ ifdef JS_THREADSAFE
 DEFINES += -DJS_THREADSAFE
 endif
 
+ifdef JS_HAS_CTYPES
+DEFINES += -DJS_HAS_CTYPES
+endif
+
 ifdef JS_NO_THIN_LOCKS
 DEFINES += -DJS_USE_ONLY_NSPR_LOCKS
 endif
diff --git a/js/src/Makefile.ref b/js/src/Makefile.ref
index 50308a2dc5c..0e8096805c7 100644
--- a/js/src/Makefile.ref
+++ b/js/src/Makefile.ref
@@ -91,6 +91,16 @@ OTHER_LIBS += -L$(DIST)/lib -lnspr$(NSPR_LIBSUFFIX)
 endif
 endif
 
+ifdef JS_HAS_CTYPES
+DEFINES += -DJS_HAS_CTYPES
+INCLUDES += -I$(DIST)/include/nspr
+ifdef USE_MSVC
+OTHER_LIBS += $(DIST)/lib/libnspr$(NSPR_LIBSUFFIX).lib
+else
+OTHER_LIBS += -L$(DIST)/lib -lnspr$(NSPR_LIBSUFFIX)
+endif
+endif
+
 ifdef JS_NO_THIN_LOCKS
 DEFINES += -DJS_USE_ONLY_NSPR_LOCKS
 endif
@@ -392,6 +402,7 @@ js-config-switch=$(if $(value $($1)),-e 's/\#undef $1/\#define $1/')
 $(OBJDIR)/js-config.h.stamp: js-config.h.in Makefile.ref
 	sed < $< > $(@:.stamp=.tmp) \
 	    $(call js-config-switch,JS_THREADSAFE) \
+	    $(call js-config-switch,JS_HAS_CTYPES) \
 	    $(call js-config-switch,JS_GC_ZEAL) \
 	    -e :dummy
 	if ! [ -f $(@:.stamp=) ] || ! cmp $(@:.stamp=.tmp) $(@:.stamp=); then \
diff --git a/js/src/config.mk b/js/src/config.mk
index 0e7696a5b24..f3f802e9593 100644
--- a/js/src/config.mk
+++ b/js/src/config.mk
@@ -127,9 +127,9 @@ INTERP_OPTIMIZER = -O2 -GL
 BUILTINS_OPTIMIZER = -O2 -GL
 LDFLAGS += -LTCG
 else
-OPTIMIZER = -Os -fstrict-aliasing -fno-exceptions -fno-rtti -Wstrict-aliasing=2
-BUILTINS_OPTIMIZER = -O9 -fstrict-aliasing -fno-exceptions -fno-rtti
-INTERP_OPTIMIZER = -O3 -fstrict-aliasing -fno-exceptions -fno-rtti
+OPTIMIZER = -Os -fno-exceptions -fno-rtti -fstrict-aliasing -Wstrict-aliasing=3
+BUILTINS_OPTIMIZER = -O9 -fno-exceptions -fno-rtti -fstrict-aliasing
+INTERP_OPTIMIZER = -O3 -fno-exceptions -fno-rtti -fstrict-aliasing
 endif
 DEFINES += -UDEBUG -DNDEBUG -UDEBUG_$(USER)
 OBJDIR_TAG = _OPT
@@ -139,7 +139,7 @@ OPTIMIZER = -Zi
 INTERP_OPTIMIZER = -Zi
 BUILTINS_OPTIMIZER = $(INTERP_OPTIMIZER)
 else
-OPTIMIZER = -g3 -fstrict-aliasing -fno-exceptions -fno-rtti -Wstrict-aliasing=2
+OPTIMIZER = -g3 -fstrict-aliasing -fno-exceptions -fno-rtti -Wstrict-aliasing=3
 INTERP_OPTIMIZER = -g3 -fstrict-aliasing -fno-exceptions -fno-rtti
 BUILTINS_OPTIMIZER = $(INTERP_OPTIMIZER)
 endif
diff --git a/js/src/config/autoconf.mk.in b/js/src/config/autoconf.mk.in
index f5cf1e35890..e9fb9ad0185 100644
--- a/js/src/config/autoconf.mk.in
+++ b/js/src/config/autoconf.mk.in
@@ -92,6 +92,7 @@ MOZ_JPROF      = @MOZ_JPROF@
 MOZ_SHARK      = @MOZ_SHARK@
 MOZ_CALLGRIND  = @MOZ_CALLGRIND@
 MOZ_VTUNE      = @MOZ_VTUNE@
+JS_HAS_CTYPES  = @JS_HAS_CTYPES@
 DEHYDRA_PATH   = @DEHYDRA_PATH@
 
 NS_TRACE_MALLOC = @NS_TRACE_MALLOC@
+ CXXFLAGS="$CXXFLAGS -wd4345" AC_LANG_SAVE AC_LANG_C AC_TRY_COMPILE([#include ], @@ -4303,6 +4305,22 @@ MOZ_ARG_WITH_STRING(wrap-malloc, [ --with-wrap-malloc=DIR Location of malloc wrapper library], WRAP_MALLOC_LIB=$withval) +dnl ======================================================== +dnl = Build jsctypes if it's enabled +dnl ======================================================== +MOZ_ARG_ENABLE_BOOL(ctypes, +[ --enable-ctypes Enable js-ctypes (default=no)], + JS_HAS_CTYPES=1, + JS_HAS_CTYPES= ) +AC_SUBST(JS_HAS_CTYPES) +if test "$JS_HAS_CTYPES"; then + if test "$_MSC_VER" && test -z $AS; then + # Error out if we're on MSVC and MASM is unavailable. + AC_MSG_ERROR([No suitable assembler found. An assembler is required to build js-ctypes. If you are building with MS Visual Studio 8 Express, you may download the MASM 8.0 package, upgrade to Visual Studio 9 Express, or install the Vista SDK.]) + fi + AC_DEFINE(JS_HAS_CTYPES) +fi + dnl ======================================================== dnl = Use TraceVis dnl ======================================================== @@ -4317,6 +4335,16 @@ if test -n "$MOZ_TRACEVIS"; then fi fi +dnl ======================================================== +dnl = Use GCTimer +dnl ======================================================== +MOZ_ARG_ENABLE_BOOL(gctimer, +[ --enable-gctimer Enable GC timer (default=no)], + MOZ_GCTIMER=1, + MOZ_GCTIMER= ) +if test -n "$MOZ_GCTIMER"; then + AC_DEFINE(MOZ_GCTIMER) +fi dnl ======================================================== dnl = Use Valgrind @@ -5245,6 +5273,8 @@ MAKEFILES=" Makefile shell/Makefile lirasm/Makefile + ctypes/Makefile + ctypes/tests/Makefile jsapi-tests/Makefile tests/Makefile config/Makefile @@ -5295,3 +5325,55 @@ fi # 'js-config' in Makefile.in. AC_MSG_RESULT(invoking make to create js-config script) $MAKE js-config + +# Build jsctypes if it's enabled. +if test "$JS_HAS_CTYPES"; then + # Run the libffi 'configure' script. + ac_configure_args="--disable-shared --enable-static --disable-raw-api" + if test "$MOZ_DEBUG"; then + ac_configure_args="$ac_configure_args --enable-debug" + fi + if test "$DSO_PIC_CFLAGS"; then + ac_configure_args="$ac_configure_args --with-pic" + fi + if test "$CROSS_COMPILE"; then + case "$target" in + *-mingw*) + ac_configure_args="$ac_configure_args --build=$build --host=${target_cpu}-${target_os} HOST_CC=\"$HOST_CC\" CC=\"$CC\"" + ;; + *) + ac_configure_args="$ac_configure_args --build=$build --host=$target HOST_CC=\"$HOST_CC\" CC=\"$CC\"" + ;; + esac + fi + if test "$_MSC_VER"; then + # Use a wrapper script for cl and ml that looks more like gcc. + # autotools can't quite handle an MSVC build environment yet. + ac_configure_args="$ac_configure_args LD=link CPP=\"cl -nologo -EP\" SHELL=sh.exe" + case "${target_cpu}" in + x86_64) + # Need target since MSYS tools into mozilla-build may be 32bit + ac_configure_args="$ac_configure_args CC=\"$_topsrcdir/ctypes/libffi/msvcc.sh -m64\" --build=$build --host=$target" + ;; + *) + ac_configure_args="$ac_configure_args CC=$_topsrcdir/ctypes/libffi/msvcc.sh" + ;; + esac + fi + if test "$SOLARIS_SUNPRO_CC"; then + # Always use gcc for libffi on Solaris + ac_configure_args="$ac_configure_args CC=gcc" + fi + + # Use a separate cache file for libffi, since it does things differently + # from our configure. 
@@ -5295,3 +5325,55 @@ fi
 # 'js-config' in Makefile.in.
 AC_MSG_RESULT(invoking make to create js-config script)
 $MAKE js-config
+
+# Build jsctypes if it's enabled.
+if test "$JS_HAS_CTYPES"; then
+  # Run the libffi 'configure' script.
+  ac_configure_args="--disable-shared --enable-static --disable-raw-api"
+  if test "$MOZ_DEBUG"; then
+    ac_configure_args="$ac_configure_args --enable-debug"
+  fi
+  if test "$DSO_PIC_CFLAGS"; then
+    ac_configure_args="$ac_configure_args --with-pic"
+  fi
+  if test "$CROSS_COMPILE"; then
+    case "$target" in
+    *-mingw*)
+      ac_configure_args="$ac_configure_args --build=$build --host=${target_cpu}-${target_os} HOST_CC=\"$HOST_CC\" CC=\"$CC\""
+      ;;
+    *)
+      ac_configure_args="$ac_configure_args --build=$build --host=$target HOST_CC=\"$HOST_CC\" CC=\"$CC\""
+      ;;
+    esac
+  fi
+  if test "$_MSC_VER"; then
+    # Use a wrapper script for cl and ml that looks more like gcc.
+    # autotools can't quite handle an MSVC build environment yet.
+    ac_configure_args="$ac_configure_args LD=link CPP=\"cl -nologo -EP\" SHELL=sh.exe"
+    case "${target_cpu}" in
+    x86_64)
+      # Need target since MSYS tools into mozilla-build may be 32bit
+      ac_configure_args="$ac_configure_args CC=\"$_topsrcdir/ctypes/libffi/msvcc.sh -m64\" --build=$build --host=$target"
+      ;;
+    *)
+      ac_configure_args="$ac_configure_args CC=$_topsrcdir/ctypes/libffi/msvcc.sh"
+      ;;
+    esac
+  fi
+  if test "$SOLARIS_SUNPRO_CC"; then
+    # Always use gcc for libffi on Solaris
+    ac_configure_args="$ac_configure_args CC=gcc"
+  fi
+
+  # Use a separate cache file for libffi, since it does things differently
+  # from our configure.
+  old_cache_file=$cache_file
+  cache_file=ctypes/libffi/config.cache
+  old_config_files=$CONFIG_FILES
+  unset CONFIG_FILES
+  AC_OUTPUT_SUBDIRS(ctypes/libffi)
+  cache_file=$old_cache_file
+  ac_configure_args="$_SUBDIR_CONFIG_ARGS"
+  CONFIG_FILES=$old_config_files
+fi
+
- for (PRUint32 i = 0; i <= SLOT_UINT64PROTO; ++i) { + for (JSUint32 i = 0; i <= SLOT_UINT64PROTO; ++i) { if (!JS_SetReservedSlot(cx, proto, i, OBJECT_TO_JSVAL(protos[i]))) return false; } @@ -800,25 +790,25 @@ InitTypeClasses(JSContext* cx, JSObject* parent) sPointerInstanceProps, protos[SLOT_POINTERPROTO], protos[SLOT_POINTERDATAPROTO])) return false; - JSAutoTempValueRooter proot(cx, protos[SLOT_POINTERDATAPROTO]); + js::AutoValueRooter proot(cx, protos[SLOT_POINTERDATAPROTO]); if (!InitTypeConstructor(cx, parent, CTypeProto, CDataProto, sArrayFunction, sArrayProps, sArrayInstanceFunctions, sArrayInstanceProps, protos[SLOT_ARRAYPROTO], protos[SLOT_ARRAYDATAPROTO])) return false; - JSAutoTempValueRooter aroot(cx, protos[SLOT_ARRAYDATAPROTO]); + js::AutoValueRooter aroot(cx, protos[SLOT_ARRAYDATAPROTO]); if (!InitTypeConstructor(cx, parent, CTypeProto, CDataProto, sStructFunction, sStructProps, sStructInstanceFunctions, NULL, protos[SLOT_STRUCTPROTO], protos[SLOT_STRUCTDATAPROTO])) return false; - JSAutoTempValueRooter sroot(cx, protos[SLOT_STRUCTDATAPROTO]); + js::AutoValueRooter sroot(cx, protos[SLOT_STRUCTDATAPROTO]); if (!InitTypeConstructor(cx, parent, CTypeProto, CDataProto, sFunctionFunction, sFunctionProps, NULL, NULL, protos[SLOT_FUNCTIONPROTO], protos[SLOT_FUNCTIONDATAPROTO])) return false; - JSAutoTempValueRooter froot(cx, protos[SLOT_FUNCTIONDATAPROTO]); + js::AutoValueRooter froot(cx, protos[SLOT_FUNCTIONDATAPROTO]); protos[SLOT_CDATAPROTO] = CDataProto; @@ -897,6 +887,33 @@ InitTypeClasses(JSContext* cx, JSObject* parent) return true; } +JS_BEGIN_EXTERN_C + +JS_PUBLIC_API(JSBool) +JS_InitCTypesClass(JSContext* cx, JSObject* global) +{ + // attach ctypes property to global object + JSObject* ctypes = JS_NewObject(cx, NULL, NULL, NULL); + if (!ctypes) + return false; + + if (!JS_DefineProperty(cx, global, "ctypes", OBJECT_TO_JSVAL(ctypes), + NULL, NULL, JSPROP_ENUMERATE | JSPROP_READONLY | JSPROP_PERMANENT)) + return false; + + if (!InitTypeClasses(cx, ctypes)) + return false; + + // attach API functions + if (!JS_DefineFunctions(cx, ctypes, sModuleFunctions)) + return false; + + // Seal the ctypes object, to prevent modification. + return JS_SealObject(cx, ctypes, JS_FALSE); +} + +JS_END_EXTERN_C + /******************************************************************************* ** Type conversion functions *******************************************************************************/ @@ -906,16 +923,15 @@ InitTypeClasses(JSContext* cx, JSObject* parent) // autoconverts to a primitive JS number; to support ILP64 architectures, it // would need to autoconvert to an Int64 object instead. Therefore we enforce // this invariant here.) 
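JS_InitCTypesClass is the new public entry point for turning ctypes on from an embedding: create the global, initialize the standard classes, then install ctypes. A sketch of the expected call sequence (global_class and error handling are the embedder's own):

    // Enabling ctypes from an embedding (sketch).
    JSObject* global = JS_NewObject(cx, &global_class, NULL, NULL);
    if (!global || !JS_InitStandardClasses(cx, global))
        return JS_FALSE;
    #ifdef JS_HAS_CTYPES
    if (!JS_InitCTypesClass(cx, global))   // defines the sealed 'ctypes' object
        return JS_FALSE;
    #endif

Script can then reach ctypes.open() and ctypes.cast(), the two module functions installed from sModuleFunctions above.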
@@ -906,16 +923,15 @@ InitTypeClasses(JSContext* cx, JSObject* parent)
 // autoconverts to a primitive JS number; to support ILP64 architectures, it
 // would need to autoconvert to an Int64 object instead. Therefore we enforce
 // this invariant here.)
-PR_STATIC_ASSERT(sizeof(bool) == 1 || sizeof(bool) == 4);
-PR_STATIC_ASSERT(sizeof(char) == 1);
-PR_STATIC_ASSERT(sizeof(short) == 2);
-PR_STATIC_ASSERT(sizeof(int) == 4);
-PR_STATIC_ASSERT(sizeof(unsigned) == 4);
-PR_STATIC_ASSERT(sizeof(long) == 4 || sizeof(long) == 8);
-PR_STATIC_ASSERT(sizeof(long long) == 8);
-PR_STATIC_ASSERT(sizeof(size_t) == sizeof(uintptr_t));
-PR_STATIC_ASSERT(sizeof(float) == 4);
-PR_STATIC_ASSERT(sizeof(jschar) == sizeof(PRUnichar));
+JS_STATIC_ASSERT(sizeof(bool) == 1 || sizeof(bool) == 4);
+JS_STATIC_ASSERT(sizeof(char) == 1);
+JS_STATIC_ASSERT(sizeof(short) == 2);
+JS_STATIC_ASSERT(sizeof(int) == 4);
+JS_STATIC_ASSERT(sizeof(unsigned) == 4);
+JS_STATIC_ASSERT(sizeof(long) == 4 || sizeof(long) == 8);
+JS_STATIC_ASSERT(sizeof(long long) == 8);
+JS_STATIC_ASSERT(sizeof(size_t) == sizeof(uintptr_t));
+JS_STATIC_ASSERT(sizeof(float) == 4);
 
 template<class IntegerType>
 static JS_ALWAYS_INLINE IntegerType
@@ -928,12 +944,12 @@ Convert(jsdouble d)
 // MSVC can't perform double to unsigned __int64 conversion when the
 // double is greater than 2^63 - 1. Help it along a little.
 template<>
-JS_ALWAYS_INLINE PRUint64
-Convert<PRUint64>(jsdouble d)
+JS_ALWAYS_INLINE JSUint64
+Convert<JSUint64>(jsdouble d)
 {
   return d > 0x7fffffffffffffffui64 ?
-    PRUint64(d - 0x8000000000000000ui64) + 0x8000000000000000ui64 :
-    PRUint64(d);
+    JSUint64(d - 0x8000000000000000ui64) + 0x8000000000000000ui64 :
+    JSUint64(d);
 }
 #endif
 
@@ -1048,23 +1064,23 @@ jsvalToInteger(JSContext* cx, jsval val, IntegerType* result)
   }
 
   if (Int64::IsInt64(cx, obj)) {
-    PRInt64 i = Int64Base::GetInt(cx, obj);
+    JSInt64 i = Int64Base::GetInt(cx, obj);
     *result = IntegerType(i);
 
     // Make sure the integer fits in IntegerType.
     if (IsUnsigned<IntegerType>() && i < 0)
       return false;
-    return PRInt64(*result) == i;
+    return JSInt64(*result) == i;
   }
 
   if (UInt64::IsUInt64(cx, obj)) {
-    PRUint64 i = Int64Base::GetInt(cx, obj);
+    JSUint64 i = Int64Base::GetInt(cx, obj);
     *result = IntegerType(i);
 
     // Make sure the integer fits in IntegerType.
     if (!IsUnsigned<IntegerType>() && *result < 0)
       return false;
-    return PRUint64(*result) == i;
+    return JSUint64(*result) == i;
   }
 
   return false;
@@ -1184,23 +1200,23 @@ jsvalToBigInteger(JSContext* cx,
     // Allow conversion from an Int64 or UInt64 object directly.
     JSObject* obj = JSVAL_TO_OBJECT(val);
 
     if (UInt64::IsUInt64(cx, obj)) {
-      PRUint64 i = Int64Base::GetInt(cx, obj);
+      JSUint64 i = Int64Base::GetInt(cx, obj);
       *result = IntegerType(i);
 
       // Make sure the integer fits in IntegerType.
       if (!IsUnsigned<IntegerType>() && *result < 0)
         return false;
-      return PRUint64(*result) == i;
+      return JSUint64(*result) == i;
     }
 
     if (Int64::IsInt64(cx, obj)) {
-      PRInt64 i = Int64Base::GetInt(cx, obj);
+      JSInt64 i = Int64Base::GetInt(cx, obj);
       *result = IntegerType(i);
 
       // Make sure the integer fits in IntegerType.
       if (IsUnsigned<IntegerType>() && i < 0)
         return false;
-      return PRInt64(*result) == i;
+      return JSInt64(*result) == i;
     }
   }
   return false;
@@ -1246,12 +1262,12 @@ jsvalToIntegerExplicit(JSContext* cx, jsval val, IntegerType* result)
     // Convert Int64 and UInt64 values by C-style cast.
     JSObject* obj = JSVAL_TO_OBJECT(val);
     if (Int64::IsInt64(cx, obj)) {
-      PRInt64 i = Int64Base::GetInt(cx, obj);
+      JSInt64 i = Int64Base::GetInt(cx, obj);
       *result = IntegerType(i);
       return true;
     }
     if (UInt64::IsUInt64(cx, obj)) {
-      PRUint64 i = Int64Base::GetInt(cx, obj);
+      JSUint64 i = Int64Base::GetInt(cx, obj);
       *result = IntegerType(i);
       return true;
     }
@@ -1290,35 +1306,36 @@ jsvalToPtrExplicit(JSContext* cx, jsval val, uintptr_t* result)
   if (!JSVAL_IS_PRIMITIVE(val)) {
     JSObject* obj = JSVAL_TO_OBJECT(val);
     if (Int64::IsInt64(cx, obj)) {
-      PRInt64 i = Int64Base::GetInt(cx, obj);
+      JSInt64 i = Int64Base::GetInt(cx, obj);
       intptr_t p = intptr_t(i);
 
       // Make sure the integer fits in the alotted precision.
-      if (PRInt64(p) != i)
+      if (JSInt64(p) != i)
         return false;
       *result = uintptr_t(p);
       return true;
     }
 
     if (UInt64::IsUInt64(cx, obj)) {
-      PRUint64 i = Int64Base::GetInt(cx, obj);
+      JSUint64 i = Int64Base::GetInt(cx, obj);
 
       // Make sure the integer fits in the alotted precision.
      *result = uintptr_t(i);
-      return PRUint64(*result) == i;
+      return JSUint64(*result) == i;
    }
   }
   return false;
 }
 
 template<class IntegerType>
-nsAutoString
-IntegerToString(IntegerType i, jsuint radix)
+void
+IntegerToString(IntegerType i, jsuint radix, AutoString& result)
 {
   // The buffer must be big enough for all the bits of IntegerType to fit,
   // in base-2, including '-'.
-  PRUnichar buffer[sizeof(IntegerType) * 8 + 1];
-  PRUnichar* cp = buffer + sizeof(buffer) / sizeof(PRUnichar);
+  jschar buffer[sizeof(IntegerType) * 8 + 1];
+  jschar* end = buffer + sizeof(buffer) / sizeof(jschar);
+  jschar* cp = end;
 
   // Build the string in reverse. We use multiplication and subtraction
   // instead of modulus because that's much faster.
@@ -1335,7 +1352,7 @@ IntegerToString(IntegerType i, jsuint radix)
     *--cp = '-';
 
   JS_ASSERT(cp >= buffer);
-  return nsAutoString(cp, buffer + sizeof(buffer) / sizeof(PRUnichar) - cp);
+  result.append(cp, end);
 }
 
 template<class IntegerType>
@@ -1387,21 +1404,6 @@ StringToInteger(JSContext* cx, JSString* string, IntegerType* result)
   return true;
 }
 
-static bool
-IsUTF16(const jschar* string, size_t length)
-{
-  PRBool error;
-  const PRUnichar* buffer = reinterpret_cast<const PRUnichar*>(string);
-  const PRUnichar* end = buffer + length;
-  while (buffer != end) {
-    UTF16CharEnumerator::NextChar(&buffer, end, &error);
-    if (error)
-      return false;
-  }
-
-  return true;
-}
-
 template<class CharType>
 static size_t
 strnlen(const CharType* begin, size_t max)
@@ -1458,14 +1460,14 @@ ConvertToJS(JSContext* cx,
 #define DEFINE_WRAPPED_INT_TYPE(name, type, ffiType) \
   case TYPE_##name: { \
     /* Return an Int64 or UInt64 object - do not convert to a JS number. */ \
-    PRUint64 value; \
+    JSUint64 value; \
    JSObject* proto; \
    if (IsUnsigned<type>()) { \
      value = *static_cast<type*>(data); \
      /* Get ctypes.UInt64.prototype from ctypes.CType.prototype. */ \
      proto = CType::GetProtoFromType(cx, typeObj, SLOT_UINT64PROTO); \
    } else { \
-      value = PRInt64(*static_cast<type*>(data)); \
+      value = JSInt64(*static_cast<type*>(data)); \
      /* Get ctypes.Int64.prototype from ctypes.CType.prototype. */ \
      proto = CType::GetProtoFromType(cx, typeObj, SLOT_INT64PROTO); \
    } \
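IntegerToString above builds the string backwards from the end of a worst-case buffer, deriving each digit from a single division ("multiplication and subtraction instead of modulus", per its comment). The same loop in isolation, unsigned case only — the real function peels the sign off separately:

    // Equivalent digit loop for an unsigned integer type; radix in [2, 36].
    template <typename UInt>
    static char* WriteDigits(UInt i, unsigned radix, char* end)
    {
        char* cp = end;                                // fill from the back
        do {
            UInt q = i / radix;
            unsigned digit = unsigned(i - q * radix);  // i % radix via one division
            *--cp = "0123456789abcdefghijklmnopqrstuvwxyz"[digit];
            i = q;
        } while (i != 0);
        return cp;                                     // digits occupy [cp, end)
    }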
@@ -1657,21 +1659,22 @@ ImplicitConvert(JSContext* cx,
     case TYPE_signed_char:
     case TYPE_unsigned_char: {
       // Convert from UTF-16 to UTF-8.
-      if (!IsUTF16(sourceChars, sourceLength))
-        return TypeError(cx, "UTF-16 string", val);
-
-      NS_ConvertUTF16toUTF8 converted(
-        reinterpret_cast<const PRUnichar*>(sourceChars), sourceLength);
+      size_t nbytes =
+        js_GetDeflatedUTF8StringLength(cx, sourceChars, sourceLength);
+      if (nbytes == (size_t) -1)
+        return false;
 
       char** charBuffer = static_cast<char**>(buffer);
-      *charBuffer = new char[converted.Length() + 1];
+      *charBuffer = new char[nbytes + 1];
       if (!*charBuffer) {
         JS_ReportAllocationOverflow(cx);
         return false;
       }
 
+      ASSERT_OK(js_DeflateStringToUTF8Buffer(cx, sourceChars, sourceLength,
+                  *charBuffer, &nbytes));
+      (*charBuffer)[nbytes] = 0;
       *freePointer = true;
-      memcpy(*charBuffer, converted.get(), converted.Length() + 1);
       break;
     }
     case TYPE_jschar: {
@@ -1719,20 +1722,22 @@ ImplicitConvert(JSContext* cx,
       case TYPE_signed_char:
       case TYPE_unsigned_char: {
         // Convert from UTF-16 to UTF-8.
-        if (!IsUTF16(sourceChars, sourceLength))
-          return TypeError(cx, "UTF-16 string", val);
+        size_t nbytes =
+          js_GetDeflatedUTF8StringLength(cx, sourceChars, sourceLength);
+        if (nbytes == (size_t) -1)
+          return false;
 
-        NS_ConvertUTF16toUTF8 converted(
-          reinterpret_cast<const PRUnichar*>(sourceChars), sourceLength);
-
-        if (targetLength < converted.Length()) {
+        if (targetLength < nbytes) {
           JS_ReportError(cx, "ArrayType has insufficient length");
           return false;
         }
-        memcpy(buffer, converted.get(), converted.Length());
-        if (targetLength > converted.Length())
-          static_cast<char*>(buffer)[converted.Length()] = 0;
+        char* charBuffer = static_cast<char*>(buffer);
+        ASSERT_OK(js_DeflateStringToUTF8Buffer(cx, sourceChars, sourceLength,
+                    charBuffer, &nbytes));
+
+        if (targetLength > nbytes)
+          charBuffer[nbytes] = 0;
 
         break;
       }
@@ -1768,23 +1773,23 @@ ImplicitConvert(JSContext* cx,
       // Convert into an intermediate, in case of failure.
       size_t elementSize = CType::GetSize(cx, baseType);
       size_t arraySize = elementSize * targetLength;
-      nsAutoArrayPtr<char> intermediate(new char[arraySize]);
+      AutoPtr<char>::Array intermediate(new char[arraySize]);
       if (!intermediate) {
         JS_ReportAllocationOverflow(cx);
         return false;
       }
 
       for (jsuint i = 0; i < sourceLength; ++i) {
-        JSAutoTempValueRooter item(cx);
+        js::AutoValueRooter item(cx);
         if (!JS_GetElement(cx, sourceArray, i, item.addr()))
           return false;
 
-        char* data = intermediate + elementSize * i;
+        char* data = intermediate.get() + elementSize * i;
         if (!ImplicitConvert(cx, item.value(), baseType, data, false, NULL))
           return false;
       }
 
-      memcpy(buffer, intermediate, arraySize);
+      memcpy(buffer, intermediate.get(), arraySize);
 
     } else {
       // Don't implicitly convert to string. Users can implicitly convert
@@ -1795,33 +1800,31 @@ ImplicitConvert(JSContext* cx,
     }
     case TYPE_struct: {
       if (!JSVAL_IS_PRIMITIVE(val) && !sourceData) {
-        nsTArray<FieldInfo>* fields = StructType::GetFieldInfo(cx, targetType);
-
         // Enumerate the properties of the object; if they match the struct
         // specification, convert the fields.
         JSObject* obj = JSVAL_TO_OBJECT(val);
         JSObject* iter = JS_NewPropertyIterator(cx, obj);
         if (!iter)
           return false;
-        JSAutoTempValueRooter iterroot(cx, iter);
+        js::AutoValueRooter iterroot(cx, iter);
 
         // Convert into an intermediate, in case of failure.
         size_t structSize = CType::GetSize(cx, targetType);
-        nsAutoArrayPtr<char> intermediate(new char[structSize]);
+        AutoPtr<char>::Array intermediate(new char[structSize]);
         if (!intermediate) {
           JS_ReportAllocationOverflow(cx);
           return false;
         }
 
         jsid id;
-        jsuint i = 0;
+        size_t i = 0;
         while (1) {
           if (!JS_NextProperty(cx, iter, &id))
             return false;
           if (JSVAL_IS_VOID(id))
             break;
 
-          JSAutoTempValueRooter fieldVal(cx);
+          js::AutoValueRooter fieldVal(cx);
           if (!JS_IdToValue(cx, id, fieldVal.addr()))
             return false;
           if (!JSVAL_IS_STRING(fieldVal.value())) {
@@ -1838,24 +1841,25 @@ ImplicitConvert(JSContext* cx,
           const jschar* name = JS_GetStringChars(nameStr);
           size_t namelen = JS_GetStringLength(nameStr);
 
-          JSAutoTempValueRooter prop(cx);
+          js::AutoValueRooter prop(cx);
           if (!JS_GetUCProperty(cx, obj, name, namelen, prop.addr()))
             return false;
 
           // Convert the field via ImplicitConvert().
-          char* fieldData = intermediate + field->mOffset;
+          char* fieldData = intermediate.get() + field->mOffset;
           if (!ImplicitConvert(cx, prop.value(), field->mType, fieldData, false, NULL))
             return false;
 
           ++i;
         }
 
-        if (i != fields->Length()) {
+        Array<FieldInfo>* fields = StructType::GetFieldInfo(cx, targetType);
+        if (i != fields->length()) {
          JS_ReportError(cx, "missing fields");
           return false;
         }
 
-        memcpy(buffer, intermediate, structSize);
+        memcpy(buffer, intermediate.get(), structSize);
         break;
       }
@@ -1882,7 +1886,7 @@ ExplicitConvert(JSContext* cx, jsval val, JSObject* targetType, void* buffer)
   // If ImplicitConvert failed, and there is no pending exception, then assume
   // hard failure (out of memory, or some other similarly serious condition).
   // We store any pending exception in case we need to re-throw it.
-  JSAutoTempValueRooter ex(cx);
+  js::AutoValueRooter ex(cx);
   if (!JS_GetPendingException(cx, ex.addr()))
     return false;
 
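Both the array and struct branches above convert into a heap scratch buffer first and memcpy into the caller's buffer only after every element or field has converted; a conversion that fails halfway therefore cannot leave the destination partially overwritten. The pattern in outline (plain new/delete standing in for the patch's AutoPtr helper):

    // Convert-then-commit outline; conversion details elided.
    bool ConvertAll(JSContext* cx, size_t size, void* dest)
    {
        char* intermediate = new char[size];    // scratch buffer
        if (!intermediate)
            return false;
        // ... convert each piece into 'intermediate'; on any failure,
        //     delete[] intermediate and return false, leaving 'dest' untouched.
        memcpy(dest, intermediate, size);       // commit only on full success
        delete[] intermediate;
        return true;
    }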
@@ -1952,16 +1956,17 @@ ExplicitConvert(JSContext* cx, jsval val, JSObject* targetType, void* buffer)
 // corresponding to 'typeObj'. For instance, the CType constructed from
 // 'ctypes.int32_t.ptr.array(4).ptr.ptr' will result in the type string
 // 'int32_t*(**)[4]'.
-static nsAutoString
+static JSString*
 BuildTypeName(JSContext* cx, JSObject* typeObj)
 {
+  AutoString result;
+
   // Walk the hierarchy of types, outermost to innermost, building up the type
   // string. This consists of the base type, which goes on the left.
   // Derived type modifiers (* and []) build from the inside outward, with
   // pointers on the left and arrays on the right. An excellent description
   // of the rules for building C type declarations can be found at:
   // http://unixwiz.net/techtips/reading-cdecl.html
-  nsAutoString result;
   JSObject* currentType = typeObj;
   JSObject* nextType;
   TypeCode prevGrouping = CType::GetTypeCode(cx, currentType), currentGrouping;
@@ -1976,7 +1981,7 @@ BuildTypeName(JSContext* cx, JSObject* typeObj)
       }
 
       // Pointer types go on the left.
-      result.Insert('*', 0);
+      PrependString(result, "*");
 
       currentType = nextType;
       prevGrouping = currentGrouping;
@@ -1985,17 +1990,17 @@ BuildTypeName(JSContext* cx, JSObject* typeObj)
     case TYPE_array: {
       if (prevGrouping == TYPE_pointer) {
         // Outer type is pointer, inner type is array. Grouping is required.
-        result.Insert('(', 0);
-        result.Append(')');
-      }
+        PrependString(result, "(");
+        AppendString(result, ")");
+      }
 
       // Array types go on the right.
-      result.Append('[');
+      AppendString(result, "[");
       size_t length;
-      if (ArrayType::GetSafeLength(cx, currentType, &length)) {
-        result.Append(IntegerToString(length, 10));
-      }
-      result.Append(']');
+      if (ArrayType::GetSafeLength(cx, currentType, &length))
+        IntegerToString(length, 10, result);
+
+      AppendString(result, "]");
 
       currentType = ArrayType::GetBaseType(cx, currentType);
       prevGrouping = currentGrouping;
@@ -2005,28 +2010,28 @@ BuildTypeName(JSContext* cx, JSObject* typeObj)
       FunctionInfo* fninfo = FunctionType::GetFunctionInfo(cx, currentType);
 
       // Function pointer goes on the left.
-      result.Insert('*', 0);
+      PrependString(result, "*");
 
       // Add in the calling convention, if it's not cdecl.
       if (GetABICode(cx, fninfo->mABI) == ABI_STDCALL)
-        result.Insert(NS_LITERAL_STRING("__stdcall "), 0);
+        PrependString(result, "__stdcall ");
 
       // Wrap the entire expression so far with parens.
-      result.Insert('(', 0);
-      result.Append(')');
+      PrependString(result, "(");
+      AppendString(result, ")");
 
       // Argument list goes on the right.
-      result.Append('(');
-      for (PRUint32 i = 0; i < fninfo->mArgTypes.Length(); ++i) {
+      AppendString(result, "(");
+      for (size_t i = 0; i < fninfo->mArgTypes.length(); ++i) {
         JSString* argName = CType::GetName(cx, fninfo->mArgTypes[i]);
-        result.Append(GetString(argName));
-        if (i != fninfo->mArgTypes.Length() - 1 ||
+        AppendString(result, argName);
+        if (i != fninfo->mArgTypes.length() - 1 ||
             fninfo->mIsVariadic)
-          result.Append(NS_LITERAL_STRING(", "));
+          AppendString(result, ", ");
       }
       if (fninfo->mIsVariadic)
-        result.Append(NS_LITERAL_STRING("..."));
-      result.Append(')');
+        AppendString(result, "...");
+      AppendString(result, ")");
 
       // Set 'currentType' to the return type, and let the loop process it.
       currentType = fninfo->mReturnType;
@@ -2042,8 +2047,8 @@ BuildTypeName(JSContext* cx, JSObject* typeObj)
 
   // Stick the base type and derived type parts together.
   JSString* baseName = CType::GetName(cx, currentType);
-  result.Insert(GetString(baseName), 0);
-  return result;
+  PrependString(result, baseName);
+  return NewUCString(cx, result);
 }
 
 // Given a CType 'typeObj', generate a string 'result' such that 'eval(result)'
@@ -2053,55 +2058,57 @@ BuildTypeName(JSContext* cx, JSObject* typeObj)
 // (This means the type comparison function CType::TypesEqual will return true
 // when comparing the input and output of BuildTypeSource, since struct
 // equality is determined by strict JSObject pointer equality.)
-static nsAutoString
-BuildTypeSource(JSContext* cx, JSObject* typeObj, bool makeShort)
+static void
+BuildTypeSource(JSContext* cx,
+                JSObject* typeObj,
+                bool makeShort,
+                AutoString& result)
 {
   // Walk the types, building up the toSource() string.
-  nsAutoString result;
   switch (CType::GetTypeCode(cx, typeObj)) {
   case TYPE_void_t:
 #define DEFINE_TYPE(name, type, ffiType) \
   case TYPE_##name:
 #include "typedefs.h"
   {
-    result.Append(NS_LITERAL_STRING("ctypes."));
+    AppendString(result, "ctypes.");
     JSString* nameStr = CType::GetName(cx, typeObj);
-    result.Append(GetString(nameStr));
+    AppendString(result, nameStr);
     break;
   }
   case TYPE_pointer: {
     JSObject* baseType = PointerType::GetBaseType(cx, typeObj);
     if (!baseType) {
       // Opaque pointer type. Use the type's name.
-      result.Append(NS_LITERAL_STRING("ctypes.PointerType(\""));
+      AppendString(result, "ctypes.PointerType(\"");
       JSString* baseName = CType::GetName(cx, typeObj);
-      result.Append(GetString(baseName));
-      result.Append(NS_LITERAL_STRING("\")"));
+      AppendString(result, baseName);
+      AppendString(result, "\")");
       break;
     }
 
     // Specialcase ctypes.voidptr_t.
     if (CType::GetTypeCode(cx, baseType) == TYPE_void_t) {
-      result.Append(NS_LITERAL_STRING("ctypes.voidptr_t"));
+      AppendString(result, "ctypes.voidptr_t");
      break;
     }
 
     // Recursively build the source string, and append '.ptr'.
-    result.Append(BuildTypeSource(cx, baseType, makeShort));
-    result.Append(NS_LITERAL_STRING(".ptr"));
+    BuildTypeSource(cx, baseType, makeShort, result);
+    AppendString(result, ".ptr");
     break;
   }
   case TYPE_function: {
     FunctionInfo* fninfo = FunctionType::GetFunctionInfo(cx, typeObj);
 
-    result.Append(NS_LITERAL_STRING("ctypes.FunctionType("));
+    AppendString(result, "ctypes.FunctionType(");
 
     switch (GetABICode(cx, fninfo->mABI)) {
     case ABI_DEFAULT:
-      result.Append(NS_LITERAL_STRING("ctypes.default_abi, "));
+      AppendString(result, "ctypes.default_abi, ");
      break;
     case ABI_STDCALL:
-      result.Append(NS_LITERAL_STRING("ctypes.stdcall_abi, "));
+      AppendString(result, "ctypes.stdcall_abi, ");
       break;
     case INVALID_ABI:
       JS_NOT_REACHED("invalid abi");
@@ -2110,22 +2117,22 @@ BuildTypeSource(JSContext* cx, JSObject* typeObj, bool makeShort)
 
     // Recursively build the source string describing the function return and
     // argument types.
-    result.Append(BuildTypeSource(cx, fninfo->mReturnType, true));
+    BuildTypeSource(cx, fninfo->mReturnType, true, result);
 
-    if (fninfo->mArgTypes.Length() > 0) {
-      result.Append(NS_LITERAL_STRING(", ["));
-      for (PRUint32 i = 0; i < fninfo->mArgTypes.Length(); ++i) {
-        result.Append(BuildTypeSource(cx, fninfo->mArgTypes[i], true));
-        if (i != fninfo->mArgTypes.Length() - 1 ||
+    if (fninfo->mArgTypes.length() > 0) {
+      AppendString(result, ", [");
+      for (size_t i = 0; i < fninfo->mArgTypes.length(); ++i) {
+        BuildTypeSource(cx, fninfo->mArgTypes[i], true, result);
+        if (i != fninfo->mArgTypes.length() - 1 ||
            fninfo->mIsVariadic)
-          result.Append(NS_LITERAL_STRING(", "));
+          AppendString(result, ", ");
       }
       if (fninfo->mIsVariadic)
-        result.Append(NS_LITERAL_STRING("\"...\""));
-      result.Append(']');
+        AppendString(result, "\"...\"");
+      AppendString(result, "]");
     }
 
-    result.Append(')');
+    AppendString(result, ")");
     break;
   }
   case TYPE_array: {
@@ -2133,14 +2140,14 @@ BuildTypeSource(JSContext* cx, JSObject* typeObj, bool makeShort)
     // where n is the array length, or the empty string if the array length
     // is undefined.
     JSObject* baseType = ArrayType::GetBaseType(cx, typeObj);
-    result.Append(BuildTypeSource(cx, baseType, makeShort));
-    result.Append(NS_LITERAL_STRING(".array("));
+    BuildTypeSource(cx, baseType, makeShort, result);
+    AppendString(result, ".array(");
 
     size_t length;
     if (ArrayType::GetSafeLength(cx, typeObj, &length))
-      result.Append(IntegerToString(length, 10));
+      IntegerToString(length, 10, result);
 
-    result.Append(')');
+    AppendString(result, ")");
     break;
   }
   case TYPE_struct: {
@@ -2149,33 +2156,31 @@ BuildTypeSource(JSContext* cx, JSObject* typeObj, bool makeShort)
     if (makeShort) {
       // Shorten the type declaration by assuming that StructType 't' is bound
       // to an in-scope variable of name 't.name'.
-      result.Append(GetString(name));
+      AppendString(result, name);
       break;
     }
 
     // Write the full struct declaration.
-    result.Append(NS_LITERAL_STRING("ctypes.StructType(\""));
-    result.Append(GetString(name));
-    result.Append(NS_LITERAL_STRING("\", ["));
+    AppendString(result, "ctypes.StructType(\"");
+    AppendString(result, name);
+    AppendString(result, "\", [");
 
-    nsTArray<FieldInfo>* fields = StructType::GetFieldInfo(cx, typeObj);
-    for (PRUint32 i = 0; i < fields->Length(); ++i) {
-      const FieldInfo& field = fields->ElementAt(i);
-      result.Append(NS_LITERAL_STRING("{ \""));
-      result.Append(field.mName);
-      result.Append(NS_LITERAL_STRING("\": "));
-      result.Append(BuildTypeSource(cx, field.mType, true));
-      result.Append(NS_LITERAL_STRING(" }"));
-      if (i != fields->Length() - 1)
-        result.Append(NS_LITERAL_STRING(", "));
+    Array<FieldInfo>* fields = StructType::GetFieldInfo(cx, typeObj);
+    for (size_t i = 0; i < fields->length(); ++i) {
+      FieldInfo* field = fields->begin() + i;
+      AppendString(result, "{ \"");
+      AppendString(result, field->mName);
+      AppendString(result, "\": ");
+      BuildTypeSource(cx, field->mType, true, result);
+      AppendString(result, " }");
+      if (i != fields->length() - 1)
+        AppendString(result, ", ");
     }
 
-    result.Append(NS_LITERAL_STRING("])"));
+    AppendString(result, "])");
     break;
   }
   }
-
-  return result;
 }
 
 // Given a CData object of CType 'typeObj' with binary value 'data', generate a
     double fp = *static_cast<type*>(data);                            \
     char buf[DTOSTR_STANDARD_BUFFER_SIZE];                            \
-    char* str = JS_dtostr(buf, sizeof(buf), DTOSTR_STANDARD, 0, fp);  \
-    JS_ASSERT(str);                                                   \
-    if (!str)                                                         \
-      break;                                                          \
+    char* str = js_dtostr(JS_THREAD_DATA(cx)->dtoaState, buf, sizeof(buf), \
+                          DTOSTR_STANDARD, 0, fp);                    \
+    if (!str) {                                                       \
+      JS_ReportOutOfMemory(cx);                                       \
+      return false;                                                   \
+    }                                                                 \
                                                                       \
-    result.AppendASCII(str);                                          \
+    result.append(str, strlen(str));                                  \
     break;                                                            \
   }
 #define DEFINE_CHAR_TYPE(name, type, ffiType)                         \
   case TYPE_##name:                                                   \
     /* Serialize as an integer. */                                    \
-    result.Append(IntegerToString(*static_cast<type*>(data), 10));    \
+    IntegerToString(*static_cast<type*>(data), 10, result);           \
     break;
 #include "typedefs.h"

   case TYPE_jschar: {
-    /* Serialize as a 1-character JS string. */
+    // Serialize as a 1-character JS string.
     JSString* str = JS_NewUCStringCopyN(cx, static_cast<jschar*>(data), 1);
     if (!str)
-      break;
+      return false;

+    // Escape characters, and quote as necessary.
     JSString* src = JS_ValueToSource(cx, STRING_TO_JSVAL(str));
     if (!src)
-      break;
+      return false;

-    result.Append(GetString(src));
+    AppendString(result, src);
     break;
   }
   case TYPE_pointer:
@@ -2252,18 +2264,18 @@ BuildDataSource(JSContext* cx, JSObject* typeObj, void* data, bool isImplicit)
     if (isImplicit) {
       // The result must be able to ImplicitConvert successfully.
       // Wrap in a type constructor, then serialize for ExplicitConvert.
-      result.Append(BuildTypeSource(cx, typeObj, true));
-      result.Append('(');
+      BuildTypeSource(cx, typeObj, true, result);
+      AppendString(result, "(");
     }

     // Serialize the pointer value as a wrapped hexadecimal integer.
     uintptr_t ptr = *static_cast<uintptr_t*>(data);
-    result.Append(NS_LITERAL_STRING("ctypes.UInt64(\"0x"));
-    result.Append(IntegerToString(ptr, 16));
-    result.Append(NS_LITERAL_STRING("\")"));
+    AppendString(result, "ctypes.UInt64(\"0x");
+    IntegerToString(ptr, 16, result);
+    AppendString(result, "\")");

     if (isImplicit)
-      result.Append(')');
+      AppendString(result, ")");

     break;
   }
@@ -2271,17 +2283,19 @@ BuildDataSource(JSContext* cx, JSObject* typeObj, void* data, bool isImplicit)
     // Serialize each element of the array recursively. Each element must
     // be able to ImplicitConvert successfully.
     JSObject* baseType = ArrayType::GetBaseType(cx, typeObj);
-    result.Append('[');
+    AppendString(result, "[");

     size_t length = ArrayType::GetLength(cx, typeObj);
     size_t elementSize = CType::GetSize(cx, baseType);
     for (size_t i = 0; i < length; ++i) {
       char* element = static_cast<char*>(data) + elementSize * i;
-      result.Append(BuildDataSource(cx, baseType, element, true));
+      if (!BuildDataSource(cx, baseType, element, true, result))
+        return false;
+
       if (i + 1 < length)
-        result.Append(NS_LITERAL_STRING(", "));
+        AppendString(result, ", ");
     }
-    result.Append(']');
+    AppendString(result, "]");
     break;
   }
   case TYPE_struct: {
@@ -2289,29 +2303,31 @@ BuildDataSource(JSContext* cx, JSObject* typeObj, void* data, bool isImplicit)
     if (isImplicit) {
       // The result must be able to ImplicitConvert successfully.
       // Serialize the data as an object with properties, rather than
       // a sequence of arguments to the StructType constructor.
-      result.Append('{');
+      AppendString(result, "{");
     }

     // Serialize each field of the struct recursively. Each field must
     // be able to ImplicitConvert successfully.
-    nsTArray<FieldInfo>* fields = StructType::GetFieldInfo(cx, typeObj);
-    for (size_t i = 0; i < fields->Length(); ++i) {
-      const FieldInfo& field = fields->ElementAt(i);
-      char* fieldData = static_cast<char*>(data) + field.mOffset;
+    Array<FieldInfo>* fields = StructType::GetFieldInfo(cx, typeObj);
+    for (size_t i = 0; i < fields->length(); ++i) {
+      FieldInfo* field = fields->begin() + i;
+      char* fieldData = static_cast<char*>(data) + field->mOffset;
       if (isImplicit) {
-        result.Append('"');
-        result.Append(field.mName);
-        result.Append(NS_LITERAL_STRING("\": "));
+        AppendString(result, "\"");
+        AppendString(result, field->mName);
+        AppendString(result, "\": ");
       }
-      result.Append(BuildDataSource(cx, field.mType, fieldData, true));
-      if (i + 1 != fields->Length())
-        result.Append(NS_LITERAL_STRING(", "));
+      if (!BuildDataSource(cx, field->mType, fieldData, true, result))
+        return false;
+
+      if (i + 1 != fields->length())
+        AppendString(result, ", ");
     }

     if (isImplicit)
-      result.Append('}');
+      AppendString(result, "}");

     break;
   }
@@ -2320,7 +2336,7 @@ BuildDataSource(JSContext* cx, JSObject* typeObj, void* data, bool isImplicit)
     break;
   }

-  return result;
+  return true;
 }
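BuildDataSource now returns JSBool, and every recursive call above is checked; the old version could only break out of the switch and silently hand back a truncated string. A compilable sketch of that propagation style (std::string and the failure condition are stand-ins, not from the patch):

    #include <string>

    // Mirrors the 'if (!BuildDataSource(...)) return false;' calls above:
    // a failure at any depth unwinds through every level of the serializer.
    static bool SerializeByte(unsigned char v, std::string& out) {
      if (v > 127)
        return false;  // stand-in for a conversion failure
      out += std::to_string(int(v));
      return true;
    }

    static bool SerializeArray(const unsigned char* vals, size_t len,
                               std::string& out) {
      out += '[';
      for (size_t i = 0; i < len; ++i) {
        if (!SerializeByte(vals[i], out))
          return false;  // propagate; the caller discards the partial string
        if (i + 1 < len)
          out += ", ";
      }
      out += ']';
      return true;
    }

    int main() {
      std::string out;
      const unsigned char data[] = { 1, 2, 3 };
      return SerializeArray(data, 3, out) ? 0 : 1;
    }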
 /*******************************************************************************
@@ -2438,7 +2454,7 @@ CType::Create(JSContext* cx,
   JSObject* typeObj = JS_NewObject(cx, &sCTypeClass, typeProto, parent);
   if (!typeObj)
     return NULL;
-  JSAutoTempValueRooter root(cx, typeObj);
+  js::AutoValueRooter root(cx, typeObj);

   // Set up the reserved slots.
   if (!JS_SetReservedSlot(cx, typeObj, SLOT_TYPECODE, INT_TO_JSVAL(type)) ||
@@ -2452,7 +2468,7 @@ CType::Create(JSContext* cx,
     JSObject* prototype = JS_NewObject(cx, &sCDataProtoClass, dataProto, parent);
     if (!prototype)
       return NULL;
-    JSAutoTempValueRooter protoroot(cx, prototype);
+    js::AutoValueRooter protoroot(cx, prototype);

     if (!JS_DefineProperty(cx, prototype, "constructor", OBJECT_TO_JSVAL(typeObj),
                            NULL, NULL, JSPROP_READONLY | JSPROP_PERMANENT))
@@ -2501,7 +2517,7 @@ CType::DefineBuiltin(JSContext* cx,
   JSString* nameStr = JS_NewStringCopyZ(cx, name);
   if (!nameStr)
     return NULL;
-  JSAutoTempValueRooter nameRoot(cx, nameStr);
+  js::AutoValueRooter nameRoot(cx, nameStr);

   // Create a new CType object with the common properties and slots.
   JSObject* typeObj = Create(cx, typeProto, dataProto, type, nameStr, size,
@@ -2537,8 +2553,10 @@ CType::Finalize(JSContext* cx, JSObject* obj)
   case TYPE_struct:
     // Free the FieldInfo array.
     ASSERT_OK(JS_GetReservedSlot(cx, obj, SLOT_FIELDINFO, &slot));
-    if (!JSVAL_IS_VOID(slot))
-      delete static_cast<nsTArray<FieldInfo>*>(JSVAL_TO_PRIVATE(slot));
+    if (!JSVAL_IS_VOID(slot)) {
+      void* info = JSVAL_TO_PRIVATE(slot);
+      delete static_cast<Array<FieldInfo>*>(info);
+    }

     // Fall through.
   case TYPE_array: {
@@ -2598,7 +2616,7 @@ CType::Trace(JSTracer* trc, JSObject* obj)
     // Identify our objects to the tracer.
     JS_CALL_TRACER(trc, fninfo->mABI, JSTRACE_OBJECT, "abi");
     JS_CALL_TRACER(trc, fninfo->mReturnType, JSTRACE_OBJECT, "returnType");
-    for (PRUint32 i = 0; i < fninfo->mArgTypes.Length(); ++i)
+    for (size_t i = 0; i < fninfo->mArgTypes.length(); ++i)
       JS_CALL_TRACER(trc, fninfo->mArgTypes[i], JSTRACE_OBJECT, "argType");

     break;
@@ -2668,13 +2686,13 @@ CType::TypesEqual(JSContext* cx, JSObject* t1, JSObject* t2)
     if (!TypesEqual(cx, f1->mReturnType, f2->mReturnType))
       return false;

-    if (f1->mArgTypes.Length() != f2->mArgTypes.Length())
+    if (f1->mArgTypes.length() != f2->mArgTypes.length())
       return false;

     if (f1->mIsVariadic != f2->mIsVariadic)
       return false;

-    for (PRUint32 i = 0; i < f1->mArgTypes.Length(); ++i) {
+    for (size_t i = 0; i < f1->mArgTypes.length(); ++i) {
       if (!TypesEqual(cx, f1->mArgTypes[i], f2->mArgTypes[i]))
         return false;
     }
@@ -2923,9 +2941,9 @@ CType::ToString(JSContext* cx, uintN argc, jsval *vp)
     return JS_FALSE;
   }

-  nsAutoString type(NS_LITERAL_STRING("type "));
-  JSString* right = GetName(cx, obj);
-  type.Append(GetString(right));
+  AutoString type;
+  AppendString(type, "type ");
+  AppendString(type, GetName(cx, obj));

   JSString* result = NewUCString(cx, type);
   if (!result)
@@ -2946,7 +2964,8 @@ CType::ToSource(JSContext* cx, uintN argc, jsval *vp)
     return JS_FALSE;
   }

-  nsAutoString source = BuildTypeSource(cx, obj, false);
+  AutoString source;
+  BuildTypeSource(cx, obj, false, source);
   JSString* result = NewUCString(cx, source);
   if (!result)
     return JS_FALSE;
@@ -3054,7 +3073,7 @@ PointerType::CreateInternal(JSContext* cx,
                             &ffi_type_pointer, NULL);
   if (!typeObj)
     return NULL;
-  JSAutoTempValueRooter root(cx, typeObj);
+  js::AutoValueRooter root(cx, typeObj);

   // Set the target type. (This will be 'null' for an opaque pointer type.)
   if (!JS_SetReservedSlot(cx, typeObj, SLOT_TARGET_T, OBJECT_TO_JSVAL(baseType)))
@@ -3062,8 +3081,7 @@ PointerType::CreateInternal(JSContext* cx,
   if (baseType) {
     // Determine the name of the PointerType, since it wasn't supplied.
-    nsAutoString typeName = BuildTypeName(cx, typeObj);
-    JSString* nameStr = NewUCString(cx, typeName);
+    JSString* nameStr = BuildTypeName(cx, typeObj);
     if (!nameStr ||
         !JS_SetReservedSlot(cx, typeObj, SLOT_NAME, STRING_TO_JSVAL(nameStr)))
       return NULL;
@@ -3347,7 +3365,7 @@ ArrayType::CreateInternal(JSContext* cx,
                          sizeVal, INT_TO_JSVAL(align), ffiType, NULL);
   if (!typeObj)
     return NULL;
-  JSAutoTempValueRooter root(cx, typeObj);
+  js::AutoValueRooter root(cx, typeObj);

   // Set the element type.
   if (!JS_SetReservedSlot(cx, typeObj, SLOT_ELEMENT_T, OBJECT_TO_JSVAL(baseType)))
@@ -3358,8 +3376,7 @@ ArrayType::CreateInternal(JSContext* cx,
     return NULL;

   // Determine the name of the ArrayType.
-  nsAutoString typeName = BuildTypeName(cx, typeObj);
-  JSString* name = NewUCString(cx, typeName);
+  JSString* name = BuildTypeName(cx, typeObj);
   if (!name ||
       !JS_SetReservedSlot(cx, typeObj, SLOT_NAME, STRING_TO_JSVAL(name)))
     return NULL;
@@ -3408,7 +3425,7 @@ ArrayType::ConstructData(JSContext* cx,
     // We were given an object with a .length property.
     // This could be a JS array, or a CData array.
     JSObject* arg = JSVAL_TO_OBJECT(argv[0]);
-    JSAutoTempValueRooter lengthVal(cx);
+    js::AutoValueRooter lengthVal(cx);
     if (!JS_GetProperty(cx, arg, "length", lengthVal.addr()) ||
         !jsvalToSize(cx, lengthVal.value(), false, &length)) {
       JS_ReportError(cx, "argument must be an array object or length");
@@ -3426,14 +3443,12 @@ ArrayType::ConstructData(JSContext* cx,
     case TYPE_char:
     case TYPE_signed_char:
     case TYPE_unsigned_char: {
-      // Convert from UTF-16 to UTF-8 to determine the length. :(
-      if (!IsUTF16(sourceChars, sourceLength))
-        return TypeError(cx, "UTF-16 string", argv[0]);
+      // Determine the UTF-8 length.
+      length = js_GetDeflatedUTF8StringLength(cx, sourceChars, sourceLength);
+      if (length == (size_t) -1)
+        return false;

-      NS_ConvertUTF16toUTF8 converted(
-        reinterpret_cast<const PRUnichar*>(sourceChars), sourceLength);
-
-      length = converted.Length() + 1;
+      ++length;
       break;
     }
     case TYPE_jschar:
@@ -3455,7 +3470,7 @@ ArrayType::ConstructData(JSContext* cx,
   }

   // Root the CType object, in case we created one above.
-  JSAutoTempValueRooter root(cx, obj);
+  js::AutoValueRooter root(cx, obj);

   JSObject* result = CData::Create(cx, obj, NULL, NULL, true);
   if (!result)
@@ -3652,7 +3667,7 @@ ArrayType::AddressOfElement(JSContext* cx, uintN argc, jsval *vp)
   JSObject* pointerType = PointerType::CreateInternal(cx, NULL, baseType, NULL);
   if (!pointerType)
     return JS_FALSE;
-  JSAutoTempValueRooter root(cx, pointerType);
+  js::AutoValueRooter root(cx, pointerType);

   // Create a PointerType CData object containing null.
   JSObject* result = CData::Create(cx, pointerType, NULL, NULL, true);
@@ -3695,13 +3710,13 @@ ExtractStructField(JSContext* cx, jsval val, FieldInfo* field)
   JSObject* iter = JS_NewPropertyIterator(cx, obj);
   if (!iter)
     return false;
-  JSAutoTempValueRooter iterroot(cx, iter);
+  js::AutoValueRooter iterroot(cx, iter);

   jsid id;
   if (!JS_NextProperty(cx, iter, &id))
     return false;

-  JSAutoTempValueRooter nameVal(cx);
+  js::AutoValueRooter nameVal(cx);
   if (!JS_IdToValue(cx, id, nameVal.addr()))
     return false;
   if (!JSVAL_IS_STRING(nameVal.value())) {
@@ -3718,11 +3733,12 @@ ExtractStructField(JSContext* cx, jsval val, FieldInfo* field)
   }

   JSString* name = JSVAL_TO_STRING(nameVal.value());
+  field->mName.clear();
+  AppendString(field->mName, name);
+
+  js::AutoValueRooter propVal(cx);
   const jschar* nameChars = JS_GetStringChars(name);
   size_t namelen = JS_GetStringLength(name);
-  field->mName.Assign(reinterpret_cast<const PRUnichar*>(nameChars), namelen);
-
-  JSAutoTempValueRooter propVal(cx);
   if (!JS_GetUCProperty(cx, obj, nameChars, namelen, propVal.addr()))
     return false;
@@ -3751,7 +3767,7 @@ static JSBool
 AddFieldToArray(JSContext* cx,
                 JSObject* arrayObj,
                 jsval* element,
-                const nsString& name,
+                const String& name,
                 JSObject* typeObj)
 {
   JSObject* fieldObj = JS_NewObject(cx, NULL, NULL, arrayObj);
@@ -3761,7 +3777,7 @@ AddFieldToArray(JSContext* cx,
   *element = OBJECT_TO_JSVAL(fieldObj);

   if (!JS_DefineUCProperty(cx, fieldObj,
-         reinterpret_cast<const jschar*>(name.get()), name.Length(),
+         name.begin(), name.length(),
          OBJECT_TO_JSVAL(typeObj), NULL, NULL,
          JSPROP_ENUMERATE | JSPROP_READONLY | JSPROP_PERMANENT))
     return false;
@@ -3801,10 +3817,10 @@ StructType::Create(JSContext* cx, uintN argc, jsval* vp)
     js_NewArrayObjectWithCapacity(cx, len, &fieldsVec);
   if (!fieldsProp)
     return JS_FALSE;
-  JSAutoTempValueRooter root(cx, fieldsProp);
+  js::AutoValueRooter root(cx, fieldsProp);
   JS_ASSERT(len == 0 || fieldsVec);

-  nsAutoPtr<ffi_type> ffiType(new ffi_type);
+  AutoPtr<ffi_type> ffiType(new ffi_type);
   if (!ffiType) {
     JS_ReportOutOfMemory(cx);
     return JS_FALSE;
   }
@@ -3814,15 +3830,16 @@ StructType::Create(JSContext* cx, uintN argc, jsval* vp)
   // Create an array of FieldInfo objects to stash on the type object, and an
   // array of PropertySpecs to reflect the struct fields as properties
   // on CData objects created from this type.
-  nsAutoPtr< nsTArray<FieldInfo> > fields(new nsTArray<FieldInfo>());
-  nsAutoTArray<PropertySpec, 16> instanceProps;
+  AutoPtr< Array<FieldInfo> > fields(new Array<FieldInfo>);
+  Array<PropertySpec, 16> instanceProps;
   if (!fields ||
-      !fields->SetCapacity(len) ||
-      !instanceProps.SetCapacity(len + 1)) {
+      !fields->resize(len) ||
+      !instanceProps.resize(len + 1)) {
     JS_ReportOutOfMemory(cx);
     return JS_FALSE;
   }
-  nsAutoArrayPtr<ffi_type*> elements;
+
+  AutoPtr<ffi_type*>::Array elements;

   // Process the field types and fill in the ffi_type fields.
   size_t structSize = 0, structAlign = 0;
@@ -3835,17 +3852,18 @@ StructType::Create(JSContext* cx, uintN argc, jsval* vp)
     elements[len] = NULL;

   for (jsuint i = 0; i < len; ++i) {
-    JSAutoTempValueRooter item(cx);
+    js::AutoValueRooter item(cx);
     if (!JS_GetElement(cx, fieldsObj, i, item.addr()))
       return JS_FALSE;

-    FieldInfo* info = fields->AppendElement();
+    FieldInfo* info = fields->begin() + i;
     if (!ExtractStructField(cx, item.value(), info))
       return JS_FALSE;

     // Make sure each field name is unique.
-    for (PRUint32 j = 0; j < fields->Length() - 1; ++j) {
-      if (fields->ElementAt(j).mName == info->mName) {
+    for (size_t j = 0; j < i; ++j) {
+      FieldInfo* field = fields->begin() + j;
+      if (StringsEqual(field->mName, info->mName)) {
         JS_ReportError(cx, "struct fields must have unique names");
         return JS_FALSE;
       }
@@ -3857,9 +3875,9 @@ StructType::Create(JSContext* cx, uintN argc, jsval* vp)
       return JS_FALSE;

     // Fill in the PropertySpec for the field.
-    PropertySpec* instanceProp = instanceProps.AppendElement();
-    instanceProp->name = reinterpret_cast<const jschar*>(info->mName.get());
-    instanceProp->namelen = info->mName.Length();
+    PropertySpec* instanceProp = instanceProps.begin() + i;
+    instanceProp->name = info->mName.begin();
+    instanceProp->namelen = info->mName.length();
     instanceProp->flags = JSPROP_SHARED | JSPROP_ENUMERATE | JSPROP_PERMANENT;
     instanceProp->getter = StructType::FieldGetter;
     instanceProp->setter = StructType::FieldSetter;
@@ -3907,7 +3925,7 @@ StructType::Create(JSContext* cx, uintN argc, jsval* vp)
     elements[1] = NULL;
   }

-  ffiType->elements = elements;
+  ffiType->elements = elements.get();

 #ifdef DEBUG
   // Perform a sanity check: the result of our struct size and alignment
@@ -3916,7 +3934,7 @@ StructType::Create(JSContext* cx, uintN argc, jsval* vp)
   ffi_cif cif;
   ffiType->size = 0;
   ffiType->alignment = 0;
-  ffi_status status = ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, ffiType, NULL);
+  ffi_status status = ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 0, ffiType.get(), NULL);
   JS_ASSERT(status == FFI_OK);
   JS_ASSERT(structSize == ffiType->size);
   JS_ASSERT(structAlign == ffiType->alignment);
@@ -3930,7 +3948,7 @@ StructType::Create(JSContext* cx, uintN argc, jsval* vp)
 #endif

   // Terminate the PropertySpec array.
-  instanceProps.AppendElement()->name = NULL;
+  instanceProps[len].name = NULL;

   jsval sizeVal;
   if (!SizeTojsval(cx, structSize, &sizeVal))
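Since the field array is now resized to its final length up front, the rewritten uniqueness check compares entry i against exactly the entries [0, i) that have already been filled in, instead of nsTArray's Length() - 1. A self-contained rendering of that loop shape (names are illustrative):

    #include <cstring>

    // Entry i is checked against entries [0, i) only; for i == 0 the inner
    // loop body never runs, so the first field is trivially unique.
    static bool AllNamesUnique(const char* const* names, size_t count) {
      for (size_t i = 0; i < count; ++i)
        for (size_t j = 0; j < i; ++j)
          if (std::strcmp(names[j], names[i]) == 0)
            return false;
      return true;
    }

    int main() {
      const char* fields[] = { "x", "y", "x" };
      return AllNamesUnique(fields, 3) ? 0 : 1;  // duplicate "x": exits 1
    }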
@@ -3945,8 +3963,8 @@ StructType::Create(JSContext* cx, uintN argc, jsval* vp)

   // Create a new CType object with the common properties and slots.
   JSObject* typeObj = CType::Create(cx, typeProto, dataProto, TYPE_struct,
                                     JSVAL_TO_STRING(name), sizeVal,
-                                    INT_TO_JSVAL(structAlign), ffiType,
-                                    instanceProps.Elements());
+                                    INT_TO_JSVAL(structAlign), ffiType.get(),
+                                    instanceProps.begin());
   if (!typeObj)
     return JS_FALSE;
   ffiType.forget();
@@ -3991,7 +4009,7 @@ StructType::ConstructData(JSContext* cx,
     return JS_TRUE;

   char* buffer = static_cast<char*>(CData::GetData(cx, result));
-  nsTArray<FieldInfo>* fields = GetFieldInfo(cx, obj);
+  Array<FieldInfo>* fields = GetFieldInfo(cx, obj);

   if (argc == 1) {
     // There are two possible interpretations of the argument:
@@ -4006,7 +4024,7 @@ StructType::ConstructData(JSContext* cx,
     if (ExplicitConvert(cx, argv[0], obj, buffer))
       return JS_TRUE;

-    if (fields->Length() != 1)
+    if (fields->length() != 1)
       return JS_FALSE;

     // If ExplicitConvert failed, and there is no pending exception, then assume
@@ -4023,10 +4041,10 @@ StructType::ConstructData(JSContext* cx,

   // We have a type constructor of the form 'ctypes.StructType(a, b, c, ...)'.
   // ImplicitConvert each field.
-  if (argc == fields->Length()) {
-    for (PRUint32 i = 0; i < fields->Length(); ++i) {
-      FieldInfo& field = fields->ElementAt(i);
-      if (!ImplicitConvert(cx, argv[i], field.mType, buffer + field.mOffset,
+  if (argc == fields->length()) {
+    for (size_t i = 0; i < fields->length(); ++i) {
+      FieldInfo* field = fields->begin() + i;
+      if (!ImplicitConvert(cx, argv[i], field->mType, buffer + field->mOffset,
             false, NULL))
         return JS_FALSE;
     }
@@ -4035,11 +4053,11 @@ StructType::ConstructData(JSContext* cx,
   }

   JS_ReportError(cx, "constructor takes 0, 1, or %u arguments",
-                 fields->Length());
+                 fields->length());
   return JS_FALSE;
 }

-nsTArray<FieldInfo>*
+Array<FieldInfo>*
 StructType::GetFieldInfo(JSContext* cx, JSObject* obj)
 {
   JS_ASSERT(CType::IsCType(cx, obj));
@@ -4049,7 +4067,7 @@ StructType::GetFieldInfo(JSContext* cx, JSObject* obj)
   ASSERT_OK(JS_GetReservedSlot(cx, obj, SLOT_FIELDINFO, &slot));
   JS_ASSERT(!JSVAL_IS_VOID(slot) && JSVAL_TO_PRIVATE(slot));

-  return static_cast<nsTArray<FieldInfo>*>(JSVAL_TO_PRIVATE(slot));
+  return static_cast<Array<FieldInfo>*>(JSVAL_TO_PRIVATE(slot));
 }

 FieldInfo*
@@ -4058,18 +4076,20 @@ StructType::LookupField(JSContext* cx, JSObject* obj, jsval idval)
   JS_ASSERT(CType::IsCType(cx, obj));
   JS_ASSERT(CType::GetTypeCode(cx, obj) == TYPE_struct);

-  nsTArray<FieldInfo>* fields = GetFieldInfo(cx, obj);
+  Array<FieldInfo>* fields = GetFieldInfo(cx, obj);

-  JSString* nameStr = JSVAL_TO_STRING(idval);
-  const nsDependentString name(GetString(nameStr));
-
-  for (PRUint32 i = 0; i < fields->Length(); ++i) {
-    if (fields->ElementAt(i).mName.Equals(name))
-      return &fields->ElementAt(i);
+  JSString* name = JSVAL_TO_STRING(idval);
+  for (size_t i = 0; i < fields->length(); ++i) {
+    FieldInfo* field = fields->begin() + i;
+    if (StringsEqual(field->mName, name))
+      return field;
   }

-  JS_ReportError(cx, "%s does not name a field",
-                 NS_LossyConvertUTF16toASCII(name).get());
+  const char* bytes = JS_GetStringBytesZ(cx, name);
+  if (!bytes)
+    return NULL;
+
+  JS_ReportError(cx, "%s does not name a field", bytes);
   return NULL;
 }
@@ -4161,7 +4181,7 @@ StructType::AddressOfField(JSContext* cx, uintN argc, jsval *vp)
   JSObject* pointerType = PointerType::CreateInternal(cx, NULL, baseType, NULL);
   if (!pointerType)
     return JS_FALSE;
-  JSAutoTempValueRooter root(cx, pointerType);
+  js::AutoValueRooter root(cx, pointerType);

   // Create a PointerType CData object containing null.
   JSObject* result = CData::Create(cx, pointerType, NULL, NULL, true);
@@ -4311,9 +4331,9 @@ PrepareCIF(JSContext* cx,
   ffi_status status =
     ffi_prep_cif(&fninfo->mCIF,
                  abi,
-                 fninfo->mFFITypes.Length(),
+                 fninfo->mFFITypes.length(),
                  CType::GetFFIType(cx, fninfo->mReturnType),
-                 fninfo->mFFITypes.Elements());
+                 fninfo->mFFITypes.begin());

   switch (status) {
   case FFI_OK:
@@ -4337,7 +4357,7 @@ NewFunctionInfo(JSContext* cx,
                 jsval* argTypes,
                 uintN argLength)
 {
-  nsAutoPtr<FunctionInfo> fninfo(new FunctionInfo());
+  AutoPtr<FunctionInfo> fninfo(new FunctionInfo());
   if (!fninfo) {
     JS_ReportOutOfMemory(cx);
     return NULL;
   }
@@ -4351,15 +4371,15 @@ NewFunctionInfo(JSContext* cx,
     return NULL;

   // prepare the argument types
-  if (!fninfo->mArgTypes.SetCapacity(argLength) ||
-      !fninfo->mFFITypes.SetCapacity(argLength)) {
+  if (!fninfo->mArgTypes.reserve(argLength) ||
+      !fninfo->mFFITypes.reserve(argLength)) {
     JS_ReportOutOfMemory(cx);
     return NULL;
   }

   fninfo->mIsVariadic = false;

-  for (PRUint32 i = 0; i < argLength; ++i) {
+  for (JSUint32 i = 0; i < argLength; ++i) {
     if (IsEllipsis(argTypes[i])) {
       fninfo->mIsVariadic = true;
       if (i < 1) {
@@ -4384,15 +4404,15 @@ NewFunctionInfo(JSContext* cx,
     if (!argType)
       return NULL;

-    fninfo->mArgTypes.AppendElement(argType);
-    fninfo->mFFITypes.AppendElement(CType::GetFFIType(cx, argType));
+    fninfo->mArgTypes.append(argType);
+    fninfo->mFFITypes.append(CType::GetFFIType(cx, argType));
   }

   if (fninfo->mIsVariadic)
     // wait to PrepareCIF until function is called
     return fninfo.forget();

-  if (!PrepareCIF(cx, fninfo))
+  if (!PrepareCIF(cx, fninfo.get()))
     return NULL;

   return fninfo.forget();
@@ -4408,7 +4428,7 @@ FunctionType::Create(JSContext* cx, uintN argc, jsval* vp)
   }

   jsval* argv = JS_ARGV(cx, vp);
-  nsAutoTArray<jsval, 16> argTypes;
+  Array<jsval, 16> argTypes;
   JSObject* arrayObj = NULL;

   if (argc == 3) {
@@ -4423,7 +4443,7 @@ FunctionType::Create(JSContext* cx, uintN argc, jsval* vp)
     jsuint len;
     ASSERT_OK(JS_GetArrayLength(cx, arrayObj, &len));

-    if (!argTypes.SetLength(len)) {
+    if (!argTypes.resize(len)) {
       JS_ReportOutOfMemory(cx);
       return JS_FALSE;
     }
@@ -4433,15 +4453,15 @@ FunctionType::Create(JSContext* cx, uintN argc, jsval* vp)
   }

   // Pull out the argument types from the array, if any.
-  JS_ASSERT(!argTypes.Length() || arrayObj);
-  JSAutoTempValueRooter items(cx, argTypes.Length(), argTypes.Elements());
-  for (jsuint i = 0; i < argTypes.Length(); ++i) {
+  JS_ASSERT(!argTypes.length() || arrayObj);
+  js::AutoArrayRooter items(cx, argTypes.length(), argTypes.begin());
+  for (jsuint i = 0; i < argTypes.length(); ++i) {
     if (!JS_GetElement(cx, arrayObj, i, &argTypes[i]))
       return JS_FALSE;
   }

   JSObject* result = CreateInternal(cx, argv[0], argv[1],
-                                    argTypes.Elements(), argTypes.Length());
+                                    argTypes.begin(), argTypes.length());
   if (!result)
     return JS_FALSE;
@@ -4457,7 +4477,7 @@ FunctionType::CreateInternal(JSContext* cx,
                              jsuint arglen)
 {
   // Determine and check the types, and prepare the function CIF.
-  nsAutoPtr<FunctionInfo> fninfo(NewFunctionInfo(cx, abi, rtype, argtypes, arglen));
+  AutoPtr<FunctionInfo> fninfo(NewFunctionInfo(cx, abi, rtype, argtypes, arglen));
   if (!fninfo)
     return NULL;

@@ -4475,7 +4495,7 @@ FunctionType::CreateInternal(JSContext* cx,
                              &ffi_type_pointer, NULL);
   if (!typeObj)
     return NULL;
-  JSAutoTempValueRooter root(cx, typeObj);
+  js::AutoValueRooter root(cx, typeObj);

   // Stash the FunctionInfo in a reserved slot.
   if (!JS_SetReservedSlot(cx, typeObj, SLOT_FNINFO,
@@ -4484,8 +4504,7 @@ FunctionType::CreateInternal(JSContext* cx,
   fninfo.forget();

   // Determine the name of the FunctionType.
-  nsAutoString typeName = BuildTypeName(cx, typeObj);
-  JSString* name = NewUCString(cx, typeName);
+  JSString* name = BuildTypeName(cx, typeObj);
   if (!name ||
       !JS_SetReservedSlot(cx, typeObj, SLOT_NAME, STRING_TO_JSVAL(name)))
     return NULL;
@@ -4540,7 +4559,7 @@ FunctionType::ConstructData(JSContext* cx,
   JSObject* closureObj = CClosure::Create(cx, obj, fnObj, thisObj, data);
   if (!closureObj)
     return JS_FALSE;
-  JSAutoTempValueRooter root(cx, closureObj);
+  js::AutoValueRooter root(cx, closureObj);

   // Set the closure object as the referent of the new CData object.
   if (!JS_SetReservedSlot(cx, result, SLOT_REFERENT,
@@ -4566,17 +4585,15 @@ FunctionType::ConstructData(JSContext* cx,
     return JS_FALSE;
 }

-typedef nsAutoTArray<AutoValue, 16> AutoValueAutoArray;
+typedef Array<AutoValue, 16> AutoValueAutoArray;

 static JSBool
 ConvertArgument(JSContext* cx,
                 jsval arg,
                 JSObject* type,
-                AutoValueAutoArray* values,
+                AutoValue* value,
                 AutoValueAutoArray* strings)
 {
-  AutoValue* value = values->AppendElement();
-
   if (!value->SizeToType(cx, type)) {
     JS_ReportAllocationOverflow(cx);
     return false;
   }
@@ -4589,7 +4606,11 @@ ConvertArgument(JSContext* cx,
   if (freePointer) {
     // ImplicitConvert converted a string for us, which we have to free.
     // Keep track of it.
-    strings->AppendElement()->mData = *static_cast<char**>(value->mData);
+    if (!strings->growBy(1)) {
+      JS_ReportOutOfMemory(cx);
+      return false;
+    }
+    strings->back().mData = *static_cast<char**>(value->mData);
   }

   return true;
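Unlike nsTArray::AppendElement, which returns a pointer that is null on failure, Vector::growBy returns false, so the new code reports the OOM itself before writing through back(). A sketch of the same grow-then-fill shape, with std::vector as a stand-in (std::vector signals allocation failure by throwing, so the sketch converts that into the boolean convention used above):

    #include <new>
    #include <vector>

    struct StringRecord { char* mData; };

    // growBy(1)-then-back() analogue: append a default-constructed slot,
    // report failure to the caller, then fill in the new last element.
    static bool TrackString(std::vector<StringRecord>& strings, char* converted) {
      try {
        strings.resize(strings.size() + 1);
      } catch (const std::bad_alloc&) {
        return false;  // caller sees the failure, as with JS_ReportOutOfMemory
      }
      strings.back().mData = converted;
      return true;
    }

    int main() {
      std::vector<StringRecord> strings;
      char buf[] = "converted";
      return TrackString(strings, buf) ? 0 : 1;
    }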
@@ -4616,7 +4637,7 @@ FunctionType::Call(JSContext* cx, uintN argc, jsval* vp)
   }

   FunctionInfo* fninfo = GetFunctionInfo(cx, typeObj);
-  PRUint32 argcFixed = fninfo->mArgTypes.Length();
+  JSUint32 argcFixed = fninfo->mArgTypes.length();

   if ((!fninfo->mIsVariadic && argc != argcFixed) ||
       (fninfo->mIsVariadic && argc < argcFixed)) {
@@ -4638,19 +4659,25 @@ FunctionType::Call(JSContext* cx, uintN argc, jsval* vp)
   // prepare the values for each argument
   AutoValueAutoArray values;
   AutoValueAutoArray strings;
+  if (!values.resize(argc)) {
+    JS_ReportOutOfMemory(cx);
+    return false;
+  }

-  for (PRUint32 i = 0; i < argcFixed; ++i)
-    if (!ConvertArgument(cx, argv[i], fninfo->mArgTypes[i], &values, &strings))
+  for (jsuint i = 0; i < argcFixed; ++i)
+    if (!ConvertArgument(cx, argv[i], fninfo->mArgTypes[i], &values[i], &strings))
       return false;

   if (fninfo->mIsVariadic) {
-    fninfo->mFFITypes.SetLength(argcFixed);
-    ASSERT_OK(fninfo->mFFITypes.SetCapacity(argc));
+    if (!fninfo->mFFITypes.resize(argc)) {
+      JS_ReportOutOfMemory(cx);
+      return false;
+    }

     JSObject* obj;   // Could reuse obj instead of declaring a second
     JSObject* type;  // JSObject*, but readability would suffer.

-    for (PRUint32 i = argcFixed; i < argc; ++i) {
+    for (JSUint32 i = argcFixed; i < argc; ++i) {
       if (JSVAL_IS_PRIMITIVE(argv[i]) ||
           !CData::IsCData(cx, obj = JSVAL_TO_OBJECT(argv[i]))) {
         // Since we know nothing about the CTypes of the ... arguments,
@@ -4663,11 +4690,11 @@ FunctionType::Call(JSContext* cx, uintN argc, jsval* vp)
           !(type = PrepareType(cx, OBJECT_TO_JSVAL(type))) ||
           // Relying on ImplicitConvert only for the limited purpose of
           // converting one CType to another (e.g., T[] to T*).
-          !ConvertArgument(cx, argv[i], type, &values, &strings)) {
+          !ConvertArgument(cx, argv[i], type, &values[i], &strings)) {
         // These functions report their own errors.
         return false;
       }
-      fninfo->mFFITypes.AppendElement(CType::GetFFIType(cx, type));
+      fninfo->mFFITypes[i] = CType::GetFFIType(cx, type);
     }
     if (!PrepareCIF(cx, fninfo))
       return false;
@@ -4688,7 +4715,7 @@ FunctionType::Call(JSContext* cx, uintN argc, jsval* vp)
   jsrefcount rc = JS_SuspendRequest(cx);

   ffi_call(&fninfo->mCIF, FFI_FN(fn), returnValue.mData,
-           reinterpret_cast<void**>(values.Elements()));
+           reinterpret_cast<void**>(values.begin()));

   JS_ResumeRequest(cx, rc);
@@ -4732,7 +4759,7 @@ FunctionType::ArgTypesGetter(JSContext* cx, JSObject* obj, jsval idval, jsval* v
     return JS_TRUE;

   FunctionInfo* fninfo = GetFunctionInfo(cx, obj);
-  PRUint32 len = fninfo->mArgTypes.Length();
+  size_t len = fninfo->mArgTypes.length();

   // Prepare a new array.
   jsval* vec;
@@ -4740,10 +4767,10 @@ FunctionType::ArgTypesGetter(JSContext* cx, JSObject* obj, jsval idval, jsval* v
     js_NewArrayObjectWithCapacity(cx, len, &vec);
   if (!argTypes)
     return JS_FALSE;
-  JSAutoTempValueRooter argsroot(cx, argTypes);
+  js::AutoValueRooter argsroot(cx, argTypes);
   JS_ASSERT(len == 0 || vec);

-  for (PRUint32 i = 0; i < len; ++i)
+  for (size_t i = 0; i < len; ++i)
     vec[i] = OBJECT_TO_JSVAL(fninfo->mArgTypes[i]);

   // Seal and cache it.
@@ -4801,13 +4828,13 @@ CClosure::Create(JSContext* cx,
   JSObject* result = JS_NewObject(cx, &sCClosureClass, NULL, NULL);
   if (!result)
     return NULL;
-  JSAutoTempValueRooter root(cx, result);
+  js::AutoValueRooter root(cx, result);

   // Get the FunctionInfo from the FunctionType.
   FunctionInfo* fninfo = FunctionType::GetFunctionInfo(cx, typeObj);
   JS_ASSERT(!fninfo->mIsVariadic);

-  nsAutoPtr<ClosureInfo> cinfo(new ClosureInfo());
+  AutoPtr<ClosureInfo> cinfo(new ClosureInfo());
   if (!cinfo) {
     JS_ReportOutOfMemory(cx);
     return NULL;
   }
@@ -4847,9 +4874,6 @@ CClosure::Create(JSContext* cx,
   cinfo->typeObj = typeObj;
   cinfo->thisObj = thisObj;
   cinfo->jsfnObj = fnObj;
-#ifdef DEBUG
-  cinfo->thread = PR_GetCurrentThread();
-#endif

   // Create an ffi_closure object and initialize it.
   void* code;
@@ -4861,7 +4885,7 @@ CClosure::Create(JSContext* cx,
   }

   ffi_status status = ffi_prep_closure_loc(cinfo->closure, &fninfo->mCIF,
-                                           CClosure::ClosureStub, cinfo, code);
+                                           CClosure::ClosureStub, cinfo.get(), code);
   if (status != FFI_OK) {
     ffi_closure_free(cinfo->closure);
     JS_ReportError(cx, "couldn't create closure - libffi error");
@@ -4935,10 +4959,9 @@ CClosure::ClosureStub(ffi_cif* cif, void* result, void** args, void* userData)
   JSObject* thisObj = cinfo->thisObj;
   JSObject* jsfnObj = cinfo->jsfnObj;

-#ifdef DEBUG
+#ifdef JS_THREADSAFE
   // Assert that we're on the thread we were created from.
-  PRThread* thread = PR_GetCurrentThread();
-  JS_ASSERT(thread == cinfo->thread);
+  JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread));
 #endif

   JSAutoRequest ar(cx);
@@ -4948,20 +4971,20 @@ CClosure::ClosureStub(ffi_cif* cif, void* result, void** args, void* userData)
   JS_ASSERT(cif == &fninfo->mCIF);

   // Get a death grip on 'closureObj'.
-  JSAutoTempValueRooter root(cx, cinfo->closureObj);
+  js::AutoValueRooter root(cx, cinfo->closureObj);

   // Set up an array for converted arguments.
-  nsAutoTArray<jsval, 16> argv;
-  if (!argv.SetLength(cif->nargs)) {
+  Array<jsval, 16> argv;
+  if (!argv.resize(cif->nargs)) {
     JS_ReportOutOfMemory(cx);
     return;
   }
-  for (PRUint32 i = 0; i < cif->nargs; ++i)
+  for (JSUint32 i = 0; i < cif->nargs; ++i)
     argv[i] = JSVAL_VOID;

-  JSAutoTempValueRooter roots(cx, argv.Length(), argv.Elements());
-  for (PRUint32 i = 0; i < cif->nargs; ++i) {
+  js::AutoArrayRooter roots(cx, argv.length(), argv.begin());
+  for (JSUint32 i = 0; i < cif->nargs; ++i) {
     // Convert each argument, and have any CData objects created depend on
     // the existing buffers.
     if (!ConvertToJS(cx, fninfo->mArgTypes[i], NULL, args[i], false, false,
@@ -4973,7 +4996,7 @@ CClosure::ClosureStub(ffi_cif* cif, void* result, void** args, void* userData)
   // will find an appropriate object to use.
   jsval rval;
   if (!JS_CallFunctionValue(cx, thisObj, OBJECT_TO_JSVAL(jsfnObj), cif->nargs,
-                            argv.Elements(), &rval))
+                            argv.begin(), &rval))
     return;

   // Convert the result. Note that we pass 'isArgument = false', such that
@@ -5036,7 +5059,7 @@ CData::Create(JSContext* cx,
   JSObject* dataObj = JS_NewObject(cx, &sCDataClass, proto, parent);
   if (!dataObj)
     return NULL;
-  JSAutoTempValueRooter root(cx, dataObj);
+  js::AutoValueRooter root(cx, dataObj);

   // set the CData's associated type
   if (!JS_SetReservedSlot(cx, dataObj, SLOT_CTYPE, OBJECT_TO_JSVAL(typeObj)))
@@ -5187,7 +5210,7 @@ CData::Address(JSContext* cx, uintN argc, jsval *vp)
   JSObject* pointerType = PointerType::CreateInternal(cx, NULL, typeObj, NULL);
   if (!pointerType)
     return JS_FALSE;
-  JSAutoTempValueRooter root(cx, pointerType);
+  js::AutoValueRooter root(cx, pointerType);

   // Create a PointerType CData object containing null.
   JSObject* result = CData::Create(cx, pointerType, NULL, NULL, true);
@@ -5303,13 +5326,20 @@ CData::ReadString(JSContext* cx, uintN argc, jsval *vp)
     case TYPE_unsigned_char: {
       char* bytes = static_cast<char*>(data);
       size_t length = strnlen(bytes, maxLength);

-      nsDependentCSubstring string(bytes, bytes + length);
-      if (!IsUTF8(string)) {
-        JS_ReportError(cx, "not a UTF-8 string");
-        return JS_FALSE;
-      }
-
-      result = NewUCString(cx, NS_ConvertUTF8toUTF16(string));
+      // Determine the length.
+      size_t dstlen;
+      if (!js_InflateUTF8StringToBuffer(cx, bytes, length, NULL, &dstlen))
+        return JS_FALSE;
+
+      jschar* dst =
+        static_cast<jschar*>(JS_malloc(cx, (dstlen + 1) * sizeof(jschar)));
+      if (!dst)
+        return JS_FALSE;
+
+      ASSERT_OK(js_InflateUTF8StringToBuffer(cx, bytes, length, dst, &dstlen));
+
+      result = JS_NewUCString(cx, dst, dstlen);
       break;
     }
     case TYPE_int16_t:
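js_InflateUTF8StringToBuffer is deliberately called twice above: once with a null destination purely to compute dstlen, then again to fill the fresh allocation. The same measure-allocate-convert shape exists in the C library's mbstowcs, which this standalone sketch uses purely as an illustration (the patch itself uses SpiderMonkey's inflater, not mbstowcs):

    #include <cstdlib>
    #include <cwchar>

    // Pass a null destination first to learn the converted length, then
    // allocate dstlen + 1 units and convert for real.
    static wchar_t* InflateToWide(const char* bytes) {
      std::size_t dstlen = std::mbstowcs(NULL, bytes, 0);  // measuring pass
      if (dstlen == (std::size_t) -1)
        return NULL;                                       // invalid sequence
      wchar_t* dst = static_cast<wchar_t*>(
          std::malloc((dstlen + 1) * sizeof(wchar_t)));
      if (!dst)
        return NULL;
      std::mbstowcs(dst, bytes, dstlen + 1);               // converting pass
      return dst;  // caller frees
    }

    int main() {
      wchar_t* w = InflateToWide("hello");
      std::free(w);
      return 0;
    }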
@@ -5358,10 +5388,13 @@ CData::ToSource(JSContext* cx, uintN argc, jsval *vp)
   // 't.array([n])' for arrays;
   // 'n' for structs, where n = t.name, the struct's name. (We assume this is
   // bound to a variable in the current scope.)
-  nsAutoString source = BuildTypeSource(cx, typeObj, true);
-  source.Append('(');
-  source.Append(BuildDataSource(cx, typeObj, data, false));
-  source.Append(')');
+  AutoString source;
+  BuildTypeSource(cx, typeObj, true, source);
+  AppendString(source, "(");
+  if (!BuildDataSource(cx, typeObj, data, false, source))
+    return JS_FALSE;
+
+  AppendString(source, ")");

   JSString* result = NewUCString(cx, source);
   if (!result)
@@ -5378,17 +5411,17 @@ CData::ToSource(JSContext* cx, uintN argc, jsval *vp)

 JSObject*
 Int64Base::Construct(JSContext* cx,
                      JSObject* proto,
-                     PRUint64 data,
+                     JSUint64 data,
                      bool isUnsigned)
 {
   JSClass* clasp = isUnsigned ?
                    &sUInt64Class :
                    &sInt64Class;

   JSObject* result = JS_NewObject(cx, clasp, proto, JS_GetParent(cx, proto));
   if (!result)
     return NULL;
-  JSAutoTempValueRooter root(cx, result);
+  js::AutoValueRooter root(cx, result);

   // attach the Int64's data
-  PRUint64* buffer = new PRUint64(data);
+  JSUint64* buffer = new JSUint64(data);
   if (!buffer) {
     JS_ReportOutOfMemory(cx);
     return NULL;
   }
@@ -5412,16 +5445,16 @@ Int64Base::Finalize(JSContext* cx, JSObject* obj)
   if (!JS_GetReservedSlot(cx, obj, SLOT_INT64, &slot) || JSVAL_IS_VOID(slot))
     return;

-  delete static_cast<PRUint64*>(JSVAL_TO_PRIVATE(slot));
+  delete static_cast<JSUint64*>(JSVAL_TO_PRIVATE(slot));
 }

-PRUint64
+JSUint64
 Int64Base::GetInt(JSContext* cx, JSObject* obj)
 {
   JS_ASSERT(Int64::IsInt64(cx, obj) || UInt64::IsUInt64(cx, obj));

   jsval slot;
   ASSERT_OK(JS_GetReservedSlot(cx, obj, SLOT_INT64, &slot));
-  return *static_cast<PRUint64*>(JSVAL_TO_PRIVATE(slot));
+  return *static_cast<JSUint64*>(JSVAL_TO_PRIVATE(slot));
 }

 JSBool
@@ -5447,11 +5480,11 @@ Int64Base::ToString(JSContext* cx,
     }
   }

-  nsAutoString intString;
+  AutoString intString;
   if (isUnsigned) {
-    intString = IntegerToString(GetInt(cx, obj), radix);
+    IntegerToString(GetInt(cx, obj), radix, intString);
   } else {
-    intString = IntegerToString(static_cast<PRInt64>(GetInt(cx, obj)), radix);
+    IntegerToString(static_cast<JSInt64>(GetInt(cx, obj)), radix, intString);
   }

   JSString *result = NewUCString(cx, intString);
@@ -5475,15 +5508,15 @@ Int64Base::ToSource(JSContext* cx,
   }

   // Return a decimal string suitable for constructing the number.
-  nsAutoString source;
+  AutoString source;
   if (isUnsigned) {
-    source.Append(NS_LITERAL_STRING("ctypes.UInt64(\""));
-    source.Append(IntegerToString(GetInt(cx, obj), 10));
+    AppendString(source, "ctypes.UInt64(\"");
+    IntegerToString(GetInt(cx, obj), 10, source);
   } else {
-    source.Append(NS_LITERAL_STRING("ctypes.Int64(\""));
-    source.Append(IntegerToString(static_cast<PRInt64>(GetInt(cx, obj)), 10));
+    AppendString(source, "ctypes.Int64(\"");
+    IntegerToString(static_cast<JSInt64>(GetInt(cx, obj)), 10, source);
   }
-  source.Append(NS_LITERAL_STRING("\")"));
+  AppendString(source, "\")");

   JSString *result = NewUCString(cx, source);
   if (!result)
@@ -5506,7 +5539,7 @@ Int64::Construct(JSContext* cx,
     return JS_FALSE;
   }

-  PRInt64 i;
+  JSInt64 i;
   if (!jsvalToBigInteger(cx, argv[0], true, &i))
     return TypeError(cx, "int64", argv[0]);

@@ -5571,8 +5604,8 @@ Int64::Compare(JSContext* cx, uintN argc, jsval* vp)
   JSObject* obj1 = JSVAL_TO_OBJECT(argv[0]);
   JSObject* obj2 = JSVAL_TO_OBJECT(argv[1]);

-  PRInt64 i1 = Int64Base::GetInt(cx, obj1);
-  PRInt64 i2 = Int64Base::GetInt(cx, obj2);
+  JSInt64 i1 = Int64Base::GetInt(cx, obj1);
+  JSInt64 i2 = Int64Base::GetInt(cx, obj2);

   if (i1 == i2)
     JS_SET_RVAL(cx, vp, INT_TO_JSVAL(0));
@@ -5584,7 +5617,7 @@ Int64::Compare(JSContext* cx, uintN argc, jsval* vp)
   return JS_TRUE;
 }

-#define LO_MASK ((PRUint64(1) << 32) - 1)
+#define LO_MASK ((JSUint64(1) << 32) - 1)
 #define INT64_LO(i) ((i) & LO_MASK)
 #define INT64_HI(i) ((i) >> 32)

@@ -5599,8 +5632,8 @@ Int64::Lo(JSContext* cx, uintN argc, jsval* vp)
   }

   JSObject* obj = JSVAL_TO_OBJECT(argv[0]);
-  PRInt64 u = Int64Base::GetInt(cx, obj);
-  jsdouble d = PRUint32(INT64_LO(u));
+  JSInt64 u = Int64Base::GetInt(cx, obj);
+  jsdouble d = JSUint32(INT64_LO(u));

   jsval result;
   if (!JS_NewNumberValue(cx, d, &result))
@@ -5621,8 +5654,8 @@ Int64::Hi(JSContext* cx, uintN argc, jsval* vp)
   }

   JSObject* obj = JSVAL_TO_OBJECT(argv[0]);
-  PRInt64 u = Int64Base::GetInt(cx, obj);
-  jsdouble d = PRInt32(INT64_HI(u));
+  JSInt64 u = Int64Base::GetInt(cx, obj);
+  jsdouble d = JSInt32(INT64_HI(u));

   jsval result;
   if (!JS_NewNumberValue(cx, d, &result))
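LO_MASK keeps the low 32 bits, INT64_HI shifts down the high 32, and Join (next hunk) reverses the split with (hi << 32) + lo. A standalone round-trip check of that arithmetic, using <cstdint> types in place of JSInt64/JSUint32:

    #include <cassert>
    #include <cstdint>

    static const uint64_t LO_MASK = (uint64_t(1) << 32) - 1;

    int main() {
      int64_t value = -1234567890123456789LL;
      uint32_t lo = uint32_t(uint64_t(value) & LO_MASK);  // INT64_LO
      int32_t  hi = int32_t(uint64_t(value) >> 32);       // INT64_HI, signed
      // Join's (hi << 32) + lo, written with unsigned arithmetic so the
      // shift is well defined even for negative hi.
      int64_t joined =
          int64_t((uint64_t(uint32_t(hi)) << 32) | uint64_t(lo));
      assert(joined == value);
      return 0;
    }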
@@ -5641,14 +5674,14 @@ Int64::Join(JSContext* cx, uintN argc, jsval* vp)
   }

   jsval* argv = JS_ARGV(cx, vp);
-  PRInt32 hi;
-  PRUint32 lo;
+  JSInt32 hi;
+  JSUint32 lo;
   if (!jsvalToInteger(cx, argv[0], &hi))
     return TypeError(cx, "int32", argv[0]);
   if (!jsvalToInteger(cx, argv[1], &lo))
     return TypeError(cx, "uint32", argv[1]);

-  PRInt64 i = (PRInt64(hi) << 32) + PRInt64(lo);
+  JSInt64 i = (JSInt64(hi) << 32) + JSInt64(lo);

   // Get Int64.prototype from the function's reserved slot.
   JSObject* callee = JSVAL_TO_OBJECT(JS_ARGV_CALLEE(argv));
@@ -5679,7 +5712,7 @@ UInt64::Construct(JSContext* cx,
     return JS_FALSE;
   }

-  PRUint64 u;
+  JSUint64 u;
   if (!jsvalToBigInteger(cx, argv[0], true, &u))
     return TypeError(cx, "uint64", argv[0]);

@@ -5744,8 +5777,8 @@ UInt64::Compare(JSContext* cx, uintN argc, jsval* vp)
   JSObject* obj1 = JSVAL_TO_OBJECT(argv[0]);
   JSObject* obj2 = JSVAL_TO_OBJECT(argv[1]);

-  PRUint64 u1 = Int64Base::GetInt(cx, obj1);
-  PRUint64 u2 = Int64Base::GetInt(cx, obj2);
+  JSUint64 u1 = Int64Base::GetInt(cx, obj1);
+  JSUint64 u2 = Int64Base::GetInt(cx, obj2);

   if (u1 == u2)
     JS_SET_RVAL(cx, vp, INT_TO_JSVAL(0));
@@ -5768,8 +5801,8 @@ UInt64::Lo(JSContext* cx, uintN argc, jsval* vp)
   }

   JSObject* obj = JSVAL_TO_OBJECT(argv[0]);
-  PRUint64 u = Int64Base::GetInt(cx, obj);
-  jsdouble d = PRUint32(INT64_LO(u));
+  JSUint64 u = Int64Base::GetInt(cx, obj);
+  jsdouble d = JSUint32(INT64_LO(u));

   jsval result;
   if (!JS_NewNumberValue(cx, d, &result))
@@ -5790,8 +5823,8 @@ UInt64::Hi(JSContext* cx, uintN argc, jsval* vp)
   }

   JSObject* obj = JSVAL_TO_OBJECT(argv[0]);
-  PRUint64 u = Int64Base::GetInt(cx, obj);
-  jsdouble d = PRUint32(INT64_HI(u));
+  JSUint64 u = Int64Base::GetInt(cx, obj);
+  jsdouble d = JSUint32(INT64_HI(u));

   jsval result;
   if (!JS_NewNumberValue(cx, d, &result))
@@ -5810,14 +5843,14 @@ UInt64::Join(JSContext* cx, uintN argc, jsval* vp)
   }

   jsval* argv = JS_ARGV(cx, vp);
-  PRUint32 hi;
-  PRUint32 lo;
+  JSUint32 hi;
+  JSUint32 lo;
   if (!jsvalToInteger(cx, argv[0], &hi))
     return TypeError(cx, "uint32_t", argv[0]);
   if (!jsvalToInteger(cx, argv[1], &lo))
     return TypeError(cx, "uint32_t", argv[1]);

-  PRUint64 u = (PRUint64(hi) << 32) + PRUint64(lo);
+  JSUint64 u = (JSUint64(hi) << 32) + JSUint64(lo);

   // Get UInt64.prototype from the function's reserved slot.
   JSObject* callee = JSVAL_TO_OBJECT(JS_ARGV_CALLEE(argv));
diff --git a/js/ctypes/CTypes.h b/js/src/ctypes/CTypes.h
similarity index 70%
rename from js/ctypes/CTypes.h
rename to js/src/ctypes/CTypes.h
index 7215b56514c..58ad3d88a2b 100644
--- a/js/ctypes/CTypes.h
+++ b/js/src/ctypes/CTypes.h
@@ -39,15 +39,168 @@
 #ifndef CTYPES_H
 #define CTYPES_H

+#include "jscntxt.h"
 #include "jsapi.h"
-#include "nsString.h"
-#include "nsTArray.h"
 #include "prlink.h"
 #include "ffi.h"

-namespace mozilla {
+namespace js {
 namespace ctypes {

+/*******************************************************************************
+** Utility classes
+*******************************************************************************/
+
+template<class T>
+class OperatorDelete
+{
+public:
+  static void destroy(T* ptr) { delete ptr; }
+};
+
+template<class T>
+class OperatorArrayDelete
+{
+public:
+  static void destroy(T* ptr) { delete[] ptr; }
+};
+
+// Class that takes ownership of a pointer T*, and calls operator delete or
+// operator delete[] upon destruction.
+template<class T, class DeleteTraits = OperatorDelete<T> >
+class AutoPtr {
+private:
+  typedef AutoPtr<T, DeleteTraits> self_type;
+
+public:
+  // An AutoPtr variant that calls operator delete[] instead.
+  typedef AutoPtr<T, OperatorArrayDelete<T> > Array;
+
+  AutoPtr() : mPtr(NULL) { }
+  explicit AutoPtr(T* ptr) : mPtr(ptr) { }
+  ~AutoPtr() { DeleteTraits::destroy(mPtr); }
+
+  T* operator->() { return mPtr; }
+  bool operator!() { return mPtr == NULL; }
+  T& operator[](size_t i) { return *(mPtr + i); }
+  // Note: we cannot safely provide an 'operator T*()', since this would allow
+  // the compiler to perform implicit conversion from one AutoPtr to another
+  // via the constructor AutoPtr(T*).
+
+  T* get() { return mPtr; }
+  void set(T* other) { JS_ASSERT(mPtr == NULL); mPtr = other; }
+  T* forget() { T* result = mPtr; mPtr = NULL; return result; }
+
+  self_type& operator=(T* rhs) { mPtr = rhs; return *this; }
+
+private:
+  // Do not allow copy construction or assignment from another AutoPtr.
+  template<class U> AutoPtr(AutoPtr<U>&);
+  template<class U> self_type& operator=(AutoPtr<U>& rhs);
+
+  T* mPtr;
+};
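The get()/forget() split above is what lets the .cpp changes keep their early-return error handling: every failure path deletes the allocation, and only the success path relinquishes it. A compilable sketch of that usage pattern with a simplified stand-in class (ScopedPtr and Payload are illustrative, not from the patch):

    struct Payload { int value; };

    // Simplified AutoPtr: deletes on destruction unless forget() was called.
    template<class T>
    class ScopedPtr {
    public:
      explicit ScopedPtr(T* p) : mPtr(p) {}
      ~ScopedPtr() { delete mPtr; }
      T* get() { return mPtr; }
      T* forget() { T* r = mPtr; mPtr = 0; return r; }
    private:
      ScopedPtr(const ScopedPtr&);             // non-copyable, like AutoPtr
      ScopedPtr& operator=(const ScopedPtr&);
      T* mPtr;
    };

    static bool Prepare(Payload* p) { return p->value == 0; }

    static Payload* MakePayload() {
      ScopedPtr<Payload> p(new Payload());
      p.get()->value = 0;
      if (!Prepare(p.get()))
        return 0;          // early return: ScopedPtr deletes the Payload
      return p.forget();   // success: ownership transfers to the caller
    }

    int main() {
      delete MakePayload();
      return 0;
    }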
+
+// Container class for Vector, using SystemAllocPolicy.
+template<class T, size_t N = 0>
+class Array : public Vector<T, N, SystemAllocPolicy>
+{
+};
+
+// String and AutoString classes, based on Vector.
+typedef Vector<jschar, 0, SystemAllocPolicy> String;
+typedef Vector<jschar, 64, SystemAllocPolicy> AutoString;
+
+// Convenience functions to append, insert, and compare Strings.
+template<class T, size_t N, class AP, size_t ArrayLength>
+void
+AppendString(Vector<T, N, AP> &v, const char (&array)[ArrayLength])
+{
+  // Don't include the trailing '\0'.
+  size_t alen = ArrayLength - 1;
+  size_t vlen = v.length();
+  if (!v.resize(vlen + alen))
+    return;
+
+  for (size_t i = 0; i < alen; ++i)
+    v[i + vlen] = array[i];
+}
+
+template<class T, size_t N, size_t M, class AP>
+void
+AppendString(Vector<T, N, AP> &v, Vector<T, M, AP> &w)
+{
+  v.append(w.begin(), w.length());
+}
+
+template<size_t N, class AP>
+void
+AppendString(Vector<jschar, N, AP> &v, JSString* str)
+{
+  JS_ASSERT(str);
+  const jschar* chars = JS_GetStringChars(str);
+  size_t length = JS_GetStringLength(str);
+  v.append(chars, length);
+}
+
+template<class T, size_t N, class AP, size_t ArrayLength>
+void
+PrependString(Vector<T, N, AP> &v, const char (&array)[ArrayLength])
+{
+  // Don't include the trailing '\0'.
+  size_t alen = ArrayLength - 1;
+  size_t vlen = v.length();
+  if (!v.resize(vlen + alen))
+    return;
+
+  // Move vector data forward. This is safe since we've already resized.
+  memmove(v.begin() + alen, v.begin(), vlen * sizeof(T));
+
+  // Copy data to insert.
+  for (size_t i = 0; i < alen; ++i)
+    v[i] = array[i];
+}
+
+template<size_t N, class AP>
+void
+PrependString(Vector<jschar, N, AP> &v, JSString* str)
+{
+  JS_ASSERT(str);
+  size_t vlen = v.length();
+  size_t alen = JS_GetStringLength(str);
+  if (!v.resize(vlen + alen))
+    return;
+
+  // Move vector data forward. This is safe since we've already resized.
+  memmove(v.begin() + alen, v.begin(), vlen * sizeof(jschar));
+
+  // Copy data to insert.
+  memcpy(v.begin(), JS_GetStringChars(str), alen * sizeof(jschar));
+}
+
+template<class T, size_t N, size_t M, class AP>
+bool
+StringsEqual(Vector<T, N, AP> &v, Vector<T, M, AP> &w)
+{
+  if (v.length() != w.length())
+    return false;
+
+  return memcmp(v.begin(), w.begin(), v.length() * sizeof(T)) == 0;
+}
+
+template<size_t N, class AP>
+bool
+StringsEqual(Vector<jschar, N, AP> &v, JSString* str)
+{
+  JS_ASSERT(str);
+  size_t length = JS_GetStringLength(str);
+  if (v.length() != length)
+    return false;
+
+  const jschar* chars = JS_GetStringChars(str);
+  return memcmp(v.begin(), chars, length * sizeof(jschar)) == 0;
+}
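Taken together these helpers replace nsAutoString's method-based API with free functions over js::Vector buffers. The char-array overload is worth a note: because the literal's length is a template parameter, appending a literal costs no strlen at runtime. A compilable approximation (Buffer and AppendLiteral are stand-ins for the real AutoString and AppendString):

    #include <cstddef>
    #include <iostream>
    #include <string>

    typedef std::u16string Buffer;  // stand-in for AutoString

    // The literal's length is deduced at compile time; the trailing '\0'
    // is excluded, exactly as in the header above.
    template<size_t ArrayLength>
    void AppendLiteral(Buffer& v, const char (&array)[ArrayLength]) {
      for (size_t i = 0; i < ArrayLength - 1; ++i)
        v.push_back(char16_t(array[i]));
    }

    int main() {
      Buffer type;
      AppendLiteral(type, "type ");
      AppendLiteral(type, "int32_t");
      std::cout << type.size() << '\n';  // 12 UTF-16 code units
      return 0;
    }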
 /*******************************************************************************
 ** Function and struct API definitions
 *******************************************************************************/
@@ -89,7 +242,14 @@ enum TypeCode {

 struct FieldInfo
 {
-  nsString mName;
+  // We need to provide a copy constructor because of Vector.
+  FieldInfo() {}
+  FieldInfo(const FieldInfo& other)
+  {
+    JS_NOT_REACHED("shouldn't be copy constructing FieldInfo");
+  }
+
+  String mName;
   JSObject* mType;
   size_t mOffset;
 };
@@ -122,12 +282,12 @@ struct FunctionInfo

   // A fixed array of known parameter types, excluding any variadic
   // parameters (if mIsVariadic).
-  nsTArray<JSObject*> mArgTypes;
+  Array<JSObject*> mArgTypes;

   // A variable array of ffi_type*s corresponding to both known parameter
   // types and dynamic (variadic) parameter types. Longer than mArgTypes
   // only if mIsVariadic.
-  nsTArray<ffi_type*> mFFITypes;
+  Array<ffi_type*> mFFITypes;

   // Flag indicating whether the function behaves like a C function with
   // ... as the final formal parameter.
@@ -143,9 +303,6 @@ struct ClosureInfo
   JSObject* thisObj;     // 'this' object to use for the JS function call
   JSObject* jsfnObj;     // JS function
   ffi_closure* closure;  // The C closure itself
-#ifdef DEBUG
-  PRThread* thread;      // The thread the closure was created on
-#endif
 };

 JSBool InitTypeClasses(JSContext* cx, JSObject* parent);
@@ -275,7 +432,7 @@ namespace ArrayType {
 }

 namespace StructType {
-  nsTArray<FieldInfo>* GetFieldInfo(JSContext* cx, JSObject* obj);
+  Array<FieldInfo>* GetFieldInfo(JSContext* cx, JSObject* obj);
   FieldInfo* LookupField(JSContext* cx, JSObject* obj, jsval idval);
 }
diff --git a/js/ctypes/Library.cpp b/js/src/ctypes/Library.cpp
similarity index 78%
rename from js/ctypes/Library.cpp
rename to js/src/ctypes/Library.cpp
index 22f1fd63219..e28ea21c364 100644
--- a/js/ctypes/Library.cpp
+++ b/js/src/ctypes/Library.cpp
@@ -41,12 +41,9 @@
 #include "jscntxt.h"
 #include "Library.h"
 #include "CTypes.h"
-#include "nsServiceManagerUtils.h"
-#include "nsIXPConnect.h"
-#include "nsILocalFile.h"
-#include "nsNativeCharsetUtils.h"
+#include "prlink.h"

-namespace mozilla {
+namespace js {
 namespace ctypes {

 /*******************************************************************************
@@ -88,7 +85,7 @@ Library::Create(JSContext* cx, jsval aPath)
   JSObject* libraryObj = JS_NewObject(cx, &sLibraryClass, NULL, NULL);
   if (!libraryObj)
     return NULL;
-  JSAutoTempValueRooter root(cx, libraryObj);
+  js::AutoValueRooter root(cx, libraryObj);

   // initialize the library
   if (!JS_SetReservedSlot(cx, libraryObj, SLOT_LIBRARY, PRIVATE_TO_JSVAL(NULL)))
@@ -98,56 +95,37 @@ Library::Create(JSContext* cx, jsval aPath)
   if (!JS_DefineFunctions(cx, libraryObj, sLibraryFunctions))
     return NULL;

-  nsresult rv;
-  PRLibrary* library;
+  if (!JSVAL_IS_STRING(aPath)) {
+    JS_ReportError(cx, "open takes a string argument");
+    return NULL;
+  }

-  // get the path argument. we accept either an nsILocalFile or a string path.
-  // determine which we have...
-  if (JSVAL_IS_STRING(aPath)) {
-    const PRUnichar* path = reinterpret_cast<const PRUnichar*>(
-      JS_GetStringCharsZ(cx, JSVAL_TO_STRING(aPath)));
-    if (!path)
-      return NULL;
-
-    // We don't use nsILocalFile, because it doesn't use the system search
-    // rules when resolving library path.
-    PRLibSpec libSpec;
+  PRLibSpec libSpec;
 #ifdef XP_WIN
-    // On Windows, converting to native charset may corrupt path string.
-    // So, we have to use Unicode path directly.
-    libSpec.value.pathname_u = path;
-    libSpec.type = PR_LibSpec_PathnameU;
+  // On Windows, converting to native charset may corrupt path string.
+  // So, we have to use Unicode path directly.
+  const PRUnichar* path = reinterpret_cast<const PRUnichar*>(
+    JS_GetStringCharsZ(cx, JSVAL_TO_STRING(aPath)));
+  if (!path)
+    return NULL;
+
+  libSpec.value.pathname_u = path;
+  libSpec.type = PR_LibSpec_PathnameU;
 #else
-    nsCAutoString nativePath;
-    NS_CopyUnicodeToNative(nsDependentString(path), nativePath);
-    libSpec.value.pathname = nativePath.get();
-    libSpec.type = PR_LibSpec_Pathname;
+  // Assume the JS string is not UTF-16, but is in the platform's native
+  // charset. (This basically means ASCII.) It would be nice to have a
+  // UTF-16 -> native charset implementation available. :(
+  const char* path = JS_GetStringBytesZ(cx, JSVAL_TO_STRING(aPath));
+  if (!path)
+    return NULL;
+
+  libSpec.value.pathname = path;
+  libSpec.type = PR_LibSpec_Pathname;
 #endif
-    library = PR_LoadLibraryWithFlags(libSpec, 0);
-    if (!library) {
-      JS_ReportError(cx, "couldn't open library");
-      return NULL;
-    }
-  } else if (!JSVAL_IS_PRIMITIVE(aPath)) {
-    nsCOMPtr<nsIXPConnect> xpc = do_GetService(nsIXPConnect::GetCID());
-
-    nsISupports* file = xpc->GetNativeOfWrapper(cx, JSVAL_TO_OBJECT(aPath));
-    nsCOMPtr<nsILocalFile> localFile = do_QueryInterface(file);
-    if (!localFile) {
-      JS_ReportError(cx, "open takes a string or nsILocalFile argument");
-      return NULL;
-    }
-
-    rv = localFile->Load(&library);
-    if (NS_FAILED(rv)) {
-      JS_ReportError(cx, "couldn't open library");
-      return NULL;
-    }
-
-  } else {
-    // don't convert the argument
-    JS_ReportError(cx, "open takes a string or nsIFile argument");
+  PRLibrary* library = PR_LoadLibraryWithFlags(libSpec, 0);
+  if (!library) {
+    JS_ReportError(cx, "couldn't open library");
     return NULL;
   }
@@ -264,7 +242,7 @@ Library::Declare(JSContext* cx, uintN argc, jsval* vp)
                          argv[1], argv[2], &argv[3], argc - 3);
     if (!typeObj)
       return JS_FALSE;
-    JSAutoTempValueRooter root(cx, typeObj);
+    js::AutoValueRooter root(cx, typeObj);

     JSObject* fn = CData::Create(cx, typeObj, obj, &func, true);
     if (!fn)
diff --git a/js/ctypes/Library.h b/js/src/ctypes/Library.h
similarity index 98%
rename from js/ctypes/Library.h
rename to js/src/ctypes/Library.h
index 1c97701f17a..08d6242a3db 100644
--- a/js/ctypes/Library.h
+++ b/js/src/ctypes/Library.h
@@ -40,9 +40,11 @@
 #ifndef LIBRARY_H
 #define LIBRARY_H

+#include "jsapi.h"
+
 struct PRLibrary;

-namespace mozilla {
+namespace js {
 namespace ctypes {

 enum LibrarySlot {
diff --git a/js/ctypes/ctypes.msg b/js/src/ctypes/ctypes.msg
similarity index 100%
rename from js/ctypes/ctypes.msg
rename to js/src/ctypes/ctypes.msg
diff --git a/js/ctypes/libffi.patch b/js/src/ctypes/libffi.patch
similarity index 100%
rename from js/ctypes/libffi.patch
rename to js/src/ctypes/libffi.patch
diff --git a/js/ctypes/libffi/ChangeLog b/js/src/ctypes/libffi/ChangeLog
similarity index 100%
rename from js/ctypes/libffi/ChangeLog
rename to js/src/ctypes/libffi/ChangeLog
diff --git a/js/ctypes/libffi/ChangeLog.libffi b/js/src/ctypes/libffi/ChangeLog.libffi
similarity index 100%
rename from js/ctypes/libffi/ChangeLog.libffi
rename to js/src/ctypes/libffi/ChangeLog.libffi
diff --git a/js/ctypes/libffi/ChangeLog.libgcj b/js/src/ctypes/libffi/ChangeLog.libgcj
similarity index 100%
rename from js/ctypes/libffi/ChangeLog.libgcj
rename to js/src/ctypes/libffi/ChangeLog.libgcj
diff --git a/js/ctypes/libffi/ChangeLog.v1 b/js/src/ctypes/libffi/ChangeLog.v1
similarity index 100%
rename from js/ctypes/libffi/ChangeLog.v1
rename to js/src/ctypes/libffi/ChangeLog.v1
diff --git a/js/ctypes/libffi/LICENSE b/js/src/ctypes/libffi/LICENSE
similarity index 100%
rename from js/ctypes/libffi/LICENSE
rename to js/src/ctypes/libffi/LICENSE
diff --git a/js/ctypes/libffi/Makefile.am b/js/src/ctypes/libffi/Makefile.am
similarity index 100%
rename from js/ctypes/libffi/Makefile.am
rename to js/src/ctypes/libffi/Makefile.am
diff --git a/js/ctypes/libffi/Makefile.in b/js/src/ctypes/libffi/Makefile.in
similarity index 100%
rename from js/ctypes/libffi/Makefile.in
rename to js/src/ctypes/libffi/Makefile.in
diff --git a/js/ctypes/libffi/README b/js/src/ctypes/libffi/README
similarity index 100%
rename from js/ctypes/libffi/README
rename to js/src/ctypes/libffi/README
diff --git a/js/ctypes/libffi/acinclude.m4 b/js/src/ctypes/libffi/acinclude.m4
similarity index 100%
rename from js/ctypes/libffi/acinclude.m4
rename to js/src/ctypes/libffi/acinclude.m4
diff --git a/js/ctypes/libffi/aclocal.m4 b/js/src/ctypes/libffi/aclocal.m4
similarity index 100%
rename from js/ctypes/libffi/aclocal.m4
rename to js/src/ctypes/libffi/aclocal.m4
diff --git a/js/ctypes/libffi/compile b/js/src/ctypes/libffi/compile
similarity index 100%
rename from js/ctypes/libffi/compile
rename to js/src/ctypes/libffi/compile
diff --git a/js/ctypes/libffi/config.guess b/js/src/ctypes/libffi/config.guess
similarity index 100%
rename from js/ctypes/libffi/config.guess
rename to js/src/ctypes/libffi/config.guess
diff --git a/js/ctypes/libffi/config.sub b/js/src/ctypes/libffi/config.sub
similarity index 100%
rename from js/ctypes/libffi/config.sub
rename to js/src/ctypes/libffi/config.sub
diff --git a/js/ctypes/libffi/configure b/js/src/ctypes/libffi/configure
similarity index 100%
rename from js/ctypes/libffi/configure
rename to js/src/ctypes/libffi/configure
diff --git a/js/ctypes/libffi/configure.ac b/js/src/ctypes/libffi/configure.ac
similarity index 100%
rename from js/ctypes/libffi/configure.ac
rename to js/src/ctypes/libffi/configure.ac
diff --git a/js/ctypes/libffi/configure.host b/js/src/ctypes/libffi/configure.host
similarity index 100%
rename from js/ctypes/libffi/configure.host
rename to js/src/ctypes/libffi/configure.host
diff --git a/js/ctypes/libffi/depcomp b/js/src/ctypes/libffi/depcomp
similarity index 100%
rename from js/ctypes/libffi/depcomp
rename to js/src/ctypes/libffi/depcomp
diff --git a/js/ctypes/libffi/doc/libffi.info b/js/src/ctypes/libffi/doc/libffi.info
similarity index 100%
rename from js/ctypes/libffi/doc/libffi.info
rename to js/src/ctypes/libffi/doc/libffi.info
diff --git a/js/ctypes/libffi/doc/libffi.texi b/js/src/ctypes/libffi/doc/libffi.texi
similarity index 100%
rename from js/ctypes/libffi/doc/libffi.texi
rename to js/src/ctypes/libffi/doc/libffi.texi
diff --git a/js/ctypes/libffi/doc/stamp-vti b/js/src/ctypes/libffi/doc/stamp-vti
similarity index 100%
rename from js/ctypes/libffi/doc/stamp-vti
rename to js/src/ctypes/libffi/doc/stamp-vti
diff --git a/js/ctypes/libffi/doc/version.texi b/js/src/ctypes/libffi/doc/version.texi
similarity index 100%
rename from js/ctypes/libffi/doc/version.texi
rename to js/src/ctypes/libffi/doc/version.texi
diff --git a/js/ctypes/libffi/fficonfig.h.in b/js/src/ctypes/libffi/fficonfig.h.in
similarity index 100%
rename from js/ctypes/libffi/fficonfig.h.in
rename to js/src/ctypes/libffi/fficonfig.h.in
diff --git a/js/ctypes/libffi/include/Makefile.am b/js/src/ctypes/libffi/include/Makefile.am
similarity index 100%
rename from js/ctypes/libffi/include/Makefile.am
rename to js/src/ctypes/libffi/include/Makefile.am
diff --git a/js/ctypes/libffi/include/Makefile.in b/js/src/ctypes/libffi/include/Makefile.in
similarity index 100%
rename from js/ctypes/libffi/include/Makefile.in
rename to js/src/ctypes/libffi/include/Makefile.in
diff --git a/js/ctypes/libffi/include/ffi.h.in b/js/src/ctypes/libffi/include/ffi.h.in
similarity index 100%
rename from js/ctypes/libffi/include/ffi.h.in
rename to js/src/ctypes/libffi/include/ffi.h.in
diff --git a/js/ctypes/libffi/include/ffi_common.h b/js/src/ctypes/libffi/include/ffi_common.h
similarity index 100%
rename from js/ctypes/libffi/include/ffi_common.h
rename to js/src/ctypes/libffi/include/ffi_common.h
diff --git a/js/ctypes/libffi/install-sh b/js/src/ctypes/libffi/install-sh
similarity index 100%
rename from js/ctypes/libffi/install-sh
rename to js/src/ctypes/libffi/install-sh
diff --git a/js/ctypes/libffi/libffi.pc.in b/js/src/ctypes/libffi/libffi.pc.in
similarity index 100%
rename from js/ctypes/libffi/libffi.pc.in
rename to js/src/ctypes/libffi/libffi.pc.in
diff --git a/js/ctypes/libffi/libtool-version b/js/src/ctypes/libffi/libtool-version
similarity index 100%
rename from js/ctypes/libffi/libtool-version
rename to js/src/ctypes/libffi/libtool-version
diff --git a/js/ctypes/libffi/ltmain.sh b/js/src/ctypes/libffi/ltmain.sh
similarity index 100%
rename from js/ctypes/libffi/ltmain.sh
rename to js/src/ctypes/libffi/ltmain.sh
diff --git a/js/ctypes/libffi/m4/libtool.m4 b/js/src/ctypes/libffi/m4/libtool.m4
similarity index 100%
rename from js/ctypes/libffi/m4/libtool.m4
rename to js/src/ctypes/libffi/m4/libtool.m4
diff --git a/js/ctypes/libffi/m4/ltoptions.m4 b/js/src/ctypes/libffi/m4/ltoptions.m4
similarity index 100%
rename from js/ctypes/libffi/m4/ltoptions.m4
rename to js/src/ctypes/libffi/m4/ltoptions.m4
diff --git a/js/ctypes/libffi/m4/ltsugar.m4 b/js/src/ctypes/libffi/m4/ltsugar.m4
similarity index 100%
rename from js/ctypes/libffi/m4/ltsugar.m4
rename to js/src/ctypes/libffi/m4/ltsugar.m4
diff --git a/js/ctypes/libffi/m4/ltversion.m4 b/js/src/ctypes/libffi/m4/ltversion.m4
similarity index 100%
rename from js/ctypes/libffi/m4/ltversion.m4
rename to js/src/ctypes/libffi/m4/ltversion.m4
diff --git a/js/ctypes/libffi/m4/lt~obsolete.m4 b/js/src/ctypes/libffi/m4/lt~obsolete.m4
similarity index 100%
rename from js/ctypes/libffi/m4/lt~obsolete.m4
rename to js/src/ctypes/libffi/m4/lt~obsolete.m4
diff --git a/js/ctypes/libffi/man/Makefile.am b/js/src/ctypes/libffi/man/Makefile.am
similarity index 100%
rename from js/ctypes/libffi/man/Makefile.am
rename to js/src/ctypes/libffi/man/Makefile.am
diff --git a/js/ctypes/libffi/man/Makefile.in b/js/src/ctypes/libffi/man/Makefile.in
similarity index 100%
rename from js/ctypes/libffi/man/Makefile.in
rename to js/src/ctypes/libffi/man/Makefile.in
diff --git a/js/ctypes/libffi/man/ffi.3 b/js/src/ctypes/libffi/man/ffi.3
similarity index 100%
rename from js/ctypes/libffi/man/ffi.3
rename to js/src/ctypes/libffi/man/ffi.3
diff --git a/js/ctypes/libffi/man/ffi_call.3 b/js/src/ctypes/libffi/man/ffi_call.3
similarity index 100%
rename from js/ctypes/libffi/man/ffi_call.3
rename to js/src/ctypes/libffi/man/ffi_call.3
diff --git a/js/ctypes/libffi/man/ffi_prep_cif.3 b/js/src/ctypes/libffi/man/ffi_prep_cif.3
similarity index 100%
rename from js/ctypes/libffi/man/ffi_prep_cif.3
rename to js/src/ctypes/libffi/man/ffi_prep_cif.3
diff --git a/js/ctypes/libffi/mdate-sh b/js/src/ctypes/libffi/mdate-sh
similarity index 100%
rename from js/ctypes/libffi/mdate-sh
rename to js/src/ctypes/libffi/mdate-sh
diff --git a/js/ctypes/libffi/missing b/js/src/ctypes/libffi/missing
similarity index 100%
rename from js/ctypes/libffi/missing
rename to js/src/ctypes/libffi/missing
diff --git a/js/ctypes/libffi/msvcc.sh b/js/src/ctypes/libffi/msvcc.sh
similarity index 100%
rename from js/ctypes/libffi/msvcc.sh
rename to js/src/ctypes/libffi/msvcc.sh
diff --git a/js/ctypes/libffi/src/alpha/ffi.c b/js/src/ctypes/libffi/src/alpha/ffi.c
similarity index 100%
rename from js/ctypes/libffi/src/alpha/ffi.c
rename to js/src/ctypes/libffi/src/alpha/ffi.c
diff --git a/js/ctypes/libffi/src/alpha/ffitarget.h b/js/src/ctypes/libffi/src/alpha/ffitarget.h
similarity index 100%
rename from js/ctypes/libffi/src/alpha/ffitarget.h
rename to js/src/ctypes/libffi/src/alpha/ffitarget.h
diff --git a/js/ctypes/libffi/src/alpha/osf.S b/js/src/ctypes/libffi/src/alpha/osf.S
similarity index 100%
rename from js/ctypes/libffi/src/alpha/osf.S
rename to js/src/ctypes/libffi/src/alpha/osf.S
diff --git a/js/ctypes/libffi/src/arm/ffi.c b/js/src/ctypes/libffi/src/arm/ffi.c
similarity index 100%
rename from js/ctypes/libffi/src/arm/ffi.c
rename to js/src/ctypes/libffi/src/arm/ffi.c
diff --git a/js/ctypes/libffi/src/arm/ffitarget.h b/js/src/ctypes/libffi/src/arm/ffitarget.h
similarity index 100%
rename from js/ctypes/libffi/src/arm/ffitarget.h
rename to js/src/ctypes/libffi/src/arm/ffitarget.h
diff --git a/js/ctypes/libffi/src/arm/sysv.S b/js/src/ctypes/libffi/src/arm/sysv.S
similarity index 100%
rename from js/ctypes/libffi/src/arm/sysv.S
rename to js/src/ctypes/libffi/src/arm/sysv.S
diff --git a/js/ctypes/libffi/src/avr32/ffi.c b/js/src/ctypes/libffi/src/avr32/ffi.c
similarity index 100%
rename from js/ctypes/libffi/src/avr32/ffi.c
rename to js/src/ctypes/libffi/src/avr32/ffi.c
diff --git a/js/ctypes/libffi/src/avr32/ffitarget.h b/js/src/ctypes/libffi/src/avr32/ffitarget.h
similarity index 100%
rename from js/ctypes/libffi/src/avr32/ffitarget.h
rename to js/src/ctypes/libffi/src/avr32/ffitarget.h
diff --git a/js/ctypes/libffi/src/avr32/sysv.S b/js/src/ctypes/libffi/src/avr32/sysv.S
similarity index 100%
rename from js/ctypes/libffi/src/avr32/sysv.S
rename to js/src/ctypes/libffi/src/avr32/sysv.S
diff --git a/js/ctypes/libffi/src/closures.c b/js/src/ctypes/libffi/src/closures.c
similarity index 100%
rename from js/ctypes/libffi/src/closures.c
rename to js/src/ctypes/libffi/src/closures.c
diff --git a/js/ctypes/libffi/src/cris/ffi.c b/js/src/ctypes/libffi/src/cris/ffi.c
similarity index 100%
rename from js/ctypes/libffi/src/cris/ffi.c
rename to js/src/ctypes/libffi/src/cris/ffi.c
diff --git a/js/ctypes/libffi/src/cris/ffitarget.h b/js/src/ctypes/libffi/src/cris/ffitarget.h
similarity index 100%
rename from js/ctypes/libffi/src/cris/ffitarget.h
rename to js/src/ctypes/libffi/src/cris/ffitarget.h
diff --git a/js/ctypes/libffi/src/cris/sysv.S b/js/src/ctypes/libffi/src/cris/sysv.S
similarity index 100%
rename from js/ctypes/libffi/src/cris/sysv.S
rename to js/src/ctypes/libffi/src/cris/sysv.S
diff --git a/js/ctypes/libffi/src/debug.c b/js/src/ctypes/libffi/src/debug.c
similarity index 100%
rename from js/ctypes/libffi/src/debug.c
rename to js/src/ctypes/libffi/src/debug.c
diff --git a/js/ctypes/libffi/src/dlmalloc.c b/js/src/ctypes/libffi/src/dlmalloc.c
similarity index 100%
rename from js/ctypes/libffi/src/dlmalloc.c
rename to js/src/ctypes/libffi/src/dlmalloc.c
diff --git a/js/ctypes/libffi/src/frv/eabi.S b/js/src/ctypes/libffi/src/frv/eabi.S
similarity index 100%
rename from js/ctypes/libffi/src/frv/eabi.S
rename to js/src/ctypes/libffi/src/frv/eabi.S
diff --git a/js/ctypes/libffi/src/frv/ffi.c b/js/src/ctypes/libffi/src/frv/ffi.c
similarity index 100%
rename from js/ctypes/libffi/src/frv/ffi.c
rename to js/src/ctypes/libffi/src/frv/ffi.c
diff --git a/js/ctypes/libffi/src/frv/ffitarget.h b/js/src/ctypes/libffi/src/frv/ffitarget.h similarity index 100% rename from js/ctypes/libffi/src/frv/ffitarget.h rename to js/src/ctypes/libffi/src/frv/ffitarget.h diff --git a/js/ctypes/libffi/src/ia64/ffi.c b/js/src/ctypes/libffi/src/ia64/ffi.c similarity index 100% rename from js/ctypes/libffi/src/ia64/ffi.c rename to js/src/ctypes/libffi/src/ia64/ffi.c diff --git a/js/ctypes/libffi/src/ia64/ffitarget.h b/js/src/ctypes/libffi/src/ia64/ffitarget.h similarity index 100% rename from js/ctypes/libffi/src/ia64/ffitarget.h rename to js/src/ctypes/libffi/src/ia64/ffitarget.h diff --git a/js/ctypes/libffi/src/ia64/ia64_flags.h b/js/src/ctypes/libffi/src/ia64/ia64_flags.h similarity index 100% rename from js/ctypes/libffi/src/ia64/ia64_flags.h rename to js/src/ctypes/libffi/src/ia64/ia64_flags.h diff --git a/js/ctypes/libffi/src/ia64/unix.S b/js/src/ctypes/libffi/src/ia64/unix.S similarity index 100% rename from js/ctypes/libffi/src/ia64/unix.S rename to js/src/ctypes/libffi/src/ia64/unix.S diff --git a/js/ctypes/libffi/src/java_raw_api.c b/js/src/ctypes/libffi/src/java_raw_api.c similarity index 100% rename from js/ctypes/libffi/src/java_raw_api.c rename to js/src/ctypes/libffi/src/java_raw_api.c diff --git a/js/ctypes/libffi/src/m32r/ffi.c b/js/src/ctypes/libffi/src/m32r/ffi.c similarity index 100% rename from js/ctypes/libffi/src/m32r/ffi.c rename to js/src/ctypes/libffi/src/m32r/ffi.c diff --git a/js/ctypes/libffi/src/m32r/ffitarget.h b/js/src/ctypes/libffi/src/m32r/ffitarget.h similarity index 100% rename from js/ctypes/libffi/src/m32r/ffitarget.h rename to js/src/ctypes/libffi/src/m32r/ffitarget.h diff --git a/js/ctypes/libffi/src/m32r/sysv.S b/js/src/ctypes/libffi/src/m32r/sysv.S similarity index 100% rename from js/ctypes/libffi/src/m32r/sysv.S rename to js/src/ctypes/libffi/src/m32r/sysv.S diff --git a/js/ctypes/libffi/src/m68k/ffi.c b/js/src/ctypes/libffi/src/m68k/ffi.c similarity index 100% rename from js/ctypes/libffi/src/m68k/ffi.c rename to js/src/ctypes/libffi/src/m68k/ffi.c diff --git a/js/ctypes/libffi/src/m68k/ffitarget.h b/js/src/ctypes/libffi/src/m68k/ffitarget.h similarity index 100% rename from js/ctypes/libffi/src/m68k/ffitarget.h rename to js/src/ctypes/libffi/src/m68k/ffitarget.h diff --git a/js/ctypes/libffi/src/m68k/sysv.S b/js/src/ctypes/libffi/src/m68k/sysv.S similarity index 100% rename from js/ctypes/libffi/src/m68k/sysv.S rename to js/src/ctypes/libffi/src/m68k/sysv.S diff --git a/js/ctypes/libffi/src/mips/ffi.c b/js/src/ctypes/libffi/src/mips/ffi.c similarity index 100% rename from js/ctypes/libffi/src/mips/ffi.c rename to js/src/ctypes/libffi/src/mips/ffi.c diff --git a/js/ctypes/libffi/src/mips/ffitarget.h b/js/src/ctypes/libffi/src/mips/ffitarget.h similarity index 100% rename from js/ctypes/libffi/src/mips/ffitarget.h rename to js/src/ctypes/libffi/src/mips/ffitarget.h diff --git a/js/ctypes/libffi/src/mips/n32.S b/js/src/ctypes/libffi/src/mips/n32.S similarity index 100% rename from js/ctypes/libffi/src/mips/n32.S rename to js/src/ctypes/libffi/src/mips/n32.S diff --git a/js/ctypes/libffi/src/mips/o32.S b/js/src/ctypes/libffi/src/mips/o32.S similarity index 100% rename from js/ctypes/libffi/src/mips/o32.S rename to js/src/ctypes/libffi/src/mips/o32.S diff --git a/js/ctypes/libffi/src/moxie/eabi.S b/js/src/ctypes/libffi/src/moxie/eabi.S similarity index 100% rename from js/ctypes/libffi/src/moxie/eabi.S rename to js/src/ctypes/libffi/src/moxie/eabi.S diff --git a/js/ctypes/libffi/src/moxie/ffi.c 
b/js/src/ctypes/libffi/src/moxie/ffi.c similarity index 100% rename from js/ctypes/libffi/src/moxie/ffi.c rename to js/src/ctypes/libffi/src/moxie/ffi.c diff --git a/js/ctypes/libffi/src/moxie/ffitarget.h b/js/src/ctypes/libffi/src/moxie/ffitarget.h similarity index 100% rename from js/ctypes/libffi/src/moxie/ffitarget.h rename to js/src/ctypes/libffi/src/moxie/ffitarget.h diff --git a/js/ctypes/libffi/src/pa/ffi.c b/js/src/ctypes/libffi/src/pa/ffi.c similarity index 100% rename from js/ctypes/libffi/src/pa/ffi.c rename to js/src/ctypes/libffi/src/pa/ffi.c diff --git a/js/ctypes/libffi/src/pa/ffitarget.h b/js/src/ctypes/libffi/src/pa/ffitarget.h similarity index 100% rename from js/ctypes/libffi/src/pa/ffitarget.h rename to js/src/ctypes/libffi/src/pa/ffitarget.h diff --git a/js/ctypes/libffi/src/pa/hpux32.S b/js/src/ctypes/libffi/src/pa/hpux32.S similarity index 100% rename from js/ctypes/libffi/src/pa/hpux32.S rename to js/src/ctypes/libffi/src/pa/hpux32.S diff --git a/js/ctypes/libffi/src/pa/linux.S b/js/src/ctypes/libffi/src/pa/linux.S similarity index 100% rename from js/ctypes/libffi/src/pa/linux.S rename to js/src/ctypes/libffi/src/pa/linux.S diff --git a/js/ctypes/libffi/src/powerpc/aix.S b/js/src/ctypes/libffi/src/powerpc/aix.S similarity index 100% rename from js/ctypes/libffi/src/powerpc/aix.S rename to js/src/ctypes/libffi/src/powerpc/aix.S diff --git a/js/ctypes/libffi/src/powerpc/aix_closure.S b/js/src/ctypes/libffi/src/powerpc/aix_closure.S similarity index 100% rename from js/ctypes/libffi/src/powerpc/aix_closure.S rename to js/src/ctypes/libffi/src/powerpc/aix_closure.S diff --git a/js/ctypes/libffi/src/powerpc/asm.h b/js/src/ctypes/libffi/src/powerpc/asm.h similarity index 100% rename from js/ctypes/libffi/src/powerpc/asm.h rename to js/src/ctypes/libffi/src/powerpc/asm.h diff --git a/js/ctypes/libffi/src/powerpc/darwin.S b/js/src/ctypes/libffi/src/powerpc/darwin.S similarity index 100% rename from js/ctypes/libffi/src/powerpc/darwin.S rename to js/src/ctypes/libffi/src/powerpc/darwin.S diff --git a/js/ctypes/libffi/src/powerpc/darwin_closure.S b/js/src/ctypes/libffi/src/powerpc/darwin_closure.S similarity index 100% rename from js/ctypes/libffi/src/powerpc/darwin_closure.S rename to js/src/ctypes/libffi/src/powerpc/darwin_closure.S diff --git a/js/ctypes/libffi/src/powerpc/ffi.c b/js/src/ctypes/libffi/src/powerpc/ffi.c similarity index 100% rename from js/ctypes/libffi/src/powerpc/ffi.c rename to js/src/ctypes/libffi/src/powerpc/ffi.c diff --git a/js/ctypes/libffi/src/powerpc/ffi_darwin.c b/js/src/ctypes/libffi/src/powerpc/ffi_darwin.c similarity index 100% rename from js/ctypes/libffi/src/powerpc/ffi_darwin.c rename to js/src/ctypes/libffi/src/powerpc/ffi_darwin.c diff --git a/js/ctypes/libffi/src/powerpc/ffitarget.h b/js/src/ctypes/libffi/src/powerpc/ffitarget.h similarity index 100% rename from js/ctypes/libffi/src/powerpc/ffitarget.h rename to js/src/ctypes/libffi/src/powerpc/ffitarget.h diff --git a/js/ctypes/libffi/src/powerpc/linux64.S b/js/src/ctypes/libffi/src/powerpc/linux64.S similarity index 100% rename from js/ctypes/libffi/src/powerpc/linux64.S rename to js/src/ctypes/libffi/src/powerpc/linux64.S diff --git a/js/ctypes/libffi/src/powerpc/linux64_closure.S b/js/src/ctypes/libffi/src/powerpc/linux64_closure.S similarity index 100% rename from js/ctypes/libffi/src/powerpc/linux64_closure.S rename to js/src/ctypes/libffi/src/powerpc/linux64_closure.S diff --git a/js/ctypes/libffi/src/powerpc/ppc_closure.S b/js/src/ctypes/libffi/src/powerpc/ppc_closure.S 
similarity index 100% rename from js/ctypes/libffi/src/powerpc/ppc_closure.S rename to js/src/ctypes/libffi/src/powerpc/ppc_closure.S diff --git a/js/ctypes/libffi/src/powerpc/sysv.S b/js/src/ctypes/libffi/src/powerpc/sysv.S similarity index 100% rename from js/ctypes/libffi/src/powerpc/sysv.S rename to js/src/ctypes/libffi/src/powerpc/sysv.S diff --git a/js/ctypes/libffi/src/prep_cif.c b/js/src/ctypes/libffi/src/prep_cif.c similarity index 100% rename from js/ctypes/libffi/src/prep_cif.c rename to js/src/ctypes/libffi/src/prep_cif.c diff --git a/js/ctypes/libffi/src/raw_api.c b/js/src/ctypes/libffi/src/raw_api.c similarity index 100% rename from js/ctypes/libffi/src/raw_api.c rename to js/src/ctypes/libffi/src/raw_api.c diff --git a/js/ctypes/libffi/src/s390/ffi.c b/js/src/ctypes/libffi/src/s390/ffi.c similarity index 100% rename from js/ctypes/libffi/src/s390/ffi.c rename to js/src/ctypes/libffi/src/s390/ffi.c diff --git a/js/ctypes/libffi/src/s390/ffitarget.h b/js/src/ctypes/libffi/src/s390/ffitarget.h similarity index 100% rename from js/ctypes/libffi/src/s390/ffitarget.h rename to js/src/ctypes/libffi/src/s390/ffitarget.h diff --git a/js/ctypes/libffi/src/s390/sysv.S b/js/src/ctypes/libffi/src/s390/sysv.S similarity index 100% rename from js/ctypes/libffi/src/s390/sysv.S rename to js/src/ctypes/libffi/src/s390/sysv.S diff --git a/js/ctypes/libffi/src/sh/ffi.c b/js/src/ctypes/libffi/src/sh/ffi.c similarity index 100% rename from js/ctypes/libffi/src/sh/ffi.c rename to js/src/ctypes/libffi/src/sh/ffi.c diff --git a/js/ctypes/libffi/src/sh/ffitarget.h b/js/src/ctypes/libffi/src/sh/ffitarget.h similarity index 100% rename from js/ctypes/libffi/src/sh/ffitarget.h rename to js/src/ctypes/libffi/src/sh/ffitarget.h diff --git a/js/ctypes/libffi/src/sh/sysv.S b/js/src/ctypes/libffi/src/sh/sysv.S similarity index 100% rename from js/ctypes/libffi/src/sh/sysv.S rename to js/src/ctypes/libffi/src/sh/sysv.S diff --git a/js/ctypes/libffi/src/sh64/ffi.c b/js/src/ctypes/libffi/src/sh64/ffi.c similarity index 100% rename from js/ctypes/libffi/src/sh64/ffi.c rename to js/src/ctypes/libffi/src/sh64/ffi.c diff --git a/js/ctypes/libffi/src/sh64/ffitarget.h b/js/src/ctypes/libffi/src/sh64/ffitarget.h similarity index 100% rename from js/ctypes/libffi/src/sh64/ffitarget.h rename to js/src/ctypes/libffi/src/sh64/ffitarget.h diff --git a/js/ctypes/libffi/src/sh64/sysv.S b/js/src/ctypes/libffi/src/sh64/sysv.S similarity index 100% rename from js/ctypes/libffi/src/sh64/sysv.S rename to js/src/ctypes/libffi/src/sh64/sysv.S diff --git a/js/ctypes/libffi/src/sparc/ffi.c b/js/src/ctypes/libffi/src/sparc/ffi.c similarity index 100% rename from js/ctypes/libffi/src/sparc/ffi.c rename to js/src/ctypes/libffi/src/sparc/ffi.c diff --git a/js/ctypes/libffi/src/sparc/ffitarget.h b/js/src/ctypes/libffi/src/sparc/ffitarget.h similarity index 100% rename from js/ctypes/libffi/src/sparc/ffitarget.h rename to js/src/ctypes/libffi/src/sparc/ffitarget.h diff --git a/js/ctypes/libffi/src/sparc/v8.S b/js/src/ctypes/libffi/src/sparc/v8.S similarity index 100% rename from js/ctypes/libffi/src/sparc/v8.S rename to js/src/ctypes/libffi/src/sparc/v8.S diff --git a/js/ctypes/libffi/src/sparc/v9.S b/js/src/ctypes/libffi/src/sparc/v9.S similarity index 100% rename from js/ctypes/libffi/src/sparc/v9.S rename to js/src/ctypes/libffi/src/sparc/v9.S diff --git a/js/ctypes/libffi/src/types.c b/js/src/ctypes/libffi/src/types.c similarity index 100% rename from js/ctypes/libffi/src/types.c rename to js/src/ctypes/libffi/src/types.c diff --git 
diff --git a/js/ctypes/libffi/src/x86/darwin.S b/js/src/ctypes/libffi/src/x86/darwin.S
similarity index 100%
rename from js/ctypes/libffi/src/x86/darwin.S
rename to js/src/ctypes/libffi/src/x86/darwin.S
diff --git a/js/ctypes/libffi/src/x86/darwin64.S b/js/src/ctypes/libffi/src/x86/darwin64.S
similarity index 100%
rename from js/ctypes/libffi/src/x86/darwin64.S
rename to js/src/ctypes/libffi/src/x86/darwin64.S
diff --git a/js/ctypes/libffi/src/x86/ffi.c b/js/src/ctypes/libffi/src/x86/ffi.c
similarity index 100%
rename from js/ctypes/libffi/src/x86/ffi.c
rename to js/src/ctypes/libffi/src/x86/ffi.c
diff --git a/js/ctypes/libffi/src/x86/ffi64.c b/js/src/ctypes/libffi/src/x86/ffi64.c
similarity index 100%
rename from js/ctypes/libffi/src/x86/ffi64.c
rename to js/src/ctypes/libffi/src/x86/ffi64.c
diff --git a/js/ctypes/libffi/src/x86/ffitarget.h b/js/src/ctypes/libffi/src/x86/ffitarget.h
similarity index 100%
rename from js/ctypes/libffi/src/x86/ffitarget.h
rename to js/src/ctypes/libffi/src/x86/ffitarget.h
diff --git a/js/ctypes/libffi/src/x86/freebsd.S b/js/src/ctypes/libffi/src/x86/freebsd.S
similarity index 100%
rename from js/ctypes/libffi/src/x86/freebsd.S
rename to js/src/ctypes/libffi/src/x86/freebsd.S
diff --git a/js/ctypes/libffi/src/x86/sysv.S b/js/src/ctypes/libffi/src/x86/sysv.S
similarity index 100%
rename from js/ctypes/libffi/src/x86/sysv.S
rename to js/src/ctypes/libffi/src/x86/sysv.S
diff --git a/js/ctypes/libffi/src/x86/unix64.S b/js/src/ctypes/libffi/src/x86/unix64.S
similarity index 100%
rename from js/ctypes/libffi/src/x86/unix64.S
rename to js/src/ctypes/libffi/src/x86/unix64.S
diff --git a/js/ctypes/libffi/src/x86/win32.S b/js/src/ctypes/libffi/src/x86/win32.S
similarity index 100%
rename from js/ctypes/libffi/src/x86/win32.S
rename to js/src/ctypes/libffi/src/x86/win32.S
diff --git a/js/ctypes/libffi/src/x86/win64.S b/js/src/ctypes/libffi/src/x86/win64.S
similarity index 100%
rename from js/ctypes/libffi/src/x86/win64.S
rename to js/src/ctypes/libffi/src/x86/win64.S
diff --git a/js/ctypes/libffi/testsuite/Makefile.am b/js/src/ctypes/libffi/testsuite/Makefile.am
similarity index 100%
rename from js/ctypes/libffi/testsuite/Makefile.am
rename to js/src/ctypes/libffi/testsuite/Makefile.am
diff --git a/js/ctypes/libffi/testsuite/Makefile.in b/js/src/ctypes/libffi/testsuite/Makefile.in
similarity index 100%
rename from js/ctypes/libffi/testsuite/Makefile.in
rename to js/src/ctypes/libffi/testsuite/Makefile.in
diff --git a/js/ctypes/libffi/testsuite/config/default.exp b/js/src/ctypes/libffi/testsuite/config/default.exp
similarity index 100%
rename from js/ctypes/libffi/testsuite/config/default.exp
rename to js/src/ctypes/libffi/testsuite/config/default.exp
diff --git a/js/ctypes/libffi/testsuite/lib/libffi-dg.exp b/js/src/ctypes/libffi/testsuite/lib/libffi-dg.exp
similarity index 100%
rename from js/ctypes/libffi/testsuite/lib/libffi-dg.exp
rename to js/src/ctypes/libffi/testsuite/lib/libffi-dg.exp
diff --git a/js/ctypes/libffi/testsuite/lib/target-libpath.exp b/js/src/ctypes/libffi/testsuite/lib/target-libpath.exp
similarity index 100%
rename from js/ctypes/libffi/testsuite/lib/target-libpath.exp
rename to js/src/ctypes/libffi/testsuite/lib/target-libpath.exp
diff --git a/js/ctypes/libffi/testsuite/lib/wrapper.exp b/js/src/ctypes/libffi/testsuite/lib/wrapper.exp
similarity index 100%
rename from js/ctypes/libffi/testsuite/lib/wrapper.exp
rename to js/src/ctypes/libffi/testsuite/lib/wrapper.exp
diff --git a/js/ctypes/libffi/testsuite/libffi.call/call.exp b/js/src/ctypes/libffi/testsuite/libffi.call/call.exp
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/call.exp
rename to js/src/ctypes/libffi/testsuite/libffi.call/call.exp
diff --git a/js/ctypes/libffi/testsuite/libffi.call/closure_fn0.c b/js/src/ctypes/libffi/testsuite/libffi.call/closure_fn0.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/closure_fn0.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/closure_fn0.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/closure_fn1.c b/js/src/ctypes/libffi/testsuite/libffi.call/closure_fn1.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/closure_fn1.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/closure_fn1.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/closure_fn2.c b/js/src/ctypes/libffi/testsuite/libffi.call/closure_fn2.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/closure_fn2.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/closure_fn2.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/closure_fn3.c b/js/src/ctypes/libffi/testsuite/libffi.call/closure_fn3.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/closure_fn3.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/closure_fn3.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/closure_fn4.c b/js/src/ctypes/libffi/testsuite/libffi.call/closure_fn4.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/closure_fn4.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/closure_fn4.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/closure_fn5.c b/js/src/ctypes/libffi/testsuite/libffi.call/closure_fn5.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/closure_fn5.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/closure_fn5.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/closure_fn6.c b/js/src/ctypes/libffi/testsuite/libffi.call/closure_fn6.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/closure_fn6.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/closure_fn6.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/closure_loc_fn0.c b/js/src/ctypes/libffi/testsuite/libffi.call/closure_loc_fn0.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/closure_loc_fn0.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/closure_loc_fn0.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/closure_stdcall.c b/js/src/ctypes/libffi/testsuite/libffi.call/closure_stdcall.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/closure_stdcall.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/closure_stdcall.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_12byte.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_12byte.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_12byte.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_12byte.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_16byte.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_16byte.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_16byte.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_16byte.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_18byte.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_18byte.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_18byte.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_18byte.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_19byte.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_19byte.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_19byte.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_19byte.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_1_1byte.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_1_1byte.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_1_1byte.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_1_1byte.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_20byte.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_20byte.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_20byte.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_20byte.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_20byte1.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_20byte1.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_20byte1.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_20byte1.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_24byte.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_24byte.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_24byte.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_24byte.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_2byte.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_2byte.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_2byte.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_2byte.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_3_1byte.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_3_1byte.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_3_1byte.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_3_1byte.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_3byte1.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_3byte1.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_3byte1.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_3byte1.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_3byte2.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_3byte2.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_3byte2.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_3byte2.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_4_1byte.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_4_1byte.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_4_1byte.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_4_1byte.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_4byte.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_4byte.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_4byte.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_4byte.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_5_1_byte.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_5_1_byte.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_5_1_byte.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_5_1_byte.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_5byte.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_5byte.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_5byte.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_5byte.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_64byte.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_64byte.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_64byte.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_64byte.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_6_1_byte.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_6_1_byte.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_6_1_byte.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_6_1_byte.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_6byte.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_6byte.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_6byte.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_6byte.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_7_1_byte.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_7_1_byte.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_7_1_byte.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_7_1_byte.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_7byte.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_7byte.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_7byte.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_7byte.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_8byte.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_8byte.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_8byte.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_8byte.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_9byte1.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_9byte1.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_9byte1.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_9byte1.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_9byte2.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_9byte2.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_9byte2.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_9byte2.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_align_double.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_align_double.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_align_double.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_align_double.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_align_float.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_align_float.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_align_float.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_align_float.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_align_longdouble.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_align_longdouble.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_align_longdouble.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_align_longdouble.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_align_longdouble_split.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_align_longdouble_split.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_align_longdouble_split.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_align_longdouble_split.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_align_longdouble_split2.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_align_longdouble_split2.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_align_longdouble_split2.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_align_longdouble_split2.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_align_pointer.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_align_pointer.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_align_pointer.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_align_pointer.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_align_sint16.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_align_sint16.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_align_sint16.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_align_sint16.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_align_sint32.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_align_sint32.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_align_sint32.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_align_sint32.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_align_sint64.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_align_sint64.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_align_sint64.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_align_sint64.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_align_uint16.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_align_uint16.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_align_uint16.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_align_uint16.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_align_uint32.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_align_uint32.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_align_uint32.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_align_uint32.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_align_uint64.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_align_uint64.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_align_uint64.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_align_uint64.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_dbls_struct.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_dbls_struct.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_dbls_struct.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_dbls_struct.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_double.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_double.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_double.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_double.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_double_va.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_double_va.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_double_va.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_double_va.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_float.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_float.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_float.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_float.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_longdouble.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_longdouble.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_longdouble.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_longdouble.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_longdouble_va.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_longdouble_va.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_longdouble_va.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_longdouble_va.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_multi_schar.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_multi_schar.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_multi_schar.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_multi_schar.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_multi_sshort.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_multi_sshort.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_multi_sshort.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_multi_sshort.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_multi_sshortchar.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_multi_sshortchar.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_multi_sshortchar.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_multi_sshortchar.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_multi_uchar.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_multi_uchar.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_multi_uchar.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_multi_uchar.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_multi_ushort.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_multi_ushort.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_multi_ushort.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_multi_ushort.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_multi_ushortchar.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_multi_ushortchar.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_multi_ushortchar.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_multi_ushortchar.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_pointer.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_pointer.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_pointer.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_pointer.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_pointer_stack.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_pointer_stack.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_pointer_stack.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_pointer_stack.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_schar.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_schar.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_schar.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_schar.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_sint.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_sint.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_sint.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_sint.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_sshort.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_sshort.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_sshort.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_sshort.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_uchar.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_uchar.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_uchar.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_uchar.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_uint.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_uint.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_uint.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_uint.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_ulonglong.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_ulonglong.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_ulonglong.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_ulonglong.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/cls_ushort.c b/js/src/ctypes/libffi/testsuite/libffi.call/cls_ushort.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/cls_ushort.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/cls_ushort.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/err_bad_abi.c b/js/src/ctypes/libffi/testsuite/libffi.call/err_bad_abi.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/err_bad_abi.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/err_bad_abi.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/err_bad_typedef.c b/js/src/ctypes/libffi/testsuite/libffi.call/err_bad_typedef.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/err_bad_typedef.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/err_bad_typedef.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/ffitest.h b/js/src/ctypes/libffi/testsuite/libffi.call/ffitest.h
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/ffitest.h
rename to js/src/ctypes/libffi/testsuite/libffi.call/ffitest.h
diff --git a/js/ctypes/libffi/testsuite/libffi.call/float.c b/js/src/ctypes/libffi/testsuite/libffi.call/float.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/float.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/float.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/float1.c b/js/src/ctypes/libffi/testsuite/libffi.call/float1.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/float1.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/float1.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/float2.c b/js/src/ctypes/libffi/testsuite/libffi.call/float2.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/float2.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/float2.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/float3.c b/js/src/ctypes/libffi/testsuite/libffi.call/float3.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/float3.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/float3.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/float4.c b/js/src/ctypes/libffi/testsuite/libffi.call/float4.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/float4.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/float4.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/huge_struct.c b/js/src/ctypes/libffi/testsuite/libffi.call/huge_struct.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/huge_struct.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/huge_struct.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/many.c b/js/src/ctypes/libffi/testsuite/libffi.call/many.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/many.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/many.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/many_win32.c b/js/src/ctypes/libffi/testsuite/libffi.call/many_win32.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/many_win32.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/many_win32.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/negint.c b/js/src/ctypes/libffi/testsuite/libffi.call/negint.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/negint.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/negint.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/nested_struct.c b/js/src/ctypes/libffi/testsuite/libffi.call/nested_struct.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/nested_struct.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/nested_struct.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/nested_struct1.c b/js/src/ctypes/libffi/testsuite/libffi.call/nested_struct1.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/nested_struct1.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/nested_struct1.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/nested_struct10.c b/js/src/ctypes/libffi/testsuite/libffi.call/nested_struct10.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/nested_struct10.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/nested_struct10.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/nested_struct2.c b/js/src/ctypes/libffi/testsuite/libffi.call/nested_struct2.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/nested_struct2.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/nested_struct2.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/nested_struct3.c b/js/src/ctypes/libffi/testsuite/libffi.call/nested_struct3.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/nested_struct3.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/nested_struct3.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/nested_struct4.c b/js/src/ctypes/libffi/testsuite/libffi.call/nested_struct4.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/nested_struct4.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/nested_struct4.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/nested_struct5.c b/js/src/ctypes/libffi/testsuite/libffi.call/nested_struct5.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/nested_struct5.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/nested_struct5.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/nested_struct6.c b/js/src/ctypes/libffi/testsuite/libffi.call/nested_struct6.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/nested_struct6.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/nested_struct6.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/nested_struct7.c b/js/src/ctypes/libffi/testsuite/libffi.call/nested_struct7.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/nested_struct7.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/nested_struct7.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/nested_struct8.c b/js/src/ctypes/libffi/testsuite/libffi.call/nested_struct8.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/nested_struct8.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/nested_struct8.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/nested_struct9.c b/js/src/ctypes/libffi/testsuite/libffi.call/nested_struct9.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/nested_struct9.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/nested_struct9.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/problem1.c b/js/src/ctypes/libffi/testsuite/libffi.call/problem1.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/problem1.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/problem1.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/promotion.c b/js/src/ctypes/libffi/testsuite/libffi.call/promotion.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/promotion.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/promotion.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/pyobjc-tc.c b/js/src/ctypes/libffi/testsuite/libffi.call/pyobjc-tc.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/pyobjc-tc.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/pyobjc-tc.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/return_dbl.c b/js/src/ctypes/libffi/testsuite/libffi.call/return_dbl.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/return_dbl.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/return_dbl.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/return_dbl1.c b/js/src/ctypes/libffi/testsuite/libffi.call/return_dbl1.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/return_dbl1.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/return_dbl1.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/return_dbl2.c b/js/src/ctypes/libffi/testsuite/libffi.call/return_dbl2.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/return_dbl2.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/return_dbl2.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/return_fl.c b/js/src/ctypes/libffi/testsuite/libffi.call/return_fl.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/return_fl.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/return_fl.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/return_fl1.c b/js/src/ctypes/libffi/testsuite/libffi.call/return_fl1.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/return_fl1.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/return_fl1.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/return_fl2.c b/js/src/ctypes/libffi/testsuite/libffi.call/return_fl2.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/return_fl2.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/return_fl2.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/return_fl3.c b/js/src/ctypes/libffi/testsuite/libffi.call/return_fl3.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/return_fl3.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/return_fl3.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/return_ldl.c b/js/src/ctypes/libffi/testsuite/libffi.call/return_ldl.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/return_ldl.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/return_ldl.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/return_ll.c b/js/src/ctypes/libffi/testsuite/libffi.call/return_ll.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/return_ll.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/return_ll.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/return_ll1.c b/js/src/ctypes/libffi/testsuite/libffi.call/return_ll1.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/return_ll1.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/return_ll1.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/return_sc.c b/js/src/ctypes/libffi/testsuite/libffi.call/return_sc.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/return_sc.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/return_sc.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/return_sl.c b/js/src/ctypes/libffi/testsuite/libffi.call/return_sl.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/return_sl.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/return_sl.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/return_uc.c b/js/src/ctypes/libffi/testsuite/libffi.call/return_uc.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/return_uc.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/return_uc.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/return_ul.c b/js/src/ctypes/libffi/testsuite/libffi.call/return_ul.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/return_ul.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/return_ul.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/stret_large.c b/js/src/ctypes/libffi/testsuite/libffi.call/stret_large.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/stret_large.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/stret_large.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/stret_large2.c b/js/src/ctypes/libffi/testsuite/libffi.call/stret_large2.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/stret_large2.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/stret_large2.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/stret_medium.c b/js/src/ctypes/libffi/testsuite/libffi.call/stret_medium.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/stret_medium.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/stret_medium.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/stret_medium2.c b/js/src/ctypes/libffi/testsuite/libffi.call/stret_medium2.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/stret_medium2.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/stret_medium2.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/strlen.c b/js/src/ctypes/libffi/testsuite/libffi.call/strlen.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/strlen.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/strlen.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/strlen_win32.c b/js/src/ctypes/libffi/testsuite/libffi.call/strlen_win32.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/strlen_win32.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/strlen_win32.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/struct1.c b/js/src/ctypes/libffi/testsuite/libffi.call/struct1.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/struct1.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/struct1.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/struct2.c b/js/src/ctypes/libffi/testsuite/libffi.call/struct2.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/struct2.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/struct2.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/struct3.c b/js/src/ctypes/libffi/testsuite/libffi.call/struct3.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/struct3.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/struct3.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/struct4.c b/js/src/ctypes/libffi/testsuite/libffi.call/struct4.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/struct4.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/struct4.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/struct5.c b/js/src/ctypes/libffi/testsuite/libffi.call/struct5.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/struct5.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/struct5.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/struct6.c b/js/src/ctypes/libffi/testsuite/libffi.call/struct6.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/struct6.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/struct6.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/struct7.c b/js/src/ctypes/libffi/testsuite/libffi.call/struct7.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/struct7.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/struct7.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/struct8.c b/js/src/ctypes/libffi/testsuite/libffi.call/struct8.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/struct8.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/struct8.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/struct9.c b/js/src/ctypes/libffi/testsuite/libffi.call/struct9.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/struct9.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/struct9.c
diff --git a/js/ctypes/libffi/testsuite/libffi.call/testclosure.c b/js/src/ctypes/libffi/testsuite/libffi.call/testclosure.c
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.call/testclosure.c
rename to js/src/ctypes/libffi/testsuite/libffi.call/testclosure.c
diff --git a/js/ctypes/libffi/testsuite/libffi.special/ffitestcxx.h b/js/src/ctypes/libffi/testsuite/libffi.special/ffitestcxx.h
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.special/ffitestcxx.h
rename to js/src/ctypes/libffi/testsuite/libffi.special/ffitestcxx.h
diff --git a/js/ctypes/libffi/testsuite/libffi.special/special.exp b/js/src/ctypes/libffi/testsuite/libffi.special/special.exp
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.special/special.exp
rename to js/src/ctypes/libffi/testsuite/libffi.special/special.exp
diff --git a/js/ctypes/libffi/testsuite/libffi.special/unwindtest.cc b/js/src/ctypes/libffi/testsuite/libffi.special/unwindtest.cc
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.special/unwindtest.cc
rename to js/src/ctypes/libffi/testsuite/libffi.special/unwindtest.cc
diff --git a/js/ctypes/libffi/testsuite/libffi.special/unwindtest_ffi_call.cc b/js/src/ctypes/libffi/testsuite/libffi.special/unwindtest_ffi_call.cc
similarity index 100%
rename from js/ctypes/libffi/testsuite/libffi.special/unwindtest_ffi_call.cc
rename to js/src/ctypes/libffi/testsuite/libffi.special/unwindtest_ffi_call.cc
diff --git a/js/ctypes/libffi/texinfo.tex b/js/src/ctypes/libffi/texinfo.tex
similarity index 100%
rename from js/ctypes/libffi/texinfo.tex
rename to js/src/ctypes/libffi/texinfo.tex
diff --git a/js/ctypes/typedefs.h b/js/src/ctypes/typedefs.h
similarity index 93%
rename from js/ctypes/typedefs.h
rename to js/src/ctypes/typedefs.h
index 6f2a06904c6..7e27b8aa5f4 100644
--- a/js/ctypes/typedefs.h
+++ b/js/src/ctypes/typedefs.h
@@ -102,18 +102,18 @@
 
 // The meat.
 DEFINE_BOOL_TYPE       (bool,               bool,               CTYPES_FFI_BOOL)
-DEFINE_INT_TYPE        (int8_t,             PRInt8,             ffi_type_sint8)
-DEFINE_INT_TYPE        (int16_t,            PRInt16,            ffi_type_sint16)
-DEFINE_INT_TYPE        (int32_t,            PRInt32,            ffi_type_sint32)
-DEFINE_INT_TYPE        (uint8_t,            PRUint8,            ffi_type_uint8)
-DEFINE_INT_TYPE        (uint16_t,           PRUint16,           ffi_type_uint16)
-DEFINE_INT_TYPE        (uint32_t,           PRUint32,           ffi_type_uint32)
+DEFINE_INT_TYPE        (int8_t,             JSInt8,             ffi_type_sint8)
+DEFINE_INT_TYPE        (int16_t,            JSInt16,            ffi_type_sint16)
+DEFINE_INT_TYPE        (int32_t,            JSInt32,            ffi_type_sint32)
+DEFINE_INT_TYPE        (uint8_t,            JSUint8,            ffi_type_uint8)
+DEFINE_INT_TYPE        (uint16_t,           JSUint16,           ffi_type_uint16)
+DEFINE_INT_TYPE        (uint32_t,           JSUint32,           ffi_type_uint32)
 DEFINE_INT_TYPE        (short,              short,              ffi_type_sint16)
 DEFINE_INT_TYPE        (unsigned_short,     unsigned short,     ffi_type_uint16)
 DEFINE_INT_TYPE        (int,                int,                ffi_type_sint32)
 DEFINE_INT_TYPE        (unsigned_int,       unsigned int,       ffi_type_uint32)
-DEFINE_WRAPPED_INT_TYPE(int64_t,            PRInt64,            ffi_type_sint64)
-DEFINE_WRAPPED_INT_TYPE(uint64_t,           PRUint64,           ffi_type_uint64)
+DEFINE_WRAPPED_INT_TYPE(int64_t,            JSInt64,            ffi_type_sint64)
+DEFINE_WRAPPED_INT_TYPE(uint64_t,           JSUint64,           ffi_type_uint64)
 DEFINE_WRAPPED_INT_TYPE(long,               long,               CTYPES_FFI_LONG)
 DEFINE_WRAPPED_INT_TYPE(unsigned_long,      unsigned long,      CTYPES_FFI_ULONG)
 DEFINE_WRAPPED_INT_TYPE(long_long,          long long,          ffi_type_sint64)
diff --git a/js/src/dtoa.c b/js/src/dtoa.c
index 8d7352a635d..76fc8cf08e6 100644
--- a/js/src/dtoa.c
+++ b/js/src/dtoa.c
@@ -101,9 +101,10 @@
  *	directly -- and assumed always to succeed.  Similarly, if you
  *	want something other than the system's free() to be called to
  *	recycle memory acquired from MALLOC, #define FREE to be the
- *	name of the alternate routine.  (FREE or free is only called in
- *	pathological cases, e.g., in a dtoa call after a dtoa return in
- *	mode 3 with thousands of digits requested.)
+ *	name of the alternate routine.  (Unless you #define
+ *	NO_GLOBAL_STATE and call destroydtoa, FREE or free is only
+ *	called in pathological cases, e.g., in a dtoa call after a dtoa
+ *	return in mode 3 with thousands of digits requested.)
  * #define Omit_Private_Memory to omit logic (added Jan. 1998) for making
  *	memory allocations from a private pool of memory when possible.
  *	When used, the private pool is PRIVATE_MEM bytes long:  2304 bytes,
@@ -164,6 +165,12 @@
  *	inexact or when it is a numeric value rounded to +-infinity).
  * #define NO_ERRNO if strtod should not assign errno = ERANGE when
  *	the result overflows to +-Infinity or underflows to 0.
+ * #define NO_GLOBAL_STATE to avoid defining any non-const global or
+ *	static variables.  Instead the necessary state is stored in an
+ *	opaque struct, DtoaState, a pointer to which must be passed to
+ *	every entry point.  Two new functions are added to the API:
+ *		DtoaState *newdtoa(void);
+ *		void destroydtoa(DtoaState *);
  */
 
 #ifndef Long
@@ -195,12 +202,15 @@ extern void *MALLOC(size_t);
 #define MALLOC malloc
 #endif
 
+#ifndef FREE
+#define FREE free
+#endif
+
 #ifndef Omit_Private_Memory
 #ifndef PRIVATE_MEM
 #define PRIVATE_MEM 2304
 #endif
 #define PRIVATE_mem ((PRIVATE_MEM+sizeof(double)-1)/sizeof(double))
-static double private_mem[PRIVATE_mem], *pmem_next = private_mem;
 #endif
 
 #undef IEEE_Arith
@@ -479,14 +489,87 @@ Bigint {
 
 typedef struct Bigint Bigint;
 
- static Bigint *freelist[Kmax+1];
+#ifdef NO_GLOBAL_STATE
+#ifdef MULTIPLE_THREADS
+#error "cannot have both NO_GLOBAL_STATE and MULTIPLE_THREADS"
+#endif
+ struct
+DtoaState {
+#define DECLARE_GLOBAL_STATE /* nothing */
+#else
+#define DECLARE_GLOBAL_STATE static
+#endif
+
+	DECLARE_GLOBAL_STATE Bigint *freelist[Kmax+1];
+	DECLARE_GLOBAL_STATE Bigint *p5s;
+#ifndef Omit_Private_Memory
+	DECLARE_GLOBAL_STATE double private_mem[PRIVATE_mem];
+	DECLARE_GLOBAL_STATE double *pmem_next
+#ifndef NO_GLOBAL_STATE
+		= private_mem
+#endif
+		;
+#endif
+#ifdef NO_GLOBAL_STATE
+	};
+	typedef struct DtoaState DtoaState;
+#ifdef KR_headers
+#define STATE_PARAM state,
+#define STATE_PARAM_DECL DtoaState *state;
+#else
+#define STATE_PARAM DtoaState *state,
+#endif
+#define PASS_STATE state,
+#define GET_STATE(field) (state->field)
+
+ static DtoaState *
+newdtoa(void)
+{
+	DtoaState *state = (DtoaState *) MALLOC(sizeof(DtoaState));
+	if (state) {
+		memset(state, 0, sizeof(DtoaState));
+		state->pmem_next = state->private_mem;
+		}
+	return state;
+}
+
+ static void
+destroydtoa
+#ifdef KR_headers
+	(state) STATE_PARAM_DECL
+#else
+	(DtoaState *state)
+#endif
+{
+	int i;
+	Bigint *v, *next;
+
+	for (i = 0; i <= Kmax; i++) {
+		for (v = GET_STATE(freelist)[i]; v; v = next) {
+			next = v->next;
+#ifndef Omit_Private_Memory
+			if ((double*)v < GET_STATE(private_mem) ||
+			    (double*)v >= GET_STATE(private_mem) + PRIVATE_mem)
+#endif
+				FREE((void*)v);
+			}
+		}
+	FREE((void *)state);
+}
+
+#else
+#define STATE_PARAM /* nothing */
+#define STATE_PARAM_DECL /* nothing */
+#define PASS_STATE /* nothing */
+#define GET_STATE(name) name
+#endif
 
  static Bigint *
 Balloc
 #ifdef KR_headers
-	(k) int k;
+	(STATE_PARAM k) STATE_PARAM_DECL int k;
 #else
-	(int k)
+	(STATE_PARAM int k)
 #endif
 {
 	int x;
@@ -498,8 +581,8 @@ Balloc
 	ACQUIRE_DTOA_LOCK(0);
 	/* The k > Kmax case does not need ACQUIRE_DTOA_LOCK(0), */
 	/* but this case seems very unlikely. */
-	if (k <= Kmax && (rv = freelist[k]))
-		freelist[k] = rv->next;
+	if (k <= Kmax && (rv = GET_STATE(freelist)[k]))
+		GET_STATE(freelist)[k] = rv->next;
 	else {
 		x = 1 << k;
 #ifdef Omit_Private_Memory
@@ -507,9 +590,9 @@ Balloc
 #else
 		len = (sizeof(Bigint) + (x-1)*sizeof(ULong) + sizeof(double) - 1)
 			/sizeof(double);
-		if (k <= Kmax && pmem_next - private_mem + len <= PRIVATE_mem) {
-			rv = (Bigint*)pmem_next;
-			pmem_next += len;
+		if (k <= Kmax && GET_STATE(pmem_next) - GET_STATE(private_mem) + len <= PRIVATE_mem) {
+			rv = (Bigint*)GET_STATE(pmem_next);
+			GET_STATE(pmem_next) += len;
 			}
 		else
 			rv = (Bigint*)MALLOC(len*sizeof(double));
@@ -525,22 +608,18 @@ Balloc
  static void
 Bfree
 #ifdef KR_headers
-	(v) Bigint *v;
+	(STATE_PARAM v) STATE_PARAM_DECL Bigint *v;
 #else
-	(Bigint *v)
+	(STATE_PARAM Bigint *v)
 #endif
 {
 	if (v) {
 		if (v->k > Kmax)
-#ifdef FREE
 			FREE((void*)v);
-#else
-			free((void*)v);
-#endif
 		else {
 			ACQUIRE_DTOA_LOCK(0);
-			v->next = freelist[v->k];
-			freelist[v->k] = v;
+			v->next = GET_STATE(freelist)[v->k];
+			GET_STATE(freelist)[v->k] = v;
 			FREE_DTOA_LOCK(0);
 			}
 		}
@@ -552,9 +631,9 @@ y->wds*sizeof(Long) + 2*sizeof(int))
  static Bigint *
 multadd
 #ifdef KR_headers
-	(b, m, a) Bigint *b; int m, a;
+	(STATE_PARAM b, m, a) STATE_PARAM_DECL Bigint *b; int m, a;
 #else
-	(Bigint *b, int m, int a)	/* multiply by m and add a */
+	(STATE_PARAM Bigint *b, int m, int a)	/* multiply by m and add a */
 #endif
 {
 	int i, wds;
@@ -595,9 +674,9 @@ multadd
 		while(++i < wds);
 	if (carry) {
 		if (wds >= b->maxwds) {
-			b1 = Balloc(b->k+1);
+			b1 = Balloc(PASS_STATE b->k+1);
 			Bcopy(b1, b);
-			Bfree(b);
+			Bfree(PASS_STATE b);
 			b = b1;
 			}
 		b->x[wds++] = (ULong) carry;
@@ -609,9 +688,9 @@
  static Bigint *
 s2b
 #ifdef KR_headers
-	(s, nd0, nd, y9) CONST char *s; int nd0, nd; ULong y9;
+	(STATE_PARAM s, nd0, nd, y9) STATE_PARAM_DECL CONST char *s; int nd0, nd; ULong y9;
 #else
-	(CONST char *s, int nd0, int nd, ULong y9)
+	(STATE_PARAM CONST char *s, int nd0, int nd, ULong y9)
 #endif
 {
 	Bigint *b;
@@ -621,11 +700,11 @@ s2b
 	x = (nd + 8) / 9;
 	for(k = 0, y = 1; x > y; y <<= 1, k++) ;
 #ifdef Pack_32
-	b = Balloc(k);
+	b = Balloc(PASS_STATE k);
 	b->x[0] = y9;
 	b->wds = 1;
 #else
-	b = Balloc(k+1);
+	b = Balloc(PASS_STATE k+1);
 	b->x[0] = y9 & 0xffff;
 	b->wds = (b->x[1] = y9 >> 16) ? 2 : 1;
2 : 1; #endif @@ -633,14 +712,14 @@ s2b i = 9; if (9 < nd0) { s += 9; - do b = multadd(b, 10, *s++ - '0'); + do b = multadd(PASS_STATE b, 10, *s++ - '0'); while(++i < nd0); s++; } else s += 10; for(; i < nd; i++) - b = multadd(b, 10, *s++ - '0'); + b = multadd(PASS_STATE b, 10, *s++ - '0'); return b; } @@ -729,14 +808,14 @@ lo0bits static Bigint * i2b #ifdef KR_headers - (i) int i; + (STATE_PARAM i) STATE_PARAM_DECL int i; #else - (int i) + (STATE_PARAM int i) #endif { Bigint *b; - b = Balloc(1); + b = Balloc(PASS_STATE 1); b->x[0] = i; b->wds = 1; return b; @@ -745,9 +824,9 @@ i2b static Bigint * mult #ifdef KR_headers - (a, b) Bigint *a, *b; + (STATE_PARAM a, b) STATE_PARAM_DECL Bigint *a, *b; #else - (Bigint *a, Bigint *b) + (STATE_PARAM Bigint *a, Bigint *b) #endif { Bigint *c; @@ -774,7 +853,7 @@ mult wc = wa + wb; if (wc > a->maxwds) k++; - c = Balloc(k); + c = Balloc(PASS_STATE k); for(x = c->x, xa = x + wc; x < xa; x++) *x = 0; xa = a->x; @@ -852,26 +931,24 @@ mult return c; } - static Bigint *p5s; - static Bigint * pow5mult #ifdef KR_headers - (b, k) Bigint *b; int k; + (STATE_PARAM b, k) STATE_PARAM_DECL Bigint *b; int k; #else - (Bigint *b, int k) + (STATE_PARAM Bigint *b, int k) #endif { Bigint *b1, *p5, *p51; int i; - static int p05[3] = { 5, 25, 125 }; + static CONST int p05[3] = { 5, 25, 125 }; if ((i = k & 3)) - b = multadd(b, p05[i-1], 0); + b = multadd(PASS_STATE b, p05[i-1], 0); if (!(k >>= 2)) return b; - if (!(p5 = p5s)) { + if (!(p5 = GET_STATE(p5s))) { /* first time */ #ifdef MULTIPLE_THREADS ACQUIRE_DTOA_LOCK(1); @@ -881,14 +958,14 @@ pow5mult } FREE_DTOA_LOCK(1); #else - p5 = p5s = i2b(625); + p5 = GET_STATE(p5s) = i2b(PASS_STATE 625); p5->next = 0; #endif } for(;;) { if (k & 1) { - b1 = mult(b, p5); - Bfree(b); + b1 = mult(PASS_STATE b, p5); + Bfree(PASS_STATE b); b = b1; } if (!(k >>= 1)) @@ -902,7 +979,7 @@ pow5mult } FREE_DTOA_LOCK(1); #else - p51 = p5->next = mult(p5,p5); + p51 = p5->next = mult(PASS_STATE p5,p5); p51->next = 0; #endif } @@ -914,9 +991,9 @@ pow5mult static Bigint * lshift #ifdef KR_headers - (b, k) Bigint *b; int k; + (STATE_PARAM b, k) STATE_PARAM_DECL Bigint *b; int k; #else - (Bigint *b, int k) + (STATE_PARAM Bigint *b, int k) #endif { int i, k1, n, n1; @@ -932,7 +1009,7 @@ lshift n1 = n + b->wds + 1; for(i = b->maxwds; n1 > i; i <<= 1) k1++; - b1 = Balloc(k1); + b1 = Balloc(PASS_STATE k1); x1 = b1->x; for(i = 0; i < n; i++) *x1++ = 0; @@ -967,7 +1044,7 @@ lshift *x1++ = *x++; while(x < xe); b1->wds = n1 - 1; - Bfree(b); + Bfree(PASS_STATE b); return b1; } @@ -1008,9 +1085,9 @@ cmp static Bigint * diff #ifdef KR_headers - (a, b) Bigint *a, *b; + (STATE_PARAM a, b) STATE_PARAM_DECL Bigint *a, *b; #else - (Bigint *a, Bigint *b) + (STATE_PARAM Bigint *a, Bigint *b) #endif { Bigint *c; @@ -1027,7 +1104,7 @@ diff i = cmp(a,b); if (!i) { - c = Balloc(0); + c = Balloc(PASS_STATE 0); c->wds = 1; c->x[0] = 0; return c; @@ -1040,7 +1117,7 @@ diff } else i = 0; - c = Balloc(a->k); + c = Balloc(PASS_STATE a->k); c->sign = i; wa = a->wds; xa = a->x; @@ -1214,9 +1291,9 @@ b2d static Bigint * d2b #ifdef KR_headers - (d, e, bits) U d; int *e, *bits; + (STATE_PARAM d, e, bits) STATE_PARAM_DECL U d; int *e, *bits; #else - (U d, int *e, int *bits) + (STATE_PARAM U d, int *e, int *bits) #endif { Bigint *b; @@ -1235,9 +1312,9 @@ d2b #endif #ifdef Pack_32 - b = Balloc(1); + b = Balloc(PASS_STATE 1); #else - b = Balloc(2); + b = Balloc(PASS_STATE 2); #endif x = b->x; @@ -1529,9 +1606,9 @@ hexnan static double _strtod #ifdef KR_headers - (s00, se) CONST char *s00; 
char **se; + (STATE_PARAM s00, se) STATE_PARAM_DECL CONST char *s00; char **se; #else - (CONST char *s00, char **se) + (STATE_PARAM CONST char *s00, char **se) #endif { #ifdef Avoid_Underflow @@ -1953,13 +2030,13 @@ _strtod /* Put digits into bd: true value = bd * 10^e */ - bd0 = s2b(s0, nd0, nd, y); + bd0 = s2b(PASS_STATE s0, nd0, nd, y); for(;;) { - bd = Balloc(bd0->k); + bd = Balloc(PASS_STATE bd0->k); Bcopy(bd, bd0); - bb = d2b(rv, &bbe, &bbbits); /* rv = bb * 2^bbe */ - bs = i2b(1); + bb = d2b(PASS_STATE rv, &bbe, &bbbits); /* rv = bb * 2^bbe */ + bs = i2b(PASS_STATE 1); if (e >= 0) { bb2 = bb5 = 0; @@ -2015,20 +2092,20 @@ _strtod bs2 -= i; } if (bb5 > 0) { - bs = pow5mult(bs, bb5); - bb1 = mult(bs, bb); - Bfree(bb); + bs = pow5mult(PASS_STATE bs, bb5); + bb1 = mult(PASS_STATE bs, bb); + Bfree(PASS_STATE bb); bb = bb1; } if (bb2 > 0) - bb = lshift(bb, bb2); + bb = lshift(PASS_STATE bb, bb2); if (bd5 > 0) - bd = pow5mult(bd, bd5); + bd = pow5mult(PASS_STATE bd, bd5); if (bd2 > 0) - bd = lshift(bd, bd2); + bd = lshift(PASS_STATE bd, bd2); if (bs2 > 0) - bs = lshift(bs, bs2); - delta = diff(bb, bd); + bs = lshift(PASS_STATE bs, bs2); + delta = diff(PASS_STATE bb, bd); dsign = delta->sign; delta->sign = 0; i = cmp(delta, bs); @@ -2060,7 +2137,7 @@ _strtod if (y) #endif { - delta = lshift(delta,Log2P); + delta = lshift(PASS_STATE delta,Log2P); if (cmp(delta, bs) <= 0) adj = -0.5; } @@ -2149,7 +2226,7 @@ _strtod #endif break; } - delta = lshift(delta,Log2P); + delta = lshift(PASS_STATE delta,Log2P); if (cmp(delta, bs) > 0) goto drop_down; break; @@ -2374,10 +2451,10 @@ _strtod } #endif cont: - Bfree(bb); - Bfree(bd); - Bfree(bs); - Bfree(delta); + Bfree(PASS_STATE bb); + Bfree(PASS_STATE bd); + Bfree(PASS_STATE bs); + Bfree(PASS_STATE delta); } #ifdef SET_INEXACT if (inexact) { @@ -2410,11 +2487,11 @@ _strtod } #endif retfree: - Bfree(bb); - Bfree(bd); - Bfree(bs); - Bfree(bd0); - Bfree(delta); + Bfree(PASS_STATE bb); + Bfree(PASS_STATE bd); + Bfree(PASS_STATE bs); + Bfree(PASS_STATE bd0); + Bfree(PASS_STATE delta); ret: if (se) *se = (char *)s; @@ -2539,15 +2616,16 @@ quorem return q; } -#ifndef MULTIPLE_THREADS +#if !defined(MULTIPLE_THREADS) && !defined(NO_GLOBAL_STATE) +#define USE_DTOA_RESULT 1 static char *dtoa_result; #endif static char * #ifdef KR_headers -rv_alloc(i) int i; +rv_alloc(STATE_PARAM i) STATE_PARAM_DECL int i; #else -rv_alloc(int i) +rv_alloc(STATE_PARAM int i) #endif { int j, k, *r; @@ -2557,10 +2635,10 @@ rv_alloc(int i) sizeof(Bigint) - sizeof(ULong) - sizeof(int) + j <= (unsigned) i; j <<= 1) k++; - r = (int*)Balloc(k); + r = (int*)Balloc(PASS_STATE k); *r = k; return -#ifndef MULTIPLE_THREADS +#ifdef USE_DTOA_RESULT dtoa_result = #endif (char *)(r+1); @@ -2568,14 +2646,14 @@ rv_alloc(int i) static char * #ifdef KR_headers -nrv_alloc(s, rve, n) char *s, **rve; int n; +nrv_alloc(STATE_PARAM s, rve, n) STATE_PARAM_DECL char *s, **rve; int n; #else -nrv_alloc(CONST char *s, char **rve, int n) +nrv_alloc(STATE_PARAM CONST char *s, char **rve, int n) #endif { char *rv, *t; - t = rv = rv_alloc(n); + t = rv = rv_alloc(PASS_STATE n); while((*t = *s++)) t++; if (rve) *rve = t; @@ -2590,15 +2668,15 @@ nrv_alloc(CONST char *s, char **rve, int n) static void #ifdef KR_headers -freedtoa(s) char *s; +freedtoa(STATE_PARAM s) STATE_PARAM_DECL char *s; #else -freedtoa(char *s) +freedtoa(STATE_PARAM char *s) #endif { Bigint *b = (Bigint *)((int *)s - 1); b->maxwds = 1 << (b->k = *(int*)b); - Bfree(b); -#ifndef MULTIPLE_THREADS + Bfree(PASS_STATE b); +#ifdef USE_DTOA_RESULT if (s == 
dtoa_result) dtoa_result = 0; #endif @@ -2641,10 +2719,10 @@ freedtoa(char *s) static char * dtoa #ifdef KR_headers - (d, mode, ndigits, decpt, sign, rve) - U d; int mode, ndigits, *decpt, *sign; char **rve; + (STATE_PARAM d, mode, ndigits, decpt, sign, rve) + STATE_PARAM_DECL U d; int mode, ndigits, *decpt, *sign; char **rve; #else - (U d, int mode, int ndigits, int *decpt, int *sign, char **rve) + (STATE_PARAM U d, int mode, int ndigits, int *decpt, int *sign, char **rve) #endif { /* Arguments ndigits, decpt, sign are similar to those @@ -2705,9 +2783,9 @@ dtoa mlo = NULL; #endif -#ifndef MULTIPLE_THREADS +#ifdef USE_DTOA_RESULT if (dtoa_result) { - freedtoa(dtoa_result); + freedtoa(PASS_STATE dtoa_result); dtoa_result = 0; } #endif @@ -2731,9 +2809,9 @@ dtoa *decpt = 9999; #ifdef IEEE_Arith if (!word1(d) && !(word0(d) & 0xfffff)) - return nrv_alloc("Infinity", rve, 8); + return nrv_alloc(PASS_STATE "Infinity", rve, 8); #endif - return nrv_alloc("NaN", rve, 3); + return nrv_alloc(PASS_STATE "NaN", rve, 3); } #endif #ifdef IBM @@ -2741,7 +2819,7 @@ dtoa #endif if (!dval(d)) { *decpt = 1; - return nrv_alloc("0", rve, 1); + return nrv_alloc(PASS_STATE "0", rve, 1); } #ifdef SET_INEXACT @@ -2758,7 +2836,7 @@ dtoa } #endif - b = d2b(d, &be, &bbits); + b = d2b(PASS_STATE d, &be, &bbits); #ifdef Sudden_Underflow i = (int)(word0(d) >> Exp_shift1 & (Exp_mask>>Exp_shift1)); #else @@ -2884,7 +2962,7 @@ dtoa if (i <= 0) i = 1; } - s = s0 = rv_alloc(i); + s = s0 = rv_alloc(PASS_STATE i); #ifdef Honor_FLT_ROUNDS if (mode > 1 && rounding != 1) @@ -3061,7 +3139,7 @@ dtoa #endif b2 += i; s2 += i; - mhi = i2b(1); + mhi = i2b(PASS_STATE 1); } if (m2 > 0 && s2 > 0) { i = m2 < s2 ? m2 : s2; @@ -3072,20 +3150,20 @@ dtoa if (b5 > 0) { if (leftright) { if (m5 > 0) { - mhi = pow5mult(mhi, m5); - b1 = mult(mhi, b); - Bfree(b); + mhi = pow5mult(PASS_STATE mhi, m5); + b1 = mult(PASS_STATE mhi, b); + Bfree(PASS_STATE b); b = b1; } if ((j = b5 - m5)) - b = pow5mult(b, j); + b = pow5mult(PASS_STATE b, j); } else - b = pow5mult(b, b5); + b = pow5mult(PASS_STATE b, b5); } - S = i2b(1); + S = i2b(PASS_STATE 1); if (s5 > 0) - S = pow5mult(S, s5); + S = pow5mult(PASS_STATE S, s5); /* Check for special case that d is a normalized power of 2. */ @@ -3134,20 +3212,20 @@ dtoa s2 += i; } if (b2 > 0) - b = lshift(b, b2); + b = lshift(PASS_STATE b, b2); if (s2 > 0) - S = lshift(S, s2); + S = lshift(PASS_STATE S, s2); if (k_check) { if (cmp(b,S) < 0) { k--; - b = multadd(b, 10, 0); /* we botched the k estimate */ + b = multadd(PASS_STATE b, 10, 0); /* we botched the k estimate */ if (leftright) - mhi = multadd(mhi, 10, 0); + mhi = multadd(PASS_STATE mhi, 10, 0); ilim = ilim1; } } if (ilim <= 0 && (mode == 3 || mode == 5)) { - if (ilim < 0 || cmp(b,S = multadd(S,5,0)) < 0) { + if (ilim < 0 || cmp(b,S = multadd(PASS_STATE S,5,0)) < 0) { /* no digits, fcvt style */ no_digits: /* MOZILLA CHANGE: Always return a non-empty string. */ @@ -3162,7 +3240,7 @@ dtoa } if (leftright) { if (m2 > 0) - mhi = lshift(mhi, m2); + mhi = lshift(PASS_STATE mhi, m2); /* Compute mlo -- check for special case * that d is a normalized power of 2. @@ -3170,9 +3248,9 @@ dtoa mlo = mhi; if (spec_case) { - mhi = Balloc(mhi->k); + mhi = Balloc(PASS_STATE mhi->k); Bcopy(mhi, mlo); - mhi = lshift(mhi, Log2P); + mhi = lshift(PASS_STATE mhi, Log2P); } for(i = 1;;i++) { @@ -3181,9 +3259,9 @@ dtoa * that will round to d? */ j = cmp(b, mlo); - delta = diff(S, mhi); + delta = diff(PASS_STATE S, mhi); j1 = delta->sign ? 
1 : cmp(b, delta); - Bfree(delta); + Bfree(PASS_STATE delta); #ifndef ROUND_BIASED if (j1 == 0 && mode != 1 && !(word1(d) & 1) #ifdef Honor_FLT_ROUNDS @@ -3221,7 +3299,7 @@ } #endif /*Honor_FLT_ROUNDS*/ if (j1 > 0) { - b = lshift(b, 1); + b = lshift(PASS_STATE b, 1); j1 = cmp(b, S); if ((j1 > 0 || (j1 == 0 && dig & 1)) && dig++ == '9') @@ -3250,12 +3328,12 @@ *s++ = dig; if (i == ilim) break; - b = multadd(b, 10, 0); + b = multadd(PASS_STATE b, 10, 0); if (mlo == mhi) - mlo = mhi = multadd(mhi, 10, 0); + mlo = mhi = multadd(PASS_STATE mhi, 10, 0); else { - mlo = multadd(mlo, 10, 0); - mhi = multadd(mhi, 10, 0); + mlo = multadd(PASS_STATE mlo, 10, 0); + mhi = multadd(PASS_STATE mhi, 10, 0); } } } @@ -3270,7 +3348,7 @@ } if (i >= ilim) break; - b = multadd(b, 10, 0); + b = multadd(PASS_STATE b, 10, 0); } /* Round off last digit */ @@ -3281,7 +3359,7 @@ case 2: goto roundoff; } #endif - b = lshift(b, 1); + b = lshift(PASS_STATE b, 1); j = cmp(b, S); if (j >= 0) { /* ECMA compatible rounding needed by Spidermonkey */ roundoff: @@ -3301,11 +3379,11 @@ s++; } ret: - Bfree(S); + Bfree(PASS_STATE S); if (mhi) { if (mlo && mlo != mhi) - Bfree(mlo); - Bfree(mhi); + Bfree(PASS_STATE mlo); + Bfree(PASS_STATE mhi); } ret1: #ifdef SET_INEXACT @@ -3319,7 +3397,7 @@ else if (!oldinexact) clear_inexact(); #endif - Bfree(b); + Bfree(PASS_STATE b); *s = 0; *decpt = k + 1; if (rve) diff --git a/js/src/editline/editline.c b/js/src/editline/editline.c index 879dd67ea6f..07d4819198e 100644 --- a/js/src/editline/editline.c +++ b/js/src/editline/editline.c @@ -169,7 +169,10 @@ STATIC void TTYflush() { if (ScreenCount) { - (void)write(1, Screen, ScreenCount); + /* Dummy assignment avoids GCC warning on + * "attribute warn_unused_result" */ + ssize_t dummy = write(1, Screen, ScreenCount); + (void)dummy; ScreenCount = 0; } } diff --git a/js/src/gnuplot/gcTimer.gnu b/js/src/gnuplot/gcTimer.gnu new file mode 100644 index 00000000000..a15e67fc272 --- /dev/null +++ b/js/src/gnuplot/gcTimer.gnu @@ -0,0 +1,25 @@ +# gnuplot script to visualize GCTIMER results. +# usage: "gnuplot gcTimer.gnu >outputfile.png" + +set terminal png +# set Title +set title "Title goes here!" +set datafile missing "-" +set noxtics +set ytics nomirror +set ylabel "Cycles [1E6]" +set y2tics nomirror +set y2label "Chunk count" +set key below +set style data linespoints + +#set data file +plot 'gcTimer.dat' using 2 title columnheader(2), \ +'' u 3 title columnheader(3), \ +'' u 4 title columnheader(4), \ +'' u 5 title columnheader(5) with points, \ +'' u 6 title columnheader(6) with points, \ +'' u 7 title columnheader(7) with points, \ +'' u 8 title columnheader(8) with points, \ +'' u 9 title columnheader(9) with points axis x1y2, \ +'' u 10 title columnheader(10) with points axis x1y2 diff --git a/js/src/js-config.h.in b/js/src/js-config.h.in index 7555d933f26..a16d154acbc 100644 --- a/js/src/js-config.h.in +++ b/js/src/js-config.h.in @@ -48,6 +48,9 @@ /* Define to 1 if SpiderMonkey should support multi-threaded clients. */ #undef JS_THREADSAFE +/* Define to 1 if SpiderMonkey should include ctypes support. */ +#undef JS_HAS_CTYPES + /* Define to 1 if SpiderMonkey should support the ability to perform entirely too much GC.
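Like the other entries in js-config.h.in, the JS_HAS_CTYPES #undef above is rewritten to a #define by configure when the feature is enabled, so embedders can test it after including js-config.h. A hypothetical embedder check (cx and global stand for the embedder's context and global object; JS_InitCTypesClass is the entry point this patch adds to jsapi.h):

    #include "js-config.h"
    #include "jsapi.h"

    #ifdef JS_HAS_CTYPES
        if (!JS_InitCTypesClass(cx, global))   /* expose 'ctypes' on global */
            return JS_FALSE;
    #endif
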
*/ #undef JS_GC_ZEAL diff --git a/js/src/jsapi-tests/Makefile.in b/js/src/jsapi-tests/Makefile.in index e338907d08c..f41b13fb805 100644 --- a/js/src/jsapi-tests/Makefile.in +++ b/js/src/jsapi-tests/Makefile.in @@ -55,6 +55,7 @@ CPPSRCS = \ testIntString.cpp \ testIsAboutToBeFinalized.cpp \ testLookup.cpp \ + testNewObject.cpp \ testPropCache.cpp \ testTrap.cpp \ testSameValue.cpp \ diff --git a/js/src/jsapi-tests/testLookup.cpp b/js/src/jsapi-tests/testLookup.cpp index 4d45f8d791e..566b47b5162 100644 --- a/js/src/jsapi-tests/testLookup.cpp +++ b/js/src/jsapi-tests/testLookup.cpp @@ -1,5 +1,5 @@ #include "tests.h" -#include "jsfun.h" // for js_IsInternalFunctionObject +#include "jsfun.h" // for js::IsInternalFunctionObject BEGIN_TEST(testLookup_bug522590) { @@ -20,8 +20,8 @@ BEGIN_TEST(testLookup_bug522590) CHECK(JS_LookupProperty(cx, xobj, "f", r.addr())); CHECK(JSVAL_IS_OBJECT(r)); JSObject *funobj = JSVAL_TO_OBJECT(r); - CHECK(HAS_FUNCTION_CLASS(funobj)); - CHECK(!js_IsInternalFunctionObject(funobj)); + CHECK(funobj->isFunction()); + CHECK(!js::IsInternalFunctionObject(funobj)); CHECK(GET_FUNCTION_PRIVATE(cx, funobj) != (JSFunction *) funobj); return true; diff --git a/js/src/jsapi-tests/testNewObject.cpp b/js/src/jsapi-tests/testNewObject.cpp new file mode 100644 index 00000000000..b845126e18a --- /dev/null +++ b/js/src/jsapi-tests/testNewObject.cpp @@ -0,0 +1,102 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- + * vim: set ts=4 sw=4 et tw=99: + */ + +#include "tests.h" + +const size_t N = 1000; +static jsval argv[N]; + +static JSBool +constructHook(JSContext *cx, JSObject *thisobj, uintN argc, jsval *argv, jsval *rval) +{ + // Check that arguments were passed properly from JS_New. + JSObject *callee = JSVAL_TO_OBJECT(JS_ARGV_CALLEE(argv)); + if (!thisobj) { + JS_ReportError(cx, "test failed, null 'this'"); + return false; + } + if (strcmp(JS_GET_CLASS(cx, thisobj)->name, "Object") != 0) { + JS_ReportError(cx, "test failed, wrong class for 'this'"); + return false; + } + if (argc != 3) { + JS_ReportError(cx, "test failed, argc == %d", argc); + return false; + } + if (!JSVAL_IS_INT(argv[2]) || JSVAL_TO_INT(argv[2]) != 2) { + JS_ReportError(cx, "test failed, wrong value in argv[2]"); + return false; + } + if (!JS_IsConstructing(cx)) { + JS_ReportError(cx, "test failed, not constructing"); + return false; + } + + // Perform a side-effect to indicate that this hook was actually called. + if (!JS_SetElement(cx, callee, 0, &argv[0])) + return false; + + *rval = OBJECT_TO_JSVAL(callee); // return the callee, perversely + argv[0] = argv[1] = argv[2] = JSVAL_VOID; // trash the argv, perversely + return true; +} + +BEGIN_TEST(testNewObject_1) +{ + jsval v; + EVAL("Array", &v); + JSObject *Array = JSVAL_TO_OBJECT(v); + + // With no arguments. + JSObject *obj = JS_New(cx, Array, 0, NULL); + CHECK(obj); + jsvalRoot rt(cx, OBJECT_TO_JSVAL(obj)); + CHECK(JS_IsArrayObject(cx, obj)); + jsuint len; + CHECK(JS_GetArrayLength(cx, obj, &len)); + CHECK(len == 0); + + // With one argument. + argv[0] = INT_TO_JSVAL(4); + obj = JS_New(cx, Array, 1, argv); + CHECK(obj); + rt = OBJECT_TO_JSVAL(obj); + CHECK(JS_IsArrayObject(cx, obj)); + CHECK(JS_GetArrayLength(cx, obj, &len)); + CHECK(len == 4); + + // With N arguments. 
+ JS_ASSERT(INT_FITS_IN_JSVAL(N)); + for (size_t i = 0; i < N; i++) + argv[i] = INT_TO_JSVAL(i); + obj = JS_New(cx, Array, N, argv); + CHECK(obj); + rt = OBJECT_TO_JSVAL(obj); + CHECK(JS_IsArrayObject(cx, obj)); + CHECK(JS_GetArrayLength(cx, obj, &len)); + CHECK(len == N); + CHECK(JS_GetElement(cx, obj, N - 1, &v)); + CHECK_SAME(v, INT_TO_JSVAL(N - 1)); + + // With JSClass.construct. + static JSClass cls = { + "testNewObject_1", + 0, + JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, JS_PropertyStub, + JS_EnumerateStub, JS_ResolveStub, JS_ConvertStub, NULL, + NULL, NULL, NULL, constructHook, NULL, NULL, NULL, NULL + }; + JSObject *ctor = JS_NewObject(cx, &cls, NULL, NULL); + CHECK(ctor); + jsvalRoot rt2(cx, OBJECT_TO_JSVAL(ctor)); + obj = JS_New(cx, ctor, 3, argv); + CHECK(obj); + CHECK(obj == ctor); // constructHook returns ctor, perversely + CHECK(JS_GetElement(cx, ctor, 0, &v)); + CHECK_SAME(v, JSVAL_ZERO); + CHECK_SAME(argv[0], JSVAL_ZERO); // original argv should not have been trashed + CHECK_SAME(argv[1], JSVAL_ONE); + return true; +} +END_TEST(testNewObject_1) diff --git a/js/src/jsapi.cpp b/js/src/jsapi.cpp index d47ccec6bce..3d5ae08dcca 100644 --- a/js/src/jsapi.cpp +++ b/js/src/jsapi.cpp @@ -438,7 +438,7 @@ JS_ValueToNumber(JSContext *cx, jsval v, jsdouble *dp) { CHECK_REQUEST(cx); - JSAutoTempValueRooter tvr(cx, v); + AutoValueRooter tvr(cx, v); *dp = js_ValueToNumber(cx, tvr.addr()); return !JSVAL_IS_NULL(tvr.value()); } @@ -454,7 +454,7 @@ JS_ValueToECMAInt32(JSContext *cx, jsval v, int32 *ip) { CHECK_REQUEST(cx); - JSAutoTempValueRooter tvr(cx, v); + AutoValueRooter tvr(cx, v); *ip = js_ValueToECMAInt32(cx, tvr.addr()); return !JSVAL_IS_NULL(tvr.value()); } @@ -464,7 +464,7 @@ JS_ValueToECMAUint32(JSContext *cx, jsval v, uint32 *ip) { CHECK_REQUEST(cx); - JSAutoTempValueRooter tvr(cx, v); + AutoValueRooter tvr(cx, v); *ip = js_ValueToECMAUint32(cx, tvr.addr()); return !JSVAL_IS_NULL(tvr.value()); } @@ -474,7 +474,7 @@ JS_ValueToInt32(JSContext *cx, jsval v, int32 *ip) { CHECK_REQUEST(cx); - JSAutoTempValueRooter tvr(cx, v); + AutoValueRooter tvr(cx, v); *ip = js_ValueToInt32(cx, tvr.addr()); return !JSVAL_IS_NULL(tvr.value()); } @@ -484,7 +484,7 @@ JS_ValueToUint16(JSContext *cx, jsval v, uint16 *ip) { CHECK_REQUEST(cx); - JSAutoTempValueRooter tvr(cx, v); + AutoValueRooter tvr(cx, v); *ip = js_ValueToUint16(cx, tvr.addr()); return !JSVAL_IS_NULL(tvr.value()); } @@ -563,12 +563,13 @@ JSRuntime::JSRuntime() bool JSRuntime::init(uint32 maxbytes) { - if (!js_InitDtoa() || - !js_InitGC(this, maxbytes) || - !js_InitAtomState(this) || - !js_InitDeflatedStringCache(this)) { + if (!js_InitGC(this, maxbytes) || !js_InitAtomState(this)) return false; - } + + deflatedStringCache = new js::DeflatedStringCache(); + if (!deflatedStringCache || !deflatedStringCache->init()) + return false; + #ifdef JS_THREADSAFE gcLock = JS_NEW_LOCK(); if (!gcLock) @@ -599,7 +600,7 @@ JSRuntime::init(uint32 maxbytes) if (!deallocatorThread || !deallocatorThread->init()) return false; #endif - return js_InitPropertyTree(this) && js_InitThreads(this); + return propertyTree.init() && js_InitThreads(this); } JSRuntime::~JSRuntime() @@ -629,7 +630,7 @@ JSRuntime::~JSRuntime() * Finish the deflated string cache after the last GC and after * calling js_FinishAtomState, which finalizes strings. 
*/ - js_FinishDeflatedStringCache(this); + delete deflatedStringCache; js_FinishGC(this); #ifdef JS_THREADSAFE if (gcLock) @@ -651,7 +652,7 @@ JSRuntime::~JSRuntime() delete deallocatorThread; } #endif - js_FinishPropertyTree(this); + propertyTree.finish(); } @@ -753,7 +754,6 @@ JS_ShutDown(void) reprmeter::js_DumpReprMeter(); #endif - js_FinishDtoa(); #ifdef JS_THREADSAFE js_CleanupLocks(); #endif @@ -874,6 +874,26 @@ JS_ResumeRequest(JSContext *cx, jsrefcount saveDepth) #endif } +JS_PUBLIC_API(void) +JS_TransferRequest(JSContext *cx, JSContext *another) +{ + JS_ASSERT(cx != another); + JS_ASSERT(cx->runtime == another->runtime); +#ifdef JS_THREADSAFE + JS_ASSERT(cx->thread); + JS_ASSERT(another->thread); + JS_ASSERT(cx->thread == another->thread); + JS_ASSERT(cx->requestDepth != 0); + JS_ASSERT(another->requestDepth == 0); + + /* Serialize access to JSContext::requestDepth from other threads. */ + JS_LOCK_GC(cx->runtime); + another->requestDepth = cx->requestDepth; + cx->requestDepth = 0; + JS_UNLOCK_GC(cx->runtime); +#endif +} + JS_PUBLIC_API(void) JS_Lock(JSRuntime *rt) { @@ -1343,7 +1363,6 @@ static JSStdName object_prototype_names[] = { /* Object.prototype properties (global delegates to Object.prototype). */ {js_InitObjectClass, EAGER_ATOM(proto), NULL}, {js_InitObjectClass, EAGER_ATOM(parent), NULL}, - {js_InitObjectClass, EAGER_ATOM(count), NULL}, #if JS_HAS_TOSOURCE {js_InitObjectClass, EAGER_ATOM(toSource), NULL}, #endif @@ -1666,15 +1685,13 @@ JS_GetScopeChain(JSContext *cx) JS_PUBLIC_API(JSObject *) JS_GetGlobalForObject(JSContext *cx, JSObject *obj) { - while (JSObject *parent = obj->getParent()) - obj = parent; - return obj; + return obj->getGlobal(); } JS_PUBLIC_API(jsval) JS_ComputeThis(JSContext *cx, jsval *vp) { - if (!js_ComputeThis(cx, JS_FALSE, vp + 2)) + if (!js_ComputeThis(cx, vp + 2)) return JSVAL_NULL; return vp[1]; } @@ -1904,7 +1921,7 @@ JS_PrintTraceThingInfo(char *buf, size_t bufsize, JSTracer *trc, case JSTRACE_OBJECT: { JSObject *obj = (JSObject *)thing; - JSClass *clasp = STOBJ_GET_CLASS(obj); + JSClass *clasp = obj->getClass(); name = clasp->name; #ifdef HAVE_XPCONNECT @@ -1956,7 +1973,7 @@ JS_PrintTraceThingInfo(char *buf, size_t bufsize, JSTracer *trc, case JSTRACE_OBJECT: { JSObject *obj = (JSObject *)thing; - JSClass *clasp = STOBJ_GET_CLASS(obj); + JSClass *clasp = obj->getClass(); if (clasp == &js_FunctionClass) { JSFunction *fun = GET_FUNCTION_PRIVATE(trc->context, obj); if (!fun) { @@ -2753,7 +2770,7 @@ JS_SealObject(JSContext *cx, JSObject *obj, JSBool deep) if (obj->isDenseArray() && !js_MakeArraySlow(cx, obj)) return JS_FALSE; - if (!OBJ_IS_NATIVE(obj)) { + if (!obj->isNative()) { JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_CANT_SEAL_OBJECT, OBJ_GET_CLASS(cx, obj)->name); @@ -2785,10 +2802,8 @@ JS_SealObject(JSContext *cx, JSObject *obj, JSBool deep) /* Ensure that obj has its own, mutable scope, and seal that scope. */ JS_LOCK_OBJ(cx, obj); scope = js_GetMutableScope(cx, obj); - if (scope) { - scope->sealingShapeChange(cx); - scope->setSealed(); - } + if (scope) + scope->seal(cx); JS_UNLOCK_OBJ(cx, obj); if (!scope) return JS_FALSE; @@ -2800,7 +2815,7 @@ JS_SealObject(JSContext *cx, JSObject *obj, JSBool deep) /* Walk slots in obj and if any value is a non-null object, seal it. 
*/ nslots = scope->freeslot; for (i = 0; i != nslots; ++i) { - v = STOBJ_GET_SLOT(obj, i); + v = obj->getSlot(i); if (i == JSSLOT_PRIVATE && (obj->getClass()->flags & JSCLASS_HAS_PRIVATE)) continue; if (JSVAL_IS_PRIMITIVE(v)) @@ -2836,7 +2851,7 @@ DefinePropertyById(JSContext *cx, JSObject *obj, jsid id, jsval value, JSPropertyOp getter, JSPropertyOp setter, uintN attrs, uintN flags, intN tinyid) { - if (flags != 0 && OBJ_IS_NATIVE(obj)) { + if (flags != 0 && obj->isNative()) { JSAutoResolveFlags rf(cx, JSRESOLVE_QUALIFIED | JSRESOLVE_DECLARING); return !!js_DefineNativeProperty(cx, obj, id, value, getter, setter, attrs, flags, tinyid, NULL); @@ -2853,7 +2868,7 @@ DefineProperty(JSContext *cx, JSObject *obj, const char *name, jsval value, JSAtom *atom; if (attrs & JSPROP_INDEX) { - id = INT_TO_JSID(JS_PTR_TO_INT32(name)); + id = INT_TO_JSID(intptr_t(name)); atom = NULL; attrs &= ~JSPROP_INDEX; } else { @@ -2879,7 +2894,7 @@ DefineUCProperty(JSContext *cx, JSObject *obj, atom = js_AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen), 0); if (!atom) return JS_FALSE; - if (flags != 0 && OBJ_IS_NATIVE(obj)) { + if (flags != 0 && obj->isNative()) { JSAutoResolveFlags rf(cx, JSRESOLVE_QUALIFIED | JSRESOLVE_DECLARING); return !!js_DefineNativeProperty(cx, obj, ATOM_TO_JSID(atom), value, getter, setter, attrs, flags, tinyid, @@ -2972,6 +2987,13 @@ JS_DefinePropertyWithTinyId(JSContext *cx, JSObject *obj, const char *name, JSScopeProperty::HAS_SHORTID, tinyid); } +JS_PUBLIC_API(JSBool) +JS_DefineOwnProperty(JSContext *cx, JSObject *obj, jsid id, jsval descriptor, JSBool *bp) +{ + CHECK_REQUEST(cx); + return js_DefineOwnProperty(cx, obj, id, descriptor, bp); +} + static JSBool LookupPropertyById(JSContext *cx, JSObject *obj, jsid id, uintN flags, JSObject **objp, JSProperty **propp) @@ -3023,7 +3045,7 @@ JS_AliasProperty(JSContext *cx, JSObject *obj, const char *name, js_ReportIsNotDefined(cx, name); return JS_FALSE; } - if (obj2 != obj || !OBJ_IS_NATIVE(obj)) { + if (obj2 != obj || !obj->isNative()) { obj2->dropProperty(cx, prop); JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_CANT_ALIAS, alias, name, OBJ_GET_CLASS(cx, obj2)->name); @@ -3036,7 +3058,7 @@ JS_AliasProperty(JSContext *cx, JSObject *obj, const char *name, sprop = (JSScopeProperty *)prop; ok = (js_AddNativeProperty(cx, obj, ATOM_TO_JSID(atom), sprop->getter(), sprop->setter(), sprop->slot, - sprop->attrs, sprop->getFlags() | JSScopeProperty::ALIAS, + sprop->attributes(), sprop->getFlags() | JSScopeProperty::ALIAS, sprop->shortid) != NULL); } @@ -3055,11 +3077,11 @@ LookupResult(JSContext *cx, JSObject *obj, JSObject *obj2, JSProperty *prop, } JSBool ok = JS_TRUE; - if (OBJ_IS_NATIVE(obj2)) { + if (obj2->isNative()) { JSScopeProperty *sprop = (JSScopeProperty *) prop; if (sprop->isMethod()) { - JSAutoTempValueRooter root(cx, sprop); + AutoScopePropertyRooter root(cx, sprop); JS_UNLOCK_OBJ(cx, obj2); *vp = sprop->methodValue(); return OBJ_SCOPE(obj2)->methodReadBarrier(cx, sprop, vp); @@ -3105,7 +3127,7 @@ GetPropertyAttributesById(JSContext *cx, JSObject *obj, jsid id, uintN flags, ok = obj2->getAttributes(cx, id, prop, &desc->attrs); if (ok) { - if (OBJ_IS_NATIVE(obj2)) { + if (obj2->isNative()) { JSScopeProperty *sprop = (JSScopeProperty *) prop; desc->getter = sprop->getter(); @@ -3235,7 +3257,7 @@ AlreadyHasOwnPropertyHelper(JSContext *cx, JSObject *obj, jsid id, { JSScope *scope; - if (!OBJ_IS_NATIVE(obj)) { + if (!obj->isNative()) { JSObject *obj2; JSProperty *prop; @@ -3359,7 +3381,7 @@ 
JS_LookupPropertyWithFlagsById(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop; CHECK_REQUEST(cx); - ok = OBJ_IS_NATIVE(obj) + ok = obj->isNative() ? js_LookupPropertyWithFlags(cx, obj, id, flags, objp, &prop) >= 0 : obj->lookupProperty(cx, id, objp, &prop); if (ok) @@ -3376,6 +3398,13 @@ JS_GetPropertyDescriptorById(JSContext *cx, JSObject *obj, jsid id, uintN flags, return GetPropertyAttributesById(cx, obj, id, flags, JS_FALSE, desc); } +JS_PUBLIC_API(JSBool) +JS_GetOwnPropertyDescriptor(JSContext *cx, JSObject *obj, jsid id, jsval *vp) +{ + CHECK_REQUEST(cx); + return js_GetOwnPropertyDescriptor(cx, obj, id, vp); +} + JS_PUBLIC_API(JSBool) JS_GetProperty(JSContext *cx, JSObject *obj, const char *name, jsval *vp) { @@ -3698,7 +3727,7 @@ JS_AliasElement(JSContext *cx, JSObject *obj, const char *name, jsint alias) js_ReportIsNotDefined(cx, name); return JS_FALSE; } - if (obj2 != obj || !OBJ_IS_NATIVE(obj)) { + if (obj2 != obj || !obj->isNative()) { char numBuf[12]; obj2->dropProperty(cx, prop); JS_snprintf(numBuf, sizeof numBuf, "%ld", (long)alias); @@ -3709,7 +3738,7 @@ JS_AliasElement(JSContext *cx, JSObject *obj, const char *name, jsint alias) sprop = (JSScopeProperty *)prop; ok = (js_AddNativeProperty(cx, obj, INT_TO_JSID(alias), sprop->getter(), sprop->setter(), sprop->slot, - sprop->attrs, sprop->getFlags() | JSScopeProperty::ALIAS, + sprop->attributes(), sprop->getFlags() | JSScopeProperty::ALIAS, sprop->shortid) != NULL); obj->dropProperty(cx, prop); @@ -3810,7 +3839,6 @@ JS_PUBLIC_API(JSIdArray *) JS_Enumerate(JSContext *cx, JSObject *obj) { jsint i, n; - jsval iter_state, num_properties; jsid id; JSIdArray *ida; jsval *vector; @@ -3818,11 +3846,11 @@ JS_Enumerate(JSContext *cx, JSObject *obj) CHECK_REQUEST(cx); ida = NULL; - iter_state = JSVAL_NULL; - JSAutoEnumStateRooter tvr(cx, obj, &iter_state); + AutoEnumStateRooter iterState(cx, obj); /* Get the number of properties to enumerate. */ - if (!obj->enumerate(cx, JSENUMERATE_INIT, &iter_state, &num_properties)) + jsval num_properties; + if (!obj->enumerate(cx, JSENUMERATE_INIT, iterState.addr(), &num_properties)) goto error; if (!JSVAL_IS_INT(num_properties)) { JS_ASSERT(0); @@ -3842,11 +3870,11 @@ JS_Enumerate(JSContext *cx, JSObject *obj) i = 0; vector = &ida->vector[0]; for (;;) { - if (!obj->enumerate(cx, JSENUMERATE_NEXT, &iter_state, &id)) + if (!obj->enumerate(cx, JSENUMERATE_NEXT, iterState.addr(), &id)) goto error; /* No more jsid's to enumerate ? */ - if (iter_state == JSVAL_NULL) + if (iterState.state() == JSVAL_NULL) break; if (i == ida->length) { @@ -3860,8 +3888,6 @@ JS_Enumerate(JSContext *cx, JSObject *obj) return SetIdArrayLength(cx, ida, i); error: - if (!JSVAL_IS_NULL(iter_state)) - obj->enumerate(cx, JSENUMERATE_DESTROY, &iter_state, 0); if (ida) JS_DestroyIdArray(cx, ida); return NULL; @@ -3933,7 +3959,7 @@ JS_NewPropertyIterator(JSContext *cx, JSObject *obj) if (!iterobj) return NULL; - if (OBJ_IS_NATIVE(obj)) { + if (obj->isNative()) { /* Native case: start with the last property in obj's own scope. */ scope = OBJ_SCOPE(obj); pdata = scope->lastProperty(); @@ -3945,7 +3971,7 @@ JS_NewPropertyIterator(JSContext *cx, JSObject *obj) * Note: we have to make sure that we root obj around the call to * JS_Enumerate to protect against multiple allocations under it. 
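A typical consumer of JS_Enumerate follows the same contract the rewritten body relies on: the returned JSIdArray must be released with JS_DestroyIdArray. A sketch, assuming obj is rooted by the caller:

    JSIdArray *ida = JS_Enumerate(cx, obj);
    if (!ida)
        return JS_FALSE;
    for (jsint i = 0; i < ida->length; i++) {
        jsid id = ida->vector[i];
        /* ... look up or print each enumerable property id ... */
    }
    JS_DestroyIdArray(cx, ida);
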
*/ - JSAutoTempValueRooter tvr(cx, iterobj); + AutoValueRooter tvr(cx, iterobj); ida = JS_Enumerate(cx, obj); if (!ida) return NULL; @@ -3973,7 +3999,7 @@ JS_NextProperty(JSContext *cx, JSObject *iterobj, jsid *idp) if (i < 0) { /* Native case: private data is a property tree node pointer. */ obj = iterobj->getParent(); - JS_ASSERT(OBJ_IS_NATIVE(obj)); + JS_ASSERT(obj->isNative()); scope = OBJ_SCOPE(obj); sprop = (JSScopeProperty *) iterobj->getPrivate(); @@ -3999,7 +4025,7 @@ JS_NextProperty(JSContext *cx, JSObject *iterobj, jsid *idp) *idp = JSVAL_VOID; } else { *idp = ida->vector[--i]; - STOBJ_SET_SLOT(iterobj, JSSLOT_ITER_INDEX, INT_TO_JSVAL(i)); + iterobj->setSlot(JSSLOT_ITER_INDEX, INT_TO_JSVAL(i)); } } return JS_TRUE; @@ -4257,7 +4283,7 @@ js_generic_fast_native_method_dispatcher(JSContext *cx, uintN argc, jsval *vp) * Follow Function.prototype.apply and .call by using the global object as * the 'this' param if no args. */ - if (!js_ComputeThis(cx, JS_FALSE, vp + 2)) + if (!js_ComputeThis(cx, vp + 2)) return JS_FALSE; /* * Protect against argc underflowing. By calling js_ComputeThis, we made @@ -4321,7 +4347,7 @@ js_generic_native_method_dispatcher(JSContext *cx, JSObject *obj, * Follow Function.prototype.apply and .call by using the global object as * the 'this' param if no args. */ - if (!js_ComputeThis(cx, JS_TRUE, argv)) + if (!js_ComputeThis(cx, argv)) return JS_FALSE; js_GetTopStackFrame(cx)->thisv = argv[-1]; JS_ASSERT(cx->fp->argv == argv); @@ -4413,6 +4439,7 @@ JS_DefineUCFunction(JSContext *cx, JSObject *obj, { JSAtom *atom; + CHECK_REQUEST(cx); atom = js_AtomizeChars(cx, name, AUTO_NAMELEN(name, namelen), 0); if (!atom) return NULL; @@ -4595,7 +4622,6 @@ JS_CompileFileHandleForPrincipals(JSContext *cx, JSObject *obj, JS_PUBLIC_API(JSObject *) JS_NewScriptObject(JSContext *cx, JSScript *script) { - JSTempValueRooter tvr; JSObject *obj; CHECK_REQUEST(cx); @@ -4604,16 +4630,19 @@ JS_NewScriptObject(JSContext *cx, JSScript *script) JS_ASSERT(!script->u.object); - JS_PUSH_TEMP_ROOT_SCRIPT(cx, script, &tvr); - obj = js_NewObject(cx, &js_ScriptClass, NULL, NULL); - if (obj) { - obj->setPrivate(script); - script->u.object = obj; + { + AutoScriptRooter root(cx, script); + + obj = js_NewObject(cx, &js_ScriptClass, NULL, NULL); + if (obj) { + obj->setPrivate(script); + script->u.object = obj; #ifdef CHECK_SCRIPT_OWNER - script->owner = NULL; + script->owner = NULL; #endif + } } - JS_POP_TEMP_ROOT(cx, &tvr); + return obj; } @@ -4691,7 +4720,6 @@ JS_CompileUCFunctionForPrincipals(JSContext *cx, JSObject *obj, const char *filename, uintN lineno) { JSFunction *fun; - JSTempValueRooter tvr; JSAtom *funAtom, *argAtom; uintN i; @@ -4709,47 +4737,48 @@ JS_CompileUCFunctionForPrincipals(JSContext *cx, JSObject *obj, if (!fun) goto out2; - MUST_FLOW_THROUGH("out"); - JS_PUSH_TEMP_ROOT_OBJECT(cx, FUN_OBJECT(fun), &tvr); - for (i = 0; i < nargs; i++) { - argAtom = js_Atomize(cx, argnames[i], strlen(argnames[i]), 0); - if (!argAtom) { + { + AutoValueRooter tvr(cx, FUN_OBJECT(fun)); + MUST_FLOW_THROUGH("out"); + + for (i = 0; i < nargs; i++) { + argAtom = js_Atomize(cx, argnames[i], strlen(argnames[i]), 0); + if (!argAtom) { + fun = NULL; + goto out; + } + if (!js_AddLocal(cx, fun, argAtom, JSLOCAL_ARG)) { + fun = NULL; + goto out; + } + } + + if (!JSCompiler::compileFunctionBody(cx, fun, principals, + chars, length, filename, lineno)) { fun = NULL; goto out; } - if (!js_AddLocal(cx, fun, argAtom, JSLOCAL_ARG)) { + + if (obj && funAtom && + !obj->defineProperty(cx, ATOM_TO_JSID(funAtom), 
OBJECT_TO_JSVAL(FUN_OBJECT(fun)), + NULL, NULL, JSPROP_ENUMERATE)) { fun = NULL; - goto out; } - } - - if (!JSCompiler::compileFunctionBody(cx, fun, principals, - chars, length, filename, lineno)) { - fun = NULL; - goto out; - } - - if (obj && - funAtom && - !obj->defineProperty(cx, ATOM_TO_JSID(funAtom), OBJECT_TO_JSVAL(FUN_OBJECT(fun)), - NULL, NULL, JSPROP_ENUMERATE)) { - fun = NULL; - } #ifdef JS_SCOPE_DEPTH_METER - if (fun && obj) { - JSObject *pobj = obj; - uintN depth = 1; + if (fun && obj) { + JSObject *pobj = obj; + uintN depth = 1; - while ((pobj = pobj->getParent()) != NULL) - ++depth; - JS_BASIC_STATS_ACCUM(&cx->runtime->hostenvScopeDepthStats, depth); - } + while ((pobj = pobj->getParent()) != NULL) + ++depth; + JS_BASIC_STATS_ACCUM(&cx->runtime->hostenvScopeDepthStats, depth); + } #endif - out: - cx->weakRoots.finalizableNewborns[FINALIZE_FUNCTION] = fun; - JS_POP_TEMP_ROOT(cx, &tvr); + out: + cx->weakRoots.finalizableNewborns[FINALIZE_FUNCTION] = fun; + } out2: LAST_FRAME_CHECKS(cx, fun); @@ -4809,36 +4838,6 @@ JS_ExecuteScript(JSContext *cx, JSObject *obj, JSScript *script, jsval *rval) return ok; } -JS_PUBLIC_API(JSBool) -JS_ExecuteScriptPart(JSContext *cx, JSObject *obj, JSScript *script, - JSExecPart part, jsval *rval) -{ - JSScript tmp; - JSBool ok; - - /* Make a temporary copy of the JSScript structure and farble it a bit. */ - tmp = *script; - if (part == JSEXEC_PROLOG) { - tmp.length = tmp.main - tmp.code; - } else { - tmp.length -= tmp.main - tmp.code; - tmp.code = tmp.main; - } - - /* Tell the debugger about our temporary copy of the script structure. */ - const JSDebugHooks *hooks = cx->debugHooks; - if (hooks->newScriptHook) { - hooks->newScriptHook(cx, tmp.filename, tmp.lineno, &tmp, NULL, - hooks->newScriptHookData); - } - - /* Execute the farbled struct and tell the debugger to forget about it. */ - ok = JS_ExecuteScript(cx, obj, &tmp, rval); - if (hooks->destroyScriptHook) - hooks->destroyScriptHook(cx, &tmp, hooks->destroyScriptHookData); - return ok; -} - /* Ancient uintN nbytes is part of API/ABI, so use size_t length local. */ JS_PUBLIC_API(JSBool) JS_EvaluateScript(JSContext *cx, JSObject *obj, @@ -4937,7 +4936,7 @@ JS_CallFunctionName(JSContext *cx, JSObject *obj, const char *name, uintN argc, { CHECK_REQUEST(cx); - JSAutoTempValueRooter tvr(cx); + AutoValueRooter tvr(cx); JSAtom *atom = js_Atomize(cx, name, strlen(name), 0); JSBool ok = atom && js_GetMethod(cx, obj, ATOM_TO_JSID(atom), @@ -4959,6 +4958,31 @@ JS_CallFunctionValue(JSContext *cx, JSObject *obj, jsval fval, uintN argc, return ok; } +JS_PUBLIC_API(JSObject *) +JS_New(JSContext *cx, JSObject *ctor, uintN argc, jsval *argv) +{ + CHECK_REQUEST(cx); + + // This is not a simple variation of JS_CallFunctionValue because JSOP_NEW + // is not a simple variation of JSOP_CALL. We have to determine what class + // of object to create, create it, and clamp the return value to an object, + // among other details. js_InvokeConstructor does the hard work. + void *mark; + jsval *vp = js_AllocStack(cx, 2 + argc, &mark); + if (!vp) + return NULL; + vp[0] = OBJECT_TO_JSVAL(ctor); + vp[1] = JSVAL_NULL; + memcpy(vp + 2, argv, argc * sizeof(jsval)); + + JSBool ok = js_InvokeConstructor(cx, argc, JS_TRUE, vp); + JSObject *obj = ok ? 
JSVAL_TO_OBJECT(vp[0]) : NULL; + + js_FreeStack(cx, mark); + LAST_FRAME_CHECKS(cx, ok); + return obj; +} + JS_PUBLIC_API(JSOperationCallback) JS_SetOperationCallback(JSContext *cx, JSOperationCallback callback) { @@ -5064,7 +5088,7 @@ JS_NewString(JSContext *cx, char *bytes, size_t nbytes) } /* Hand off bytes to the deflated string cache, if possible. */ - if (!js_SetStringBytes(cx, str, bytes, nbytes)) + if (!cx->runtime->deflatedStringCache->setBytes(cx, str, bytes)) cx->free(bytes); return str; } @@ -5193,7 +5217,7 @@ JS_GetStringChars(JSString *str) if (s) { memcpy(s, str->dependentChars(), n * sizeof *s); s[n] = 0; - str->reinitFlat(s, n); + str->initFlat(s, n); } else { s = str->dependentChars(); } diff --git a/js/src/jsapi.h b/js/src/jsapi.h index 553d8df2089..d6a0cc7e563 100644 --- a/js/src/jsapi.h +++ b/js/src/jsapi.h @@ -1,4 +1,4 @@ -/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- * vim: set ts=8 sw=4 et tw=78: * * ***** BEGIN LICENSE BLOCK ***** @@ -561,6 +561,9 @@ JS_SuspendRequest(JSContext *cx); extern JS_PUBLIC_API(void) JS_ResumeRequest(JSContext *cx, jsrefcount saveDepth); +extern JS_PUBLIC_API(void) +JS_TransferRequest(JSContext *cx, JSContext *another); + #ifdef __cplusplus JS_END_EXTERN_C @@ -626,6 +629,27 @@ class JSAutoSuspendRequest { #endif }; +class JSAutoTransferRequest +{ + public: + JSAutoTransferRequest(JSContext* cx1, JSContext* cx2) + : cx1(cx1), cx2(cx2) { + if (cx1 != cx2) + JS_TransferRequest(cx1, cx2); + } + ~JSAutoTransferRequest() { + if (cx1 != cx2) + JS_TransferRequest(cx2, cx1); + } + private: + JSContext* const cx1; + JSContext* const cx2; + + /* Not copyable. */ + JSAutoTransferRequest(JSAutoTransferRequest &); + void operator =(JSAutoTransferRequest&); +}; + JS_BEGIN_EXTERN_C #endif @@ -799,6 +823,15 @@ JS_GetScopeChain(JSContext *cx); extern JS_PUBLIC_API(JSObject *) JS_GetGlobalForObject(JSContext *cx, JSObject *obj); +#ifdef JS_HAS_CTYPES +/* + * Initialize the 'ctypes' object on the global object 'global'. The 'ctypes' + * object will be sealed. + */ +extern JS_PUBLIC_API(JSBool) +JS_InitCTypesClass(JSContext *cx, JSObject *global); +#endif + /* * Macros to hide interpreter stack layout details from a JSFastNative using * its jsval *vp parameter. The stack layout underlying invocation can't change @@ -1681,6 +1714,9 @@ extern JS_PUBLIC_API(JSObject *) JS_ConstructObjectWithArguments(JSContext *cx, JSClass *clasp, JSObject *proto, JSObject *parent, uintN argc, jsval *argv); +extern JS_PUBLIC_API(JSObject *) +JS_New(JSContext *cx, JSObject *ctor, uintN argc, jsval *argv); + extern JS_PUBLIC_API(JSObject *) JS_DefineObject(JSContext *cx, JSObject *obj, const char *name, JSClass *clasp, JSObject *proto, uintN attrs); @@ -1699,6 +1735,9 @@ extern JS_PUBLIC_API(JSBool) JS_DefinePropertyById(JSContext *cx, JSObject *obj, jsid id, jsval value, JSPropertyOp getter, JSPropertyOp setter, uintN attrs); +extern JS_PUBLIC_API(JSBool) +JS_DefineOwnProperty(JSContext *cx, JSObject *obj, jsid id, jsval descriptor, JSBool *bp); + /* * Determine the attributes (JSPROP_* flags) of a property on a given object.
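JSAutoTransferRequest pairs the two JS_TransferRequest calls so the request always returns to the original context. A usage sketch with hypothetical contexts; per the assertions in JS_TransferRequest, both must belong to the same runtime and the same thread, with a live request on cx only:

    void CallOnOtherContext(JSContext *cx, JSContext *target)
    {
        JSAutoTransferRequest transfer(cx, target);
        /* cx's request now covers target; use target here. */
    }   /* destructor transfers the request back to cx */
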
* @@ -1793,6 +1832,9 @@ extern JS_PUBLIC_API(JSBool) JS_GetPropertyDescriptorById(JSContext *cx, JSObject *obj, jsid id, uintN flags, JSPropertyDescriptor *desc); +extern JS_PUBLIC_API(JSBool) +JS_GetOwnPropertyDescriptor(JSContext *cx, JSObject *obj, jsid id, jsval *vp); + extern JS_PUBLIC_API(JSBool) JS_GetProperty(JSContext *cx, JSObject *obj, const char *name, jsval *vp); @@ -2228,10 +2270,10 @@ extern JS_PUBLIC_API(JSString *) JS_DecompileFunctionBody(JSContext *cx, JSFunction *fun, uintN indent); /* - * NB: JS_ExecuteScript, JS_ExecuteScriptPart, and the JS_Evaluate*Script* - * quadruplets all use the obj parameter as the initial scope chain header, - * the 'this' keyword value, and the variables object (ECMA parlance for where - * 'var' and 'function' bind names) of the execution context for script. + * NB: JS_ExecuteScript and the JS_Evaluate*Script* quadruplets use the obj + * parameter as the initial scope chain header, the 'this' keyword value, and + * the variables object (ECMA parlance for where 'var' and 'function' bind + * names) of the execution context for script. * * Using obj as the variables object is problematic if obj's parent (which is * the scope chain link; see JS_SetParent and JS_NewObject) is not null: in @@ -2271,10 +2313,6 @@ JS_ExecuteScript(JSContext *cx, JSObject *obj, JSScript *script, jsval *rval); */ typedef enum JSExecPart { JSEXEC_PROLOG, JSEXEC_MAIN } JSExecPart; -extern JS_PUBLIC_API(JSBool) -JS_ExecuteScriptPart(JSContext *cx, JSObject *obj, JSScript *script, - JSExecPart part, jsval *rval); - extern JS_PUBLIC_API(JSBool) JS_EvaluateScript(JSContext *cx, JSObject *obj, const char *bytes, uintN length, diff --git a/js/src/jsarray.cpp b/js/src/jsarray.cpp index edf0b43348c..d5b1b390641 100644 --- a/js/src/jsarray.cpp +++ b/js/src/jsarray.cpp @@ -228,16 +228,21 @@ js_GetLengthProperty(JSContext *cx, JSObject *obj, jsuint *lengthp) { if (obj->isArray()) { *lengthp = obj->fslots[JSSLOT_ARRAY_LENGTH]; - return JS_TRUE; + return true; } - JSAutoTempValueRooter tvr(cx, JSVAL_NULL); + if (obj->isArguments() && !IsOverriddenArgsLength(obj)) { + *lengthp = GetArgsLength(obj); + return true; + } + + AutoValueRooter tvr(cx, JSVAL_NULL); if (!obj->getProperty(cx, ATOM_TO_JSID(cx->runtime->atomState.lengthAtom), tvr.addr())) - return JS_FALSE; + return false; if (JSVAL_IS_INT(tvr.value())) { *lengthp = jsuint(jsint(JSVAL_TO_INT(tvr.value()))); /* jsuint cast does ToUint32 */ - return JS_TRUE; + return true; } *lengthp = js_ValueToECMAUint32(cx, tvr.addr()); @@ -405,7 +410,7 @@ EnsureCapacity(JSContext *cx, JSObject *obj, uint32 newcap, static bool ReallyBigIndexToId(JSContext* cx, jsdouble index, jsid* idp) { - JSAutoTempValueRooter dval(cx); + AutoValueRooter dval(cx); if (!js_NewDoubleInRootedValue(cx, index, dval.addr()) || !js_ValueToStringId(cx, dval.value(), idp)) { return JS_FALSE; @@ -450,7 +455,7 @@ GetArrayElement(JSContext *cx, JSObject *obj, jsdouble index, JSBool *hole, return JS_TRUE; } - JSAutoTempIdRooter idr(cx); + AutoIdRooter idr(cx); *hole = JS_FALSE; if (!IndexToId(cx, obj, index, hole, idr.addr())) @@ -505,7 +510,7 @@ SetArrayElement(JSContext *cx, JSObject *obj, jsdouble index, jsval v) return JS_FALSE; } - JSAutoTempIdRooter idr(cx); + AutoIdRooter idr(cx); if (!IndexToId(cx, obj, index, NULL, idr.addr(), JS_TRUE)) return JS_FALSE; @@ -531,7 +536,7 @@ DeleteArrayElement(JSContext *cx, JSObject *obj, jsdouble index) return JS_TRUE; } - JSAutoTempIdRooter idr(cx); + AutoIdRooter idr(cx); if (!IndexToId(cx, obj, index, NULL, idr.addr())) return 
JS_FALSE; @@ -573,7 +578,7 @@ JSBool js_HasLengthProperty(JSContext *cx, JSObject *obj, jsuint *lengthp) { JSErrorReporter older = JS_SetErrorReporter(cx, NULL); - JSAutoTempValueRooter tvr(cx, JSVAL_NULL); + AutoValueRooter tvr(cx, JSVAL_NULL); jsid id = ATOM_TO_JSID(cx->runtime->atomState.lengthAtom); JSBool ok = obj->getProperty(cx, id, tvr.addr()); JS_SetErrorReporter(cx, older); @@ -587,11 +592,9 @@ js_HasLengthProperty(JSContext *cx, JSObject *obj, jsuint *lengthp) JSBool js_IsArrayLike(JSContext *cx, JSObject *obj, JSBool *answerp, jsuint *lengthp) { - JSClass *clasp; + JSObject *wrappedObj = js_GetWrappedObject(cx, obj); - clasp = OBJ_GET_CLASS(cx, js_GetWrappedObject(cx, obj)); - *answerp = (clasp == &js_ArgumentsClass || clasp == &js_ArrayClass || - clasp == &js_SlowArrayClass); + *answerp = wrappedObj->isArguments() || wrappedObj->isArray(); if (!*answerp) { *lengthp = 0; return JS_TRUE; @@ -626,9 +629,6 @@ array_length_setter(JSContext *cx, JSObject *obj, jsval id, jsval *vp) { jsuint newlen, oldlen, gap, index; jsval junk; - JSObject *iter; - JSTempValueRooter tvr; - JSBool ok; if (!obj->isArray()) { jsid lengthId = ATOM_TO_JSID(cx->runtime->atomState.lengthAtom); @@ -638,32 +638,30 @@ array_length_setter(JSContext *cx, JSObject *obj, jsval id, jsval *vp) newlen = ValueIsLength(cx, vp); if (JSVAL_IS_NULL(*vp)) - return JS_FALSE; + return false; oldlen = obj->fslots[JSSLOT_ARRAY_LENGTH]; if (oldlen == newlen) - return JS_TRUE; + return true; if (!IndexToValue(cx, newlen, vp)) - return JS_FALSE; + return false; if (oldlen < newlen) { obj->fslots[JSSLOT_ARRAY_LENGTH] = newlen; - return JS_TRUE; + return true; } if (obj->isDenseArray()) { /* Don't reallocate if we're not actually shrinking our slots. */ jsuint capacity = js_DenseArrayCapacity(obj); if (capacity > newlen && !ResizeSlots(cx, obj, capacity, newlen)) - return JS_FALSE; + return false; } else if (oldlen - newlen < (1 << 24)) { do { --oldlen; - if (!JS_CHECK_OPERATION_LIMIT(cx) || - !DeleteArrayElement(cx, obj, oldlen)) { - return JS_FALSE; - } + if (!JS_CHECK_OPERATION_LIMIT(cx) || !DeleteArrayElement(cx, obj, oldlen)) + return false; } while (oldlen != newlen); } else { /* @@ -673,33 +671,28 @@ array_length_setter(JSContext *cx, JSObject *obj, jsval id, jsval *vp) * correspond to indexes in the half-open range [newlen, oldlen). See * bug 322135. */ - iter = JS_NewPropertyIterator(cx, obj); + JSObject *iter = JS_NewPropertyIterator(cx, obj); if (!iter) - return JS_FALSE; + return false; /* Protect iter against GC under JSObject::deleteProperty. 
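The rewrite below is the pattern applied throughout this patch: a manual JS_PUSH_TEMP_ROOT_OBJECT/JS_POP_TEMP_ROOT pair, which forces single-exit control flow, becomes a scoped RAII rooter. Schematically, using the names from the surrounding code:

    /* before: every failure path must reach the pop */
    JS_PUSH_TEMP_ROOT_OBJECT(cx, iter, &tvr);
    /* ... set ok and break out of loops on failure ... */
    JS_POP_TEMP_ROOT(cx, &tvr);

    /* after: early returns are safe */
    AutoValueRooter tvr(cx, iter);   /* unroots when tvr goes out of scope */
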
*/ - JS_PUSH_TEMP_ROOT_OBJECT(cx, iter, &tvr); + AutoValueRooter tvr(cx, iter); + gap = oldlen - newlen; for (;;) { - ok = (JS_CHECK_OPERATION_LIMIT(cx) && - JS_NextProperty(cx, iter, &id)); - if (!ok) - break; + if (!JS_CHECK_OPERATION_LIMIT(cx) || !JS_NextProperty(cx, iter, &id)) + return false; if (JSVAL_IS_VOID(id)) break; - if (js_IdIsIndex(id, &index) && index - newlen < gap) { - ok = obj->deleteProperty(cx, id, &junk); - if (!ok) - break; + if (js_IdIsIndex(id, &index) && index - newlen < gap && + !obj->deleteProperty(cx, id, &junk)) { + return false; } } - JS_POP_TEMP_ROOT(cx, &tvr); - if (!ok) - return JS_FALSE; } obj->fslots[JSSLOT_ARRAY_LENGTH] = newlen; - return JS_TRUE; + return true; } /* @@ -797,7 +790,7 @@ array_getProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp) return JS_FALSE; if (prop) { - if (OBJ_IS_NATIVE(obj2)) { + if (obj2->isNative()) { sprop = (JSScopeProperty *) prop; if (!js_NativeGet(cx, obj, obj2, sprop, JSGET_METHOD_BARRIER, vp)) return JS_FALSE; @@ -896,7 +889,7 @@ js_PrototypeHasIndexedProperties(JSContext *cx, JSObject *obj) * a native object (possibly a slow array) that has indexed properties, * return true. */ - if (!OBJ_IS_NATIVE(obj)) + if (!obj->isNative()) return JS_TRUE; if (OBJ_SCOPE(obj)->hadIndexedProperties()) return JS_TRUE; @@ -1319,10 +1312,10 @@ js_MakeArraySlow(JSContext *cx, JSObject *obj) uint32 capacity = js_DenseArrayCapacity(obj); if (capacity) { - scope->freeslot = STOBJ_NSLOTS(obj) + JS_INITIAL_NSLOTS; + scope->freeslot = obj->numSlots() + JS_INITIAL_NSLOTS; obj->dslots[-1] = JS_INITIAL_NSLOTS + capacity; } else { - scope->freeslot = STOBJ_NSLOTS(obj); + scope->freeslot = obj->numSlots(); } /* Create new properties pointing to existing values in dslots */ @@ -1345,21 +1338,14 @@ js_MakeArraySlow(JSContext *cx, JSObject *obj) } /* - * Render our formerly-reserved count property GC-safe. If length fits in - * a jsval, set our slow/sparse COUNT to the current length as a jsval, so - * we can tell when only named properties have been added to a dense array - * to make it slow-but-not-sparse. - * + * Render our formerly-reserved count property GC-safe. * We do not need to make the length slot GC-safe as this slot is private * where the implementation can store an arbitrary value. */ { JS_STATIC_ASSERT(JSSLOT_ARRAY_LENGTH == JSSLOT_PRIVATE); JS_ASSERT(js_SlowArrayClass.flags & JSCLASS_HAS_PRIVATE); - uint32 length = uint32(obj->fslots[JSSLOT_ARRAY_LENGTH]); - obj->fslots[JSSLOT_ARRAY_COUNT] = INT_FITS_IN_JSVAL(length) - ? INT_TO_JSVAL(length) - : JSVAL_VOID; + obj->fslots[JSSLOT_ARRAY_COUNT] = JSVAL_VOID; } /* Make sure we preserve any flags borrowing bits in classword. */ @@ -1518,7 +1504,7 @@ array_toString_sub(JSContext *cx, JSObject *obj, JSBool locale, return true; } - JSAutoTempValueRooter tvr(cx, obj); + AutoValueRooter tvr(cx, obj); /* After this point, all paths exit through the 'out' label. */ MUST_FLOW_THROUGH("out"); @@ -1652,7 +1638,7 @@ InitArrayElements(JSContext *cx, JSObject *obj, jsuint start, jsuint count, jsva #ifdef DEBUG_jwalden { /* Verify that overwriteType and writeType were accurate. 
*/ - JSAutoTempIdRooter idr(cx, JSVAL_ZERO); + AutoIdRooter idr(cx); for (jsuint i = 0; i < count; i++) { JS_ASSERT_IF(vectorType == SourceVectorAllValues, vector[i] != JSVAL_HOLE); @@ -1718,12 +1704,12 @@ InitArrayElements(JSContext *cx, JSObject *obj, jsuint start, jsuint count, jsva JS_ASSERT(start == MAXINDEX); jsval tmp[2] = {JSVAL_NULL, JSVAL_NULL}; - JSAutoTempValueRooter tvr(cx, JS_ARRAY_LENGTH(tmp), tmp); + AutoArrayRooter tvr(cx, JS_ARRAY_LENGTH(tmp), tmp); if (!js_NewDoubleInRootedValue(cx, MAXINDEX, &tmp[0])) return JS_FALSE; jsdouble *dp = JSVAL_TO_DOUBLE(tmp[0]); JS_ASSERT(*dp == MAXINDEX); - JSAutoTempIdRooter idr(cx); + AutoIdRooter idr(cx); do { tmp[1] = *vector++; if (!js_ValueToStringId(cx, tmp[0], idr.addr()) || @@ -1737,8 +1723,8 @@ InitArrayElements(JSContext *cx, JSObject *obj, jsuint start, jsuint count, jsva } static JSBool -InitArrayObject(JSContext *cx, JSObject *obj, jsuint length, jsval *vector, - JSBool holey = JS_FALSE) +InitArrayObject(JSContext *cx, JSObject *obj, jsuint length, const jsval *vector, + bool holey = false) { JS_ASSERT(obj->isArray()); @@ -1769,7 +1755,7 @@ InitArrayObject(JSContext *cx, JSObject *obj, jsuint length, jsval *vector, static JSString* FASTCALL Array_p_join(JSContext* cx, JSObject* obj, JSString *str) { - JSAutoTempValueRooter tvr(cx); + AutoValueRooter tvr(cx); if (!array_toString_sub(cx, obj, JS_FALSE, str, tvr.addr())) { SetBuiltinError(cx); return NULL; @@ -1780,7 +1766,7 @@ Array_p_join(JSContext* cx, JSObject* obj, JSString *str) static JSString* FASTCALL Array_p_toString(JSContext* cx, JSObject* obj) { - JSAutoTempValueRooter tvr(cx); + AutoValueRooter tvr(cx); if (!array_toString_sub(cx, obj, JS_FALSE, NULL, tvr.addr())) { SetBuiltinError(cx); return NULL; @@ -1852,7 +1838,7 @@ array_reverse(JSContext *cx, uintN argc, jsval *vp) return JS_TRUE; } - JSAutoTempValueRooter tvr(cx, JSVAL_NULL); + AutoValueRooter tvr(cx); for (jsuint i = 0, half = len / 2; i < half; i++) { JSBool hole, hole2; if (!JS_CHECK_OPERATION_LIMIT(cx) || @@ -2102,40 +2088,28 @@ JS_STATIC_ASSERT(JSVAL_NULL == 0); static JSBool array_sort(JSContext *cx, uintN argc, jsval *vp) { - jsval *argv, fval, *vec, *mergesort_tmp, v; - JSObject *obj; - CompareArgs ca; + jsval fval; jsuint len, newlen, i, undefs; - JSTempValueRooter tvr; - JSBool hole; - JSBool ok; size_t elemsize; JSString *str; - /* - * Optimize the default compare function case if all of obj's elements - * have values of type string. - */ - JSBool all_strings; - - argv = JS_ARGV(cx, vp); + jsval *argv = JS_ARGV(cx, vp); if (argc > 0) { if (JSVAL_IS_PRIMITIVE(argv[0])) { - JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, - JSMSG_BAD_SORT_ARG); - return JS_FALSE; + JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_BAD_SORT_ARG); + return false; } fval = argv[0]; /* non-default compare function */ } else { fval = JSVAL_NULL; } - obj = JS_THIS_OBJECT(cx, vp); + JSObject *obj = JS_THIS_OBJECT(cx, vp); if (!obj || !js_GetLengthProperty(cx, obj, &len)) - return JS_FALSE; + return false; if (len == 0) { *vp = OBJECT_TO_JSVAL(obj); - return JS_TRUE; + return true; } /* @@ -2145,19 +2119,16 @@ array_sort(JSContext *cx, uintN argc, jsval *vp) * malloc'd vector. */ #if JS_BITS_PER_WORD == 32 - if ((size_t)len > ~(size_t)0 / (2 * sizeof(jsval))) { + if (size_t(len) > size_t(-1) / (2 * sizeof(jsval))) { js_ReportAllocationOverflow(cx); - return JS_FALSE; + return false; } #endif - vec = (jsval *) cx->malloc(2 * (size_t) len * sizeof(jsval)); - if (!vec) - return JS_FALSE; /* * Initialize vec as a root. 
We will clear elements of vec one by - * one while increasing tvr.count when we know that the property at - * the corresponding index exists and its value must be rooted. + * one while increasing the rooted amount of vec when we know that the + * property at the corresponding index exists and its value must be rooted. * * In this way when sorting a huge mostly sparse array we will not * access the tail of vec corresponding to properties that do not @@ -2165,204 +2136,196 @@ array_sort(JSContext *cx, uintN argc, jsval *vp) * * After this point control must flow through label out: to exit. */ - JS_PUSH_TEMP_ROOT(cx, 0, vec, &tvr); + { + jsval *vec = (jsval *) cx->malloc(2 * size_t(len) * sizeof(jsval)); + if (!vec) + return false; - /* - * By ECMA 262, 15.4.4.11, a property that does not exist (which we - * call a "hole") is always greater than an existing property with - * value undefined and that is always greater than any other property. - * Thus to sort holes and undefs we simply count them, sort the rest - * of elements, append undefs after them and then make holes after - * undefs. - */ - undefs = 0; - newlen = 0; - all_strings = JS_TRUE; - for (i = 0; i < len; i++) { - ok = JS_CHECK_OPERATION_LIMIT(cx); - if (!ok) - goto out; + struct AutoFreeVector { + AutoFreeVector(JSContext *cx, jsval *&vec) : cx(cx), vec(vec) { } + ~AutoFreeVector() { + cx->free(vec); + } + JSContext * const cx; + jsval *&vec; + } free(cx, vec); - /* Clear vec[newlen] before including it in the rooted set. */ - vec[newlen] = JSVAL_NULL; - tvr.count = newlen + 1; - ok = GetArrayElement(cx, obj, i, &hole, &vec[newlen]); - if (!ok) - goto out; + AutoArrayRooter tvr(cx, 0, vec); - if (hole) - continue; + /* + * By ECMA 262, 15.4.4.11, a property that does not exist (which we + * call a "hole") is always greater than an existing property with + * value undefined and that is always greater than any other property. + * Thus to sort holes and undefs we simply count them, sort the rest + * of elements, append undefs after them and then make holes after + * undefs. + */ + undefs = 0; + newlen = 0; + bool allStrings = true; + for (i = 0; i < len; i++) { + if (!JS_CHECK_OPERATION_LIMIT(cx)) + return false; - if (JSVAL_IS_VOID(vec[newlen])) { - ++undefs; - continue; + /* Clear vec[newlen] before including it in the rooted set. */ + JSBool hole; + vec[newlen] = JSVAL_NULL; + tvr.changeLength(newlen + 1); + if (!GetArrayElement(cx, obj, i, &hole, &vec[newlen])) + return false; + + if (hole) + continue; + + if (JSVAL_IS_VOID(vec[newlen])) { + ++undefs; + continue; + } + + allStrings = allStrings && JSVAL_IS_STRING(vec[newlen]); + + ++newlen; } - /* We know JSVAL_IS_STRING yields 0 or 1, so avoid a branch via &=. */ - all_strings &= JSVAL_IS_STRING(vec[newlen]); + if (newlen == 0) + return true; /* The array has only holes and undefs. */ - ++newlen; - } - - if (newlen == 0) { - /* The array has only holes and undefs. */ - ok = JS_TRUE; - goto out; - } - - /* - * The first newlen elements of vec are copied from the array object - * (above). The remaining newlen positions are used as GC-rooted scratch - * space for mergesort. We must clear the space before including it to - * the root set covered by tvr.count. We assume JSVAL_NULL==0 to optimize - * initialization using memset. - */ - mergesort_tmp = vec + newlen; - memset(mergesort_tmp, 0, newlen * sizeof(jsval)); - tvr.count = newlen * 2; - - /* Here len == 2 * (newlen + undefs + number_of_holes). 
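The incremental-rooting idiom used by this function, in isolation (a sketch; AutoArrayRooter and changeLength are the helpers introduced by this patch, FillElement is a hypothetical producer, and an enclosing guard such as the AutoFreeVector above is assumed to free vec on early returns):

    jsval *vec = (jsval *) cx->malloc(n * sizeof(jsval));
    if (!vec)
        return false;
    AutoArrayRooter tvr(cx, 0, vec);     /* zero slots rooted initially */
    for (size_t i = 0; i < n; i++) {
        vec[i] = JSVAL_NULL;             /* clear the slot first... */
        tvr.changeLength(i + 1);         /* ...then grow the rooted prefix */
        if (!FillElement(cx, i, &vec[i]))
            return false;
    }
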
*/ - if (fval == JSVAL_NULL) { /* - * Sort using the default comparator converting all elements to - * strings. + * The first newlen elements of vec are copied from the array object + * (above). The remaining newlen positions are used as GC-rooted scratch + * space for mergesort. We must clear the space before including it to + * the root set covered by tvr.count. We assume JSVAL_NULL==0 to optimize + * initialization using memset. */ - if (all_strings) { - elemsize = sizeof(jsval); - } else { + jsval *mergesort_tmp = vec + newlen; + PodZero(mergesort_tmp, newlen); + tvr.changeLength(newlen * 2); + + /* Here len == 2 * (newlen + undefs + number_of_holes). */ + if (fval == JSVAL_NULL) { /* - * To avoid string conversion on each compare we do it only once - * prior to sorting. But we also need the space for the original - * values to recover the sorting result. To reuse - * sort_compare_strings we move the original values to the odd - * indexes in vec, put the string conversion results in the even - * indexes and pass 2 * sizeof(jsval) as an element size to the - * sorting function. In this way sort_compare_strings will only - * see the string values when it casts the compare arguments as - * pointers to jsval. - * - * This requires doubling the temporary storage including the - * scratch space for the merge sort. Since vec already contains - * the rooted scratch space for newlen elements at the tail, we - * can use it to rearrange and convert to strings first and try - * realloc only when we know that we successfully converted all - * the elements. + * Sort using the default comparator converting all elements to + * strings. */ + if (allStrings) { + elemsize = sizeof(jsval); + } else { + /* + * To avoid string conversion on each compare we do it only once + * prior to sorting. But we also need the space for the original + * values to recover the sorting result. To reuse + * sort_compare_strings we move the original values to the odd + * indexes in vec, put the string conversion results in the even + * indexes and pass 2 * sizeof(jsval) as an element size to the + * sorting function. In this way sort_compare_strings will only + * see the string values when it casts the compare arguments as + * pointers to jsval. + * + * This requires doubling the temporary storage including the + * scratch space for the merge sort. Since vec already contains + * the rooted scratch space for newlen elements at the tail, we + * can use it to rearrange and convert to strings first and try + * realloc only when we know that we successfully converted all + * the elements. + */ #if JS_BITS_PER_WORD == 32 - if ((size_t)newlen > ~(size_t)0 / (4 * sizeof(jsval))) { - js_ReportAllocationOverflow(cx); - ok = JS_FALSE; - goto out; - } + if (size_t(newlen) > size_t(-1) / (4 * sizeof(jsval))) { + js_ReportAllocationOverflow(cx); + return false; + } #endif - /* - * Rearrange and string-convert the elements of the vector from - * the tail here and, after sorting, move the results back - * starting from the start to prevent overwrite the existing - * elements. - */ - i = newlen; - do { - --i; - ok = JS_CHECK_OPERATION_LIMIT(cx); - if (!ok) - goto out; - v = vec[i]; - str = js_ValueToString(cx, v); - if (!str) { - ok = JS_FALSE; - goto out; + /* + * Rearrange and string-convert the elements of the vector from + * the tail here and, after sorting, move the results back + * starting from the start to prevent overwrite the existing + * elements. 
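/*
 * [Editor's aside -- illustrative sketch, not part of the patch. A toy model
 * of the "convert once, then sort pairs" layout the comment above describes:
 * precomputed string keys sit at even indexes, the original values at odd
 * ones, the element size handed to the sort covers both slots so each
 * (key, value) pair moves as a unit, and the comparator reads only the key
 * slot.]
 */
#include <stdlib.h>
#include <string.h>

static int
CompareKeySlot(const void *a, const void *b)
{
    /* Each element is two pointer slots; slot 0 holds the string key. */
    const char *ka = *static_cast<const char *const *>(a);
    const char *kb = *static_cast<const char *const *>(b);
    return strcmp(ka, kb);
}

static void
SortByPrecomputedKeys(const char *slots[], size_t npairs)
{
    /* slots[2*i] is the key for the original value in slots[2*i + 1]. */
    qsort(slots, npairs, 2 * sizeof(const char *), CompareKeySlot);
}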
+ */ + i = newlen; + do { + --i; + if (!JS_CHECK_OPERATION_LIMIT(cx)) + return false; + jsval v = vec[i]; + str = js_ValueToString(cx, v); + if (!str) + return false; + vec[2 * i] = STRING_TO_JSVAL(str); + vec[2 * i + 1] = v; + } while (i != 0); + + JS_ASSERT(tvr.array == vec); + vec = (jsval *) cx->realloc(vec, 4 * size_t(newlen) * sizeof(jsval)); + if (!vec) { + vec = tvr.array; + return false; } - vec[2 * i] = STRING_TO_JSVAL(str); - vec[2 * i + 1] = v; - } while (i != 0); - - JS_ASSERT(tvr.u.array == vec); - vec = (jsval *) cx->realloc(vec, - 4 * (size_t) newlen * sizeof(jsval)); - if (!vec) { - vec = tvr.u.array; - ok = JS_FALSE; - goto out; + mergesort_tmp = vec + 2 * newlen; + PodZero(mergesort_tmp, newlen * 2); + tvr.changeArray(vec, newlen * 4); + elemsize = 2 * sizeof(jsval); } - tvr.u.array = vec; - mergesort_tmp = vec + 2 * newlen; - memset(mergesort_tmp, 0, newlen * 2 * sizeof(jsval)); - tvr.count = newlen * 4; - elemsize = 2 * sizeof(jsval); - } - ok = js_MergeSort(vec, (size_t) newlen, elemsize, - sort_compare_strings, cx, mergesort_tmp); - if (!ok) - goto out; - if (!all_strings) { - /* - * We want to make the following loop fast and to unroot the - * cached results of toString invocations before the operation - * callback has a chance to run the GC. For this reason we do - * not call JS_CHECK_OPERATION_LIMIT in the loop. - */ - i = 0; - do { - vec[i] = vec[2 * i + 1]; - } while (++i != newlen); - } - } else { - void *mark; + if (!js_MergeSort(vec, size_t(newlen), elemsize, + sort_compare_strings, cx, mergesort_tmp)) { + return false; + } + if (!allStrings) { + /* + * We want to make the following loop fast and to unroot the + * cached results of toString invocations before the operation + * callback has a chance to run the GC. For this reason we do + * not call JS_CHECK_OPERATION_LIMIT in the loop. + */ + i = 0; + do { + vec[i] = vec[2 * i + 1]; + } while (++i != newlen); + } + } else { + void *mark; - LeaveTrace(cx); + LeaveTrace(cx); - ca.context = cx; - ca.fval = fval; - ca.elemroot = js_AllocStack(cx, 2 + 2, &mark); - if (!ca.elemroot) { - ok = JS_FALSE; - goto out; + CompareArgs ca; + ca.context = cx; + ca.fval = fval; + ca.elemroot = js_AllocStack(cx, 2 + 2, &mark); + if (!ca.elemroot) + return false; + bool ok = !!js_MergeSort(vec, size_t(newlen), sizeof(jsval), + comparator_stack_cast(sort_compare), + &ca, mergesort_tmp); + js_FreeStack(cx, mark); + if (!ok) + return false; + } + + /* + * We no longer need to root the scratch space for the merge sort, so + * unroot it now to make the job of a potential GC under + * InitArrayElements easier. + */ + tvr.changeLength(newlen); + if (!InitArrayElements(cx, obj, 0, newlen, vec, TargetElementsMayContainValues, + SourceVectorAllValues)) { + return false; } - ok = js_MergeSort(vec, (size_t) newlen, sizeof(jsval), - comparator_stack_cast(sort_compare), - &ca, mergesort_tmp); - js_FreeStack(cx, mark); - if (!ok) - goto out; } - /* - * We no longer need to root the scratch space for the merge sort, so - * unroot it now to make the job of a potential GC under InitArrayElements - * easier. - */ - tvr.count = newlen; - ok = InitArrayElements(cx, obj, 0, newlen, vec, TargetElementsMayContainValues, - SourceVectorAllValues); - if (!ok) - goto out; - - out: - JS_POP_TEMP_ROOT(cx, &tvr); - cx->free(vec); - if (!ok) - return JS_FALSE; - /* Set undefs that sorted after the rest of elements. 
*/ while (undefs != 0) { --undefs; - if (!JS_CHECK_OPERATION_LIMIT(cx) || - !SetArrayElement(cx, obj, newlen++, JSVAL_VOID)) { - return JS_FALSE; - } + if (!JS_CHECK_OPERATION_LIMIT(cx) || !SetArrayElement(cx, obj, newlen++, JSVAL_VOID)) + return false; } /* Re-create any holes that sorted to the end of the array. */ while (len > newlen) { - if (!JS_CHECK_OPERATION_LIMIT(cx) || - !DeleteArrayElement(cx, obj, --len)) { + if (!JS_CHECK_OPERATION_LIMIT(cx) || !DeleteArrayElement(cx, obj, --len)) return JS_FALSE; - } } *vp = OBJECT_TO_JSVAL(obj); - return JS_TRUE; + return true; } /* @@ -2436,7 +2399,7 @@ JS_DEFINE_CALLINFO_3(extern, BOOL, js_ArrayCompPush, CONTEXT, OBJECT, JSVAL, 0, static jsval FASTCALL Array_p_push1(JSContext* cx, JSObject* obj, jsval v) { - JSAutoTempValueRooter tvr(cx, v); + AutoValueRooter tvr(cx, v); if (obj->isDenseArray() ? array_push1_dense(cx, obj, v, tvr.addr()) : array_push_slowly(cx, obj, 1, tvr.addr(), tvr.addr())) { @@ -2508,7 +2471,7 @@ array_pop_dense(JSContext *cx, JSObject* obj, jsval *vp) static jsval FASTCALL Array_p_pop(JSContext* cx, JSObject* obj) { - JSAutoTempValueRooter tvr(cx); + AutoValueRooter tvr(cx); if (obj->isDenseArray() ? array_pop_dense(cx, obj, tvr.addr()) : array_pop_slowly(cx, obj, tvr.addr())) { @@ -2572,7 +2535,7 @@ array_shift(JSContext *cx, uintN argc, jsval *vp) return JS_FALSE; /* Slide down the array above the first element. */ - JSAutoTempValueRooter tvr(cx, JSVAL_NULL); + AutoValueRooter tvr(cx); for (i = 0; i != length; i++) { if (!JS_CHECK_OPERATION_LIMIT(cx) || !GetArrayElement(cx, obj, i + 1, &hole, tvr.addr()) || @@ -2617,7 +2580,7 @@ array_unshift(JSContext *cx, uintN argc, jsval *vp) } else { last = length; jsdouble upperIndex = last + argc; - JSAutoTempValueRooter tvr(cx, JSVAL_NULL); + AutoValueRooter tvr(cx); do { --last, --upperIndex; if (!JS_CHECK_OPERATION_LIMIT(cx) || @@ -2707,7 +2670,7 @@ array_splice(JSContext *cx, uintN argc, jsval *vp) argv++; } - JSAutoTempValueRooter tvr(cx, JSVAL_NULL); + AutoValueRooter tvr(cx, JSVAL_NULL); /* If there are elements to remove, put them into the return value. */ if (count > 0) { @@ -2846,7 +2809,7 @@ array_concat(JSContext *cx, uintN argc, jsval *vp) length = 0; } - JSAutoTempValueRooter tvr(cx, JSVAL_NULL); + AutoValueRooter tvr(cx, JSVAL_NULL); /* Loop over [0, argc] to concat args into nobj, expanding all Arrays. */ for (i = 0; i <= argc; i++) { @@ -2960,7 +2923,7 @@ array_slice(JSContext *cx, uintN argc, jsval *vp) return JS_FALSE; *vp = OBJECT_TO_JSVAL(nobj); - JSAutoTempValueRooter tvr(cx, JSVAL_NULL); + AutoValueRooter tvr(cx); for (slot = begin; slot < end; slot++) { if (!JS_CHECK_OPERATION_LIMIT(cx) || !GetArrayElement(cx, obj, slot, &hole, tvr.addr())) { @@ -3366,7 +3329,7 @@ JSBool js_Array(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) { jsuint length; - jsval *vector; + const jsval *vector; /* If called without new, replace obj with a new Array object. 
*/ if (!JS_IsConstructing(cx)) { @@ -3468,12 +3431,9 @@ js_InitArrayClass(JSContext *cx, JSObject *obj) } JSObject * -js_NewArrayObject(JSContext *cx, jsuint length, jsval *vector, JSBool holey) +js_NewArrayObject(JSContext *cx, jsuint length, const jsval *vector, bool holey) { - JSTempValueRooter tvr; - JSObject *obj; - - obj = js_NewObject(cx, &js_ArrayClass, NULL, NULL); + JSObject *obj = js_NewObject(cx, &js_ArrayClass, NULL, NULL); if (!obj) return NULL; @@ -3483,10 +3443,11 @@ js_NewArrayObject(JSContext *cx, jsuint length, jsval *vector, JSBool holey) */ JS_ASSERT(obj->getProto()); - JS_PUSH_TEMP_ROOT_OBJECT(cx, obj, &tvr); - if (!InitArrayObject(cx, obj, length, vector, holey)) - obj = NULL; - JS_POP_TEMP_ROOT(cx, &tvr); + { + AutoValueRooter tvr(cx, obj); + if (!InitArrayObject(cx, obj, length, vector, holey)) + obj = NULL; + } /* Set/clear newborn root, in case we lost it. */ cx->weakRoots.finalizableNewborns[FINALIZE_OBJECT] = obj; @@ -3602,7 +3563,7 @@ js_NewArrayObjectWithCapacity(JSContext *cx, jsuint capacity, jsval **vector) if (!obj) return NULL; - JSAutoTempValueRooter tvr(cx, obj); + AutoValueRooter tvr(cx, obj); if (!EnsureCapacity(cx, obj, capacity, JS_FALSE)) obj = NULL; diff --git a/js/src/jsarray.h b/js/src/jsarray.h index 7305b74760a..0a0f8ab885d 100644 --- a/js/src/jsarray.h +++ b/js/src/jsarray.h @@ -68,8 +68,8 @@ JSObject::isArray() const } /* - * Dense arrays are not native (OBJ_IS_NATIVE(cx, aobj) for a dense array aobj - * results in false, meaning aobj->map does not point to a JSScope). + * Dense arrays are not native -- aobj->isNative() for a dense array aobj + * results in false, meaning aobj->map does not point to a JSScope. * * But Array methods are called via aobj.sort(), e.g., and the interpreter and * the trace recorder must consult the property cache in order to perform well. @@ -106,8 +106,7 @@ extern JSObject * JS_FASTCALL js_NewArrayWithSlots(JSContext* cx, JSObject* proto, uint32 len); extern JSObject * -js_NewArrayObject(JSContext *cx, jsuint length, jsval *vector, - JSBool holey = JS_FALSE); +js_NewArrayObject(JSContext *cx, jsuint length, const jsval *vector, bool holey = false); /* Create an array object that starts out already made slow/sparse. 
*/ extern JSObject * diff --git a/js/src/jsatom.cpp b/js/src/jsatom.cpp index 869cd673c9a..3ed21b64e33 100644 --- a/js/src/jsatom.cpp +++ b/js/src/jsatom.cpp @@ -132,7 +132,6 @@ const char *const js_common_atom_names[] = { js_caller_str, /* callerAtom */ js_class_prototype_str, /* classPrototypeAtom */ js_constructor_str, /* constructorAtom */ - js_count_str, /* countAtom */ js_each_str, /* eachAtom */ js_eval_str, /* evalAtom */ js_fileName_str, /* fileNameAtom */ @@ -206,7 +205,6 @@ const char js_callee_str[] = "callee"; const char js_caller_str[] = "caller"; const char js_class_prototype_str[] = "prototype"; const char js_constructor_str[] = "constructor"; -const char js_count_str[] = "__count__"; const char js_each_str[] = "each"; const char js_eval_str[] = "eval"; const char js_fileName_str[] = "fileName"; @@ -501,6 +499,7 @@ js_InitCommonAtoms(JSContext *cx) JS_ASSERT((uint8 *)atoms - (uint8 *)state == LAZY_ATOM_OFFSET_START); memset(atoms, 0, ATOM_OFFSET_LIMIT - LAZY_ATOM_OFFSET_START); + cx->runtime->emptyString = ATOM_TO_STRING(state->emptyAtom); return JS_TRUE; } @@ -516,8 +515,8 @@ js_atom_unpinner(JSDHashTable *table, JSDHashEntryHdr *hdr, void js_FinishCommonAtoms(JSContext *cx) { + cx->runtime->emptyString = NULL; JSAtomState *state = &cx->runtime->atomState; - JS_DHashTableEnumerate(&state->stringAtoms, js_atom_unpinner, NULL); #ifdef DEBUG memset(COMMON_ATOMS_START(state), JS_FREE_PATTERN, diff --git a/js/src/jsatom.h b/js/src/jsatom.h index 585d1c8fb7d..4cb934caa17 100644 --- a/js/src/jsatom.h +++ b/js/src/jsatom.h @@ -87,7 +87,7 @@ struct JSAtomListElement { }; #define ALE_ATOM(ale) ((JSAtom *) (ale)->entry.key) -#define ALE_INDEX(ale) ((jsatomid) JS_PTR_TO_UINT32((ale)->entry.value)) +#define ALE_INDEX(ale) (jsatomid(uintptr_t((ale)->entry.value))) #define ALE_VALUE(ale) ((jsval) (ale)->entry.value) #define ALE_NEXT(ale) ((JSAtomListElement *) (ale)->entry.next) @@ -99,7 +99,7 @@ struct JSAtomListElement { #define ALE_DEFN(ale) ((JSDefinition *) (ale)->entry.value) #define ALE_SET_ATOM(ale,atom) ((ale)->entry.key = (const void *)(atom)) -#define ALE_SET_INDEX(ale,index)((ale)->entry.value = JS_UINT32_TO_PTR(index)) +#define ALE_SET_INDEX(ale,index)((ale)->entry.value = (void *)(index)) #define ALE_SET_DEFN(ale, dn) ((ale)->entry.value = (void *)(dn)) #define ALE_SET_VALUE(ale, v) ((ale)->entry.value = (void *)(v)) #define ALE_SET_NEXT(ale,nxt) ((ale)->entry.next = (JSHashEntry *)(nxt)) @@ -246,7 +246,6 @@ struct JSAtomState { JSAtom *callerAtom; JSAtom *classPrototypeAtom; JSAtom *constructorAtom; - JSAtom *countAtom; JSAtom *eachAtom; JSAtom *evalAtom; JSAtom *fileNameAtom; diff --git a/js/src/jsbuiltins.cpp b/js/src/jsbuiltins.cpp index 67180bee1fb..4095a12e5fd 100644 --- a/js/src/jsbuiltins.cpp +++ b/js/src/jsbuiltins.cpp @@ -218,8 +218,8 @@ js_AddProperty(JSContext* cx, JSObject* obj, JSScopeProperty* sprop) } if (!scope->table) { - if (slot < STOBJ_NSLOTS(obj) && !OBJ_GET_CLASS(cx, obj)->reserveSlots) { - JS_ASSERT(JSVAL_IS_VOID(STOBJ_GET_SLOT(obj, scope->freeslot))); + if (slot < obj->numSlots() && !OBJ_GET_CLASS(cx, obj)->reserveSlots) { + JS_ASSERT(JSVAL_IS_VOID(obj->getSlot(scope->freeslot))); ++scope->freeslot; } else { if (!js_AllocSlot(cx, obj, &slot)) @@ -235,7 +235,7 @@ js_AddProperty(JSContext* cx, JSObject* obj, JSScopeProperty* sprop) } else { JSScopeProperty *sprop2 = scope->addProperty(cx, sprop->id, sprop->getter(), sprop->setter(), - SPROP_INVALID_SLOT, sprop->attrs, sprop->getFlags(), + SPROP_INVALID_SLOT, sprop->attributes(), sprop->getFlags(), 
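/*
 * [Editor's aside -- illustrative sketch, not part of the patch. The
 * ALE_INDEX/ALE_SET_INDEX rewrite above replaces JS_PTR_TO_UINT32 with a
 * round trip through uintptr_t: pointer -> uintptr_t -> narrower integer is
 * well defined on 64-bit targets, whereas truncating the pointer type
 * directly is not. Sketched with jsatomid32 as a hypothetical stand-in for
 * jsatomid:]
 */
#include <stdint.h>

typedef uint32_t jsatomid32;

static void *
IndexToEntryValue(jsatomid32 index)
{
    return (void *)(uintptr_t)index;            /* widen, then store as a pointer */
}

static jsatomid32
EntryValueToIndex(void *entryValue)
{
    return jsatomid32(uintptr_t(entryValue));   /* recover the small integer */
}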
sprop->shortid); if (sprop2 != sprop) goto exit_trace; @@ -317,29 +317,19 @@ js_TypeOfBoolean(JSContext* cx, int32 unboxed) } JS_DEFINE_CALLINFO_2(extern, STRING, js_TypeOfBoolean, CONTEXT, INT32, 1, ACC_NONE) -jsdouble FASTCALL -js_BooleanOrUndefinedToNumber(JSContext* cx, int32 unboxed) -{ - if (unboxed == JSVAL_TO_SPECIAL(JSVAL_VOID)) - return js_NaN; - JS_ASSERT(unboxed == JS_TRUE || unboxed == JS_FALSE); - return unboxed; -} -JS_DEFINE_CALLINFO_2(extern, DOUBLE, js_BooleanOrUndefinedToNumber, CONTEXT, INT32, 1, ACC_NONE) - JSString* FASTCALL -js_BooleanOrUndefinedToString(JSContext *cx, int32 unboxed) +js_BooleanIntToString(JSContext *cx, int32 unboxed) { - JS_ASSERT(uint32(unboxed) <= 2); + JS_ASSERT(uint32(unboxed) <= 1); return ATOM_TO_STRING(cx->runtime->atomState.booleanAtoms[unboxed]); } -JS_DEFINE_CALLINFO_2(extern, STRING, js_BooleanOrUndefinedToString, CONTEXT, INT32, 1, ACC_NONE) +JS_DEFINE_CALLINFO_2(extern, STRING, js_BooleanIntToString, CONTEXT, INT32, 1, ACC_NONE) JSObject* FASTCALL js_NewNullClosure(JSContext* cx, JSObject* funobj, JSObject* proto, JSObject* parent) { - JS_ASSERT(HAS_FUNCTION_CLASS(funobj)); - JS_ASSERT(HAS_FUNCTION_CLASS(proto)); + JS_ASSERT(funobj->isFunction()); + JS_ASSERT(proto->isFunction()); JS_ASSERT(JS_ON_TRACE(cx)); JSFunction *fun = (JSFunction*) funobj; diff --git a/js/src/jsbuiltins.h b/js/src/jsbuiltins.h index 80efbfe984b..4018bc98b28 100644 --- a/js/src/jsbuiltins.h +++ b/js/src/jsbuiltins.h @@ -116,12 +116,12 @@ struct JSNativeTraceInfo { #define _JS_CI_NAME(op) #endif -#define _JS_I32_ARGSIZE nanojit::ARGSIZE_I -#define _JS_I32_RETSIZE nanojit::ARGSIZE_I -#define _JS_F64_ARGSIZE nanojit::ARGSIZE_F -#define _JS_F64_RETSIZE nanojit::ARGSIZE_F -#define _JS_PTR_ARGSIZE nanojit::ARGSIZE_P -#define _JS_PTR_RETSIZE nanojit::ARGSIZE_P +#define _JS_I32_ARGTYPE nanojit::ARGTYPE_I +#define _JS_I32_RETTYPE nanojit::ARGTYPE_I +#define _JS_F64_ARGTYPE nanojit::ARGTYPE_F +#define _JS_F64_RETTYPE nanojit::ARGTYPE_F +#define _JS_PTR_ARGTYPE nanojit::ARGTYPE_P +#define _JS_PTR_RETTYPE nanojit::ARGTYPE_P struct ClosureVarInfo; @@ -233,10 +233,10 @@ struct ClosureVarInfo; #define _JS_CTYPE_TYPE2(t,s,p,a,f) t #define _JS_CTYPE_TYPE(tyname) _JS_EXPAND(_JS_CTYPE_TYPE2 _JS_CTYPE_##tyname) -#define _JS_CTYPE_RETSIZE2(t,s,p,a,f) s##_RETSIZE -#define _JS_CTYPE_RETSIZE(tyname) _JS_EXPAND(_JS_CTYPE_RETSIZE2 _JS_CTYPE_##tyname) -#define _JS_CTYPE_ARGSIZE2(t,s,p,a,f) s##_ARGSIZE -#define _JS_CTYPE_ARGSIZE(tyname) _JS_EXPAND(_JS_CTYPE_ARGSIZE2 _JS_CTYPE_##tyname) +#define _JS_CTYPE_RETTYPE2(t,s,p,a,f) s##_RETTYPE +#define _JS_CTYPE_RETTYPE(tyname) _JS_EXPAND(_JS_CTYPE_RETTYPE2 _JS_CTYPE_##tyname) +#define _JS_CTYPE_ARGTYPE2(t,s,p,a,f) s##_ARGTYPE +#define _JS_CTYPE_ARGTYPE(tyname) _JS_EXPAND(_JS_CTYPE_ARGTYPE2 _JS_CTYPE_##tyname) #define _JS_CTYPE_PCH2(t,s,p,a,f) p #define _JS_CTYPE_PCH(tyname) _JS_EXPAND(_JS_CTYPE_PCH2 _JS_CTYPE_##tyname) #define _JS_CTYPE_ACH2(t,s,p,a,f) a @@ -260,10 +260,6 @@ struct ClosureVarInfo; _JS_TN_LINKAGE(linkage, crtype) name cargtypes; \ _JS_CI_LINKAGE(linkage) const nanojit::CallInfo _JS_CALLINFO(name) = \ { (intptr_t) &name, argtypes, nanojit::ABI_CDECL, isPure, storeAccSet _JS_CI_NAME(name) };\ - /* XXX: a temporary assertion to check all cse/fold pairs are correctly */ \ - /* converted to isPure/storeAccSet pairs for bug 545274. Will be removed */ \ - /* when bug 517910 starts doing more precise storeAccSet markings. 
*/ \ - JS_STATIC_ASSERT_IF(!isPure, storeAccSet == nanojit::ACC_STORE_ANY); /* temporary */ \ JS_STATIC_ASSERT_IF(isPure, storeAccSet == nanojit::ACC_NONE); #else @@ -271,10 +267,6 @@ struct ClosureVarInfo; _JS_TN_LINKAGE(linkage, crtype) FASTCALL name cargtypes; \ _JS_CI_LINKAGE(linkage) const nanojit::CallInfo _JS_CALLINFO(name) = \ { (intptr_t) &name, argtypes, nanojit::ABI_FASTCALL, isPure, storeAccSet _JS_CI_NAME(name) }; \ - /* XXX: a temporary assertion to check all cse/fold pairs are correctly */ \ - /* converted to isPure/storeAccSet pairs for bug 545274. Will be removed */ \ - /* when bug 517910 starts doing more precise storeAccSet markings. */ \ - JS_STATIC_ASSERT_IF(!isPure, storeAccSet == nanojit::ACC_STORE_ANY); /* temporary */ \ JS_STATIC_ASSERT_IF(isPure, storeAccSet == nanojit::ACC_NONE); #endif @@ -315,56 +307,56 @@ struct ClosureVarInfo; */ #define JS_DEFINE_CALLINFO_1(linkage, rt, op, at0, isPure, storeAccSet) \ _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), (_JS_CTYPE_TYPE(at0)), \ - (_JS_CTYPE_ARGSIZE(at0) << (1*nanojit::ARGSIZE_SHIFT)) | \ - _JS_CTYPE_RETSIZE(rt), \ + (_JS_CTYPE_ARGTYPE(at0) << (1*nanojit::ARGTYPE_SHIFT)) | \ + _JS_CTYPE_RETTYPE(rt), \ isPure, storeAccSet) #define JS_DEFINE_CALLINFO_2(linkage, rt, op, at0, at1, isPure, storeAccSet) \ _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \ (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1)), \ - (_JS_CTYPE_ARGSIZE(at0) << (2*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at1) << (1*nanojit::ARGSIZE_SHIFT)) | \ - _JS_CTYPE_RETSIZE(rt), \ + (_JS_CTYPE_ARGTYPE(at0) << (2*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at1) << (1*nanojit::ARGTYPE_SHIFT)) | \ + _JS_CTYPE_RETTYPE(rt), \ isPure, storeAccSet) #define JS_DEFINE_CALLINFO_3(linkage, rt, op, at0, at1, at2, isPure, storeAccSet) \ _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \ (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2)), \ - (_JS_CTYPE_ARGSIZE(at0) << (3*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at1) << (2*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at2) << (1*nanojit::ARGSIZE_SHIFT)) | \ - _JS_CTYPE_RETSIZE(rt), \ + (_JS_CTYPE_ARGTYPE(at0) << (3*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at1) << (2*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at2) << (1*nanojit::ARGTYPE_SHIFT)) | \ + _JS_CTYPE_RETTYPE(rt), \ isPure, storeAccSet) #define JS_DEFINE_CALLINFO_4(linkage, rt, op, at0, at1, at2, at3, isPure, storeAccSet) \ _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \ (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2), \ _JS_CTYPE_TYPE(at3)), \ - (_JS_CTYPE_ARGSIZE(at0) << (4*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at1) << (3*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at2) << (2*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at3) << (1*nanojit::ARGSIZE_SHIFT)) | \ - _JS_CTYPE_RETSIZE(rt), \ + (_JS_CTYPE_ARGTYPE(at0) << (4*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at1) << (3*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at2) << (2*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at3) << (1*nanojit::ARGTYPE_SHIFT)) | \ + _JS_CTYPE_RETTYPE(rt), \ isPure, storeAccSet) #define JS_DEFINE_CALLINFO_5(linkage, rt, op, at0, at1, at2, at3, at4, isPure, storeAccSet) \ _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \ (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2), \ _JS_CTYPE_TYPE(at3), _JS_CTYPE_TYPE(at4)), \ - (_JS_CTYPE_ARGSIZE(at0) << (5*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at1) << (4*nanojit::ARGSIZE_SHIFT)) | \ - 
(_JS_CTYPE_ARGSIZE(at2) << (3*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at3) << (2*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at4) << (1*nanojit::ARGSIZE_SHIFT)) | \ - _JS_CTYPE_RETSIZE(rt), \ + (_JS_CTYPE_ARGTYPE(at0) << (5*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at1) << (4*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at2) << (3*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at3) << (2*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at4) << (1*nanojit::ARGTYPE_SHIFT)) | \ + _JS_CTYPE_RETTYPE(rt), \ isPure, storeAccSet) #define JS_DEFINE_CALLINFO_6(linkage, rt, op, at0, at1, at2, at3, at4, at5, isPure, storeAccSet) \ _JS_DEFINE_CALLINFO(linkage, op, _JS_CTYPE_TYPE(rt), \ (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2), \ _JS_CTYPE_TYPE(at3), _JS_CTYPE_TYPE(at4), _JS_CTYPE_TYPE(at5)), \ - (_JS_CTYPE_ARGSIZE(at0) << (6*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at1) << (5*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at2) << (4*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at3) << (3*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at4) << (2*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at5) << (1*nanojit::ARGSIZE_SHIFT)) | \ - _JS_CTYPE_RETSIZE(rt), \ + (_JS_CTYPE_ARGTYPE(at0) << (6*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at1) << (5*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at2) << (4*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at3) << (3*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at4) << (2*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at5) << (1*nanojit::ARGTYPE_SHIFT)) | \ + _JS_CTYPE_RETTYPE(rt), \ isPure, storeAccSet) #define JS_DEFINE_CALLINFO_7(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, isPure, \ storeAccSet) \ @@ -372,14 +364,14 @@ struct ClosureVarInfo; (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2), \ _JS_CTYPE_TYPE(at3), _JS_CTYPE_TYPE(at4), _JS_CTYPE_TYPE(at5), \ _JS_CTYPE_TYPE(at6)), \ - (_JS_CTYPE_ARGSIZE(at0) << (7*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at1) << (6*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at2) << (5*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at3) << (4*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at4) << (3*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at5) << (2*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at6) << (1*nanojit::ARGSIZE_SHIFT)) | \ - _JS_CTYPE_RETSIZE(rt), \ + (_JS_CTYPE_ARGTYPE(at0) << (7*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at1) << (6*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at2) << (5*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at3) << (4*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at4) << (3*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at5) << (2*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at6) << (1*nanojit::ARGTYPE_SHIFT)) | \ + _JS_CTYPE_RETTYPE(rt), \ isPure, storeAccSet) #define JS_DEFINE_CALLINFO_8(linkage, rt, op, at0, at1, at2, at3, at4, at5, at6, at7, isPure, \ storeAccSet) \ @@ -387,15 +379,15 @@ struct ClosureVarInfo; (_JS_CTYPE_TYPE(at0), _JS_CTYPE_TYPE(at1), _JS_CTYPE_TYPE(at2), \ _JS_CTYPE_TYPE(at3), _JS_CTYPE_TYPE(at4), _JS_CTYPE_TYPE(at5), \ _JS_CTYPE_TYPE(at6), _JS_CTYPE_TYPE(at7)), \ - (_JS_CTYPE_ARGSIZE(at0) << (8*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at1) << (7*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at2) << (6*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at3) << (5*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at4) << (4*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at5) << 
(3*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at6) << (2*nanojit::ARGSIZE_SHIFT)) | \ - (_JS_CTYPE_ARGSIZE(at7) << (1*nanojit::ARGSIZE_SHIFT)) | \ - _JS_CTYPE_RETSIZE(rt), \ + (_JS_CTYPE_ARGTYPE(at0) << (8*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at1) << (7*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at2) << (6*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at3) << (5*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at4) << (4*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at5) << (3*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at6) << (2*nanojit::ARGTYPE_SHIFT)) | \ + (_JS_CTYPE_ARGTYPE(at7) << (1*nanojit::ARGTYPE_SHIFT)) | \ + _JS_CTYPE_RETTYPE(rt), \ isPure, storeAccSet) #define JS_DECLARE_CALLINFO(name) extern const nanojit::CallInfo _JS_CALLINFO(name); @@ -503,9 +495,6 @@ struct ClosureVarInfo; jsdouble FASTCALL js_StringToNumber(JSContext* cx, JSString* str); -jsdouble FASTCALL -js_BooleanOrUndefinedToNumber(JSContext* cx, int32 unboxed); - /* Extern version of SetBuiltinError. */ extern JS_FRIEND_API(void) js_SetTraceableNativeFailed(JSContext *cx); @@ -557,8 +546,7 @@ JS_DECLARE_CALLINFO(js_HasNamedProperty) JS_DECLARE_CALLINFO(js_HasNamedPropertyInt32) JS_DECLARE_CALLINFO(js_TypeOfObject) JS_DECLARE_CALLINFO(js_TypeOfBoolean) -JS_DECLARE_CALLINFO(js_BooleanOrUndefinedToNumber) -JS_DECLARE_CALLINFO(js_BooleanOrUndefinedToString) +JS_DECLARE_CALLINFO(js_BooleanIntToString) JS_DECLARE_CALLINFO(js_NewNullClosure) JS_DECLARE_CALLINFO(js_PopInterpFrame) JS_DECLARE_CALLINFO(js_ConcatN) diff --git a/js/src/jscntxt.cpp b/js/src/jscntxt.cpp index 7589c000eff..d76fd6fc1dd 100644 --- a/js/src/jscntxt.cpp +++ b/js/src/jscntxt.cpp @@ -100,7 +100,7 @@ CallStack::contains(JSStackFrame *fp) } #endif -void +bool JSThreadData::init() { #ifdef DEBUG @@ -111,7 +111,12 @@ JSThreadData::init() #ifdef JS_TRACER InitJIT(&traceMonitor); #endif - js_InitRandom(this); + dtoaState = js_NewDtoaState(); + if (!dtoaState) { + finish(); + return false; + } + return true; } void @@ -127,8 +132,11 @@ JSThreadData::finish() JS_ASSERT(!localRootStack); #endif + if (dtoaState) + js_DestroyDtoaState(dtoaState); + js_FinishGSNCache(&gsnCache); - js_FinishPropertyCache(&propertyCache); + propertyCache.~PropertyCache(); #if defined JS_TRACER FinishJIT(&traceMonitor); #endif @@ -152,7 +160,7 @@ JSThreadData::purge(JSContext *cx) js_PurgeGSNCache(&gsnCache); /* FIXME: bug 506341. */ - js_PurgePropertyCache(cx, &propertyCache); + propertyCache.purge(cx); #ifdef JS_TRACER /* @@ -191,7 +199,10 @@ NewThread(jsword id) return NULL; JS_INIT_CLIST(&thread->contextList); thread->id = id; - thread->data.init(); + if (!thread->data.init()) { + js_free(thread); + return NULL; + } return thread; } @@ -522,6 +533,8 @@ js_NewContext(JSRuntime *rt, size_t stackChunkSize) JS_APPEND_LINK(&cx->link, &rt->contextList); JS_UNLOCK_GC(rt); + js_InitRandom(cx); + /* * If cx is the first context on this runtime, initialize well-known atoms, * keywords, numbers, and strings. If one of these steps should fail, the @@ -545,8 +558,6 @@ js_NewContext(JSRuntime *rt, size_t stackChunkSize) ok = js_InitRuntimeScriptState(rt); if (ok) ok = js_InitRuntimeNumberState(cx); - if (ok) - ok = js_InitRuntimeStringState(cx); if (ok) ok = JSScope::initRuntimeState(cx); @@ -778,7 +789,6 @@ js_DestroyContext(JSContext *cx, JSDestroyContextMode mode) JSScope::finishRuntimeState(cx); js_FinishRuntimeNumberState(cx); - js_FinishRuntimeStringState(cx); /* Unpin all common atoms before final GC. 
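/*
 * [Editor's aside -- illustrative sketch, not part of the patch.
 * JSThreadData::init above turns an infallible initializer into a fallible
 * one: it reports failure as bool, cleans up whatever it already acquired by
 * reusing finish() (which must therefore tolerate partially built state), and
 * NewThread applies the same rule one level up, freeing the JSThread when
 * init() fails. The pattern in miniature:]
 */
#include <stdlib.h>

struct ThreadDataSketch {
    void *state;                 /* stands in for dtoaState */

    ThreadDataSketch() : state(0) { }

    bool init() {
        state = malloc(64);      /* any fallible acquisition */
        if (!state) {
            finish();            /* safe on partial state */
            return false;
        }
        return true;
    }

    void finish() {
        free(state);             /* free(NULL) is a no-op, so this is safe */
        state = 0;
    }
};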
*/ js_FinishCommonAtoms(cx); @@ -956,7 +966,7 @@ resolving_HashKey(JSDHashTable *table, const void *ptr) { const JSResolvingKey *key = (const JSResolvingKey *)ptr; - return ((JSDHashNumber)JS_PTR_TO_UINT32(key->obj) >> JSVAL_TAGBITS) ^ key->id; + return (JSDHashNumber(uintptr_t(key->obj)) >> JSVAL_TAGBITS) ^ key->id; } JS_PUBLIC_API(JSBool) @@ -1364,7 +1374,7 @@ js_ReportOutOfMemory(JSContext *cx) const char *msg = efs ? efs->format : "Out of memory"; /* Fill out the report, but don't do anything that requires allocation. */ - memset(&report, 0, sizeof (struct JSErrorReport)); + PodZero(&report); report.flags = JSREPORT_ERROR; report.errorNumber = JSMSG_OUT_OF_MEMORY; PopulateReportBlame(cx, &report); @@ -1459,7 +1469,7 @@ js_ReportErrorVA(JSContext *cx, uintN flags, const char *format, va_list ap) return JS_FALSE; messagelen = strlen(message); - memset(&report, 0, sizeof (struct JSErrorReport)); + PodZero(&report); report.flags = flags; report.errorNumber = JSMSG_USER_DEFINED_ERROR; report.ucmessage = ucmessage = js_InflateString(cx, message, &messagelen); @@ -1651,7 +1661,7 @@ js_ReportErrorNumberVA(JSContext *cx, uintN flags, JSErrorCallback callback, return JS_TRUE; warning = JSREPORT_IS_WARNING(flags); - memset(&report, 0, sizeof (struct JSErrorReport)); + PodZero(&report); report.flags = flags; report.errorNumber = errorNumber; PopulateReportBlame(cx, &report); @@ -1925,6 +1935,39 @@ js_CurrentPCIsInImacro(JSContext *cx) #endif } +CallStack * +JSContext::containingCallStack(JSStackFrame *target) +{ + /* The context may have nothing running. */ + CallStack *cs = currentCallStack; + if (!cs) + return NULL; + + /* The active callstack's top frame is cx->fp. */ + if (fp) { + JS_ASSERT(activeCallStack() == cs); + JSStackFrame *f = fp; + JSStackFrame *stop = cs->getInitialFrame()->down; + for (; f != stop; f = f->down) { + if (f == target) + return cs; + } + cs = cs->getPrevious(); + } + + /* A suspended callstack's top frame is its suspended frame. */ + for (; cs; cs = cs->getPrevious()) { + JSStackFrame *f = cs->getSuspendedFrame(); + JSStackFrame *stop = cs->getInitialFrame()->down; + for (; f != stop; f = f->down) { + if (f == target) + return cs; + } + } + + return NULL; +} + void JSContext::checkMallocGCPressure(void *p) { diff --git a/js/src/jscntxt.h b/js/src/jscntxt.h index f96e701acd6..46e465da2c9 100644 --- a/js/src/jscntxt.h +++ b/js/src/jscntxt.h @@ -49,11 +49,14 @@ #include "jsclist.h" #include "jslong.h" #include "jsatom.h" -#include "jsversion.h" #include "jsdhash.h" +#include "jsdtoa.h" #include "jsgc.h" +#include "jshashtable.h" #include "jsinterp.h" #include "jsobj.h" +#include "jspropertycache.h" +#include "jspropertytree.h" #include "jsprvtd.h" #include "jspubtd.h" #include "jsregexp.h" @@ -61,7 +64,6 @@ #include "jsarray.h" #include "jstask.h" #include "jsvector.h" -#include "jshashtable.h" #ifdef _MSC_VER #pragma warning(push) @@ -412,13 +414,6 @@ struct TraceMonitor { */ JSBool needFlush; - /* - * reservedObjects is a linked list (via fslots[0]) of preallocated JSObjects. - * The JIT uses this to ensure that leaving a trace tree can't fail. - */ - JSBool useReservedObjects; - JSObject *reservedObjects; - /* * Fragment map for the regular expression compiler. */ @@ -538,10 +533,7 @@ struct JSThreadData { JSGSNCache gsnCache; /* Property cache for faster call/get/set invocation. */ - JSPropertyCache propertyCache; - - /* Random number generator state, used by jsmath.cpp. 
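/*
 * [Editor's aside -- illustrative sketch, not part of the patch.
 * containingCallStack above is a plain linear search: each callstack owns a
 * chain of frames linked through ->down, and the sentinel ending a
 * callstack's chain is the frame *below* its initial frame. The same walk
 * over hypothetical minimal types:]
 */
struct FrameSketch   { FrameSketch *down; };
struct SegmentSketch { FrameSketch *top, *initial; SegmentSketch *prev; };

static SegmentSketch *
FindSegmentContaining(SegmentSketch *seg, FrameSketch *target)
{
    for (; seg; seg = seg->prev) {
        FrameSketch *stop = seg->initial->down;  /* one past the oldest frame */
        for (FrameSketch *f = seg->top; f != stop; f = f->down) {
            if (f == target)
                return seg;
        }
    }
    return 0;  /* target lives on no callstack of this context */
}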
*/ - int64 rngSeed; + js::PropertyCache propertyCache; /* Optional stack of heap-allocated scoped local GC roots. */ JSLocalRootStack *localRootStack; @@ -558,6 +550,9 @@ struct JSThreadData { JSEvalCacheMeter evalCacheMeter; #endif + /* State used by dtoa.c. */ + DtoaState *dtoaState; + /* * Cache of reusable JSNativeEnumerators mapped by shape identifiers (as * stored in scope->shape). This cache is nulled by the GC and protected @@ -572,7 +567,7 @@ struct JSThreadData { jsuword nativeEnumCache[NATIVE_ENUM_CACHE_SIZE]; - void init(); + bool init(); void finish(); void mark(JSTracer *trc); void purge(JSContext *cx); @@ -689,7 +684,7 @@ struct JSSetSlotRequest { /* Caching Class.prototype lookups for the standard classes. */ struct JSClassProtoCache { - void purge() { memset(entries, 0, sizeof(entries)); } + void purge() { js::PodArrayZero(entries); } #ifdef JS_PROTO_CACHE_METERING struct Stats { @@ -709,9 +704,15 @@ struct JSClassProtoCache { GlobalAndProto entries[JSProto_LIMIT - JSProto_Object]; +#ifdef __GNUC__ +# pragma GCC visibility push(default) +#endif friend JSBool js_GetClassPrototype(JSContext *cx, JSObject *scope, JSProtoKey protoKey, JSObject **protop, JSClass *clasp); +#ifdef __GNUC__ +# pragma GCC visibility pop +#endif }; struct JSRuntime { @@ -820,13 +821,7 @@ struct JSRuntime { jsval negativeInfinityValue; jsval positiveInfinityValue; -#ifdef JS_THREADSAFE - JSLock *deflatedStringCacheLock; -#endif - JSHashTable *deflatedStringCache; -#ifdef DEBUG - uint32 deflatedStringCacheBytes; -#endif + js::DeflatedStringCache *deflatedStringCache; JSString *emptyString; @@ -919,13 +914,18 @@ struct JSRuntime { /* * Shared scope property tree, and arena-pool for allocating its nodes. + * This really should be free of all locking overhead and allocated in + * thread-local storage, hence the JS_PROPERTY_TREE(cx) macro. + */ + js::PropertyTree propertyTree; + +#define JS_PROPERTY_TREE(cx) ((cx)->runtime->propertyTree) + + /* * The propertyRemovals counter is incremented for every JSScope::clear, * and for each JSScope::remove method call that frees a slot in an object. - * See js_NativeGet and js_NativeSet in jsobj.c. + * See js_NativeGet and js_NativeSet in jsobj.cpp. */ - JSDHashTable propertyTreeHash; - JSScopeProperty *propertyFreeList; - JSArenaPool propertyArenaPool; int32 propertyRemovals; /* Script filename table. */ @@ -978,6 +978,7 @@ struct JSRuntime { JSBackgroundThread *deallocatorThread; #endif + JSEmptyScope *emptyArgumentsScope; JSEmptyScope *emptyBlockScope; /* @@ -1161,109 +1162,13 @@ typedef struct JSResolvingEntry { #define JSRESFLAG_LOOKUP 0x1 /* resolving id from lookup */ #define JSRESFLAG_WATCH 0x2 /* resolving id from watch */ - -/* - * Macros to push/pop JSTempValueRooter instances to context-linked stack of - * temporary GC roots. If you need to protect a result value that flows out of - * a C function across several layers of other functions, use the - * js_LeaveLocalRootScopeWithResult internal API (see further below) instead. - * - * The macros also provide a simple way to get a single rooted pointer via - * JS_PUSH_TEMP_ROOT_(cx, NULL, &tvr). Then &tvr.u. gives the - * necessary pointer. - * - * JSTempValueRooter.count defines the type of the rooted value referenced by - * JSTempValueRooter.u union of type JSTempValueUnion. When count is positive - * or zero, u.array points to a vector of jsvals. Otherwise it must be one of - * the following constants: - */ -#define JSTVU_SINGLE (-1) /* u.value or u. 
is single jsval - or non-JSString GC-thing pointer */ -#define JSTVU_TRACE (-2) /* u.trace is a hook to trace a custom - * structure */ -#define JSTVU_SPROP (-3) /* u.sprop roots property tree node */ -#define JSTVU_WEAK_ROOTS (-4) /* u.weakRoots points to saved weak roots */ -#define JSTVU_COMPILER (-5) /* u.compiler roots JSCompiler* */ -#define JSTVU_SCRIPT (-6) /* u.script roots JSScript* */ -#define JSTVU_ENUMERATOR (-7) /* a pointer to JSTempValueRooter points - to an instance of JSAutoEnumStateRooter - with u.object storing the enumeration - object */ - -/* - * Here single JSTVU_SINGLE covers both jsval and pointers to almost (see note - * below) any GC-thing via reinterpreting the thing as JSVAL_OBJECT. This works - * because the GC-thing is aligned on a 0 mod 8 boundary, and object has the 0 - * jsval tag. So any GC-heap-allocated thing pointer may be tagged as if it - * were an object and untagged, if it's then used only as an opaque pointer - * until discriminated by other means than tag bits. This is how, for example, - * js_GetGCThingTraceKind uses its |thing| parameter -- it consults GC-thing - * flags stored separately from the thing to decide the kind of thing. - * - * Note well that JSStrings may be statically allocated (see the intStringTable - * and unitStringTable static arrays), so this hack does not work for arbitrary - * GC-thing pointers. - */ -#define JS_PUSH_TEMP_ROOT_COMMON(cx,x,tvr,cnt,kind) \ - JS_BEGIN_MACRO \ - JS_ASSERT((cx)->tempValueRooters != (tvr)); \ - (tvr)->count = (cnt); \ - (tvr)->u.kind = (x); \ - (tvr)->down = (cx)->tempValueRooters; \ - (cx)->tempValueRooters = (tvr); \ - JS_END_MACRO - -#define JS_POP_TEMP_ROOT(cx,tvr) \ - JS_BEGIN_MACRO \ - JS_ASSERT((cx)->tempValueRooters == (tvr)); \ - (cx)->tempValueRooters = (tvr)->down; \ - JS_END_MACRO - -#define JS_PUSH_TEMP_ROOT(cx,cnt,arr,tvr) \ - JS_BEGIN_MACRO \ - JS_ASSERT((int)(cnt) >= 0); \ - JS_PUSH_TEMP_ROOT_COMMON(cx, arr, tvr, (ptrdiff_t) (cnt), array); \ - JS_END_MACRO - -#define JS_PUSH_SINGLE_TEMP_ROOT(cx,val,tvr) \ - JS_PUSH_TEMP_ROOT_COMMON(cx, val, tvr, JSTVU_SINGLE, value) - -#define JS_PUSH_TEMP_ROOT_OBJECT(cx,obj,tvr) \ - JS_PUSH_TEMP_ROOT_COMMON(cx, obj, tvr, JSTVU_SINGLE, object) - -#define JS_PUSH_TEMP_ROOT_STRING(cx,str,tvr) \ - JS_PUSH_SINGLE_TEMP_ROOT(cx, str ? STRING_TO_JSVAL(str) : JSVAL_NULL, tvr) - -#define JS_PUSH_TEMP_ROOT_XML(cx,xml_,tvr) \ - JS_PUSH_TEMP_ROOT_COMMON(cx, xml_, tvr, JSTVU_SINGLE, xml) - -#define JS_PUSH_TEMP_ROOT_TRACE(cx,trace_,tvr) \ - JS_PUSH_TEMP_ROOT_COMMON(cx, trace_, tvr, JSTVU_TRACE, trace) - -#define JS_PUSH_TEMP_ROOT_SPROP(cx,sprop_,tvr) \ - JS_PUSH_TEMP_ROOT_COMMON(cx, sprop_, tvr, JSTVU_SPROP, sprop) - -#define JS_PUSH_TEMP_ROOT_WEAK_COPY(cx,weakRoots_,tvr) \ - JS_PUSH_TEMP_ROOT_COMMON(cx, weakRoots_, tvr, JSTVU_WEAK_ROOTS, weakRoots) - -#define JS_PUSH_TEMP_ROOT_COMPILER(cx,pc,tvr) \ - JS_PUSH_TEMP_ROOT_COMMON(cx, pc, tvr, JSTVU_COMPILER, compiler) - -#define JS_PUSH_TEMP_ROOT_SCRIPT(cx,script_,tvr) \ - JS_PUSH_TEMP_ROOT_COMMON(cx, script_, tvr, JSTVU_SCRIPT, script) - #define JSRESOLVE_INFER 0xffff /* infer bits from current bytecode */ extern const JSDebugHooks js_NullDebugHooks; /* defined in jsdbgapi.cpp */ -/* - * Wraps a stack frame which has been temporarily popped from its call stack - * and needs to be GC-reachable. See JSContext::{push,pop}GCReachableFrame. 
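/*
 * [Editor's aside -- illustrative sketch, not part of the patch. The deleted
 * comment above documents the low-bit tagging trick the old rooters relied
 * on: GC things are allocated on 8-byte boundaries, so an object tag of 0
 * makes tagging and untagging free. The invariant in isolation:]
 */
#include <stdint.h>
#include <assert.h>

static const uintptr_t TAG_MASK   = 0x7;  /* low 3 bits are spare on 8-aligned payloads */
static const uintptr_t TAG_OBJECT = 0x0;  /* tag 0: the cast is the whole conversion */

static uintptr_t
TagPointer(void *p, uintptr_t tag)
{
    assert((uintptr_t(p) & TAG_MASK) == 0);  /* alignment is what makes this sound */
    return uintptr_t(p) | tag;
}

static void *
UntagPointer(uintptr_t v)
{
    return (void *)(v & ~TAG_MASK);
}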
- */ -struct JSGCReachableFrame { - JSGCReachableFrame *next; - JSStackFrame *frame; -}; +namespace js { +class AutoGCRooter; +} struct JSContext { @@ -1383,19 +1288,6 @@ struct JSContext void *data; void *data2; - /* Linked list of frames temporarily popped from their chain. */ - JSGCReachableFrame *reachableFrames; - - void pushGCReachableFrame(JSGCReachableFrame &gcrf, JSStackFrame *f) { - gcrf.next = reachableFrames; - gcrf.frame = f; - reachableFrames = &gcrf; - } - - void popGCReachableFrame() { - reachableFrames = reachableFrames->next; - } - private: #ifdef __GNUC__ # pragma GCC visibility push(default) @@ -1450,6 +1342,12 @@ struct JSContext currentCallStack->restore(); } + /* + * Perform a linear search of all frames in all callstacks in the given context + * for the given frame, returning the callstack, if found, and null otherwise. + */ + js::CallStack *containingCallStack(JSStackFrame *target); + #ifdef JS_THREADSAFE JSThread *thread; jsrefcount requestDepth; @@ -1466,8 +1364,8 @@ struct JSContext /* PDL of stack headers describing stack slots not rooted by argv, etc. */ JSStackHeader *stackHeaders; - /* Stack of thread-stack-allocated temporary GC roots. */ - JSTempValueRooter *tempValueRooters; + /* Stack of thread-stack-allocated GC roots. */ + js::AutoGCRooter *autoGCRooters; /* Debug hooks associated with the current context. */ const JSDebugHooks *debugHooks; @@ -1481,6 +1379,9 @@ struct JSContext /* Stored here to avoid passing it around as a parameter. */ uintN resolveFlags; + /* Random number generator state, used by jsmath.cpp. */ + int64 rngSeed; + #ifdef JS_TRACER /* * State for the current tree execution. bailExit is valid if the tree has @@ -1704,93 +1605,265 @@ FrameAtomBase(JSContext *cx, JSStackFrame *fp) : fp->script->atomMap.vector; } -/* FIXME(bug 332648): Move this into a public header. 
*/ -class JSAutoTempValueRooter -{ +namespace js { + +class AutoGCRooter { public: - JSAutoTempValueRooter(JSContext *cx, size_t len, jsval *vec - JS_GUARD_OBJECT_NOTIFIER_PARAM) - : mContext(cx) { - JS_GUARD_OBJECT_NOTIFIER_INIT; - JS_PUSH_TEMP_ROOT(mContext, len, vec, &mTvr); - } - explicit JSAutoTempValueRooter(JSContext *cx, jsval v = JSVAL_NULL - JS_GUARD_OBJECT_NOTIFIER_PARAM) - : mContext(cx) { - JS_GUARD_OBJECT_NOTIFIER_INIT; - JS_PUSH_SINGLE_TEMP_ROOT(mContext, v, &mTvr); - } - JSAutoTempValueRooter(JSContext *cx, JSString *str - JS_GUARD_OBJECT_NOTIFIER_PARAM) - : mContext(cx) { - JS_GUARD_OBJECT_NOTIFIER_INIT; - JS_PUSH_TEMP_ROOT_STRING(mContext, str, &mTvr); - } - JSAutoTempValueRooter(JSContext *cx, JSObject *obj - JS_GUARD_OBJECT_NOTIFIER_PARAM) - : mContext(cx) { - JS_GUARD_OBJECT_NOTIFIER_INIT; - JS_PUSH_TEMP_ROOT_OBJECT(mContext, obj, &mTvr); - } - JSAutoTempValueRooter(JSContext *cx, JSScopeProperty *sprop - JS_GUARD_OBJECT_NOTIFIER_PARAM) - : mContext(cx) { - JS_GUARD_OBJECT_NOTIFIER_INIT; - JS_PUSH_TEMP_ROOT_SPROP(mContext, sprop, &mTvr); + AutoGCRooter(JSContext *cx, ptrdiff_t tag) + : down(cx->autoGCRooters), tag(tag), context(cx) + { + JS_ASSERT(this != cx->autoGCRooters); + cx->autoGCRooters = this; } - ~JSAutoTempValueRooter() { - JS_POP_TEMP_ROOT(mContext, &mTvr); + ~AutoGCRooter() { + JS_ASSERT(this == context->autoGCRooters); + context->autoGCRooters = down; } - jsval value() { return mTvr.u.value; } - jsval *addr() { return &mTvr.u.value; } + inline void trace(JSTracer *trc); + +#ifdef __GNUC__ +# pragma GCC visibility push(default) +#endif + friend void ::js_TraceContext(JSTracer *trc, JSContext *acx); +#ifdef __GNUC__ +# pragma GCC visibility pop +#endif protected: - JSContext *mContext; + AutoGCRooter * const down; - private: - JSTempValueRooter mTvr; - JS_DECL_USE_GUARD_OBJECT_NOTIFIER + /* + * Discriminates actual subclass of this being used. If non-negative, the + * subclass roots an array of jsvals of the length stored in this field. + * If negative, meaning is indicated by the corresponding value in the enum + * below. Any other negative value indicates some deeper problem such as + * memory corruption. 
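/*
 * [Editor's aside -- illustrative sketch, not part of the patch. The
 * constructor/destructor pair defined below is the whole rooting protocol:
 * construction pushes the rooter onto a per-context intrusive stack,
 * destruction asserts strict LIFO order and pops. Reduced to hypothetical
 * minimal types; a GC can then walk cx->rooters and dispatch on tag, as
 * AutoGCRooter::trace does in jscntxtinlines.h further down.]
 */
#include <assert.h>
#include <stddef.h>

struct CxSketch;

struct RooterSketch {
    RooterSketch(CxSketch *cx, ptrdiff_t tag);
    ~RooterSketch();
    RooterSketch *const down;  /* next-older rooter on this context */
    ptrdiff_t tag;             /* >= 0: rooted array length; < 0: subclass kind */
    CxSketch *const cx;
};

struct CxSketch { RooterSketch *rooters; CxSketch() : rooters(NULL) { } };

inline RooterSketch::RooterSketch(CxSketch *cx, ptrdiff_t tag)
  : down(cx->rooters), tag(tag), cx(cx)
{
    cx->rooters = this;            /* push on construction */
}

inline RooterSketch::~RooterSketch()
{
    assert(cx->rooters == this);   /* strict LIFO */
    cx->rooters = down;            /* pop on destruction */
}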
+ */ + ptrdiff_t tag; + + JSContext * const context; + + enum { + JSVAL = -1, /* js::AutoValueRooter */ + SPROP = -2, /* js::AutoScopePropertyRooter */ + WEAKROOTS = -3, /* js::AutoSaveWeakRoots */ + COMPILER = -4, /* JSCompiler */ + SCRIPT = -5, /* js::AutoScriptRooter */ + ENUMERATOR = -6, /* js::AutoEnumStateRooter */ + IDARRAY = -7, /* js::AutoIdArray */ + DESCRIPTORS = -8, /* js::AutoDescriptorArray */ + NAMESPACES = -9, /* js::AutoNamespaceArray */ + XML = -10, /* js::AutoXMLRooter */ + OBJECT = -11, /* js::AutoObjectRooter */ + ID = -12, /* js::AutoIdRooter */ + VECTOR = -13 /* js::AutoValueVector */ + }; }; -class JSAutoTempIdRooter +class AutoSaveWeakRoots : private AutoGCRooter { public: - explicit JSAutoTempIdRooter(JSContext *cx, jsid id = INT_TO_JSID(0) - JS_GUARD_OBJECT_NOTIFIER_PARAM) - : mContext(cx) { + explicit AutoSaveWeakRoots(JSContext *cx + JS_GUARD_OBJECT_NOTIFIER_PARAM) + : AutoGCRooter(cx, WEAKROOTS), savedRoots(cx->weakRoots) + { JS_GUARD_OBJECT_NOTIFIER_INIT; - JS_PUSH_SINGLE_TEMP_ROOT(mContext, ID_TO_VALUE(id), &mTvr); } - ~JSAutoTempIdRooter() { - JS_POP_TEMP_ROOT(mContext, &mTvr); - } - - jsid id() { return (jsid) mTvr.u.value; } - jsid * addr() { return (jsid *) &mTvr.u.value; } + friend void AutoGCRooter::trace(JSTracer *trc); private: - JSContext *mContext; - JSTempValueRooter mTvr; + JSWeakRoots savedRoots; JS_DECL_USE_GUARD_OBJECT_NOTIFIER }; -class JSAutoIdArray { +/* FIXME(bug 332648): Move this into a public header. */ +class AutoValueRooter : private AutoGCRooter +{ public: - JSAutoIdArray(JSContext *cx, JSIdArray *ida - JS_GUARD_OBJECT_NOTIFIER_PARAM) - : cx(cx), idArray(ida) { + explicit AutoValueRooter(JSContext *cx, jsval v = JSVAL_NULL + JS_GUARD_OBJECT_NOTIFIER_PARAM) + : AutoGCRooter(cx, JSVAL), val(v) + { JS_GUARD_OBJECT_NOTIFIER_INIT; - if (ida) - JS_PUSH_TEMP_ROOT(cx, ida->length, ida->vector, &tvr); } - ~JSAutoIdArray() { - if (idArray) { - JS_POP_TEMP_ROOT(cx, &tvr); - JS_DestroyIdArray(cx, idArray); - } + AutoValueRooter(JSContext *cx, JSString *str + JS_GUARD_OBJECT_NOTIFIER_PARAM) + : AutoGCRooter(cx, JSVAL), val(STRING_TO_JSVAL(str)) + { + JS_GUARD_OBJECT_NOTIFIER_INIT; + } + AutoValueRooter(JSContext *cx, JSObject *obj + JS_GUARD_OBJECT_NOTIFIER_PARAM) + : AutoGCRooter(cx, JSVAL), val(OBJECT_TO_JSVAL(obj)) + { + JS_GUARD_OBJECT_NOTIFIER_INIT; + } + + void setObject(JSObject *obj) { + JS_ASSERT(tag == JSVAL); + val = OBJECT_TO_JSVAL(obj); + } + + void setString(JSString *str) { + JS_ASSERT(tag == JSVAL); + JS_ASSERT(str); + val = STRING_TO_JSVAL(str); + } + + void setDouble(jsdouble *dp) { + JS_ASSERT(tag == JSVAL); + JS_ASSERT(dp); + val = DOUBLE_TO_JSVAL(dp); + } + + jsval value() const { + JS_ASSERT(tag == JSVAL); + return val; + } + + jsval *addr() { + JS_ASSERT(tag == JSVAL); + return &val; + } + + friend void AutoGCRooter::trace(JSTracer *trc); + + private: + jsval val; + JS_DECL_USE_GUARD_OBJECT_NOTIFIER +}; + +class AutoObjectRooter : private AutoGCRooter { + public: + AutoObjectRooter(JSContext *cx, JSObject *obj = NULL + JS_GUARD_OBJECT_NOTIFIER_PARAM) + : AutoGCRooter(cx, OBJECT), obj(obj) + { + JS_GUARD_OBJECT_NOTIFIER_INIT; + } + + void setObject(JSObject *obj) { + this->obj = obj; + } + + JSObject * object() const { + return obj; + } + + JSObject ** addr() { + return &obj; + } + + friend void AutoGCRooter::trace(JSTracer *trc); + + private: + JSObject *obj; + JS_DECL_USE_GUARD_OBJECT_NOTIFIER +}; + +class AutoArrayRooter : private AutoGCRooter { + public: + AutoArrayRooter(JSContext *cx, size_t len, jsval *vec + 
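/*
 * [Editor's aside -- illustrative sketch, not part of the patch. The usage
 * pattern AutoValueRooter serves throughout this patch (array_shift, the
 * canvas quickstubs, Array_p_pop): root one scratch jsval, hand addr() to a
 * fallible producer, then consume value(). GetElem/SetElem here are
 * hypothetical stand-ins for GetArrayElement-style callees:]
 */
static JSBool GetElem(JSContext *cx, JSObject *obj, jsuint i, jsval *vp);
static JSBool SetElem(JSContext *cx, JSObject *obj, jsuint i, jsval v);

static JSBool
CopyElem(JSContext *cx, JSObject *obj, jsuint from, jsuint to)
{
    js::AutoValueRooter tvr(cx);                 /* rooted, starts as JSVAL_NULL */
    if (!GetElem(cx, obj, from, tvr.addr()))     /* callee fills the rooted slot */
        return JS_FALSE;
    return SetElem(cx, obj, to, tvr.value());    /* value stayed GC-safe throughout */
}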
JS_GUARD_OBJECT_NOTIFIER_PARAM) + : AutoGCRooter(cx, len), array(vec) + { + JS_GUARD_OBJECT_NOTIFIER_INIT; + JS_ASSERT(tag >= 0); + } + + void changeLength(size_t newLength) { + tag = ptrdiff_t(newLength); + JS_ASSERT(tag >= 0); + } + + void changeArray(jsval *newArray, size_t newLength) { + changeLength(newLength); + array = newArray; + } + + jsval *array; + + friend void AutoGCRooter::trace(JSTracer *trc); + + private: + JS_DECL_USE_GUARD_OBJECT_NOTIFIER +}; + +class AutoScopePropertyRooter : private AutoGCRooter { + public: + AutoScopePropertyRooter(JSContext *cx, JSScopeProperty *sprop + JS_GUARD_OBJECT_NOTIFIER_PARAM) + : AutoGCRooter(cx, SPROP), sprop(sprop) + { + JS_GUARD_OBJECT_NOTIFIER_INIT; + } + + friend void AutoGCRooter::trace(JSTracer *trc); + + private: + JSScopeProperty * const sprop; + JS_DECL_USE_GUARD_OBJECT_NOTIFIER +}; + +class AutoScriptRooter : private AutoGCRooter { + public: + AutoScriptRooter(JSContext *cx, JSScript *script + JS_GUARD_OBJECT_NOTIFIER_PARAM) + : AutoGCRooter(cx, SCRIPT), script(script) + { + JS_GUARD_OBJECT_NOTIFIER_INIT; + } + + void setScript(JSScript *script) { + this->script = script; + } + + friend void AutoGCRooter::trace(JSTracer *trc); + + private: + JSScript *script; + JS_DECL_USE_GUARD_OBJECT_NOTIFIER +}; + +class AutoIdRooter : private AutoGCRooter +{ + public: + explicit AutoIdRooter(JSContext *cx, jsid id = INT_TO_JSID(0) + JS_GUARD_OBJECT_NOTIFIER_PARAM) + : AutoGCRooter(cx, ID), idval(id) + { + JS_GUARD_OBJECT_NOTIFIER_INIT; + } + + jsid id() { + return idval; + } + + jsid * addr() { + return &idval; + } + + friend void AutoGCRooter::trace(JSTracer *trc); + + private: + jsid idval; + JS_DECL_USE_GUARD_OBJECT_NOTIFIER +}; + +class AutoIdArray : private AutoGCRooter { + public: + AutoIdArray(JSContext *cx, JSIdArray *ida + JS_GUARD_OBJECT_NOTIFIER_PARAM) + : AutoGCRooter(cx, ida ? ida->length : 0), idArray(ida) + { + JS_GUARD_OBJECT_NOTIFIER_INIT; + } + ~AutoIdArray() { + if (idArray) + JS_DestroyIdArray(context, idArray); } bool operator!() { return idArray == NULL; @@ -1803,52 +1876,86 @@ class JSAutoIdArray { size_t length() const { return idArray->length; } + + friend void AutoGCRooter::trace(JSTracer *trc); + + protected: + inline void trace(JSTracer *trc); + private: - JSContext * const cx; JSIdArray * const idArray; - JSTempValueRooter tvr; JS_DECL_USE_GUARD_OBJECT_NOTIFIER /* No copy or assignment semantics. */ - JSAutoIdArray(JSAutoIdArray &); - void operator=(JSAutoIdArray &); + AutoIdArray(AutoIdArray &ida); + void operator=(AutoIdArray &ida); }; /* The auto-root for enumeration object and its state. 
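/*
 * [Editor's aside -- illustrative sketch, not part of the patch. How
 * array_sort above drives AutoArrayRooter: start with a zero-length rooted
 * window, clear each slot before growing the window over it, and shrink the
 * window once a tail is dead so a later GC scans less. FillSlot is a
 * hypothetical stand-in for GetArrayElement:]
 */
static JSBool FillSlot(JSContext *cx, size_t i, jsval *vp);

static bool
CollectRooted(JSContext *cx, jsval *vec, size_t n)
{
    js::AutoArrayRooter tvr(cx, 0, vec);   /* nothing rooted yet */
    for (size_t i = 0; i < n; i++) {
        vec[i] = JSVAL_NULL;               /* clear before rooting the slot */
        tvr.changeLength(i + 1);           /* GC may now scan vec[0..i] */
        if (!FillSlot(cx, i, &vec[i]))
            return false;
    }
    tvr.changeLength(n / 2);               /* e.g. unroot a dead scratch tail early,
                                              as array_sort does before InitArrayElements */
    return true;
}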
*/ -class JSAutoEnumStateRooter : public JSTempValueRooter +class AutoEnumStateRooter : private AutoGCRooter { public: - JSAutoEnumStateRooter(JSContext *cx, JSObject *obj, jsval *statep - JS_GUARD_OBJECT_NOTIFIER_PARAM) - : mContext(cx), mStatep(statep) + AutoEnumStateRooter(JSContext *cx, JSObject *obj + JS_GUARD_OBJECT_NOTIFIER_PARAM) + : AutoGCRooter(cx, ENUMERATOR), obj(obj), stateValue(JSVAL_NULL) { JS_GUARD_OBJECT_NOTIFIER_INIT; JS_ASSERT(obj); - JS_ASSERT(statep); - JS_PUSH_TEMP_ROOT_COMMON(cx, obj, this, JSTVU_ENUMERATOR, object); } - ~JSAutoEnumStateRooter() { - JS_POP_TEMP_ROOT(mContext, this); + ~AutoEnumStateRooter() { + if (!JSVAL_IS_NULL(stateValue)) { +#ifdef DEBUG + JSBool ok = +#endif + obj->enumerate(context, JSENUMERATE_DESTROY, &stateValue, 0); + JS_ASSERT(ok); + } } - void mark(JSTracer *trc) { - JS_CALL_OBJECT_TRACER(trc, u.object, "enumerator_obj"); - js_MarkEnumeratorState(trc, u.object, *mStatep); + friend void AutoGCRooter::trace(JSTracer *trc); + + jsval state() const { return stateValue; } + jsval * addr() { return &stateValue; } + + protected: + void trace(JSTracer *trc) { + JS_CALL_OBJECT_TRACER(trc, obj, "js::AutoEnumStateRooter.obj"); + js_MarkEnumeratorState(trc, obj, stateValue); } + JSObject * const obj; + private: - JSContext *mContext; - jsval *mStatep; + jsval stateValue; JS_DECL_USE_GUARD_OBJECT_NOTIFIER }; +#ifdef JS_HAS_XML_SUPPORT +class AutoXMLRooter : private AutoGCRooter { + public: + AutoXMLRooter(JSContext *cx, JSXML *xml) + : AutoGCRooter(cx, XML), xml(xml) + { + JS_ASSERT(xml); + } + + friend void AutoGCRooter::trace(JSTracer *trc); + + private: + JSXML * const xml; +}; +#endif /* JS_HAS_XML_SUPPORT */ + +} /* namespace js */ + class JSAutoResolveFlags { public: JSAutoResolveFlags(JSContext *cx, uintN flags JS_GUARD_OBJECT_NOTIFIER_PARAM) - : mContext(cx), mSaved(cx->resolveFlags) { + : mContext(cx), mSaved(cx->resolveFlags) + { JS_GUARD_OBJECT_NOTIFIER_INIT; cx->resolveFlags = flags; } @@ -2140,11 +2247,8 @@ js_ReportValueErrorFlags(JSContext *cx, uintN flags, const uintN errorNumber, extern JSErrorFormatString js_ErrorFormatString[JSErr_Limit]; /* - * See JS_SetThreadStackLimit in jsapi.c, where we check that the stack grows - * in the expected direction. On Unix-y systems, JS_STACK_GROWTH_DIRECTION is - * computed on the build host by jscpucfg.c and written into jsautocfg.h. The - * macro is hardcoded in jscpucfg.h on Windows and Mac systems (for historical - * reasons pre-dating autoconf usage). + * See JS_SetThreadStackLimit in jsapi.c, where we check that the stack + * grows in the expected direction. */ #if JS_STACK_GROWTH_DIRECTION > 0 # define JS_CHECK_STACK_SIZE(cx, lval) ((jsuword)&(lval) < (cx)->stackLimit) @@ -2243,7 +2347,7 @@ js_GetTopStackFrame(JSContext *cx) static JS_INLINE JSBool js_IsPropertyCacheDisabled(JSContext *cx) { - return cx->runtime->shapeGen >= SHAPE_OVERFLOW_BIT; + return cx->runtime->shapeGen >= js::SHAPE_OVERFLOW_BIT; } static JS_INLINE uint32 @@ -2258,7 +2362,7 @@ js_RegenerateShapeForGC(JSContext *cx) * the shape stays such. 
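/*
 * [Editor's aside -- illustrative sketch, not part of the patch. How a caller
 * is expected to use AutoEnumStateRooter above: own the enumerator state for
 * the whole walk and rely on the destructor to send JSENUMERATE_DESTROY on
 * every exit path. The INIT/NEXT details below are condensed from the
 * JSNewEnumerateOp contract (NEXT signals exhaustion by nulling the state)
 * and are an assumption of this sketch:]
 */
static JSBool
CountProperties(JSContext *cx, JSObject *obj, jsuint *countp)
{
    js::AutoEnumStateRooter state(cx, obj);  /* destructor sends JSENUMERATE_DESTROY */
    if (!obj->enumerate(cx, JSENUMERATE_INIT, state.addr(), NULL))
        return JS_FALSE;
    *countp = 0;
    for (;;) {
        jsid id;
        if (!obj->enumerate(cx, JSENUMERATE_NEXT, state.addr(), &id))
            return JS_FALSE;                 /* early exit: rooter still cleans up */
        if (JSVAL_IS_NULL(state.state()))    /* iteration exhausted */
            break;
        ++*countp;
    }
    return JS_TRUE;
}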
     */
    uint32 shape = cx->runtime->shapeGen;
-    shape = (shape + 1) | (shape & SHAPE_OVERFLOW_BIT);
+    shape = (shape + 1) | (shape & js::SHAPE_OVERFLOW_BIT);
    cx->runtime->shapeGen = shape;
    return shape;
 }
@@ -2289,6 +2393,52 @@ ContextAllocPolicy::reportAllocOverflow() const
     js_ReportAllocationOverflow(cx);
 }
 
+class AutoValueVector : private AutoGCRooter
+{
+  public:
+    explicit AutoValueVector(JSContext *cx
+                             JS_GUARD_OBJECT_NOTIFIER_PARAM)
+      : AutoGCRooter(cx, VECTOR), vector(cx)
+    {
+        JS_GUARD_OBJECT_NOTIFIER_INIT;
+    }
+
+    size_t length() const { return vector.length(); }
+
+    bool push(jsval v) { return vector.append(v); }
+    bool push(JSString *str) { return push(STRING_TO_JSVAL(str)); }
+    bool push(JSObject *obj) { return push(OBJECT_TO_JSVAL(obj)); }
+    bool push(jsdouble *dp) { return push(DOUBLE_TO_JSVAL(dp)); }
+
+    void pop() { vector.popBack(); }
+
+    bool resize(size_t newLength) {
+        size_t oldLength = vector.length();
+        if (!vector.resize(newLength))
+            return false;
+        JS_STATIC_ASSERT(JSVAL_NULL == 0);
+        if (newLength > oldLength)
+            PodZero(vector.begin() + oldLength, newLength - oldLength);
+        return true;
+    }
+
+    bool reserve(size_t newLength) {
+        return vector.reserve(newLength);
+    }
+
+    jsval &operator[](size_t i) { return vector[i]; }
+    jsval operator[](size_t i) const { return vector[i]; }
+
+    const jsval *buffer() const { return vector.begin(); }
+    jsval *buffer() { return vector.begin(); }
+
+    friend void AutoGCRooter::trace(JSTracer *trc);
+
+  private:
+    Vector<jsval, 8> vector;
+    JS_DECL_USE_GUARD_OBJECT_NOTIFIER
+};
+
 }
 
 #ifdef _MSC_VER
diff --git a/js/src/jscntxtinlines.h b/js/src/jscntxtinlines.h
new file mode 100644
index 00000000000..3141959102c
--- /dev/null
+++ b/js/src/jscntxtinlines.h
@@ -0,0 +1,153 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ *
+ * ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is SpiderMonkey code.
+ *
+ * The Initial Developer of the Original Code is
+ *   Mozilla Corporation.
+ * Portions created by the Initial Developer are Copyright (C) 2010
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   Jeff Walden (original author)
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+
+#ifndef jscntxtinlines_h___
+#define jscntxtinlines_h___
+
+#include "jscntxt.h"
+#include "jsxml.h"
+
+#include "jsobjinlines.h"
+
+namespace js {
+
+void
+AutoIdArray::trace(JSTracer *trc) {
+    JS_ASSERT(tag == IDARRAY);
+    js::TraceValues(trc, idArray->length, idArray->vector, "JSAutoIdArray.idArray");
+}
+
+class AutoNamespaces : protected AutoGCRooter {
+  protected:
+    AutoNamespaces(JSContext *cx) : AutoGCRooter(cx, NAMESPACES) {
+    }
+
+    friend void AutoGCRooter::trace(JSTracer *trc);
+
+  public:
+    JSXMLArray array;
+};
+
+inline void
+AutoGCRooter::trace(JSTracer *trc)
+{
+    switch (tag) {
+      case JSVAL:
+        JS_SET_TRACING_NAME(trc, "js::AutoValueRooter.val");
+        js_CallValueTracerIfGCThing(trc, static_cast<AutoValueRooter *>(this)->val);
+        return;
+
+      case SPROP:
+        static_cast<AutoScopePropertyRooter *>(this)->sprop->trace(trc);
+        return;
+
+      case WEAKROOTS:
+        static_cast<AutoSaveWeakRoots *>(this)->savedRoots.mark(trc);
+        return;
+
+      case COMPILER:
+        static_cast<JSCompiler *>(this)->trace(trc);
+        return;
+
+      case SCRIPT:
+        if (JSScript *script = static_cast<AutoScriptRooter *>(this)->script)
+            js_TraceScript(trc, script);
+        return;
+
+      case ENUMERATOR:
+        static_cast<AutoEnumStateRooter *>(this)->trace(trc);
+        return;
+
+      case IDARRAY: {
+        JSIdArray *ida = static_cast<AutoIdArray *>(this)->idArray;
+        TraceValues(trc, ida->length, ida->vector, "js::AutoIdArray.idArray");
+        return;
+      }
+
+      case DESCRIPTORS: {
+        PropertyDescriptorArray &descriptors =
+            static_cast<AutoDescriptorArray *>(this)->descriptors;
+        for (size_t i = 0, len = descriptors.length(); i < len; i++) {
+            PropertyDescriptor &desc = descriptors[i];
+
+            JS_CALL_VALUE_TRACER(trc, desc.value, "PropertyDescriptor::value");
+            JS_CALL_VALUE_TRACER(trc, desc.get, "PropertyDescriptor::get");
+            JS_CALL_VALUE_TRACER(trc, desc.set, "PropertyDescriptor::set");
+            js_TraceId(trc, desc.id);
+        }
+        return;
+      }
+
+      case NAMESPACES: {
+        JSXMLArray &array = static_cast<AutoNamespaces *>(this)->array;
+        TraceObjectVector(trc, reinterpret_cast<JSObject **>(array.vector), array.length);
+        array.cursors->trace(trc);
+        return;
+      }
+
+      case XML:
+        js_TraceXML(trc, static_cast<AutoXMLRooter *>(this)->xml);
+        return;
+
+      case OBJECT:
+        if (JSObject *obj = static_cast<AutoObjectRooter *>(this)->obj) {
+            JS_SET_TRACING_NAME(trc, "js::AutoObjectRooter.obj");
+            js_CallGCMarker(trc, obj, JSTRACE_OBJECT);
+        }
+        return;
+
+      case ID:
+        JS_SET_TRACING_NAME(trc, "js::AutoIdRooter.val");
+        js_CallValueTracerIfGCThing(trc, static_cast<AutoIdRooter *>(this)->idval);
+        return;
+
+      case VECTOR: {
+        js::Vector<jsval, 8> &vector = static_cast<AutoValueVector *>(this)->vector;
+        js::TraceValues(trc, vector.length(), vector.begin(), "js::AutoValueVector.vector");
+        return;
+      }
+    }
+
+    JS_ASSERT(tag >= 0);
+    TraceValues(trc, tag, static_cast<AutoArrayRooter *>(this)->array, "js::AutoArrayRooter.array");
+}
+
+}
+
+#endif /* jscntxtinlines_h___ */
diff --git a/js/src/jscpucfg.cpp b/js/src/jscpucfg.cpp
index be4bf296c58..c0974d4e121 100644
--- a/js/src/jscpucfg.cpp
+++ b/js/src/jscpucfg.cpp
@@ -50,28 +50,8 @@
 
 /************************************************************************/
 
-#ifdef __GNUC__
-#define NS_NEVER_INLINE __attribute__((noinline))
-#else
-#define NS_NEVER_INLINE
-#endif
-
-#ifdef __SUNPRO_C
-static int StackGrowthDirection(int *dummy1addr);
-#pragma no_inline(StackGrowthDirection)
-#endif
-
-static int NS_NEVER_INLINE StackGrowthDirection(int *dummy1addr)
-{
-    int dummy2;
-
-    return (&dummy2 < dummy1addr) ?
-1 : 1; -} - int main(int argc, char **argv) { - int dummy1; - printf("#ifndef js_cpucfg___\n"); printf("#define js_cpucfg___\n\n"); @@ -185,7 +165,15 @@ int main(int argc, char **argv) #endif /* CROSS_COMPILE */ - printf("#define JS_STACK_GROWTH_DIRECTION (%d)\n", StackGrowthDirection(&dummy1)); + // PA-RISC is the only platform we try to support on which the stack + // grows towards higher addresses. Trying to detect it here has + // historically led to portability problems, which aren't worth it + // given the near consensus on stack growth direction. + printf("#ifdef __hppa\n" + "# define JS_STACK_GROWTH_DIRECTION (1)\n" + "#else\n" + "# define JS_STACK_GROWTH_DIRECTION (-1)\n" + "#endif\n"); printf("#endif /* js_cpucfg___ */\n"); diff --git a/js/src/jsdate.cpp b/js/src/jsdate.cpp index 9aa79066183..994a5009bf1 100644 --- a/js/src/jsdate.cpp +++ b/js/src/jsdate.cpp @@ -2175,7 +2175,7 @@ date_toSource(JSContext *cx, uintN argc, jsval *vp) if (!GetUTCTime(cx, JS_THIS_OBJECT(cx, vp), vp, &utctime)) return JS_FALSE; - numStr = JS_dtostr(buf, sizeof buf, DTOSTR_STANDARD, 0, utctime); + numStr = js_dtostr(JS_THREAD_DATA(cx)->dtoaState, buf, sizeof buf, DTOSTR_STANDARD, 0, utctime); if (!numStr) { JS_ReportOutOfMemory(cx); return JS_FALSE; diff --git a/js/src/jsdbgapi.cpp b/js/src/jsdbgapi.cpp index a964851f364..f6ab235ce25 100644 --- a/js/src/jsdbgapi.cpp +++ b/js/src/jsdbgapi.cpp @@ -64,6 +64,7 @@ #include "jsstr.h" #include "jsatominlines.h" +#include "jsobjinlines.h" #include "jsscopeinlines.h" #include "jsautooplen.h" @@ -473,9 +474,9 @@ DropWatchPointAndUnlock(JSContext *cx, JSWatchPoint *wp, uintN flag) */ JSScopeProperty *wprop = scope->lookup(sprop->id); if (wprop && - ((wprop->attrs ^ sprop->attrs) & JSPROP_SETTER) == 0 && + wprop->hasSetterValue() == sprop->hasSetterValue() && IsWatchedProperty(cx, wprop)) { - sprop = scope->changeProperty(cx, wprop, 0, wprop->attrs, + sprop = scope->changeProperty(cx, wprop, 0, wprop->attributes(), wprop->getter(), wp->setter); if (!sprop) ok = JS_FALSE; @@ -505,7 +506,7 @@ js_TraceWatchPoints(JSTracer *trc, JSObject *obj) wp = (JSWatchPoint *)wp->links.next) { if (wp->object == obj) { wp->sprop->trace(trc); - if ((wp->sprop->attrs & JSPROP_SETTER) && wp->setter) { + if (wp->sprop->hasSetterValue() && wp->setter) { JS_CALL_OBJECT_TRACER(trc, js_CastAsObject(wp->setter), "wp->setter"); } @@ -630,7 +631,7 @@ js_watch_set(JSContext *cx, JSObject *obj, jsval id, jsval *vp) /* NB: wp is held, so we can safely dereference it still. */ ok = wp->handler(cx, obj, propid, SPROP_HAS_VALID_SLOT(sprop, scope) - ? OBJ_GET_SLOT(cx, obj, sprop->slot) + ? obj->getSlotMT(cx, sprop->slot) : JSVAL_VOID, vp, wp->closure); if (ok) { @@ -697,9 +698,9 @@ js_watch_set(JSContext *cx, JSObject *obj, jsval id, jsval *vp) argv[0] = OBJECT_TO_JSVAL(closure); argv[1] = JSVAL_NULL; - memset(argv + 2, 0, (nslots - 2) * sizeof(jsval)); + PodZero(argv + 2, nslots - 2); - memset(&frame, 0, sizeof(frame)); + PodZero(&frame); frame.script = script; frame.regs = NULL; frame.fun = fun; @@ -732,7 +733,7 @@ js_watch_set(JSContext *cx, JSObject *obj, jsval id, jsval *vp) argv = NULL; /* suppress bogus gcc warnings */ #endif ok = !wp->setter || - ((sprop->attrs & JSPROP_SETTER) + (sprop->hasSetterValue() ? 
js_InternalCall(cx, obj, js_CastAsObjectJSVal(wp->setter), 1, vp, vp) @@ -771,7 +772,7 @@ js_watch_set_wrapper(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, static bool IsWatchedProperty(JSContext *cx, JSScopeProperty *sprop) { - if (sprop->attrs & JSPROP_SETTER) { + if (sprop->hasSetterValue()) { JSObject *funobj = sprop->setterObject(); if (!funobj->isFunction()) return false; @@ -845,7 +846,7 @@ JS_SetWatchPoint(JSContext *cx, JSObject *obj, jsval idval, if (origobj != obj && !obj->checkAccess(cx, propid, JSACC_WATCH, &v, &attrs)) return JS_FALSE; - if (!OBJ_IS_NATIVE(obj)) { + if (!obj->isNative()) { JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_CANT_WATCH, OBJ_GET_CLASS(cx, obj)->name); return JS_FALSE; @@ -873,13 +874,13 @@ JS_SetWatchPoint(JSContext *cx, JSObject *obj, jsval idval, uintN attrs, flags; intN shortid; - if (OBJ_IS_NATIVE(pobj)) { + if (pobj->isNative()) { value = SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(pobj)) ? LOCKED_OBJ_GET_SLOT(pobj, sprop->slot) : JSVAL_VOID; getter = sprop->getter(); setter = sprop->setter(); - attrs = sprop->attrs; + attrs = sprop->attributes(); flags = sprop->getFlags(); shortid = sprop->shortid; } else { @@ -911,7 +912,7 @@ JS_SetWatchPoint(JSContext *cx, JSObject *obj, jsval idval, wp = FindWatchPoint(rt, OBJ_SCOPE(obj), propid); if (!wp) { DBG_UNLOCK(rt); - watcher = js_WrapWatchedSetter(cx, propid, sprop->attrs, sprop->setter()); + watcher = js_WrapWatchedSetter(cx, propid, sprop->attributes(), sprop->setter()); if (!watcher) { ok = JS_FALSE; goto out; @@ -930,7 +931,7 @@ JS_SetWatchPoint(JSContext *cx, JSObject *obj, jsval idval, wp->flags = JSWP_LIVE; /* XXXbe nest in obj lock here */ - sprop = js_ChangeNativePropertyAttrs(cx, obj, sprop, 0, sprop->attrs, + sprop = js_ChangeNativePropertyAttrs(cx, obj, sprop, 0, sprop->attributes(), sprop->getter(), watcher); if (!sprop) { /* Self-link so DropWatchPointAndUnlock can JS_REMOVE_LINK it. */ @@ -1226,30 +1227,7 @@ JS_GetFrameCallObject(JSContext *cx, JSStackFrame *fp) JS_PUBLIC_API(JSObject *) JS_GetFrameThis(JSContext *cx, JSStackFrame *fp) { - if (fp->flags & JSFRAME_COMPUTED_THIS) - return JSVAL_TO_OBJECT(fp->thisv); /* JSVAL_COMPUTED_THIS invariant */ - - /* js_ComputeThis gets confused if fp != cx->fp, so set it aside. 
*/ - JSStackFrame *afp = js_GetTopStackFrame(cx); - JSGCReachableFrame reachable; - if (afp != fp) { - if (afp) { - cx->fp = fp; - cx->pushGCReachableFrame(reachable, afp); - } - } else { - afp = NULL; - } - - if (fp->argv) - fp->thisv = OBJECT_TO_JSVAL(js_ComputeThis(cx, JS_TRUE, fp->argv)); - - if (afp) { - cx->fp = afp; - cx->popGCReachableFrame(); - } - - return JSVAL_TO_OBJECT(fp->thisv); + return fp->getThisObject(cx); } JS_PUBLIC_API(JSFunction *) @@ -1264,7 +1242,7 @@ JS_GetFrameFunctionObject(JSContext *cx, JSStackFrame *fp) if (!fp->fun) return NULL; - JS_ASSERT(HAS_FUNCTION_CLASS(fp->callee())); + JS_ASSERT(fp->callee()->isFunction()); JS_ASSERT(fp->callee()->getPrivate() == fp->fun); return fp->callee(); } @@ -1466,7 +1444,7 @@ JS_GetPropertyDesc(JSContext *cx, JSObject *obj, JSScopeProperty *sprop, pd->id = ID_TO_VALUE(sprop->id); JSBool wasThrowing = cx->throwing; - JSAutoTempValueRooter lastException(cx, cx->exception); + AutoValueRooter lastException(cx, cx->exception); cx->throwing = JS_FALSE; if (!js_GetProperty(cx, obj, sprop->id, &pd->value)) { @@ -1485,9 +1463,9 @@ JS_GetPropertyDesc(JSContext *cx, JSObject *obj, JSScopeProperty *sprop, if (wasThrowing) cx->exception = lastException.value(); - pd->flags |= ((sprop->attrs & JSPROP_ENUMERATE) ? JSPD_ENUMERATE : 0) - | ((sprop->attrs & JSPROP_READONLY) ? JSPD_READONLY : 0) - | ((sprop->attrs & JSPROP_PERMANENT) ? JSPD_PERMANENT : 0); + pd->flags |= (sprop->enumerable() ? JSPD_ENUMERATE : 0) + | (!sprop->writable() ? JSPD_READONLY : 0) + | (!sprop->configurable() ? JSPD_PERMANENT : 0); pd->spare = 0; if (sprop->getter() == js_GetCallArg) { pd->slot = sprop->shortid; @@ -1523,7 +1501,7 @@ JS_GetPropertyDescArray(JSContext *cx, JSObject *obj, JSPropertyDescArray *pda) JSScopeProperty *sprop; clasp = OBJ_GET_CLASS(cx, obj); - if (!OBJ_IS_NATIVE(obj) || (clasp->flags & JSCLASS_NEW_ENUMERATE)) { + if (!obj->isNative() || (clasp->flags & JSCLASS_NEW_ENUMERATE)) { JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_CANT_DESCRIBE_PROPS, clasp->name); return JS_FALSE; @@ -1674,7 +1652,7 @@ JS_GetObjectTotalSize(JSContext *cx, JSObject *obj) nbytes += ((uint32)obj->dslots[-1] - JS_INITIAL_NSLOTS + 1) * sizeof obj->dslots[0]; } - if (OBJ_IS_NATIVE(obj)) { + if (obj->isNative()) { scope = OBJ_SCOPE(obj); if (!scope->isSharedEmpty()) { nbytes += sizeof *scope; diff --git a/js/src/jsdtoa.cpp b/js/src/jsdtoa.cpp index 111cb5dec9d..b181142fce8 100644 --- a/js/src/jsdtoa.cpp +++ b/js/src/jsdtoa.cpp @@ -49,10 +49,7 @@ #include "jsnum.h" #include "jsbit.h" #include "jslibmath.h" - -#ifdef JS_THREADSAFE -#include "jslock.h" -#endif +#include "jscntxt.h" #ifdef IS_LITTLE_ENDIAN #define IEEE_8087 @@ -78,46 +75,11 @@ #endif */ -#ifdef JS_THREADSAFE -static PRLock *dtoalock; -static JSBool _dtoainited = JS_FALSE; - -#define LOCK_DTOA() PR_Lock(dtoalock); -#define UNLOCK_DTOA() PR_Unlock(dtoalock) -#else -#define LOCK_DTOA() -#define UNLOCK_DTOA() -#endif +#define NO_GLOBAL_STATE +#define MALLOC js_malloc +#define FREE js_free #include "dtoa.c" -JS_FRIEND_API(JSBool) -js_InitDtoa() -{ -#ifdef JS_THREADSAFE - if (!_dtoainited) { - dtoalock = PR_NewLock(); - JS_ASSERT(dtoalock); - _dtoainited = JS_TRUE; - } - - return (dtoalock != 0); -#else - return JS_TRUE; -#endif -} - -JS_FRIEND_API(void) -js_FinishDtoa() -{ -#ifdef JS_THREADSAFE - if (_dtoainited) { - PR_DestroyLock(dtoalock); - dtoalock = NULL; - _dtoainited = JS_FALSE; - } -#endif -} - /* Mapping of JSDToStrMode -> js_dtoa mode */ static const uint8 dtoaModes[] = { 0, /* DTOSTR_STANDARD 
*/ @@ -126,20 +88,19 @@ static const uint8 dtoaModes[] = { 2, /* DTOSTR_EXPONENTIAL, */ 2}; /* DTOSTR_PRECISION */ -JS_FRIEND_API(double) -JS_strtod(const char *s00, char **se, int *err) +double +js_strtod_harder(DtoaState *state, const char *s00, char **se, int *err) { double retval; if (err) *err = 0; - LOCK_DTOA(); - retval = _strtod(s00, se); - UNLOCK_DTOA(); + retval = _strtod(state, s00, se); return retval; } -JS_FRIEND_API(char *) -JS_dtostr(char *buffer, size_t bufferSize, JSDToStrMode mode, int precision, double dinput) +char * +js_dtostr(DtoaState *state, char *buffer, size_t bufferSize, JSDToStrMode mode, int precision, + double dinput) { U d; int decPt; /* Offset of decimal point from first digit */ @@ -159,24 +120,20 @@ JS_dtostr(char *buffer, size_t bufferSize, JSDToStrMode mode, int precision, dou if (mode == DTOSTR_FIXED && (dinput >= 1e21 || dinput <= -1e21)) mode = DTOSTR_STANDARD; - LOCK_DTOA(); dval(d) = dinput; - numBegin = dtoa(d, dtoaModes[mode], precision, &decPt, &sign, &numEnd); + numBegin = dtoa(PASS_STATE d, dtoaModes[mode], precision, &decPt, &sign, &numEnd); if (!numBegin) { - UNLOCK_DTOA(); return NULL; } nDigits = numEnd - numBegin; JS_ASSERT((size_t) nDigits <= bufferSize - 2); if ((size_t) nDigits > bufferSize - 2) { - UNLOCK_DTOA(); return NULL; } memcpy(buffer + 2, numBegin, nDigits); - freedtoa(numBegin); - UNLOCK_DTOA(); + freedtoa(PASS_STATE numBegin); numBegin = buffer + 2; /* +2 leaves space for sign and/or decimal point */ numEnd = numBegin + nDigits; *numEnd = '\0'; @@ -353,8 +310,8 @@ static uint32 quorem2(Bigint *b, int32 k) #define DTOBASESTR_BUFFER_SIZE 1078 #define BASEDIGIT(digit) ((char)(((digit) >= 10) ? 'a' - 10 + (digit) : '0' + (digit))) -JS_FRIEND_API(char *) -JS_dtobasestr(int base, double dinput) +char * +js_dtobasestr(DtoaState *state, int base, double dinput) { U d; char *buffer; /* The output string */ @@ -386,7 +343,6 @@ JS_dtobasestr(int base, double dinput) return buffer; } - LOCK_DTOA(); /* Output the integer part of d with the digits in reverse order. */ pInt = p; dval(di) = floor(dval(d)); @@ -404,14 +360,13 @@ JS_dtobasestr(int base, double dinput) } else { int e; int bits; /* Number of significant bits in di; not used. */ - Bigint *b = d2b(di, &e, &bits); + Bigint *b = d2b(PASS_STATE di, &e, &bits); if (!b) goto nomem1; - b = lshift(b, e); + b = lshift(PASS_STATE b, e); if (!b) { nomem1: - Bfree(b); - UNLOCK_DTOA(); + Bfree(PASS_STATE b); js_free(buffer); return NULL; } @@ -420,7 +375,7 @@ JS_dtobasestr(int base, double dinput) JS_ASSERT(digit < (uint32)base); *p++ = BASEDIGIT(digit); } while (b->wds); - Bfree(b); + Bfree(PASS_STATE b); } /* Reverse the digits of the integer part of d. */ q = p-1; @@ -440,15 +395,14 @@ JS_dtobasestr(int base, double dinput) b = s = mlo = mhi = NULL; *p++ = '.'; - b = d2b(df, &e, &bbits); + b = d2b(PASS_STATE df, &e, &bbits); if (!b) { nomem2: - Bfree(b); - Bfree(s); + Bfree(PASS_STATE b); + Bfree(PASS_STATE s); if (mlo != mhi) - Bfree(mlo); - Bfree(mhi); - UNLOCK_DTOA(); + Bfree(PASS_STATE mlo); + Bfree(PASS_STATE mhi); js_free(buffer); return NULL; } @@ -463,7 +417,7 @@ JS_dtobasestr(int base, double dinput) s2 += Bias + P; /* 1/2^s2 = (nextDouble(d) - d)/2 */ JS_ASSERT(-s2 < e); - mlo = i2b(1); + mlo = i2b(PASS_STATE 1); if (!mlo) goto nomem2; mhi = mlo; @@ -475,17 +429,17 @@ JS_dtobasestr(int base, double dinput) /* The special case. Here we want to be within a quarter of the last input significant digit instead of one half of it when the output string's value is less than d. 
*/ s2 += Log2P; - mhi = i2b(1<<Log2P); + mhi = i2b(PASS_STATE 1<<Log2P); if (!mhi) goto nomem2; } - b = lshift(b, e + s2); + b = lshift(PASS_STATE b, e + s2); if (!b) goto nomem2; - s = i2b(1); + s = i2b(PASS_STATE 1); if (!s) goto nomem2; - s = lshift(s, s2); + s = lshift(PASS_STATE s, s2); if (!s) goto nomem2; - b = multadd(b, base, 0); + b = multadd(PASS_STATE b, base, 0); if (!b) goto nomem2; digit = quorem2(b, s2); if (mlo == mhi) - mlo = mhi = multadd(mlo, base, 0); + mlo = mhi = multadd(PASS_STATE mlo, base, 0); else { - mlo = multadd(mlo, base, 0); + mlo = multadd(PASS_STATE mlo, base, 0); if (!mlo) goto nomem2; - mhi = multadd(mhi, base, 0); + mhi = multadd(PASS_STATE mhi, base, 0); if (!mhi) goto nomem2; } j = cmp(b, mlo); - delta = diff(s, mhi); + delta = diff(PASS_STATE s, mhi); if (!delta) goto nomem2; j1 = delta->sign ? 1 : cmp(b, delta); - Bfree(delta); + Bfree(PASS_STATE delta); /* j1 is b/2^s2 compared with 1 - mhi/2^s2. */ #ifndef ROUND_BIASED @@ -542,7 +496,7 @@ JS_dtobasestr(int base, double dinput) if (j1 > 0) { /* Either dig or dig+1 would work here as the least significant digit. Use whichever would produce an output value closer to d. */ - b = lshift(b, 1); + b = lshift(PASS_STATE b, 1); if (!b) goto nomem2; j1 = cmp(b, s); @@ -558,15 +512,26 @@ JS_dtobasestr(int base, double dinput) JS_ASSERT(digit < (uint32)base); *p++ = BASEDIGIT(digit); } while (!done); - Bfree(b); - Bfree(s); + Bfree(PASS_STATE b); + Bfree(PASS_STATE s); if (mlo != mhi) - Bfree(mlo); - Bfree(mhi); + Bfree(PASS_STATE mlo); + Bfree(PASS_STATE mhi); } JS_ASSERT(p < buffer + DTOBASESTR_BUFFER_SIZE); *p = '\0'; - UNLOCK_DTOA(); } return buffer; } + +DtoaState * +js_NewDtoaState() +{ + return newdtoa(); +} + +void +js_DestroyDtoaState(DtoaState *state) +{ + destroydtoa(state); +} diff --git a/js/src/jsdtoa.h b/js/src/jsdtoa.h index b074c9aba70..6bd8a170989 100644 --- a/js/src/jsdtoa.h +++ b/js/src/jsdtoa.h @@ -48,23 +48,30 @@ JS_BEGIN_EXTERN_C +struct DtoaState; + +DtoaState * +js_NewDtoaState(); + +void +js_DestroyDtoaState(DtoaState *state); + /* - * JS_strtod() returns as a double-precision floating-point number - * the value represented by the character string pointed to by - * s00. The string is scanned up to the first unrecognized - * character. - * If the value of se is not (char **)NULL, a pointer to - * the character terminating the scan is returned in the location pointed - * to by se. If no number can be formed, se is set to s00r, and - * zero is returned. + * js_strtod_harder() returns as a double-precision floating-point number the + * value represented by the character string pointed to by s00. The string is + * scanned up to the first unrecognized character. + * + * If se is not NULL, *se receives a pointer to the character terminating the + * scan. If no number can be formed, *se receives a pointer to the first + * unparseable character in s00, and zero is returned. * * *err is set to zero on success; it's set to JS_DTOA_ERANGE on range * errors and JS_DTOA_ENOMEM on memory failure. */ #define JS_DTOA_ERANGE 1 #define JS_DTOA_ENOMEM 2 -JS_FRIEND_API(double) -JS_strtod(const char *s00, char **se, int *err); +double +js_strtod_harder(DtoaState *state, const char *s00, char **se, int *err); /* * Modes for converting floating-point numbers to strings. @@ -102,8 +109,9 @@ typedef enum JSDToStrMode { * * Return NULL if out of memory. */ -JS_FRIEND_API(char *) -JS_dtostr(char *buffer, size_t bufferSize, JSDToStrMode mode, int precision, double dval); +char * +js_dtostr(DtoaState *state, char *buffer, size_t bufferSize, JSDToStrMode mode, int precision, + double dval); /* * Convert d to a string in the given base. The integral part of d will be printed exactly @@ -116,15 +124,8 @@ JS_dtostr(char *buffer, size_t bufferSize, JSDToStrMode mode, int precision, dou * * Return NULL if out of memory. If the result is not NULL, it must be released via free(). */ -JS_FRIEND_API(char *) -JS_dtobasestr(int base, double d); - -/* - * Clean up any persistent RAM allocated during the execution of DtoA - * routines, and remove any locks that might have been created.
- */ -JS_FRIEND_API(JSBool) js_InitDtoa(void); -JS_FRIEND_API(void) js_FinishDtoa(void); +char * +js_dtobasestr(DtoaState *state, int base, double d); JS_END_EXTERN_C diff --git a/js/src/jsemit.cpp b/js/src/jsemit.cpp index 378c20a0572..d4c0b78286d 100644 --- a/js/src/jsemit.cpp +++ b/js/src/jsemit.cpp @@ -79,6 +79,8 @@ #define SRCNOTE_SIZE(n) ((n) * sizeof(jssrcnote)) #define TRYNOTE_SIZE(n) ((n) * sizeof(JSTryNote)) +using namespace js; + static JSBool NewTryNote(JSContext *cx, JSCodeGenerator *cg, JSTryNoteKind kind, uintN stackDepth, size_t start, size_t end); @@ -184,14 +186,14 @@ UpdateDepth(JSContext *cx, JSCodeGenerator *cg, ptrdiff_t target) JS_ASSERT(cg->stackDepth >= 0); if (cg->stackDepth < 0) { char numBuf[12]; - JSTokenStream *ts; + TokenStream *ts; JS_snprintf(numBuf, sizeof numBuf, "%d", target); ts = &cg->compiler->tokenStream; JS_ReportErrorFlagsAndNumber(cx, JSREPORT_WARNING, js_GetErrorMessage, NULL, JSMSG_STACK_UNDERFLOW, - ts->filename ? ts->filename : "stdin", + ts->getFilename() ? ts->getFilename() : "stdin", numBuf); } ndefs = cs->ndefs; @@ -202,7 +204,7 @@ UpdateDepth(JSContext *cx, JSCodeGenerator *cg, ptrdiff_t target) JS_ASSERT(op == JSOP_ENTERBLOCK); JS_ASSERT(nuses == 0); blockObj = cg->objectList.lastbox->object; - JS_ASSERT(STOBJ_GET_CLASS(blockObj) == &js_BlockClass); + JS_ASSERT(blockObj->getClass() == &js_BlockClass); JS_ASSERT(JSVAL_IS_VOID(blockObj->fslots[JSSLOT_BLOCK_DEPTH])); OBJ_SET_BLOCK_DEPTH(cx, blockObj, cg->stackDepth); @@ -893,7 +895,7 @@ OptimizeSpanDeps(JSContext *cx, JSCodeGenerator *cg) if (growth) { #ifdef DEBUG_brendan - JSTokenStream *ts = &cg->compiler->tokenStream; + TokenStream *ts = &cg->compiler->tokenStream; printf("%s:%u: %u/%u jumps extended in %d passes (%d=%d+%d)\n", ts->filename ? ts->filename : "stdin", cg->firstLine, @@ -1835,9 +1837,7 @@ AdjustBlockSlot(JSContext *cx, JSCodeGenerator *cg, jsint slot) if (cg->flags & TCF_IN_FUNCTION) { slot += cg->fun->u.i.nvars; if ((uintN) slot >= SLOTNO_LIMIT) { - js_ReportCompileErrorNumber(cx, CG_TS(cg), NULL, - JSREPORT_ERROR, - JSMSG_TOO_MANY_LOCALS); + ReportCompileErrorNumber(cx, CG_TS(cg), NULL, JSREPORT_ERROR, JSMSG_TOO_MANY_LOCALS); slot = -1; } } @@ -1859,7 +1859,7 @@ EmitEnterBlock(JSContext *cx, JSParseNode *pn, JSCodeGenerator *cg) for (uintN slot = JSSLOT_FREE(&js_BlockClass), limit = slot + OBJ_BLOCK_COUNT(cx, blockObj); slot < limit; slot++) { - jsval v = STOBJ_GET_SLOT(blockObj, slot); + jsval v = blockObj->getSlot(slot); /* Beware the empty destructuring dummy. */ if (JSVAL_IS_VOID(v)) { @@ -2101,7 +2101,7 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn) JSObject *scopeobj = (cg->flags & TCF_IN_FUNCTION) ? FUN_OBJECT(cg->fun)->getParent() : cg->scopeChain; - if (scopeobj != caller->varobj(cx)) + if (scopeobj != cg->compiler->callerVarObj) return JS_TRUE; /* @@ -2196,7 +2196,7 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn) JSCodeGenerator *evalcg = (JSCodeGenerator *) tc; JS_ASSERT(evalcg->flags & TCF_COMPILE_N_GO); - JS_ASSERT(caller->fun && caller->varobj(cx) == evalcg->scopeChain); + JS_ASSERT(caller->fun && cg->compiler->callerVarObj == evalcg->scopeChain); /* * Don't generate upvars on the left side of a for loop. 
See @@ -2686,10 +2686,10 @@ static JSBool EmitSpecialPropOp(JSContext *cx, JSParseNode *pn, JSOp op, JSCodeGenerator *cg) { /* - * Special case for obj.__proto__, obj.__parent__, obj.__count__ to - * deoptimize away from fast paths in the interpreter and trace recorder, - * which skip dense array instances by going up to Array.prototype before - * looking up the property name. + * Special case for obj.__proto__ and obj.__parent__ to deoptimize away + * from fast paths in the interpreter and trace recorder, which skip dense + * array instances by going up to Array.prototype before looking up the + * property name. */ JSAtomListElement *ale = cg->atomList.add(cg->compiler, pn->pn_atom); if (!ale) @@ -2711,11 +2711,10 @@ EmitPropOp(JSContext *cx, JSParseNode *pn, JSOp op, JSCodeGenerator *cg, JS_ASSERT(pn->pn_arity == PN_NAME); pn2 = pn->maybeExpr(); - /* Special case deoptimization on __proto__, __count__ and __parent__. */ + /* Special case deoptimization on __proto__ and __parent__. */ if ((op == JSOP_GETPROP || op == JSOP_CALLPROP) && (pn->pn_atom == cx->runtime->atomState.protoAtom || - pn->pn_atom == cx->runtime->atomState.parentAtom || - pn->pn_atom == cx->runtime->atomState.countAtom)) { + pn->pn_atom == cx->runtime->atomState.parentAtom)) { if (pn2 && !js_EmitTree(cx, cg, pn2)) return JS_FALSE; return EmitSpecialPropOp(cx, pn, callContext ? JSOP_CALLELEM : JSOP_GETELEM, cg); @@ -2802,13 +2801,12 @@ EmitPropOp(JSContext *cx, JSParseNode *pn, JSOp op, JSCodeGenerator *cg, } /* - * Special case deoptimization on __proto__, __count__ and - * __parent__, as above. + * Special case deoptimization on __proto__ and __parent__, as + * above. */ if (pndot->pn_arity == PN_NAME && (pndot->pn_atom == cx->runtime->atomState.protoAtom || - pndot->pn_atom == cx->runtime->atomState.parentAtom || - pndot->pn_atom == cx->runtime->atomState.countAtom)) { + pndot->pn_atom == cx->runtime->atomState.parentAtom)) { if (!EmitSpecialPropOp(cx, pndot, JSOP_GETELEM, cg)) return JS_FALSE; } else if (!EmitAtomOp(cx, pndot, PN_OP(pndot), cg)) { @@ -3936,8 +3934,7 @@ EmitGroupAssignment(JSContext *cx, JSCodeGenerator *cg, JSOp prologOp, depth = limit = (uintN) cg->stackDepth; for (pn = rhs->pn_head; pn; pn = pn->pn_next) { if (limit == JS_BIT(16)) { - js_ReportCompileErrorNumber(cx, CG_TS(cg), rhs, JSREPORT_ERROR, - JSMSG_ARRAY_INIT_TOO_BIG); + ReportCompileErrorNumber(cx, CG_TS(cg), rhs, JSREPORT_ERROR, JSMSG_ARRAY_INIT_TOO_BIG); return JS_FALSE; } @@ -4335,7 +4332,7 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn) JSSrcNoteType noteType; jsbytecode *pc; JSOp op; - JSTokenType type; + TokenKind type; uint32 argc; #if JS_HAS_SHARP_VARS jsint sharpnum; @@ -4697,7 +4694,7 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn) pn3 = pn2->pn_left; type = PN_TYPE(pn3); cg->flags |= TCF_IN_FOR_INIT; - if (TOKEN_TYPE_IS_DECL(type) && !js_EmitTree(cx, cg, pn3)) + if (TokenKindIsDecl(type) && !js_EmitTree(cx, cg, pn3)) return JS_FALSE; cg->flags &= ~TCF_IN_FOR_INIT; @@ -4806,8 +4803,8 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn) op = PN_OP(pn3); } if (pn3->isConst()) { - js_ReportCompileErrorNumber(cx, CG_TS(cg), pn3, JSREPORT_ERROR, - JSMSG_BAD_FOR_LEFTSIDE); + ReportCompileErrorNumber(cx, CG_TS(cg), pn3, JSREPORT_ERROR, + JSMSG_BAD_FOR_LEFTSIDE); return JS_FALSE; } if (pn3->pn_cookie != FREE_UPVAR_COOKIE) { @@ -4919,7 +4916,7 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn) if (op == JSOP_POP) { if (!js_EmitTree(cx, cg, pn3)) return JS_FALSE; - if 
(TOKEN_TYPE_IS_DECL(pn3->pn_type)) { + if (TokenKindIsDecl(PN_TYPE(pn3))) { /* * Check whether a destructuring-initialized var decl * was optimized to a group assignment. If so, we do @@ -5488,9 +5485,9 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn) #if JS_HAS_GENERATORS case TOK_YIELD: if (!(cg->flags & TCF_IN_FUNCTION)) { - js_ReportCompileErrorNumber(cx, CG_TS(cg), pn, JSREPORT_ERROR, - JSMSG_BAD_RETURN_OR_YIELD, - js_yield_str); + ReportCompileErrorNumber(cx, CG_TS(cg), pn, JSREPORT_ERROR, + JSMSG_BAD_RETURN_OR_YIELD, + js_yield_str); return JS_FALSE; } if (pn->pn_kid) { @@ -5628,10 +5625,9 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn) cg->topStmt->type != STMT_LABEL || cg->topStmt->update < CG_OFFSET(cg))) { CG_CURRENT_LINE(cg) = pn2->pn_pos.begin.lineno; - if (!js_ReportCompileErrorNumber(cx, CG_TS(cg), pn2, - JSREPORT_WARNING | - JSREPORT_STRICT, - JSMSG_USELESS_EXPR)) { + if (!ReportCompileErrorNumber(cx, CG_TS(cg), pn2, + JSREPORT_WARNING | JSREPORT_STRICT, + JSMSG_USELESS_EXPR)) { return JS_FALSE; } } else { @@ -5798,13 +5794,9 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn) * x getter = y where x is a local or let variable is not * supported. */ - js_ReportCompileErrorNumber(cx, - TS(cg->compiler), - pn2, JSREPORT_ERROR, - JSMSG_BAD_GETTER_OR_SETTER, - (op == JSOP_GETTER) - ? js_getter_str - : js_setter_str); + ReportCompileErrorNumber(cx, TS(cg->compiler), pn2, JSREPORT_ERROR, + JSMSG_BAD_GETTER_OR_SETTER, + (op == JSOP_GETTER) ? js_getter_str : js_setter_str); return JS_FALSE; } @@ -6635,8 +6627,7 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn) #endif #if JS_HAS_DESTRUCTURING_SHORTHAND if (pn->pn_xflags & PNX_DESTRUCT) { - js_ReportCompileErrorNumber(cx, CG_TS(cg), pn, JSREPORT_ERROR, - JSMSG_BAD_OBJECT_INIT); + ReportCompileErrorNumber(cx, CG_TS(cg), pn, JSREPORT_ERROR, JSMSG_BAD_OBJECT_INIT); return JS_FALSE; } #endif diff --git a/js/src/jsemit.h b/js/src/jsemit.h index 94bcfc1af88..b7124cee5f3 100644 --- a/js/src/jsemit.h +++ b/js/src/jsemit.h @@ -201,9 +201,10 @@ struct JSTreeContext { /* tree context for semantic checks */ JSTreeContext(JSCompiler *jsc) : flags(0), ngvars(0), bodyid(0), blockidGen(0), topStmt(NULL), topScopeStmt(NULL), blockChain(NULL), blockNode(NULL), - compiler(jsc), scopeChain(NULL), parent(NULL), staticLevel(0), + compiler(jsc), scopeChain(NULL), parent(jsc->tc), staticLevel(0), funbox(NULL), functionList(NULL), sharpSlotBase(-1) { + jsc->tc = this; JS_SCOPE_DEPTH_METERING(scopeDepth = maxScopeDepth = 0); } @@ -213,6 +214,7 @@ struct JSTreeContext { /* tree context for semantic checks */ * cases, we store uint16(-1) in maxScopeDepth. */ ~JSTreeContext() { + compiler->tc = this->parent; JS_SCOPE_DEPTH_METERING_IF((maxScopeDepth != uint16(-1)), JS_BASIC_STATS_ACCUM(&compiler ->context diff --git a/js/src/jsexn.cpp b/js/src/jsexn.cpp index 670efd3196a..80daa9223a7 100644 --- a/js/src/jsexn.cpp +++ b/js/src/jsexn.cpp @@ -63,6 +63,8 @@ #include "jsscript.h" #include "jsstaticcheck.h" +using namespace js; + /* Forward declarations for js_ErrorClass's initializer. */ static JSBool Exception(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval); @@ -349,7 +351,7 @@ InitExnPrivate(JSContext *cx, JSObject *exnObject, JSString *message, * Construct a new copy of the error report struct. We can't use the * error report struct that was passed in, because it's allocated on * the stack, and also because it may point to transient data in the - * JSTokenStream. 
+ * TokenStream. */ priv->errorReport = CopyErrorReport(cx, report); if (!priv->errorReport) { @@ -824,131 +826,119 @@ exn_toSource(JSContext *cx, uintN argc, jsval *vp) JSObject *obj; JSString *name, *message, *filename, *lineno_as_str, *result; jsval localroots[3] = {JSVAL_NULL, JSVAL_NULL, JSVAL_NULL}; - JSTempValueRooter tvr; - JSBool ok; uint32 lineno; size_t lineno_length, name_length, message_length, filename_length, length; jschar *chars, *cp; obj = JS_THIS_OBJECT(cx, vp); if (!obj || !obj->getProperty(cx, ATOM_TO_JSID(cx->runtime->atomState.nameAtom), vp)) - return JS_FALSE; + return false; name = js_ValueToString(cx, *vp); if (!name) - return JS_FALSE; + return false; *vp = STRING_TO_JSVAL(name); - MUST_FLOW_THROUGH("out"); - JS_PUSH_TEMP_ROOT(cx, 3, localroots, &tvr); + { + AutoArrayRooter tvr(cx, JS_ARRAY_LENGTH(localroots), localroots); #ifdef __GNUC__ - message = filename = NULL; + message = filename = NULL; #endif - ok = JS_GetProperty(cx, obj, js_message_str, &localroots[0]) && - (message = js_ValueToSource(cx, localroots[0])); - if (!ok) - goto out; - localroots[0] = STRING_TO_JSVAL(message); - - ok = JS_GetProperty(cx, obj, js_fileName_str, &localroots[1]) && - (filename = js_ValueToSource(cx, localroots[1])); - if (!ok) - goto out; - localroots[1] = STRING_TO_JSVAL(filename); - - ok = JS_GetProperty(cx, obj, js_lineNumber_str, &localroots[2]); - if (!ok) - goto out; - lineno = js_ValueToECMAUint32 (cx, &localroots[2]); - ok = !JSVAL_IS_NULL(localroots[2]); - if (!ok) - goto out; - - if (lineno != 0) { - lineno_as_str = js_ValueToString(cx, localroots[2]); - if (!lineno_as_str) { - ok = JS_FALSE; - goto out; + if (!JS_GetProperty(cx, obj, js_message_str, &localroots[0]) || + !(message = js_ValueToSource(cx, localroots[0]))) { + return false; } - lineno_length = lineno_as_str->length(); - } else { - lineno_as_str = NULL; - lineno_length = 0; - } + localroots[0] = STRING_TO_JSVAL(message); - /* Magic 8, for the characters in ``(new ())''. */ - name_length = name->length(); - message_length = message->length(); - length = 8 + name_length + message_length; + if (!JS_GetProperty(cx, obj, js_fileName_str, &localroots[1]) || + !(filename = js_ValueToSource(cx, localroots[1]))) { + return false; + } + localroots[1] = STRING_TO_JSVAL(filename); - filename_length = filename->length(); - if (filename_length != 0) { - /* append filename as ``, {filename}'' */ - length += 2 + filename_length; + if (!JS_GetProperty(cx, obj, js_lineNumber_str, &localroots[2])) + return false; + lineno = js_ValueToECMAUint32 (cx, &localroots[2]); + if (JSVAL_IS_NULL(localroots[2])) + return false; + + if (lineno != 0) { + lineno_as_str = js_ValueToString(cx, localroots[2]); + if (!lineno_as_str) + return false; + lineno_length = lineno_as_str->length(); + } else { + lineno_as_str = NULL; + lineno_length = 0; + } + + /* Magic 8, for the characters in ``(new ())''. 
*/ + name_length = name->length(); + message_length = message->length(); + length = 8 + name_length + message_length; + + filename_length = filename->length(); + if (filename_length != 0) { + /* append filename as ``, {filename}'' */ + length += 2 + filename_length; + if (lineno_as_str) { + /* append lineno as ``, {lineno_as_str}'' */ + length += 2 + lineno_length; + } + } else { + if (lineno_as_str) { + /* + * no filename, but have line number, + * need to append ``, "", {lineno_as_str}'' + */ + length += 6 + lineno_length; + } + } + + cp = chars = (jschar *) cx->malloc((length + 1) * sizeof(jschar)); + if (!chars) + return false; + + *cp++ = '('; *cp++ = 'n'; *cp++ = 'e'; *cp++ = 'w'; *cp++ = ' '; + js_strncpy(cp, name->chars(), name_length); + cp += name_length; + *cp++ = '('; + if (message_length != 0) { + js_strncpy(cp, message->chars(), message_length); + cp += message_length; + } + + if (filename_length != 0) { + /* append filename as ``, {filename}'' */ + *cp++ = ','; *cp++ = ' '; + js_strncpy(cp, filename->chars(), filename_length); + cp += filename_length; + } else { + if (lineno_as_str) { + /* + * no filename, but have line number, + * need to append ``, "", {lineno_as_str}'' + */ + *cp++ = ','; *cp++ = ' '; *cp++ = '"'; *cp++ = '"'; + } + } if (lineno_as_str) { /* append lineno as ``, {lineno_as_str}'' */ - length += 2 + lineno_length; + *cp++ = ','; *cp++ = ' '; + js_strncpy(cp, lineno_as_str->chars(), lineno_length); + cp += lineno_length; } - } else { - if (lineno_as_str) { - /* - * no filename, but have line number, - * need to append ``, "", {lineno_as_str}'' - */ - length += 6 + lineno_length; + + *cp++ = ')'; *cp++ = ')'; *cp = 0; + + result = js_NewString(cx, chars, length); + if (!result) { + cx->free(chars); + return false; } + *vp = STRING_TO_JSVAL(result); + return true; } - - cp = chars = (jschar *) cx->malloc((length + 1) * sizeof(jschar)); - if (!chars) { - ok = JS_FALSE; - goto out; - } - - *cp++ = '('; *cp++ = 'n'; *cp++ = 'e'; *cp++ = 'w'; *cp++ = ' '; - js_strncpy(cp, name->chars(), name_length); - cp += name_length; - *cp++ = '('; - if (message_length != 0) { - js_strncpy(cp, message->chars(), message_length); - cp += message_length; - } - - if (filename_length != 0) { - /* append filename as ``, {filename}'' */ - *cp++ = ','; *cp++ = ' '; - js_strncpy(cp, filename->chars(), filename_length); - cp += filename_length; - } else { - if (lineno_as_str) { - /* - * no filename, but have line number, - * need to append ``, "", {lineno_as_str}'' - */ - *cp++ = ','; *cp++ = ' '; *cp++ = '"'; *cp++ = '"'; - } - } - if (lineno_as_str) { - /* append lineno as ``, {lineno_as_str}'' */ - *cp++ = ','; *cp++ = ' '; - js_strncpy(cp, lineno_as_str->chars(), lineno_length); - cp += lineno_length; - } - - *cp++ = ')'; *cp++ = ')'; *cp = 0; - - result = js_NewString(cx, chars, length); - if (!result) { - cx->free(chars); - ok = JS_FALSE; - goto out; - } - *vp = STRING_TO_JSVAL(result); - ok = JS_TRUE; - -out: - JS_POP_TEMP_ROOT(cx, &tvr); - return ok; } #endif @@ -998,8 +988,8 @@ js_InitExceptionClasses(JSContext *cx, JSObject *obj) if (!js_GetClassPrototype(cx, obj, JSProto_Object, &obj_proto)) return NULL; - memset(roots, 0, sizeof(roots)); - JSAutoTempValueRooter tvr(cx, JS_ARRAY_LENGTH(roots), roots); + PodArrayZero(roots); + AutoArrayRooter tvr(cx, JS_ARRAY_LENGTH(roots), roots); #ifdef __GNUC__ error_proto = NULL; /* quell GCC overwarning */ @@ -1109,7 +1099,6 @@ js_ErrorToException(JSContext *cx, const char *message, JSErrorReport *reportp, const JSErrorFormatString 
*errorString; JSExnType exn; jsval tv[4]; - JSTempValueRooter tvr; JSBool ok; JSObject *errProto, *errObject; JSString *messageStr, *filenameStr; @@ -1157,8 +1146,8 @@ js_ErrorToException(JSContext *cx, const char *message, JSErrorReport *reportp, cx->generatingError = JS_TRUE; /* Protect the newly-created strings below from nesting GCs. */ - memset(tv, 0, sizeof tv); - JS_PUSH_TEMP_ROOT(cx, JS_ARRAY_LENGTH(tv), tv, &tvr); + PodArrayZero(tv); + AutoArrayRooter tvr(cx, JS_ARRAY_LENGTH(tv), tv); /* * Try to get an appropriate prototype by looking up the corresponding @@ -1202,7 +1191,6 @@ js_ErrorToException(JSContext *cx, const char *message, JSErrorReport *reportp, reportp->flags |= JSREPORT_EXCEPTION; out: - JS_POP_TEMP_ROOT(cx, &tvr); cx->generatingError = JS_FALSE; return ok; } @@ -1213,20 +1201,18 @@ js_ReportUncaughtException(JSContext *cx) jsval exn; JSObject *exnObject; jsval roots[5]; - JSTempValueRooter tvr; JSErrorReport *reportp, report; JSString *str; const char *bytes; - JSBool ok; if (!JS_IsExceptionPending(cx)) - return JS_TRUE; + return true; if (!JS_GetPendingException(cx, &exn)) - return JS_FALSE; + return false; - memset(roots, 0, sizeof roots); - JS_PUSH_TEMP_ROOT(cx, JS_ARRAY_LENGTH(roots), roots, &tvr); + PodArrayZero(roots); + AutoArrayRooter tvr(cx, JS_ARRAY_LENGTH(roots), roots); /* * Because js_ValueToString below could error and an exception object @@ -1251,54 +1237,39 @@ js_ReportUncaughtException(JSContext *cx) } else { roots[1] = STRING_TO_JSVAL(str); bytes = js_GetStringBytes(cx, str); - if (!bytes) { - ok = JS_FALSE; - goto out; - } + if (!bytes) + return false; } - ok = JS_TRUE; - if (!reportp && - exnObject && - OBJ_GET_CLASS(cx, exnObject) == &js_ErrorClass) { + if (!reportp && exnObject && exnObject->getClass() == &js_ErrorClass) { const char *filename; uint32 lineno; - ok = JS_GetProperty(cx, exnObject, js_message_str, &roots[2]); - if (!ok) - goto out; + if (!JS_GetProperty(cx, exnObject, js_message_str, &roots[2])) + return false; if (JSVAL_IS_STRING(roots[2])) { bytes = js_GetStringBytes(cx, JSVAL_TO_STRING(roots[2])); - if (!bytes) { - ok = JS_FALSE; - goto out; - } + if (!bytes) + return false; } - ok = JS_GetProperty(cx, exnObject, js_fileName_str, &roots[3]); - if (!ok) - goto out; + if (!JS_GetProperty(cx, exnObject, js_fileName_str, &roots[3])) + return false; str = js_ValueToString(cx, roots[3]); - if (!str) { - ok = JS_FALSE; - goto out; - } + if (!str) + return false; filename = StringToFilename(cx, str); - if (!filename) { - ok = JS_FALSE; - goto out; - } + if (!filename) + return false; - ok = JS_GetProperty(cx, exnObject, js_lineNumber_str, &roots[4]); - if (!ok) - goto out; + if (!JS_GetProperty(cx, exnObject, js_lineNumber_str, &roots[4])) + return false; lineno = js_ValueToECMAUint32 (cx, &roots[4]); - ok = !JSVAL_IS_NULL(roots[4]); - if (!ok) - goto out; + if (JSVAL_IS_NULL(roots[4])) + return false; reportp = &report; - memset(&report, 0, sizeof report); + PodZero(&report); report.filename = filename; report.lineno = (uintN) lineno; } @@ -1316,7 +1287,5 @@ js_ReportUncaughtException(JSContext *cx) JS_ClearPendingException(cx); } -out: - JS_POP_TEMP_ROOT(cx, &tvr); - return ok; + return true; } diff --git a/js/src/jsfun.cpp b/js/src/jsfun.cpp index 132cd4e86e5..3d3c1f54ece 100644 --- a/js/src/jsfun.cpp +++ b/js/src/jsfun.cpp @@ -80,13 +80,14 @@ #endif #include "jsatominlines.h" +#include "jsobjinlines.h" using namespace js; static inline void SetOverriddenArgsLength(JSObject *obj) { - JS_ASSERT(STOBJ_GET_CLASS(obj) == 
&js_ArgumentsClass); + JS_ASSERT(obj->isArguments()); jsval v = obj->fslots[JSSLOT_ARGS_LENGTH]; v = INT_TO_JSVAL(JSVAL_TO_INT(v) | 1); @@ -97,27 +98,17 @@ SetOverriddenArgsLength(JSObject *obj) static inline void InitArgsLengthSlot(JSObject *obj, uint32 argc) { - JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_ArgumentsClass); + JS_ASSERT(obj->isArguments()); JS_ASSERT(argc <= JS_ARGS_LENGTH_MAX); JS_ASSERT(obj->fslots[JSSLOT_ARGS_LENGTH] == JSVAL_VOID); obj->fslots[JSSLOT_ARGS_LENGTH] = INT_TO_JSVAL(argc << 1); - JS_ASSERT(!js_IsOverriddenArgsLength(obj)); -} - -static inline uint32 -GetArgsLength(JSObject *obj) -{ - JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_ArgumentsClass); - - uint32 argc = uint32(JSVAL_TO_INT(obj->fslots[JSSLOT_ARGS_LENGTH])) >> 1; - JS_ASSERT(argc <= JS_ARGS_LENGTH_MAX); - return argc; + JS_ASSERT(!IsOverriddenArgsLength(obj)); } static inline void SetArgsPrivateNative(JSObject *argsobj, ArgsPrivateNative *apn) { - JS_ASSERT(STOBJ_GET_CLASS(argsobj) == &js_ArgumentsClass); + JS_ASSERT(argsobj->isArguments()); uintptr_t p = (uintptr_t) apn; argsobj->setPrivate((void*) (p | 2)); } @@ -167,7 +158,7 @@ js_GetArgsProperty(JSContext *cx, JSStackFrame *fp, jsid id, jsval *vp) JSObject *argsobj = JSVAL_TO_OBJECT(fp->argsobj); if (arg < fp->argc) { if (argsobj) { - jsval v = OBJ_GET_SLOT(cx, argsobj, JSSLOT_ARGS_COPY_START+arg); + jsval v = GetArgsSlot(argsobj, arg); if (v == JSVAL_HOLE) return argsobj->getProperty(cx, id, vp); } @@ -190,7 +181,7 @@ js_GetArgsProperty(JSContext *cx, JSStackFrame *fp, jsid id, jsval *vp) } } else if (id == ATOM_TO_JSID(cx->runtime->atomState.lengthAtom)) { JSObject *argsobj = JSVAL_TO_OBJECT(fp->argsobj); - if (argsobj && js_IsOverriddenArgsLength(argsobj)) + if (argsobj && IsOverriddenArgsLength(argsobj)) return argsobj->getProperty(cx, id, vp); *vp = INT_TO_JSVAL(jsint(fp->argc)); } @@ -200,12 +191,25 @@ js_GetArgsProperty(JSContext *cx, JSStackFrame *fp, jsid id, jsval *vp) static JSObject * NewArguments(JSContext *cx, JSObject *parent, uint32 argc, JSObject *callee) { - JSObject *argsobj = js_NewObject(cx, &js_ArgumentsClass, NULL, parent, 0); - if (!argsobj || !js_EnsureReservedSlots(cx, argsobj, argc)) + JSObject *proto; + if (!js_GetClassPrototype(cx, parent, JSProto_Object, &proto)) return NULL; + JSObject *argsobj = js_NewGCObject(cx); + if (!argsobj) + return NULL; + + /* Init immediately to avoid GC seeing a half-init'ed object. */ + argsobj->init(&js_ArgumentsClass, proto, parent, JSVAL_NULL); argsobj->fslots[JSSLOT_ARGS_CALLEE] = OBJECT_TO_JSVAL(callee); InitArgsLengthSlot(argsobj, argc); + + argsobj->map = cx->runtime->emptyArgumentsScope; + cx->runtime->emptyArgumentsScope->hold(); + + /* This must come after argsobj->map has been set. */ + if (!js_EnsureReservedSlots(cx, argsobj, argc)) + return NULL; return argsobj; } @@ -213,13 +217,11 @@ static void PutArguments(JSContext *cx, JSObject *argsobj, jsval *args) { uint32 argc = GetArgsLength(argsobj); - JS_LOCK_OBJ(cx, argsobj); for (uint32 i = 0; i != argc; ++i) { - jsval v = STOBJ_GET_SLOT(argsobj, JSSLOT_ARGS_COPY_START + i); + jsval v = argsobj->dslots[i]; if (v != JSVAL_HOLE) - STOBJ_SET_SLOT(argsobj, JSSLOT_ARGS_COPY_START + i, args[i]); + argsobj->dslots[i] = args[i]; } - JS_UNLOCK_OBJ(cx, argsobj); } JSObject * @@ -231,7 +233,7 @@ js_GetArgsObject(JSContext *cx, JSStackFrame *fp) */ JS_ASSERT(fp->fun); JS_ASSERT_IF(fp->fun->flags & JSFUN_HEAVYWEIGHT, - fp->varobj(js_ContainingCallStack(cx, fp))); + fp->varobj(cx->containingCallStack(fp))); /* Skip eval and debugger frames. 
*/ while (fp->flags & JSFRAME_SPECIAL) @@ -303,7 +305,7 @@ JS_DEFINE_CALLINFO_6(extern, OBJECT, js_Arguments, CONTEXT, OBJECT, UINT32, OBJE JSBool JS_FASTCALL js_PutArguments(JSContext *cx, JSObject *argsobj, jsval *args) { - JS_ASSERT(js_GetArgsPrivateNative(argsobj)); + JS_ASSERT(GetArgsPrivateNative(argsobj)); PutArguments(cx, argsobj, args); argsobj->setPrivate(NULL); return true; @@ -315,12 +317,12 @@ JS_DEFINE_CALLINFO_3(extern, BOOL, js_PutArguments, CONTEXT, OBJECT, JSVALPTR, 0 static JSBool args_delProperty(JSContext *cx, JSObject *obj, jsval idval, jsval *vp) { - JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_ArgumentsClass); + JS_ASSERT(obj->isArguments()); if (JSVAL_IS_INT(idval)) { uintN arg = uintN(JSVAL_TO_INT(idval)); if (arg < GetArgsLength(obj)) - OBJ_SET_SLOT(cx, obj, JSSLOT_ARGS_COPY_START + arg, JSVAL_HOLE); + SetArgsSlot(obj, arg, JSVAL_HOLE); } else if (idval == ATOM_KEY(cx->runtime->atomState.lengthAtom)) { SetOverriddenArgsLength(obj); } else if (idval == ATOM_KEY(cx->runtime->atomState.calleeAtom)) { @@ -351,7 +353,7 @@ WrapEscapingClosure(JSContext *cx, JSStackFrame *fp, JSObject *funobj, JSFunctio funobj, scopeChain); if (!wfunobj) return NULL; - JSAutoTempValueRooter tvr(cx, wfunobj); + AutoValueRooter tvr(cx, wfunobj); JSFunction *wfun = (JSFunction *) wfunobj; wfunobj->setPrivate(wfun); @@ -514,7 +516,7 @@ ArgGetter(JSContext *cx, JSObject *obj, jsval idval, jsval *vp) uintN arg = uintN(JSVAL_TO_INT(idval)); if (arg < GetArgsLength(obj)) { #ifdef JS_TRACER - ArgsPrivateNative *argp = js_GetArgsPrivateNative(obj); + ArgsPrivateNative *argp = GetArgsPrivateNative(obj); if (argp) { if (NativeToValue(cx, *vp, argp->typemap()[arg], &argp->argv[arg])) return true; @@ -527,13 +529,13 @@ ArgGetter(JSContext *cx, JSObject *obj, jsval idval, jsval *vp) if (fp) { *vp = fp->argv[arg]; } else { - jsval v = OBJ_GET_SLOT(cx, obj, JSSLOT_ARGS_COPY_START + arg); + jsval v = GetArgsSlot(obj, arg); if (v != JSVAL_HOLE) *vp = v; } } } else if (idval == ATOM_KEY(cx->runtime->atomState.lengthAtom)) { - if (!js_IsOverriddenArgsLength(obj)) + if (!IsOverriddenArgsLength(obj)) *vp = INT_TO_JSVAL(GetArgsLength(obj)); } else { JS_ASSERT(idval == ATOM_KEY(cx->runtime->atomState.calleeAtom)); @@ -599,7 +601,7 @@ ArgSetter(JSContext *cx, JSObject *obj, jsval idval, jsval *vp) if (!JS_ValueToId(cx, idval, &id)) return false; - JSAutoTempValueRooter tvr(cx); + AutoValueRooter tvr(cx); return js_DeleteProperty(cx, obj, id, tvr.addr()) && js_SetProperty(cx, obj, id, vp); } @@ -608,20 +610,17 @@ static JSBool args_resolve(JSContext *cx, JSObject *obj, jsval idval, uintN flags, JSObject **objp) { - JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_ArgumentsClass); + JS_ASSERT(obj->isArguments()); *objp = NULL; jsid id = 0; if (JSVAL_IS_INT(idval)) { uint32 arg = uint32(JSVAL_TO_INT(idval)); - if (arg < GetArgsLength(obj) && - OBJ_GET_SLOT(cx, obj, JSSLOT_ARGS_COPY_START + arg) != JSVAL_HOLE) { + if (arg < GetArgsLength(obj) && GetArgsSlot(obj, arg) != JSVAL_HOLE) id = INT_JSVAL_TO_JSID(idval); - } } else if (idval == ATOM_KEY(cx->runtime->atomState.lengthAtom)) { - if (!js_IsOverriddenArgsLength(obj)) + if (!IsOverriddenArgsLength(obj)) id = ATOM_TO_JSID(cx->runtime->atomState.lengthAtom); - } else if (idval == ATOM_KEY(cx->runtime->atomState.calleeAtom)) { if (obj->fslots[JSSLOT_ARGS_CALLEE] != JSVAL_HOLE) id = ATOM_TO_JSID(cx->runtime->atomState.calleeAtom); @@ -642,7 +641,7 @@ args_resolve(JSContext *cx, JSObject *obj, jsval idval, uintN flags, static JSBool args_enumerate(JSContext *cx, JSObject *obj) { - 
JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_ArgumentsClass); + JS_ASSERT(obj->isArguments()); /* * Trigger reflection in args_resolve using a series of js_LookupProperty @@ -676,10 +675,12 @@ args_enumerate(JSContext *cx, JSObject *obj) static void args_or_call_trace(JSTracer *trc, JSObject *obj) { - JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_ArgumentsClass || - STOBJ_GET_CLASS(obj) == &js_CallClass); - if (STOBJ_GET_CLASS(obj) == &js_ArgumentsClass && js_GetArgsPrivateNative(obj)) - return; + if (obj->isArguments()) { + if (GetArgsPrivateNative(obj)) + return; + } else { + JS_ASSERT(obj->getClass() == &js_CallClass); + } JSStackFrame *fp = (JSStackFrame *) obj->getPrivate(); if (fp && (fp->flags & JSFRAME_GENERATOR)) { @@ -694,7 +695,7 @@ args_or_call_trace(JSTracer *trc, JSObject *obj) static uint32 args_reserveSlots(JSContext *cx, JSObject *obj) { - JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_ArgumentsClass); + JS_ASSERT(obj->isArguments()); return GetArgsLength(obj); } @@ -712,7 +713,7 @@ args_reserveSlots(JSContext *cx, JSObject *obj) JSClass js_ArgumentsClass = { js_Object_str, JSCLASS_HAS_PRIVATE | JSCLASS_NEW_RESOLVE | - JSCLASS_HAS_RESERVED_SLOTS(ARGS_CLASS_FIXED_RESERVED_SLOTS) | + JSCLASS_HAS_RESERVED_SLOTS(ARGS_FIXED_RESERVED_SLOTS) | JSCLASS_MARK_IS_TRACE | JSCLASS_HAS_CACHED_PROTO(JSProto_Object), JS_PropertyStub, args_delProperty, JS_PropertyStub, JS_PropertyStub, @@ -743,8 +744,8 @@ JSClass js_DeclEnvClass = { static JSBool CheckForEscapingClosure(JSContext *cx, JSObject *obj, jsval *vp) { - JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_CallClass || - STOBJ_GET_CLASS(obj) == &js_DeclEnvClass); + JS_ASSERT(obj->getClass() == &js_CallClass || + obj->getClass() == &js_DeclEnvClass); jsval v = *vp; @@ -846,7 +847,7 @@ js_GetCallObject(JSContext *cx, JSStackFrame *fp) callobj->setPrivate(fp); JS_ASSERT(fp->argv); JS_ASSERT(fp->fun == GET_FUNCTION_PRIVATE(cx, fp->calleeObject())); - STOBJ_SET_SLOT(callobj, JSSLOT_CALLEE, fp->calleeValue()); + callobj->setSlot(JSSLOT_CALLEE, fp->calleeValue()); fp->callobj = callobj; /* @@ -864,7 +865,7 @@ js_CreateCallObjectOnTrace(JSContext *cx, JSFunction *fun, JSObject *callee, JSO JSObject *callobj = NewCallObject(cx, fun, scopeChain); if (!callobj) return NULL; - STOBJ_SET_SLOT(callobj, JSSLOT_CALLEE, OBJECT_TO_JSVAL(callee)); + callobj->setSlot(JSSLOT_CALLEE, OBJECT_TO_JSVAL(callee)); return callobj; } @@ -876,8 +877,8 @@ js_GetCallObjectFunction(JSObject *obj) { jsval v; - JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_CallClass); - v = STOBJ_GET_SLOT(obj, JSSLOT_CALLEE); + JS_ASSERT(obj->getClass() == &js_CallClass); + v = obj->getSlot(JSSLOT_CALLEE); if (JSVAL_IS_VOID(v)) { /* Newborn or prototype object. */ return NULL; @@ -902,7 +903,7 @@ js_PutCallObject(JSContext *cx, JSStackFrame *fp) /* Get the arguments object to snapshot fp's actual argument values. 
*/ if (fp->argsobj) { if (!(fp->flags & JSFRAME_OVERRIDE_ARGS)) - STOBJ_SET_SLOT(callobj, JSSLOT_CALL_ARGUMENTS, fp->argsobj); + callobj->setSlot(JSSLOT_CALL_ARGUMENTS, fp->argsobj); js_PutArgsObject(cx, fp); } @@ -917,7 +918,7 @@ js_PutCallObject(JSContext *cx, JSStackFrame *fp) JS_STATIC_ASSERT(JS_INITIAL_NSLOTS - JSSLOT_PRIVATE == 1 + CALL_CLASS_FIXED_RESERVED_SLOTS); if (n != 0) { - JS_ASSERT(STOBJ_NSLOTS(callobj) >= JS_INITIAL_NSLOTS + n); + JS_ASSERT(callobj->numSlots() >= JS_INITIAL_NSLOTS + n); n += JS_INITIAL_NSLOTS; CopyValuesToCallObject(callobj, fun->nargs, fp->argv, fun->u.i.nvars, fp->slots); } @@ -926,7 +927,7 @@ js_PutCallObject(JSContext *cx, JSStackFrame *fp) if (js_IsNamedLambda(fun)) { JSObject *env = callobj->getParent(); - JS_ASSERT(STOBJ_GET_CLASS(env) == &js_DeclEnvClass); + JS_ASSERT(env->getClass() == &js_DeclEnvClass); JS_ASSERT(env->getPrivate() == fp); env->setPrivate(NULL); } @@ -1028,7 +1029,7 @@ CallPropertyOp(JSContext *cx, JSObject *obj, jsid id, jsval *vp, jsval *array; if (kind == JSCPK_UPVAR) { - JSObject *callee = JSVAL_TO_OBJECT(STOBJ_GET_SLOT(obj, JSSLOT_CALLEE)); + JSObject *callee = JSVAL_TO_OBJECT(obj->getSlot(JSSLOT_CALLEE)); #ifdef DEBUG JSFunction *callee_fun = (JSFunction *) callee->getPrivate(); @@ -1048,7 +1049,7 @@ CallPropertyOp(JSContext *cx, JSObject *obj, jsid id, jsval *vp, if (setter) { if (fp) fp->flags |= JSFRAME_OVERRIDE_ARGS; - STOBJ_SET_SLOT(obj, JSSLOT_CALL_ARGUMENTS, *vp); + obj->setSlot(JSSLOT_CALL_ARGUMENTS, *vp); } else { if (fp && !(fp->flags & JSFRAME_OVERRIDE_ARGS)) { JSObject *argsobj; @@ -1058,7 +1059,7 @@ CallPropertyOp(JSContext *cx, JSObject *obj, jsid id, jsval *vp, return false; *vp = OBJECT_TO_JSVAL(argsobj); } else { - *vp = STOBJ_GET_SLOT(obj, JSSLOT_CALL_ARGUMENTS); + *vp = obj->getSlot(JSSLOT_CALL_ARGUMENTS); } } return true; @@ -1177,13 +1178,13 @@ call_resolve(JSContext *cx, JSObject *obj, jsval idval, uintN flags, JSPropertyOp getter, setter; uintN slot, attrs; - JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_CallClass); + JS_ASSERT(obj->getClass() == &js_CallClass); JS_ASSERT(!obj->getProto()); if (!JSVAL_IS_STRING(idval)) return JS_TRUE; - callee = STOBJ_GET_SLOT(obj, JSSLOT_CALLEE); + callee = obj->getSlot(JSSLOT_CALLEE); if (JSVAL_IS_VOID(callee)) return JS_TRUE; fun = GET_FUNCTION_PRIVATE(cx, JSVAL_TO_OBJECT(callee)); @@ -1494,7 +1495,7 @@ fun_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags, * script or embedding code and then be mutated. 
*/ if (flags & JSRESOLVE_ASSIGNING) { - JS_ASSERT(!js_IsInternalFunctionObject(obj)); + JS_ASSERT(!IsInternalFunctionObject(obj)); return JS_TRUE; } @@ -1504,7 +1505,7 @@ fun_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags, */ atom = cx->runtime->atomState.classPrototypeAtom; if (id == ATOM_KEY(atom)) { - JS_ASSERT(!js_IsInternalFunctionObject(obj)); + JS_ASSERT(!IsInternalFunctionObject(obj)); /* * Beware of the wacky case of a user function named Object -- trying @@ -1541,7 +1542,7 @@ fun_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags, atom = OFFSET_TO_ATOM(cx->runtime, lfp->atomOffset); if (id == ATOM_KEY(atom)) { - JS_ASSERT(!js_IsInternalFunctionObject(obj)); + JS_ASSERT(!IsInternalFunctionObject(obj)); if (!js_DefineNativeProperty(cx, obj, ATOM_TO_JSID(atom), JSVAL_VOID, @@ -1584,8 +1585,6 @@ js_XDRFunctionObject(JSXDRState *xdr, JSObject **objp) uintN nargs, nvars, nupvars, n; uint32 localsword; /* word for argument and variable counts */ uint32 flagsword; /* word for fun->u.i.nupvars and fun->flags */ - JSTempValueRooter tvr; - JSBool ok; cx = xdr->cx; if (xdr->mode == JSXDR_ENCODE) { @@ -1594,13 +1593,13 @@ js_XDRFunctionObject(JSXDRState *xdr, JSObject **objp) JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_NOT_SCRIPTED_FUNCTION, JS_GetFunctionName(fun)); - return JS_FALSE; + return false; } if (fun->u.i.wrapper) { JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_XDR_CLOSURE_WRAPPER, JS_GetFunctionName(fun)); - return JS_FALSE; + return false; } JS_ASSERT((fun->u.i.wrapper & ~1U) == 0); firstword = (fun->u.i.skipmin << 2) | (fun->u.i.wrapper << 1) | !!fun->atom; @@ -1612,7 +1611,7 @@ js_XDRFunctionObject(JSXDRState *xdr, JSObject **objp) } else { fun = js_NewFunction(cx, NULL, NULL, 0, JSFUN_INTERPRETED, NULL, NULL); if (!fun) - return JS_FALSE; + return false; FUN_OBJECT(fun)->clearParent(); FUN_OBJECT(fun)->clearProto(); #ifdef __GNUC__ @@ -1620,18 +1619,15 @@ js_XDRFunctionObject(JSXDRState *xdr, JSObject **objp) #endif } - /* From here on, control flow must flow through label out. */ - MUST_FLOW_THROUGH("out"); - JS_PUSH_TEMP_ROOT_OBJECT(cx, FUN_OBJECT(fun), &tvr); - ok = JS_TRUE; + AutoValueRooter tvr(cx, FUN_OBJECT(fun)); if (!JS_XDRUint32(xdr, &firstword)) - goto bad; + return false; if ((firstword & 1U) && !js_XDRStringAtom(xdr, &fun->atom)) - goto bad; + return false; if (!JS_XDRUint32(xdr, &localsword) || !JS_XDRUint32(xdr, &flagsword)) { - goto bad; + return false; } if (xdr->mode == JSXDR_DECODE) { @@ -1655,6 +1651,7 @@ js_XDRFunctionObject(JSXDRState *xdr, JSObject **objp) JSAtom *name; JSLocalKind localKind; + bool ok = true; mark = JS_ARENA_MARK(&xdr->cx->tempPool); /* @@ -1673,16 +1670,16 @@ js_XDRFunctionObject(JSXDRState *xdr, JSObject **objp) bitmapLength * sizeof *bitmap); if (!bitmap) { js_ReportOutOfScriptQuota(xdr->cx); - ok = JS_FALSE; + ok = false; goto release_mark; } if (xdr->mode == JSXDR_ENCODE) { names = js_GetLocalNameArray(xdr->cx, fun, &xdr->cx->tempPool); if (!names) { - ok = JS_FALSE; + ok = false; goto release_mark; } - memset(bitmap, 0, bitmapLength * sizeof *bitmap); + PodZero(bitmap, bitmapLength); for (i = 0; i != n; ++i) { if (i < fun->nargs ? 
JS_LOCAL_NAME_TO_ATOM(names[i]) != NULL @@ -1698,7 +1695,7 @@ js_XDRFunctionObject(JSXDRState *xdr, JSObject **objp) } #endif for (i = 0; i != bitmapLength; ++i) { - ok = JS_XDRUint32(xdr, &bitmap[i]); + ok = !!JS_XDRUint32(xdr, &bitmap[i]); if (!ok) goto release_mark; } @@ -1707,7 +1704,7 @@ js_XDRFunctionObject(JSXDRState *xdr, JSObject **objp) !(bitmap[i >> JS_BITS_PER_UINT32_LOG2] & JS_BIT(i & (JS_BITS_PER_UINT32 - 1)))) { if (xdr->mode == JSXDR_DECODE) { - ok = js_AddLocal(xdr->cx, fun, NULL, JSLOCAL_ARG); + ok = !!js_AddLocal(xdr->cx, fun, NULL, JSLOCAL_ARG); if (!ok) goto release_mark; } else { @@ -1717,7 +1714,7 @@ js_XDRFunctionObject(JSXDRState *xdr, JSObject **objp) } if (xdr->mode == JSXDR_ENCODE) name = JS_LOCAL_NAME_TO_ATOM(names[i]); - ok = js_XDRStringAtom(xdr, &name); + ok = !!js_XDRStringAtom(xdr, &name); if (!ok) goto release_mark; if (xdr->mode == JSXDR_DECODE) { @@ -1729,24 +1726,23 @@ js_XDRFunctionObject(JSXDRState *xdr, JSObject **objp) ? JSLOCAL_CONST : JSLOCAL_VAR) : JSLOCAL_UPVAR; - ok = js_AddLocal(xdr->cx, fun, name, localKind); + ok = !!js_AddLocal(xdr->cx, fun, name, localKind); if (!ok) goto release_mark; } } - ok = JS_TRUE; release_mark: JS_ARENA_RELEASE(&xdr->cx->tempPool, mark); if (!ok) - goto out; + return false; if (xdr->mode == JSXDR_DECODE) js_FreezeLocalNames(cx, fun); } if (!js_XDRScript(xdr, &fun->u.i.script, false, NULL)) - goto bad; + return false; if (xdr->mode == JSXDR_DECODE) { *objp = FUN_OBJECT(fun); @@ -1758,13 +1754,7 @@ js_XDRFunctionObject(JSXDRState *xdr, JSObject **objp) } } -out: - JS_POP_TEMP_ROOT(cx, &tvr); - return ok; - -bad: - ok = JS_FALSE; - goto out; + return true; } #else /* !JS_HAS_XDR */ @@ -2110,11 +2100,34 @@ js_fun_apply(JSContext *cx, uintN argc, jsval *vp) sp = invokevp; *sp++ = fval; *sp++ = OBJECT_TO_JSVAL(obj); - for (i = 0; i < argc; i++) { - ok = JS_GetElement(cx, aobj, (jsint)i, sp); - if (!ok) - goto out; - sp++; + if (aobj && aobj->isArguments()) { + /* + * Two cases, two loops: note how in the case of an active stack frame + * backing aobj, even though we copy from fp->argv, we still must check + * aobj->dslots[i] for a hole, to handle a delete on the corresponding + * arguments element. See args_delProperty. 
+ */ + JSStackFrame *fp = (JSStackFrame *) aobj->getPrivate(); + if (fp) { + memcpy(sp, fp->argv, argc * sizeof(jsval)); + for (i = 0; i < argc; i++) { + if (aobj->dslots[i] == JSVAL_HOLE) // suppress deleted element + sp[i] = JSVAL_VOID; + } + } else { + memcpy(sp, aobj->dslots, argc * sizeof(jsval)); + for (i = 0; i < argc; i++) { + if (sp[i] == JSVAL_HOLE) + sp[i] = JSVAL_VOID; + } + } + } else { + for (i = 0; i < argc; i++) { + ok = aobj->getProperty(cx, INT_TO_JSID(jsint(i)), sp); + if (!ok) + goto out; + sp++; + } } ok = js_Invoke(cx, argc, invokevp, 0); @@ -2136,8 +2149,8 @@ fun_applyConstructor(JSContext *cx, uintN argc, jsval *vp) if (JSVAL_IS_PRIMITIVE(vp[2]) || (aobj = JSVAL_TO_OBJECT(vp[2]), - OBJ_GET_CLASS(cx, aobj) != &js_ArrayClass && - OBJ_GET_CLASS(cx, aobj) != &js_ArgumentsClass)) { + !aobj->isArray() && + !aobj->isArguments())) { JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_BAD_APPLY_ARGS, "__applyConstruct__"); return JS_FALSE; @@ -2156,7 +2169,7 @@ fun_applyConstructor(JSContext *cx, uintN argc, jsval *vp) *sp++ = vp[1]; *sp++ = JSVAL_NULL; /* this is filled automagically */ for (i = 0; i < length; i++) { - ok = JS_GetElement(cx, aobj, (jsint)i, sp); + ok = aobj->getProperty(cx, INT_TO_JSID(jsint(i)), sp); if (!ok) goto out; sp++; @@ -2194,12 +2207,12 @@ Function(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) const char *filename; JSBool ok; JSString *str, *arg; - JSTokenStream ts(cx); + TokenStream ts(cx); JSPrincipals *principals; jschar *collected_args, *cp; void *mark; size_t arg_length, args_length, old_args_length; - JSTokenType tt; + TokenKind tt; if (!JS_IsConstructing(cx)) { obj = js_NewObject(cx, &js_FunctionClass, NULL, NULL); @@ -2341,13 +2354,13 @@ Function(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) } /* Initialize a tokenstream that reads from the given string. */ - if (!ts.init(cx, collected_args, args_length, NULL, filename, lineno)) { + if (!ts.init(collected_args, args_length, NULL, filename, lineno)) { JS_ARENA_RELEASE(&cx->tempPool, mark); return JS_FALSE; } /* The argument string may be empty or contain no tokens. */ - tt = js_GetToken(cx, &ts); + tt = ts.getToken(); if (tt != TOK_EOF) { for (;;) { /* @@ -2362,19 +2375,16 @@ Function(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) * stream; we're assured at this point that it's a valid * identifier. */ - atom = CURRENT_TOKEN(&ts).t_atom; + atom = ts.currentToken().t_atom; /* Check for a duplicate parameter name. */ if (js_LookupLocal(cx, fun, atom, NULL) != JSLOCAL_NONE) { const char *name; name = js_AtomToPrintableString(cx, atom); - ok = name && - js_ReportCompileErrorNumber(cx, &ts, NULL, - JSREPORT_WARNING | - JSREPORT_STRICT, - JSMSG_DUPLICATE_FORMAL, - name); + ok = name && ReportCompileErrorNumber(cx, &ts, NULL, + JSREPORT_WARNING | JSREPORT_STRICT, + JSMSG_DUPLICATE_FORMAL, name); if (!ok) goto after_args; } @@ -2385,12 +2395,12 @@ Function(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) * Get the next token. Stop on end of stream. Otherwise * insist on a comma, get another name, and iterate. 
*/ - tt = js_GetToken(cx, &ts); + tt = ts.getToken(); if (tt == TOK_EOF) break; if (tt != TOK_COMMA) goto after_args; - tt = js_GetToken(cx, &ts); + tt = ts.getToken(); } } @@ -2404,7 +2414,7 @@ Function(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_BAD_FORMAL); } - ts.close(cx); + ts.close(); JS_ARENA_RELEASE(&cx->tempPool, mark); if (state != OK) return JS_FALSE; @@ -2448,7 +2458,7 @@ js_NewFunction(JSContext *cx, JSObject *funobj, JSNative native, uintN nargs, JSFunction *fun; if (funobj) { - JS_ASSERT(HAS_FUNCTION_CLASS(funobj)); + JS_ASSERT(funobj->isFunction()); funobj->setParent(parent); } else { funobj = js_NewObject(cx, &js_FunctionClass, NULL, parent); @@ -2692,26 +2702,26 @@ js_ReportIsNotFunction(JSContext *cx, jsval *vp, uintN flags) JSStackFrame *fp; uintN error; const char *name, *source; - JSTempValueRooter tvr; for (fp = js_GetTopStackFrame(cx); fp && !fp->regs; fp = fp->down) continue; name = source = NULL; - JS_PUSH_TEMP_ROOT_STRING(cx, NULL, &tvr); + + AutoValueRooter tvr(cx); if (flags & JSV2F_ITERATOR) { error = JSMSG_BAD_ITERATOR; name = js_iterator_str; JSString *src = js_ValueToSource(cx, *vp); if (!src) - goto out; - tvr.u.value = STRING_TO_JSVAL(src); + return; + tvr.setString(src); JSString *qsrc = js_QuoteString(cx, src, 0); if (!qsrc) - goto out; - tvr.u.value = STRING_TO_JSVAL(qsrc); + return; + tvr.setString(qsrc); source = js_GetStringBytes(cx, qsrc); if (!source) - goto out; + return; } else if (flags & JSV2F_CONSTRUCT) { error = JSMSG_NOT_CONSTRUCTOR; } else { @@ -2727,9 +2737,6 @@ js_ReportIsNotFunction(JSContext *cx, jsval *vp, uintN flags) : JSDVG_IGNORE_STACK, *vp, NULL, name, source); - - out: - JS_POP_TEMP_ROOT(cx, &tvr); } /* @@ -3060,7 +3067,7 @@ js_GetLocalNameArray(JSContext *cx, JSFunction *fun, JSArenaPool *pool) #if JS_HAS_DESTRUCTURING /* Some parameter names can be NULL due to destructuring patterns. */ - memset(names, 0, fun->nargs * sizeof *names); + PodZero(names, fun->nargs); #endif map = fun->u.i.names.map; args.fun = fun; diff --git a/js/src/jsfun.h b/js/src/jsfun.h index 37f7d1f825b..55145922de4 100644 --- a/js/src/jsfun.h +++ b/js/src/jsfun.h @@ -214,7 +214,27 @@ struct JSFunction : public JSObject JS_FN(name, fastcall, nargs, flags) #endif +/* + * NB: the Arguments class is an uninitialized internal class that masquerades + * (according to Object.prototype.toString.call(argsobj)) as "Object". + * + * WARNING (to alert embedders reading this private .h file): arguments objects + * are *not* thread-safe and should not be used concurrently -- they should be + * used by only one thread at a time, preferably by only one thread over their + * lifetime (a JS worker that migrates from one OS thread to another but shares + * nothing is ok). + * + * Yes, this is an incompatible change, which prefigures the impending move to + * single-threaded objects and GC heaps. + */ extern JSClass js_ArgumentsClass; + +inline bool +JSObject::isArguments() const +{ + return getClass() == &js_ArgumentsClass; +} + extern JS_FRIEND_DATA(JSClass) js_CallClass; extern JSClass js_DeclEnvClass; extern const uint32 CALL_CLASS_FIXED_RESERVED_SLOTS; @@ -228,45 +248,47 @@ JSObject::isFunction() const return getClass() == &js_FunctionClass; } -#define HAS_FUNCTION_CLASS(obj) (obj)->isFunction() - /* * NB: jsapi.h and jsobj.h must be included before any call to this macro. 
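 * A typical (illustrative) use pairs the test with the accessor below:
 *
 *   if (VALUE_IS_FUNCTION(cx, v))
 *       fun = GET_FUNCTION_PRIVATE(cx, JSVAL_TO_OBJECT(v));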
*/ #define VALUE_IS_FUNCTION(cx, v) \ - (!JSVAL_IS_PRIMITIVE(v) && HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v))) + (!JSVAL_IS_PRIMITIVE(v) && JSVAL_TO_OBJECT(v)->isFunction()) /* * Macro to access the private slot of the function object after the slot is * initialized. */ #define GET_FUNCTION_PRIVATE(cx, funobj) \ - (JS_ASSERT(HAS_FUNCTION_CLASS(funobj)), \ + (JS_ASSERT((funobj)->isFunction()), \ (JSFunction *) (funobj)->getPrivate()) +namespace js { + /* * Return true if this is a compiler-created internal function accessed by * its own object. Such a function object must not be accessible to script * or embedding code. */ inline bool -js_IsInternalFunctionObject(JSObject *funobj) +IsInternalFunctionObject(JSObject *funobj) { - JS_ASSERT(HAS_FUNCTION_CLASS(funobj)); + JS_ASSERT(funobj->isFunction()); JSFunction *fun = (JSFunction *) funobj->getPrivate(); return funobj == fun && (fun->flags & JSFUN_LAMBDA) && !funobj->getParent(); } + +struct ArgsPrivateNative; -namespace js { struct ArgsPrivateNative; } - -inline js::ArgsPrivateNative * -js_GetArgsPrivateNative(JSObject *argsobj) +inline ArgsPrivateNative * +GetArgsPrivateNative(JSObject *argsobj) { - JS_ASSERT(STOBJ_GET_CLASS(argsobj) == &js_ArgumentsClass); + JS_ASSERT(argsobj->isArguments()); uintptr_t p = (uintptr_t) argsobj->getPrivate(); - return (js::ArgsPrivateNative *) (p & 2 ? p & ~2 : NULL); + return (ArgsPrivateNative *) (p & 2 ? p & ~2 : NULL); } +} /* namespace js */ + extern JSObject * js_InitFunctionClass(JSContext *cx, JSObject *obj); @@ -399,17 +421,30 @@ js_IsNamedLambda(JSFunction *fun) { return (fun->flags & JSFUN_LAMBDA) && fun->a * whether arguments.length was overwritten. * JSSLOT_ARGS_CALLEE - the arguments.callee value or JSVAL_HOLE if that was * overwritten. - * JSSLOT_ARGS_COPY_START .. - room to store the corresponding arguments after - * the frame exists. The slot's value will be JSVAL_HOLE - * if arguments[i] was deleted or overwritten. + * JSSLOT_ARGS_START - room to store the corresponding arguments after the + * frame exists. The slot's value will be JSVAL_HOLE if + * arguments[i] was deleted or overwritten. + * + * The static assertion checks that hand-optimized code can fetch and store the + * argument value at argsobj->dslots[i] for argument index i. But future-proof + * your code by using {Get,Set}ArgsSlot instead of naked dslots references. */ -const uint32 JSSLOT_ARGS_LENGTH = JSSLOT_PRIVATE + 1; -const uint32 JSSLOT_ARGS_CALLEE = JSSLOT_PRIVATE + 2; -const uint32 JSSLOT_ARGS_COPY_START = JSSLOT_PRIVATE + 3; +const uint32 JSSLOT_ARGS_LENGTH = JSSLOT_PRIVATE + 1; +const uint32 JSSLOT_ARGS_CALLEE = JSSLOT_PRIVATE + 2; +const uint32 JSSLOT_ARGS_START = JSSLOT_PRIVATE + 3; + +JS_STATIC_ASSERT(JSSLOT_ARGS_START == JS_INITIAL_NSLOTS); /* Number of extra fixed slots besides JSSLOT_PRIVATE. */ -const uint32 ARGS_CLASS_FIXED_RESERVED_SLOTS = JSSLOT_ARGS_COPY_START - - JSSLOT_ARGS_LENGTH; +const uint32 ARGS_FIXED_RESERVED_SLOTS = JSSLOT_ARGS_START - JSSLOT_ARGS_LENGTH; + +/* + * Maximum supported value of arguments.length. It bounds the maximum number of + * arguments that can be supplied via the second (so-called |argArray|) param + * to Function.prototype.apply. This value also bounds the number of elements + * parsed in an array initialiser. + */ +const uint32 JS_ARGS_LENGTH_MAX = JS_BIT(24) - 1; /* * JSSLOT_ARGS_LENGTH stores ((argc << 1) | overwritten_flag) as int jsval. 
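 * For example, argc == 3 is stored as the int jsval 6 (3 << 1); once a
 * script overwrites arguments.length, the low bit is set and the slot holds
 * 7. GetArgsLength recovers 3 from either form via the >> 1, while
 * IsOverriddenArgsLength tests only the low bit.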
@@ -419,15 +454,41 @@ const uint32 ARGS_CLASS_FIXED_RESERVED_SLOTS = JSSLOT_ARGS_COPY_START - JS_STATIC_ASSERT(JS_ARGS_LENGTH_MAX <= JS_BIT(30)); JS_STATIC_ASSERT(jsval((JS_ARGS_LENGTH_MAX << 1) | 1) <= JSVAL_INT_MAX); -JS_INLINE bool -js_IsOverriddenArgsLength(JSObject *obj) +namespace js { + +inline jsval +GetArgsSlot(JSObject *argsobj, uint32 arg) { - JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_ArgumentsClass); + return argsobj->dslots[arg]; +} + +inline void +SetArgsSlot(JSObject *argsobj, uint32 arg, jsval v) +{ + argsobj->dslots[arg] = v; +} + +inline bool +IsOverriddenArgsLength(JSObject *obj) +{ + JS_ASSERT(obj->isArguments()); jsval v = obj->fslots[JSSLOT_ARGS_LENGTH]; return (JSVAL_TO_INT(v) & 1) != 0; } +inline uint32 +GetArgsLength(JSObject *obj) +{ + JS_ASSERT(obj->isArguments()); + + uint32 argc = uint32(JSVAL_TO_INT(obj->fslots[JSSLOT_ARGS_LENGTH])) >> 1; + JS_ASSERT(argc <= JS_ARGS_LENGTH_MAX); + return argc; +} + +} /* namespace js */ + extern JSBool js_XDRFunctionObject(JSXDRState *xdr, JSObject **objp); diff --git a/js/src/jsgc.cpp b/js/src/jsgc.cpp index 422260b931f..89d5f5c6250 100644 --- a/js/src/jsgc.cpp +++ b/js/src/jsgc.cpp @@ -87,6 +87,7 @@ #include "jsdtracef.h" #endif +#include "jscntxtinlines.h" #include "jsobjinlines.h" /* @@ -109,14 +110,6 @@ using namespace js; -/* - * Check JSTempValueUnion has the size of jsval and void * so we can - * reinterpret jsval as void* GC-thing pointer and use JSTVU_SINGLE for - * different GC-things. - */ -JS_STATIC_ASSERT(sizeof(JSTempValueUnion) == sizeof(jsval)); -JS_STATIC_ASSERT(sizeof(JSTempValueUnion) == sizeof(void *)); - /* * Check that JSTRACE_XML follows JSTRACE_OBJECT, JSTRACE_DOUBLE and * JSTRACE_STRING. @@ -366,7 +359,7 @@ struct JSGCArena { } void clearMarkBitmap() { - memset(markBitmap, 0, sizeof(markBitmap)); + PodArrayZero(markBitmap); } jsbitmap *getMarkBitmapEnd() { @@ -574,11 +567,18 @@ MakeNewArenaFreeList(JSGCArena *a, size_t thingSize) #define METER_UPDATE_MAX(maxLval, rval) \ METER_IF((maxLval) < (rval), (maxLval) = (rval)) +#ifdef MOZ_GCTIMER +static jsrefcount newChunkCount = 0; +static jsrefcount destroyChunkCount = 0; +#endif + static jsuword NewGCChunk(void) { void *p; - +#ifdef MOZ_GCTIMER + JS_ATOMIC_INCREMENT(&newChunkCount); +#endif #if defined(XP_WIN) p = VirtualAlloc(NULL, GC_CHUNK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); @@ -600,6 +600,9 @@ NewGCChunk(void) static void DestroyGCChunk(jsuword chunk) { +#ifdef MOZ_GCTIMER + JS_ATOMIC_INCREMENT(&destroyChunkCount); +#endif JS_ASSERT((chunk & GC_ARENA_MASK) == 0); #if defined(XP_WIN) VirtualFree((void *) chunk, 0, MEM_RELEASE); @@ -954,7 +957,7 @@ js_InitGC(JSRuntime *rt, uint32 maxbytes) */ rt->setGCLastBytes(8192); - METER(memset(&rt->gcStats, 0, sizeof rt->gcStats)); + METER(PodZero(&rt->gcStats)); return true; } @@ -1409,7 +1412,7 @@ JSGCFreeLists::moveTo(JSGCFreeLists *another) { *another = *this; doubles = NULL; - memset(finalizables, 0, sizeof(finalizables)); + PodArrayZero(finalizables); JS_ASSERT(isEmpty()); } @@ -2259,19 +2262,20 @@ gc_lock_traversal(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 num, return JS_DHASH_NEXT; } -#define TRACE_JSVALS(trc, len, vec, name) \ - JS_BEGIN_MACRO \ - jsval _v, *_vp, *_end; \ - \ - for (_vp = vec, _end = _vp + len; _vp < _end; _vp++) { \ - _v = *_vp; \ - if (JSVAL_IS_TRACEABLE(_v)) { \ - JS_SET_TRACING_INDEX(trc, name, _vp - (vec)); \ - js_CallGCMarker(trc, JSVAL_TO_TRACEABLE(_v), \ - JSVAL_TRACE_KIND(_v)); \ - } \ - } \ - JS_END_MACRO +namespace js { + +void +TraceObjectVector(JSTracer *trc, 
JSObject **vec, uint32 len)
+{
+    for (uint32 i = 0; i < len; i++) {
+        if (JSObject *obj = vec[i]) {
+            JS_SET_TRACING_INDEX(trc, "vector", i);
+            js_CallGCMarker(trc, obj, JSTRACE_OBJECT);
+        }
+    }
+}
+
+}

 void
 js_TraceStackFrame(JSTracer *trc, JSStackFrame *fp)
@@ -2297,7 +2301,7 @@ js_TraceStackFrame(JSTracer *trc, JSStackFrame *fp)
             } else {
                 nslots = fp->script->nfixed;
             }
-            TRACE_JSVALS(trc, nslots, fp->slots, "slot");
+            TraceValues(trc, nslots, fp->slots, "slot");
         }
     } else {
         JS_ASSERT(!fp->slots);
@@ -2322,7 +2326,7 @@ js_TraceStackFrame(JSTracer *trc, JSStackFrame *fp)
             if (fp->fun->flags & JSFRAME_ROOTED_ARGV)
                 skip = 2 + fp->argc;
         }
-        TRACE_JSVALS(trc, 2 + nslots - skip, fp->argv - 2 + skip, "operand");
+        TraceValues(trc, 2 + nslots - skip, fp->argv - 2 + skip, "operand");
     }

     JS_CALL_VALUE_TRACER(trc, fp->rval, "rval");
@@ -2377,7 +2381,6 @@ JS_REQUIRES_STACK JS_FRIEND_API(void)
 js_TraceContext(JSTracer *trc, JSContext *acx)
 {
     JSStackHeader *sh;
-    JSTempValueRooter *tvr;

     /*
      * Trace active and suspended callstacks.
@@ -2409,10 +2412,6 @@ js_TraceContext(JSTracer *trc, JSContext *acx)
         }
     }

-    /* Trace frames that have been temporarily removed but need to be marked. */
-    for (JSGCReachableFrame *rf = acx->reachableFrames; rf; rf = rf->next)
-        TraceFrameChain(trc, rf->frame);
-
     /* Mark other roots-by-definition in acx. */
     if (acx->globalObject && !JS_HAS_OPTION(acx, JSOPTION_UNROOTED_GLOBAL))
         JS_CALL_OBJECT_TRACER(trc, acx->globalObject, "global object");
@@ -2427,38 +2426,11 @@ js_TraceContext(JSTracer *trc, JSContext *acx)
     for (sh = acx->stackHeaders; sh; sh = sh->down) {
         METER(trc->context->runtime->gcStats.stackseg++);
         METER(trc->context->runtime->gcStats.segslots += sh->nslots);
-        TRACE_JSVALS(trc, sh->nslots, JS_STACK_SEGMENT(sh), "stack");
+        TraceValues(trc, sh->nslots, JS_STACK_SEGMENT(sh), "stack");
     }

-    for (tvr = acx->tempValueRooters; tvr; tvr = tvr->down) {
-        switch (tvr->count) {
-          case JSTVU_SINGLE:
-            JS_SET_TRACING_NAME(trc, "tvr->u.value");
-            js_CallValueTracerIfGCThing(trc, tvr->u.value);
-            break;
-          case JSTVU_TRACE:
-            tvr->u.trace(trc, tvr);
-            break;
-          case JSTVU_SPROP:
-            tvr->u.sprop->trace(trc);
-            break;
-          case JSTVU_WEAK_ROOTS:
-            tvr->u.weakRoots->mark(trc);
-            break;
-          case JSTVU_COMPILER:
-            tvr->u.compiler->trace(trc);
-            break;
-          case JSTVU_SCRIPT:
-            js_TraceScript(trc, tvr->u.script);
-            break;
-          case JSTVU_ENUMERATOR:
-            static_cast<JSAutoEnumStateRooter *>(tvr)->mark(trc);
-            break;
-          default:
-            JS_ASSERT(tvr->count >= 0);
-            TRACE_JSVALS(trc, tvr->count, tvr->u.array, "tvr->u.array");
-        }
-    }
+    for (js::AutoGCRooter *gcr = acx->autoGCRooters; gcr; gcr = gcr->down)
+        gcr->trace(trc);

     if (acx->sharpObjectMap.depth > 0)
         js_TraceSharpMap(trc, &acx->sharpObjectMap);
@@ -2469,7 +2441,7 @@ js_TraceContext(JSTracer *trc, JSContext *acx)
     InterpState* state = acx->interpState;
     while (state) {
         if (state->nativeVp)
-            TRACE_JSVALS(trc, state->nativeVpLen, state->nativeVp, "nativeVp");
+            TraceValues(trc, state->nativeVpLen, state->nativeVp, "nativeVp");
         state = state->prev;
     }
 #endif
@@ -2532,7 +2504,7 @@ ProcessSetSlotRequest(JSContext *cx, JSSetSlotRequest *ssr)
             ssr->cycle = true;
             return;
         }
-        pobj = JSVAL_TO_OBJECT(STOBJ_GET_SLOT(pobj, slot));
+        pobj = JSVAL_TO_OBJECT(pobj->getSlot(slot));
     }

     pobj = ssr->pobj;
@@ -2578,7 +2550,7 @@ FinalizeObject(JSContext *cx, JSObject *obj, unsigned thingKind)
     jsdtrace_object_finalize(obj);
 #endif

-    if (JS_LIKELY(OBJ_IS_NATIVE(obj))) {
+    if (JS_LIKELY(obj->isNative())) {
         JSScope *scope = OBJ_SCOPE(obj);
         if (scope->isSharedEmpty())
             static_cast<JSEmptyScope *>(scope)->dropFromGC(cx);
@@ -2656,8 +2628,6 @@ FinalizeString(JSContext *cx, JSString *str, unsigned thingKind)
          */
         cx->free(str->flatChars());
     }
-    if (str->isDeflated())
-        js_PurgeDeflatedStringCache(cx->runtime, str);
 }

 inline void
@@ -2677,8 +2647,6 @@ FinalizeExternalString(JSContext *cx, JSString *str, unsigned thingKind)
     JSStringFinalizeOp finalizer = str_finalizers[type];
     if (finalizer)
         finalizer(cx, str);
-    if (str->isDeflated())
-        js_PurgeDeflatedStringCache(cx->runtime, str);
 }

 /*
@@ -2720,8 +2688,6 @@ js_FinalizeStringRT(JSRuntime *rt, JSString *str)
             }
         }
     }
-    if (str->isDeflated())
-        js_PurgeDeflatedStringCache(rt, str);
 }

+#ifdef MOZ_GCTIMER
+struct GCTimer {
+    uint64 enter;
+    uint64 startMark;
+    uint64 startSweep;
+    uint64 sweepObjectEnd;
+    uint64 sweepStringEnd;
+    uint64 sweepDoubleEnd;
+    uint64 sweepDestroyEnd;
+    uint64 end;
+};
+
+static void
+dumpGCTimer(GCTimer *gcT, uint64 firstEnter, JSBool lastGC)
+{
+    static FILE *gcFile;
+
+    if (!gcFile)
+        gcFile = fopen("gcTimer.dat", "w");
+    fprintf(gcFile, "%f, %f, %f, %f, %f, %f, %f, %f, ",
+            (double)(gcT->enter - firstEnter) / 1E6,
+            (double)(gcT->end-gcT->enter) / 1E6,
+            (double)(gcT->startSweep - gcT->startMark) / 1E6,
+            (double)(gcT->sweepDestroyEnd - gcT->startSweep) / 1E6,
+            (double)(gcT->sweepObjectEnd - gcT->startSweep) / 1E6,
+            (double)(gcT->sweepStringEnd - gcT->sweepObjectEnd) / 1E6,
+            (double)(gcT->sweepDoubleEnd - gcT->sweepStringEnd) / 1E6,
+            (double)(gcT->sweepDestroyEnd - gcT->sweepDoubleEnd) / 1E6);
+    fprintf(gcFile, "%10d, %10d \n", newChunkCount, destroyChunkCount);
+    fflush(gcFile);
+
+    if (lastGC)
+        fclose(gcFile);
+}
+#endif
+
 /*
  * The gckind flag bit GC_LOCK_HELD indicates a call from js_NewGCThing with
  * rt->gcLock already held, so the lock should be kept on return.
  */
@@ -2873,6 +2879,16 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind)
      */
     if (rt->state != JSRTS_UP && gckind != GC_LAST_CONTEXT)
         return;
+
+#ifdef MOZ_GCTIMER
+    static uint64 firstEnter = rdtsc();
+    GCTimer gcTimer;
+    memset(&gcTimer, 0, sizeof(GCTimer));
+# define TIMESTAMP(x) (x = rdtsc())
+#else
+# define TIMESTAMP(x) ((void) 0)
+#endif
+    TIMESTAMP(gcTimer.enter);

   restart_at_beginning:
     /*
@@ -3118,11 +3134,10 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind)
         acx->purge();
     }
-
 #ifdef JS_TRACER
     if (gckind == GC_LAST_CONTEXT) {
         /* Clear builtin functions, which are recreated on demand. */
-        memset(rt->builtinFunctions, 0, sizeof rt->builtinFunctions);
+        PodArrayZero(rt->builtinFunctions);
     }
 #endif

@@ -3130,6 +3145,8 @@
     if (!(gckind & GC_KEEP_ATOMS))
         JS_CLEAR_WEAK_ROOTS(&cx->weakRoots);

+    TIMESTAMP(gcTimer.startMark);
+
   restart:
     rt->gcNumber++;
     JS_ASSERT(!rt->gcUnmarkedArenaStackTop);
@@ -3192,6 +3209,7 @@
      * JSString* assuming that they are unique. This works since the
      * atomization API must not be called during GC.
      */
+    TIMESTAMP(gcTimer.startSweep);
     js_SweepAtomState(cx);

     /* Finalize iterator states before the objects they iterate over. */
@@ -3229,6 +3247,14 @@
 #if JS_HAS_XML_SUPPORT
     FinalizeArenaList<JSXML, FinalizeXML>(cx, FINALIZE_XML, &emptyArenas);
 #endif
+    TIMESTAMP(gcTimer.sweepObjectEnd);
+
+    /*
+     * We sweep the deflated cache before we finalize the strings so the
+     * cache can safely use js_IsAboutToBeFinalized.
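+     * (The sweep drops every cache entry whose JSString is about to be
+     * finalized in this GC, so the string arenas freed below cannot leave
+     * dangling entries behind.)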
+     */
+    rt->deflatedStringCache->sweep(cx);
+
     FinalizeArenaList<JSString, FinalizeString>(cx, FINALIZE_STRING, &emptyArenas);
     for (unsigned i = FINALIZE_EXTERNAL_STRING0;
@@ -3237,6 +3263,7 @@
          i <= FINALIZE_EXTERNAL_STRING_LAST;
          ++i) {
         FinalizeArenaList<JSString, FinalizeExternalString>(cx, i, &emptyArenas);
     }
+    TIMESTAMP(gcTimer.sweepStringEnd);

     ap = &rt->gcDoubleArenaList.head;
     METER((nlivearenas = 0, nkilledarenas = 0, nthings = 0));
@@ -3264,12 +3291,12 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind)
     METER(UpdateArenaStats(&rt->gcStats.doubleArenaStats,
                            nlivearenas, nkilledarenas, nthings));
     rt->gcDoubleArenaList.cursor = rt->gcDoubleArenaList.head;
-
+    TIMESTAMP(gcTimer.sweepDoubleEnd);
     /*
      * Sweep the runtime's property tree after finalizing objects, in case any
      * had watchpoints referencing tree nodes.
      */
-    js_SweepScopeProperties(cx);
+    js::SweepScopeProperties(cx);

     /*
      * Sweep script filenames after sweeping functions in the generic loop
@@ -3284,6 +3311,7 @@
      * use js_IsAboutToBeFinalized().
      */
     DestroyGCArenas(rt, emptyArenas);
+    TIMESTAMP(gcTimer.sweepDestroyEnd);

 #ifdef JS_THREADSAFE
     cx->submitDeallocatorTask();
@@ -3365,33 +3393,38 @@ out:
      * interlock mechanism here.
      */
     if (gckind != GC_SET_SLOT_REQUEST && (callback = rt->gcCallback)) {
-        JSWeakRoots savedWeakRoots;
-        JSTempValueRooter tvr;
+        if (!(gckind & GC_KEEP_ATOMS)) {
+            (void) callback(cx, JSGC_END);

-        if (gckind & GC_KEEP_ATOMS) {
+            /*
+             * On shutdown iterate until JSGC_END callback stops creating
+             * garbage.
+             */
+            if (gckind == GC_LAST_CONTEXT && rt->gcPoke)
+                goto restart_at_beginning;
+        } else {
             /*
              * We allow JSGC_END implementation to force a full GC or allocate
              * new GC things. Thus we must protect the weak roots from garbage
              * collection and overwrites.
              */
-            savedWeakRoots = cx->weakRoots;
-            JS_PUSH_TEMP_ROOT_WEAK_COPY(cx, &savedWeakRoots, &tvr);
+            AutoSaveWeakRoots save(cx);
+
             JS_KEEP_ATOMS(rt);
             JS_UNLOCK_GC(rt);
-        }

-        (void) callback(cx, JSGC_END);
+            (void) callback(cx, JSGC_END);

-        if (gckind & GC_KEEP_ATOMS) {
             JS_LOCK_GC(rt);
             JS_UNKEEP_ATOMS(rt);
-            JS_POP_TEMP_ROOT(cx, &tvr);
-        } else if (gckind == GC_LAST_CONTEXT && rt->gcPoke) {
-            /*
-             * On shutdown iterate until JSGC_END callback stops creating
-             * garbage.
- */ - goto restart_at_beginning; } } + TIMESTAMP(gcTimer.end); + +#ifdef MOZ_GCTIMER + if (gcTimer.startMark > 0) + dumpGCTimer(&gcTimer, firstEnter, gckind == GC_LAST_CONTEXT); + newChunkCount = 0; + destroyChunkCount = 0; +#endif } diff --git a/js/src/jsgc.h b/js/src/jsgc.h index 058dafdb49c..d7726d8b960 100644 --- a/js/src/jsgc.h +++ b/js/src/jsgc.h @@ -48,6 +48,7 @@ #include "jsbit.h" #include "jsutil.h" #include "jstask.h" +#include "jsversion.h" JS_BEGIN_EXTERN_C @@ -454,4 +455,23 @@ js_MarkTraps(JSTracer *trc); JS_END_EXTERN_C +namespace js { + +void +TraceObjectVector(JSTracer *trc, JSObject **vec, uint32 len); + +inline void +TraceValues(JSTracer *trc, size_t len, jsval *vec, const char *name) +{ + for (jsval *vp = vec, *end = vp + len; vp < end; vp++) { + jsval v = *vp; + if (JSVAL_IS_TRACEABLE(v)) { + JS_SET_TRACING_INDEX(trc, name, vp - vec); + js_CallGCMarker(trc, JSVAL_TO_TRACEABLE(v), JSVAL_TRACE_KIND(v)); + } + } +} + +} + #endif /* jsgc_h___ */ diff --git a/js/src/jsinterp.cpp b/js/src/jsinterp.cpp index 19115c6b2a0..6f9c5baf99a 100644 --- a/js/src/jsinterp.cpp +++ b/js/src/jsinterp.cpp @@ -65,6 +65,7 @@ #include "jsnum.h" #include "jsobj.h" #include "jsopcode.h" +#include "jspropertycache.h" #include "jsscan.h" #include "jsscope.h" #include "jsscript.h" @@ -75,7 +76,7 @@ #include "jsvector.h" #include "jsatominlines.h" -#include "jsinterpinlines.h" +#include "jspropertycacheinlines.h" #include "jsobjinlines.h" #include "jsscopeinlines.h" #include "jsscriptinlines.h" @@ -96,493 +97,6 @@ using namespace js; /* jsinvoke_cpp___ indicates inclusion from jsinvoke.cpp. */ #if !JS_LONE_INTERPRET ^ defined jsinvoke_cpp___ -JS_REQUIRES_STACK JSPropCacheEntry * -js_FillPropertyCache(JSContext *cx, JSObject *obj, - uintN scopeIndex, uintN protoIndex, JSObject *pobj, - JSScopeProperty *sprop, JSBool adding) -{ - JSPropertyCache *cache; - jsbytecode *pc; - JSScope *scope; - jsuword kshape, vshape, khash; - JSOp op; - const JSCodeSpec *cs; - jsuword vword; - ptrdiff_t pcoff; - JSAtom *atom; - JSPropCacheEntry *entry; - - JS_ASSERT(!cx->runtime->gcRunning); - cache = &JS_PROPERTY_CACHE(cx); - - /* FIXME bug 489098: consider enabling the property cache for eval. */ - if (js_IsPropertyCacheDisabled(cx) || (cx->fp->flags & JSFRAME_EVAL)) { - PCMETER(cache->disfills++); - return JS_NO_PROP_CACHE_FILL; - } - - /* - * Check for fill from js_SetPropertyHelper where the setter removed sprop - * from pobj's scope (via unwatch or delete, e.g.). - */ - scope = OBJ_SCOPE(pobj); - if (!scope->hasProperty(sprop)) { - PCMETER(cache->oddfills++); - return JS_NO_PROP_CACHE_FILL; - } - - /* - * Check for overdeep scope and prototype chain. Because resolve, getter, - * and setter hooks can change the prototype chain using JS_SetPrototype - * after js_LookupPropertyWithFlags has returned the nominal protoIndex, - * we have to validate protoIndex if it is non-zero. If it is zero, then - * we know thanks to the scope->hasProperty test above, combined with the - * fact that obj == pobj, that protoIndex is invariant. - * - * The scopeIndex can't be wrong. We require JS_SetParent calls to happen - * before any running script might consult a parent-linked scope chain. If - * this requirement is not satisfied, the fill in progress will never hit, - * but vcap vs. scope shape tests ensure nothing malfunctions. 
- */ - JS_ASSERT_IF(scopeIndex == 0 && protoIndex == 0, obj == pobj); - - if (protoIndex != 0) { - JSObject *tmp = obj; - - for (uintN i = 0; i != scopeIndex; i++) - tmp = tmp->getParent(); - JS_ASSERT(tmp != pobj); - - protoIndex = 1; - for (;;) { - tmp = tmp->getProto(); - - /* - * We cannot cache properties coming from native objects behind - * non-native ones on the prototype chain. The non-natives can - * mutate in arbitrary way without changing any shapes. - */ - if (!tmp || !OBJ_IS_NATIVE(tmp)) { - PCMETER(cache->noprotos++); - return JS_NO_PROP_CACHE_FILL; - } - if (tmp == pobj) - break; - ++protoIndex; - } - } - - if (scopeIndex > PCVCAP_SCOPEMASK || protoIndex > PCVCAP_PROTOMASK) { - PCMETER(cache->longchains++); - return JS_NO_PROP_CACHE_FILL; - } - - /* - * Optimize the cached vword based on our parameters and the current pc's - * opcode format flags. - */ - pc = cx->fp->regs->pc; - op = js_GetOpcode(cx, cx->fp->script, pc); - cs = &js_CodeSpec[op]; - kshape = 0; - - do { - /* - * Check for a prototype "plain old method" callee computation. What - * is a plain old method? It's a function-valued property with stub - * getter, so get of a function is idempotent. - */ - if (cs->format & JOF_CALLOP) { - jsval v; - - if (sprop->isMethod()) { - /* - * A compiler-created function object, AKA a method, already - * memoized in the property tree. - */ - JS_ASSERT(scope->hasMethodBarrier()); - v = sprop->methodValue(); - JS_ASSERT(VALUE_IS_FUNCTION(cx, v)); - JS_ASSERT(v == LOCKED_OBJ_GET_SLOT(pobj, sprop->slot)); - vword = JSVAL_OBJECT_TO_PCVAL(v); - break; - } - - if (!scope->generic() && - SPROP_HAS_STUB_GETTER(sprop) && - SPROP_HAS_VALID_SLOT(sprop, scope)) { - v = LOCKED_OBJ_GET_SLOT(pobj, sprop->slot); - if (VALUE_IS_FUNCTION(cx, v)) { - /* - * Great, we have a function-valued prototype property - * where the getter is JS_PropertyStub. The type id in - * pobj's scope does not evolve with changes to property - * values, however. - * - * So here, on first cache fill for this method, we brand - * the scope with a new shape and set the JSScope::BRANDED - * flag. Once this flag is set, any property assignment - * that changes the value from or to a different function - * object will result in shape being regenerated. - */ - if (!scope->branded()) { - PCMETER(cache->brandfills++); -#ifdef DEBUG_notme - fprintf(stderr, - "branding %p (%s) for funobj %p (%s), shape %lu\n", - pobj, pobj->getClass()->name, - JSVAL_TO_OBJECT(v), - JS_GetFunctionName(GET_FUNCTION_PRIVATE(cx, JSVAL_TO_OBJECT(v))), - OBJ_SHAPE(obj)); -#endif - scope->brandingShapeChange(cx, sprop->slot, v); - if (js_IsPropertyCacheDisabled(cx)) /* check for rt->shapeGen overflow */ - return JS_NO_PROP_CACHE_FILL; - scope->setBranded(); - } - vword = JSVAL_OBJECT_TO_PCVAL(v); - break; - } - } - } - - /* If getting a value via a stub getter, we can cache the slot. */ - if (!(cs->format & (JOF_SET | JOF_INCDEC | JOF_FOR)) && - SPROP_HAS_STUB_GETTER(sprop) && - SPROP_HAS_VALID_SLOT(sprop, scope)) { - /* Great, let's cache sprop's slot and use it on cache hit. */ - vword = SLOT_TO_PCVAL(sprop->slot); - } else { - /* Best we can do is to cache sprop (still a nice speedup). */ - vword = SPROP_TO_PCVAL(sprop); - if (adding && - sprop == scope->lastProperty() && - scope->shape == sprop->shape) { - /* - * Our caller added a new property. We also know that a setter - * that js_NativeSet could have run has not mutated the scope, - * so the added property is still the last one added, and the - * scope is not branded. 
- * - * We want to cache under scope's shape before the property - * addition to bias for the case when the mutator opcode - * always adds the same property. This allows us to optimize - * periodic execution of object initializers or other explicit - * initialization sequences such as - * - * obj = {}; obj.x = 1; obj.y = 2; - * - * We assume that on average the win from this optimization is - * greater than the cost of an extra mismatch per loop owing to - * the bias for the following case: - * - * obj = {}; ... for (...) { ... obj.x = ... } - * - * On the first iteration of such a for loop, JSOP_SETPROP - * fills the cache with the shape of the newly created object - * obj, not the shape of obj after obj.x has been assigned. - * That mismatches obj's shape on the second iteration. Note - * that on the third and subsequent iterations the cache will - * be hit because the shape is no longer updated. - */ - JS_ASSERT(!scope->isSharedEmpty()); - if (sprop->parent) { - kshape = sprop->parent->shape; - } else { - /* - * If obj had its own empty scope before, with a unique - * shape, that is lost. Here we only attempt to find a - * matching empty scope. In unusual cases involving - * __proto__ assignment we may not find one. - */ - JSObject *proto = obj->getProto(); - if (!proto || !OBJ_IS_NATIVE(proto)) - return JS_NO_PROP_CACHE_FILL; - JSScope *protoscope = OBJ_SCOPE(proto); - if (!protoscope->emptyScope || - protoscope->emptyScope->clasp != obj->getClass()) { - return JS_NO_PROP_CACHE_FILL; - } - kshape = protoscope->emptyScope->shape; - } - - /* - * When adding we predict no prototype object will later gain a - * readonly property or setter. - */ - vshape = cx->runtime->protoHazardShape; - } - } - } while (0); - - if (kshape == 0) { - kshape = OBJ_SHAPE(obj); - vshape = scope->shape; - } - JS_ASSERT(kshape < SHAPE_OVERFLOW_BIT); - - khash = PROPERTY_CACHE_HASH_PC(pc, kshape); - if (obj == pobj) { - JS_ASSERT(scopeIndex == 0 && protoIndex == 0); - } else { - if (op == JSOP_LENGTH) { - atom = cx->runtime->atomState.lengthAtom; - } else { - pcoff = (JOF_TYPE(cs->format) == JOF_SLOTATOM) ? SLOTNO_LEN : 0; - GET_ATOM_FROM_BYTECODE(cx->fp->script, pc, pcoff, atom); - } - -#ifdef DEBUG - if (scopeIndex == 0) { - JS_ASSERT(protoIndex != 0); - JS_ASSERT((protoIndex == 1) == (obj->getProto() == pobj)); - } -#endif - - if (scopeIndex != 0 || protoIndex != 1) { - khash = PROPERTY_CACHE_HASH_ATOM(atom, obj); - PCMETER(if (PCVCAP_TAG(cache->table[khash].vcap) <= 1) - cache->pcrecycles++); - pc = (jsbytecode *) atom; - kshape = (jsuword) obj; - - /* - * Make sure that a later shadowing assignment will enter - * PurgeProtoChain and invalidate this entry, bug 479198. - * - * This is thread-safe even though obj is not locked. Only the - * DELEGATE bit of obj->classword can change at runtime, given that - * obj is native; and the bit is only set, never cleared. And on - * platforms where another CPU can fail to see this write, it's OK - * because the property cache and JIT cache are thread-local. - */ - obj->setDelegate(); - } - } - JS_ASSERT(vshape < SHAPE_OVERFLOW_BIT); - - entry = &cache->table[khash]; - PCMETER(PCVAL_IS_NULL(entry->vword) || cache->recycles++); - entry->kpc = pc; - entry->kshape = kshape; - entry->vcap = PCVCAP_MAKE(vshape, scopeIndex, protoIndex); - entry->vword = vword; - - cache->empty = JS_FALSE; - PCMETER(cache->fills++); - - /* - * The modfills counter is not exact. It increases if a getter or setter - * recurse into the interpreter. 
- */ - PCMETER(entry == cache->pctestentry || cache->modfills++); - PCMETER(cache->pctestentry = NULL); - return entry; -} - -JS_REQUIRES_STACK JSAtom * -js_FullTestPropertyCache(JSContext *cx, jsbytecode *pc, - JSObject **objp, JSObject **pobjp, - JSPropCacheEntry **entryp) -{ - JSOp op; - const JSCodeSpec *cs; - ptrdiff_t pcoff; - JSAtom *atom; - JSObject *obj, *pobj, *tmp; - JSPropCacheEntry *entry; - uint32 vcap; - - JS_ASSERT(uintN((cx->fp->imacpc ? cx->fp->imacpc : pc) - cx->fp->script->code) - < cx->fp->script->length); - - op = js_GetOpcode(cx, cx->fp->script, pc); - cs = &js_CodeSpec[op]; - if (op == JSOP_LENGTH) { - atom = cx->runtime->atomState.lengthAtom; - } else { - pcoff = (JOF_TYPE(cs->format) == JOF_SLOTATOM) ? SLOTNO_LEN : 0; - GET_ATOM_FROM_BYTECODE(cx->fp->script, pc, pcoff, atom); - } - - obj = *objp; - JS_ASSERT(OBJ_IS_NATIVE(obj)); - entry = &JS_PROPERTY_CACHE(cx).table[PROPERTY_CACHE_HASH_ATOM(atom, obj)]; - *entryp = entry; - vcap = entry->vcap; - - if (entry->kpc != (jsbytecode *) atom) { - PCMETER(JS_PROPERTY_CACHE(cx).idmisses++); - -#ifdef DEBUG_notme - entry = &JS_PROPERTY_CACHE(cx).table[PROPERTY_CACHE_HASH_PC(pc, OBJ_SHAPE(obj))]; - fprintf(stderr, - "id miss for %s from %s:%u" - " (pc %u, kpc %u, kshape %u, shape %u)\n", - js_AtomToPrintableString(cx, atom), - cx->fp->script->filename, - js_PCToLineNumber(cx, cx->fp->script, pc), - pc - cx->fp->script->code, - entry->kpc - cx->fp->script->code, - entry->kshape, - OBJ_SHAPE(obj)); - js_Disassemble1(cx, cx->fp->script, pc, - pc - cx->fp->script->code, - JS_FALSE, stderr); -#endif - - return atom; - } - - if (entry->kshape != (jsuword) obj) { - PCMETER(JS_PROPERTY_CACHE(cx).komisses++); - return atom; - } - - pobj = obj; - - if (JOF_MODE(cs->format) == JOF_NAME) { - while (vcap & (PCVCAP_SCOPEMASK << PCVCAP_PROTOBITS)) { - tmp = pobj->getParent(); - if (!tmp || !OBJ_IS_NATIVE(tmp)) - break; - pobj = tmp; - vcap -= PCVCAP_PROTOSIZE; - } - - *objp = pobj; - } - - while (vcap & PCVCAP_PROTOMASK) { - tmp = pobj->getProto(); - if (!tmp || !OBJ_IS_NATIVE(tmp)) - break; - pobj = tmp; - --vcap; - } - - if (js_MatchPropertyCacheShape(cx, pobj, PCVCAP_SHAPE(vcap))) { -#ifdef DEBUG - jsid id = ATOM_TO_JSID(atom); - - id = js_CheckForStringIndex(id); - JS_ASSERT(OBJ_SCOPE(pobj)->lookup(id)); - JS_ASSERT_IF(OBJ_SCOPE(pobj)->object, OBJ_SCOPE(pobj)->object == pobj); -#endif - *pobjp = pobj; - return NULL; - } - - PCMETER(JS_PROPERTY_CACHE(cx).vcmisses++); - return atom; -} - -#ifdef DEBUG -#define ASSERT_CACHE_IS_EMPTY(cache) \ - JS_BEGIN_MACRO \ - JSPropertyCache *cache_ = (cache); \ - uintN i_; \ - JS_ASSERT(cache_->empty); \ - for (i_ = 0; i_ < PROPERTY_CACHE_SIZE; i_++) { \ - JS_ASSERT(!cache_->table[i_].kpc); \ - JS_ASSERT(!cache_->table[i_].kshape); \ - JS_ASSERT(!cache_->table[i_].vcap); \ - JS_ASSERT(!cache_->table[i_].vword); \ - } \ - JS_END_MACRO -#else -#define ASSERT_CACHE_IS_EMPTY(cache) ((void)0) -#endif - -JS_STATIC_ASSERT(PCVAL_NULL == 0); - -void -js_PurgePropertyCache(JSContext *cx, JSPropertyCache *cache) -{ - if (cache->empty) { - ASSERT_CACHE_IS_EMPTY(cache); - return; - } - - memset(cache->table, 0, sizeof cache->table); - cache->empty = JS_TRUE; - -#ifdef JS_PROPERTY_CACHE_METERING - { static FILE *fp; - if (!fp) - fp = fopen("/tmp/propcache.stats", "w"); - if (fp) { - fputs("Property cache stats for ", fp); -#ifdef JS_THREADSAFE - fprintf(fp, "thread %lu, ", (unsigned long) cx->thread->id); -#endif - fprintf(fp, "GC %u\n", cx->runtime->gcNumber); - -# define P(mem) fprintf(fp, "%11s %10lu\n", #mem, 
(unsigned long)cache->mem) - P(fills); - P(nofills); - P(rofills); - P(disfills); - P(oddfills); - P(modfills); - P(brandfills); - P(noprotos); - P(longchains); - P(recycles); - P(pcrecycles); - P(tests); - P(pchits); - P(protopchits); - P(initests); - P(inipchits); - P(inipcmisses); - P(settests); - P(addpchits); - P(setpchits); - P(setpcmisses); - P(setmisses); - P(idmisses); - P(komisses); - P(vcmisses); - P(misses); - P(flushes); - P(pcpurges); -# undef P - - fprintf(fp, "hit rates: pc %g%% (proto %g%%), set %g%%, ini %g%%, full %g%%\n", - (100. * cache->pchits) / cache->tests, - (100. * cache->protopchits) / cache->tests, - (100. * (cache->addpchits + cache->setpchits)) - / cache->settests, - (100. * cache->inipchits) / cache->initests, - (100. * (cache->tests - cache->misses)) / cache->tests); - fflush(fp); - } - } -#endif - - PCMETER(cache->flushes++); -} - -void -js_PurgePropertyCacheForScript(JSContext *cx, JSScript *script) -{ - JSPropertyCache *cache; - JSPropCacheEntry *entry; - - cache = &JS_PROPERTY_CACHE(cx); - for (entry = cache->table; entry < cache->table + PROPERTY_CACHE_SIZE; - entry++) { - if (JS_UPTRDIFF(entry->kpc, script->code) < script->length) { - entry->kpc = NULL; - entry->kshape = 0; -#ifdef DEBUG - entry->vcap = entry->vword = 0; -#endif - } - } -} - /* * Check if the current arena has enough space to fit nslots after sp and, if * so, reserve the necessary space. @@ -800,7 +314,7 @@ js_GetScopeChain(JSContext *cx, JSStackFrame *fp) JSObject *innermostNewChild = js_CloneBlockObject(cx, sharedBlock, fp); if (!innermostNewChild) return NULL; - JSAutoTempValueRooter tvr(cx, innermostNewChild); + AutoValueRooter tvr(cx, innermostNewChild); /* * Clone our way towards outer scopes until we reach the innermost @@ -885,7 +399,7 @@ CallThisObjectHook(JSContext *cx, JSObject *obj, jsval *argv) * The alert should display "true". */ JS_STATIC_INTERPRET JSObject * -js_ComputeGlobalThis(JSContext *cx, JSBool lazy, jsval *argv) +js_ComputeGlobalThis(JSContext *cx, jsval *argv) { JSObject *thisp; @@ -893,57 +407,14 @@ js_ComputeGlobalThis(JSContext *cx, JSBool lazy, jsval *argv) !JSVAL_TO_OBJECT(argv[-2])->getParent()) { thisp = cx->globalObject; } else { - jsid id; - jsval v; - uintN attrs; - JSBool ok; - JSObject *parent; - - /* - * Walk up the parent chain, first checking that the running script - * has access to the callee's parent object. Note that if lazy, the - * running script whose principals we want to check is the script - * associated with fp->down, not with fp. - * - * FIXME: 417851 -- this access check should not be required, as it - * imposes a performance penalty on all js_ComputeGlobalThis calls, - * and it represents a maintenance hazard. - * - * When the above FIXME is made fixed, the whole GC reachable frame - * mechanism can be removed as well. - */ - JSStackFrame *fp = js_GetTopStackFrame(cx); - JSGCReachableFrame reachable; - if (lazy) { - JS_ASSERT(fp->argv == argv); - cx->fp = fp->down; - fp->down = NULL; - cx->pushGCReachableFrame(reachable, fp); - } - thisp = JSVAL_TO_OBJECT(argv[-2]); - id = ATOM_TO_JSID(cx->runtime->atomState.parentAtom); - - ok = thisp->checkAccess(cx, id, JSACC_PARENT, &v, &attrs); - if (lazy) { - fp->down = cx->fp; - cx->fp = fp; - cx->popGCReachableFrame(); - } - if (!ok) - return NULL; - - if (v != JSVAL_NULL) { - thisp = JSVAL_IS_VOID(v) ? 
thisp->getParent() : JSVAL_TO_OBJECT(v); - while ((parent = thisp->getParent()) != NULL) - thisp = parent; - } + thisp = JSVAL_TO_OBJECT(argv[-2])->getGlobal(); } return CallThisObjectHook(cx, thisp, argv); } static JSObject * -ComputeThis(JSContext *cx, JSBool lazy, jsval *argv) +ComputeThis(JSContext *cx, jsval *argv) { JSObject *thisp; @@ -957,18 +428,18 @@ ComputeThis(JSContext *cx, JSBool lazy, jsval *argv) thisp = JSVAL_TO_OBJECT(argv[-1]); if (OBJ_GET_CLASS(cx, thisp) == &js_CallClass || OBJ_GET_CLASS(cx, thisp) == &js_BlockClass) - return js_ComputeGlobalThis(cx, lazy, argv); + return js_ComputeGlobalThis(cx, argv); return CallThisObjectHook(cx, thisp, argv); } JSObject * -js_ComputeThis(JSContext *cx, JSBool lazy, jsval *argv) +js_ComputeThis(JSContext *cx, jsval *argv) { JS_ASSERT(argv[-1] != JSVAL_HOLE); // check for SynthesizeFrame poisoning if (JSVAL_IS_NULL(argv[-1])) - return js_ComputeGlobalThis(cx, lazy, argv); - return ComputeThis(cx, lazy, argv); + return js_ComputeGlobalThis(cx, argv); + return ComputeThis(cx, argv); } #if JS_HAS_NO_SUCH_METHOD @@ -1005,7 +476,7 @@ js_OnUnknownMethod(JSContext *cx, jsval *vp) JSObject *obj = JSVAL_TO_OBJECT(vp[1]); jsid id = ATOM_TO_JSID(cx->runtime->atomState.noSuchMethodAtom); - JSAutoTempValueRooter tvr(cx, JSVAL_NULL); + AutoValueRooter tvr(cx, JSVAL_NULL); if (!js_GetMethod(cx, obj, id, JSGET_NO_METHOD_BARRIER, tvr.addr())) return false; if (JSVAL_IS_PRIMITIVE(tvr.value())) { @@ -1047,7 +518,7 @@ NoSuchMethod(JSContext *cx, uintN argc, jsval *vp, uint32 flags) JS_ASSERT(!JSVAL_IS_PRIMITIVE(vp[0])); JS_ASSERT(!JSVAL_IS_PRIMITIVE(vp[1])); obj = JSVAL_TO_OBJECT(vp[0]); - JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_NoSuchMethodClass); + JS_ASSERT(obj->getClass() == &js_NoSuchMethodClass); invokevp[0] = obj->fslots[JSSLOT_FOUND_FUNCTION]; invokevp[1] = vp[1]; @@ -1209,7 +680,7 @@ js_Invoke(JSContext *cx, uintN argc, jsval *vp, uintN flags) * the appropriate this-computing bytecode, e.g., JSOP_THIS. */ if (native && (!fun || !(fun->flags & JSFUN_FAST_NATIVE))) { - if (!js_ComputeThis(cx, JS_FALSE, vp + 2)) { + if (!js_ComputeThis(cx, vp + 2)) { ok = JS_FALSE; goto out2; } @@ -1466,33 +937,6 @@ js_InternalGetOrSet(JSContext *cx, JSObject *obj, jsid id, jsval fval, return js_InternalCall(cx, obj, fval, argc, argv, rval); } -CallStack * -js_ContainingCallStack(JSContext *cx, JSStackFrame *target) -{ - JS_ASSERT(cx->fp); - - /* The active callstack's top frame is cx->fp. */ - CallStack *cs = cx->activeCallStack(); - JSStackFrame *f = cx->fp; - JSStackFrame *stop = cs->getInitialFrame()->down; - for (; f != stop; f = f->down) { - if (f == target) - return cs; - } - - /* A suspended callstack's top frame is its suspended frame. 
*/ - for (cs = cs->getPrevious(); cs; cs = cs->getPrevious()) { - f = cs->getSuspendedFrame(); - stop = cs->getInitialFrame()->down; - for (; f != stop; f = f->down) { - if (f == target) - return cs; - } - } - - return NULL; -} - JSBool js_Execute(JSContext *cx, JSObject *chain, JSScript *script, JSStackFrame *down, uintN flags, jsval *result) @@ -1547,7 +991,7 @@ js_Execute(JSContext *cx, JSObject *chain, JSScript *script, if (down == cx->fp) { callStack.setInitialVarObj(down->varobj(cx)); } else { - CallStack *cs = js_ContainingCallStack(cx, down); + CallStack *cs = cx->containingCallStack(down); callStack.setInitialVarObj(down->varobj(cs)); } } else { @@ -2101,7 +1545,7 @@ js_GetUpvar(JSContext *cx, uintN level, uintN cookie) uintN slot = UPVAR_FRAME_SLOT(cookie); jsval *vp; - if (!fp->fun) { + if (!fp->fun || (fp->flags & JSFRAME_EVAL)) { vp = fp->slots + fp->script->nfixed; } else if (slot < fp->fun->nargs) { vp = fp->argv; @@ -2697,7 +2141,7 @@ JS_STATIC_ASSERT(!CAN_DO_FAST_INC_DEC(INT_TO_JSVAL_CONSTEXPR(JSVAL_INT_MAX))); #ifdef JS_REPRMETER # define METER_REPR(fp) (reprmeter::MeterRepr(fp)) #else -# define METER_REPR(fp) +# define METER_REPR(fp) ((void) 0) #endif /* JS_REPRMETER */ /* @@ -2735,7 +2179,7 @@ JS_STATIC_ASSERT(!CAN_DO_FAST_INC_DEC(INT_TO_JSVAL_CONSTEXPR(JSVAL_INT_MAX))); static bool AssertValidPropertyCacheHit(JSContext *cx, JSScript *script, JSFrameRegs& regs, ptrdiff_t pcoff, JSObject *start, JSObject *found, - JSPropCacheEntry *entry) + PropertyCacheEntry *entry) { uint32 sample = cx->runtime->gcNumber; @@ -2757,8 +2201,7 @@ AssertValidPropertyCacheHit(JSContext *cx, JSScript *script, JSFrameRegs& regs, } if (!ok) return false; - if (cx->runtime->gcNumber != sample || - PCVCAP_SHAPE(entry->vcap) != OBJ_SHAPE(pobj)) { + if (cx->runtime->gcNumber != sample || entry->vshape() != OBJ_SHAPE(pobj)) { pobj->dropProperty(cx, prop); return true; } @@ -2766,23 +2209,23 @@ AssertValidPropertyCacheHit(JSContext *cx, JSScript *script, JSFrameRegs& regs, JS_ASSERT(pobj == found); JSScopeProperty *sprop = (JSScopeProperty *) prop; - if (PCVAL_IS_SLOT(entry->vword)) { - JS_ASSERT(PCVAL_TO_SLOT(entry->vword) == sprop->slot); + if (entry->vword.isSlot()) { + JS_ASSERT(entry->vword.toSlot() == sprop->slot); JS_ASSERT(!sprop->isMethod()); - } else if (PCVAL_IS_SPROP(entry->vword)) { - JS_ASSERT(PCVAL_TO_SPROP(entry->vword) == sprop); + } else if (entry->vword.isSprop()) { + JS_ASSERT(entry->vword.toSprop() == sprop); JS_ASSERT_IF(sprop->isMethod(), sprop->methodValue() == LOCKED_OBJ_GET_SLOT(pobj, sprop->slot)); } else { jsval v; - JS_ASSERT(PCVAL_IS_OBJECT(entry->vword)); - JS_ASSERT(entry->vword != PCVAL_NULL); + JS_ASSERT(entry->vword.isObject()); + JS_ASSERT(!entry->vword.isNull()); JS_ASSERT(OBJ_SCOPE(pobj)->brandedOrHasMethodBarrier()); - JS_ASSERT(SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop)); + JS_ASSERT(sprop->hasDefaultGetterOrIsMethod()); JS_ASSERT(SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(pobj))); v = LOCKED_OBJ_GET_SLOT(pobj, sprop->slot); JS_ASSERT(VALUE_IS_FUNCTION(cx, v)); - JS_ASSERT(PCVAL_TO_OBJECT(entry->vword) == JSVAL_TO_OBJECT(v)); + JS_ASSERT(entry->vword.toObject() == JSVAL_TO_OBJECT(v)); if (sprop->isMethod()) { JS_ASSERT(js_CodeSpec[*regs.pc].format & JOF_CALLOP); diff --git a/js/src/jsinterp.h b/js/src/jsinterp.h index b6ae9aeee5a..fa53ca1f5af 100644 --- a/js/src/jsinterp.h +++ b/js/src/jsinterp.h @@ -167,16 +167,11 @@ struct JSStackFrame { /* Short for: varobj(cx->activeCallStack()). 
 */
    JSObject *varobj(JSContext *cx);
+
+    inline JSObject *getThisObject(JSContext *cx);
 };

 #ifdef __cplusplus
-/*
- * Perform a linear search of all frames in all callstacks in the given context
- * for the given frame, returning the callstack, if found, and NULL otherwise.
- */
-extern js::CallStack *
-js_ContainingCallStack(JSContext *cx, JSStackFrame *target);
-
 static JS_INLINE uintN
 FramePCOffset(JSStackFrame* fp)
 {
@@ -233,224 +228,6 @@ typedef struct JSInlineFrame {

 #define JSFRAME_SPECIAL (JSFRAME_DEBUGGER | JSFRAME_EVAL)

-/*
- * Property cache with structurally typed capabilities for invalidation, for
- * polymorphic callsite method/get/set speedups. For details, see
- * <https://developer.mozilla.org/en/SpiderMonkey/Internals/Property_cache>.
- */
-#define PROPERTY_CACHE_LOG2     12
-#define PROPERTY_CACHE_SIZE     JS_BIT(PROPERTY_CACHE_LOG2)
-#define PROPERTY_CACHE_MASK     JS_BITMASK(PROPERTY_CACHE_LOG2)
-
-/*
- * Add kshape rather than xor it to avoid collisions between nearby bytecode
- * that are evolving an object by setting successive properties, incrementing
- * the object's scope->shape on each set.
- */
-#define PROPERTY_CACHE_HASH(pc,kshape)                                        \
-    (((((jsuword)(pc) >> PROPERTY_CACHE_LOG2) ^ (jsuword)(pc)) + (kshape)) &  \
-     PROPERTY_CACHE_MASK)
-
-#define PROPERTY_CACHE_HASH_PC(pc,kshape)                                     \
-    PROPERTY_CACHE_HASH(pc, kshape)
-
-#define PROPERTY_CACHE_HASH_ATOM(atom,obj)                                    \
-    PROPERTY_CACHE_HASH((jsuword)(atom) >> 2, OBJ_SHAPE(obj))
-
-/*
- * Property cache value capability macros.
- */
-#define PCVCAP_PROTOBITS 4
-#define PCVCAP_PROTOSIZE JS_BIT(PCVCAP_PROTOBITS)
-#define PCVCAP_PROTOMASK JS_BITMASK(PCVCAP_PROTOBITS)
-
-#define PCVCAP_SCOPEBITS 4
-#define PCVCAP_SCOPESIZE JS_BIT(PCVCAP_SCOPEBITS)
-#define PCVCAP_SCOPEMASK JS_BITMASK(PCVCAP_SCOPEBITS)
-
-#define PCVCAP_TAGBITS (PCVCAP_PROTOBITS + PCVCAP_SCOPEBITS)
-#define PCVCAP_TAGMASK JS_BITMASK(PCVCAP_TAGBITS)
-#define PCVCAP_TAG(t) ((t) & PCVCAP_TAGMASK)
-
-#define PCVCAP_MAKE(t,s,p) ((uint32(t) << PCVCAP_TAGBITS) |                   \
-                            ((s) << PCVCAP_PROTOBITS) |                       \
-                            (p))
-#define PCVCAP_SHAPE(t) ((t) >> PCVCAP_TAGBITS)
-
-#define SHAPE_OVERFLOW_BIT JS_BIT(32 - PCVCAP_TAGBITS)
-
-struct JSPropCacheEntry {
-    jsbytecode          *kpc;           /* pc if vcap tag is <= 1, else atom */
-    jsuword             kshape;         /* key shape if pc, else obj for atom */
-    jsuword             vcap;           /* value capability, see above */
-    jsuword             vword;          /* value word, see PCVAL_* below */
-
-    bool adding() const {
-        return PCVCAP_TAG(vcap) == 0 && kshape != PCVCAP_SHAPE(vcap);
-    }
-
-    bool directHit() const {
-        return PCVCAP_TAG(vcap) == 0 && kshape == PCVCAP_SHAPE(vcap);
-    }
-};
-
-/*
- * Special value for functions returning JSPropCacheEntry * to distinguish
- * between failure and no-cache-fill cases.
- */
-#define JS_NO_PROP_CACHE_FILL ((JSPropCacheEntry *) NULL + 1)
-
-#if defined DEBUG_brendan || defined DEBUG_brendaneich
-#define JS_PROPERTY_CACHE_METERING 1
-#endif
-
-typedef struct JSPropertyCache {
-    JSPropCacheEntry    table[PROPERTY_CACHE_SIZE];
-    JSBool              empty;
-#ifdef JS_PROPERTY_CACHE_METERING
-    JSPropCacheEntry    *pctestentry;   /* entry of the last PC-based test */
-    uint32              fills;          /* number of cache entry fills */
-    uint32              nofills;        /* couldn't fill (e.g. 
default get) */ - uint32 rofills; /* set on read-only prop can't fill */ - uint32 disfills; /* fill attempts on disabled cache */ - uint32 oddfills; /* fill attempt after setter deleted */ - uint32 modfills; /* fill that rehashed to a new entry */ - uint32 brandfills; /* scope brandings to type structural - method fills */ - uint32 noprotos; /* resolve-returned non-proto pobj */ - uint32 longchains; /* overlong scope and/or proto chain */ - uint32 recycles; /* cache entries recycled by fills */ - uint32 pcrecycles; /* pc-keyed entries recycled by atom- - keyed fills */ - uint32 tests; /* cache probes */ - uint32 pchits; /* fast-path polymorphic op hits */ - uint32 protopchits; /* pchits hitting immediate prototype */ - uint32 initests; /* cache probes from JSOP_INITPROP */ - uint32 inipchits; /* init'ing next property pchit case */ - uint32 inipcmisses; /* init'ing next property pc misses */ - uint32 settests; /* cache probes from JOF_SET opcodes */ - uint32 addpchits; /* adding next property pchit case */ - uint32 setpchits; /* setting existing property pchit */ - uint32 setpcmisses; /* setting/adding property pc misses */ - uint32 setmisses; /* JSOP_SET{NAME,PROP} total misses */ - uint32 idmisses; /* slow-path key id == atom misses */ - uint32 komisses; /* slow-path key object misses */ - uint32 vcmisses; /* value capability misses */ - uint32 misses; /* cache misses */ - uint32 flushes; /* cache flushes */ - uint32 pcpurges; /* shadowing purges on proto chain */ -# define PCMETER(x) x -#else -# define PCMETER(x) ((void)0) -#endif -} JSPropertyCache; - -/* - * Property cache value tagging/untagging macros. - */ -#define PCVAL_OBJECT 0 -#define PCVAL_SLOT 1 -#define PCVAL_SPROP 2 - -#define PCVAL_TAGBITS 2 -#define PCVAL_TAGMASK JS_BITMASK(PCVAL_TAGBITS) -#define PCVAL_TAG(v) ((v) & PCVAL_TAGMASK) -#define PCVAL_CLRTAG(v) ((v) & ~(jsuword)PCVAL_TAGMASK) -#define PCVAL_SETTAG(v,t) ((jsuword)(v) | (t)) - -#define PCVAL_NULL 0 -#define PCVAL_IS_NULL(v) ((v) == PCVAL_NULL) - -#define PCVAL_IS_OBJECT(v) (PCVAL_TAG(v) == PCVAL_OBJECT) -#define PCVAL_TO_OBJECT(v) ((JSObject *) (v)) -#define OBJECT_TO_PCVAL(obj) ((jsuword) (obj)) - -#define PCVAL_OBJECT_TO_JSVAL(v) OBJECT_TO_JSVAL(PCVAL_TO_OBJECT(v)) -#define JSVAL_OBJECT_TO_PCVAL(v) OBJECT_TO_PCVAL(JSVAL_TO_OBJECT(v)) - -#define PCVAL_IS_SLOT(v) ((v) & PCVAL_SLOT) -#define PCVAL_TO_SLOT(v) ((jsuint)(v) >> 1) -#define SLOT_TO_PCVAL(i) (((jsuword)(i) << 1) | PCVAL_SLOT) - -#define PCVAL_IS_SPROP(v) (PCVAL_TAG(v) == PCVAL_SPROP) -#define PCVAL_TO_SPROP(v) ((JSScopeProperty *) PCVAL_CLRTAG(v)) -#define SPROP_TO_PCVAL(sprop) PCVAL_SETTAG(sprop, PCVAL_SPROP) - -inline bool -js_MatchPropertyCacheShape(JSContext *cx, JSObject *obj, uint32 shape); - -/* - * Fill property cache entry for key cx->fp->pc, optimized value word computed - * from obj and sprop, and entry capability forged from 24-bit OBJ_SHAPE(obj), - * 4-bit scopeIndex, and 4-bit protoIndex. - * - * Return the filled cache entry or JS_NO_PROP_CACHE_FILL if caching was not - * possible. - */ -extern JS_REQUIRES_STACK JSPropCacheEntry * -js_FillPropertyCache(JSContext *cx, JSObject *obj, - uintN scopeIndex, uintN protoIndex, JSObject *pobj, - JSScopeProperty *sprop, JSBool adding); - -/* - * Property cache lookup macros. PROPERTY_CACHE_TEST is designed to inline the - * fast path in js_Interpret, so it makes "just-so" restrictions on parameters, - * e.g. pobj and obj should not be the same variable, since for JOF_PROP-mode - * opcodes, obj must not be changed because of a cache miss. 
- * - * On return from PROPERTY_CACHE_TEST, if atom is null then obj points to the - * scope chain element in which the property was found, pobj is locked, and - * entry is valid. If atom is non-null then no object is locked but entry is - * still set correctly for use, e.g., by js_FillPropertyCache and atom should - * be used as the id to find. - * - * We must lock pobj on a hit in order to close races with threads that might - * be deleting a property from its scope, or otherwise invalidating property - * caches (on all threads) by re-generating scope->shape. - */ -#define PROPERTY_CACHE_TEST(cx, pc, obj, pobj, entry, atom) \ - do { \ - JSPropertyCache *cache_ = &JS_PROPERTY_CACHE(cx); \ - uint32 kshape_ = (JS_ASSERT(OBJ_IS_NATIVE(obj)), OBJ_SHAPE(obj)); \ - entry = &cache_->table[PROPERTY_CACHE_HASH_PC(pc, kshape_)]; \ - PCMETER(cache_->pctestentry = entry); \ - PCMETER(cache_->tests++); \ - JS_ASSERT(&obj != &pobj); \ - if (entry->kpc == pc && entry->kshape == kshape_) { \ - JSObject *tmp_; \ - pobj = obj; \ - JS_ASSERT(PCVCAP_TAG(entry->vcap) <= 1); \ - if (PCVCAP_TAG(entry->vcap) == 1 && \ - (tmp_ = pobj->getProto()) != NULL) { \ - pobj = tmp_; \ - } \ - \ - if (js_MatchPropertyCacheShape(cx,pobj,PCVCAP_SHAPE(entry->vcap))){\ - PCMETER(cache_->pchits++); \ - PCMETER(!PCVCAP_TAG(entry->vcap) || cache_->protopchits++); \ - atom = NULL; \ - break; \ - } \ - } \ - atom = js_FullTestPropertyCache(cx, pc, &obj, &pobj, &entry); \ - if (atom) \ - PCMETER(cache_->misses++); \ - } while (0) - -extern JS_REQUIRES_STACK JSAtom * -js_FullTestPropertyCache(JSContext *cx, jsbytecode *pc, - JSObject **objp, JSObject **pobjp, - JSPropCacheEntry **entryp); - -/* The property cache does not need a destructor. */ -#define js_FinishPropertyCache(cache) ((void) 0) - -extern void -js_PurgePropertyCache(JSContext *cx, JSPropertyCache *cache); - -extern void -js_PurgePropertyCacheForScript(JSContext *cx, JSScript *script); - /* * Interpreter stack arena-pool alloc and free functions. */ @@ -490,7 +267,7 @@ js_GetPrimitiveThis(JSContext *cx, jsval *vp, JSClass *clasp, jsval *thisvp); * must not be a JSVAL_VOID. */ extern JSObject * -js_ComputeThis(JSContext *cx, JSBool lazy, jsval *argv); +js_ComputeThis(JSContext *cx, jsval *argv); extern const uint16 js_PrimitiveTestFlags[]; @@ -499,21 +276,6 @@ extern const uint16 js_PrimitiveTestFlags[]; JSFUN_THISP_TEST(JSFUN_THISP_FLAGS((fun)->flags), \ js_PrimitiveTestFlags[JSVAL_TAG(thisv) - 1])) -#ifdef __cplusplus /* Aargh, libgjs, bug 492720. */ -static JS_INLINE JSObject * -js_ComputeThisForFrame(JSContext *cx, JSStackFrame *fp) -{ - if (fp->flags & JSFRAME_COMPUTED_THIS) - return JSVAL_TO_OBJECT(fp->thisv); /* JSVAL_COMPUTED_THIS invariant */ - JSObject* obj = js_ComputeThis(cx, JS_TRUE, fp->argv); - if (!obj) - return NULL; - fp->thisv = OBJECT_TO_JSVAL(obj); - fp->flags |= JSFRAME_COMPUTED_THIS; - return obj; -} -#endif - /* * NB: js_Invoke requires that cx is currently running JS (i.e., that cx->fp * is non-null), and that vp points to the callee, |this| parameter, and @@ -646,7 +408,7 @@ js_FreeRawStack(JSContext *cx, void *mark); * The alert should display "true". 
*/ extern JSObject * -js_ComputeGlobalThis(JSContext *cx, JSBool lazy, jsval *argv); +js_ComputeGlobalThis(JSContext *cx, jsval *argv); extern JS_REQUIRES_STACK JSBool js_EnterWith(JSContext *cx, jsint stackIndex); @@ -697,4 +459,17 @@ js_MeterSlotOpcode(JSOp op, uint32 slot); JS_END_EXTERN_C +inline JSObject * +JSStackFrame::getThisObject(JSContext *cx) +{ + if (flags & JSFRAME_COMPUTED_THIS) + return JSVAL_TO_OBJECT(thisv); /* JSVAL_COMPUTED_THIS invariant */ + JSObject* obj = js_ComputeThis(cx, argv); + if (!obj) + return NULL; + thisv = OBJECT_TO_JSVAL(obj); + flags |= JSFRAME_COMPUTED_THIS; + return obj; +} + #endif /* jsinterp_h___ */ diff --git a/js/src/jsiter.cpp b/js/src/jsiter.cpp index eaa3c872cd6..55688b0ced8 100644 --- a/js/src/jsiter.cpp +++ b/js/src/jsiter.cpp @@ -93,10 +93,10 @@ js_CloseNativeIterator(JSContext *cx, JSObject *iterobj) jsval state; JSObject *iterable; - JS_ASSERT(STOBJ_GET_CLASS(iterobj) == &js_IteratorClass); + JS_ASSERT(iterobj->getClass() == &js_IteratorClass); /* Avoid double work if js_CloseNativeIterator was called on obj. */ - state = STOBJ_GET_SLOT(iterobj, JSSLOT_ITER_STATE); + state = iterobj->getSlot(JSSLOT_ITER_STATE); if (JSVAL_IS_NULL(state)) return; @@ -104,7 +104,7 @@ js_CloseNativeIterator(JSContext *cx, JSObject *iterobj) iterable = iterobj->getParent(); if (iterable) { #if JS_HAS_XML_SUPPORT - uintN flags = JSVAL_TO_INT(STOBJ_GET_SLOT(iterobj, JSSLOT_ITER_FLAGS)); + uintN flags = JSVAL_TO_INT(iterobj->getSlot(JSSLOT_ITER_FLAGS)); if ((flags & JSITER_FOREACH) && OBJECT_IS_XML(cx, iterable)) { js_EnumerateXMLValues(cx, iterable, JSENUMERATE_DESTROY, &state, NULL, NULL); @@ -112,7 +112,7 @@ js_CloseNativeIterator(JSContext *cx, JSObject *iterobj) #endif iterable->enumerate(cx, JSENUMERATE_DESTROY, &state, NULL); } - STOBJ_SET_SLOT(iterobj, JSSLOT_ITER_STATE, JSVAL_NULL); + iterobj->setSlot(JSSLOT_ITER_STATE, JSVAL_NULL); } static void @@ -150,12 +150,12 @@ InitNativeIterator(JSContext *cx, JSObject *iterobj, JSObject *obj, uintN flags) jsval state; JSBool ok; - JS_ASSERT(STOBJ_GET_CLASS(iterobj) == &js_IteratorClass); + JS_ASSERT(iterobj->getClass() == &js_IteratorClass); /* Initialize iterobj in case of enumerate hook failure. 
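     * js_CloseNativeIterator (above) returns early when JSSLOT_ITER_STATE
     * is null, so these defaults keep a partially constructed iterator safe
     * to close if the enumerate hook below fails.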
*/ iterobj->setParent(obj); - STOBJ_SET_SLOT(iterobj, JSSLOT_ITER_STATE, JSVAL_NULL); - STOBJ_SET_SLOT(iterobj, JSSLOT_ITER_FLAGS, INT_TO_JSVAL(flags)); + iterobj->setSlot(JSSLOT_ITER_STATE, JSVAL_NULL); + iterobj->setSlot(JSSLOT_ITER_FLAGS, INT_TO_JSVAL(flags)); if (!js_RegisterCloseableIterator(cx, iterobj)) return JS_FALSE; if (!obj) @@ -171,7 +171,7 @@ InitNativeIterator(JSContext *cx, JSObject *iterobj, JSObject *obj, uintN flags) if (!ok) return JS_FALSE; - STOBJ_SET_SLOT(iterobj, JSSLOT_ITER_STATE, state); + iterobj->setSlot(JSSLOT_ITER_STATE, state); if (flags & JSITER_ENUMERATE) { /* * The enumerating iterator needs the original object to suppress @@ -215,18 +215,11 @@ Iterator(JSContext *cx, JSObject *iterobj, uintN argc, jsval *argv, jsval *rval) static JSBool NewKeyValuePair(JSContext *cx, jsid key, jsval val, jsval *rval) { - jsval vec[2]; - JSTempValueRooter tvr; - JSObject *aobj; + jsval vec[2] = { ID_TO_VALUE(key), val }; + AutoArrayRooter tvr(cx, JS_ARRAY_LENGTH(vec), vec); - vec[0] = ID_TO_VALUE(key); - vec[1] = val; - - JS_PUSH_TEMP_ROOT(cx, 2, vec, &tvr); - aobj = js_NewArrayObject(cx, 2, vec); + JSObject *aobj = js_NewArrayObject(cx, 2, vec); *rval = OBJECT_TO_JSVAL(aobj); - JS_POP_TEMP_ROOT(cx, &tvr); - return aobj != NULL; } @@ -243,11 +236,11 @@ IteratorNextImpl(JSContext *cx, JSObject *obj, jsval *rval) iterable = obj->getParent(); JS_ASSERT(iterable); - state = STOBJ_GET_SLOT(obj, JSSLOT_ITER_STATE); + state = obj->getSlot(JSSLOT_ITER_STATE); if (JSVAL_IS_NULL(state)) goto stop; - flags = JSVAL_TO_INT(STOBJ_GET_SLOT(obj, JSSLOT_ITER_FLAGS)); + flags = JSVAL_TO_INT(obj->getSlot(JSSLOT_ITER_FLAGS)); JS_ASSERT(!(flags & JSITER_ENUMERATE)); foreach = (flags & JSITER_FOREACH) != 0; ok = @@ -261,7 +254,7 @@ IteratorNextImpl(JSContext *cx, JSObject *obj, jsval *rval) if (!ok) return JS_FALSE; - STOBJ_SET_SLOT(obj, JSSLOT_ITER_STATE, state); + obj->setSlot(JSSLOT_ITER_STATE, state); if (JSVAL_IS_NULL(state)) goto stop; @@ -280,7 +273,7 @@ IteratorNextImpl(JSContext *cx, JSObject *obj, jsval *rval) return JS_TRUE; stop: - JS_ASSERT(STOBJ_GET_SLOT(obj, JSSLOT_ITER_STATE) == JSVAL_NULL); + JS_ASSERT(obj->getSlot(JSSLOT_ITER_STATE) == JSVAL_NULL); *rval = JSVAL_HOLE; return JS_TRUE; } @@ -336,7 +329,7 @@ js_GetNativeIteratorFlags(JSContext *cx, JSObject *iterobj) { if (OBJ_GET_CLASS(cx, iterobj) != &js_IteratorClass) return 0; - return JSVAL_TO_INT(STOBJ_GET_SLOT(iterobj, JSSLOT_ITER_FLAGS)); + return JSVAL_TO_INT(iterobj->getSlot(JSSLOT_ITER_FLAGS)); } /* @@ -347,21 +340,19 @@ JS_FRIEND_API(JSBool) js_ValueToIterator(JSContext *cx, uintN flags, jsval *vp) { JSObject *obj; - JSTempValueRooter tvr; JSAtom *atom; JSClass *clasp; JSExtendedClass *xclasp; - JSBool ok; JSObject *iterobj; jsval arg; - JS_ASSERT(!(flags & ~(JSITER_ENUMERATE | - JSITER_FOREACH | - JSITER_KEYVALUE))); + JS_ASSERT(!(flags & ~(JSITER_ENUMERATE | JSITER_FOREACH | JSITER_KEYVALUE))); /* JSITER_KEYVALUE must always come with JSITER_FOREACH */ JS_ASSERT(!(flags & JSITER_KEYVALUE) || (flags & JSITER_FOREACH)); + AutoValueRooter tvr(cx); + /* XXX work around old valueOf call hidden beneath js_ValueToObject */ if (!JSVAL_IS_PRIMITIVE(*vp)) { obj = JSVAL_TO_OBJECT(*vp); @@ -374,30 +365,29 @@ js_ValueToIterator(JSContext *cx, uintN flags, jsval *vp) */ if ((flags & JSITER_ENUMERATE)) { if (!js_ValueToObject(cx, *vp, &obj)) - return JS_FALSE; + return false; if (!obj) goto default_iter; } else { obj = js_ValueToNonNullObject(cx, *vp); if (!obj) - return JS_FALSE; + return false; } } - JS_ASSERT(obj); - 
JS_PUSH_TEMP_ROOT_OBJECT(cx, obj, &tvr); + tvr.setObject(obj); clasp = OBJ_GET_CLASS(cx, obj); if ((clasp->flags & JSCLASS_IS_EXTENDED) && (xclasp = (JSExtendedClass *) clasp)->iteratorObject) { iterobj = xclasp->iteratorObject(cx, obj, !(flags & JSITER_FOREACH)); if (!iterobj) - goto bad; + return false; *vp = OBJECT_TO_JSVAL(iterobj); } else { atom = cx->runtime->atomState.iteratorAtom; if (!js_GetMethod(cx, obj, ATOM_TO_JSID(atom), JSGET_NO_METHOD_BARRIER, vp)) - goto bad; + return false; if (JSVAL_IS_VOID(*vp)) { default_iter: /* @@ -410,36 +400,26 @@ js_ValueToIterator(JSContext *cx, uintN flags, jsval *vp) */ iterobj = js_NewObject(cx, &js_IteratorClass, NULL, NULL); if (!iterobj) - goto bad; + return false; /* Store in *vp to protect it from GC (callers must root vp). */ *vp = OBJECT_TO_JSVAL(iterobj); if (!InitNativeIterator(cx, iterobj, obj, flags)) - goto bad; + return false; } else { LeaveTrace(cx); arg = BOOLEAN_TO_JSVAL((flags & JSITER_FOREACH) == 0); - if (!js_InternalInvoke(cx, obj, *vp, JSINVOKE_ITERATOR, 1, &arg, - vp)) { - goto bad; - } + if (!js_InternalInvoke(cx, obj, *vp, JSINVOKE_ITERATOR, 1, &arg, vp)) + return false; if (JSVAL_IS_PRIMITIVE(*vp)) { - js_ReportValueError(cx, JSMSG_BAD_ITERATOR_RETURN, - JSDVG_SEARCH_STACK, *vp, NULL); - goto bad; + js_ReportValueError(cx, JSMSG_BAD_ITERATOR_RETURN, JSDVG_SEARCH_STACK, *vp, NULL); + return false; } } } - ok = JS_TRUE; - out: - if (obj) - JS_POP_TEMP_ROOT(cx, &tvr); - return ok; - bad: - ok = JS_FALSE; - goto out; + return true; } JS_FRIEND_API(JSBool) JS_FASTCALL @@ -485,7 +465,7 @@ CallEnumeratorNext(JSContext *cx, JSObject *iterobj, uintN flags, jsval *rval) obj = iterobj->getParent(); origobj = iterobj->getProto(); - state = STOBJ_GET_SLOT(iterobj, JSSLOT_ITER_STATE); + state = iterobj->getSlot(JSSLOT_ITER_STATE); if (JSVAL_IS_NULL(state)) goto stop; @@ -505,7 +485,7 @@ CallEnumeratorNext(JSContext *cx, JSObject *iterobj, uintN flags, jsval *rval) if (!obj->enumerate(cx, JSENUMERATE_NEXT, &state, &id)) return JS_FALSE; } - STOBJ_SET_SLOT(iterobj, JSSLOT_ITER_STATE, state); + iterobj->setSlot(JSSLOT_ITER_STATE, state); if (JSVAL_IS_NULL(state)) goto stop; } else @@ -515,7 +495,7 @@ CallEnumeratorNext(JSContext *cx, JSObject *iterobj, uintN flags, jsval *rval) if (!obj->enumerate(cx, JSENUMERATE_NEXT, &state, &id)) return JS_FALSE; - STOBJ_SET_SLOT(iterobj, JSSLOT_ITER_STATE, state); + iterobj->setSlot(JSSLOT_ITER_STATE, state); if (JSVAL_IS_NULL(state)) { #if JS_HAS_XML_SUPPORT if (OBJECT_IS_XML(cx, obj)) { @@ -535,7 +515,7 @@ CallEnumeratorNext(JSContext *cx, JSObject *iterobj, uintN flags, jsval *rval) iterobj->setParent(obj); if (!obj->enumerate(cx, JSENUMERATE_INIT, &state, NULL)) return JS_FALSE; - STOBJ_SET_SLOT(iterobj, JSSLOT_ITER_STATE, state); + iterobj->setSlot(JSSLOT_ITER_STATE, state); if (!JSVAL_IS_NULL(state)) goto restart; } @@ -591,7 +571,7 @@ CallEnumeratorNext(JSContext *cx, JSObject *iterobj, uintN flags, jsval *rval) return JS_TRUE; stop: - JS_ASSERT(STOBJ_GET_SLOT(iterobj, JSSLOT_ITER_STATE) == JSVAL_NULL); + JS_ASSERT(iterobj->getSlot(JSSLOT_ITER_STATE) == JSVAL_NULL); *rval = JSVAL_HOLE; return JS_TRUE; } @@ -603,7 +583,7 @@ js_CallIteratorNext(JSContext *cx, JSObject *iterobj, jsval *rval) /* Fast path for native iterators */ if (OBJ_GET_CLASS(cx, iterobj) == &js_IteratorClass) { - flags = JSVAL_TO_INT(STOBJ_GET_SLOT(iterobj, JSSLOT_ITER_FLAGS)); + flags = JSVAL_TO_INT(iterobj->getSlot(JSSLOT_ITER_FLAGS)); if (flags & JSITER_ENUMERATE) return CallEnumeratorNext(cx, iterobj, flags, rval); @@ 
-902,7 +882,7 @@ SendToGenerator(JSContext *cx, JSGeneratorOp op, JSObject *obj, static JS_REQUIRES_STACK JSBool CloseGenerator(JSContext *cx, JSObject *obj) { - JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_GeneratorClass); + JS_ASSERT(obj->getClass() == &js_GeneratorClass); JSGenerator *gen = (JSGenerator *) obj->getPrivate(); if (!gen) {
@@ -1030,8 +1010,8 @@ js_InitIteratorClasses(JSContext *cx, JSObject *obj) NULL, iterator_methods, NULL, NULL); if (!proto) return NULL; - STOBJ_SET_SLOT(proto, JSSLOT_ITER_STATE, JSVAL_NULL); - STOBJ_SET_SLOT(proto, JSSLOT_ITER_FLAGS, JSVAL_ZERO); + proto->setSlot(JSSLOT_ITER_STATE, JSVAL_NULL); + proto->setSlot(JSSLOT_ITER_FLAGS, JSVAL_ZERO); #if JS_HAS_GENERATORS /* Initialize the generator internals if configured. */
diff --git a/js/src/jsiter.h b/js/src/jsiter.h index 8ece3bf04c9..41826cebb8a 100644 --- a/js/src/jsiter.h +++ b/js/src/jsiter.h
@@ -45,6 +45,7 @@ */ #include "jsprvtd.h" #include "jspubtd.h" +#include "jsversion.h" JS_BEGIN_EXTERN_C
@@ -129,7 +130,7 @@ static inline bool js_ValueIsStopIteration(jsval v) { return !JSVAL_IS_PRIMITIVE(v) && - STOBJ_GET_CLASS(JSVAL_TO_OBJECT(v)) == &js_StopIterationClass; + JSVAL_TO_OBJECT(v)->getClass() == &js_StopIterationClass; } extern JSObject *
diff --git a/js/src/jslibmath.h b/js/src/jslibmath.h index 13aa01a61ef..cb22d859fbc 100644 --- a/js/src/jslibmath.h +++ b/js/src/jslibmath.h
@@ -42,7 +42,6 @@ #define _LIBMATH_H #include <math.h> -#include "jsversion.h" /* * Use system provided math routines.
diff --git a/js/src/jslock.cpp b/js/src/jslock.cpp index ed232d6f32f..2982d7bcedb 100644 --- a/js/src/jslock.cpp +++ b/js/src/jslock.cpp
@@ -56,6 +56,8 @@ #include "jsscope.h" #include "jsstr.h" +using namespace js; #define ReadWord(W) (W) #if !defined(__GNUC__)
@@ -317,7 +319,7 @@ js_InitLock(JSThinLock *tl) tl->owner = 0; tl->fat = (JSFatLock*)JS_NEW_LOCK(); #else - memset(tl, 0, sizeof(JSThinLock)); + PodZero(tl); #endif }
@@ -485,7 +487,7 @@ FinishSharingTitle(JSContext *cx, JSTitle *title) uint32 nslots = scope->freeslot; JS_ASSERT(nslots >= JSSLOT_START(obj->getClass())); for (uint32 i = JSSLOT_START(obj->getClass()); i != nslots; ++i) { - jsval v = STOBJ_GET_SLOT(obj, i); + jsval v = obj->getSlot(i); if (JSVAL_IS_STRING(v) && !js_MakeStringImmutable(cx, JSVAL_TO_STRING(v))) { /*
@@ -494,7 +496,7 @@ FinishSharingTitle(JSContext *cx, JSTitle *title) * ignoring errors except out-of-memory, which should have been * reported through JS_ReportOutOfMemory at this point. */ - STOBJ_SET_SLOT(obj, i, JSVAL_VOID); + obj->setSlot(i, JSVAL_VOID); } } }
@@ -705,7 +707,7 @@ js_GetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot) if (CX_THREAD_IS_RUNNING_GC(cx) || scope->sealed() || (title->ownercx && ClaimTitle(title, cx))) { - return STOBJ_GET_SLOT(obj, slot); + return obj->getSlot(slot); } #ifndef NSPR_LOCK
@@ -720,7 +722,7 @@ js_GetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot) * lock release followed by fat lock acquisition. */ if (scope == OBJ_SCOPE(obj)) { - v = STOBJ_GET_SLOT(obj, slot); + v = obj->getSlot(slot); if (!NativeCompareAndSwap(&tl->owner, me, 0)) { /* Assert that scope locks never revert to flyweight. */
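
// PodZero, used in js_InitLock and js_InitTitle above, replaces raw
// memset(p, 0, sizeof(T)) calls: the size is inferred from the pointee
// type, so it can no longer drift out of sync with the argument. An
// illustrative reimplementation (not the engine's own definition, which
// is meant for plain-old-data types only):

#include <cstddef>
#include <cstring>

template <class T>
inline void PodZero(T *t) {
    memset(t, 0, sizeof(T));                 // size comes from the type
}

template <class T>
inline void PodZero(T *t, std::size_t nelem) {
    memset(t, 0, nelem * sizeof(T));         // array form
}

struct ThinLock { unsigned owner; void *fat; };

int main() {
    ThinLock tl;
    PodZero(&tl);   // was: memset(&tl, 0, sizeof(JSThinLock));
    return (int) tl.owner;
}
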
JS_ASSERT(title->ownercx != cx);
@@ -734,12 +736,12 @@ js_GetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot) js_Dequeue(tl); } else if (Thin_RemoveWait(ReadWord(tl->owner)) == me) { - return STOBJ_GET_SLOT(obj, slot); + return obj->getSlot(slot); } #endif js_LockObj(cx, obj); - v = STOBJ_GET_SLOT(obj, slot); + v = obj->getSlot(slot); /* * Test whether cx took ownership of obj's scope during js_LockObj.
@@ -1311,7 +1313,7 @@ js_LockObj(JSContext *cx, JSObject *obj) JSScope *scope; JSTitle *title; - JS_ASSERT(OBJ_IS_NATIVE(obj)); + JS_ASSERT(obj->isNative()); /* * We must test whether the GC is calling and return without mutating any
@@ -1343,7 +1345,7 @@ js_LockObj(JSContext *cx, JSObject *obj) void js_UnlockObj(JSContext *cx, JSObject *obj) { - JS_ASSERT(OBJ_IS_NATIVE(obj)); + JS_ASSERT(obj->isNative()); js_UnlockTitle(cx, &OBJ_SCOPE(obj)->title); }
@@ -1352,7 +1354,7 @@ js_InitTitle(JSContext *cx, JSTitle *title) { #ifdef JS_THREADSAFE title->ownercx = cx; - memset(&title->lock, 0, sizeof title->lock); + PodZero(&title->lock); /* * Set u.link = NULL, not u.count = 0, in case the target architecture's
diff --git a/js/src/jslock.h b/js/src/jslock.h index 90812617d12..15f44cc9921 100644 --- a/js/src/jslock.h +++ b/js/src/jslock.h
@@ -149,8 +149,8 @@ struct JSTitle { /* * NB: The JS_LOCK_OBJ and JS_UNLOCK_OBJ macros work *only* on native objects - * (objects for which OBJ_IS_NATIVE returns true). All uses of these macros in - * the engine are predicated on OBJ_IS_NATIVE or equivalent checks. These uses + * (objects for which obj->isNative() returns true). All uses of these macros in + * the engine are predicated on obj->isNative() or equivalent checks. These uses * are for optimizations above the JSObjectOps layer, under which object locks * normally hide. */
diff --git a/js/src/jsmath.cpp b/js/src/jsmath.cpp index 5ac7b49930a..bd48ed59da2 100644 --- a/js/src/jsmath.cpp +++ b/js/src/jsmath.cpp
@@ -434,38 +434,46 @@ static const jsdouble RNG_DSCALE = jsdouble(1LL << 53); * Math.random() support, lifted from java.util.Random.java. */ static inline void -random_setSeed(JSThreadData *data, int64 seed) +random_setSeed(JSContext *cx, int64 seed) { - data->rngSeed = (seed ^ RNG_MULTIPLIER) & RNG_MASK; + cx->rngSeed = (seed ^ RNG_MULTIPLIER) & RNG_MASK; } void -js_InitRandom(JSThreadData *data) +js_InitRandom(JSContext *cx) { - /* Finally, set the seed from current time. */ - random_setSeed(data, PRMJ_Now() / 1000); + /* + * Set the seed from current time. Since we have an RNG per context and we often bring + * up several contexts at the same time, we xor in some additional values, namely + * the context and its successor. We don't just use the context because it might be + * possible to reverse engineer the context pointer if one guesses the time right.
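
// The generator above is java.util.Random's 48-bit linear congruential RNG;
// the constants below are java.util.Random's, and the RNG_* constants
// defined earlier in jsmath.cpp are assumed here to match them.
// Self-contained sketch:

#include <cstdint>
#include <cstdio>

static const uint64_t RNG_MULTIPLIER = 0x5DEECE66DULL;
static const uint64_t RNG_ADDEND     = 0xBULL;
static const uint64_t RNG_MASK       = (1ULL << 48) - 1;
static const double   RNG_DSCALE     = (double) (1LL << 53);

struct Rng {
    uint64_t seed;

    void setSeed(int64_t s) {
        seed = ((uint64_t) s ^ RNG_MULTIPLIER) & RNG_MASK;
    }
    // Advance the LCG and return the top `bits` bits of the 48-bit state.
    uint64_t next(int bits) {
        seed = (seed * RNG_MULTIPLIER + RNG_ADDEND) & RNG_MASK;
        return seed >> (48 - bits);
    }
    // 26 + 27 = 53 random bits -> a double uniform in [0, 1).
    double nextDouble() {
        return (double) ((next(26) << 27) + next(27)) / RNG_DSCALE;
    }
};

int main() {
    Rng rng;
    rng.setSeed(12345);
    std::printf("%f\n", rng.nextDouble());
}
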
+ */ + random_setSeed(cx, + (PRMJ_Now() / 1000) ^ + int64(cx) ^ + int64(cx->link.next)); } static inline uint64 -random_next(JSThreadData *data, int bits) +random_next(JSContext *cx, int bits) { - uint64 nextseed = data->rngSeed * RNG_MULTIPLIER; + uint64 nextseed = cx->rngSeed * RNG_MULTIPLIER; nextseed += RNG_ADDEND; nextseed &= RNG_MASK; - data->rngSeed = nextseed; + cx->rngSeed = nextseed; return nextseed >> (48 - bits); } static inline jsdouble -random_nextDouble(JSThreadData *data) +random_nextDouble(JSContext *cx) { - return jsdouble((random_next(data, 26) << 27) + random_next(data, 27)) / RNG_DSCALE; + return jsdouble((random_next(cx, 26) << 27) + random_next(cx, 27)) / RNG_DSCALE; } static JSBool math_random(JSContext *cx, uintN argc, jsval *vp) { - jsdouble z = random_nextDouble(JS_THREAD_DATA(cx)); + jsdouble z = random_nextDouble(cx); return js_NewNumberInRootedValue(cx, z, vp); } @@ -670,7 +678,7 @@ math_pow_tn(jsdouble d, jsdouble p) static jsdouble FASTCALL math_random_tn(JSContext *cx) { - return random_nextDouble(JS_THREAD_DATA(cx)); + return random_nextDouble(cx); } static jsdouble FASTCALL diff --git a/js/src/jsmath.h b/js/src/jsmath.h index 98a2bf1d537..0eab6dfb065 100644 --- a/js/src/jsmath.h +++ b/js/src/jsmath.h @@ -51,7 +51,7 @@ extern JSObject * js_InitMathClass(JSContext *cx, JSObject *obj); extern void -js_InitRandom(JSThreadData *data); +js_InitRandom(JSContext *cx); extern JSBool js_math_ceil(JSContext *cx, uintN argc, jsval *vp); diff --git a/js/src/jsnum.cpp b/js/src/jsnum.cpp index 9e21ed3b3cd..ecef52bb6ce 100644 --- a/js/src/jsnum.cpp +++ b/js/src/jsnum.cpp @@ -321,7 +321,8 @@ num_toSource(JSContext *cx, uintN argc, jsval *vp) return JS_FALSE; JS_ASSERT(JSVAL_IS_NUMBER(v)); d = JSVAL_IS_INT(v) ? (jsdouble)JSVAL_TO_INT(v) : *JSVAL_TO_DOUBLE(v); - numStr = JS_dtostr(numBuf, sizeof numBuf, DTOSTR_STANDARD, 0, d); + numStr = js_dtostr(JS_THREAD_DATA(cx)->dtoaState, numBuf, sizeof numBuf, + DTOSTR_STANDARD, 0, d); if (!numStr) { JS_ReportOutOfMemory(cx); return JS_FALSE; @@ -575,7 +576,8 @@ num_to(JSContext *cx, JSDToStrMode zeroArgMode, JSDToStrMode oneArgMode, return JS_FALSE; precision = js_DoubleToInteger(precision); if (precision < precisionMin || precision > precisionMax) { - numStr = JS_dtostr(buf, sizeof buf, DTOSTR_STANDARD, 0, precision); + numStr = js_dtostr(JS_THREAD_DATA(cx)->dtoaState, buf, sizeof buf, + DTOSTR_STANDARD, 0, precision); if (!numStr) JS_ReportOutOfMemory(cx); else @@ -584,7 +586,8 @@ num_to(JSContext *cx, JSDToStrMode zeroArgMode, JSDToStrMode oneArgMode, } } - numStr = JS_dtostr(buf, sizeof buf, oneArgMode, (jsint)precision + precisionOffset, d); + numStr = js_dtostr(JS_THREAD_DATA(cx)->dtoaState, buf, sizeof buf, + oneArgMode, (jsint)precision + precisionOffset, d); if (!numStr) { JS_ReportOutOfMemory(cx); return JS_FALSE; @@ -854,9 +857,10 @@ NumberToCString(JSContext *cx, jsdouble d, jsint base, char *buf, size_t bufSize numStr = IntToCString(i, base, buf, bufSize); } else { if (base == 10) - numStr = JS_dtostr(buf, bufSize, DTOSTR_STANDARD, 0, d); + numStr = js_dtostr(JS_THREAD_DATA(cx)->dtoaState, buf, bufSize, + DTOSTR_STANDARD, 0, d); else - numStr = JS_dtobasestr(base, d); + numStr = js_dtobasestr(JS_THREAD_DATA(cx)->dtoaState, base, d); if (!numStr) { JS_ReportOutOfMemory(cx); return NULL; @@ -922,7 +926,8 @@ js_NumberValueToCharBuffer(JSContext *cx, jsval v, JSCharBuffer &cb) cstr = IntToCString(JSVAL_TO_INT(v), 10, arr, arrSize); } else { JS_ASSERT(JSVAL_IS_DOUBLE(v)); - cstr = JS_dtostr(arr, arrSize, DTOSTR_STANDARD, 0, 
*JSVAL_TO_DOUBLE(v)); + cstr = js_dtostr(JS_THREAD_DATA(cx)->dtoaState, arr, arrSize, + DTOSTR_STANDARD, 0, *JSVAL_TO_DOUBLE(v)); } if (!cstr) return JS_FALSE; @@ -996,11 +1001,11 @@ js_ValueToNumber(JSContext *cx, jsval *vp) * vp roots obj so we cannot use it as an extra root for * obj->defaultValue result when calling the hook. */ - JSAutoTempValueRooter tvr(cx, v); - if (!obj->defaultValue(cx, JSTYPE_NUMBER, tvr.addr())) + AutoValueRooter gcr(cx, v); + if (!obj->defaultValue(cx, JSTYPE_NUMBER, gcr.addr())) obj = NULL; else - v = *vp = tvr.value(); + v = *vp = gcr.value(); if (!obj) { *vp = JSVAL_NULL; return 0.0; @@ -1185,7 +1190,7 @@ js_strtod(JSContext *cx, const jschar *s, const jschar *send, estr = istr + 8; } else { int err; - d = JS_strtod(cstr, &estr, &err); + d = js_strtod_harder(JS_THREAD_DATA(cx)->dtoaState, cstr, &estr, &err); if (d == HUGE_VAL) d = js_PositiveInfinity; else if (d == -HUGE_VAL) @@ -1303,7 +1308,7 @@ js_strtointeger(JSContext *cx, const jschar *s, const jschar *send, /* * If we're accumulating a decimal number and the number is >= * 2^53, then the result from the repeated multiply-add above may - * be inaccurate. Call JS_strtod to get the correct answer. + * be inaccurate. Call js_strtod_harder to get the correct answer. */ size_t i; size_t length = s1 - start; @@ -1317,7 +1322,7 @@ js_strtointeger(JSContext *cx, const jschar *s, const jschar *send, cstr[i] = (char)start[i]; cstr[length] = 0; - value = JS_strtod(cstr, &estr, &err); + value = js_strtod_harder(JS_THREAD_DATA(cx)->dtoaState, cstr, &estr, &err); if (err == JS_DTOA_ENOMEM) { JS_ReportOutOfMemory(cx); cx->free(cstr); diff --git a/js/src/jsobj.cpp b/js/src/jsobj.cpp index 0a33ecb3172..ddb12f344c0 100644 --- a/js/src/jsobj.cpp +++ b/js/src/jsobj.cpp @@ -133,17 +133,12 @@ obj_getSlot(JSContext *cx, JSObject *obj, jsval id, jsval *vp); static JSBool obj_setSlot(JSContext *cx, JSObject *obj, jsval id, jsval *vp); -static JSBool -obj_getCount(JSContext *cx, JSObject *obj, jsval id, jsval *vp); - static JSPropertySpec object_props[] = { /* These two must come first; see object_props[slot].name usage below. */ {js_proto_str, JSSLOT_PROTO, JSPROP_PERMANENT|JSPROP_SHARED, obj_getSlot, obj_setSlot}, {js_parent_str,JSSLOT_PARENT,JSPROP_READONLY|JSPROP_PERMANENT|JSPROP_SHARED, obj_getSlot, obj_setSlot}, - {js_count_str, 0, JSPROP_READONLY|JSPROP_PERMANENT|JSPROP_SHARED, - obj_getCount, NULL}, {0,0,0,0,0} }; @@ -242,37 +237,6 @@ obj_setSlot(JSContext *cx, JSObject *obj, jsval id, jsval *vp) return js_SetProtoOrParent(cx, obj, slot, pobj, JS_TRUE); } -static JSBool -obj_getCount(JSContext *cx, JSObject *obj, jsval id, jsval *vp) -{ - jsval iter_state; - jsid num_properties; - JSBool ok; - - if (JS_HAS_STRICT_OPTION(cx) && !ReportStrictSlot(cx, JSSLOT_COUNT)) - return JS_FALSE; - - iter_state = JSVAL_NULL; - JSAutoEnumStateRooter tvr(cx, obj, &iter_state); - - /* Get the number of properties to enumerate. 
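
// Why js_strtointeger falls back to js_strtod_harder at 2^53 (the comment
// above): a double has 53 mantissa bits, so the value * 10 + digit
// accumulation can start rounding once the running value crosses 2^53.
// Illustrative parse with the same two-phase shape (libc strtod stands in
// for the engine's per-thread js_strtod_harder):

#include <cstdio>
#include <cstdlib>

double parseDecimal(const char *s) {
    double value = 0;
    const char *p = s;
    while (*p >= '0' && *p <= '9')
        value = value * 10 + (*p++ - '0');    // exact only below 2^53
    if (value >= 9007199254740992.0) {        // 2^53: accumulation may be off
        char *end;
        value = strtod(s, &end);              // one correctly rounded parse
    }
    return value;
}

int main() {
    std::printf("%.0f\n", parseDecimal("9007199254740993"));  // 2^53 + 1
}
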
*/ - ok = obj->enumerate(cx, JSENUMERATE_INIT, &iter_state, &num_properties); - if (!ok) - goto out; - - if (!JSVAL_IS_INT(num_properties)) { - JS_ASSERT(0); - *vp = JSVAL_ZERO; - goto out; - } - *vp = num_properties; - -out: - if (!JSVAL_IS_NULL(iter_state)) - ok &= obj->enumerate(cx, JSENUMERATE_DESTROY, &iter_state, 0); - return ok; -} - #else /* !JS_HAS_OBJ_PROTO_PROP */ #define object_props NULL @@ -287,7 +251,7 @@ js_SetProtoOrParent(JSContext *cx, JSObject *obj, uint32 slot, JSObject *pobj, JS_ASSERT_IF(!checkForCycles, obj != pobj); if (slot == JSSLOT_PROTO) { - if (OBJ_IS_NATIVE(obj)) { + if (obj->isNative()) { JS_LOCK_OBJ(cx, obj); bool ok = !!js_GetMutableScope(cx, obj); JS_UNLOCK_OBJ(cx, obj); @@ -301,7 +265,7 @@ js_SetProtoOrParent(JSContext *cx, JSObject *obj, uint32 slot, JSObject *pobj, * case any entries were filled by looking up through obj. */ JSObject *oldproto = obj; - while (oldproto && OBJ_IS_NATIVE(oldproto)) { + while (oldproto && oldproto->isNative()) { JS_LOCK_OBJ(cx, oldproto); JSScope *scope = OBJ_SCOPE(oldproto); scope->protoShapeChange(cx); @@ -358,7 +322,7 @@ js_SetProtoOrParent(JSContext *cx, JSObject *obj, uint32 slot, JSObject *pobj, static JSHashNumber js_hash_object(const void *key) { - return (JSHashNumber)JS_PTR_TO_UINT32(key) >> JSVAL_TAGBITS; + return JSHashNumber(uintptr_t(key) >> JSVAL_TAGBITS); } static JSHashEntry * @@ -390,8 +354,7 @@ MarkSharpObjects(JSContext *cx, JSObject *obj, JSIdArray **idap) he = *hep; if (!he) { sharpid = 0; - he = JS_HashTableRawAdd(table, hep, hash, obj, - JS_UINT32_TO_PTR(sharpid)); + he = JS_HashTableRawAdd(table, hep, hash, obj, (void *) sharpid); if (!he) { JS_ReportOutOfMemory(cx); return NULL; @@ -412,7 +375,7 @@ MarkSharpObjects(JSContext *cx, JSObject *obj, JSIdArray **idap) continue; ok = obj2->getAttributes(cx, id, prop, &attrs); if (ok) { - if (OBJ_IS_NATIVE(obj2) && + if (obj2->isNative() && (attrs & (JSPROP_GETTER | JSPROP_SETTER))) { JSScopeProperty *sprop = (JSScopeProperty *) prop; val = JSVAL_VOID; @@ -448,10 +411,10 @@ MarkSharpObjects(JSContext *cx, JSObject *obj, JSIdArray **idap) if (!ok) return NULL; } else { - sharpid = JS_PTR_TO_UINT32(he->value); + sharpid = uintptr_t(he->value); if (sharpid == 0) { sharpid = ++map->sharpgen << SHARP_ID_SHIFT; - he->value = JS_UINT32_TO_PTR(sharpid); + he->value = (void *) sharpid; } ida = NULL; } @@ -511,7 +474,7 @@ js_EnterSharpObject(JSContext *cx, JSObject *obj, JSIdArray **idap, --map->depth; if (!he) goto bad; - JS_ASSERT((JS_PTR_TO_UINT32(he->value) & SHARP_BIT) == 0); + JS_ASSERT((uintptr_t(he->value) & SHARP_BIT) == 0); if (!idap) { JS_DestroyIdArray(cx, ida); ida = NULL; @@ -539,7 +502,7 @@ js_EnterSharpObject(JSContext *cx, JSObject *obj, JSIdArray **idap, } } - sharpid = JS_PTR_TO_UINT32(he->value); + sharpid = uintptr_t(he->value); if (sharpid != 0) { len = JS_snprintf(buf, sizeof buf, "#%u%c", sharpid >> SHARP_ID_SHIFT, @@ -664,15 +627,13 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) #endif jsval *val; jsval localroot[4] = {JSVAL_NULL, JSVAL_NULL, JSVAL_NULL, JSVAL_NULL}; - JSTempValueRooter tvr; JSString *gsopold[2]; JSString *gsop[2]; JSString *idstr, *valstr, *str; JS_CHECK_RECURSION(cx, return JS_FALSE); - MUST_FLOW_THROUGH("out"); - JS_PUSH_TEMP_ROOT(cx, 4, localroot, &tvr); + AutoArrayRooter tvr(cx, JS_ARRAY_LENGTH(localroot), localroot); /* If outermost, we need parentheses to be an expression, not a block. 
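
// MarkSharpObjects and js_EnterSharpObject above pack a small bitfield into
// the hash entry's void * value: the low two bits are the SHARP/BUSY flags,
// the rest is the sharp id shifted left by SHARP_ID_SHIFT. The switch from
// JS_PTR_TO_UINT32/JS_UINT32_TO_PTR to uintptr_t keeps the casts lossless
// on 64-bit targets. Sketch of the encoding (constants mirror the jsobj.h
// macros later in this patch):

#include <cstdint>
#include <cstdio>

static const uintptr_t SHARP_BIT = 1;
static const uintptr_t BUSY_BIT  = 2;
static const int SHARP_ID_SHIFT  = 2;

static void *makeEntryValue(uintptr_t id, bool sharp, bool busy) {
    return (void *) ((id << SHARP_ID_SHIFT) |
                     (sharp ? SHARP_BIT : 0) |
                     (busy ? BUSY_BIT : 0));
}

static uintptr_t sharpId(void *v) { return (uintptr_t) v >> SHARP_ID_SHIFT; }
static bool isSharp(void *v)      { return ((uintptr_t) v & SHARP_BIT) != 0; }

int main() {
    void *v = makeEntryValue(7, true, false);
    std::printf("id=%lu sharp=%d\n", (unsigned long) sharpId(v), (int) isSharp(v));
}
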
*/ outermost = (cx->sharpObjectMap.depth == 0);
@@ -777,7 +738,7 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) obj2->dropProperty(cx, prop); goto error; } - if (OBJ_IS_NATIVE(obj2) && + if (obj2->isNative() && (attrs & (JSPROP_GETTER | JSPROP_SETTER))) { JSScopeProperty *sprop = (JSScopeProperty *) prop; if (attrs & JSPROP_GETTER) {
@@ -1037,7 +998,6 @@ obj_toSource(JSContext *cx, uintN argc, jsval *vp) *vp = STRING_TO_JSVAL(str); ok = JS_TRUE; out: - JS_POP_TEMP_ROOT(cx, &tvr); return ok; overflow:
@@ -1392,7 +1352,7 @@ obj_eval(JSContext *cx, uintN argc, jsval *vp) * with object to maintain invariants in the engine (see bug 520164). */ if (scopeobj->getParent()) { - JSObject *global = JS_GetGlobalForObject(cx, scopeobj); + JSObject *global = scopeobj->getGlobal(); withGuard.obj = js_NewWithObject(cx, scopeobj, global, 0); if (!withGuard.obj) return JS_FALSE;
@@ -1683,12 +1643,12 @@ js_HasOwnProperty(JSContext *cx, JSLookupPropOp lookup, JSObject *obj, jsid id, JSObject **objp, JSProperty **propp) { if (!lookup(cx, obj, id, objp, propp)) - return JS_FALSE; + return false; if (!*propp) - return JS_TRUE; + return true; if (*objp == obj) - return JS_TRUE; + return true; JSExtendedClass *xclasp; JSObject *outer;
@@ -1699,10 +1659,11 @@ js_HasOwnProperty(JSContext *cx, JSLookupPropOp lookup, JSObject *obj, jsid id, } else { outer = xclasp->outerObject(cx, *objp); if (!outer) - return JS_FALSE; + return false; } + if (outer != *objp) { - if (OBJ_IS_NATIVE(*objp) && obj->getClass() == clasp) { + if ((*objp)->isNative() && obj->getClass() == clasp) { /* * The combination of JSPROP_SHARED and JSPROP_PERMANENT in a * delegated property makes that property appear to be direct in
@@ -1718,16 +1679,15 @@ js_HasOwnProperty(JSContext *cx, JSLookupPropOp lookup, JSObject *obj, jsid id, * the property, there is no way to tell whether it is directly * owned, or indirectly delegated. */ - if (!SPROP_IS_SHARED_PERMANENT((JSScopeProperty *) *propp)) { - (*objp)->dropProperty(cx, *propp); - *propp = NULL; - } - } else { - (*objp)->dropProperty(cx, *propp); - *propp = NULL; + JSScopeProperty *sprop = reinterpret_cast<JSScopeProperty *>(*propp); + if (sprop->isSharedPermanent()) + return true; } + + (*objp)->dropProperty(cx, *propp); + *propp = NULL; } - return JS_TRUE; + return true; } #ifdef JS_TRACER
@@ -1823,8 +1783,8 @@ js_PropertyIsEnumerable(JSContext *cx, JSObject *obj, jsid id, jsval *vp) * to distinguish a shared permanent proto-property from a local one.
*/ if (pobj != obj && - !(OBJ_IS_NATIVE(pobj) && - SPROP_IS_SHARED_PERMANENT((JSScopeProperty *)prop))) { + !(pobj->isNative() && + ((JSScopeProperty *)prop)->isSharedPermanent())) { pobj->dropProperty(cx, prop); *vp = JSVAL_FALSE; return JS_TRUE;
@@ -1919,9 +1879,9 @@ obj_lookupGetter(JSContext *cx, uintN argc, jsval *vp) return JS_FALSE; *vp = JSVAL_VOID; if (prop) { - if (OBJ_IS_NATIVE(pobj)) { + if (pobj->isNative()) { sprop = (JSScopeProperty *) prop; - if (sprop->attrs & JSPROP_GETTER) + if (sprop->hasGetterValue()) *vp = sprop->getterValue(); } pobj->dropProperty(cx, prop);
@@ -1944,9 +1904,9 @@ obj_lookupSetter(JSContext *cx, uintN argc, jsval *vp) return JS_FALSE; *vp = JSVAL_VOID; if (prop) { - if (OBJ_IS_NATIVE(pobj)) { + if (pobj->isNative()) { sprop = (JSScopeProperty *) prop; - if (sprop->attrs & JSPROP_SETTER) + if (sprop->hasSetterValue()) *vp = sprop->setterValue(); } pobj->dropProperty(cx, prop);
@@ -1981,39 +1941,28 @@ obj_getPrototypeOf(JSContext *cx, uintN argc, jsval *vp) JSACC_PROTO, vp, &attrs); } -static JSBool -obj_getOwnPropertyDescriptor(JSContext *cx, uintN argc, jsval *vp) +JSBool +js_GetOwnPropertyDescriptor(JSContext *cx, JSObject *obj, jsid id, jsval *vp) { - if (argc == 0 || JSVAL_IS_PRIMITIVE(vp[2])) { - JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_NOT_NONNULL_OBJECT); - return JS_FALSE; - } - - JSObject *obj = JSVAL_TO_OBJECT(vp[2]); - - JSAutoTempIdRooter nameidr(cx); - if (!JS_ValueToId(cx, argc >= 2 ? vp[3] : JSVAL_VOID, nameidr.addr())) - return JS_FALSE; - JSObject *pobj; JSProperty *prop; - if (!js_HasOwnProperty(cx, obj->map->ops->lookupProperty, obj, nameidr.id(), &pobj, &prop)) - return JS_FALSE; + if (!js_HasOwnProperty(cx, obj->map->ops->lookupProperty, obj, id, &pobj, &prop)) + return false; if (!prop) { *vp = JSVAL_VOID; - return JS_TRUE; + return true; } uintN attrs; - if (!pobj->getAttributes(cx, nameidr.id(), prop, &attrs)) { + if (!pobj->getAttributes(cx, id, prop, &attrs)) { pobj->dropProperty(cx, prop); - return JS_FALSE; + return false; } jsval roots[] = { JSVAL_VOID, JSVAL_VOID }; - JSAutoTempValueRooter tvr(cx, JS_ARRAY_LENGTH(roots), roots); + AutoArrayRooter tvr(cx, JS_ARRAY_LENGTH(roots), roots); if (attrs & (JSPROP_GETTER | JSPROP_SETTER)) { - if (OBJ_IS_NATIVE(obj)) { + if (obj->isNative()) { JSScopeProperty *sprop = reinterpret_cast<JSScopeProperty *>(prop); if (attrs & JSPROP_GETTER) roots[0] = sprop->getterValue();
@@ -2025,15 +1974,15 @@ obj_getOwnPropertyDescriptor(JSContext *cx, uintN argc, jsval *vp) } else { pobj->dropProperty(cx, prop); - if (!obj->getProperty(cx, nameidr.id(), &roots[0])) - return JS_FALSE; + if (!obj->getProperty(cx, id, &roots[0])) + return false; } /* We have our own property, so start creating the descriptor. */ JSObject *desc = js_NewObject(cx, &js_ObjectClass, NULL, NULL); if (!desc) - return JS_FALSE; + return false; *vp = OBJECT_TO_JSVAL(desc); /* Root and return. */
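
// js_GetOwnPropertyDescriptor above reflects a property as an ES5
// descriptor object: {get, set, enumerable, configurable} for accessor
// properties, {value, writable, enumerable, configurable} for data
// properties. The DefineProperty code below classifies incoming
// descriptors the same way (ES5 8.10): accessor if get or set is present,
// data if value or writable is present, generic otherwise. Minimal sketch
// of those predicates (fields only; the real PropertyDescriptor also
// carries jsvals and attribute bits):

#include <cstdio>

struct PropDesc {
    bool hasGet, hasSet, hasValue, hasWritable;

    bool isAccessorDescriptor() const { return hasGet || hasSet; }
    bool isDataDescriptor() const     { return hasValue || hasWritable; }
    bool isGenericDescriptor() const {
        return !isAccessorDescriptor() && !isDataDescriptor();
    }
};

int main() {
    PropDesc d = { false, false, true, false };   // like {value: v}
    std::printf("data=%d accessor=%d generic=%d\n",
                (int) d.isDataDescriptor(),
                (int) d.isAccessorDescriptor(),
                (int) d.isGenericDescriptor());
}
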
const JSAtomState &atomState = cx->runtime->atomState;
@@ -2042,7 +1991,7 @@ obj_getOwnPropertyDescriptor(JSContext *cx, uintN argc, jsval *vp) JS_PropertyStub, JS_PropertyStub, JSPROP_ENUMERATE) || !desc->defineProperty(cx, ATOM_TO_JSID(atomState.setAtom), roots[1], JS_PropertyStub, JS_PropertyStub, JSPROP_ENUMERATE)) { - return JS_FALSE; + return false; } } else { if (!desc->defineProperty(cx, ATOM_TO_JSID(atomState.valueAtom), roots[0],
@@ -2050,7 +1999,7 @@ obj_getOwnPropertyDescriptor(JSContext *cx, uintN argc, jsval *vp) !desc->defineProperty(cx, ATOM_TO_JSID(atomState.writableAtom), BOOLEAN_TO_JSVAL((attrs & JSPROP_READONLY) == 0), JS_PropertyStub, JS_PropertyStub, JSPROP_ENUMERATE)) { - return JS_FALSE; + return false; } }
@@ -2062,6 +2011,20 @@ obj_getOwnPropertyDescriptor(JSContext *cx, uintN argc, jsval *vp) JS_PropertyStub, JS_PropertyStub, JSPROP_ENUMERATE); } +static JSBool +obj_getOwnPropertyDescriptor(JSContext *cx, uintN argc, jsval *vp) +{ + if (argc == 0 || JSVAL_IS_PRIMITIVE(vp[2])) { + JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_NOT_NONNULL_OBJECT); + return false; + } + JSObject *obj = JSVAL_TO_OBJECT(vp[2]); + AutoIdRooter nameidr(cx); + if (!JS_ValueToId(cx, argc >= 2 ? vp[3] : JSVAL_VOID, nameidr.addr())) + return JS_FALSE; + return js_GetOwnPropertyDescriptor(cx, obj, nameidr.id(), vp); +} + static JSBool obj_keys(JSContext *cx, uintN argc, jsval *vp) {
@@ -2072,7 +2035,7 @@ obj_keys(JSContext *cx, uintN argc, jsval *vp) } JSObject *obj = JSVAL_TO_OBJECT(v); - JSAutoIdArray ida(cx, JS_Enumerate(cx, obj)); + AutoIdArray ida(cx, JS_Enumerate(cx, obj)); if (!ida) return JS_FALSE;
@@ -2237,50 +2200,6 @@ PropertyDescriptor::initialize(JSContext* cx, jsid id, jsval v) return true; } -typedef js::Vector<PropertyDescriptor, 1> PropertyDescriptorArray; - -class AutoDescriptorArray : private JSTempValueRooter - { - private: - JSContext *cx; - PropertyDescriptorArray descriptors; - - public: - AutoDescriptorArray(JSContext *cx) - : cx(cx), descriptors(cx) - { - JS_PUSH_TEMP_ROOT_TRACE(cx, trace, this); - } - ~AutoDescriptorArray() { - JS_POP_TEMP_ROOT(cx, this); - } - - PropertyDescriptor *append() { - if (!descriptors.append(PropertyDescriptor())) - return NULL; - return &descriptors.back(); - } - - PropertyDescriptor& operator[](size_t i) { - JS_ASSERT(i < descriptors.length()); - return descriptors[i]; - } - - private: - static void trace(JSTracer *trc, JSTempValueRooter *tvr) { - PropertyDescriptorArray &descs = - static_cast<AutoDescriptorArray *>(tvr)->descriptors; - for (size_t i = 0, len = descs.length(); i < len; i++) { - PropertyDescriptor &desc = descs[i]; - - JS_CALL_VALUE_TRACER(trc, desc.value, "PropertyDescriptor::value"); - JS_CALL_VALUE_TRACER(trc, desc.get, "PropertyDescriptor::get"); - JS_CALL_VALUE_TRACER(trc, desc.set, "PropertyDescriptor::set"); - js_TraceId(trc, desc.id); - } - } -}; - static JSBool Reject(JSContext *cx, uintN errorNumber, bool throwError, jsid id, bool *rval) {
@@ -2383,17 +2302,13 @@ DefinePropertyObject(JSContext *cx, JSObject *obj, const PropertyDescriptor &des if (desc.hasGet && !js_SameValue(desc.getterValue(), - (sprop->attrs & JSPROP_GETTER) - ? sprop->getterValue() - : JSVAL_VOID, cx)) { + sprop->hasGetterValue() ? sprop->getterValue() : JSVAL_VOID, cx)) { break; } if (desc.hasSet && !js_SameValue(desc.setterValue(), - (sprop->attrs & JSPROP_SETTER) - ? sprop->setterValue() - : JSVAL_VOID, cx)) { + sprop->hasSetterValue() ? 
sprop->setterValue() : JSVAL_VOID, cx)) { break; } } else { @@ -2429,7 +2344,7 @@ DefinePropertyObject(JSContext *cx, JSObject *obj, const PropertyDescriptor &des * for now we'll simply forbid their redefinition. */ if (!sprop->configurable() && - (!SPROP_HAS_STUB_GETTER(sprop) || !SPROP_HAS_STUB_SETTER(sprop))) { + (!sprop->hasDefaultGetter() || !sprop->hasDefaultSetter())) { return Reject(cx, obj2, current, JSMSG_CANT_REDEFINE_UNCONFIGURABLE_PROP, throwError, desc.id, rval); } @@ -2505,12 +2420,11 @@ DefinePropertyObject(JSContext *cx, JSObject *obj, const PropertyDescriptor &des if (!sprop->configurable()) { if ((desc.hasSet && !js_SameValue(desc.setterValue(), - (sprop->attrs & JSPROP_SETTER) ? sprop->setterValue() : JSVAL_VOID, + sprop->hasSetterValue() ? sprop->setterValue() : JSVAL_VOID, cx)) || (desc.hasGet && !js_SameValue(desc.getterValue(), - (sprop->attrs & JSPROP_GETTER) ? sprop->getterValue() : JSVAL_VOID, - cx))) + sprop->hasGetterValue() ? sprop->getterValue() : JSVAL_VOID, cx))) { return Reject(cx, obj2, current, JSMSG_CANT_REDEFINE_UNCONFIGURABLE_PROP, throwError, desc.id, rval); @@ -2528,7 +2442,7 @@ DefinePropertyObject(JSContext *cx, JSObject *obj, const PropertyDescriptor &des if (desc.hasEnumerable) changed |= JSPROP_ENUMERATE; - attrs = (sprop->attrs & ~changed) | (desc.attrs & changed); + attrs = (sprop->attributes() & ~changed) | (desc.attrs & changed); getter = sprop->getter(); setter = sprop->setter(); } else if (desc.isDataDescriptor()) { @@ -2542,7 +2456,7 @@ DefinePropertyObject(JSContext *cx, JSObject *obj, const PropertyDescriptor &des if (desc.hasValue) v = desc.value; - attrs = (desc.attrs & ~unchanged) | (sprop->attrs & unchanged); + attrs = (desc.attrs & ~unchanged) | (sprop->attributes() & unchanged); getter = setter = JS_PropertyStub; } else { JS_ASSERT(desc.isAccessorDescriptor()); @@ -2569,7 +2483,7 @@ DefinePropertyObject(JSContext *cx, JSObject *obj, const PropertyDescriptor &des if (desc.hasSet) changed |= JSPROP_SETTER | JSPROP_SHARED; - attrs = (desc.attrs & changed) | (sprop->attrs & ~changed); + attrs = (desc.attrs & changed) | (sprop->attributes() & ~changed); if (desc.hasGet) getter = desc.getterObject() ? desc.getter() : JS_PropertyStub; else @@ -2644,12 +2558,27 @@ DefineProperty(JSContext *cx, JSObject *obj, const PropertyDescriptor &desc, boo if (obj->isArray()) return DefinePropertyArray(cx, obj, desc, throwError, rval); - if (!OBJ_IS_NATIVE(obj)) + if (!obj->isNative()) return Reject(cx, JSMSG_OBJECT_NOT_EXTENSIBLE, throwError, rval); return DefinePropertyObject(cx, obj, desc, throwError, rval); } +JSBool +js_DefineOwnProperty(JSContext *cx, JSObject *obj, jsid id, jsval descriptor, JSBool *bp) +{ + AutoDescriptorArray descs(cx); + PropertyDescriptor *desc = descs.append(); + if (!desc || !desc->initialize(cx, id, descriptor)) + return false; + + bool rval; + if (!DefineProperty(cx, obj, *desc, true, &rval)) + return false; + *bp = !!rval; + return true; +} + /* ES5 15.2.3.6: Object.defineProperty(O, P, Attributes) */ static JSBool obj_defineProperty(JSContext* cx, uintN argc, jsval* vp) @@ -2664,19 +2593,16 @@ obj_defineProperty(JSContext* cx, uintN argc, jsval* vp) JSObject* obj = JSVAL_TO_OBJECT(*vp); /* 15.2.3.6 step 2. */ - JSAutoTempIdRooter nameidr(cx); + AutoIdRooter nameidr(cx); if (!JS_ValueToId(cx, argc >= 2 ? vp[3] : JSVAL_VOID, nameidr.addr())) return JS_FALSE; /* 15.2.3.6 step 3. */ - AutoDescriptorArray descs(cx); - PropertyDescriptor *desc = descs.append(); - if (!desc || !desc->initialize(cx, nameidr.id(), argc >= 3 ? 
vp[4] : JSVAL_VOID)) - return JS_FALSE; + jsval descval = argc >= 3 ? vp[4] : JSVAL_VOID; /* 15.2.3.6 step 4 */ - bool dummy; - return DefineProperty(cx, obj, *desc, true, &dummy); + JSBool junk; + return js_DefineOwnProperty(cx, obj, nameidr.id(), descval, &junk); } /* ES5 15.2.3.7: Object.defineProperties(O, Properties) */ @@ -2701,7 +2627,7 @@ obj_defineProperties(JSContext* cx, uintN argc, jsval* vp) return JS_FALSE; vp[3] = OBJECT_TO_JSVAL(props); - JSAutoIdArray ida(cx, JS_Enumerate(cx, props)); + AutoIdArray ida(cx, JS_Enumerate(cx, props)); if (!ida) return JS_FALSE; @@ -2765,7 +2691,7 @@ obj_create(JSContext *cx, uintN argc, jsval *vp) } JSObject *props = JSVAL_TO_OBJECT(vp[3]); - JSAutoIdArray ida(cx, JS_Enumerate(cx, props)); + AutoIdArray ida(cx, JS_Enumerate(cx, props)); if (!ida) return JS_FALSE; @@ -2862,7 +2788,7 @@ InitScopeForObject(JSContext* cx, JSObject* obj, JSObject* proto, JSObjectOps* o JSClass *clasp = OBJ_GET_CLASS(cx, obj); JSScope *scope = NULL; - if (proto && OBJ_IS_NATIVE(proto)) { + if (proto && proto->isNative()) { JS_LOCK_OBJ(cx, proto); scope = OBJ_SCOPE(proto); if (scope->canProvideEmptyScope(ops, clasp)) { @@ -2963,7 +2889,7 @@ js_NewObjectWithGivenProto(JSContext *cx, JSClass *clasp, JSObject *proto, * builtin. See bug 481444. */ if (cx->debugHooks->objectHook && !JS_ON_TRACE(cx)) { - JSAutoTempValueRooter tvr(cx, obj); + AutoValueRooter tvr(cx, obj); JS_KEEP_ATOMS(cx->runtime); cx->debugHooks->objectHook(cx, obj, JS_TRUE, cx->debugHooks->objectHookData); @@ -3102,7 +3028,7 @@ NewNativeObject(JSContext* cx, JSClass* clasp, JSObject* proto, JSObject* FASTCALL js_NewInstance(JSContext *cx, JSClass *clasp, JSObject *ctor) { - JS_ASSERT(HAS_FUNCTION_CLASS(ctor)); + JS_ASSERT(ctor->isFunction()); JSAtom *atom = cx->runtime->atomState.classPrototypeAtom; @@ -3118,7 +3044,7 @@ js_NewInstance(JSContext *cx, JSClass *clasp, JSObject *ctor) } JSScopeProperty *sprop = scope->lookup(ATOM_TO_JSID(atom)); - jsval pval = sprop ? STOBJ_GET_SLOT(ctor, sprop->slot) : JSVAL_HOLE; + jsval pval = sprop ? ctor->getSlot(sprop->slot) : JSVAL_HOLE; JSObject *proto; if (!JSVAL_IS_PRIMITIVE(pval)) { @@ -3424,7 +3350,7 @@ JSObject * js_CloneBlockObject(JSContext *cx, JSObject *proto, JSStackFrame *fp) { JS_ASSERT(!OBJ_IS_CLONED_BLOCK(proto)); - JS_ASSERT(STOBJ_GET_CLASS(proto) == &js_BlockClass); + JS_ASSERT(proto->getClass() == &js_BlockClass); JSObject *clone = js_NewGCObject(cx); if (!clone) @@ -3466,7 +3392,7 @@ js_PutBlockObject(JSContext *cx, JSBool normalUnwind) JS_ASSERT(OBJ_SCOPE(obj)->object != obj); /* Block objects should not have reserved slots before they are put. */ - JS_ASSERT(STOBJ_NSLOTS(obj) == JS_INITIAL_NSLOTS); + JS_ASSERT(obj->numSlots() == JS_INITIAL_NSLOTS); /* The block and its locals must be on the current stack for GC safety. */ depth = OBJ_BLOCK_DEPTH(cx, obj); @@ -3519,8 +3445,8 @@ block_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp) /* Values are in reserved slots immediately following DEPTH. */ uint32 slot = JSSLOT_BLOCK_DEPTH + 1 + index; JS_LOCK_OBJ(cx, obj); - JS_ASSERT(slot < STOBJ_NSLOTS(obj)); - *vp = STOBJ_GET_SLOT(obj, slot); + JS_ASSERT(slot < obj->numSlots()); + *vp = obj->getSlot(slot); JS_UNLOCK_OBJ(cx, obj); return true; } @@ -3544,8 +3470,8 @@ block_setProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp) /* Values are in reserved slots immediately following DEPTH. 
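
// block_getProperty above and block_setProperty below locate a block local
// with the same arithmetic: the block-depth slot comes first, and local i
// lives in reserved slot JSSLOT_BLOCK_DEPTH + 1 + i. Tiny sketch of that
// layout (the concrete slot number is an assumption; only the +1 offset
// matters):

#include <cassert>

static const unsigned JSSLOT_BLOCK_DEPTH = 3;   // illustrative position

static unsigned blockLocalSlot(unsigned index) {
    return JSSLOT_BLOCK_DEPTH + 1 + index;      // values follow DEPTH
}

int main() {
    assert(blockLocalSlot(0) == JSSLOT_BLOCK_DEPTH + 1);
    assert(blockLocalSlot(2) == JSSLOT_BLOCK_DEPTH + 3);
    return 0;
}
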
*/ uint32 slot = JSSLOT_BLOCK_DEPTH + 1 + index; JS_LOCK_OBJ(cx, obj); - JS_ASSERT(slot < STOBJ_NSLOTS(obj)); - STOBJ_SET_SLOT(obj, slot, *vp); + JS_ASSERT(slot < obj->numSlots()); + obj->setSlot(slot, *vp); JS_UNLOCK_OBJ(cx, obj); return true; } @@ -3639,7 +3565,7 @@ js_XDRBlockObject(JSXDRState *xdr, JSObject **objp) obj->setParent(parent); } - JSAutoTempValueRooter tvr(cx, obj); + AutoValueRooter tvr(cx, obj); if (!JS_XDRUint32(xdr, &tmp)) return false; @@ -3647,7 +3573,7 @@ js_XDRBlockObject(JSXDRState *xdr, JSObject **objp) if (xdr->mode == JSXDR_DECODE) { depth = (uint16)(tmp >> 16); count = (uint16)tmp; - STOBJ_SET_SLOT(obj, JSSLOT_BLOCK_DEPTH, INT_TO_JSVAL(depth)); + obj->setSlot(JSSLOT_BLOCK_DEPTH, INT_TO_JSVAL(depth)); } /* @@ -3732,7 +3658,6 @@ js_InitClass(JSContext *cx, JSObject *obj, JSObject *parent_proto, JSAtom *atom; JSProtoKey key; JSObject *proto, *ctor; - JSTempValueRooter tvr; jsval cval, rval; JSBool named; JSFunction *fun; @@ -3768,7 +3693,7 @@ js_InitClass(JSContext *cx, JSObject *obj, JSObject *parent_proto, return NULL; /* After this point, control must exit via label bad or out. */ - JS_PUSH_TEMP_ROOT_OBJECT(cx, proto, &tvr); + AutoValueRooter tvr(cx, proto); if (!constructor) { /* @@ -3858,7 +3783,6 @@ js_InitClass(JSContext *cx, JSObject *obj, JSObject *parent_proto, goto bad; out: - JS_POP_TEMP_ROOT(cx, &tvr); return proto; bad: @@ -3978,11 +3902,11 @@ js_ShrinkSlots(JSContext *cx, JSObject *obj, size_t nslots) bool js_EnsureReservedSlots(JSContext *cx, JSObject *obj, size_t nreserved) { - JS_ASSERT(OBJ_IS_NATIVE(obj)); + JS_ASSERT(obj->isNative()); JS_ASSERT(!obj->dslots); - uintN nslots = JSSLOT_FREE(STOBJ_GET_CLASS(obj)) + nreserved; - if (nslots > STOBJ_NSLOTS(obj) && !AllocSlots(cx, obj, nslots)) + uintN nslots = JSSLOT_FREE(obj->getClass()) + nreserved; + if (nslots > obj->numSlots() && !AllocSlots(cx, obj, nslots)) return false; JSScope *scope = OBJ_SCOPE(obj); @@ -3990,7 +3914,7 @@ js_EnsureReservedSlots(JSContext *cx, JSObject *obj, size_t nreserved) #ifdef JS_THREADSAFE JS_ASSERT(scope->title.ownercx->thread == cx->thread); #endif - JS_ASSERT(scope->freeslot == JSSLOT_FREE(STOBJ_GET_CLASS(obj))); + JS_ASSERT(scope->freeslot == JSSLOT_FREE(obj->getClass())); if (scope->freeslot < nslots) scope->freeslot = nslots; } @@ -4138,14 +4062,14 @@ js_FindClassObject(JSContext *cx, JSObject *start, JSProtoKey protoKey, id = ATOM_TO_JSID(atom); } - JS_ASSERT(OBJ_IS_NATIVE(obj)); + JS_ASSERT(obj->isNative()); if (js_LookupPropertyWithFlags(cx, obj, id, JSRESOLVE_CLASSNAME, &pobj, &prop) < 0) { return JS_FALSE; } v = JSVAL_VOID; if (prop) { - if (OBJ_IS_NATIVE(pobj)) { + if (pobj->isNative()) { sprop = (JSScopeProperty *) prop; if (SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(pobj))) { v = LOCKED_OBJ_GET_SLOT(pobj, sprop->slot); @@ -4166,7 +4090,7 @@ js_ConstructObject(JSContext *cx, JSClass *clasp, JSObject *proto, jsval cval, rval; JSObject *obj, *ctor; - JSAutoTempValueRooter argtvr(cx, argc, argv); + AutoArrayRooter argtvr(cx, argc, argv); JSProtoKey protoKey = GetClassProtoKey(clasp); if (!js_FindClassObject(cx, parent, protoKey, &cval, clasp)) @@ -4178,7 +4102,7 @@ js_ConstructObject(JSContext *cx, JSClass *clasp, JSObject *proto, } /* Protect cval in case a crazy getter for .prototype uproots it. 
*/ - JSAutoTempValueRooter tvr(cx, cval); + AutoValueRooter tvr(cx, cval); /* * If proto or parent are NULL, set them to Constructor.prototype and/or @@ -4241,13 +4165,13 @@ js_AllocSlot(JSContext *cx, JSObject *obj, uint32 *slotp) scope->freeslot += clasp->reserveSlots(cx, obj); } - if (scope->freeslot >= STOBJ_NSLOTS(obj) && + if (scope->freeslot >= obj->numSlots() && !js_GrowSlots(cx, obj, scope->freeslot + 1)) { return JS_FALSE; } /* js_ReallocSlots or js_FreeSlot should set the free slots to void. */ - JS_ASSERT(JSVAL_IS_VOID(STOBJ_GET_SLOT(obj, scope->freeslot))); + JS_ASSERT(JSVAL_IS_VOID(obj->getSlot(scope->freeslot))); *slotp = scope->freeslot++; return JS_TRUE; } @@ -4334,7 +4258,7 @@ PurgeProtoChain(JSContext *cx, JSObject *obj, jsid id) JSScopeProperty *sprop; while (obj) { - if (!OBJ_IS_NATIVE(obj)) { + if (!obj->isNative()) { obj = obj->getProto(); continue; } @@ -4374,7 +4298,7 @@ js_PurgeScopeChainHelper(JSContext *cx, JSObject *obj, jsid id) * properties with the same names have been cached or traced. Call objects * may gain such properties via eval introducing new vars; see bug 490364. */ - if (STOBJ_GET_CLASS(obj) == &js_CallClass) { + if (obj->getClass() == &js_CallClass) { while ((obj = obj->getParent()) != NULL) { if (PurgeProtoChain(cx, obj, id)) break; @@ -4500,9 +4424,7 @@ js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value, if (!js_LookupProperty(cx, obj, id, &pobj, &prop)) return JS_FALSE; sprop = (JSScopeProperty *) prop; - if (sprop && - pobj == obj && - (sprop->attrs & (JSPROP_GETTER | JSPROP_SETTER))) { + if (sprop && pobj == obj && sprop->isAccessorDescriptor()) { sprop = OBJ_SCOPE(obj)->changeProperty(cx, sprop, attrs, JSPROP_GETTER | JSPROP_SETTER, (attrs & JSPROP_GETTER) @@ -4595,8 +4517,8 @@ js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value, if (defineHow & JSDNP_CACHE_RESULT) { JS_ASSERT_NOT_ON_TRACE(cx); - JSPropCacheEntry *entry; - entry = js_FillPropertyCache(cx, obj, 0, 0, obj, sprop, added); + PropertyCacheEntry *entry = + JS_PROPERTY_CACHE(cx).fill(cx, obj, 0, 0, obj, sprop, added); TRACE_2(SetPropHit, entry, sprop); } if (propp) @@ -4697,7 +4619,7 @@ js_LookupPropertyWithFlags(JSContext *cx, JSObject *obj, jsid id, uintN flags, /* Resolved: juggle locks and lookup id again. */ if (obj2 != obj) { JS_UNLOCK_OBJ(cx, obj); - if (OBJ_IS_NATIVE(obj2)) + if (obj2->isNative()) JS_LOCK_OBJ(cx, obj2); } protoIndex = 0; @@ -4705,7 +4627,7 @@ js_LookupPropertyWithFlags(JSContext *cx, JSObject *obj, jsid id, uintN flags, proto = proto->getProto()) { protoIndex++; } - if (!OBJ_IS_NATIVE(obj2)) { + if (!obj2->isNative()) { /* Whoops, newresolve handed back a foreign obj2. 
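
// js_DefineNativeProperty above now fills the property cache through the
// PropertyCache::fill method instead of the old js_FillPropertyCache free
// function. Conceptually the cache remembers where a (shape, id) lookup
// landed so the next lookup with the same shape can skip the scope-chain
// walk. Toy monomorphic version of fill/lookup (a sketch of the idea only;
// the real PropertyCacheEntry keys on pc and stores scope/proto hop counts
// and the sprop itself):

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct Entry { uint32_t shape; uint32_t id; uint32_t slot; bool valid; };

struct PropertyCache {
    static const std::size_t SIZE = 256;        // power of two, assumed
    Entry entries[SIZE];

    PropertyCache() {
        for (std::size_t i = 0; i < SIZE; i++)
            entries[i].valid = false;
    }
    static std::size_t hash(uint32_t shape, uint32_t id) {
        return (shape * 31 + id) & (SIZE - 1);
    }
    void fill(uint32_t shape, uint32_t id, uint32_t slot) {
        Entry e = { shape, id, slot, true };
        entries[hash(shape, id)] = e;
    }
    bool lookup(uint32_t shape, uint32_t id, uint32_t *slot) const {
        const Entry &e = entries[hash(shape, id)];
        if (!e.valid || e.shape != shape || e.id != id)
            return false;                       // miss: take the slow path
        *slot = e.slot;
        return true;
    }
};

int main() {
    PropertyCache pc;
    pc.fill(7, 42, 3);
    uint32_t slot = 0;
    std::printf("hit=%d slot=%u\n", (int) pc.lookup(7, 42, &slot), slot);
}
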
*/ JS_ASSERT(obj2 != obj); ok = obj2->lookupProperty(cx, id, objp, propp); @@ -4730,7 +4652,7 @@ js_LookupPropertyWithFlags(JSContext *cx, JSObject *obj, jsid id, uintN flags, JS_ASSERT(!scope->isSharedEmpty()); obj = obj2; } else if (obj2 != obj) { - if (OBJ_IS_NATIVE(obj2)) + if (obj2->isNative()) JS_UNLOCK_OBJ(cx, obj2); JS_LOCK_OBJ(cx, obj); } @@ -4745,7 +4667,7 @@ js_LookupPropertyWithFlags(JSContext *cx, JSObject *obj, jsid id, uintN flags, if (!ok) goto cleanup; JS_LOCK_OBJ(cx, obj); - JS_ASSERT(OBJ_IS_NATIVE(obj)); + JS_ASSERT(obj->isNative()); scope = OBJ_SCOPE(obj); if (!scope->isSharedEmpty()) sprop = scope->lookup(id); @@ -4773,7 +4695,7 @@ js_LookupPropertyWithFlags(JSContext *cx, JSObject *obj, jsid id, uintN flags, JS_UNLOCK_OBJ(cx, obj); if (!proto) break; - if (!OBJ_IS_NATIVE(proto)) { + if (!proto->isNative()) { if (!proto->lookupProperty(cx, id, objp, propp)) return -1; return protoIndex + 1; @@ -4800,12 +4722,12 @@ out: return protoIndex; } -JSPropCacheEntry * +PropertyCacheEntry * js_FindPropertyHelper(JSContext *cx, jsid id, JSBool cacheResult, JSObject **objp, JSObject **pobjp, JSProperty **propp) { JSObject *scopeChain, *obj, *parent, *pobj; - JSPropCacheEntry *entry; + PropertyCacheEntry *entry; int scopeIndex, protoIndex; JSProperty *prop; @@ -4831,7 +4753,7 @@ js_FindPropertyHelper(JSContext *cx, jsid id, JSBool cacheResult, #ifdef DEBUG if (parent) { JSClass *clasp = OBJ_GET_CLASS(cx, obj); - JS_ASSERT(OBJ_IS_NATIVE(pobj)); + JS_ASSERT(pobj->isNative()); JS_ASSERT(OBJ_GET_CLASS(cx, pobj) == clasp); if (clasp == &js_BlockClass) { /* @@ -4848,9 +4770,8 @@ js_FindPropertyHelper(JSContext *cx, jsid id, JSBool cacheResult, } #endif if (cacheResult) { - entry = js_FillPropertyCache(cx, scopeChain, - scopeIndex, protoIndex, pobj, - (JSScopeProperty *) prop, false); + entry = JS_PROPERTY_CACHE(cx).fill(cx, scopeChain, scopeIndex, protoIndex, pobj, + (JSScopeProperty *) prop); } SCOPE_DEPTH_ACCUM(&cx->runtime->scopeSearchDepthStats, scopeIndex); goto out; @@ -4926,14 +4847,13 @@ js_FindIdentifierBase(JSContext *cx, JSObject *scopeChain, jsid id) if (protoIndex < 0) return NULL; if (prop) { - JS_ASSERT(OBJ_IS_NATIVE(pobj)); + JS_ASSERT(pobj->isNative()); JS_ASSERT(OBJ_GET_CLASS(cx, pobj) == OBJ_GET_CLASS(cx, obj)); #ifdef DEBUG - JSPropCacheEntry *entry = + PropertyCacheEntry *entry = #endif - js_FillPropertyCache(cx, scopeChain, - scopeIndex, protoIndex, pobj, - (JSScopeProperty *) prop, false); + JS_PROPERTY_CACHE(cx).fill(cx, scopeChain, scopeIndex, protoIndex, pobj, + (JSScopeProperty *) prop); JS_ASSERT(entry); JS_UNLOCK_OBJ(cx, pobj); return obj; @@ -4978,10 +4898,8 @@ js_NativeGet(JSContext *cx, JSObject *obj, JSObject *pobj, JSScope *scope; uint32 slot; int32 sample; - JSTempValueRooter tvr, tvr2; - JSBool ok; - JS_ASSERT(OBJ_IS_NATIVE(pobj)); + JS_ASSERT(pobj->isNative()); JS_ASSERT(JS_IS_OBJ_LOCKED(cx, pobj)); scope = OBJ_SCOPE(pobj); @@ -4989,7 +4907,7 @@ js_NativeGet(JSContext *cx, JSObject *obj, JSObject *pobj, *vp = (slot != SPROP_INVALID_SLOT) ? 
LOCKED_OBJ_GET_SLOT(pobj, slot) : JSVAL_VOID; - if (SPROP_HAS_STUB_GETTER(sprop)) + if (sprop->hasDefaultGetter()) return true; if (JS_UNLIKELY(sprop->isMethod()) && (getHow & JSGET_NO_METHOD_BARRIER)) { @@ -4999,15 +4917,14 @@ js_NativeGet(JSContext *cx, JSObject *obj, JSObject *pobj, sample = cx->runtime->propertyRemovals; JS_UNLOCK_SCOPE(cx, scope); - JS_PUSH_TEMP_ROOT_SPROP(cx, sprop, &tvr); - JS_PUSH_TEMP_ROOT_OBJECT(cx, pobj, &tvr2); - ok = sprop->get(cx, obj, pobj, vp); - JS_POP_TEMP_ROOT(cx, &tvr2); - JS_POP_TEMP_ROOT(cx, &tvr); - if (!ok) - return false; - + { + AutoScopePropertyRooter tvr(cx, sprop); + AutoValueRooter tvr2(cx, pobj); + if (!sprop->get(cx, obj, pobj, vp)) + return false; + } JS_LOCK_SCOPE(cx, scope); + if (SLOT_IN_SCOPE(slot, scope) && (JS_LIKELY(cx->runtime->propertyRemovals == sample) || scope->hasProperty(sprop))) { @@ -5031,10 +4948,8 @@ js_NativeSet(JSContext *cx, JSObject *obj, JSScopeProperty *sprop, bool added, JSScope *scope; uint32 slot; int32 sample; - JSTempValueRooter tvr; - JSBool ok; - JS_ASSERT(OBJ_IS_NATIVE(obj)); + JS_ASSERT(obj->isNative()); JS_ASSERT(JS_IS_OBJ_LOCKED(cx, obj)); scope = OBJ_SCOPE(obj); @@ -5043,7 +4958,7 @@ js_NativeSet(JSContext *cx, JSObject *obj, JSScopeProperty *sprop, bool added, OBJ_CHECK_SLOT(obj, slot); /* If sprop has a stub setter, keep scope locked and just store *vp. */ - if (SPROP_HAS_STUB_SETTER(sprop)) { + if (sprop->hasDefaultSetter()) { if (!added && !scope->methodWriteBarrier(cx, sprop, *vp)) { JS_UNLOCK_SCOPE(cx, scope); return false; @@ -5058,17 +4973,17 @@ js_NativeSet(JSContext *cx, JSObject *obj, JSScopeProperty *sprop, bool added, * not writable, so attempting to set such a property should do nothing * or throw if we're in strict mode. */ - if (!(sprop->attrs & JSPROP_GETTER) && SPROP_HAS_STUB_SETTER(sprop)) + if (!sprop->hasGetterValue() && sprop->hasDefaultSetter()) return js_ReportGetterOnlyAssignment(cx); } sample = cx->runtime->propertyRemovals; JS_UNLOCK_SCOPE(cx, scope); - JS_PUSH_TEMP_ROOT_SPROP(cx, sprop, &tvr); - ok = sprop->set(cx, obj, vp); - JS_POP_TEMP_ROOT(cx, &tvr); - if (!ok) - return false; + { + AutoScopePropertyRooter tvr(cx, sprop); + if (!sprop->set(cx, obj, vp)) + return false; + } JS_LOCK_SCOPE(cx, scope); if (SLOT_IN_SCOPE(slot, scope) && @@ -5165,7 +5080,7 @@ js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN getHow, return JS_TRUE; } - if (!OBJ_IS_NATIVE(obj2)) { + if (!obj2->isNative()) { obj2->dropProperty(cx, prop); return obj2->getProperty(cx, id, vp); } @@ -5174,7 +5089,7 @@ js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN getHow, if (getHow & JSGET_CACHE_RESULT) { JS_ASSERT_NOT_ON_TRACE(cx); - js_FillPropertyCache(cx, aobj, 0, protoIndex, obj2, sprop, false); + JS_PROPERTY_CACHE(cx).fill(cx, aobj, 0, protoIndex, obj2, sprop); } if (!js_NativeGet(cx, obj, obj2, sprop, getHow, vp)) @@ -5279,7 +5194,7 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow, if (protoIndex < 0) return JS_FALSE; if (prop) { - if (!OBJ_IS_NATIVE(pobj)) { + if (!pobj->isNative()) { pobj->dropProperty(cx, prop); prop = NULL; } @@ -5315,9 +5230,7 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow, */ scope = OBJ_SCOPE(pobj); - attrs = sprop->attrs; - if ((attrs & JSPROP_READONLY) || - (scope->sealed() && (attrs & JSPROP_SHARED))) { + if (!sprop->writable() || (scope->sealed() && !sprop->hasSlot())) { JS_UNLOCK_SCOPE(cx, scope); /* @@ -5329,7 +5242,7 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN 
defineHow, * read_only_error;' case. */ flags = JSREPORT_ERROR; - if (attrs & JSPROP_READONLY) { + if (!sprop->writable()) { if (!JS_HAS_STRICT_OPTION(cx)) { /* Just return true per ECMA if not in strict mode. */ PCMETER((defineHow & JSDNP_CACHE_RESULT) && JS_PROPERTY_CACHE(cx).rofills++); @@ -5350,6 +5263,7 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow, goto read_only_error; } + attrs = sprop->attributes(); if (pobj != obj) { /* * We found id in a prototype object: prepare to share or shadow. @@ -5360,19 +5274,17 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow, */ JS_UNLOCK_SCOPE(cx, scope); - /* Don't clone a shared prototype property. */ - if (attrs & JSPROP_SHARED) { + /* Don't clone a prototype property that doesn't have a slot. */ + if (!sprop->hasSlot()) { if (defineHow & JSDNP_CACHE_RESULT) { JS_ASSERT_NOT_ON_TRACE(cx); - JSPropCacheEntry *entry; - entry = js_FillPropertyCache(cx, obj, 0, protoIndex, pobj, sprop, false); + PropertyCacheEntry *entry = + JS_PROPERTY_CACHE(cx).fill(cx, obj, 0, protoIndex, pobj, sprop); TRACE_2(SetPropHit, entry, sprop); } - if (SPROP_HAS_STUB_SETTER(sprop) && - !(sprop->attrs & JSPROP_GETTER)) { + if (sprop->hasDefaultSetter() && !sprop->hasGetterValue()) return JS_TRUE; - } return sprop->set(cx, obj, vp); } @@ -5467,8 +5379,7 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow, if (defineHow & JSDNP_CACHE_RESULT) { JS_ASSERT_NOT_ON_TRACE(cx); - JSPropCacheEntry *entry; - entry = js_FillPropertyCache(cx, obj, 0, 0, obj, sprop, added); + PropertyCacheEntry *entry = JS_PROPERTY_CACHE(cx).fill(cx, obj, 0, 0, obj, sprop, added); TRACE_2(SetPropHit, entry, sprop); } @@ -5505,14 +5416,14 @@ js_GetAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop, *attrsp = 0; return JS_TRUE; } - if (!OBJ_IS_NATIVE(obj)) { + if (!obj->isNative()) { ok = obj->getAttributes(cx, id, prop, attrsp); obj->dropProperty(cx, prop); return ok; } } sprop = (JSScopeProperty *)prop; - *attrsp = sprop->attrs; + *attrsp = sprop->attributes(); if (noprop) obj->dropProperty(cx, prop); return JS_TRUE; @@ -5531,7 +5442,7 @@ js_SetAttributes(JSContext *cx, JSObject *obj, jsid id, JSProperty *prop, return JS_FALSE; if (!prop) return JS_TRUE; - if (!OBJ_IS_NATIVE(obj)) { + if (!obj->isNative()) { ok = obj->setAttributes(cx, id, prop, attrsp); obj->dropProperty(cx, prop); return ok; @@ -5569,9 +5480,9 @@ js_DeleteProperty(JSContext *cx, JSObject *obj, jsid id, jsval *rval) * each delegating object. 
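
// js_NativeGet and js_NativeSet above drop the scope lock around the
// getter/setter call and revalidate afterwards: they snapshot the runtime's
// propertyRemovals counter first, then trust the cached slot only if the
// counter is unchanged (or the property is provably still present).
// Single-threaded sketch of the snapshot-and-compare pattern (names are
// illustrative):

#include <cstdio>

struct Runtime { unsigned propertyRemovals; };

template <class F>
int readWithRevalidate(Runtime &rt, int cachedSlotValue, F callback) {
    unsigned sample = rt.propertyRemovals;  // snapshot before losing control
    callback(rt);                           // arbitrary code; may remove props
    if (rt.propertyRemovals != sample)
        return -1;                          // stale: caller redoes the lookup
    return cachedSlotValue;                 // still valid: reuse cached state
}

static void benign(Runtime &) {}
static void removesProperty(Runtime &rt) { rt.propertyRemovals++; }

int main() {
    Runtime rt = { 0 };
    std::printf("%d\n", readWithRevalidate(rt, 42, benign));          // 42
    std::printf("%d\n", readWithRevalidate(rt, 42, removesProperty)); // -1
}
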
*/ if (prop) { - if (OBJ_IS_NATIVE(proto)) { + if (proto->isNative()) { sprop = (JSScopeProperty *)prop; - if (SPROP_IS_SHARED_PERMANENT(sprop)) + if (sprop->isSharedPermanent()) *rval = JSVAL_FALSE; } proto->dropProperty(cx, prop); @@ -5589,7 +5500,7 @@ js_DeleteProperty(JSContext *cx, JSObject *obj, jsid id, jsval *rval) } sprop = (JSScopeProperty *)prop; - if (sprop->attrs & JSPROP_PERMANENT) { + if (!sprop->configurable()) { obj->dropProperty(cx, prop); *rval = JSVAL_FALSE; return JS_TRUE; @@ -5643,9 +5554,7 @@ js_DefaultValue(JSContext *cx, JSObject *obj, JSType hint, jsval *vp) } } - if (sprop && - SPROP_HAS_STUB_GETTER(sprop) && - SPROP_HAS_VALID_SLOT(sprop, scope)) { + if (sprop && sprop->hasDefaultGetter() && SPROP_HAS_VALID_SLOT(sprop, scope)) { jsval fval = LOCKED_OBJ_GET_SLOT(pobj, sprop->slot); if (VALUE_IS_FUNCTION(cx, fval)) { @@ -6011,7 +5920,7 @@ js_CheckAccess(JSContext *cx, JSObject *obj, jsid id, JSAccessMode mode, break; } - if (!OBJ_IS_NATIVE(pobj)) { + if (!pobj->isNative()) { pobj->dropProperty(cx, prop); /* Avoid diverging for non-natives that reuse js_CheckAccess. */ @@ -6026,7 +5935,7 @@ js_CheckAccess(JSContext *cx, JSObject *obj, jsid id, JSAccessMode mode, } sprop = (JSScopeProperty *)prop; - *attrsp = sprop->attrs; + *attrsp = sprop->attributes(); if (!writing) { *vp = (SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(pobj))) ? LOCKED_OBJ_GET_SLOT(pobj, sprop->slot) @@ -6607,7 +6516,7 @@ js_PrintObjectSlotName(JSTracer *trc, char *buf, size_t bufsize) JS_ASSERT(slot >= JSSLOT_START(obj->getClass())); JSScopeProperty *sprop; - if (OBJ_IS_NATIVE(obj)) { + if (obj->isNative()) { JSScope *scope = OBJ_SCOPE(obj); sprop = scope->lastProperty(); while (sprop && sprop->slot != slot) @@ -6647,7 +6556,7 @@ js_PrintObjectSlotName(JSTracer *trc, char *buf, size_t bufsize) void js_TraceObject(JSTracer *trc, JSObject *obj) { - JS_ASSERT(OBJ_IS_NATIVE(obj)); + JS_ASSERT(obj->isNative()); JSContext *cx = trc->context; JSScope *scope = OBJ_SCOPE(obj); @@ -6658,7 +6567,7 @@ js_TraceObject(JSTracer *trc, JSObject *obj) * that share their scope, scope->freeslot can be an underestimate. */ size_t slots = scope->freeslot; - if (STOBJ_NSLOTS(obj) != slots) + if (obj->numSlots() != slots) js_ShrinkSlots(cx, obj, slots); } @@ -6685,19 +6594,19 @@ js_TraceObject(JSTracer *trc, JSObject *obj) /* * An unmutated object that shares a prototype object's scope. We can't * tell how many slots are in use in obj by looking at its scope, so we - * use STOBJ_NSLOTS(obj). + * use obj->numSlots(). * * NB: In case clasp->mark mutates something, leave this code here -- * don't move it up and unify it with the |if (!traceScope)| section * above. */ - uint32 nslots = STOBJ_NSLOTS(obj); + uint32 nslots = obj->numSlots(); if (!scope->isSharedEmpty() && scope->freeslot < nslots) nslots = scope->freeslot; JS_ASSERT(nslots >= JSSLOT_START(clasp)); for (uint32 i = JSSLOT_START(clasp); i != nslots; ++i) { - jsval v = STOBJ_GET_SLOT(obj, i); + jsval v = obj->getSlot(i); if (JSVAL_IS_TRACEABLE(v)) { JS_SET_TRACING_DETAILS(trc, js_PrintObjectSlotName, obj, i); js_CallGCMarker(trc, JSVAL_TO_TRACEABLE(v), JSVAL_TRACE_KIND(v)); @@ -6723,10 +6632,10 @@ js_Clear(JSContext *cx, JSObject *obj) scope->clear(cx); /* Clear slot values and reset freeslot so we're consistent. 
*/ - i = STOBJ_NSLOTS(obj); + i = obj->numSlots(); n = JSSLOT_FREE(obj->getClass()); while (--i >= n) - STOBJ_SET_SLOT(obj, i, JSVAL_VOID); + obj->setSlot(i, JSVAL_VOID); scope->freeslot = n; } JS_UNLOCK_OBJ(cx, obj); @@ -6754,7 +6663,7 @@ ReservedSlotIndexOK(JSContext *cx, JSObject *obj, JSClass *clasp, bool js_GetReservedSlot(JSContext *cx, JSObject *obj, uint32 index, jsval *vp) { - if (!OBJ_IS_NATIVE(obj)) { + if (!obj->isNative()) { *vp = JSVAL_VOID; return true; } @@ -6767,7 +6676,7 @@ js_GetReservedSlot(JSContext *cx, JSObject *obj, uint32 index, jsval *vp) return false; uint32 slot = JSSLOT_START(clasp) + index; - *vp = (slot < STOBJ_NSLOTS(obj)) ? STOBJ_GET_SLOT(obj, slot) : JSVAL_VOID; + *vp = (slot < obj->numSlots()) ? obj->getSlot(slot) : JSVAL_VOID; JS_UNLOCK_OBJ(cx, obj); return true; } @@ -6775,7 +6684,7 @@ js_GetReservedSlot(JSContext *cx, JSObject *obj, uint32 index, jsval *vp) bool js_SetReservedSlot(JSContext *cx, JSObject *obj, uint32 index, jsval v) { - if (!OBJ_IS_NATIVE(obj)) + if (!obj->isNative()) return true; JSClass *clasp = OBJ_GET_CLASS(cx, obj); @@ -6808,13 +6717,13 @@ js_SetReservedSlot(JSContext *cx, JSObject *obj, uint32 index, jsval v) * If scope is shared, do not modify scope->freeslot. It is OK for freeslot * to be an underestimate in objects with shared scopes, as they will get * their own scopes before mutating, and elsewhere (e.g. js_TraceObject) we - * use STOBJ_NSLOTS(obj) rather than rely on freeslot. + * use obj->numSlots() rather than rely on freeslot. */ JSScope *scope = OBJ_SCOPE(obj); if (!scope->isSharedEmpty() && slot >= scope->freeslot) scope->freeslot = slot + 1; - STOBJ_SET_SLOT(obj, slot, v); + obj->setSlot(slot, v); GC_POKE(cx, JS_NULL); JS_UNLOCK_SCOPE(cx, scope); return true; @@ -6837,6 +6746,15 @@ js_GetWrappedObject(JSContext *cx, JSObject *obj) return obj; } +JSObject * +JSObject::getGlobal() +{ + JSObject *obj = this; + while (JSObject *parent = obj->getParent()) + obj = parent; + return obj; +} + bool JSObject::isCallable() { @@ -6934,7 +6852,7 @@ dumpValue(jsval val) } else if (JSVAL_IS_VOID(val)) { fprintf(stderr, "undefined"); } else if (JSVAL_IS_OBJECT(val) && - HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(val))) { + JSVAL_TO_OBJECT(val)->isFunction()) { JSObject *funobj = JSVAL_TO_OBJECT(val); JSFunction *fun = GET_FUNCTION_PRIVATE(cx, funobj); fprintf(stderr, "<%s %s at %p (JSFunction at %p)>", @@ -6944,7 +6862,7 @@ dumpValue(jsval val) (void *) fun); } else if (JSVAL_IS_OBJECT(val)) { JSObject *obj = JSVAL_TO_OBJECT(val); - JSClass *cls = STOBJ_GET_CLASS(obj); + JSClass *cls = obj->getClass(); fprintf(stderr, "<%s%s at %p>", cls->name, cls == &js_ObjectClass ? 
"" : " object", @@ -6987,7 +6905,7 @@ static void dumpScopeProp(JSScopeProperty *sprop) { jsid id = sprop->id; - uint8 attrs = sprop->attrs; + uint8 attrs = sprop->attributes(); fprintf(stderr, " "); if (attrs & JSPROP_ENUMERATE) fprintf(stderr, "enumerate "); @@ -7015,7 +6933,7 @@ js_DumpObject(JSObject *obj) jsuint reservedEnd; fprintf(stderr, "object %p\n", (void *) obj); - clasp = STOBJ_GET_CLASS(obj); + clasp = obj->getClass(); fprintf(stderr, "class %p %s\n", (void *)clasp, clasp->name); if (obj->isDenseArray()) { @@ -7031,7 +6949,7 @@ js_DumpObject(JSObject *obj) return; } - if (OBJ_IS_NATIVE(obj)) { + if (obj->isNative()) { JSScope *scope = OBJ_SCOPE(obj); if (scope->sealed()) fprintf(stderr, "sealed\n"); @@ -7042,7 +6960,7 @@ js_DumpObject(JSObject *obj) dumpScopeProp(sprop); } } else { - if (!OBJ_IS_NATIVE(obj)) + if (!obj->isNative()) fprintf(stderr, "not native\n"); } @@ -7062,15 +6980,15 @@ js_DumpObject(JSObject *obj) fprintf(stderr, "slots:\n"); reservedEnd = i + JSCLASS_RESERVED_SLOTS(clasp); - slots = (OBJ_IS_NATIVE(obj) && !OBJ_SCOPE(obj)->isSharedEmpty()) + slots = (obj->isNative() && !OBJ_SCOPE(obj)->isSharedEmpty()) ? OBJ_SCOPE(obj)->freeslot - : STOBJ_NSLOTS(obj); + : obj->numSlots(); for (; i < slots; i++) { fprintf(stderr, " %3d ", i); if (i < reservedEnd) fprintf(stderr, "(reserved) "); fprintf(stderr, "= "); - dumpValue(STOBJ_GET_SLOT(obj, i)); + dumpValue(obj->getSlot(i)); fputc('\n', stderr); } fputc('\n', stderr); diff --git a/js/src/jsobj.h b/js/src/jsobj.h index 2d5866428bf..07949114905 100644 --- a/js/src/jsobj.h +++ b/js/src/jsobj.h @@ -52,12 +52,14 @@ #include "jspubtd.h" #include "jsprvtd.h" +namespace js { class AutoDescriptorArray; } + /* * A representation of ECMA-262 ed. 5's internal property descriptor data * structure. */ struct PropertyDescriptor { - friend class AutoDescriptorArray; + friend class js::AutoDescriptorArray; private: PropertyDescriptor(); @@ -282,6 +284,42 @@ struct JSObject { classword |= jsuword(2); } + uint32 numSlots(void) { + return dslots ? (uint32)dslots[-1] : (uint32)JS_INITIAL_NSLOTS; + } + + jsval& getSlotRef(uintN slot) { + return (slot < JS_INITIAL_NSLOTS) + ? fslots[slot] + : (JS_ASSERT(slot < (uint32)dslots[-1]), + dslots[slot - JS_INITIAL_NSLOTS]); + } + + jsval getSlot(uintN slot) const { + return (slot < JS_INITIAL_NSLOTS) + ? fslots[slot] + : (JS_ASSERT(slot < (uint32)dslots[-1]), + dslots[slot - JS_INITIAL_NSLOTS]); + } + + void setSlot(uintN slot, jsval value) { + if (slot < JS_INITIAL_NSLOTS) { + fslots[slot] = value; + } else { + JS_ASSERT(slot < (uint32)dslots[-1]); + dslots[slot - JS_INITIAL_NSLOTS] = value; + } + } + + /* + * These ones are for multi-threaded ("MT") objects. Use getSlot(), + * getSlotRef(), setSlot() to directly manipulate slots in obj when only + * one thread can access obj, or when accessing read-only slots within + * JS_INITIAL_NSLOTS. 
+ */ + inline jsval getSlotMT(JSContext *cx, uintN slot); + inline void setSlotMT(JSContext *cx, uintN slot, jsval value); + JSObject *getProto() const { return JSVAL_TO_OBJECT(fslots[JSSLOT_PROTO]); } @@ -318,6 +356,8 @@ struct JSObject { JS_CALL_OBJECT_TRACER(trc, parent, "__parent__"); } + JSObject *getGlobal(); + void *getPrivate() const { JS_ASSERT(getClass()->flags & JSCLASS_HAS_PRIVATE); jsval v = fslots[JSSLOT_PRIVATE]; @@ -434,6 +474,7 @@ struct JSObject { map->ops->dropProperty(cx, this, prop); } + inline bool isArguments() const; inline bool isArray() const; inline bool isDenseArray() const; inline bool isFunction() const; @@ -443,9 +484,6 @@ struct JSObject { inline bool unbrand(JSContext *cx); }; -/* Compatibility macro. */ -#define OBJ_IS_NATIVE(obj) ((obj)->isNative()) - #define JSSLOT_START(clasp) (((clasp)->flags & JSCLASS_HAS_PRIVATE) \ ? JSSLOT_PRIVATE + 1 \ : JSSLOT_PRIVATE) @@ -461,77 +499,19 @@ struct JSObject { #define MAX_DSLOTS_LENGTH (JS_MAX(~uint32(0), ~size_t(0)) / sizeof(jsval) - 1) #define MAX_DSLOTS_LENGTH32 (~uint32(0) / sizeof(jsval) - 1) -/* - * STOBJ prefix means Single Threaded Object. Use the following fast macros to - * directly manipulate slots in obj when only one thread can access obj, or - * when accessing read-only slots within JS_INITIAL_NSLOTS. - */ - -#define STOBJ_NSLOTS(obj) \ - ((obj)->dslots ? (uint32)(obj)->dslots[-1] : (uint32)JS_INITIAL_NSLOTS) - -inline jsval& -STOBJ_GET_SLOT(JSObject *obj, uintN slot) -{ - return (slot < JS_INITIAL_NSLOTS) - ? obj->fslots[slot] - : (JS_ASSERT(slot < (uint32)obj->dslots[-1]), - obj->dslots[slot - JS_INITIAL_NSLOTS]); -} - -inline void -STOBJ_SET_SLOT(JSObject *obj, uintN slot, jsval value) -{ - if (slot < JS_INITIAL_NSLOTS) { - obj->fslots[slot] = value; - } else { - JS_ASSERT(slot < (uint32)obj->dslots[-1]); - obj->dslots[slot - JS_INITIAL_NSLOTS] = value; - } -} - -inline JSClass* -STOBJ_GET_CLASS(const JSObject* obj) -{ - return obj->getClass(); -} - #define OBJ_CHECK_SLOT(obj,slot) \ (JS_ASSERT(obj->isNative()), JS_ASSERT(slot < OBJ_SCOPE(obj)->freeslot)) #define LOCKED_OBJ_GET_SLOT(obj,slot) \ - (OBJ_CHECK_SLOT(obj, slot), STOBJ_GET_SLOT(obj, slot)) + (OBJ_CHECK_SLOT(obj, slot), obj->getSlot(slot)) #define LOCKED_OBJ_SET_SLOT(obj,slot,value) \ - (OBJ_CHECK_SLOT(obj, slot), STOBJ_SET_SLOT(obj, slot, value)) + (OBJ_CHECK_SLOT(obj, slot), obj->setSlot(slot, value)) #ifdef JS_THREADSAFE -/* Thread-safe functions and wrapper macros for accessing slots in obj. */ -#define OBJ_GET_SLOT(cx,obj,slot) \ - (OBJ_CHECK_SLOT(obj, slot), \ - (OBJ_SCOPE(obj)->title.ownercx == cx) \ - ? LOCKED_OBJ_GET_SLOT(obj, slot) \ - : js_GetSlotThreadSafe(cx, obj, slot)) - -#define OBJ_SET_SLOT(cx,obj,slot,value) \ - JS_BEGIN_MACRO \ - OBJ_CHECK_SLOT(obj, slot); \ - if (OBJ_SCOPE(obj)->title.ownercx == cx) \ - LOCKED_OBJ_SET_SLOT(obj, slot, value); \ - else \ - js_SetSlotThreadSafe(cx, obj, slot, value); \ - JS_END_MACRO - /* - * If thread-safe, define an OBJ_GET_SLOT wrapper that bypasses, for a native - * object, the lock-free "fast path" test of (OBJ_SCOPE(obj)->ownercx == cx), - * to avoid needlessly switching from lock-free to lock-full scope when doing - * GC on a different context from the last one to own the scope. The caller - * in this case is probably a JSClass.mark function, e.g., fun_mark, or maybe - * a finalizer. 
- * * The GC runs only when all threads except the one on which the GC is active - * are suspended at GC-safe points, so calling STOBJ_GET_SLOT from the GC's + * are suspended at GC-safe points, so calling obj->getSlot() from the GC's * thread is safe when rt->gcRunning is set. See jsgc.c for details. */ #define THREAD_IS_RUNNING_GC(rt, thread) \ @@ -540,18 +520,13 @@ STOBJ_GET_CLASS(const JSObject* obj) #define CX_THREAD_IS_RUNNING_GC(cx) \ THREAD_IS_RUNNING_GC((cx)->runtime, (cx)->thread) -#else /* !JS_THREADSAFE */ - -#define OBJ_GET_SLOT(cx,obj,slot) LOCKED_OBJ_GET_SLOT(obj,slot) -#define OBJ_SET_SLOT(cx,obj,slot,value) LOCKED_OBJ_SET_SLOT(obj,slot,value) - -#endif /* !JS_THREADSAFE */ +#endif /* JS_THREADSAFE */ /* * Class is invariant and comes from the fixed clasp member. Thus no locking * is necessary to read it. Same for the private slot. */ -#define OBJ_GET_CLASS(cx,obj) STOBJ_GET_CLASS(obj) +#define OBJ_GET_CLASS(cx,obj) (obj)->getClass() #ifdef __cplusplus inline void @@ -614,9 +589,9 @@ js_DefineBlockVariable(JSContext *cx, JSObject *obj, jsid id, intN index); #define OBJ_BLOCK_COUNT(cx,obj) \ (OBJ_SCOPE(OBJ_IS_CLONED_BLOCK(obj) ? obj->getProto() : obj)->entryCount) #define OBJ_BLOCK_DEPTH(cx,obj) \ - JSVAL_TO_INT(STOBJ_GET_SLOT(obj, JSSLOT_BLOCK_DEPTH)) + JSVAL_TO_INT(obj->getSlot(JSSLOT_BLOCK_DEPTH)) #define OBJ_SET_BLOCK_DEPTH(cx,obj,depth) \ - STOBJ_SET_SLOT(obj, JSSLOT_BLOCK_DEPTH, INT_TO_JSVAL(depth)) + obj->setSlot(JSSLOT_BLOCK_DEPTH, INT_TO_JSVAL(depth)) /* * To make sure this slot is well-defined, always call js_NewWithObject to @@ -656,11 +631,11 @@ struct JSSharpObjectMap { #define SHARP_BIT ((jsatomid) 1) #define BUSY_BIT ((jsatomid) 2) #define SHARP_ID_SHIFT 2 -#define IS_SHARP(he) (JS_PTR_TO_UINT32((he)->value) & SHARP_BIT) -#define MAKE_SHARP(he) ((he)->value = JS_UINT32_TO_PTR(JS_PTR_TO_UINT32((he)->value)|SHARP_BIT)) -#define IS_BUSY(he) (JS_PTR_TO_UINT32((he)->value) & BUSY_BIT) -#define MAKE_BUSY(he) ((he)->value = JS_UINT32_TO_PTR(JS_PTR_TO_UINT32((he)->value)|BUSY_BIT)) -#define CLEAR_BUSY(he) ((he)->value = JS_UINT32_TO_PTR(JS_PTR_TO_UINT32((he)->value)&~BUSY_BIT)) +#define IS_SHARP(he) (uintptr_t((he)->value) & SHARP_BIT) +#define MAKE_SHARP(he) ((he)->value = (void *) (uintptr_t((he)->value)|SHARP_BIT)) +#define IS_BUSY(he) (uintptr_t((he)->value) & BUSY_BIT) +#define MAKE_BUSY(he) ((he)->value = (void *) (uintptr_t((he)->value)|BUSY_BIT)) +#define CLEAR_BUSY(he) ((he)->value = (void *) (uintptr_t((he)->value)&~BUSY_BIT)) extern JSHashEntry * js_EnterSharpObject(JSContext *cx, JSObject *obj, JSIdArray **idap, @@ -828,6 +803,9 @@ extern JSBool js_DefineProperty(JSContext *cx, JSObject *obj, jsid id, jsval value, JSPropertyOp getter, JSPropertyOp setter, uintN attrs); +extern JSBool +js_DefineOwnProperty(JSContext *cx, JSObject *obj, jsid id, jsval descriptor, JSBool *bp); + /* * Flags for the defineHow parameter of js_DefineNativeProperty. */ @@ -883,7 +861,7 @@ js_IsCacheableNonGlobalScope(JSObject *obj) extern JS_FRIEND_DATA(JSClass) js_DeclEnvClass; JS_ASSERT(obj->getParent()); - JSClass *clasp = STOBJ_GET_CLASS(obj); + JSClass *clasp = obj->getClass(); bool cacheable = (clasp == &js_CallClass || clasp == &js_BlockClass || clasp == &js_DeclEnvClass); @@ -895,7 +873,7 @@ js_IsCacheableNonGlobalScope(JSObject *obj) /* * If cacheResult is false, return JS_NO_PROP_CACHE_FILL on success. 
*/ -extern JSPropCacheEntry * +extern js::PropertyCacheEntry * js_FindPropertyHelper(JSContext *cx, jsid id, JSBool cacheResult, JSObject **objp, JSObject **pobjp, JSProperty **propp); @@ -951,6 +929,9 @@ js_GetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN getHow, extern JSBool js_GetProperty(JSContext *cx, JSObject *obj, jsid id, jsval *vp); +extern JSBool +js_GetOwnPropertyDescriptor(JSContext *cx, JSObject *obj, jsid id, jsval *vp); + extern JSBool js_GetMethod(JSContext *cx, JSObject *obj, jsid id, uintN getHow, jsval *vp); @@ -1022,7 +1003,7 @@ js_IsDelegate(JSContext *cx, JSObject *obj, jsval v, JSBool *bp); */ extern JS_FRIEND_API(JSBool) js_GetClassPrototype(JSContext *cx, JSObject *scope, JSProtoKey protoKey, - JSObject **protop, JSClass *clasp = NULL); + JSObject **protop, JSClass *clasp = NULL); extern JSBool js_SetClassPrototype(JSContext *cx, JSObject *ctor, JSObject *proto, diff --git a/js/src/jsobjinlines.h b/js/src/jsobjinlines.h index 376bca89333..7ba3a71be1c 100644 --- a/js/src/jsobjinlines.h +++ b/js/src/jsobjinlines.h @@ -44,6 +44,41 @@ #include "jsobj.h" #include "jsscope.h" +inline jsval +JSObject::getSlotMT(JSContext *cx, uintN slot) { +#ifdef JS_THREADSAFE + /* + * If thread-safe, define a getSlotMT() that bypasses, for a native + * object, the lock-free "fast path" test of + * (OBJ_SCOPE(obj)->ownercx == cx), to avoid needlessly switching from + * lock-free to lock-full scope when doing GC on a different context + * from the last one to own the scope. The caller in this case is + * probably a JSClass.mark function, e.g., fun_mark, or maybe a + * finalizer. + */ + OBJ_CHECK_SLOT(this, slot); + return (OBJ_SCOPE(this)->title.ownercx == cx) + ? LOCKED_OBJ_GET_SLOT(this, slot) + : js_GetSlotThreadSafe(cx, this, slot); +#else + return LOCKED_OBJ_GET_SLOT(this, slot); +#endif +} + +inline void +JSObject::setSlotMT(JSContext *cx, uintN slot, jsval value) { +#ifdef JS_THREADSAFE + /* Thread-safe way to set a slot. 
 */
+    OBJ_CHECK_SLOT(this, slot);
+    if (OBJ_SCOPE(this)->title.ownercx == cx)
+        LOCKED_OBJ_SET_SLOT(this, slot, value);
+    else
+        js_SetSlotThreadSafe(cx, this, slot, value);
+#else
+    LOCKED_OBJ_SET_SLOT(this, slot, value);
+#endif
+}
+
 inline void
 JSObject::initSharingEmptyScope(JSClass *clasp, JSObject *proto, JSObject *parent,
                                 jsval privateSlotValue)
@@ -67,7 +102,7 @@ JSObject::freeSlotsArray(JSContext *cx)
 inline bool
 JSObject::unbrand(JSContext *cx)
 {
-    if (OBJ_IS_NATIVE(this)) {
+    if (this->isNative()) {
         JS_LOCK_OBJ(cx, this);
         JSScope *scope = OBJ_SCOPE(this);
         if (scope->isSharedEmpty()) {
@@ -83,4 +118,34 @@ JSObject::unbrand(JSContext *cx)
     return true;
 }
 
+namespace js {
+
+typedef Vector<PropertyDescriptor, 1> PropertyDescriptorArray;
+
+class AutoDescriptorArray : private AutoGCRooter
+{
+  public:
+    AutoDescriptorArray(JSContext *cx)
+      : AutoGCRooter(cx, DESCRIPTORS), descriptors(cx)
+    { }
+
+    PropertyDescriptor *append() {
+        if (!descriptors.append(PropertyDescriptor()))
+            return NULL;
+        return &descriptors.back();
+    }
+
+    PropertyDescriptor& operator[](size_t i) {
+        JS_ASSERT(i < descriptors.length());
+        return descriptors[i];
+    }
+
+    friend void AutoGCRooter::trace(JSTracer *trc);
+
+  private:
+    PropertyDescriptorArray descriptors;
+};
+
+}
+
 #endif /* jsobjinlines_h___ */
diff --git a/js/src/json.cpp b/js/src/json.cpp
index af0c1bfd9df..a4883610c02 100644
--- a/js/src/json.cpp
+++ b/js/src/json.cpp
@@ -65,6 +65,8 @@
 #include "jsatominlines.h"
 
+using namespace js;
+
 #ifdef _MSC_VER
 #pragma warning(push)
 #pragma warning(disable:4351)
@@ -106,10 +108,9 @@ js_json_parse(JSContext *cx, uintN argc, jsval *vp)
 {
     JSString *s = NULL;
     jsval *argv = vp + 2;
-    jsval reviver = JSVAL_NULL;
-    JSAutoTempValueRooter tvr(cx, 1, &reviver);
+    AutoValueRooter reviver(cx, JSVAL_NULL);
 
-    if (!JS_ConvertArguments(cx, argc, argv, "S / v", &s, &reviver))
+    if (!JS_ConvertArguments(cx, argc, argv, "S / v", &s, reviver.addr()))
         return JS_FALSE;
 
     JSONParser *jp = js_BeginJSONParse(cx, vp);
@@ -119,7 +120,7 @@ js_json_parse(JSContext *cx, uintN argc, jsval *vp)
         size_t length;
         s->getCharsAndLength(chars, length);
         ok = js_ConsumeJSONText(cx, jp, chars, length);
-        ok &= js_FinishJSONParse(cx, jp, reviver);
+        ok &= !!js_FinishJSONParse(cx, jp, reviver.value());
     }
 
     return ok;
@@ -129,18 +130,16 @@ JSBool
 js_json_stringify(JSContext *cx, uintN argc, jsval *vp)
 {
     jsval *argv = vp + 2;
-    JSObject *replacer = NULL;
-    jsval space = JSVAL_NULL;
-    JSAutoTempValueRooter tvr(cx, replacer);
-    JSAutoTempValueRooter tvr2(cx, 1, &space);
+    AutoValueRooter space(cx, JSVAL_NULL);
+    AutoObjectRooter replacer(cx);
 
     // Must throw an Error if there isn't a first arg
-    if (!JS_ConvertArguments(cx, argc, argv, "v / o v", vp, &replacer, &space))
+    if (!JS_ConvertArguments(cx, argc, argv, "v / o v", vp, replacer.addr(), space.addr()))
         return JS_FALSE;
 
     JSCharBuffer cb(cx);
-    if (!js_Stringify(cx, vp, replacer, space, cb))
+    if (!js_Stringify(cx, vp, replacer.object(), space.value(), cb))
         return JS_FALSE;
 
     // XXX This can never happen to nsJSON.cpp, but the JSON object
@@ -258,7 +257,7 @@ JO(JSContext *cx, jsval *vp, StringifyContext *scx)
         return JS_FALSE;
 
     jsval vec[3] = {JSVAL_NULL, JSVAL_NULL, JSVAL_NULL};
-    JSAutoTempValueRooter tvr(cx, 3, vec);
+    AutoArrayRooter tvr(cx, JS_ARRAY_LENGTH(vec), vec);
 
     jsval& key = vec[0];
     jsval& outputValue = vec[1];
@@ -307,7 +306,7 @@ JO(JSContext *cx, jsval *vp, StringifyContext *scx)
                 if (!ks)
                     goto error_break;
             }
-            JSAutoTempValueRooter keyStringRoot(cx, ks);
+            AutoValueRooter keyStringRoot(cx, ks);
 
             // Don't include prototype
properties, since this operation is // supposed to be implemented as if by ES3.1 Object.keys() @@ -393,21 +392,20 @@ JA(JSContext *cx, jsval *vp, StringifyContext *scx) if (!js_GetLengthProperty(cx, obj, &length)) return JS_FALSE; - jsval outputValue = JSVAL_NULL; - JSAutoTempValueRooter tvr(cx, 1, &outputValue); + AutoValueRooter outputValue(cx, JSVAL_NULL); jsid id; jsuint i; for (i = 0; i < length; i++) { id = INT_TO_JSID(i); - if (!obj->getProperty(cx, id, &outputValue)) + if (!obj->getProperty(cx, id, outputValue.addr())) return JS_FALSE; - if (!Str(cx, id, obj, scx, &outputValue)) + if (!Str(cx, id, obj, scx, outputValue.addr())) return JS_FALSE; - if (outputValue == JSVAL_VOID) { + if (outputValue.value() == JSVAL_VOID) { if (!js_AppendLiteral(scx->cb, "null")) return JS_FALSE; } @@ -484,7 +482,8 @@ Str(JSContext *cx, jsid id, JSObject *holder, StringifyContext *scx, jsval *vp, char numBuf[DTOSTR_STANDARD_BUFFER_SIZE], *numStr; jsdouble d = JSVAL_IS_INT(*vp) ? jsdouble(JSVAL_TO_INT(*vp)) : *JSVAL_TO_DOUBLE(*vp); - numStr = JS_dtostr(numBuf, sizeof numBuf, DTOSTR_STANDARD, 0, d); + numStr = js_dtostr(JS_THREAD_DATA(cx)->dtoaState, numBuf, sizeof numBuf, + DTOSTR_STANDARD, 0, d); if (!numStr) { JS_ReportOutOfMemory(cx); return JS_FALSE; @@ -515,19 +514,26 @@ Str(JSContext *cx, jsid id, JSObject *holder, StringifyContext *scx, jsval *vp, static JSBool InitializeGap(JSContext *cx, jsval space, JSCharBuffer &cb) { + AutoValueRooter gap(cx, space); + if (!JSVAL_IS_PRIMITIVE(space)) { - JSClass *clasp = OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(space)); - if (clasp == &js_StringClass || clasp == &js_NumberClass) - return js_ValueToCharBuffer(cx, space, cb); + JSObject *obj = JSVAL_TO_OBJECT(space); + JSClass *clasp = OBJ_GET_CLASS(cx, obj); + if (clasp == &js_NumberClass || clasp == &js_StringClass) + *gap.addr() = obj->fslots[JSSLOT_PRIMITIVE_THIS]; } - if (JSVAL_IS_STRING(space)) - return js_ValueToCharBuffer(cx, space, cb); + if (JSVAL_IS_STRING(gap.value())) { + if (!js_ValueToCharBuffer(cx, gap.value(), cb)) + return JS_FALSE; + if (cb.length() > 10) + cb.resize(10); + } - if (JSVAL_IS_NUMBER(space)) { - jsdouble d = JSVAL_IS_INT(space) - ? JSVAL_TO_INT(space) - : js_DoubleToInteger(*JSVAL_TO_DOUBLE(space)); + if (JSVAL_IS_NUMBER(gap.value())) { + jsdouble d = JSVAL_IS_INT(gap.value()) + ? 
JSVAL_TO_INT(gap.value()) + : js_DoubleToInteger(*JSVAL_TO_DOUBLE(gap.value())); d = JS_MIN(10, d); if (d >= 1 && !cb.appendN(' ', uint32(d))) return JS_FALSE; @@ -570,63 +576,54 @@ static JSBool IsNumChar(jschar c) static JSBool HandleData(JSContext *cx, JSONParser *jp, JSONDataType type); static JSBool PopState(JSContext *cx, JSONParser *jp); -static JSBool -DestroyIdArrayOnError(JSContext *cx, JSIdArray *ida) { - JS_DestroyIdArray(cx, ida); - return JS_FALSE; -} - -static JSBool +static bool Walk(JSContext *cx, jsid id, JSObject *holder, jsval reviver, jsval *vp) { - JS_CHECK_RECURSION(cx, return JS_FALSE); + JS_CHECK_RECURSION(cx, return false); if (!holder->getProperty(cx, id, vp)) - return JS_FALSE; + return false; JSObject *obj; if (!JSVAL_IS_PRIMITIVE(*vp) && !(obj = JSVAL_TO_OBJECT(*vp))->isCallable()) { - jsval propValue = JSVAL_NULL; - JSAutoTempValueRooter tvr(cx, 1, &propValue); + AutoValueRooter propValue(cx, JSVAL_NULL); if(obj->isArray()) { jsuint length = 0; if (!js_GetLengthProperty(cx, obj, &length)) - return JS_FALSE; + return false; for (jsuint i = 0; i < length; i++) { jsid index; if (!js_IndexToId(cx, i, &index)) - return JS_FALSE; + return false; - if (!Walk(cx, index, obj, reviver, &propValue)) - return JS_FALSE; + if (!Walk(cx, index, obj, reviver, propValue.addr())) + return false; - if (!obj->defineProperty(cx, index, propValue, NULL, NULL, JSPROP_ENUMERATE)) - return JS_FALSE; + if (!obj->defineProperty(cx, index, propValue.value(), NULL, NULL, JSPROP_ENUMERATE)) + return false; } } else { - JSIdArray *ida = JS_Enumerate(cx, obj); + AutoIdArray ida(cx, JS_Enumerate(cx, obj)); if (!ida) - return JS_FALSE; + return false; - JSAutoTempValueRooter idaroot(cx, JS_ARRAY_LENGTH(ida), (jsval*)ida); - - for(jsint i = 0; i < ida->length; i++) { - jsid idName = ida->vector[i]; - if (!Walk(cx, idName, obj, reviver, &propValue)) - return DestroyIdArrayOnError(cx, ida); - if (propValue == JSVAL_VOID) { - if (!js_DeleteProperty(cx, obj, idName, &propValue)) - return DestroyIdArrayOnError(cx, ida); + for (jsint i = 0, len = ida.length(); i < len; i++) { + jsid idName = ida[i]; + if (!Walk(cx, idName, obj, reviver, propValue.addr())) + return false; + if (propValue.value() == JSVAL_VOID) { + if (!js_DeleteProperty(cx, obj, idName, propValue.addr())) + return false; } else { - if (!obj->defineProperty(cx, idName, propValue, NULL, NULL, JSPROP_ENUMERATE)) - return DestroyIdArrayOnError(cx, ida); + if (!obj->defineProperty(cx, idName, propValue.value(), NULL, NULL, + JSPROP_ENUMERATE)) { + return false; + } } } - - JS_DestroyIdArray(cx, ida); } } @@ -634,31 +631,29 @@ Walk(JSContext *cx, jsid id, JSObject *holder, jsval reviver, jsval *vp) jsval value = *vp; JSString *key = js_ValueToString(cx, ID_TO_VALUE(id)); if (!key) - return JS_FALSE; + return false; jsval vec[2] = {STRING_TO_JSVAL(key), value}; jsval reviverResult; if (!JS_CallFunctionValue(cx, holder, reviver, 2, vec, &reviverResult)) - return JS_FALSE; + return false; *vp = reviverResult; - - return JS_TRUE; + return true; } -static JSBool +static bool Revive(JSContext *cx, jsval reviver, jsval *vp) { JSObject *obj = js_NewObject(cx, &js_ObjectClass, NULL, NULL); if (!obj) - return JS_FALSE; + return false; - jsval v = OBJECT_TO_JSVAL(obj); - JSAutoTempValueRooter tvr(cx, 1, &v); + AutoValueRooter tvr(cx, obj); if (!obj->defineProperty(cx, ATOM_TO_JSID(cx->runtime->atomState.emptyAtom), *vp, NULL, NULL, JSPROP_ENUMERATE)) { - return JS_FALSE; + return false; } return Walk(cx, 
ATOM_TO_JSID(cx->runtime->atomState.emptyAtom), obj, reviver, vp);
@@ -693,11 +688,11 @@ bad:
     return NULL;
 }
 
-JSBool
+bool
 js_FinishJSONParse(JSContext *cx, JSONParser *jp, jsval reviver)
 {
     if (!jp)
-        return JS_TRUE;
+        return true;
 
     JSBool early_ok = JS_TRUE;
 
@@ -718,20 +713,20 @@ js_FinishJSONParse(JSContext *cx, JSONParser *jp, jsval reviver)
     /* This internal API is infallible, in spite of its JSBool return type. */
     js_RemoveRoot(cx->runtime, &jp->objectStack);
 
-    JSBool ok = *jp->statep == JSON_PARSE_STATE_FINISHED;
+    bool ok = *jp->statep == JSON_PARSE_STATE_FINISHED;
     jsval *vp = jp->rootVal;
 
     cx->destroy(jp);
 
     if (!early_ok)
-        return JS_FALSE;
+        return false;
 
     if (!ok) {
         JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_JSON_BAD_PARSE);
-        return JS_FALSE;
+        return false;
     }
 
-    if (!JSVAL_IS_PRIMITIVE(reviver) && js_IsCallable(reviver))
+    if (!JSVAL_IS_PRIMITIVE(reviver) && JSVAL_TO_OBJECT(reviver)->isCallable())
         ok = Revive(cx, reviver, vp);
 
     return ok;
@@ -809,7 +804,7 @@ PushObject(JSContext *cx, JSONParser *jp, JSObject *obj)
     }
 
     jsval v = OBJECT_TO_JSVAL(obj);
-    JSAutoTempValueRooter tvr(cx, v);
+    AutoValueRooter tvr(cx, v);
 
     // Check if this is the root object
     if (len == 0) {
@@ -882,7 +877,7 @@ CloseArray(JSContext *cx, JSONParser *jp)
 static JSBool
 PushPrimitive(JSContext *cx, JSONParser *jp, jsval value)
 {
-    JSAutoTempValueRooter tvr(cx, 1, &value);
+    AutoValueRooter tvr(cx, value);
 
     jsuint len;
     if (!js_GetLengthProperty(cx, jp->objectStack, &len))
@@ -936,7 +931,7 @@ static JSBool
 HandleKeyword(JSContext *cx, JSONParser *jp, const jschar *buf, uint32 len)
 {
     jsval keyword;
-    JSTokenType tt = js_CheckKeyword(buf, len);
+    TokenKind tt = js_CheckKeyword(buf, len);
     if (tt != TOK_PRIMARY) {
         // bad keyword
         JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_JSON_BAD_PARSE);
@@ -1121,6 +1116,11 @@ js_ConsumeJSONText(JSContext *cx, JSONParser *jp, const jschar *data, uint32 len
                 return JS_FALSE;
             } else if (c == '\\') {
                 *jp->statep = JSON_PARSE_STATE_STRING_ESCAPE;
+            } else if (c < 32) {
+                // The JSON lexical grammar does not allow a JSONStringCharacter to be
+                // any of the Unicode characters U+0000 through U+001F (control characters).
+                JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_JSON_BAD_PARSE);
+                return JS_FALSE;
             } else {
                 if (!jp->buffer.append(c))
                     return JS_FALSE;
@@ -1234,8 +1234,8 @@ static JSFunctionSpec json_static_methods[] = {
 #if JS_HAS_TOSOURCE
     JS_FN(js_toSource_str, json_toSource, 0, 0),
 #endif
-    JS_FN("parse", js_json_parse, 1, 0),
-    JS_FN("stringify", js_json_stringify, 1, 0),
+    JS_FN("parse", js_json_parse, 2, 0),
+    JS_FN("stringify", js_json_stringify, 3, 0),
     JS_FS_END
 };
diff --git a/js/src/json.h b/js/src/json.h
index 84f392419d9..e384863e160 100644
--- a/js/src/json.h
+++ b/js/src/json.h
@@ -89,7 +89,7 @@ js_BeginJSONParse(JSContext *cx, jsval *rootVal);
 extern JSBool
 js_ConsumeJSONText(JSContext *cx, JSONParser *jp, const jschar *data, uint32 len);
 
-extern JSBool
+extern bool
 js_FinishJSONParse(JSContext *cx, JSONParser *jp, jsval reviver);
 
 JS_END_EXTERN_C
diff --git a/js/src/jsopcode.cpp b/js/src/jsopcode.cpp
index 9fa44d4d1a3..5123d272d6f 100644
--- a/js/src/jsopcode.cpp
+++ b/js/src/jsopcode.cpp
@@ -79,6 +79,8 @@
 #include "jsautooplen.h"
 
+using namespace js;
+
 /*
  * Index limit must stay within 32 bits.
*/ @@ -307,7 +309,7 @@ ToDisassemblySource(JSContext *cx, jsval v) } if (clasp == &js_RegExpClass) { - JSAutoTempValueRooter tvr(cx); + AutoValueRooter tvr(cx); if (!js_regexp_toString(cx, obj, tvr.addr())) return NULL; return js_GetStringBytes(cx, JSVAL_TO_STRING(tvr.value())); @@ -553,13 +555,18 @@ SprintEnsureBuffer(Sprinter *sp, size_t len) static ptrdiff_t SprintPut(Sprinter *sp, const char *s, size_t len) { - ptrdiff_t offset; - char *bp; + ptrdiff_t offset = sp->size; /* save old size */ + char *bp = sp->base; /* save old base */ /* Allocate space for s, including the '\0' at the end. */ if (!SprintEnsureBuffer(sp, len)) return -1; + if (sp->base != bp && /* buffer was realloc'ed */ + s >= bp && s < bp + offset) { /* s was within the buffer */ + s = sp->base + (s - bp); /* this is where it lives now */ + } + /* Advance offset and copy s into sp's buffer. */ offset = sp->offset; sp->offset += len; @@ -746,9 +753,9 @@ js_NewPrinter(JSContext *cx, const char *name, JSFunction *fun, INIT_SPRINTER(cx, &jp->sprinter, &jp->pool, 0); JS_InitArenaPool(&jp->pool, name, 256, 1, &cx->scriptStackQuota); jp->indent = indent; - jp->pretty = pretty; - jp->grouped = grouped; - jp->strict = strict; + jp->pretty = !!pretty; + jp->grouped = !!grouped; + jp->strict = !!strict; jp->script = NULL; jp->dvgfence = NULL; jp->pcstack = NULL; @@ -1100,7 +1107,8 @@ SprintDoubleValue(Sprinter *sp, jsval v, JSOp *opp) : "1 / 0"); *opp = JSOP_DIV; } else { - s = JS_dtostr(buf, sizeof buf, DTOSTR_STANDARD, 0, d); + s = js_dtostr(JS_THREAD_DATA(sp->context)->dtoaState, buf, sizeof buf, + DTOSTR_STANDARD, 0, d); if (!s) { JS_ReportOutOfMemory(sp->context); return -1; diff --git a/js/src/jsops.cpp b/js/src/jsops.cpp index 15b540a8d49..9dedc21dcd3 100644 --- a/js/src/jsops.cpp +++ b/js/src/jsops.cpp @@ -217,16 +217,8 @@ BEGIN_CASE(JSOP_STOP) } JS_ASSERT(regs.sp == StackBase(fp)); - if ((fp->flags & JSFRAME_CONSTRUCTING) && - JSVAL_IS_PRIMITIVE(fp->rval)) { - if (!fp->fun) { - JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, - JSMSG_BAD_NEW_RESULT, - js_ValueToPrintableString(cx, rval)); - goto error; - } + if ((fp->flags & JSFRAME_CONSTRUCTING) && JSVAL_IS_PRIMITIVE(fp->rval)) fp->rval = fp->thisv; - } ok = JS_TRUE; if (inlineCallCount) inline_return: @@ -611,10 +603,10 @@ END_CASE(JSOP_PICK) #define NATIVE_GET(cx,obj,pobj,sprop,getHow,vp) \ JS_BEGIN_MACRO \ - if (SPROP_HAS_STUB_GETTER(sprop)) { \ + if (sprop->hasDefaultGetter()) { \ /* Fast path for Object instance properties. */ \ JS_ASSERT((sprop)->slot != SPROP_INVALID_SLOT || \ - !SPROP_HAS_STUB_SETTER(sprop)); \ + !sprop->hasDefaultSetter()); \ *vp = ((sprop)->slot != SPROP_INVALID_SLOT) \ ? LOCKED_OBJ_GET_SLOT(pobj, (sprop)->slot) \ : JSVAL_VOID; \ @@ -627,7 +619,7 @@ END_CASE(JSOP_PICK) #define NATIVE_SET(cx,obj,sprop,entry,vp) \ JS_BEGIN_MACRO \ TRACE_2(SetPropHit, entry, sprop); \ - if (SPROP_HAS_STUB_SETTER(sprop) && \ + if (sprop->hasDefaultSetter() && \ (sprop)->slot != SPROP_INVALID_SLOT && \ !OBJ_SCOPE(obj)->brandedOrHasMethodBarrier()) { \ /* Fast path for, e.g., plain Object instance properties. */ \ @@ -695,7 +687,7 @@ END_CASE(JSOP_ENUMCONSTELEM) BEGIN_CASE(JSOP_BINDNAME) do { - JSPropCacheEntry *entry; + PropertyCacheEntry *entry; /* * We can skip the property lookup for the global object. 
If the @@ -716,16 +708,13 @@ BEGIN_CASE(JSOP_BINDNAME) obj = fp->scopeChain; if (!obj->getParent()) break; - if (JS_LIKELY(OBJ_IS_NATIVE(obj))) { - PROPERTY_CACHE_TEST(cx, regs.pc, obj, obj2, entry, atom); - if (!atom) { - ASSERT_VALID_PROPERTY_CACHE_HIT(0, obj, obj2, entry); - break; - } - } else { - entry = NULL; - LOAD_ATOM(0); + + JS_PROPERTY_CACHE(cx).test(cx, regs.pc, obj, obj2, entry, atom); + if (!atom) { + ASSERT_VALID_PROPERTY_CACHE_HIT(0, obj, obj2, entry); + break; } + id = ATOM_TO_JSID(atom); obj = js_FindIdentifierBase(cx, fp->scopeChain, id); if (!obj) @@ -1224,33 +1213,31 @@ BEGIN_CASE(JSOP_DECNAME) BEGIN_CASE(JSOP_NAMEINC) BEGIN_CASE(JSOP_NAMEDEC) { - JSPropCacheEntry *entry; + PropertyCacheEntry *entry; obj = fp->scopeChain; - if (JS_LIKELY(OBJ_IS_NATIVE(obj))) { - PROPERTY_CACHE_TEST(cx, regs.pc, obj, obj2, entry, atom); - if (!atom) { - ASSERT_VALID_PROPERTY_CACHE_HIT(0, obj, obj2, entry); - if (obj == obj2 && PCVAL_IS_SLOT(entry->vword)) { - slot = PCVAL_TO_SLOT(entry->vword); - JS_ASSERT(slot < OBJ_SCOPE(obj)->freeslot); - rval = LOCKED_OBJ_GET_SLOT(obj, slot); - if (JS_LIKELY(CAN_DO_FAST_INC_DEC(rval))) { + + JS_PROPERTY_CACHE(cx).test(cx, regs.pc, obj, obj2, entry, atom); + if (!atom) { + ASSERT_VALID_PROPERTY_CACHE_HIT(0, obj, obj2, entry); + if (obj == obj2 && entry->vword.isSlot()) { + slot = entry->vword.toSlot(); + JS_ASSERT(slot < OBJ_SCOPE(obj)->freeslot); + rval = LOCKED_OBJ_GET_SLOT(obj, slot); + if (JS_LIKELY(CAN_DO_FAST_INC_DEC(rval))) { + rtmp = rval; + rval += (js_CodeSpec[op].format & JOF_INC) ? 2 : -2; + if (!(js_CodeSpec[op].format & JOF_POST)) rtmp = rval; - rval += (js_CodeSpec[op].format & JOF_INC) ? 2 : -2; - if (!(js_CodeSpec[op].format & JOF_POST)) - rtmp = rval; - LOCKED_OBJ_SET_SLOT(obj, slot, rval); - PUSH_OPND(rtmp); - len = JSOP_INCNAME_LENGTH; - DO_NEXT_OP(len); - } + LOCKED_OBJ_SET_SLOT(obj, slot, rval); + PUSH_OPND(rtmp); + len = JSOP_INCNAME_LENGTH; + DO_NEXT_OP(len); } - LOAD_ATOM(0); } - } else { LOAD_ATOM(0); } + id = ATOM_TO_JSID(atom); if (!js_FindPropertyHelper(cx, id, true, &obj, &obj2, &prop)) goto error; @@ -1413,7 +1400,7 @@ BEGIN_CASE(JSOP_GVARINC) } slot = JSVAL_TO_INT(lval); JS_ASSERT(fp->varobj(cx) == cx->activeCallStack()->getInitialVarObj()); - rval = OBJ_GET_SLOT(cx, cx->activeCallStack()->getInitialVarObj(), slot); + rval = cx->activeCallStack()->getInitialVarObj()->getSlotMT(cx, slot); if (JS_LIKELY(CAN_DO_FAST_INC_DEC(rval))) { PUSH_OPND(rval + incr2); rval += incr; @@ -1425,7 +1412,7 @@ BEGIN_CASE(JSOP_GVARINC) rval = regs.sp[-1]; --regs.sp; } - OBJ_SET_SLOT(cx, fp->varobj(cx), slot, rval); + fp->varobj(cx)->setSlotMT(cx, slot, rval); len = JSOP_INCGVAR_LENGTH; /* all gvar incops are same length */ JS_ASSERT(len == js_CodeSpec[op].length); DO_NEXT_OP(len); @@ -1433,7 +1420,7 @@ BEGIN_CASE(JSOP_GVARINC) #define COMPUTE_THIS(cx, fp, obj) \ JS_BEGIN_MACRO \ - if (!(obj = js_ComputeThisForFrame(cx, fp))) \ + if (!(obj = (fp)->getThisObject(cx))) \ goto error; \ JS_END_MACRO @@ -1483,7 +1470,7 @@ BEGIN_CASE(JSOP_GETXPROP) do_getprop_with_obj: do { JSObject *aobj; - JSPropCacheEntry *entry; + PropertyCacheEntry *entry; /* * We do not impose the method read barrier if in an imacro, @@ -1491,34 +1478,28 @@ BEGIN_CASE(JSOP_GETXPROP) * from JSOP_NEW) will not be leaked to the calling script. 
*/ aobj = js_GetProtoIfDenseArray(obj); - if (JS_LIKELY(aobj->map->ops->getProperty == js_GetProperty)) { - PROPERTY_CACHE_TEST(cx, regs.pc, aobj, obj2, entry, atom); - if (!atom) { - ASSERT_VALID_PROPERTY_CACHE_HIT(i, aobj, obj2, entry); - if (PCVAL_IS_OBJECT(entry->vword)) { - rval = PCVAL_OBJECT_TO_JSVAL(entry->vword); - } else if (PCVAL_IS_SLOT(entry->vword)) { - slot = PCVAL_TO_SLOT(entry->vword); - JS_ASSERT(slot < OBJ_SCOPE(obj2)->freeslot); - rval = LOCKED_OBJ_GET_SLOT(obj2, slot); - } else { - JS_ASSERT(PCVAL_IS_SPROP(entry->vword)); - sprop = PCVAL_TO_SPROP(entry->vword); - NATIVE_GET(cx, obj, obj2, sprop, - fp->imacpc ? JSGET_NO_METHOD_BARRIER : JSGET_METHOD_BARRIER, - &rval); - } - break; + + JS_PROPERTY_CACHE(cx).test(cx, regs.pc, aobj, obj2, entry, atom); + if (!atom) { + ASSERT_VALID_PROPERTY_CACHE_HIT(i, aobj, obj2, entry); + if (entry->vword.isObject()) { + rval = entry->vword.toJsval(); + } else if (entry->vword.isSlot()) { + slot = entry->vword.toSlot(); + JS_ASSERT(slot < OBJ_SCOPE(obj2)->freeslot); + rval = LOCKED_OBJ_GET_SLOT(obj2, slot); + } else { + JS_ASSERT(entry->vword.isSprop()); + sprop = entry->vword.toSprop(); + NATIVE_GET(cx, obj, obj2, sprop, + fp->imacpc ? JSGET_NO_METHOD_BARRIER : JSGET_METHOD_BARRIER, + &rval); } - } else { - entry = NULL; - if (i < 0) - atom = rt->atomState.lengthAtom; - else - LOAD_ATOM(i); + break; } + id = ATOM_TO_JSID(atom); - if (entry + if (JS_LIKELY(aobj->map->ops->getProperty == js_GetProperty) ? !js_GetPropertyHelper(cx, obj, id, fp->imacpc ? JSGET_CACHE_RESULT | JSGET_NO_METHOD_BARRIER @@ -1539,21 +1520,28 @@ BEGIN_CASE(JSOP_LENGTH) if (JSVAL_IS_STRING(lval)) { str = JSVAL_TO_STRING(lval); regs.sp[-1] = INT_TO_JSVAL(str->length()); - } else if (!JSVAL_IS_PRIMITIVE(lval) && - (obj = JSVAL_TO_OBJECT(lval), obj->isArray())) { - jsuint length; + } else if (!JSVAL_IS_PRIMITIVE(lval)) { + obj = JSVAL_TO_OBJECT(lval); + if (obj->isArray()) { + /* + * We know that the array is created with its 'length' private data + * in a fixed slot at JSSLOT_ARRAY_LENGTH. See also JSOP_ARRAYPUSH, + * far below. + */ + jsuint length = obj->fslots[JSSLOT_ARRAY_LENGTH]; - /* - * We know that the array is created with only its 'length' private - * data in a fixed slot at JSSLOT_ARRAY_LENGTH. See also - * JSOP_ARRAYPUSH, far below. 
- */
-        length = obj->fslots[JSSLOT_ARRAY_LENGTH];
-        if (length <= JSVAL_INT_MAX) {
+            if (length <= JSVAL_INT_MAX)
+                regs.sp[-1] = INT_TO_JSVAL(length);
+            else if (!js_NewDoubleInRootedValue(cx, (jsdouble) length, &regs.sp[-1]))
+                goto error;
+        } else if (obj->isArguments() && !IsOverriddenArgsLength(obj)) {
+            uint32 length = GetArgsLength(obj);
+
+            JS_ASSERT(INT_FITS_IN_JSVAL(length));
             regs.sp[-1] = INT_TO_JSVAL(length);
-        } else if (!js_NewDoubleInRootedValue(cx, (jsdouble) length,
-                                              &regs.sp[-1])) {
-            goto error;
+        } else {
+            i = -2;
+            goto do_getprop_with_lval;
         }
     } else {
         i = -2;
@@ -1564,7 +1552,7 @@ END_CASE(JSOP_LENGTH)
 BEGIN_CASE(JSOP_CALLPROP)
 {
     JSObject *aobj;
-    JSPropCacheEntry *entry;
+    PropertyCacheEntry *entry;
 
     lval = FETCH_OPND(-1);
     if (!JSVAL_IS_PRIMITIVE(lval)) {
@@ -1587,39 +1575,35 @@ BEGIN_CASE(JSOP_CALLPROP)
     }
 
     aobj = js_GetProtoIfDenseArray(obj);
-    if (JS_LIKELY(aobj->map->ops->getProperty == js_GetProperty)) {
-        PROPERTY_CACHE_TEST(cx, regs.pc, aobj, obj2, entry, atom);
-        if (!atom) {
-            ASSERT_VALID_PROPERTY_CACHE_HIT(0, aobj, obj2, entry);
-            if (PCVAL_IS_OBJECT(entry->vword)) {
-                rval = PCVAL_OBJECT_TO_JSVAL(entry->vword);
-            } else if (PCVAL_IS_SLOT(entry->vword)) {
-                slot = PCVAL_TO_SLOT(entry->vword);
-                JS_ASSERT(slot < OBJ_SCOPE(obj2)->freeslot);
-                rval = LOCKED_OBJ_GET_SLOT(obj2, slot);
-            } else {
-                JS_ASSERT(PCVAL_IS_SPROP(entry->vword));
-                sprop = PCVAL_TO_SPROP(entry->vword);
-                NATIVE_GET(cx, obj, obj2, sprop, JSGET_NO_METHOD_BARRIER, &rval);
-            }
-            STORE_OPND(-1, rval);
-            PUSH_OPND(lval);
-            goto end_callprop;
+
+    JS_PROPERTY_CACHE(cx).test(cx, regs.pc, aobj, obj2, entry, atom);
+    if (!atom) {
+        ASSERT_VALID_PROPERTY_CACHE_HIT(0, aobj, obj2, entry);
+        if (entry->vword.isObject()) {
+            rval = entry->vword.toJsval();
+        } else if (entry->vword.isSlot()) {
+            slot = entry->vword.toSlot();
+            JS_ASSERT(slot < OBJ_SCOPE(obj2)->freeslot);
+            rval = LOCKED_OBJ_GET_SLOT(obj2, slot);
+        } else {
+            JS_ASSERT(entry->vword.isSprop());
+            sprop = entry->vword.toSprop();
+            NATIVE_GET(cx, obj, obj2, sprop, JSGET_NO_METHOD_BARRIER, &rval);
         }
-    } else {
-        entry = NULL;
-        LOAD_ATOM(0);
+        STORE_OPND(-1, rval);
+        PUSH_OPND(lval);
+        goto end_callprop;
     }
 
     /*
     * Cache miss: use the immediate atom that was loaded for us under
-     * PROPERTY_CACHE_TEST.
+     * PropertyCache::test.
     */
     id = ATOM_TO_JSID(atom);
     PUSH(JSVAL_NULL);
     if (!JSVAL_IS_PRIMITIVE(lval)) {
        if (!js_GetMethod(cx, obj, id,
-                          entry
+                          JS_LIKELY(aobj->map->ops->getProperty == js_GetProperty)
                           ? JSGET_CACHE_RESULT | JSGET_NO_METHOD_BARRIER
                           : JSGET_NO_METHOD_BARRIER,
                           &rval)) {
@@ -1679,193 +1663,185 @@ BEGIN_CASE(JSOP_SETMETHOD)
     VALUE_TO_OBJECT(cx, -2, lval, obj);
 
     do {
-        JSPropCacheEntry *entry;
-
-        entry = NULL;
+        PropertyCache *cache = &JS_PROPERTY_CACHE(cx);
+        PropertyCacheEntry *entry = NULL;
         atom = NULL;
-        if (JS_LIKELY(obj->map->ops->setProperty == js_SetProperty)) {
-            JSPropertyCache *cache = &JS_PROPERTY_CACHE(cx);
-            uint32 kshape = OBJ_SHAPE(obj);
+
+        /*
+         * Probe the property cache, specializing for two important
+         * set-property cases. First:
+         *
+         *   function f(a, b, c) {
+         *     var o = {p:a, q:b, r:c};
+         *     return o;
+         *   }
+         *
+         * or similar real-world cases, which evolve a newborn native
+         * object predictably through some bounded number of property
+         * additions. And second:
+         *
+         *   o.p = x;
+         *
+         * in a frequently executed method or loop body, where p will
+         * (possibly after the first iteration) always exist in native
+         * object o.
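+         *
+         * As a sketch of that second case (illustrative only):
+         *
+         *   for (var i = 0; i < N; i++)
+         *     o.p = i;    // p is added on the first store; later stores
+         *                 // hit the cache entry keyed on (regs.pc, shape)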
+ */ + if (cache->testForSet(cx, regs.pc, obj, &entry, &obj2, &atom)) { + /* + * Fast property cache hit, only partially confirmed by + * testForSet. We know that the entry applies to regs.pc and + * that obj's shape matches. + * + * The entry predicts either a new property to be added + * directly to obj by this set, or on an existing "own" + * property, or on a prototype property that has a setter. + */ + JS_ASSERT(entry->vword.isSprop()); + sprop = entry->vword.toSprop(); + JS_ASSERT(sprop->writable()); + JS_ASSERT_IF(sprop->hasSlot(), entry->vcapTag() == 0); + + JSScope *scope = OBJ_SCOPE(obj); + JS_ASSERT(!scope->sealed()); /* - * Open-code PROPERTY_CACHE_TEST, specializing for two important - * set-property cases. First: - * - * function f(a, b, c) { - * var o = {p:a, q:b, r:c}; - * return o; - * } - * - * or similar real-world cases, which evolve a newborn native - * object predicatably through some bounded number of property - * additions. And second: - * - * o.p = x; - * - * in a frequently executed method or loop body, where p will - * (possibly after the first iteration) always exist in native - * object o. + * Fastest path: check whether the cached sprop is already + * in scope and call NATIVE_SET and break to get out of the + * do-while(0). But we can call NATIVE_SET only if obj owns + * scope or sprop is shared. */ - entry = &cache->table[PROPERTY_CACHE_HASH_PC(regs.pc, kshape)]; - PCMETER(cache->pctestentry = entry); - PCMETER(cache->tests++); - PCMETER(cache->settests++); - if (entry->kpc == regs.pc && entry->kshape == kshape) { - JS_ASSERT(PCVCAP_TAG(entry->vcap) <= 1); - if (js_MatchPropertyCacheShape(cx, obj, kshape)) { - JS_ASSERT(PCVAL_IS_SPROP(entry->vword)); - sprop = PCVAL_TO_SPROP(entry->vword); - JS_ASSERT(!(sprop->attrs & JSPROP_READONLY)); - JS_ASSERT_IF(!(sprop->attrs & JSPROP_SHARED), - PCVCAP_TAG(entry->vcap) == 0); - - JSScope *scope = OBJ_SCOPE(obj); - JS_ASSERT(!scope->sealed()); - - /* - * Fastest path: check whether the cached sprop is already - * in scope and call NATIVE_SET and break to get out of the - * do-while(0). But we can call NATIVE_SET only if obj owns - * scope or sprop is shared. - */ - bool checkForAdd; - if (sprop->attrs & JSPROP_SHARED) { - if (PCVCAP_TAG(entry->vcap) == 0 || - ((obj2 = obj->getProto()) && - OBJ_IS_NATIVE(obj2) && - OBJ_SHAPE(obj2) == PCVCAP_SHAPE(entry->vcap))) { - goto fast_set_propcache_hit; - } - - /* The cache entry doesn't apply. vshape mismatch. */ - checkForAdd = false; - } else if (!scope->isSharedEmpty()) { - if (sprop == scope->lastProperty() || scope->hasProperty(sprop)) { - fast_set_propcache_hit: - PCMETER(cache->pchits++); - PCMETER(cache->setpchits++); - NATIVE_SET(cx, obj, sprop, entry, &rval); - break; - } - checkForAdd = - !(sprop->attrs & JSPROP_SHARED) && - sprop->parent == scope->lastProperty(); - } else { - /* - * We check that cx own obj here and will continue to - * own it after js_GetMutableScope returns so we can - * continue to skip JS_UNLOCK_OBJ calls. - */ - JS_ASSERT(CX_OWNS_OBJECT_TITLE(cx, obj)); - scope = js_GetMutableScope(cx, obj); - JS_ASSERT(CX_OWNS_OBJECT_TITLE(cx, obj)); - if (!scope) - goto error; - checkForAdd = !sprop->parent; - } - - if (checkForAdd && - SPROP_HAS_STUB_SETTER(sprop) && - (slot = sprop->slot) == scope->freeslot) { - /* - * Fast path: adding a plain old property that was once - * at the frontier of the property tree, whose slot is - * next to claim among the allocated slots in obj, - * where scope->table has not been created yet. 
- * - * We may want to remove hazard conditions above and - * inline compensation code here, depending on - * real-world workloads. - */ - JS_ASSERT(!(obj->getClass()->flags & - JSCLASS_SHARE_ALL_PROPERTIES)); - - PCMETER(cache->pchits++); - PCMETER(cache->addpchits++); - - /* - * Beware classes such as Function that use the - * reserveSlots hook to allocate a number of reserved - * slots that may vary with obj. - */ - if (slot < STOBJ_NSLOTS(obj) && - !OBJ_GET_CLASS(cx, obj)->reserveSlots) { - ++scope->freeslot; - } else { - if (!js_AllocSlot(cx, obj, &slot)) - goto error; - } - - /* - * If this obj's number of reserved slots differed, or - * if something created a hash table for scope, we must - * pay the price of JSScope::putProperty. - * - * (A reserveSlots hook can cause scopes of the same - * shape to have different freeslot values. This is - * what causes the slot != sprop->slot case. See - * js_GetMutableScope.) - */ - if (slot != sprop->slot || scope->table) { - JSScopeProperty *sprop2 = - scope->putProperty(cx, sprop->id, - sprop->getter(), sprop->setter(), - slot, sprop->attrs, - sprop->getFlags(), sprop->shortid); - if (!sprop2) { - js_FreeSlot(cx, obj, slot); - goto error; - } - sprop = sprop2; - } else { - scope->extend(cx, sprop); - } - - /* - * No method change check here because here we are - * adding a new property, not updating an existing - * slot's value that might contain a method of a - * branded scope. - */ - TRACE_2(SetPropHit, entry, sprop); - LOCKED_OBJ_SET_SLOT(obj, slot, rval); - - /* - * Purge the property cache of the id we may have just - * shadowed in obj's scope and proto chains. We do this - * after unlocking obj's scope to avoid lock nesting. - */ - js_PurgeScopeChain(cx, obj, sprop->id); - break; - } - PCMETER(cache->setpcmisses++); + bool checkForAdd; + if (!sprop->hasSlot()) { + if (entry->vcapTag() == 0 || + ((obj2 = obj->getProto()) && + obj2->isNative() && + OBJ_SHAPE(obj2) == entry->vshape())) { + goto fast_set_propcache_hit; } - } - atom = js_FullTestPropertyCache(cx, regs.pc, &obj, &obj2, - &entry); - if (atom) { - PCMETER(cache->misses++); - PCMETER(cache->setmisses++); - } else { - ASSERT_VALID_PROPERTY_CACHE_HIT(0, obj, obj2, entry); - sprop = NULL; - if (obj == obj2) { - JS_ASSERT(PCVAL_IS_SPROP(entry->vword)); - sprop = PCVAL_TO_SPROP(entry->vword); - JS_ASSERT(!(sprop->attrs & JSPROP_READONLY)); - JS_ASSERT(!OBJ_SCOPE(obj2)->sealed()); + /* The cache entry doesn't apply. vshape mismatch. */ + checkForAdd = false; + } else if (!scope->isSharedEmpty()) { + if (sprop == scope->lastProperty() || scope->hasProperty(sprop)) { + fast_set_propcache_hit: + PCMETER(cache->pchits++); + PCMETER(cache->setpchits++); NATIVE_SET(cx, obj, sprop, entry, &rval); - } - if (sprop) break; + } + checkForAdd = sprop->hasSlot() && sprop->parent == scope->lastProperty(); + } else { + /* + * We check that cx own obj here and will continue to + * own it after js_GetMutableScope returns so we can + * continue to skip JS_UNLOCK_OBJ calls. 
+ */ + JS_ASSERT(CX_OWNS_OBJECT_TITLE(cx, obj)); + scope = js_GetMutableScope(cx, obj); + JS_ASSERT(CX_OWNS_OBJECT_TITLE(cx, obj)); + if (!scope) + goto error; + checkForAdd = !sprop->parent; } + + if (checkForAdd && + entry->vshape() == rt->protoHazardShape && + sprop->hasDefaultSetter() && + (slot = sprop->slot) == scope->freeslot) { + /* + * Fast path: adding a plain old property that was once + * at the frontier of the property tree, whose slot is + * next to claim among the allocated slots in obj, + * where scope->table has not been created yet. + * + * We may want to remove hazard conditions above and + * inline compensation code here, depending on + * real-world workloads. + */ + JS_ASSERT(!(obj->getClass()->flags & + JSCLASS_SHARE_ALL_PROPERTIES)); + + PCMETER(cache->pchits++); + PCMETER(cache->addpchits++); + + /* + * Beware classes such as Function that use the + * reserveSlots hook to allocate a number of reserved + * slots that may vary with obj. + */ + if (slot < obj->numSlots() && + !OBJ_GET_CLASS(cx, obj)->reserveSlots) { + ++scope->freeslot; + } else { + if (!js_AllocSlot(cx, obj, &slot)) + goto error; + } + + /* + * If this obj's number of reserved slots differed, or + * if something created a hash table for scope, we must + * pay the price of JSScope::putProperty. + * + * (A reserveSlots hook can cause scopes of the same + * shape to have different freeslot values. This is + * what causes the slot != sprop->slot case. See + * js_GetMutableScope.) + */ + if (slot != sprop->slot || scope->table) { + JSScopeProperty *sprop2 = + scope->putProperty(cx, sprop->id, + sprop->getter(), sprop->setter(), + slot, sprop->attributes(), + sprop->getFlags(), sprop->shortid); + if (!sprop2) { + js_FreeSlot(cx, obj, slot); + goto error; + } + sprop = sprop2; + } else { + scope->extend(cx, sprop); + } + + /* + * No method change check here because here we are + * adding a new property, not updating an existing + * slot's value that might contain a method of a + * branded scope. + */ + TRACE_2(SetPropHit, entry, sprop); + LOCKED_OBJ_SET_SLOT(obj, slot, rval); + + /* + * Purge the property cache of the id we may have just + * shadowed in obj's scope and proto chains. We do this + * after unlocking obj's scope to avoid lock nesting. + */ + js_PurgeScopeChain(cx, obj, sprop->id); + break; + } + PCMETER(cache->setpcmisses++); + atom = NULL; + } else if (!atom) { + /* + * Slower property cache hit, fully confirmed by testForSet (in + * the slow path, via fullTest). + */ + ASSERT_VALID_PROPERTY_CACHE_HIT(0, obj, obj2, entry); + sprop = NULL; + if (obj == obj2) { + sprop = entry->vword.toSprop(); + JS_ASSERT(sprop->writable()); + JS_ASSERT(!OBJ_SCOPE(obj2)->sealed()); + NATIVE_SET(cx, obj, sprop, entry, &rval); + } + if (sprop) + break; } if (!atom) LOAD_ATOM(0); id = ATOM_TO_JSID(atom); - if (entry) { + if (entry && JS_LIKELY(obj->map->ops->setProperty == js_SetProperty)) { uintN defineHow = (op == JSOP_SETMETHOD) ? 
JSDNP_CACHE_RESULT | JSDNP_SET_METHOD : JSDNP_CACHE_RESULT; @@ -1898,19 +1874,36 @@ BEGIN_CASE(JSOP_GETELEM) VALUE_TO_OBJECT(cx, -2, lval, obj); if (JSVAL_IS_INT(rval)) { if (obj->isDenseArray()) { - jsuint length; + jsuint idx = jsuint(JSVAL_TO_INT(rval)); - length = js_DenseArrayCapacity(obj); - i = JSVAL_TO_INT(rval); - if ((jsuint)i < length && - i < obj->fslots[JSSLOT_ARRAY_LENGTH]) { - rval = obj->dslots[i]; + if (idx < jsuint(obj->fslots[JSSLOT_ARRAY_LENGTH]) && + idx < js_DenseArrayCapacity(obj)) { + rval = obj->dslots[idx]; if (rval != JSVAL_HOLE) goto end_getelem; /* Reload rval from the stack in the rare hole case. */ rval = FETCH_OPND(-1); } + } else if (obj->isArguments() +#ifdef JS_TRACER + && !GetArgsPrivateNative(obj) +#endif + ) { + uint32 arg = uint32(JSVAL_TO_INT(rval)); + + if (arg < GetArgsLength(obj)) { + JSStackFrame *afp = (JSStackFrame *) obj->getPrivate(); + if (afp) { + rval = afp->argv[arg]; + goto end_getelem; + } + + rval = GetArgsSlot(obj, arg); + if (rval != JSVAL_HOLE) + goto end_getelem; + rval = FETCH_OPND(-1); + } } id = INT_JSVAL_TO_JSID(rval); } else { @@ -2296,31 +2289,28 @@ END_CASE(JSOP_SETCALL) BEGIN_CASE(JSOP_NAME) BEGIN_CASE(JSOP_CALLNAME) { - JSPropCacheEntry *entry; + PropertyCacheEntry *entry; obj = fp->scopeChain; - if (JS_LIKELY(OBJ_IS_NATIVE(obj))) { - PROPERTY_CACHE_TEST(cx, regs.pc, obj, obj2, entry, atom); - if (!atom) { - ASSERT_VALID_PROPERTY_CACHE_HIT(0, obj, obj2, entry); - if (PCVAL_IS_OBJECT(entry->vword)) { - rval = PCVAL_OBJECT_TO_JSVAL(entry->vword); - goto do_push_rval; - } - if (PCVAL_IS_SLOT(entry->vword)) { - slot = PCVAL_TO_SLOT(entry->vword); - JS_ASSERT(slot < OBJ_SCOPE(obj2)->freeslot); - rval = LOCKED_OBJ_GET_SLOT(obj2, slot); - goto do_push_rval; - } - - JS_ASSERT(PCVAL_IS_SPROP(entry->vword)); - sprop = PCVAL_TO_SPROP(entry->vword); - goto do_native_get; + JS_PROPERTY_CACHE(cx).test(cx, regs.pc, obj, obj2, entry, atom); + if (!atom) { + ASSERT_VALID_PROPERTY_CACHE_HIT(0, obj, obj2, entry); + if (entry->vword.isObject()) { + rval = entry->vword.toJsval(); + goto do_push_rval; } - } else { - LOAD_ATOM(0); + + if (entry->vword.isSlot()) { + slot = entry->vword.toSlot(); + JS_ASSERT(slot < OBJ_SCOPE(obj2)->freeslot); + rval = LOCKED_OBJ_GET_SLOT(obj2, slot); + goto do_push_rval; + } + + JS_ASSERT(entry->vword.isSprop()); + sprop = entry->vword.toSprop(); + goto do_native_get; } id = ATOM_TO_JSID(atom); @@ -2339,7 +2329,7 @@ BEGIN_CASE(JSOP_CALLNAME) } /* Take the slow path if prop was not found in a native object. 
*/ - if (!OBJ_IS_NATIVE(obj) || !OBJ_IS_NATIVE(obj2)) { + if (!obj->isNative() || !obj2->isNative()) { obj2->dropProperty(cx, prop); if (!obj->getProperty(cx, id, &rval)) goto error; @@ -2758,10 +2748,10 @@ BEGIN_CASE(JSOP_CALLGVAR) JS_ASSERT(fp->varobj(cx) == cx->activeCallStack()->getInitialVarObj()); obj = cx->activeCallStack()->getInitialVarObj(); slot = JSVAL_TO_INT(lval); - rval = OBJ_GET_SLOT(cx, obj, slot); + rval = obj->getSlotMT(cx, slot); PUSH_OPND(rval); if (op == JSOP_CALLGVAR) - PUSH_OPND(OBJECT_TO_JSVAL(obj)); + PUSH_OPND(JSVAL_NULL); END_CASE(JSOP_GETGVAR) BEGIN_CASE(JSOP_SETGVAR) @@ -2851,12 +2841,12 @@ BEGIN_CASE(JSOP_DEFVAR) if (!fp->fun && index < GlobalVarCount(fp) && obj2 == obj && - OBJ_IS_NATIVE(obj)) { + obj->isNative()) { sprop = (JSScopeProperty *) prop; - if ((sprop->attrs & JSPROP_PERMANENT) && + if (!sprop->configurable() && SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(obj)) && - SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop) && - SPROP_HAS_STUB_SETTER(sprop)) { + sprop->hasDefaultGetterOrIsMethod() && + sprop->hasDefaultSetter()) { /* * Fast globals use frame variables to map the global name's atom * index to the permanent varobj slot number, tagged as a jsval. @@ -2996,7 +2986,7 @@ BEGIN_CASE(JSOP_DEFFUN) if (prop) { if (parent == pobj && OBJ_GET_CLASS(cx, parent) == &js_CallClass && - (old = ((JSScopeProperty *) prop)->attrs, + (old = ((JSScopeProperty *) prop)->attributes(), !(old & (JSPROP_GETTER|JSPROP_SETTER)) && (old & (JSPROP_ENUMERATE|JSPROP_PERMANENT)) == attrs)) { /* @@ -3367,6 +3357,7 @@ END_CASE(JSOP_ENDINIT) BEGIN_CASE(JSOP_INITPROP) BEGIN_CASE(JSOP_INITMETHOD) +{ /* Load the property's initial value into rval. */ JS_ASSERT(regs.sp - StackBase(fp) >= 2); rval = FETCH_OPND(-1); @@ -3374,107 +3365,66 @@ BEGIN_CASE(JSOP_INITMETHOD) /* Load the object being initialized into lval/obj. */ lval = FETCH_OPND(-2); obj = JSVAL_TO_OBJECT(lval); - JS_ASSERT(OBJ_IS_NATIVE(obj)); + JS_ASSERT(obj->isNative()); JS_ASSERT(!OBJ_GET_CLASS(cx, obj)->reserveSlots); JS_ASSERT(!(obj->getClass()->flags & JSCLASS_SHARE_ALL_PROPERTIES)); - do { - JSScope *scope; - uint32 kshape; - JSPropertyCache *cache; - JSPropCacheEntry *entry; - /* - * We can not assume that the object created by JSOP_NEWINIT is still - * single-threaded as the debugger can access it from other threads. - */ - if (!CX_OWNS_OBJECT_TITLE(cx, obj)) - goto do_initprop_miss; + JSScope *scope = OBJ_SCOPE(obj); + PropertyCacheEntry *entry; - scope = OBJ_SCOPE(obj); - JS_ASSERT(scope->object == obj); - JS_ASSERT(!scope->sealed()); - kshape = scope->shape; - cache = &JS_PROPERTY_CACHE(cx); - entry = &cache->table[PROPERTY_CACHE_HASH_PC(regs.pc, kshape)]; - PCMETER(cache->pctestentry = entry); - PCMETER(cache->tests++); - PCMETER(cache->initests++); - - if (entry->kpc == regs.pc && - entry->kshape == kshape && - PCVCAP_SHAPE(entry->vcap) == rt->protoHazardShape) { - JS_ASSERT(PCVCAP_TAG(entry->vcap) == 0); - - PCMETER(cache->pchits++); - PCMETER(cache->inipchits++); - - JS_ASSERT(PCVAL_IS_SPROP(entry->vword)); - sprop = PCVAL_TO_SPROP(entry->vword); - JS_ASSERT(!(sprop->attrs & JSPROP_READONLY)); - - /* - * If this property has a non-stub setter, it must be __proto__, - * __parent__, or another "shared prototype" built-in. Force a miss - * to save code size here and let the standard code path take care - * of business. 
- */ - if (!SPROP_HAS_STUB_SETTER(sprop)) - goto do_initprop_miss; - - /* - * Detect a repeated property name and force a miss to share the - * strict warning code and consolidate all the complexity managed - * by JSScope::addProperty. - */ - if (sprop->parent != scope->lastProperty()) - goto do_initprop_miss; - - /* - * Otherwise this entry must be for a direct property of obj, not a - * proto-property, and there cannot have been any deletions of - * prior properties. - */ - JS_ASSERT(!scope->inDictionaryMode()); - JS_ASSERT_IF(scope->table, !scope->hasProperty(sprop)); - - slot = sprop->slot; - JS_ASSERT(slot == scope->freeslot); - if (slot < STOBJ_NSLOTS(obj)) { - ++scope->freeslot; - } else { - if (!js_AllocSlot(cx, obj, &slot)) - goto error; - JS_ASSERT(slot == sprop->slot); - } - - JS_ASSERT(!scope->lastProperty() || - scope->shape == scope->lastProperty()->shape); - if (scope->table) { - JSScopeProperty *sprop2 = - scope->addProperty(cx, sprop->id, sprop->getter(), sprop->setter(), slot, - sprop->attrs, sprop->getFlags(), sprop->shortid); - if (!sprop2) { - js_FreeSlot(cx, obj, slot); - goto error; - } - JS_ASSERT(sprop2 == sprop); - } else { - JS_ASSERT(!scope->isSharedEmpty()); - scope->extend(cx, sprop); - } - - /* - * No method change check here because here we are adding a new - * property, not updating an existing slot's value that might - * contain a method of a branded scope. - */ - TRACE_2(SetPropHit, entry, sprop); - LOCKED_OBJ_SET_SLOT(obj, slot, rval); - break; + /* + * Probe the property cache. + * + * We can not assume that the object created by JSOP_NEWINIT is still + * single-threaded as the debugger can access it from other threads. + * So check first. + * + * On a hit, if the cached sprop has a non-default setter, it must be + * __proto__ or __parent__. If sprop->parent != scope->lastProperty(), + * there is a repeated property name. The fast path does not handle these + * two cases. + */ + if (CX_OWNS_OBJECT_TITLE(cx, obj) && + JS_PROPERTY_CACHE(cx).testForInit(rt, regs.pc, obj, scope, &sprop, &entry) && + sprop->hasDefaultSetter() && + sprop->parent == scope->lastProperty()) + { + /* Fast path. Property cache hit. */ + slot = sprop->slot; + JS_ASSERT(slot == scope->freeslot); + if (slot < obj->numSlots()) { + ++scope->freeslot; + } else { + if (!js_AllocSlot(cx, obj, &slot)) + goto error; + JS_ASSERT(slot == sprop->slot); } - do_initprop_miss: - PCMETER(cache->inipcmisses++); + JS_ASSERT(!scope->lastProperty() || + scope->shape == scope->lastProperty()->shape); + if (scope->table) { + JSScopeProperty *sprop2 = + scope->addProperty(cx, sprop->id, sprop->getter(), sprop->setter(), slot, + sprop->attributes(), sprop->getFlags(), sprop->shortid); + if (!sprop2) { + js_FreeSlot(cx, obj, slot); + goto error; + } + JS_ASSERT(sprop2 == sprop); + } else { + JS_ASSERT(!scope->isSharedEmpty()); + scope->extend(cx, sprop); + } + + /* + * No method change check here because here we are adding a new + * property, not updating an existing slot's value that might + * contain a method of a branded scope. + */ + TRACE_2(SetPropHit, entry, sprop); + LOCKED_OBJ_SET_SLOT(obj, slot, rval); + } else { + PCMETER(JS_PROPERTY_CACHE(cx).inipcmisses++); /* Get the immediate property name into id. */ LOAD_ATOM(0); @@ -3496,10 +3446,11 @@ BEGIN_CASE(JSOP_INITMETHOD) defineHow))) { goto error; } - } while (0); + } /* Common tail for property cache hit and miss cases. 
*/ regs.sp--; +} END_CASE(JSOP_INITPROP); BEGIN_CASE(JSOP_INITELEM) diff --git a/js/src/jsparse.cpp b/js/src/jsparse.cpp index e064011bc90..09c21da2f90 100644 --- a/js/src/jsparse.cpp +++ b/js/src/jsparse.cpp @@ -88,6 +88,8 @@ #include "jsdhash.h" #endif +using namespace js; + /* * Asserts to verify assumptions behind pn_ macros. */ @@ -98,68 +100,16 @@ JS_STATIC_ASSERT(pn_offsetof(pn_u.name.atom) == pn_offsetof(pn_u.apair.atom)); #undef pn_offsetof -/* - * JS parsers, from lowest to highest precedence. - * - * Each parser takes a context, a token stream, and a tree context struct. - * Each returns a parse node tree or null on error. - */ - -typedef JSParseNode * -JSParser(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc); - -typedef JSParseNode * -JSVariablesParser(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, - bool inLetHead); - -typedef JSParseNode * -JSMemberParser(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, - JSBool allowCallSyntax); - -typedef JSParseNode * -JSPrimaryParser(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, - JSTokenType tt, JSBool afterDot); - -typedef JSParseNode * -JSParenParser(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, - JSParseNode *pn1, JSBool *genexp); - -static JSParser FunctionStmt; -static JSParser FunctionExpr; -static JSParser Statements; -static JSParser Statement; -static JSVariablesParser Variables; -static JSParser Expr; -static JSParser AssignExpr; -static JSParser CondExpr; -static JSParser OrExpr; -static JSParser AndExpr; -static JSParser BitOrExpr; -static JSParser BitXorExpr; -static JSParser BitAndExpr; -static JSParser EqExpr; -static JSParser RelExpr; -static JSParser ShiftExpr; -static JSParser AddExpr; -static JSParser MulExpr; -static JSParser UnaryExpr; -static JSMemberParser MemberExpr; -static JSPrimaryParser PrimaryExpr; -static JSParenParser ParenExpr; - -static bool -RecognizeDirectivePrologue(JSContext *cx, JSTreeContext *tc, JSParseNode *pn); - /* * Insist that the next token be of type tt, or report errno and return null. * NB: this macro uses cx and ts from its lexical environment. */ -#define MUST_MATCH_TOKEN(tt, errno) \ - JS_BEGIN_MACRO \ - if (js_GetToken(cx, ts) != tt) { \ - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, errno); \ - return NULL; \ - } \ +#define MUST_MATCH_TOKEN(tt, errno) \ + JS_BEGIN_MACRO \ + if (tokenStream.getToken() != tt) { \ + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, errno); \ + return NULL; \ + } \ JS_END_MACRO #ifdef METER_PARSENODES @@ -215,14 +165,13 @@ JSCompiler::init(const jschar *base, size_t length, JSContext *cx = context; tempPoolMark = JS_ARENA_MARK(&cx->tempPool); - if (!tokenStream.init(cx, base, length, fp, filename, lineno)) { + if (!tokenStream.init(base, length, fp, filename, lineno)) { JS_ARENA_RELEASE(&cx->tempPool, tempPoolMark); return false; } /* Root atoms and objects allocated for the parsed tree. 
*/ JS_KEEP_ATOMS(cx->runtime); - JS_PUSH_TEMP_ROOT_COMPILER(cx, this, &tempRoot); return true; } @@ -232,10 +181,8 @@ JSCompiler::~JSCompiler() if (principals) JSPRINCIPALS_DROP(cx, principals); - JS_ASSERT(tempRoot.u.compiler == this); - JS_POP_TEMP_ROOT(cx, &tempRoot); JS_UNKEEP_ATOMS(cx->runtime); - tokenStream.close(cx); + tokenStream.close(); JS_ARENA_RELEASE(&cx->tempPool, tempPoolMark); } @@ -276,7 +223,7 @@ JSFunctionBox * JSCompiler::newFunctionBox(JSObject *obj, JSParseNode *fn, JSTreeContext *tc) { JS_ASSERT(obj); - JS_ASSERT(HAS_FUNCTION_CLASS(obj)); + JS_ASSERT(obj->isFunction()); /* * We use JSContext.tempPool to allocate parsed objects and place them on @@ -338,10 +285,7 @@ JSFunctionBox::shouldUnbrand(uintN methods, uintN slowMethods) const void JSCompiler::trace(JSTracer *trc) { - JSObjectBox *objbox; - - JS_ASSERT(tempRoot.u.compiler == this); - objbox = traceListHead; + JSObjectBox *objbox = traceListHead; while (objbox) { JS_CALL_OBJECT_TRACER(trc, objbox->object, "parser.object"); objbox = objbox->traceLink; @@ -499,6 +443,10 @@ RecycleFuncNameKids(JSParseNode *pn, JSTreeContext *tc) } } +/* + * Allocate a JSParseNode from tc's node freelist or, failing that, from cx's + * temporary arena. + */ static JSParseNode * NewOrRecycledNode(JSTreeContext *tc) { @@ -573,64 +521,23 @@ NewOrRecycledNode(JSTreeContext *tc) return pn; } -static inline void -InitParseNode(JSParseNode *pn, JSTokenType type, JSOp op, JSParseNodeArity arity) -{ - pn->pn_type = type; - pn->pn_op = op; - pn->pn_arity = arity; - pn->pn_parens = false; - JS_ASSERT(!pn->pn_used); - JS_ASSERT(!pn->pn_defn); - pn->pn_next = pn->pn_link = NULL; -} +/* used only by static create methods of subclasses */ -/* - * Allocate a JSParseNode from tc's node freelist or, failing that, from cx's - * temporary arena. - */ -static JSParseNode * -NewParseNode(JSParseNodeArity arity, JSTreeContext *tc) +JSParseNode * +JSParseNode::create(JSParseNodeArity arity, JSTreeContext *tc) { - JSParseNode *pn; - JSToken *tp; - - pn = NewOrRecycledNode(tc); + JSParseNode *pn = NewOrRecycledNode(tc); if (!pn) return NULL; - tp = &CURRENT_TOKEN(&tc->compiler->tokenStream); - InitParseNode(pn, tp->type, JSOP_NOP, arity); + Token *tp = tc->compiler->tokenStream.mutableCurrentToken(); + pn->init(tp->type, JSOP_NOP, arity); pn->pn_pos = tp->pos; return pn; } -static inline void -InitNameNodeCommon(JSParseNode *pn, JSTreeContext *tc) -{ - pn->pn_expr = NULL; - pn->pn_cookie = FREE_UPVAR_COOKIE; - pn->pn_dflags = tc->atTopLevel() ? 
PND_TOPLEVEL : 0; - if (!tc->topStmt || tc->topStmt->type == STMT_BLOCK) - pn->pn_dflags |= PND_BLOCKCHILD; - pn->pn_blockid = tc->blockid(); -} - -static JSParseNode * -NewNameNode(JSContext *cx, JSAtom *atom, JSTreeContext *tc) -{ - JSParseNode *pn; - - pn = NewParseNode(PN_NAME, tc); - if (pn) { - pn->pn_atom = atom; - InitNameNodeCommon(pn, tc); - } - return pn; -} - -static JSParseNode * -NewBinary(JSTokenType tt, JSOp op, JSParseNode *left, JSParseNode *right, - JSTreeContext *tc) +JSParseNode * +JSParseNode::newBinaryOrAppend(TokenKind tt, JSOp op, JSParseNode *left, JSParseNode *right, + JSTreeContext *tc) { JSParseNode *pn, *pn1, *pn2; @@ -691,25 +598,53 @@ NewBinary(JSTokenType tt, JSOp op, JSParseNode *left, JSParseNode *right, pn = NewOrRecycledNode(tc); if (!pn) return NULL; - InitParseNode(pn, tt, op, PN_BINARY); + pn->init(tt, op, PN_BINARY); pn->pn_pos.begin = left->pn_pos.begin; pn->pn_pos.end = right->pn_pos.end; pn->pn_left = left; pn->pn_right = right; - return pn; + return (BinaryNode *)pn; } +namespace js { + +inline void +NameNode::initCommon(JSTreeContext *tc) +{ + pn_expr = NULL; + pn_cookie = FREE_UPVAR_COOKIE; + pn_dflags = tc->atTopLevel() ? PND_TOPLEVEL : 0; + if (!tc->topStmt || tc->topStmt->type == STMT_BLOCK) + pn_dflags |= PND_BLOCKCHILD; + pn_blockid = tc->blockid(); +} + +NameNode * +NameNode::create(JSAtom *atom, JSTreeContext *tc) +{ + JSParseNode *pn; + + pn = JSParseNode::create(PN_NAME, tc); + if (pn) { + pn->pn_atom = atom; + ((NameNode *)pn)->initCommon(tc); + } + return (NameNode *)pn; +} + +} /* namespace js */ + #if JS_HAS_GETTER_SETTER -static JSTokenType -CheckGetterOrSetter(JSContext *cx, JSTokenStream *ts, JSTokenType tt) +static TokenKind +CheckGetterOrSetter(JSContext *cx, TokenStream *ts, TokenKind tt) { JSAtom *atom; JSRuntime *rt; JSOp op; const char *name; - JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_NAME); - atom = CURRENT_TOKEN(ts).t_atom; + JS_ASSERT(ts->currentToken().type == TOK_NAME); + atom = ts->currentToken().t_atom; rt = cx->runtime; if (atom == rt->atomState.getterAtom) op = JSOP_GETTER; @@ -717,25 +652,20 @@ CheckGetterOrSetter(JSContext *cx, JSTokenStream *ts, JSTokenType tt) op = JSOP_SETTER; else return TOK_NAME; - if (js_PeekTokenSameLine(cx, ts) != tt) + if (ts->peekTokenSameLine() != tt) return TOK_NAME; - (void) js_GetToken(cx, ts); - if (CURRENT_TOKEN(ts).t_op != JSOP_NOP) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_BAD_GETTER_OR_SETTER, - (op == JSOP_GETTER) - ? js_getter_str - : js_setter_str); + (void) ts->getToken(); + if (ts->currentToken().t_op != JSOP_NOP) { + ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, JSMSG_BAD_GETTER_OR_SETTER, + (op == JSOP_GETTER) ? js_getter_str : js_setter_str); return TOK_ERROR; } - CURRENT_TOKEN(ts).t_op = op; + ts->mutableCurrentToken()->t_op = op; if (JS_HAS_STRICT_OPTION(cx)) { name = js_AtomToPrintableString(cx, atom); if (!name || - !js_ReportCompileErrorNumber(cx, ts, NULL, - JSREPORT_WARNING | JSREPORT_STRICT, - JSMSG_DEPRECATED_USAGE, - name)) { + !ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_WARNING | JSREPORT_STRICT, + JSMSG_DEPRECATED_USAGE, name)) { return TOK_ERROR; } } @@ -781,19 +711,19 @@ JSCompiler::parse(JSObject *chain) * an object lock before it finishes generating bytecode into a script * protected from the GC by a root or a stack frame reference. 
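Review note: the rename NewBinary → JSParseNode::newBinaryOrAppend is more than cosmetic — for left-associative operators the function reuses an existing list node and appends the new operand instead of nesting binary nodes (that branch sits in the elided body above this hunk's tail). A toy illustration of the flattening idea, with made-up node types:

    #include <cstdio>
    #include <memory>
    #include <string>
    #include <vector>

    // Hypothetical miniature AST: one n-ary node kind plus leaves. The real
    // JSParseNode uses arenas and unions; this only shows the append idea.
    struct Node {
        char op;                                  // '+' for list nodes, 0 for leaves
        std::string atom;                         // leaf payload
        std::vector<std::unique_ptr<Node>> kids;  // children for list nodes
    };

    std::unique_ptr<Node> leaf(std::string a) {
        auto n = std::make_unique<Node>();
        n->op = 0;
        n->atom = std::move(a);
        return n;
    }

    // Analogue of newBinaryOrAppend for a left-associative operator: if the
    // left operand is already a list with the same op, append, don't nest.
    std::unique_ptr<Node> binaryOrAppend(char op, std::unique_ptr<Node> left,
                                         std::unique_ptr<Node> right) {
        if (left->op == op) {                     // (a + b) + c  ==>  list(a, b, c)
            left->kids.push_back(std::move(right));
            return left;
        }
        auto n = std::make_unique<Node>();        // fresh two-child list
        n->op = op;
        n->kids.push_back(std::move(left));
        n->kids.push_back(std::move(right));
        return n;
    }

    int main() {
        auto sum = binaryOrAppend('+', binaryOrAppend('+', leaf("a"), leaf("b")), leaf("c"));
        std::printf("children: %zu\n", sum->kids.size());  // 3, not a nested pair
    }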
*/ - JSTreeContext tc(this); - tc.scopeChain = chain; - if (!GenerateBlockId(&tc, tc.bodyid)) + JSTreeContext globaltc(this); + globaltc.scopeChain = chain; + if (!GenerateBlockId(&globaltc, globaltc.bodyid)) return NULL; - JSParseNode *pn = Statements(context, TS(this), &tc); + JSParseNode *pn = statements(); if (pn) { - if (!js_MatchToken(context, TS(this), TOK_EOF)) { - js_ReportCompileErrorNumber(context, TS(this), NULL, JSREPORT_ERROR, - JSMSG_SYNTAX_ERROR); + if (!tokenStream.matchToken(TOK_EOF)) { + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_SYNTAX_ERROR); pn = NULL; } else { - if (!js_FoldConstants(context, pn, &tc)) + if (!js_FoldConstants(context, pn, &globaltc)) pn = NULL; } } @@ -836,7 +766,7 @@ JSCompiler::compileScript(JSContext *cx, JSObject *scopeChain, JSStackFrame *cal { JSCompiler jsc(cx, principals, callerFrame); JSArenaPool codePool, notePool; - JSTokenType tt; + TokenKind tt; JSParseNode *pn; uint32 scriptGlobals; JSScript *script; @@ -862,7 +792,7 @@ JSCompiler::compileScript(JSContext *cx, JSObject *scopeChain, JSStackFrame *cal JS_InitArenaPool(¬ePool, "note", 1024, sizeof(jssrcnote), &cx->scriptStackQuota); - JSCodeGenerator cg(&jsc, &codePool, ¬ePool, jsc.tokenStream.lineno); + JSCodeGenerator cg(&jsc, &codePool, ¬ePool, jsc.tokenStream.getLineno()); MUST_FLOW_THROUGH("out"); @@ -916,8 +846,8 @@ JSCompiler::compileScript(JSContext *cx, JSObject *scopeChain, JSStackFrame *cal } /* - * Inline Statements to emit as we go to save AST space. We must generate - * our script-body blockid since we aren't calling Statements. + * Inline this->statements to emit as we go to save AST space. We must + * generate our script-body blockid since we aren't calling Statements. */ uint32 bodyid; if (!GenerateBlockId(&cg, bodyid)) @@ -938,7 +868,7 @@ JSCompiler::compileScript(JSContext *cx, JSObject *scopeChain, JSStackFrame *cal inDirectivePrologue = true; for (;;) { jsc.tokenStream.flags |= TSF_OPERAND; - tt = js_PeekToken(cx, &jsc.tokenStream); + tt = jsc.tokenStream.peekToken(); jsc.tokenStream.flags &= ~TSF_OPERAND; if (tt <= TOK_EOF) { if (tt == TOK_EOF) @@ -947,13 +877,13 @@ JSCompiler::compileScript(JSContext *cx, JSObject *scopeChain, JSStackFrame *cal goto out; } - pn = Statement(cx, &jsc.tokenStream, &cg); + pn = jsc.statement(); if (!pn) goto out; JS_ASSERT(!cg.blockNode); if (inDirectivePrologue) - inDirectivePrologue = RecognizeDirectivePrologue(cx, &cg, pn); + inDirectivePrologue = jsc.recognizeDirectivePrologue(pn); if (!js_FoldConstants(cx, pn, &cg)) goto out; @@ -969,7 +899,7 @@ JSCompiler::compileScript(JSContext *cx, JSObject *scopeChain, JSStackFrame *cal #if JS_HAS_XML_SUPPORT if (PN_TYPE(pn) != TOK_SEMI || !pn->pn_kid || - !TREE_TYPE_IS_XML(PN_TYPE(pn->pn_kid))) { + !TreeTypeIsXML(PN_TYPE(pn->pn_kid))) { onlyXML = false; } #endif @@ -984,8 +914,8 @@ JSCompiler::compileScript(JSContext *cx, JSObject *scopeChain, JSStackFrame *cal * https://bugzilla.mozilla.org/show_bug.cgi?id=336551 */ if (pn && onlyXML && (tcflags & TCF_NO_SCRIPT_RVAL)) { - js_ReportCompileErrorNumber(cx, &jsc.tokenStream, NULL, JSREPORT_ERROR, - JSMSG_XML_WHOLE_PROGRAM); + ReportCompileErrorNumber(cx, &jsc.tokenStream, NULL, JSREPORT_ERROR, + JSMSG_XML_WHOLE_PROGRAM); goto out; } #endif @@ -1075,8 +1005,7 @@ JSCompiler::compileScript(JSContext *cx, JSObject *scopeChain, JSStackFrame *cal return script; too_many_slots: - js_ReportCompileErrorNumber(cx, &jsc.tokenStream, NULL, - JSREPORT_ERROR, JSMSG_TOO_MANY_LOCALS); + ReportCompileErrorNumber(cx, 
&jsc.tokenStream, NULL, JSREPORT_ERROR, JSMSG_TOO_MANY_LOCALS); script = NULL; goto out; } @@ -1221,8 +1150,7 @@ ReportBadReturn(JSContext *cx, JSTreeContext *tc, uintN flags, uintN errnum, errnum = anonerrnum; name = NULL; } - return js_ReportCompileErrorNumber(cx, TS(tc->compiler), NULL, flags, - errnum, name); + return ReportCompileErrorNumber(cx, TS(tc->compiler), NULL, flags, errnum, name); } static JSBool @@ -1248,8 +1176,8 @@ CheckStrictAssignment(JSContext *cx, JSTreeContext *tc, JSParseNode *lhs) if (atom == atomState->evalAtom || atom == atomState->argumentsAtom) { const char *name = js_AtomToPrintableString(cx, atom); if (!name || - !js_ReportStrictModeError(cx, TS(tc->compiler), tc, lhs, - JSMSG_DEPRECATED_ASSIGN, name)) { + !ReportStrictModeError(cx, TS(tc->compiler), tc, lhs, JSMSG_DEPRECATED_ASSIGN, + name)) { return false; } } @@ -1264,8 +1192,7 @@ CheckStrictAssignment(JSContext *cx, JSTreeContext *tc, JSParseNode *lhs) * pn is NULL. */ bool -CheckStrictBinding(JSContext *cx, JSTreeContext *tc, JSAtom *atom, - JSParseNode *pn) +CheckStrictBinding(JSContext *cx, JSTreeContext *tc, JSAtom *atom, JSParseNode *pn) { if (!tc->needStrictChecks()) return true; @@ -1274,8 +1201,7 @@ CheckStrictBinding(JSContext *cx, JSTreeContext *tc, JSAtom *atom, if (atom == atomState->evalAtom || atom == atomState->argumentsAtom) { const char *name = js_AtomToPrintableString(cx, atom); if (name) - js_ReportStrictModeError(cx, TS(tc->compiler), tc, pn, - JSMSG_BAD_BINDING, name); + ReportStrictModeError(cx, TS(tc->compiler), tc, pn, JSMSG_BAD_BINDING, name); return false; } return true; @@ -1286,7 +1212,7 @@ CheckStrictBinding(JSContext *cx, JSTreeContext *tc, JSAtom *atom, * formals are legit given fun's strictness level, return true. Otherwise, * report an error and return false. Use pn for error position reporting, * unless we can find something more accurate in tc's decls. - * + * * In some cases the code to parse the argument list will already have noticed * the duplication; we could try to use that knowledge instead of re-checking * here. But since the strictness of the function's body determines what @@ -1315,8 +1241,7 @@ CheckStrictFormals(JSContext *cx, JSTreeContext *tc, JSFunction *fun, pn = dn; const char *name = js_AtomToPrintableString(cx, atom); if (!name || - !js_ReportStrictModeError(cx, TS(tc->compiler), tc, pn, - JSMSG_DUPLICATE_FORMAL, name)) { + !ReportStrictModeError(cx, TS(tc->compiler), tc, pn, JSMSG_DUPLICATE_FORMAL, name)) { return false; } } @@ -1330,8 +1255,7 @@ CheckStrictFormals(JSContext *cx, JSTreeContext *tc, JSFunction *fun, JS_ASSERT(dn->pn_atom == atom); const char *name = js_AtomToPrintableString(cx, atom); if (!name || - !js_ReportStrictModeError(cx, TS(tc->compiler), tc, dn, - JSMSG_BAD_BINDING, name)) { + !ReportStrictModeError(cx, TS(tc->compiler), tc, dn, JSMSG_BAD_BINDING, name)) { return false; } } @@ -1339,8 +1263,8 @@ CheckStrictFormals(JSContext *cx, JSTreeContext *tc, JSFunction *fun, return true; } -static JSParseNode * -FunctionBody(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::functionBody() { JSStmtInfo stmtInfo; uintN oldflags, firstLine; @@ -1355,22 +1279,22 @@ FunctionBody(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) /* * Save the body's first line, and store it in pn->pn_pos.begin.lineno - * later, because we may have not peeked in ts yet, so Statements won't - * acquire a valid pn->pn_pos.begin from the current token. 
+ * later, because we may have not peeked in tokenStream yet, so statements + * won't acquire a valid pn->pn_pos.begin from the current token. */ - firstLine = ts->lineno; + firstLine = tokenStream.getLineno(); #if JS_HAS_EXPR_CLOSURES - if (CURRENT_TOKEN(ts).type == TOK_LC) { - pn = Statements(cx, ts, tc); + if (tokenStream.currentToken().type == TOK_LC) { + pn = statements(); } else { - pn = NewParseNode(PN_UNARY, tc); + pn = UnaryNode::create(tc); if (pn) { - pn->pn_kid = AssignExpr(cx, ts, tc); + pn->pn_kid = assignExpr(); if (!pn->pn_kid) { pn = NULL; } else { if (tc->flags & TCF_FUN_IS_GENERATOR) { - ReportBadReturn(cx, tc, JSREPORT_ERROR, + ReportBadReturn(context, tc, JSREPORT_ERROR, JSMSG_BAD_GENERATOR_RETURN, JSMSG_BAD_ANON_GENERATOR_RETURN); pn = NULL; @@ -1383,7 +1307,7 @@ FunctionBody(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) } } #else - pn = Statements(cx, ts, tc); + pn = statements(); #endif if (pn) { @@ -1392,8 +1316,8 @@ FunctionBody(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) pn->pn_pos.begin.lineno = firstLine; /* Check for falling off the end of a function that returns a value. */ - if (JS_HAS_STRICT_OPTION(cx) && (tc->flags & TCF_RETURN_EXPR) && - !CheckFinalReturn(cx, tc, pn)) { + if (JS_HAS_STRICT_OPTION(context) && (tc->flags & TCF_RETURN_EXPR) && + !CheckFinalReturn(context, tc, pn)) { pn = NULL; } } @@ -1409,8 +1333,7 @@ MakePlaceholder(JSParseNode *pn, JSTreeContext *tc) if (!ale) return NULL; - JSDefinition *dn = (JSDefinition *) - NewNameNode(tc->compiler->context, pn->pn_atom, tc); + JSDefinition *dn = (JSDefinition *)NameNode::create(pn->pn_atom, tc); if (!dn) return NULL; @@ -1592,7 +1515,7 @@ DefineArg(JSParseNode *pn, JSAtom *atom, uintN i, JSTreeContext *tc) * but having TOK_NAME type and JSOP_NOP op. Insert it in a TOK_ARGSBODY * list node returned via pn->pn_body. */ - argpn = NewNameNode(tc->compiler->context, atom, tc); + argpn = NameNode::create(atom, tc); if (!argpn) return false; JS_ASSERT(PN_TYPE(argpn) == TOK_NAME && PN_OP(argpn) == JSOP_NOP); @@ -1604,7 +1527,7 @@ DefineArg(JSParseNode *pn, JSAtom *atom, uintN i, JSTreeContext *tc) argsbody = pn->pn_body; if (!argsbody) { - argsbody = NewParseNode(PN_LIST, tc); + argsbody = ListNode::create(tc); if (!argsbody) return false; argsbody->pn_type = TOK_ARGSBODY; @@ -1641,15 +1564,15 @@ JSCompiler::compileFunctionBody(JSContext *cx, JSFunction *fun, JSPrincipals *pr JS_InitArenaPool(¬ePool, "note", 1024, sizeof(jssrcnote), &cx->scriptStackQuota); - JSCodeGenerator funcg(&jsc, &codePool, ¬ePool, jsc.tokenStream.lineno); + JSCodeGenerator funcg(&jsc, &codePool, ¬ePool, jsc.tokenStream.getLineno()); funcg.flags |= TCF_IN_FUNCTION; funcg.fun = fun; if (!GenerateBlockId(&funcg, funcg.bodyid)) return NULL; /* FIXME: make Function format the source for a function definition. */ - jsc.tokenStream.tokens[0].type = TOK_NAME; - JSParseNode *fn = NewParseNode(PN_FUNC, &funcg); + jsc.tokenStream.getMutableTokenAt(0)->type = TOK_NAME; + JSParseNode *fn = FunctionNode::create(&funcg); if (fn) { fn->pn_body = NULL; fn->pn_cookie = FREE_UPVAR_COOKIE; @@ -1677,14 +1600,14 @@ JSCompiler::compileFunctionBody(JSContext *cx, JSFunction *fun, JSPrincipals *pr * done parsing, we must fold constants, analyze any nested functions, and * generate code for this function, including a stop opcode at the end. */ - CURRENT_TOKEN(&jsc.tokenStream).type = TOK_LC; - JSParseNode *pn = fn ? 
FunctionBody(cx, &jsc.tokenStream, &funcg) : NULL; + jsc.tokenStream.mutableCurrentToken()->type = TOK_LC; + JSParseNode *pn = fn ? jsc.functionBody() : NULL; if (pn) { if (!CheckStrictFormals(cx, &funcg, fun, pn)) { pn = NULL; - } else if (!js_MatchToken(cx, &jsc.tokenStream, TOK_EOF)) { - js_ReportCompileErrorNumber(cx, &jsc.tokenStream, NULL, - JSREPORT_ERROR, JSMSG_SYNTAX_ERROR); + } else if (!jsc.tokenStream.matchToken(TOK_EOF)) { + ReportCompileErrorNumber(cx, &jsc.tokenStream, NULL, JSREPORT_ERROR, + JSMSG_SYNTAX_ERROR); pn = NULL; } else if (!js_FoldConstants(cx, pn, &funcg)) { /* js_FoldConstants reported the error already. */ @@ -1718,8 +1641,6 @@ JSCompiler::compileFunctionBody(JSContext *cx, JSFunction *fun, JSPrincipals *pr * function is called indirectly from the variable declaration parser by way * of CheckDestructuring and its friends. */ -typedef struct BindData BindData; - typedef JSBool (*Binder)(JSContext *cx, BindData *data, JSAtom *atom, JSTreeContext *tc); @@ -1760,13 +1681,6 @@ BindLocalVariable(JSContext *cx, JSFunction *fun, JSAtom *atom, } #if JS_HAS_DESTRUCTURING -/* - * Forward declaration to maintain top-down presentation. - */ -static JSParseNode * -DestructuringExpr(JSContext *cx, BindData *data, JSTreeContext *tc, - JSTokenType tt); - static JSBool BindDestructuringArg(JSContext *cx, BindData *data, JSAtom *atom, JSTreeContext *tc) @@ -1783,8 +1697,8 @@ BindDestructuringArg(JSContext *cx, BindData *data, JSAtom *atom, JSLocalKind localKind = js_LookupLocal(cx, tc->fun, atom, NULL); if (localKind != JSLOCAL_NONE) { - js_ReportCompileErrorNumber(cx, TS(tc->compiler), NULL, - JSREPORT_ERROR, JSMSG_DESTRUCT_DUP_ARG); + ReportCompileErrorNumber(cx, TS(tc->compiler), NULL, JSREPORT_ERROR, + JSMSG_DESTRUCT_DUP_ARG); return JS_FALSE; } JS_ASSERT(!tc->decls.lookup(atom)); @@ -1832,21 +1746,18 @@ JSCompiler::newFunction(JSTreeContext *tc, JSAtom *atom, uintN lambda) } static JSBool -MatchOrInsertSemicolon(JSContext *cx, JSTokenStream *ts) +MatchOrInsertSemicolon(JSContext *cx, TokenStream *ts) { - JSTokenType tt; - ts->flags |= TSF_OPERAND; - tt = js_PeekTokenSameLine(cx, ts); + TokenKind tt = ts->peekTokenSameLine(); ts->flags &= ~TSF_OPERAND; if (tt == TOK_ERROR) return JS_FALSE; if (tt != TOK_EOF && tt != TOK_EOL && tt != TOK_SEMI && tt != TOK_RC) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_SEMI_BEFORE_STMNT); + ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, JSMSG_SEMI_BEFORE_STMNT); return JS_FALSE; } - (void) js_MatchToken(cx, ts, TOK_SEMI); + (void) ts->matchToken(TOK_SEMI); return JS_TRUE; } @@ -2032,7 +1943,7 @@ JSCompiler::markFunArgs(JSFunctionBox *funbox, uintN tcflags) * funarg if it is immediately applied. However, if its * name is used in an escaping function nested within * it, then it must become flagged as a funarg again. - * See bug 545980. + * See bug 545980. 
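Review note: the MatchOrInsertSemicolon hunk above is the parser's entire automatic-semicolon-insertion rule — peek on the same line (in operand mode, omitted here), and only EOF, a line break, an explicit ';', or a closing '}' may legally end the statement. A standalone sketch with stub token kinds:

    #include <cstdio>

    enum TokenKind { TOK_EOF, TOK_EOL, TOK_SEMI, TOK_RC, TOK_NAME, TOK_ERROR };

    // Stub stream holding a single lookahead token, enough to drive the check.
    struct TokenStream {
        TokenKind next;
        TokenKind peekTokenSameLine() { return next; }
        bool matchToken(TokenKind tt) {          // consume iff it is the expected kind
            if (next != tt)
                return false;
            next = TOK_EOF;
            return true;
        }
    };

    // Mirrors MatchOrInsertSemicolon: a statement may end at EOF, a newline,
    // an explicit ';', or a '}' -- anything else is "missing ; before statement".
    bool matchOrInsertSemicolon(TokenStream &ts) {
        TokenKind tt = ts.peekTokenSameLine();
        if (tt == TOK_ERROR)
            return false;
        if (tt != TOK_EOF && tt != TOK_EOL && tt != TOK_SEMI && tt != TOK_RC) {
            std::fprintf(stderr, "missing ; before statement\n");
            return false;
        }
        (void) ts.matchToken(TOK_SEMI);          // eat the ';' if it is really there
        return true;
    }

    int main() {
        TokenStream a{TOK_SEMI}, b{TOK_NAME};
        std::printf("%d %d\n", matchOrInsertSemicolon(a), matchOrInsertSemicolon(b));  // 1 0
    }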
*/ afunbox = funbox; uintN calleeLevel = UPVAR_FRAME_SKIP(lexdep->pn_cookie); @@ -2234,7 +2145,7 @@ static void DeoptimizeUsesWithin(JSDefinition *dn, JSFunctionBox *funbox, uint32& tcflags) { uintN ndeoptimized = 0; - const JSTokenPos &pos = funbox->node->pn_body->pn_pos; + const TokenPos &pos = funbox->node->pn_body->pn_pos; for (JSParseNode *pnu = dn->dn_uses; pnu; pnu = pnu->pn_link) { JS_ASSERT(pnu->pn_used); @@ -2489,9 +2400,10 @@ JSDefinition::kindString(Kind kind) } static JSFunctionBox * -EnterFunction(JSParseNode *fn, JSTreeContext *tc, JSTreeContext *funtc, - JSAtom *funAtom = NULL, uintN lambda = JSFUN_LAMBDA) +EnterFunction(JSParseNode *fn, JSTreeContext *funtc, JSAtom *funAtom = NULL, + uintN lambda = JSFUN_LAMBDA) { + JSTreeContext *tc = funtc->parent; JSFunction *fun = tc->compiler->newFunction(tc, funAtom, lambda); if (!fun) return NULL; @@ -2508,7 +2420,6 @@ EnterFunction(JSParseNode *fn, JSTreeContext *tc, JSTreeContext *funtc, return NULL; funtc->fun = fun; funtc->funbox = funbox; - funtc->parent = tc; if (!SetStaticLevel(funtc, tc->staticLevel + 1)) return NULL; @@ -2516,9 +2427,10 @@ EnterFunction(JSParseNode *fn, JSTreeContext *tc, JSTreeContext *funtc, } static bool -LeaveFunction(JSParseNode *fn, JSTreeContext *funtc, JSTreeContext *tc, - JSAtom *funAtom = NULL, uintN lambda = JSFUN_LAMBDA) +LeaveFunction(JSParseNode *fn, JSTreeContext *funtc, JSAtom *funAtom = NULL, + uintN lambda = JSFUN_LAMBDA) { + JSTreeContext *tc = funtc->parent; tc->blockidGen = funtc->blockidGen; JSFunctionBox *funbox = fn->pn_funbox; @@ -2631,7 +2543,7 @@ LeaveFunction(JSParseNode *fn, JSTreeContext *funtc, JSTreeContext *tc, if (funtc->lexdeps.count - foundCallee != 0) { JSParseNode *body = fn->pn_body; - fn->pn_body = NewParseNode(PN_NAMESET, tc); + fn->pn_body = NameSetNode::create(tc); if (!fn->pn_body) return false; @@ -2649,13 +2561,12 @@ LeaveFunction(JSParseNode *fn, JSTreeContext *funtc, JSTreeContext *tc, return true; } -static JSParseNode * -FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, - uintN lambda) +JSParseNode * +JSCompiler::functionDef(uintN lambda) { JSOp op; JSParseNode *pn, *body, *result; - JSTokenType tt; + TokenKind tt; JSAtom *funAtom; JSAtomListElement *ale; #if JS_HAS_DESTRUCTURING @@ -2666,9 +2577,9 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, /* Make a TOK_FUNCTION node. */ #if JS_HAS_GETTER_SETTER - op = CURRENT_TOKEN(ts).t_op; + op = tokenStream.currentToken().t_op; #endif - pn = NewParseNode(PN_FUNC, tc); + pn = FunctionNode::create(tc); if (!pn) return NULL; pn->pn_body = NULL; @@ -2676,7 +2587,7 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, /* * If a lambda, give up on JSOP_{GET,CALL}UPVAR usage unless this function - * is immediately applied (we clear PND_FUNARG if so -- see MemberExpr). + * is immediately applied (we clear PND_FUNARG if so -- see memberExpr). * * Also treat function sub-statements (non-lambda, non-top-level functions) * as escaping funargs, since we can't statically analyze their definitions @@ -2686,19 +2597,19 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, pn->pn_dflags = (lambda || !topLevel) ? PND_FUNARG : 0; /* Scan the optional function name into funAtom. 
*/ - ts->flags |= TSF_KEYWORD_IS_NAME; - tt = js_GetToken(cx, ts); - ts->flags &= ~TSF_KEYWORD_IS_NAME; + tokenStream.flags |= TSF_KEYWORD_IS_NAME; + tt = tokenStream.getToken(); + tokenStream.flags &= ~TSF_KEYWORD_IS_NAME; if (tt == TOK_NAME) { - funAtom = CURRENT_TOKEN(ts).t_atom; + funAtom = tokenStream.currentToken().t_atom; } else { - if (lambda == 0 && (cx->options & JSOPTION_ANONFUNFIX)) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_SYNTAX_ERROR); + if (lambda == 0 && (context->options & JSOPTION_ANONFUNFIX)) { + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_SYNTAX_ERROR); return NULL; } funAtom = NULL; - js_UngetToken(ts); + tokenStream.ungetToken(); } /* @@ -2714,16 +2625,16 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, JS_ASSERT(!dn->pn_used); JS_ASSERT(dn->pn_defn); - if (JS_HAS_STRICT_OPTION(cx) || dn_kind == JSDefinition::CONST) { - const char *name = js_AtomToPrintableString(cx, funAtom); + if (JS_HAS_STRICT_OPTION(context) || dn_kind == JSDefinition::CONST) { + const char *name = js_AtomToPrintableString(context, funAtom); if (!name || - !js_ReportCompileErrorNumber(cx, ts, NULL, - (dn_kind != JSDefinition::CONST) - ? JSREPORT_WARNING | JSREPORT_STRICT - : JSREPORT_ERROR, - JSMSG_REDECLARED_VAR, - JSDefinition::kindString(dn_kind), - name)) { + !ReportCompileErrorNumber(context, &tokenStream, NULL, + (dn_kind != JSDefinition::CONST) + ? JSREPORT_WARNING | JSREPORT_STRICT + : JSREPORT_ERROR, + JSMSG_REDECLARED_VAR, + JSDefinition::kindString(dn_kind), + name)) { return NULL; } } @@ -2739,7 +2650,7 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, } else if (topLevel) { /* * If this function was used before it was defined, claim the - * pre-created definition node for this function that PrimaryExpr + * pre-created definition node for this function that primaryExpr * put in tc->lexdeps on first forward reference, and recycle pn. */ JSHashEntry **hep; @@ -2786,12 +2697,12 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, * we add a variable even if a parameter with the given name * already exists. */ - localKind = js_LookupLocal(cx, tc->fun, funAtom, &index); + localKind = js_LookupLocal(context, tc->fun, funAtom, &index); switch (localKind) { case JSLOCAL_NONE: case JSLOCAL_ARG: index = tc->fun->u.i.nvars; - if (!js_AddLocal(cx, tc->fun, funAtom, JSLOCAL_VAR)) + if (!js_AddLocal(context, tc->fun, funAtom, JSLOCAL_VAR)) return NULL; /* FALL THROUGH */ @@ -2806,10 +2717,12 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, } } - /* Initialize early for possible flags mutation via DestructuringExpr. */ + JSTreeContext *outertc = tc; + + /* Initialize early for possible flags mutation via destructuringExpr. */ JSTreeContext funtc(tc->compiler); - JSFunctionBox *funbox = EnterFunction(pn, tc, &funtc, funAtom, lambda); + JSFunctionBox *funbox = EnterFunction(pn, &funtc, funAtom, lambda); if (!funbox) return NULL; @@ -2822,9 +2735,9 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, /* Now parse formal argument list and compute fun->nargs. 
*/ MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_FORMAL); - if (!js_MatchToken(cx, ts, TOK_RP)) { + if (!tokenStream.matchToken(TOK_RP)) { do { - tt = js_GetToken(cx, ts); + tt = tokenStream.getToken(); switch (tt) { #if JS_HAS_DESTRUCTURING case TOK_LB: @@ -2848,7 +2761,7 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, data.pn = NULL; data.op = JSOP_DEFVAR; data.binder = BindDestructuringArg; - lhs = DestructuringExpr(cx, &data, &funtc, tt); + lhs = destructuringExpr(&data, tt); if (!lhs) return NULL; @@ -2857,7 +2770,7 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, * parameter that is to be destructured. */ slot = fun->nargs; - if (!js_AddLocal(cx, fun, NULL, JSLOCAL_ARG)) + if (!js_AddLocal(context, fun, NULL, JSLOCAL_ARG)) return NULL; /* @@ -2865,7 +2778,7 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, * anonymous positional parameter into the destructuring * left-hand-side expression and accumulate it in list. */ - rhs = NewNameNode(cx, cx->runtime->atomState.emptyAtom, &funtc); + rhs = NameNode::create(context->runtime->atomState.emptyAtom, &funtc); if (!rhs) return NULL; rhs->pn_type = TOK_NAME; @@ -2873,11 +2786,11 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, rhs->pn_cookie = MAKE_UPVAR_COOKIE(funtc.staticLevel, slot); rhs->pn_dflags |= PND_BOUND; - item = NewBinary(TOK_ASSIGN, JSOP_NOP, lhs, rhs, &funtc); + item = JSParseNode::newBinaryOrAppend(TOK_ASSIGN, JSOP_NOP, lhs, rhs, &funtc); if (!item) return NULL; if (!list) { - list = NewParseNode(PN_LIST, &funtc); + list = ListNode::create(&funtc); if (!list) return NULL; list->pn_type = TOK_COMMA; @@ -2890,11 +2803,11 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, case TOK_NAME: { - JSAtom *atom = CURRENT_TOKEN(ts).t_atom; + JSAtom *atom = tokenStream.currentToken().t_atom; if (!DefineArg(pn, atom, fun->nargs, &funtc)) return NULL; #ifdef JS_HAS_DESTRUCTURING - /* + /* * ECMA-262 requires us to support duplicate parameter names, but if the * parameter list includes destructuring, we consider the code to have * opted in to higher standards, and forbid duplicates. We may see a @@ -2904,20 +2817,20 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, * mode code), but we do those tests in one place below, after having * parsed the body. 
*/ - if (js_LookupLocal(cx, fun, atom, NULL) != JSLOCAL_NONE) { + if (js_LookupLocal(context, fun, atom, NULL) != JSLOCAL_NONE) { duplicatedArg = atom; if (destructuringArg) goto report_dup_and_destructuring; } #endif - if (!js_AddLocal(cx, fun, atom, JSLOCAL_ARG)) + if (!js_AddLocal(context, fun, atom, JSLOCAL_ARG)) return NULL; break; } default: - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_MISSING_FORMAL); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_MISSING_FORMAL); /* FALL THROUGH */ case TOK_ERROR: return NULL; @@ -2925,48 +2838,47 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, #if JS_HAS_DESTRUCTURING report_dup_and_destructuring: JSDefinition *dn = ALE_DEFN(funtc.decls.lookup(duplicatedArg)); - js_ReportCompileErrorNumber(cx, TS(tc->compiler), dn, - JSREPORT_ERROR, - JSMSG_DESTRUCT_DUP_ARG); + ReportCompileErrorNumber(context, &tokenStream, dn, JSREPORT_ERROR, + JSMSG_DESTRUCT_DUP_ARG); return NULL; #endif } - } while (js_MatchToken(cx, ts, TOK_COMMA)); + } while (tokenStream.matchToken(TOK_COMMA)); MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_FORMAL); } #if JS_HAS_EXPR_CLOSURES - ts->flags |= TSF_OPERAND; - tt = js_GetToken(cx, ts); - ts->flags &= ~TSF_OPERAND; + tokenStream.flags |= TSF_OPERAND; + tt = tokenStream.getToken(); + tokenStream.flags &= ~TSF_OPERAND; if (tt != TOK_LC) { - js_UngetToken(ts); + tokenStream.ungetToken(); fun->flags |= JSFUN_EXPR_CLOSURE; } #else MUST_MATCH_TOKEN(TOK_LC, JSMSG_CURLY_BEFORE_BODY); #endif - body = FunctionBody(cx, ts, &funtc); + body = functionBody(); if (!body) return NULL; - if (!CheckStrictBinding(cx, &funtc, funAtom, pn)) + if (!CheckStrictBinding(context, &funtc, funAtom, pn)) return NULL; - if (!CheckStrictFormals(cx, &funtc, fun, pn)) + if (!CheckStrictFormals(context, &funtc, fun, pn)) return NULL; #if JS_HAS_EXPR_CLOSURES if (tt == TOK_LC) MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_AFTER_BODY); - else if (lambda == 0 && !MatchOrInsertSemicolon(cx, ts)) + else if (lambda == 0 && !MatchOrInsertSemicolon(context, &tokenStream)) return NULL; #else MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_AFTER_BODY); #endif - pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end; + pn->pn_pos.end = tokenStream.currentToken().pos.end; #if JS_HAS_DESTRUCTURING /* @@ -2980,7 +2892,7 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, if (body->pn_arity != PN_LIST) { JSParseNode *block; - block = NewParseNode(PN_LIST, tc); + block = ListNode::create(outertc); if (!block) return NULL; block->pn_type = TOK_SEQ; @@ -2990,7 +2902,7 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, body = block; } - item = NewParseNode(PN_UNARY, tc); + item = UnaryNode::create(outertc); if (!item) return NULL; @@ -3014,7 +2926,7 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, */ if (funtc.flags & TCF_FUN_HEAVYWEIGHT) { fun->flags |= JSFUN_HEAVYWEIGHT; - tc->flags |= TCF_FUN_HEAVYWEIGHT; + outertc->flags |= TCF_FUN_HEAVYWEIGHT; } else { /* * If this function is a named statement function not at top-level @@ -3022,7 +2934,7 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, * enclosing function, if any, must be heavyweight. */ if (!topLevel && lambda == 0 && funAtom) - tc->flags |= TCF_FUN_HEAVYWEIGHT; + outertc->flags |= TCF_FUN_HEAVYWEIGHT; } result = pn; @@ -3039,7 +2951,7 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, * Edition 3 would have it). Backward compatibility must trump all, * unless JSOPTION_ANONFUNFIX is set. 
*/ - result = NewParseNode(PN_UNARY, tc); + result = UnaryNode::create(outertc); if (!result) return NULL; result->pn_type = TOK_SEMI; @@ -3069,28 +2981,28 @@ FunctionDef(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, pn->pn_body = body; } - pn->pn_blockid = tc->blockid(); + pn->pn_blockid = outertc->blockid(); - if (!LeaveFunction(pn, &funtc, tc, funAtom, lambda)) + if (!LeaveFunction(pn, &funtc, funAtom, lambda)) return NULL; /* If the surrounding function is not strict code, reset the lexer. */ - if (!(tc->flags & TCF_STRICT_MODE_CODE)) - ts->flags &= ~TSF_STRICT_MODE_CODE; + if (!(outertc->flags & TCF_STRICT_MODE_CODE)) + tokenStream.flags &= ~TSF_STRICT_MODE_CODE; return result; } -static JSParseNode * -FunctionStmt(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::functionStmt() { - return FunctionDef(cx, ts, tc, 0); + return functionDef(0); } -static JSParseNode * -FunctionExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::functionExpr() { - return FunctionDef(cx, ts, tc, JSFUN_LAMBDA); + return functionDef(JSFUN_LAMBDA); } /* @@ -3111,16 +3023,16 @@ FunctionExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) * That is, a statement can be a Directive Prologue member, even * if it can't possibly be a directive, now or in the future. */ -static bool -RecognizeDirectivePrologue(JSContext *cx, JSTreeContext *tc, JSParseNode *pn) +bool +JSCompiler::recognizeDirectivePrologue(JSParseNode *pn) { if (!pn->isDirectivePrologueMember()) return false; if (pn->isDirective()) { JSAtom *directive = pn->pn_kid->pn_atom; - if (directive == cx->runtime->atomState.useStrictAtom) { + if (directive == context->runtime->atomState.useStrictAtom) { tc->flags |= TCF_STRICT_MODE_CODE; - tc->compiler->tokenStream.flags |= TSF_STRICT_MODE_CODE; + tokenStream.flags |= TSF_STRICT_MODE_CODE; } } return true; @@ -3131,16 +3043,16 @@ RecognizeDirectivePrologue(JSContext *cx, JSTreeContext *tc, JSParseNode *pn) * statements' trees. If called from block-parsing code, the caller must * match { before and } after. 
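Review note: recognizeDirectivePrologue (used from statements and compileScript) encodes the ES5 rule quoted in the comment above — keep treating statements as prologue candidates until the first non-candidate, and flip the strict-mode bits the moment a literal "use strict" appears. A condensed sketch of that loop; Stmt and the helpers are hypothetical stand-ins for the JSParseNode shape tests:

    #include <string>
    #include <vector>

    // Hypothetical statement summary; the real code inspects JSParseNode
    // shapes (an expression statement whose kid is a string literal).
    struct Stmt {
        bool isExprStringLiteral;   // pn->isDirectivePrologueMember()
        std::string literal;        // pn->pn_kid->pn_atom, when it is a directive
    };

    struct Flags { bool strictMode = false; };

    // One step of the prologue scan: returns whether the *next* statement can
    // still belong to the Directive Prologue, as recognizeDirectivePrologue does.
    bool recognizeDirective(const Stmt &s, Flags &f) {
        if (!s.isExprStringLiteral)
            return false;                   // prologue ends at the first non-candidate
        if (s.literal == "use strict")
            f.strictMode = true;            // TCF_STRICT_MODE_CODE / TSF_STRICT_MODE_CODE
        return true;                        // unknown directives are tolerated
    }

    void parseBody(const std::vector<Stmt> &body, Flags &f) {
        bool inPrologue = true;
        for (const Stmt &s : body) {
            if (inPrologue)
                inPrologue = recognizeDirective(s, f);
            // ...parse/emit the statement as usual...
        }
    }

    int main() {
        Flags f;
        parseBody({{true, "use strict"}, {false, ""}}, f);
        return f.strictMode ? 0 : 1;
    }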
*/ -static JSParseNode * -Statements(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::statements() { JSParseNode *pn, *pn2, *saveBlock; - JSTokenType tt; + TokenKind tt; bool inDirectivePrologue = tc->atTopLevel(); - JS_CHECK_RECURSION(cx, return NULL); + JS_CHECK_RECURSION(context, return NULL); - pn = NewParseNode(PN_LIST, tc); + pn = ListNode::create(tc); if (!pn) return NULL; pn->pn_type = TOK_LC; @@ -3150,26 +3062,26 @@ Statements(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) tc->blockNode = pn; for (;;) { - ts->flags |= TSF_OPERAND; - tt = js_PeekToken(cx, ts); - ts->flags &= ~TSF_OPERAND; + tokenStream.flags |= TSF_OPERAND; + tt = tokenStream.peekToken(); + tokenStream.flags &= ~TSF_OPERAND; if (tt <= TOK_EOF || tt == TOK_RC) { if (tt == TOK_ERROR) { - if (ts->flags & TSF_EOF) - ts->flags |= TSF_UNEXPECTED_EOF; + if (tokenStream.flags & TSF_EOF) + tokenStream.flags |= TSF_UNEXPECTED_EOF; return NULL; } break; } - pn2 = Statement(cx, ts, tc); + pn2 = statement(); if (!pn2) { - if (ts->flags & TSF_EOF) - ts->flags |= TSF_UNEXPECTED_EOF; + if (tokenStream.flags & TSF_EOF) + tokenStream.flags |= TSF_UNEXPECTED_EOF; return NULL; } if (inDirectivePrologue) - inDirectivePrologue = RecognizeDirectivePrologue(cx, tc, pn2); + inDirectivePrologue = recognizeDirectivePrologue(pn2); if (pn2->pn_type == TOK_FUNCTION) { /* @@ -3198,17 +3110,17 @@ Statements(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) pn = tc->blockNode; tc->blockNode = saveBlock; - pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end; + pn->pn_pos.end = tokenStream.currentToken().pos.end; return pn; } -static JSParseNode * -Condition(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::condition() { JSParseNode *pn; MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_COND); - pn = ParenExpr(cx, ts, tc, NULL, NULL); + pn = parenExpr(NULL, NULL); if (!pn) return NULL; MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_COND); @@ -3217,27 +3129,25 @@ Condition(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) if (pn->pn_type == TOK_ASSIGN && pn->pn_op == JSOP_NOP && !pn->pn_parens && - !js_ReportCompileErrorNumber(cx, ts, NULL, - JSREPORT_WARNING | JSREPORT_STRICT, - JSMSG_EQUAL_AS_ASSIGN, - "")) { + !ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_WARNING | JSREPORT_STRICT, + JSMSG_EQUAL_AS_ASSIGN, "")) { return NULL; } return pn; } static JSBool -MatchLabel(JSContext *cx, JSTokenStream *ts, JSParseNode *pn) +MatchLabel(JSContext *cx, TokenStream *ts, JSParseNode *pn) { JSAtom *label; - JSTokenType tt; + TokenKind tt; - tt = js_PeekTokenSameLine(cx, ts); + tt = ts->peekTokenSameLine(); if (tt == TOK_ERROR) return JS_FALSE; if (tt == TOK_NAME) { - (void) js_GetToken(cx, ts); - label = CURRENT_TOKEN(ts).t_atom; + (void) ts->getToken(); + label = ts->currentToken().t_atom; } else { label = NULL; } @@ -3268,20 +3178,20 @@ BindLet(JSContext *cx, BindData *data, JSAtom *atom, JSTreeContext *tc) if (ale && ALE_DEFN(ale)->pn_blockid == tc->blockid()) { const char *name = js_AtomToPrintableString(cx, atom); if (name) { - js_ReportCompileErrorNumber(cx, TS(tc->compiler), pn, - JSREPORT_ERROR, JSMSG_REDECLARED_VAR, - (ale && ALE_DEFN(ale)->isConst()) - ? js_const_str - : js_variable_str, - name); + ReportCompileErrorNumber(cx, TS(tc->compiler), pn, + JSREPORT_ERROR, JSMSG_REDECLARED_VAR, + (ale && ALE_DEFN(ale)->isConst()) + ? 
js_const_str + : js_variable_str, + name); } return JS_FALSE; } n = OBJ_BLOCK_COUNT(cx, blockObj); if (n == JS_BIT(16)) { - js_ReportCompileErrorNumber(cx, TS(tc->compiler), pn, - JSREPORT_ERROR, data->let.overflow); + ReportCompileErrorNumber(cx, TS(tc->compiler), pn, + JSREPORT_ERROR, data->let.overflow); return JS_FALSE; } @@ -3317,12 +3227,12 @@ BindLet(JSContext *cx, BindData *data, JSAtom *atom, JSTreeContext *tc) * slots in jsemit.cpp:EmitEnterBlock. */ uintN slot = JSSLOT_FREE(&js_BlockClass) + n; - if (slot >= STOBJ_NSLOTS(blockObj) && + if (slot >= blockObj->numSlots() && !js_GrowSlots(cx, blockObj, slot + 1)) { return JS_FALSE; } OBJ_SCOPE(blockObj)->freeslot = slot + 1; - STOBJ_SET_SLOT(blockObj, slot, PRIVATE_TO_JSVAL(pn)); + blockObj->setSlot(slot, PRIVATE_TO_JSVAL(pn)); return JS_TRUE; } @@ -3391,14 +3301,14 @@ BindVarOrConst(JSContext *cx, BindData *data, JSAtom *atom, JSTreeContext *tc) return JS_FALSE; if (op == JSOP_DEFCONST) { - js_ReportCompileErrorNumber(cx, TS(tc->compiler), pn, - JSREPORT_ERROR, JSMSG_REDECLARED_PARAM, - name); + ReportCompileErrorNumber(cx, TS(tc->compiler), pn, + JSREPORT_ERROR, JSMSG_REDECLARED_PARAM, + name); return JS_FALSE; } - if (!js_ReportCompileErrorNumber(cx, TS(tc->compiler), pn, - JSREPORT_WARNING | JSREPORT_STRICT, - JSMSG_VAR_HIDES_ARG, name)) { + if (!ReportCompileErrorNumber(cx, TS(tc->compiler), pn, + JSREPORT_WARNING | JSREPORT_STRICT, + JSMSG_VAR_HIDES_ARG, name)) { return JS_FALSE; } } else { @@ -3412,13 +3322,13 @@ BindVarOrConst(JSContext *cx, BindData *data, JSAtom *atom, JSTreeContext *tc) : error) { name = js_AtomToPrintableString(cx, atom); if (!name || - !js_ReportCompileErrorNumber(cx, TS(tc->compiler), pn, - !error - ? JSREPORT_WARNING | JSREPORT_STRICT - : JSREPORT_ERROR, - JSMSG_REDECLARED_VAR, - JSDefinition::kindString(dn_kind), - name)) { + !ReportCompileErrorNumber(cx, TS(tc->compiler), pn, + !error + ? 
JSREPORT_WARNING | JSREPORT_STRICT + : JSREPORT_ERROR, + JSMSG_REDECLARED_VAR, + JSDefinition::kindString(dn_kind), + name)) { return JS_FALSE; } } @@ -3449,7 +3359,7 @@ BindVarOrConst(JSContext *cx, BindData *data, JSAtom *atom, JSTreeContext *tc) JSParseNode *pnu = pn; if (pn->pn_defn) { - pnu = NewNameNode(cx, atom, tc); + pnu = NameNode::create(atom, tc); if (!pnu) return JS_FALSE; } @@ -3486,7 +3396,7 @@ BindVarOrConst(JSContext *cx, BindData *data, JSAtom *atom, JSTreeContext *tc) pn = ALE_DEFN(ale); tc->lexdeps.rawRemove(tc->compiler, ale, hep); } else { - JSParseNode *pn2 = NewNameNode(cx, atom, tc); + JSParseNode *pn2 = NameNode::create(atom, tc); if (!pn2) return JS_FALSE; @@ -3594,7 +3504,7 @@ MakeSetCall(JSContext *cx, JSParseNode *pn, JSTreeContext *tc, uintN msg) JS_ASSERT(pn->pn_op == JSOP_CALL || pn->pn_op == JSOP_EVAL || pn->pn_op == JSOP_APPLY); pn2 = pn->pn_head; if (pn2->pn_type == TOK_FUNCTION && (pn2->pn_funbox->tcflags & TCF_GENEXP_LAMBDA)) { - js_ReportCompileErrorNumber(cx, TS(tc->compiler), pn, JSREPORT_ERROR, msg); + ReportCompileErrorNumber(cx, TS(tc->compiler), pn, JSREPORT_ERROR, msg); return JS_FALSE; } pn->pn_op = JSOP_SETCALL; @@ -3731,8 +3641,8 @@ BindDestructuringLHS(JSContext *cx, JSParseNode *pn, JSTreeContext *tc) #endif default: - js_ReportCompileErrorNumber(cx, TS(tc->compiler), pn, - JSREPORT_ERROR, JSMSG_BAD_LEFTSIDE_OF_ASS); + ReportCompileErrorNumber(cx, TS(tc->compiler), pn, + JSREPORT_ERROR, JSMSG_BAD_LEFTSIDE_OF_ASS); return JS_FALSE; } @@ -3893,12 +3803,12 @@ FindPropertyValue(JSParseNode *pn, JSParseNode *pnid, FindPropValData *data) * simple names; the destructuring defines them as new variables. * * In both cases, other code parses the pattern as an arbitrary - * PrimaryExpr, and then, here in CheckDestructuring, verify that the + * primaryExpr, and then, here in CheckDestructuring, verify that the * tree is a valid destructuring expression. * * In assignment-like contexts, we parse the pattern with the * TCF_DECL_DESTRUCTURING flag clear, so the lvalue expressions in the - * pattern are parsed normally. PrimaryExpr links variable references + * pattern are parsed normally. primaryExpr links variable references * into the appropriate use chains; creates placeholder definitions; * and so on. CheckDestructuring is called with |data| NULL (since we * won't be binding any new names), and we specialize lvalues as @@ -3908,7 +3818,7 @@ FindPropertyValue(JSParseNode *pn, JSParseNode *pnid, FindPropValData *data) * processing would just be an obstruction, because we're going to * define the names that appear in the property value positions as new * variables anyway. In this case, we parse the pattern with - * TCF_DECL_DESTRUCTURING set, which directs PrimaryExpr to leave + * TCF_DECL_DESTRUCTURING set, which directs primaryExpr to leave * whatever name nodes it creates unconnected. 
Then, here in * CheckDestructuring, we require the pattern's property value * positions to be simple names, and define them as appropriate to the @@ -3929,15 +3839,15 @@ CheckDestructuring(JSContext *cx, BindData *data, JSParseNode *lhs, *rhs, *pn, *pn2; if (left->pn_type == TOK_ARRAYCOMP) { - js_ReportCompileErrorNumber(cx, TS(tc->compiler), left, - JSREPORT_ERROR, JSMSG_ARRAY_COMP_LEFTSIDE); + ReportCompileErrorNumber(cx, TS(tc->compiler), left, JSREPORT_ERROR, + JSMSG_ARRAY_COMP_LEFTSIDE); return JS_FALSE; } #if JS_HAS_DESTRUCTURING_SHORTHAND if (right && right->pn_arity == PN_LIST && (right->pn_xflags & PNX_DESTRUCT)) { - js_ReportCompileErrorNumber(cx, TS(tc->compiler), right, - JSREPORT_ERROR, JSMSG_BAD_OBJECT_INIT); + ReportCompileErrorNumber(cx, TS(tc->compiler), right, JSREPORT_ERROR, + JSMSG_BAD_OBJECT_INIT); return JS_FALSE; } #endif @@ -4044,8 +3954,8 @@ CheckDestructuring(JSContext *cx, BindData *data, return ok; no_var_name: - js_ReportCompileErrorNumber(cx, TS(tc->compiler), pn, JSREPORT_ERROR, - JSMSG_NO_VARIABLE_NAME); + ReportCompileErrorNumber(cx, TS(tc->compiler), pn, JSREPORT_ERROR, + JSMSG_NO_VARIABLE_NAME); ok = JS_FALSE; goto out; } @@ -4078,8 +3988,8 @@ UndominateInitializers(JSParseNode *left, JSParseNode *right, JSTreeContext *tc) #if JS_HAS_DESTRUCTURING_SHORTHAND if (right->pn_arity == PN_LIST && (right->pn_xflags & PNX_DESTRUCT)) { - js_ReportCompileErrorNumber(tc->compiler->context, TS(tc->compiler), right, - JSREPORT_ERROR, JSMSG_BAD_OBJECT_INIT); + ReportCompileErrorNumber(tc->compiler->context, TS(tc->compiler), right, JSREPORT_ERROR, + JSMSG_BAD_OBJECT_INIT); return JS_FALSE; } #endif @@ -4130,20 +4040,17 @@ UndominateInitializers(JSParseNode *left, JSParseNode *right, JSTreeContext *tc) return JS_TRUE; } -static JSParseNode * -DestructuringExpr(JSContext *cx, BindData *data, JSTreeContext *tc, - JSTokenType tt) +JSParseNode * +JSCompiler::destructuringExpr(BindData *data, TokenKind tt) { - JSTokenStream *ts; JSParseNode *pn; - ts = TS(tc->compiler); tc->flags |= TCF_DECL_DESTRUCTURING; - pn = PrimaryExpr(cx, ts, tc, tt, JS_FALSE); + pn = primaryExpr(tt, JS_FALSE); tc->flags &= ~TCF_DECL_DESTRUCTURING; if (!pn) return NULL; - if (!CheckDestructuring(cx, data, pn, NULL, tc)) + if (!CheckDestructuring(context, data, pn, NULL, tc)) return NULL; return pn; } @@ -4258,7 +4165,7 @@ CloneParseTree(JSParseNode *opn, JSTreeContext *tc) extern const char js_with_statement_str[]; static JSParseNode * -ContainsStmt(JSParseNode *pn, JSTokenType tt) +ContainsStmt(JSParseNode *pn, TokenKind tt) { JSParseNode *pn2, *pnt; @@ -4306,21 +4213,20 @@ ContainsStmt(JSParseNode *pn, JSTokenType tt) return NULL; } -static JSParseNode * -ReturnOrYield(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, - JSParser operandParser) +JSParseNode * +JSCompiler::returnOrYield(bool useAssignExpr) { - JSTokenType tt, tt2; + TokenKind tt, tt2; JSParseNode *pn, *pn2; - tt = CURRENT_TOKEN(ts).type; + tt = tokenStream.currentToken().type; if (tt == TOK_RETURN && !(tc->flags & TCF_IN_FUNCTION)) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_BAD_RETURN_OR_YIELD, js_return_str); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_BAD_RETURN_OR_YIELD, js_return_str); return NULL; } - pn = NewParseNode(PN_UNARY, tc); + pn = UnaryNode::create(tc); if (!pn) return NULL; @@ -4330,9 +4236,9 @@ ReturnOrYield(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, #endif /* This is ugly, but we don't want to require a semicolon. 
*/ - ts->flags |= TSF_OPERAND; - tt2 = js_PeekTokenSameLine(cx, ts); - ts->flags &= ~TSF_OPERAND; + tokenStream.flags |= TSF_OPERAND; + tt2 = tokenStream.peekTokenSameLine(); + tokenStream.flags &= ~TSF_OPERAND; if (tt2 == TOK_ERROR) return NULL; @@ -4343,7 +4249,7 @@ ReturnOrYield(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, tt2 != TOK_COLON && tt2 != TOK_COMMA)) #endif ) { - pn2 = operandParser(cx, ts, tc); + pn2 = useAssignExpr ? assignExpr() : expr(); if (!pn2) return NULL; #if JS_HAS_GENERATORS @@ -4361,15 +4267,15 @@ ReturnOrYield(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, if ((~tc->flags & (TCF_RETURN_EXPR | TCF_FUN_IS_GENERATOR)) == 0) { /* As in Python (see PEP-255), disallow return v; in generators. */ - ReportBadReturn(cx, tc, JSREPORT_ERROR, + ReportBadReturn(context, tc, JSREPORT_ERROR, JSMSG_BAD_GENERATOR_RETURN, JSMSG_BAD_ANON_GENERATOR_RETURN); return NULL; } - if (JS_HAS_STRICT_OPTION(cx) && + if (JS_HAS_STRICT_OPTION(context) && (~tc->flags & (TCF_RETURN_EXPR | TCF_RETURN_VOID)) == 0 && - !ReportBadReturn(cx, tc, JSREPORT_WARNING | JSREPORT_STRICT, + !ReportBadReturn(context, tc, JSREPORT_WARNING | JSREPORT_STRICT, JSMSG_NO_RETURN_VALUE, JSMSG_ANON_NO_RETURN_VALUE)) { return NULL; @@ -4379,14 +4285,14 @@ ReturnOrYield(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, } static JSParseNode * -PushLexicalScope(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, +PushLexicalScope(JSContext *cx, TokenStream *ts, JSTreeContext *tc, JSStmtInfo *stmt) { JSParseNode *pn; JSObject *obj; JSObjectBox *blockbox; - pn = NewParseNode(PN_NAME, tc); + pn = LexicalScopeNode::create(tc); if (!pn) return NULL; @@ -4412,43 +4318,43 @@ PushLexicalScope(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, #if JS_HAS_BLOCK_SCOPE -static JSParseNode * -LetBlock(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, JSBool statement) +JSParseNode * +JSCompiler::letBlock(JSBool statement) { JSParseNode *pn, *pnblock, *pnlet; JSStmtInfo stmtInfo; - JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_LET); + JS_ASSERT(tokenStream.currentToken().type == TOK_LET); /* Create the let binary node. */ - pnlet = NewParseNode(PN_BINARY, tc); + pnlet = BinaryNode::create(tc); if (!pnlet) return NULL; MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_LET); /* This is a let block or expression of the form: let (a, b, c) .... */ - pnblock = PushLexicalScope(cx, ts, tc, &stmtInfo); + pnblock = PushLexicalScope(context, &tokenStream, tc, &stmtInfo); if (!pnblock) return NULL; pn = pnblock; pn->pn_expr = pnlet; - pnlet->pn_left = Variables(cx, ts, tc, true); + pnlet->pn_left = variables(true); if (!pnlet->pn_left) return NULL; pnlet->pn_left->pn_xflags = PNX_POPVAR; MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_LET); - ts->flags |= TSF_OPERAND; - if (statement && !js_MatchToken(cx, ts, TOK_LC)) { + tokenStream.flags |= TSF_OPERAND; + if (statement && !tokenStream.matchToken(TOK_LC)) { /* * If this is really an expression in let statement guise, then we * need to wrap the TOK_LET node in a TOK_SEMI node so that we pop * the return value of the expression. 
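Review note: the `flags |= TSF_OPERAND; ... flags &= ~TSF_OPERAND;` dance around every peek in these hunks tells the scanner that a '/' at that point starts a regular expression, not a division operator. The patch keeps the manual toggling; purely as an editorial sketch (not part of this change), the same pattern expressed as an RAII guard:

    #include <cstdio>

    enum { TSF_OPERAND = 0x1 };

    struct TokenStream { unsigned flags = 0; };

    // Editorial sketch only: scoping the scanner mode with a guard makes the
    // set/clear pairs impossible to mismatch on early returns.
    class AutoOperandMode {
        TokenStream &ts;
        unsigned saved;
      public:
        explicit AutoOperandMode(TokenStream &s) : ts(s), saved(s.flags) {
            ts.flags |= TSF_OPERAND;      // '/' means regexp, not division, here
        }
        ~AutoOperandMode() { ts.flags = saved; }
    };

    int main() {
        TokenStream ts;
        {
            AutoOperandMode operand(ts);
            std::printf("in operand context: %u\n", ts.flags & TSF_OPERAND);
            // ts.peekTokenSameLine() would run here in the real parser
        }
        std::printf("restored: %u\n", ts.flags & TSF_OPERAND);
    }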
*/ - pn = NewParseNode(PN_UNARY, tc); + pn = UnaryNode::create(tc); if (!pn) return NULL; pn->pn_type = TOK_SEMI; @@ -4457,10 +4363,10 @@ LetBlock(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, JSBool statement) statement = JS_FALSE; } - ts->flags &= ~TSF_OPERAND; + tokenStream.flags &= ~TSF_OPERAND; if (statement) { - pnlet->pn_right = Statements(cx, ts, tc); + pnlet->pn_right = statements(); if (!pnlet->pn_right) return NULL; MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_AFTER_LET); @@ -4470,7 +4376,7 @@ LetBlock(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, JSBool statement) * result down after popping the block, and clear statement. */ pnblock->pn_op = JSOP_LEAVEBLOCKEXPR; - pnlet->pn_right = AssignExpr(cx, ts, tc); + pnlet->pn_right = assignExpr(); if (!pnlet->pn_right) return NULL; } @@ -4527,7 +4433,7 @@ NewBindingNode(JSAtom *atom, JSTreeContext *tc, bool let = false) } /* Make a new node for this declarator name (or destructuring pattern). */ - pn = NewNameNode(tc->compiler->context, atom, tc); + pn = NameNode::create(atom, tc); if (!pn) return NULL; return pn; @@ -4608,23 +4514,23 @@ RebindLets(JSParseNode *pn, JSTreeContext *tc) } #endif /* JS_HAS_BLOCK_SCOPE */ -static JSParseNode * -Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::statement() { - JSTokenType tt; + TokenKind tt; JSParseNode *pn, *pn1, *pn2, *pn3, *pn4; JSStmtInfo stmtInfo, *stmt, *stmt2; JSAtom *label; - JS_CHECK_RECURSION(cx, return NULL); + JS_CHECK_RECURSION(context, return NULL); - ts->flags |= TSF_OPERAND; - tt = js_GetToken(cx, ts); - ts->flags &= ~TSF_OPERAND; + tokenStream.flags |= TSF_OPERAND; + tt = tokenStream.getToken(); + tokenStream.flags &= ~TSF_OPERAND; #if JS_HAS_GETTER_SETTER if (tt == TOK_NAME) { - tt = CheckGetterOrSetter(cx, ts, TOK_FUNCTION); + tt = CheckGetterOrSetter(context, &tokenStream, TOK_FUNCTION); if (tt == TOK_ERROR) return NULL; } @@ -4633,36 +4539,36 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) switch (tt) { case TOK_FUNCTION: #if JS_HAS_XML_SUPPORT - ts->flags |= TSF_KEYWORD_IS_NAME; - tt = js_PeekToken(cx, ts); - ts->flags &= ~TSF_KEYWORD_IS_NAME; + tokenStream.flags |= TSF_KEYWORD_IS_NAME; + tt = tokenStream.peekToken(); + tokenStream.flags &= ~TSF_KEYWORD_IS_NAME; if (tt == TOK_DBLCOLON) goto expression; #endif - return FunctionStmt(cx, ts, tc); + return functionStmt(); case TOK_IF: /* An IF node has three kids: condition, then, and optional else. 
*/ - pn = NewParseNode(PN_TERNARY, tc); + pn = TernaryNode::create(tc); if (!pn) return NULL; - pn1 = Condition(cx, ts, tc); + pn1 = condition(); if (!pn1) return NULL; js_PushStatement(tc, &stmtInfo, STMT_IF, -1); - pn2 = Statement(cx, ts, tc); + pn2 = statement(); if (!pn2) return NULL; - ts->flags |= TSF_OPERAND; - if (js_MatchToken(cx, ts, TOK_ELSE)) { - ts->flags &= ~TSF_OPERAND; + tokenStream.flags |= TSF_OPERAND; + if (tokenStream.matchToken(TOK_ELSE)) { + tokenStream.flags &= ~TSF_OPERAND; stmtInfo.type = STMT_ELSE; - pn3 = Statement(cx, ts, tc); + pn3 = statement(); if (!pn3) return NULL; pn->pn_pos.end = pn3->pn_pos.end; } else { - ts->flags &= ~TSF_OPERAND; + tokenStream.flags &= ~TSF_OPERAND; pn3 = NULL; pn->pn_pos.end = pn2->pn_pos.end; } @@ -4677,13 +4583,13 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) JSParseNode *pn5, *saveBlock; JSBool seenDefault = JS_FALSE; - pn = NewParseNode(PN_BINARY, tc); + pn = BinaryNode::create(tc); if (!pn) return NULL; MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_SWITCH); /* pn1 points to the switch's discriminant. */ - pn1 = ParenExpr(cx, ts, tc, NULL, NULL); + pn1 = parenExpr(NULL, NULL); if (!pn1) return NULL; @@ -4697,7 +4603,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) js_PushStatement(tc, &stmtInfo, STMT_SWITCH, -1); /* pn2 is a list of case nodes. The default case has pn_left == NULL */ - pn2 = NewParseNode(PN_LIST, tc); + pn2 = ListNode::create(tc); if (!pn2) return NULL; pn2->makeEmpty(); @@ -4706,30 +4612,30 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) saveBlock = tc->blockNode; tc->blockNode = pn2; - while ((tt = js_GetToken(cx, ts)) != TOK_RC) { + while ((tt = tokenStream.getToken()) != TOK_RC) { switch (tt) { case TOK_DEFAULT: if (seenDefault) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_TOO_MANY_DEFAULTS); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_TOO_MANY_DEFAULTS); return NULL; } seenDefault = JS_TRUE; /* FALL THROUGH */ case TOK_CASE: - pn3 = NewParseNode(PN_BINARY, tc); + pn3 = BinaryNode::create(tc); if (!pn3) return NULL; if (tt == TOK_CASE) { - pn3->pn_left = Expr(cx, ts, tc); + pn3->pn_left = expr(); if (!pn3->pn_left) return NULL; } pn2->append(pn3); if (pn2->pn_count == JS_BIT(16)) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_TOO_MANY_CASES); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_TOO_MANY_CASES); return NULL; } break; @@ -4738,31 +4644,31 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) return NULL; default: - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_BAD_SWITCH); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_BAD_SWITCH); return NULL; } MUST_MATCH_TOKEN(TOK_COLON, JSMSG_COLON_AFTER_CASE); - pn4 = NewParseNode(PN_LIST, tc); + pn4 = ListNode::create(tc); if (!pn4) return NULL; pn4->pn_type = TOK_LC; pn4->makeEmpty(); - ts->flags |= TSF_OPERAND; - while ((tt = js_PeekToken(cx, ts)) != TOK_RC && + tokenStream.flags |= TSF_OPERAND; + while ((tt = tokenStream.peekToken()) != TOK_RC && tt != TOK_CASE && tt != TOK_DEFAULT) { - ts->flags &= ~TSF_OPERAND; + tokenStream.flags &= ~TSF_OPERAND; if (tt == TOK_ERROR) return NULL; - pn5 = Statement(cx, ts, tc); + pn5 = statement(); if (!pn5) return NULL; pn4->pn_pos.end = pn5->pn_pos.end; pn4->append(pn5); - ts->flags |= TSF_OPERAND; + tokenStream.flags |= TSF_OPERAND; } - ts->flags &= ~TSF_OPERAND; + tokenStream.flags &= 
~TSF_OPERAND; /* Fix the PN_LIST so it doesn't begin at the TOK_COLON. */ if (pn4->pn_head) @@ -4782,22 +4688,22 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) tc->blockNode = saveBlock; PopStatement(tc); - pn->pn_pos.end = pn2->pn_pos.end = CURRENT_TOKEN(ts).pos.end; + pn->pn_pos.end = pn2->pn_pos.end = tokenStream.currentToken().pos.end; pn->pn_left = pn1; pn->pn_right = pn2; return pn; } case TOK_WHILE: - pn = NewParseNode(PN_BINARY, tc); + pn = BinaryNode::create(tc); if (!pn) return NULL; js_PushStatement(tc, &stmtInfo, STMT_WHILE_LOOP, -1); - pn2 = Condition(cx, ts, tc); + pn2 = condition(); if (!pn2) return NULL; pn->pn_left = pn2; - pn2 = Statement(cx, ts, tc); + pn2 = statement(); if (!pn2) return NULL; PopStatement(tc); @@ -4806,28 +4712,28 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) return pn; case TOK_DO: - pn = NewParseNode(PN_BINARY, tc); + pn = BinaryNode::create(tc); if (!pn) return NULL; js_PushStatement(tc, &stmtInfo, STMT_DO_LOOP, -1); - pn2 = Statement(cx, ts, tc); + pn2 = statement(); if (!pn2) return NULL; pn->pn_left = pn2; MUST_MATCH_TOKEN(TOK_WHILE, JSMSG_WHILE_AFTER_DO); - pn2 = Condition(cx, ts, tc); + pn2 = condition(); if (!pn2) return NULL; PopStatement(tc); pn->pn_pos.end = pn2->pn_pos.end; pn->pn_right = pn2; - if (JSVERSION_NUMBER(cx) != JSVERSION_ECMA_3) { + if (JSVERSION_NUMBER(context) != JSVERSION_ECMA_3) { /* * All legacy and extended versions must do automatic semicolon * insertion after do-while. See the testcase and discussion in * http://bugzilla.mozilla.org/show_bug.cgi?id=238945. */ - (void) js_MatchToken(cx, ts, TOK_SEMI); + (void) tokenStream.matchToken(TOK_SEMI); return pn; } break; @@ -4841,24 +4747,24 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) #endif /* A FOR node is binary, left is loop control and right is the body. 
*/ - pn = NewParseNode(PN_BINARY, tc); + pn = BinaryNode::create(tc); if (!pn) return NULL; js_PushStatement(tc, &stmtInfo, STMT_FOR_LOOP, -1); pn->pn_op = JSOP_ITER; pn->pn_iflags = 0; - if (js_MatchToken(cx, ts, TOK_NAME)) { - if (CURRENT_TOKEN(ts).t_atom == cx->runtime->atomState.eachAtom) + if (tokenStream.matchToken(TOK_NAME)) { + if (tokenStream.currentToken().t_atom == context->runtime->atomState.eachAtom) pn->pn_iflags = JSITER_FOREACH; else - js_UngetToken(ts); + tokenStream.ungetToken(); } MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_AFTER_FOR); - ts->flags |= TSF_OPERAND; - tt = js_PeekToken(cx, ts); - ts->flags &= ~TSF_OPERAND; + tokenStream.flags |= TSF_OPERAND; + tt = tokenStream.peekToken(); + tokenStream.flags &= ~TSF_OPERAND; #if JS_HAS_BLOCK_SCOPE bool let = false; @@ -4886,25 +4792,25 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) */ tc->flags |= TCF_IN_FOR_INIT; if (tt == TOK_VAR) { - (void) js_GetToken(cx, ts); - pn1 = Variables(cx, ts, tc, false); + (void) tokenStream.getToken(); + pn1 = variables(false); #if JS_HAS_BLOCK_SCOPE } else if (tt == TOK_LET) { let = true; - (void) js_GetToken(cx, ts); - if (js_PeekToken(cx, ts) == TOK_LP) { - pn1 = LetBlock(cx, ts, tc, JS_FALSE); + (void) tokenStream.getToken(); + if (tokenStream.peekToken() == TOK_LP) { + pn1 = letBlock(JS_FALSE); tt = TOK_LEXICALSCOPE; } else { - pnlet = PushLexicalScope(cx, ts, tc, &blockInfo); + pnlet = PushLexicalScope(context, &tokenStream, tc, &blockInfo); if (!pnlet) return NULL; blockInfo.flags |= SIF_FOR_BLOCK; - pn1 = Variables(cx, ts, tc, false); + pn1 = variables(false); } #endif } else { - pn1 = Expr(cx, ts, tc); + pn1 = expr(); } tc->flags &= ~TCF_IN_FOR_INIT; if (!pn1) @@ -4917,16 +4823,16 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) * as we've excluded 'in' from being parsed in RelExpr by setting * the TCF_IN_FOR_INIT flag in our JSTreeContext. */ - if (pn1 && js_MatchToken(cx, ts, TOK_IN)) { + if (pn1 && tokenStream.matchToken(TOK_IN)) { pn->pn_iflags |= JSITER_ENUMERATE; stmtInfo.type = STMT_FOR_IN_LOOP; /* Check that the left side of the 'in' is valid. */ - JS_ASSERT(!TOKEN_TYPE_IS_DECL(tt) || PN_TYPE(pn1) == tt); - if (TOKEN_TYPE_IS_DECL(tt) + JS_ASSERT(!TokenKindIsDecl(tt) || PN_TYPE(pn1) == tt); + if (TokenKindIsDecl(tt) ? (pn1->pn_count > 1 || pn1->pn_op == JSOP_DEFCONST #if JS_HAS_DESTRUCTURING - || (JSVERSION_NUMBER(cx) == JSVERSION_1_7 && + || (JSVERSION_NUMBER(context) == JSVERSION_1_7 && pn->pn_op == JSOP_ITER && !(pn->pn_iflags & JSITER_FOREACH) && (pn1->pn_head->pn_type == TOK_RC || @@ -4940,7 +4846,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) : (pn1->pn_type != TOK_NAME && pn1->pn_type != TOK_DOT && #if JS_HAS_DESTRUCTURING - ((JSVERSION_NUMBER(cx) == JSVERSION_1_7 && + ((JSVERSION_NUMBER(context) == JSVERSION_1_7 && pn->pn_op == JSOP_ITER && !(pn->pn_iflags & JSITER_FOREACH)) ? (pn1->pn_type != TOK_RB || pn1->pn_count != 2) @@ -4952,8 +4858,8 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) pn1->pn_op != JSOP_XMLNAME) && #endif pn1->pn_type != TOK_LB)) { - js_ReportCompileErrorNumber(cx, ts, pn1, JSREPORT_ERROR, - JSMSG_BAD_FOR_LEFTSIDE); + ReportCompileErrorNumber(context, &tokenStream, pn1, JSREPORT_ERROR, + JSMSG_BAD_FOR_LEFTSIDE); return NULL; } @@ -4961,7 +4867,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) pn2 = NULL; uintN dflag = PND_ASSIGNED; - if (TOKEN_TYPE_IS_DECL(tt)) { + if (TokenKindIsDecl(tt)) { /* Tell js_EmitTree(TOK_VAR) that pn1 is part of a for/in. 
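Review note: the condition rewritten above (TOKEN_TYPE_IS_DECL → TokenKindIsDecl) is the whole legality check for `for (lhs in rhs)`: a declaration may bind exactly one name and may not be a const, and a bare left side must be a name, property access, or element access — or, for JS1.7 destructuring with plain iteration, exactly a two-element [key, value] pattern. A condensed, approximate sketch of the same decision, with hypothetical tags:

    #include <cstdio>

    // Hypothetical summary of the parsed left-hand side.
    enum LhsKind { LHS_NAME, LHS_DOT, LHS_ELEM, LHS_ARRAY_PATTERN, LHS_OTHER };

    struct Lhs {
        bool isDecl;       // came from var/let/const (TokenKindIsDecl(tt))
        int declCount;     // number of declarators; must be 1 in a for-in head
        bool isConst;      // JSOP_DEFCONST: const declarations are rejected here
        LhsKind kind;
        int patternLen;    // for LHS_ARRAY_PATTERN under JS1.7 [key, value]
    };

    bool validForInLeft(const Lhs &l, bool js17KeyValue) {
        if (l.isDecl)
            return l.declCount == 1 && !l.isConst;
        switch (l.kind) {
          case LHS_NAME: case LHS_DOT: case LHS_ELEM:
            return true;
          case LHS_ARRAY_PATTERN:
            // JS1.7 plain for-in destructuring must be exactly [key, value].
            return !js17KeyValue || l.patternLen == 2;
          default:
            return false;  // reported as JSMSG_BAD_FOR_LEFTSIDE
        }
    }

    int main() {
        Lhs decl{true, 1, false, LHS_NAME, 0};
        Lhs badPattern{false, 0, false, LHS_ARRAY_PATTERN, 3};
        std::printf("%d %d\n", validForInLeft(decl, true), validForInLeft(badPattern, true));
    }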
*/ pn1->pn_xflags |= PNX_FORINVAR; @@ -4977,7 +4883,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) || pn2->pn_type == TOK_ASSIGN #endif ) { - pnseq = NewParseNode(PN_LIST, tc); + pnseq = ListNode::create(tc); if (!pnseq) return NULL; pnseq->pn_type = TOK_SEQ; @@ -4989,7 +4895,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) * Hoist just the 'i' from 'for (let x = i in o)' to * before the loop, glued together via pnseq. */ - pn3 = NewParseNode(PN_UNARY, tc); + pn3 = UnaryNode::create(tc); if (!pn3) return NULL; pn3->pn_type = TOK_SEMI; @@ -5035,7 +4941,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) #endif { JS_ASSERT(pn2->pn_type == TOK_NAME); - pn1 = NewNameNode(cx, pn2->pn_atom, tc); + pn1 = NameNode::create(pn2->pn_atom, tc); if (!pn1) return NULL; pn1->pn_type = TOK_NAME; @@ -5052,7 +4958,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) if (!pn2) { pn2 = pn1; if (pn2->pn_type == TOK_LP && - !MakeSetCall(cx, pn2, tc, JSMSG_BAD_LEFTSIDE_OF_ASS)) { + !MakeSetCall(context, pn2, tc, JSMSG_BAD_LEFTSIDE_OF_ASS)) { return NULL; } #if JS_HAS_XML_SUPPORT @@ -5064,7 +4970,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) switch (pn2->pn_type) { case TOK_NAME: /* Beware 'for (arguments in ...)' with or without a 'var'. */ - NoteLValue(cx, pn2, tc, dflag); + NoteLValue(context, pn2, tc, dflag); break; #if JS_HAS_DESTRUCTURING @@ -5075,10 +4981,10 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) case TOK_RB: case TOK_RC: /* Check for valid lvalues in var-less destructuring for-in. */ - if (pn1 == pn2 && !CheckDestructuring(cx, NULL, pn2, NULL, tc)) + if (pn1 == pn2 && !CheckDestructuring(context, NULL, pn2, NULL, tc)) return NULL; - if (JSVERSION_NUMBER(cx) == JSVERSION_1_7) { + if (JSVERSION_NUMBER(context) == JSVERSION_1_7) { /* * Destructuring for-in requires [key, value] enumeration * in JS1.7. @@ -5103,13 +5009,13 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) if (let) tc->topStmt = save->down; #endif - pn2 = Expr(cx, ts, tc); + pn2 = expr(); #if JS_HAS_BLOCK_SCOPE if (let) tc->topStmt = save; #endif - pn2 = NewBinary(TOK_IN, JSOP_NOP, pn1, pn2, tc); + pn2 = JSParseNode::newBinaryOrAppend(TOK_IN, JSOP_NOP, pn1, pn2, tc); if (!pn2) return NULL; pn->pn_left = pn2; @@ -5120,32 +5026,32 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) /* Parse the loop condition or null into pn2. */ MUST_MATCH_TOKEN(TOK_SEMI, JSMSG_SEMI_AFTER_FOR_INIT); - ts->flags |= TSF_OPERAND; - tt = js_PeekToken(cx, ts); - ts->flags &= ~TSF_OPERAND; + tokenStream.flags |= TSF_OPERAND; + tt = tokenStream.peekToken(); + tokenStream.flags &= ~TSF_OPERAND; if (tt == TOK_SEMI) { pn2 = NULL; } else { - pn2 = Expr(cx, ts, tc); + pn2 = expr(); if (!pn2) return NULL; } /* Parse the update expression or null into pn3. */ MUST_MATCH_TOKEN(TOK_SEMI, JSMSG_SEMI_AFTER_FOR_COND); - ts->flags |= TSF_OPERAND; - tt = js_PeekToken(cx, ts); - ts->flags &= ~TSF_OPERAND; + tokenStream.flags |= TSF_OPERAND; + tt = tokenStream.peekToken(); + tokenStream.flags &= ~TSF_OPERAND; if (tt == TOK_RP) { pn3 = NULL; } else { - pn3 = Expr(cx, ts, tc); + pn3 = expr(); if (!pn3) return NULL; } /* Build the FORHEAD node to use as the left kid of pn. 
*/ - pn4 = NewParseNode(PN_TERNARY, tc); + pn4 = TernaryNode::create(tc); if (!pn4) return NULL; pn4->pn_type = TOK_FORHEAD; @@ -5159,7 +5065,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_FOR_CTRL); /* Parse the loop body into pn->pn_right. */ - pn2 = Statement(cx, ts, tc); + pn2 = statement(); if (!pn2) return NULL; pn->pn_right = pn2; @@ -5183,8 +5089,8 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) return pn; bad_for_each: - js_ReportCompileErrorNumber(cx, ts, pn, JSREPORT_ERROR, - JSMSG_BAD_FOR_EACH_LOOP); + ReportCompileErrorNumber(context, &tokenStream, pn, JSREPORT_ERROR, + JSMSG_BAD_FOR_EACH_LOOP); return NULL; } @@ -5193,9 +5099,9 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) /* * try nodes are ternary. - * kid1 is the try Statement + * kid1 is the try statement * kid2 is the catch node list or null - * kid3 is the finally Statement + * kid3 is the finally statement * * catch nodes are ternary. * kid1 is the lvalue (TOK_NAME, TOK_LB, or TOK_LC) @@ -5206,9 +5112,9 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) * TOK_NAME for a single identifier * TOK_RB or TOK_RC for a destructuring left-hand side * - * finally nodes are TOK_LC Statement lists. + * finally nodes are TOK_LC statement lists. */ - pn = NewParseNode(PN_TERNARY, tc); + pn = TernaryNode::create(tc); if (!pn) return NULL; pn->pn_op = JSOP_NOP; @@ -5216,16 +5122,16 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) MUST_MATCH_TOKEN(TOK_LC, JSMSG_CURLY_BEFORE_TRY); if (!PushBlocklikeStatement(&stmtInfo, STMT_TRY, tc)) return NULL; - pn->pn_kid1 = Statements(cx, ts, tc); + pn->pn_kid1 = statements(); if (!pn->pn_kid1) return NULL; MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_AFTER_TRY); PopStatement(tc); catchList = NULL; - tt = js_GetToken(cx, ts); + tt = tokenStream.getToken(); if (tt == TOK_CATCH) { - catchList = NewParseNode(PN_LIST, tc); + catchList = ListNode::create(tc); if (!catchList) return NULL; catchList->pn_type = TOK_RESERVED; @@ -5238,8 +5144,8 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) /* Check for another catch after unconditional catch. */ if (lastCatch && !lastCatch->pn_kid2) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_CATCH_AFTER_GENERAL); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_CATCH_AFTER_GENERAL); return NULL; } @@ -5247,7 +5153,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) * Create a lexical scope node around the whole catch clause, * including the head. */ - pnblock = PushLexicalScope(cx, ts, tc, &stmtInfo); + pnblock = PushLexicalScope(context, &tokenStream, tc, &stmtInfo); if (!pnblock) return NULL; stmtInfo.type = STMT_CATCH; @@ -5259,7 +5165,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) * where lhs is a name or a destructuring left-hand side. 
* (the latter is legal only #ifdef JS_HAS_CATCH_GUARD) */ - pn2 = NewParseNode(PN_TERNARY, tc); + pn2 = TernaryNode::create(tc); if (!pn2) return NULL; pnblock->pn_expr = pn2; @@ -5275,30 +5181,30 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) data.binder = BindLet; data.let.overflow = JSMSG_TOO_MANY_CATCH_VARS; - tt = js_GetToken(cx, ts); + tt = tokenStream.getToken(); switch (tt) { #if JS_HAS_DESTRUCTURING case TOK_LB: case TOK_LC: - pn3 = DestructuringExpr(cx, &data, tc, tt); + pn3 = destructuringExpr(&data, tt); if (!pn3) return NULL; break; #endif case TOK_NAME: - label = CURRENT_TOKEN(ts).t_atom; + label = tokenStream.currentToken().t_atom; pn3 = NewBindingNode(label, tc, true); if (!pn3) return NULL; data.pn = pn3; - if (!data.binder(cx, &data, label, tc)) + if (!data.binder(context, &data, label, tc)) return NULL; break; default: - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_CATCH_IDENTIFIER); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_CATCH_IDENTIFIER); return NULL; } @@ -5309,8 +5215,8 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) * to avoid conflicting with the JS2/ECMAv4 type annotation * catchguard syntax. */ - if (js_MatchToken(cx, ts, TOK_IF)) { - pn2->pn_kid2 = Expr(cx, ts, tc); + if (tokenStream.matchToken(TOK_IF)) { + pn2->pn_kid2 = expr(); if (!pn2->pn_kid2) return NULL; } @@ -5318,7 +5224,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_CATCH); MUST_MATCH_TOKEN(TOK_LC, JSMSG_CURLY_BEFORE_CATCH); - pn2->pn_kid3 = Statements(cx, ts, tc); + pn2->pn_kid3 = statements(); if (!pn2->pn_kid3) return NULL; MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_AFTER_CATCH); @@ -5326,9 +5232,9 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) catchList->append(pnblock); lastCatch = pn2; - ts->flags |= TSF_OPERAND; - tt = js_GetToken(cx, ts); - ts->flags &= ~TSF_OPERAND; + tokenStream.flags |= TSF_OPERAND; + tt = tokenStream.getToken(); + tokenStream.flags &= ~TSF_OPERAND; } while (tt == TOK_CATCH); } pn->pn_kid2 = catchList; @@ -5337,40 +5243,40 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) MUST_MATCH_TOKEN(TOK_LC, JSMSG_CURLY_BEFORE_FINALLY); if (!PushBlocklikeStatement(&stmtInfo, STMT_FINALLY, tc)) return NULL; - pn->pn_kid3 = Statements(cx, ts, tc); + pn->pn_kid3 = statements(); if (!pn->pn_kid3) return NULL; MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_AFTER_FINALLY); PopStatement(tc); } else { - js_UngetToken(ts); + tokenStream.ungetToken(); } if (!catchList && !pn->pn_kid3) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_CATCH_OR_FINALLY); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_CATCH_OR_FINALLY); return NULL; } return pn; } case TOK_THROW: - pn = NewParseNode(PN_UNARY, tc); + pn = UnaryNode::create(tc); if (!pn) return NULL; /* ECMA-262 Edition 3 says 'throw [no LineTerminator here] Expr'. 
*/ - ts->flags |= TSF_OPERAND; - tt = js_PeekTokenSameLine(cx, ts); - ts->flags &= ~TSF_OPERAND; + tokenStream.flags |= TSF_OPERAND; + tt = tokenStream.peekTokenSameLine(); + tokenStream.flags &= ~TSF_OPERAND; if (tt == TOK_ERROR) return NULL; if (tt == TOK_EOF || tt == TOK_EOL || tt == TOK_SEMI || tt == TOK_RC) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_SYNTAX_ERROR); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_SYNTAX_ERROR); return NULL; } - pn2 = Expr(cx, ts, tc); + pn2 = expr(); if (!pn2) return NULL; pn->pn_pos.end = pn2->pn_pos.end; @@ -5380,28 +5286,28 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) /* TOK_CATCH and TOK_FINALLY are both handled in the TOK_TRY case */ case TOK_CATCH: - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_CATCH_WITHOUT_TRY); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_CATCH_WITHOUT_TRY); return NULL; case TOK_FINALLY: - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_FINALLY_WITHOUT_TRY); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_FINALLY_WITHOUT_TRY); return NULL; case TOK_BREAK: - pn = NewParseNode(PN_NULLARY, tc); + pn = NullaryNode::create(tc); if (!pn) return NULL; - if (!MatchLabel(cx, ts, pn)) + if (!MatchLabel(context, &tokenStream, pn)) return NULL; stmt = tc->topStmt; label = pn->pn_atom; if (label) { for (; ; stmt = stmt->down) { if (!stmt) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_LABEL_NOT_FOUND); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_LABEL_NOT_FOUND); return NULL; } if (stmt->type == STMT_LABEL && stmt->label == label) @@ -5410,8 +5316,8 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) } else { for (; ; stmt = stmt->down) { if (!stmt) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_TOUGH_BREAK); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_TOUGH_BREAK); return NULL; } if (STMT_IS_LOOP(stmt) || stmt->type == STMT_SWITCH) @@ -5419,30 +5325,29 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) } } if (label) - pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end; + pn->pn_pos.end = tokenStream.currentToken().pos.end; break; case TOK_CONTINUE: - pn = NewParseNode(PN_NULLARY, tc); + pn = NullaryNode::create(tc); if (!pn) return NULL; - if (!MatchLabel(cx, ts, pn)) + if (!MatchLabel(context, &tokenStream, pn)) return NULL; stmt = tc->topStmt; label = pn->pn_atom; if (label) { for (stmt2 = NULL; ; stmt = stmt->down) { if (!stmt) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, JSMSG_LABEL_NOT_FOUND); return NULL; } if (stmt->type == STMT_LABEL) { if (stmt->label == label) { if (!stmt2 || !STMT_IS_LOOP(stmt2)) { - js_ReportCompileErrorNumber(cx, ts, NULL, - JSREPORT_ERROR, - JSMSG_BAD_CONTINUE); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_BAD_CONTINUE); return NULL; } break; @@ -5454,8 +5359,8 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) } else { for (; ; stmt = stmt->down) { if (!stmt) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_BAD_CONTINUE); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_BAD_CONTINUE); return NULL; } if (STMT_IS_LOOP(stmt)) @@ -5463,36 +5368,36 @@ Statement(JSContext *cx, JSTokenStream *ts, 
JSTreeContext *tc) } } if (label) - pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end; + pn->pn_pos.end = tokenStream.currentToken().pos.end; break; case TOK_WITH: /* * In most cases, we want the constructs forbidden in strict mode * code to be a subset of those that JSOPTION_STRICT warns about, and - * we should use js_ReportStrictModeError. However, 'with' is the sole + * we should use ReportStrictModeError. However, 'with' is the sole * instance of a construct that is forbidden in strict mode code, but * doesn't even merit a warning under JSOPTION_STRICT. See * https://bugzilla.mozilla.org/show_bug.cgi?id=514576#c1. */ if (tc->flags & TCF_STRICT_MODE_CODE) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_STRICT_CODE_WITH); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_STRICT_CODE_WITH); return NULL; } - pn = NewParseNode(PN_BINARY, tc); + pn = BinaryNode::create(tc); if (!pn) return NULL; MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_WITH); - pn2 = ParenExpr(cx, ts, tc, NULL, NULL); + pn2 = parenExpr(NULL, NULL); if (!pn2) return NULL; MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_WITH); pn->pn_left = pn2; js_PushStatement(tc, &stmtInfo, STMT_WITH, -1); - pn2 = Statement(cx, ts, tc); + pn2 = statement(); if (!pn2) return NULL; PopStatement(tc); @@ -5503,7 +5408,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) return pn; case TOK_VAR: - pn = Variables(cx, ts, tc, false); + pn = variables(false); if (!pn) return NULL; @@ -5518,8 +5423,8 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) JSObjectBox *blockbox; /* Check for a let statement or let expression. */ - if (js_PeekToken(cx, ts) == TOK_LP) { - pn = LetBlock(cx, ts, tc, JS_TRUE); + if (tokenStream.peekToken() == TOK_LP) { + pn = letBlock(JS_TRUE); if (!pn || pn->pn_op == JSOP_LEAVEBLOCK) return pn; @@ -5543,8 +5448,8 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) stmt = tc->topStmt; if (stmt && (!STMT_MAYBE_SCOPE(stmt) || (stmt->flags & SIF_FOR_BLOCK))) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_LET_DECL_NOT_IN_BLOCK); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_LET_DECL_NOT_IN_BLOCK); return NULL; } @@ -5557,10 +5462,10 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) * ES4 specifies that let at top level and at body-block scope * does not shadow var, so convert back to var. */ - CURRENT_TOKEN(ts).type = TOK_VAR; - CURRENT_TOKEN(ts).t_op = JSOP_DEFVAR; + tokenStream.mutableCurrentToken()->type = TOK_VAR; + tokenStream.mutableCurrentToken()->t_op = JSOP_DEFVAR; - pn = Variables(cx, ts, tc, false); + pn = variables(false); if (!pn) return NULL; pn->pn_xflags |= PNX_POPVAR; @@ -5611,7 +5516,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) #endif /* Create a new lexical scope node for these statements. 
*/ - pn1 = NewParseNode(PN_NAME, tc); + pn1 = LexicalScopeNode::create(tc); if (!pn1) return NULL; @@ -5624,7 +5529,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) tc->blockNode = pn1; } - pn = Variables(cx, ts, tc, false); + pn = variables(false); if (!pn) return NULL; pn->pn_xflags = PNX_POPVAR; @@ -5633,7 +5538,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) #endif /* JS_HAS_BLOCK_SCOPE */ case TOK_RETURN: - pn = ReturnOrYield(cx, ts, tc, Expr); + pn = returnOrYield(false); if (!pn) return NULL; break; @@ -5646,7 +5551,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) tc->flags = oldflags & ~TCF_HAS_FUNCTION_STMT; if (!PushBlocklikeStatement(&stmtInfo, STMT_BLOCK, tc)) return NULL; - pn = Statements(cx, ts, tc); + pn = statements(); if (!pn) return NULL; @@ -5667,7 +5572,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) case TOK_EOL: case TOK_SEMI: - pn = NewParseNode(PN_UNARY, tc); + pn = UnaryNode::create(tc); if (!pn) return NULL; pn->pn_type = TOK_SEMI; @@ -5675,7 +5580,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) #if JS_HAS_DEBUGGER_KEYWORD case TOK_DEBUGGER: - pn = NewParseNode(PN_NULLARY, tc); + pn = NullaryNode::create(tc); if (!pn) return NULL; pn->pn_type = TOK_DEBUGGER; @@ -5685,23 +5590,23 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) #if JS_HAS_XML_SUPPORT case TOK_DEFAULT: - pn = NewParseNode(PN_UNARY, tc); + pn = UnaryNode::create(tc); if (!pn) return NULL; - if (!js_MatchToken(cx, ts, TOK_NAME) || - CURRENT_TOKEN(ts).t_atom != cx->runtime->atomState.xmlAtom || - !js_MatchToken(cx, ts, TOK_NAME) || - CURRENT_TOKEN(ts).t_atom != cx->runtime->atomState.namespaceAtom || - !js_MatchToken(cx, ts, TOK_ASSIGN) || - CURRENT_TOKEN(ts).t_op != JSOP_NOP) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_BAD_DEFAULT_XML_NAMESPACE); + if (!tokenStream.matchToken(TOK_NAME) || + tokenStream.currentToken().t_atom != context->runtime->atomState.xmlAtom || + !tokenStream.matchToken(TOK_NAME) || + tokenStream.currentToken().t_atom != context->runtime->atomState.namespaceAtom || + !tokenStream.matchToken(TOK_ASSIGN) || + tokenStream.currentToken().t_op != JSOP_NOP) { + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_BAD_DEFAULT_XML_NAMESPACE); return NULL; } /* Is this an E4X dagger I see before me? */ tc->flags |= TCF_FUN_HEAVYWEIGHT; - pn2 = Expr(cx, ts, tc); + pn2 = expr(); if (!pn2) return NULL; pn->pn_op = JSOP_DEFXMLNS; @@ -5717,33 +5622,33 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) #if JS_HAS_XML_SUPPORT expression: #endif - js_UngetToken(ts); - pn2 = Expr(cx, ts, tc); + tokenStream.ungetToken(); + pn2 = expr(); if (!pn2) return NULL; - if (js_PeekToken(cx, ts) == TOK_COLON) { + if (tokenStream.peekToken() == TOK_COLON) { if (pn2->pn_type != TOK_NAME) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_BAD_LABEL); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_BAD_LABEL); return NULL; } label = pn2->pn_atom; for (stmt = tc->topStmt; stmt; stmt = stmt->down) { if (stmt->type == STMT_LABEL && stmt->label == label) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_DUPLICATE_LABEL); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_DUPLICATE_LABEL); return NULL; } } ForgetUse(pn2); - (void) js_GetToken(cx, ts); + (void) tokenStream.getToken(); /* Push a label struct and parse the statement. 
*/ js_PushStatement(tc, &stmtInfo, STMT_LABEL, -1); stmtInfo.label = label; - pn = Statement(cx, ts, tc); + pn = statement(); if (!pn) return NULL; @@ -5762,7 +5667,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) return pn2; } - pn = NewParseNode(PN_UNARY, tc); + pn = UnaryNode::create(tc); if (!pn) return NULL; pn->pn_type = TOK_SEMI; @@ -5803,7 +5708,7 @@ Statement(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) } /* Check termination of this primitive statement. */ - return MatchOrInsertSemicolon(cx, ts) ? pn : NULL; + return MatchOrInsertSemicolon(context, &tokenStream) ? pn : NULL; } static void @@ -5815,10 +5720,10 @@ NoteArgumentsUse(JSTreeContext *tc) tc->funbox->node->pn_dflags |= PND_FUNARG; } -static JSParseNode * -Variables(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, bool inLetHead) +JSParseNode * +JSCompiler::variables(bool inLetHead) { - JSTokenType tt; + TokenKind tt; bool let; JSStmtInfo *scopeStmt; BindData data; @@ -5831,7 +5736,7 @@ Variables(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, bool inLetHead) * - TOK_LP: We are parsing the head of a let block. * - Otherwise, we're parsing var declarations. */ - tt = CURRENT_TOKEN(ts).type; + tt = tokenStream.currentToken().type; let = (tt == TOK_LET || tt == TOK_LP); JS_ASSERT(let || tt == TOK_VAR); @@ -5840,7 +5745,7 @@ Variables(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, bool inLetHead) JSStmtInfo *save = tc->topStmt, *saveScope = tc->topScopeStmt; #endif - /* Make sure that Statement set up the tree context correctly. */ + /* Make sure that statement set up the tree context correctly. */ scopeStmt = tc->topScopeStmt; if (let) { while (scopeStmt && !(scopeStmt->flags & SIF_SCOPE)) { @@ -5850,8 +5755,8 @@ Variables(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, bool inLetHead) JS_ASSERT(scopeStmt); } - data.op = let ? JSOP_NOP : CURRENT_TOKEN(ts).t_op; - pn = NewParseNode(PN_LIST, tc); + data.op = let ? 
JSOP_NOP : tokenStream.currentToken().t_op; + pn = ListNode::create(tc); if (!pn) return NULL; pn->pn_op = data.op; @@ -5871,25 +5776,25 @@ Variables(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, bool inLetHead) } do { - tt = js_GetToken(cx, ts); + tt = tokenStream.getToken(); #if JS_HAS_DESTRUCTURING if (tt == TOK_LB || tt == TOK_LC) { tc->flags |= TCF_DECL_DESTRUCTURING; - pn2 = PrimaryExpr(cx, ts, tc, tt, JS_FALSE); + pn2 = primaryExpr(tt, JS_FALSE); tc->flags &= ~TCF_DECL_DESTRUCTURING; if (!pn2) return NULL; - if (!CheckDestructuring(cx, &data, pn2, NULL, tc)) + if (!CheckDestructuring(context, &data, pn2, NULL, tc)) return NULL; if ((tc->flags & TCF_IN_FOR_INIT) && - js_PeekToken(cx, ts) == TOK_IN) { + tokenStream.peekToken() == TOK_IN) { pn->append(pn2); continue; } MUST_MATCH_TOKEN(TOK_ASSIGN, JSMSG_BAD_DESTRUCT_DECL); - if (CURRENT_TOKEN(ts).t_op != JSOP_NOP) + if (tokenStream.currentToken().t_op != JSOP_NOP) goto bad_var_init; #if JS_HAS_BLOCK_SCOPE @@ -5898,7 +5803,7 @@ Variables(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, bool inLetHead) tc->topScopeStmt = saveScope->downScope; } #endif - JSParseNode *init = AssignExpr(cx, ts, tc); + JSParseNode *init = assignExpr(); #if JS_HAS_BLOCK_SCOPE if (popScope) { tc->topStmt = save; @@ -5909,7 +5814,7 @@ Variables(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, bool inLetHead) if (!init || !UndominateInitializers(pn2, init, tc)) return NULL; - pn2 = NewBinary(TOK_ASSIGN, JSOP_NOP, pn2, init, tc); + pn2 = JSParseNode::newBinaryOrAppend(TOK_ASSIGN, JSOP_NOP, pn2, init, tc); if (!pn2) return NULL; pn->append(pn2); @@ -5919,25 +5824,25 @@ Variables(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, bool inLetHead) if (tt != TOK_NAME) { if (tt != TOK_ERROR) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_NO_VARIABLE_NAME); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_NO_VARIABLE_NAME); } return NULL; } - atom = CURRENT_TOKEN(ts).t_atom; + atom = tokenStream.currentToken().t_atom; pn2 = NewBindingNode(atom, tc, let); if (!pn2) return NULL; if (data.op == JSOP_DEFCONST) pn2->pn_dflags |= PND_CONST; data.pn = pn2; - if (!data.binder(cx, &data, atom, tc)) + if (!data.binder(context, &data, atom, tc)) return NULL; pn->append(pn2); - if (js_MatchToken(cx, ts, TOK_ASSIGN)) { - if (CURRENT_TOKEN(ts).t_op != JSOP_NOP) + if (tokenStream.matchToken(TOK_ASSIGN)) { + if (tokenStream.currentToken().t_op != JSOP_NOP) goto bad_var_init; #if JS_HAS_BLOCK_SCOPE @@ -5946,7 +5851,7 @@ Variables(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, bool inLetHead) tc->topScopeStmt = saveScope->downScope; } #endif - JSParseNode *init = AssignExpr(cx, ts, tc); + JSParseNode *init = assignExpr(); #if JS_HAS_BLOCK_SCOPE if (popScope) { tc->topStmt = save; @@ -5974,37 +5879,37 @@ Variables(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, bool inLetHead) ? JSOP_SETCONST : JSOP_SETNAME; - NoteLValue(cx, pn2, tc, data.fresh ? PND_INITIALIZED : PND_ASSIGNED); + NoteLValue(context, pn2, tc, data.fresh ? PND_INITIALIZED : PND_ASSIGNED); /* The declarator's position must include the initializer. 
*/ pn2->pn_pos.end = init->pn_pos.end; if ((tc->flags & TCF_IN_FUNCTION) && - atom == cx->runtime->atomState.argumentsAtom) { + atom == context->runtime->atomState.argumentsAtom) { NoteArgumentsUse(tc); if (!let) tc->flags |= TCF_FUN_HEAVYWEIGHT; } } - } while (js_MatchToken(cx, ts, TOK_COMMA)); + } while (tokenStream.matchToken(TOK_COMMA)); pn->pn_pos.end = pn->last()->pn_pos.end; return pn; bad_var_init: - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_BAD_VAR_INIT); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_BAD_VAR_INIT); return NULL; } -static JSParseNode * -Expr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::expr() { JSParseNode *pn, *pn2; - pn = AssignExpr(cx, ts, tc); - if (pn && js_MatchToken(cx, ts, TOK_COMMA)) { - pn2 = NewParseNode(PN_LIST, tc); + pn = assignExpr(); + if (pn && tokenStream.matchToken(TOK_COMMA)) { + pn2 = ListNode::create(tc); if (!pn2) return NULL; pn2->pn_pos.begin = pn->pn_pos.begin; @@ -6014,64 +5919,64 @@ Expr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) #if JS_HAS_GENERATORS pn2 = pn->last(); if (pn2->pn_type == TOK_YIELD && !pn2->pn_parens) { - js_ReportCompileErrorNumber(cx, ts, pn2, JSREPORT_ERROR, - JSMSG_BAD_GENERATOR_SYNTAX, - js_yield_str); + ReportCompileErrorNumber(context, &tokenStream, pn2, JSREPORT_ERROR, + JSMSG_BAD_GENERATOR_SYNTAX, + js_yield_str); return NULL; } #endif - pn2 = AssignExpr(cx, ts, tc); + pn2 = assignExpr(); if (!pn2) return NULL; pn->append(pn2); - } while (js_MatchToken(cx, ts, TOK_COMMA)); + } while (tokenStream.matchToken(TOK_COMMA)); pn->pn_pos.end = pn->last()->pn_pos.end; } return pn; } -static JSParseNode * -AssignExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::assignExpr() { JSParseNode *pn, *rhs; - JSTokenType tt; + TokenKind tt; JSOp op; - JS_CHECK_RECURSION(cx, return NULL); + JS_CHECK_RECURSION(context, return NULL); #if JS_HAS_GENERATORS - ts->flags |= TSF_OPERAND; - if (js_MatchToken(cx, ts, TOK_YIELD)) { - ts->flags &= ~TSF_OPERAND; - return ReturnOrYield(cx, ts, tc, AssignExpr); + tokenStream.flags |= TSF_OPERAND; + if (tokenStream.matchToken(TOK_YIELD)) { + tokenStream.flags &= ~TSF_OPERAND; + return returnOrYield(true); } - ts->flags &= ~TSF_OPERAND; + tokenStream.flags &= ~TSF_OPERAND; #endif - pn = CondExpr(cx, ts, tc); + pn = condExpr(); if (!pn) return NULL; - tt = js_GetToken(cx, ts); + tt = tokenStream.getToken(); #if JS_HAS_GETTER_SETTER if (tt == TOK_NAME) { - tt = CheckGetterOrSetter(cx, ts, TOK_ASSIGN); + tt = CheckGetterOrSetter(context, &tokenStream, TOK_ASSIGN); if (tt == TOK_ERROR) return NULL; } #endif if (tt != TOK_ASSIGN) { - js_UngetToken(ts); + tokenStream.ungetToken(); return pn; } - op = CURRENT_TOKEN(ts).t_op; + op = tokenStream.currentToken().t_op; switch (pn->pn_type) { case TOK_NAME: - if (!CheckStrictAssignment(cx, tc, pn)) + if (!CheckStrictAssignment(context, tc, pn)) return NULL; pn->pn_op = JSOP_SETNAME; - NoteLValue(cx, pn, tc); + NoteLValue(context, pn, tc); break; case TOK_DOT: pn->pn_op = JSOP_SETPROP; @@ -6083,17 +5988,17 @@ AssignExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) case TOK_RB: case TOK_RC: if (op != JSOP_NOP) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_BAD_DESTRUCT_ASS); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_BAD_DESTRUCT_ASS); return NULL; } - rhs = AssignExpr(cx, ts, tc); - if (!rhs || !CheckDestructuring(cx, NULL, pn, rhs, tc)) + rhs = 
assignExpr(); + if (!rhs || !CheckDestructuring(context, NULL, pn, rhs, tc)) return NULL; - return NewBinary(TOK_ASSIGN, op, pn, rhs, tc); + return JSParseNode::newBinaryOrAppend(TOK_ASSIGN, op, pn, rhs, tc); #endif case TOK_LP: - if (!MakeSetCall(cx, pn, tc, JSMSG_BAD_LEFTSIDE_OF_ASS)) + if (!MakeSetCall(context, pn, tc, JSMSG_BAD_LEFTSIDE_OF_ASS)) return NULL; break; #if JS_HAS_XML_SUPPORT @@ -6105,12 +6010,12 @@ AssignExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) /* FALL THROUGH */ #endif default: - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_BAD_LEFTSIDE_OF_ASS); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_BAD_LEFTSIDE_OF_ASS); return NULL; } - rhs = AssignExpr(cx, ts, tc); + rhs = assignExpr(); if (rhs && PN_TYPE(pn) == TOK_NAME && pn->pn_used) { JSDefinition *dn = pn->pn_lexdef; @@ -6127,21 +6032,22 @@ AssignExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) } } - return NewBinary(TOK_ASSIGN, op, pn, rhs, tc); + return JSParseNode::newBinaryOrAppend(TOK_ASSIGN, op, pn, rhs, tc); } -static JSParseNode * -CondExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::condExpr() { JSParseNode *pn, *pn1, *pn2, *pn3; uintN oldflags; - pn = OrExpr(cx, ts, tc); - if (pn && js_MatchToken(cx, ts, TOK_HOOK)) { + pn = orExpr(); + if (pn && tokenStream.matchToken(TOK_HOOK)) { pn1 = pn; - pn = NewParseNode(PN_TERNARY, tc); + pn = TernaryNode::create(tc); if (!pn) return NULL; + /* * Always accept the 'in' operator in the middle clause of a ternary, * where it's unambiguous, even if we might be parsing the init of a @@ -6149,13 +6055,13 @@ CondExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) */ oldflags = tc->flags; tc->flags &= ~TCF_IN_FOR_INIT; - pn2 = AssignExpr(cx, ts, tc); + pn2 = assignExpr(); tc->flags = oldflags | (tc->flags & TCF_FUN_FLAGS); if (!pn2) return NULL; MUST_MATCH_TOKEN(TOK_COLON, JSMSG_COLON_IN_COND); - pn3 = AssignExpr(cx, ts, tc); + pn3 = assignExpr(); if (!pn3) return NULL; pn->pn_pos.begin = pn1->pn_pos.begin; @@ -6167,105 +6073,103 @@ CondExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) return pn; } -static JSParseNode * -OrExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::orExpr() { JSParseNode *pn; - pn = AndExpr(cx, ts, tc); - while (pn && js_MatchToken(cx, ts, TOK_OR)) - pn = NewBinary(TOK_OR, JSOP_OR, pn, AndExpr(cx, ts, tc), tc); + pn = andExpr(); + while (pn && tokenStream.matchToken(TOK_OR)) + pn = JSParseNode::newBinaryOrAppend(TOK_OR, JSOP_OR, pn, andExpr(), tc); return pn; } -static JSParseNode * -AndExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::andExpr() { JSParseNode *pn; - pn = BitOrExpr(cx, ts, tc); - while (pn && js_MatchToken(cx, ts, TOK_AND)) - pn = NewBinary(TOK_AND, JSOP_AND, pn, BitOrExpr(cx, ts, tc), tc); + pn = bitOrExpr(); + while (pn && tokenStream.matchToken(TOK_AND)) + pn = JSParseNode::newBinaryOrAppend(TOK_AND, JSOP_AND, pn, bitOrExpr(), tc); return pn; } -static JSParseNode * -BitOrExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::bitOrExpr() { JSParseNode *pn; - pn = BitXorExpr(cx, ts, tc); - while (pn && js_MatchToken(cx, ts, TOK_BITOR)) { - pn = NewBinary(TOK_BITOR, JSOP_BITOR, pn, BitXorExpr(cx, ts, tc), - tc); + pn = bitXorExpr(); + while (pn && tokenStream.matchToken(TOK_BITOR)) { + pn = JSParseNode::newBinaryOrAppend(TOK_BITOR, JSOP_BITOR, pn, bitXorExpr(), tc); } return pn; } -static JSParseNode * 
-BitXorExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::bitXorExpr() { JSParseNode *pn; - pn = BitAndExpr(cx, ts, tc); - while (pn && js_MatchToken(cx, ts, TOK_BITXOR)) { - pn = NewBinary(TOK_BITXOR, JSOP_BITXOR, pn, BitAndExpr(cx, ts, tc), - tc); + pn = bitAndExpr(); + while (pn && tokenStream.matchToken(TOK_BITXOR)) { + pn = JSParseNode::newBinaryOrAppend(TOK_BITXOR, JSOP_BITXOR, pn, bitAndExpr(), tc); } return pn; } -static JSParseNode * -BitAndExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::bitAndExpr() { JSParseNode *pn; - pn = EqExpr(cx, ts, tc); - while (pn && js_MatchToken(cx, ts, TOK_BITAND)) - pn = NewBinary(TOK_BITAND, JSOP_BITAND, pn, EqExpr(cx, ts, tc), tc); + pn = eqExpr(); + while (pn && tokenStream.matchToken(TOK_BITAND)) + pn = JSParseNode::newBinaryOrAppend(TOK_BITAND, JSOP_BITAND, pn, eqExpr(), tc); return pn; } -static JSParseNode * -EqExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::eqExpr() { JSParseNode *pn; JSOp op; - pn = RelExpr(cx, ts, tc); - while (pn && js_MatchToken(cx, ts, TOK_EQOP)) { - op = CURRENT_TOKEN(ts).t_op; - pn = NewBinary(TOK_EQOP, op, pn, RelExpr(cx, ts, tc), tc); + pn = relExpr(); + while (pn && tokenStream.matchToken(TOK_EQOP)) { + op = tokenStream.currentToken().t_op; + pn = JSParseNode::newBinaryOrAppend(TOK_EQOP, op, pn, relExpr(), tc); } return pn; } -static JSParseNode * -RelExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::relExpr() { JSParseNode *pn; - JSTokenType tt; + TokenKind tt; JSOp op; uintN inForInitFlag = tc->flags & TCF_IN_FOR_INIT; /* - * Uses of the in operator in ShiftExprs are always unambiguous, + * Uses of the in operator in shiftExprs are always unambiguous, * so unset the flag that prohibits recognizing it. */ tc->flags &= ~TCF_IN_FOR_INIT; - pn = ShiftExpr(cx, ts, tc); + pn = shiftExpr(); while (pn && - (js_MatchToken(cx, ts, TOK_RELOP) || + (tokenStream.matchToken(TOK_RELOP) || /* * Recognize the 'in' token as an operator only if we're not * currently in the init expr of a for loop. */ - (inForInitFlag == 0 && js_MatchToken(cx, ts, TOK_IN)) || - js_MatchToken(cx, ts, TOK_INSTANCEOF))) { - tt = CURRENT_TOKEN(ts).type; - op = CURRENT_TOKEN(ts).t_op; - pn = NewBinary(tt, op, pn, ShiftExpr(cx, ts, tc), tc); + (inForInitFlag == 0 && tokenStream.matchToken(TOK_IN)) || + tokenStream.matchToken(TOK_INSTANCEOF))) { + tt = tokenStream.currentToken().type; + op = tokenStream.currentToken().t_op; + pn = JSParseNode::newBinaryOrAppend(tt, op, pn, shiftExpr(), tc); } /* Restore previous state of inForInit flag. 
*/ tc->flags |= inForInitFlag; @@ -6273,58 +6177,58 @@ RelExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) return pn; } -static JSParseNode * -ShiftExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::shiftExpr() { JSParseNode *pn; JSOp op; - pn = AddExpr(cx, ts, tc); - while (pn && js_MatchToken(cx, ts, TOK_SHOP)) { - op = CURRENT_TOKEN(ts).t_op; - pn = NewBinary(TOK_SHOP, op, pn, AddExpr(cx, ts, tc), tc); + pn = addExpr(); + while (pn && tokenStream.matchToken(TOK_SHOP)) { + op = tokenStream.currentToken().t_op; + pn = JSParseNode::newBinaryOrAppend(TOK_SHOP, op, pn, addExpr(), tc); } return pn; } -static JSParseNode * -AddExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::addExpr() { JSParseNode *pn; - JSTokenType tt; + TokenKind tt; JSOp op; - pn = MulExpr(cx, ts, tc); + pn = mulExpr(); while (pn && - (js_MatchToken(cx, ts, TOK_PLUS) || - js_MatchToken(cx, ts, TOK_MINUS))) { - tt = CURRENT_TOKEN(ts).type; + (tokenStream.matchToken(TOK_PLUS) || + tokenStream.matchToken(TOK_MINUS))) { + tt = tokenStream.currentToken().type; op = (tt == TOK_PLUS) ? JSOP_ADD : JSOP_SUB; - pn = NewBinary(tt, op, pn, MulExpr(cx, ts, tc), tc); + pn = JSParseNode::newBinaryOrAppend(tt, op, pn, mulExpr(), tc); } return pn; } -static JSParseNode * -MulExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::mulExpr() { JSParseNode *pn; - JSTokenType tt; + TokenKind tt; JSOp op; - pn = UnaryExpr(cx, ts, tc); + pn = unaryExpr(); while (pn && - (js_MatchToken(cx, ts, TOK_STAR) || - js_MatchToken(cx, ts, TOK_DIVOP))) { - tt = CURRENT_TOKEN(ts).type; - op = CURRENT_TOKEN(ts).t_op; - pn = NewBinary(tt, op, pn, UnaryExpr(cx, ts, tc), tc); + (tokenStream.matchToken(TOK_STAR) || + tokenStream.matchToken(TOK_DIVOP))) { + tt = tokenStream.currentToken().type; + op = tokenStream.currentToken().t_op; + pn = JSParseNode::newBinaryOrAppend(tt, op, pn, unaryExpr(), tc); } return pn; } static JSParseNode * -SetLvalKid(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, +SetLvalKid(JSContext *cx, TokenStream *ts, JSTreeContext *tc, JSParseNode *pn, JSParseNode *kid, const char *name) { if (kid->pn_type != TOK_NAME && @@ -6335,8 +6239,7 @@ SetLvalKid(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, (kid->pn_type != TOK_UNARYOP || kid->pn_op != JSOP_XMLNAME) && #endif kid->pn_type != TOK_LB) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_BAD_OPERAND, name); + ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, JSMSG_BAD_OPERAND, name); return NULL; } if (!CheckStrictAssignment(cx, tc, kid)) @@ -6348,9 +6251,9 @@ SetLvalKid(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, static const char incop_name_str[][10] = {"increment", "decrement"}; static JSBool -SetIncOpKid(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, +SetIncOpKid(JSContext *cx, TokenStream *ts, JSTreeContext *tc, JSParseNode *pn, JSParseNode *kid, - JSTokenType tt, JSBool preorder) + TokenKind tt, JSBool preorder) { JSOp op; @@ -6395,28 +6298,28 @@ SetIncOpKid(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, return JS_TRUE; } -static JSParseNode * -UnaryExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::unaryExpr() { - JSTokenType tt; + TokenKind tt; JSParseNode *pn, *pn2; - JS_CHECK_RECURSION(cx, return NULL); + JS_CHECK_RECURSION(context, return NULL); - ts->flags |= TSF_OPERAND; - tt = js_GetToken(cx, ts); - ts->flags &= ~TSF_OPERAND; + tokenStream.flags |= TSF_OPERAND; + tt = 
tokenStream.getToken(); + tokenStream.flags &= ~TSF_OPERAND; switch (tt) { case TOK_UNARYOP: case TOK_PLUS: case TOK_MINUS: - pn = NewParseNode(PN_UNARY, tc); + pn = UnaryNode::create(tc); if (!pn) return NULL; pn->pn_type = TOK_UNARYOP; /* PLUS and MINUS are binary */ - pn->pn_op = CURRENT_TOKEN(ts).t_op; - pn2 = UnaryExpr(cx, ts, tc); + pn->pn_op = tokenStream.currentToken().t_op; + pn2 = unaryExpr(); if (!pn2) return NULL; pn->pn_pos.end = pn2->pn_pos.end; @@ -6425,22 +6328,22 @@ UnaryExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) case TOK_INC: case TOK_DEC: - pn = NewParseNode(PN_UNARY, tc); + pn = UnaryNode::create(tc); if (!pn) return NULL; - pn2 = MemberExpr(cx, ts, tc, JS_TRUE); + pn2 = memberExpr(JS_TRUE); if (!pn2) return NULL; - if (!SetIncOpKid(cx, ts, tc, pn, pn2, tt, JS_TRUE)) + if (!SetIncOpKid(context, &tokenStream, tc, pn, pn2, tt, JS_TRUE)) return NULL; pn->pn_pos.end = pn2->pn_pos.end; break; case TOK_DELETE: - pn = NewParseNode(PN_UNARY, tc); + pn = UnaryNode::create(tc); if (!pn) return NULL; - pn2 = UnaryExpr(cx, ts, tc); + pn2 = unaryExpr(); if (!pn2) return NULL; pn->pn_pos.end = pn2->pn_pos.end; @@ -6450,17 +6353,18 @@ UnaryExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) * returns true. Here we fold constants before checking for a call * expression, in order to rule out delete of a generator expression. */ - if (!js_FoldConstants(cx, pn2, tc)) + if (!js_FoldConstants(context, pn2, tc)) return NULL; switch (pn2->pn_type) { case TOK_LP: if (pn2->pn_op != JSOP_SETCALL && - !MakeSetCall(cx, pn2, tc, JSMSG_BAD_DELETE_OPERAND)) { + !MakeSetCall(context, pn2, tc, JSMSG_BAD_DELETE_OPERAND)) { return NULL; } break; case TOK_NAME: - if (!js_ReportStrictModeError(cx, ts, tc, pn, JSMSG_DEPRECATED_DELETE_OPERAND)) + if (!ReportStrictModeError(context, &tokenStream, tc, pn, + JSMSG_DEPRECATED_DELETE_OPERAND)) return NULL; pn2->pn_op = JSOP_DELNAME; break; @@ -6473,22 +6377,22 @@ UnaryExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) return NULL; default: - js_UngetToken(ts); - pn = MemberExpr(cx, ts, tc, JS_TRUE); + tokenStream.ungetToken(); + pn = memberExpr(JS_TRUE); if (!pn) return NULL; /* Don't look across a newline boundary for a postfix incop. */ - if (ON_CURRENT_LINE(ts, pn->pn_pos)) { - ts->flags |= TSF_OPERAND; - tt = js_PeekTokenSameLine(cx, ts); - ts->flags &= ~TSF_OPERAND; + if (tokenStream.onCurrentLine(pn->pn_pos)) { + tokenStream.flags |= TSF_OPERAND; + tt = tokenStream.peekTokenSameLine(); + tokenStream.flags &= ~TSF_OPERAND; if (tt == TOK_INC || tt == TOK_DEC) { - (void) js_GetToken(cx, ts); - pn2 = NewParseNode(PN_UNARY, tc); + (void) tokenStream.getToken(); + pn2 = UnaryNode::create(tc); if (!pn2) return NULL; - if (!SetIncOpKid(cx, ts, tc, pn2, pn, tt, JS_FALSE)) + if (!SetIncOpKid(context, &tokenStream, tc, pn2, pn, tt, JS_FALSE)) return NULL; pn2->pn_pos.begin = pn->pn_pos.begin; pn = pn2; @@ -6679,8 +6583,7 @@ CompExprTransplanter::transplant(JSParseNode *pn) if (dn->pn_pos >= root->pn_pos) { tc->parent->lexdeps.remove(tc->compiler, atom); } else { - JSDefinition *dn2 = (JSDefinition *) - NewNameNode(tc->compiler->context, dn->pn_atom, tc); + JSDefinition *dn2 = (JSDefinition *)NameNode::create(dn->pn_atom, tc); if (!dn2) return false; @@ -6728,21 +6631,18 @@ CompExprTransplanter::transplant(JSParseNode *pn) * comprehension or generator expression, with a unary node as the body of the * (possibly nested) for-loop, initialized by |type, op, kid|. 
*/ -static JSParseNode * -ComprehensionTail(JSParseNode *kid, uintN blockid, JSTreeContext *tc, - JSTokenType type = TOK_SEMI, JSOp op = JSOP_NOP) +JSParseNode * +JSCompiler::comprehensionTail(JSParseNode *kid, uintN blockid, + TokenKind type, JSOp op) { - JSContext *cx = tc->compiler->context; - JSTokenStream *ts = TS(tc->compiler); - uintN adjust; JSParseNode *pn, *pn2, *pn3, **pnp; JSStmtInfo stmtInfo; BindData data; - JSTokenType tt; + TokenKind tt; JSAtom *atom; - JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_FOR); + JS_ASSERT(tokenStream.currentToken().type == TOK_FOR); if (type == TOK_SEMI) { /* @@ -6750,7 +6650,7 @@ ComprehensionTail(JSParseNode *kid, uintN blockid, JSTreeContext *tc, * yields the next value from a for-in loop (possibly nested, and with * optional if guard). Make pn be the TOK_LC body node. */ - pn = PushLexicalScope(cx, ts, tc, &stmtInfo); + pn = PushLexicalScope(context, &tokenStream, tc, &stmtInfo); if (!pn) return NULL; adjust = pn->pn_blockid - blockid; @@ -6759,7 +6659,7 @@ ComprehensionTail(JSParseNode *kid, uintN blockid, JSTreeContext *tc, /* * Make a parse-node and literal object representing the block scope of - * this array comprehension. Our caller in PrimaryExpr, the TOK_LB case + * this array comprehension. Our caller in primaryExpr, the TOK_LB case * aka the array initialiser case, has passed the blockid to claim for * the comprehension's block scope. We allocate that id or one above it * here, by calling js_PushLexicalScope. @@ -6770,7 +6670,7 @@ ComprehensionTail(JSParseNode *kid, uintN blockid, JSTreeContext *tc, * block scope. */ adjust = tc->blockid(); - pn = PushLexicalScope(cx, ts, tc, &stmtInfo); + pn = PushLexicalScope(context, &tokenStream, tc, &stmtInfo); if (!pn) return NULL; @@ -6798,28 +6698,28 @@ ComprehensionTail(JSParseNode *kid, uintN blockid, JSTreeContext *tc, * index to count each block-local let-variable on the left-hand side * of the IN. */ - pn2 = NewParseNode(PN_BINARY, tc); + pn2 = BinaryNode::create(tc); if (!pn2) return NULL; pn2->pn_op = JSOP_ITER; pn2->pn_iflags = JSITER_ENUMERATE; - if (js_MatchToken(cx, ts, TOK_NAME)) { - if (CURRENT_TOKEN(ts).t_atom == cx->runtime->atomState.eachAtom) + if (tokenStream.matchToken(TOK_NAME)) { + if (tokenStream.currentToken().t_atom == context->runtime->atomState.eachAtom) pn2->pn_iflags |= JSITER_FOREACH; else - js_UngetToken(ts); + tokenStream.ungetToken(); } MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_AFTER_FOR); atom = NULL; - tt = js_GetToken(cx, ts); + tt = tokenStream.getToken(); switch (tt) { #if JS_HAS_DESTRUCTURING case TOK_LB: case TOK_LC: tc->flags |= TCF_DECL_DESTRUCTURING; - pn3 = PrimaryExpr(cx, ts, tc, tt, JS_FALSE); + pn3 = primaryExpr(tt, JS_FALSE); tc->flags &= ~TCF_DECL_DESTRUCTURING; if (!pn3) return NULL; @@ -6827,7 +6727,7 @@ ComprehensionTail(JSParseNode *kid, uintN blockid, JSTreeContext *tc, #endif case TOK_NAME: - atom = CURRENT_TOKEN(ts).t_atom; + atom = tokenStream.currentToken().t_atom; /* * Create a name node with pn_op JSOP_NAME. 
We can't set pn_op to @@ -6842,15 +6742,15 @@ ComprehensionTail(JSParseNode *kid, uintN blockid, JSTreeContext *tc, break; default: - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_NO_VARIABLE_NAME); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_NO_VARIABLE_NAME); case TOK_ERROR: return NULL; } MUST_MATCH_TOKEN(TOK_IN, JSMSG_IN_AFTER_FOR_NAME); - JSParseNode *pn4 = Expr(cx, ts, tc); + JSParseNode *pn4 = expr(); if (!pn4) return NULL; MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_FOR_CTRL); @@ -6859,14 +6759,14 @@ ComprehensionTail(JSParseNode *kid, uintN blockid, JSTreeContext *tc, #if JS_HAS_DESTRUCTURING case TOK_LB: case TOK_LC: - if (!CheckDestructuring(cx, &data, pn3, NULL, tc)) + if (!CheckDestructuring(context, &data, pn3, NULL, tc)) return NULL; - if (JSVERSION_NUMBER(cx) == JSVERSION_1_7) { + if (JSVERSION_NUMBER(context) == JSVERSION_1_7) { /* Destructuring requires [key, value] enumeration in JS1.7. */ if (pn3->pn_type != TOK_RB || pn3->pn_count != 2) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_BAD_FOR_LEFTSIDE); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_BAD_FOR_LEFTSIDE); return NULL; } @@ -6880,32 +6780,32 @@ ComprehensionTail(JSParseNode *kid, uintN blockid, JSTreeContext *tc, case TOK_NAME: data.pn = pn3; - if (!data.binder(cx, &data, atom, tc)) + if (!data.binder(context, &data, atom, tc)) return NULL; break; default:; } - pn2->pn_left = NewBinary(TOK_IN, JSOP_NOP, pn3, pn4, tc); + pn2->pn_left = JSParseNode::newBinaryOrAppend(TOK_IN, JSOP_NOP, pn3, pn4, tc); if (!pn2->pn_left) return NULL; *pnp = pn2; pnp = &pn2->pn_right; - } while (js_MatchToken(cx, ts, TOK_FOR)); + } while (tokenStream.matchToken(TOK_FOR)); - if (js_MatchToken(cx, ts, TOK_IF)) { - pn2 = NewParseNode(PN_TERNARY, tc); + if (tokenStream.matchToken(TOK_IF)) { + pn2 = TernaryNode::create(tc); if (!pn2) return NULL; - pn2->pn_kid1 = Condition(cx, ts, tc); + pn2->pn_kid1 = condition(); if (!pn2->pn_kid1) return NULL; *pnp = pn2; pnp = &pn2->pn_kid2; } - pn2 = NewParseNode(PN_UNARY, tc); + pn2 = UnaryNode::create(tc); if (!pn2) return NULL; pn2->pn_type = type; @@ -6925,7 +6825,7 @@ ComprehensionTail(JSParseNode *kid, uintN blockid, JSTreeContext *tc, * generator function that is immediately called to evaluate to the generator * iterator that is the value of this generator expression. * - * Callers pass a blank unary node via pn, which GeneratorExpr fills in as the + * Callers pass a blank unary node via pn, which generatorExpr fills in as the * yield expression, which ComprehensionTail in turn wraps in a TOK_SEMI-type * expression-statement node that constitutes the body of the |for| loop(s) in * the generator function. @@ -6934,8 +6834,8 @@ ComprehensionTail(JSParseNode *kid, uintN blockid, JSTreeContext *tc, * the first |in| in the chain of |for| heads. Instead, a generator expression * is merely sugar for a generator function expression and its application. */ -static JSParseNode * -GeneratorExpr(JSParseNode *pn, JSParseNode *kid, JSTreeContext *tc) +JSParseNode * +JSCompiler::generatorExpr(JSParseNode *pn, JSParseNode *kid) { /* Initialize pn, connecting it to kid. */ JS_ASSERT(pn->pn_arity == PN_UNARY); @@ -6947,7 +6847,7 @@ GeneratorExpr(JSParseNode *pn, JSParseNode *kid, JSTreeContext *tc) pn->pn_hidden = true; /* Make a new node for the desugared generator function. 
*/ - JSParseNode *genfn = NewParseNode(PN_FUNC, tc); + JSParseNode *genfn = FunctionNode::create(tc); if (!genfn) return NULL; genfn->pn_type = TOK_FUNCTION; @@ -6956,19 +6856,21 @@ GeneratorExpr(JSParseNode *pn, JSParseNode *kid, JSTreeContext *tc) genfn->pn_dflags = PND_FUNARG; { + JSTreeContext *outertc = tc; JSTreeContext gentc(tc->compiler); - JSFunctionBox *funbox = EnterFunction(genfn, tc, &gentc); + JSFunctionBox *funbox = EnterFunction(genfn, &gentc); if (!funbox) return NULL; /* - * We have to dance around a bit to propagate sharp variables from tc - * to gentc before setting TCF_HAS_SHARPS implicitly by propagating all - * of tc's TCF_FUN_FLAGS flags. As below, we have to be conservative by - * leaving TCF_HAS_SHARPS set in tc if we do propagate to gentc. + * We have to dance around a bit to propagate sharp variables from + * outertc to gentc before setting TCF_HAS_SHARPS implicitly by + * propagating all of outertc's TCF_FUN_FLAGS flags. As below, we have + * to be conservative by leaving TCF_HAS_SHARPS set in outertc if we + * do propagate to gentc. */ - if (tc->flags & TCF_HAS_SHARPS) { + if (outertc->flags & TCF_HAS_SHARPS) { gentc.flags |= TCF_IN_FUNCTION; if (!gentc.ensureSharpSlots()) return NULL; @@ -6987,15 +6889,15 @@ GeneratorExpr(JSParseNode *pn, JSParseNode *kid, JSTreeContext *tc) genfn->pn_funbox = funbox; genfn->pn_blockid = gentc.bodyid; - JSParseNode *body = ComprehensionTail(pn, tc->blockid(), &gentc); + JSParseNode *body = comprehensionTail(pn, outertc->blockid()); if (!body) return NULL; JS_ASSERT(!genfn->pn_body); genfn->pn_body = body; genfn->pn_pos.begin = body->pn_pos.begin = kid->pn_pos.begin; - genfn->pn_pos.end = body->pn_pos.end = CURRENT_TOKEN(TS(tc->compiler)).pos.end; + genfn->pn_pos.end = body->pn_pos.end = tokenStream.currentToken().pos.end; - if (!LeaveFunction(genfn, &gentc, tc)) + if (!LeaveFunction(genfn, &gentc)) return NULL; } @@ -7003,7 +6905,7 @@ GeneratorExpr(JSParseNode *pn, JSParseNode *kid, JSTreeContext *tc) * Our result is a call expression that invokes the anonymous generator * function object. 
*/ - JSParseNode *result = NewParseNode(PN_LIST, tc); + JSParseNode *result = ListNode::create(tc); if (!result) return NULL; result->pn_type = TOK_LP; @@ -7018,53 +6920,52 @@ static const char js_generator_str[] = "generator"; #endif /* JS_HAS_GENERATOR_EXPRS */ #endif /* JS_HAS_GENERATORS */ -static JSBool -ArgumentList(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, - JSParseNode *listNode) +JSBool +JSCompiler::argumentList(JSParseNode *listNode) { JSBool matched; - ts->flags |= TSF_OPERAND; - matched = js_MatchToken(cx, ts, TOK_RP); - ts->flags &= ~TSF_OPERAND; + tokenStream.flags |= TSF_OPERAND; + matched = tokenStream.matchToken(TOK_RP); + tokenStream.flags &= ~TSF_OPERAND; if (!matched) { do { - JSParseNode *argNode = AssignExpr(cx, ts, tc); + JSParseNode *argNode = assignExpr(); if (!argNode) return JS_FALSE; #if JS_HAS_GENERATORS if (argNode->pn_type == TOK_YIELD && !argNode->pn_parens && - js_PeekToken(cx, ts) == TOK_COMMA) { - js_ReportCompileErrorNumber(cx, ts, argNode, JSREPORT_ERROR, - JSMSG_BAD_GENERATOR_SYNTAX, - js_yield_str); + tokenStream.peekToken() == TOK_COMMA) { + ReportCompileErrorNumber(context, &tokenStream, argNode, JSREPORT_ERROR, + JSMSG_BAD_GENERATOR_SYNTAX, + js_yield_str); return JS_FALSE; } #endif #if JS_HAS_GENERATOR_EXPRS - if (js_MatchToken(cx, ts, TOK_FOR)) { - JSParseNode *pn = NewParseNode(PN_UNARY, tc); + if (tokenStream.matchToken(TOK_FOR)) { + JSParseNode *pn = UnaryNode::create(tc); if (!pn) return JS_FALSE; - argNode = GeneratorExpr(pn, argNode, tc); + argNode = generatorExpr(pn, argNode); if (!argNode) return JS_FALSE; if (listNode->pn_count > 1 || - js_PeekToken(cx, ts) == TOK_COMMA) { - js_ReportCompileErrorNumber(cx, ts, argNode, JSREPORT_ERROR, - JSMSG_BAD_GENERATOR_SYNTAX, - js_generator_str); + tokenStream.peekToken() == TOK_COMMA) { + ReportCompileErrorNumber(context, &tokenStream, argNode, JSREPORT_ERROR, + JSMSG_BAD_GENERATOR_SYNTAX, + js_generator_str); return JS_FALSE; } } #endif listNode->append(argNode); - } while (js_MatchToken(cx, ts, TOK_COMMA)); + } while (tokenStream.matchToken(TOK_COMMA)); - if (js_GetToken(cx, ts) != TOK_RP) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_PAREN_AFTER_ARGS); + if (tokenStream.getToken() != TOK_RP) { + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_PAREN_AFTER_ARGS); return JS_FALSE; } } @@ -7086,24 +6987,22 @@ CheckForImmediatelyAppliedLambda(JSParseNode *pn) return pn; } -static JSParseNode * -MemberExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, - JSBool allowCallSyntax) +JSParseNode * +JSCompiler::memberExpr(JSBool allowCallSyntax) { JSParseNode *pn, *pn2, *pn3; - JSTokenType tt; - JS_CHECK_RECURSION(cx, return NULL); + JS_CHECK_RECURSION(context, return NULL); /* Check for new expression first. 
*/ - ts->flags |= TSF_OPERAND; - tt = js_GetToken(cx, ts); - ts->flags &= ~TSF_OPERAND; + tokenStream.flags |= TSF_OPERAND; + TokenKind tt = tokenStream.getToken(); + tokenStream.flags &= ~TSF_OPERAND; if (tt == TOK_NEW) { - pn = NewParseNode(PN_LIST, tc); + pn = ListNode::create(tc); if (!pn) return NULL; - pn2 = MemberExpr(cx, ts, tc, JS_FALSE); + pn2 = memberExpr(JS_FALSE); if (!pn2) return NULL; pn2 = CheckForImmediatelyAppliedLambda(pn2); @@ -7111,16 +7010,16 @@ MemberExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, pn->initList(pn2); pn->pn_pos.begin = pn2->pn_pos.begin; - if (js_MatchToken(cx, ts, TOK_LP) && !ArgumentList(cx, ts, tc, pn)) + if (tokenStream.matchToken(TOK_LP) && !argumentList(pn)) return NULL; if (pn->pn_count > ARGC_LIMIT) { - JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, + JS_ReportErrorNumber(context, js_GetErrorMessage, NULL, JSMSG_TOO_MANY_CON_ARGS); return NULL; } pn->pn_pos.end = pn->last()->pn_pos.end; } else { - pn = PrimaryExpr(cx, ts, tc, tt, JS_FALSE); + pn = primaryExpr(tt, JS_FALSE); if (!pn) return NULL; @@ -7140,16 +7039,16 @@ MemberExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, } } - while ((tt = js_GetToken(cx, ts)) > TOK_EOF) { + while ((tt = tokenStream.getToken()) > TOK_EOF) { if (tt == TOK_DOT) { - pn2 = NewNameNode(cx, NULL, tc); + pn2 = NameNode::create(NULL, tc); if (!pn2) return NULL; #if JS_HAS_XML_SUPPORT - ts->flags |= TSF_OPERAND | TSF_KEYWORD_IS_NAME; - tt = js_GetToken(cx, ts); - ts->flags &= ~(TSF_OPERAND | TSF_KEYWORD_IS_NAME); - pn3 = PrimaryExpr(cx, ts, tc, tt, JS_TRUE); + tokenStream.flags |= TSF_OPERAND | TSF_KEYWORD_IS_NAME; + tt = tokenStream.getToken(); + tokenStream.flags &= ~(TSF_OPERAND | TSF_KEYWORD_IS_NAME); + pn3 = primaryExpr(tt, JS_TRUE); if (!pn3) return NULL; @@ -7166,12 +7065,12 @@ MemberExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, /* A filtering predicate is like a with statement. 
*/ tc->flags |= TCF_FUN_HEAVYWEIGHT; - } else if (TOKEN_TYPE_IS_XML(PN_TYPE(pn3))) { + } else if (TokenKindIsXML(PN_TYPE(pn3))) { pn2->pn_type = TOK_LB; pn2->pn_op = JSOP_GETELEM; } else { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_NAME_AFTER_DOT); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_NAME_AFTER_DOT); return NULL; } pn2->pn_arity = PN_BINARY; @@ -7179,24 +7078,24 @@ MemberExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, pn2->pn_right = pn3; } #else - ts->flags |= TSF_KEYWORD_IS_NAME; + tokenStream.flags |= TSF_KEYWORD_IS_NAME; MUST_MATCH_TOKEN(TOK_NAME, JSMSG_NAME_AFTER_DOT); - ts->flags &= ~TSF_KEYWORD_IS_NAME; + tokenStream.flags &= ~TSF_KEYWORD_IS_NAME; pn2->pn_op = JSOP_GETPROP; pn2->pn_expr = pn; - pn2->pn_atom = CURRENT_TOKEN(ts).t_atom; + pn2->pn_atom = tokenStream.currentToken().t_atom; #endif pn2->pn_pos.begin = pn->pn_pos.begin; - pn2->pn_pos.end = CURRENT_TOKEN(ts).pos.end; + pn2->pn_pos.end = tokenStream.currentToken().pos.end; #if JS_HAS_XML_SUPPORT } else if (tt == TOK_DBLDOT) { - pn2 = NewParseNode(PN_BINARY, tc); + pn2 = BinaryNode::create(tc); if (!pn2) return NULL; - ts->flags |= TSF_OPERAND | TSF_KEYWORD_IS_NAME; - tt = js_GetToken(cx, ts); - ts->flags &= ~(TSF_OPERAND | TSF_KEYWORD_IS_NAME); - pn3 = PrimaryExpr(cx, ts, tc, tt, JS_TRUE); + tokenStream.flags |= TSF_OPERAND | TSF_KEYWORD_IS_NAME; + tt = tokenStream.getToken(); + tokenStream.flags &= ~(TSF_OPERAND | TSF_KEYWORD_IS_NAME); + pn3 = primaryExpr(tt, JS_TRUE); if (!pn3) return NULL; tt = PN_TYPE(pn3); @@ -7204,28 +7103,28 @@ MemberExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, pn3->pn_type = TOK_STRING; pn3->pn_arity = PN_NULLARY; pn3->pn_op = JSOP_QNAMEPART; - } else if (!TOKEN_TYPE_IS_XML(tt)) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_NAME_AFTER_DOT); + } else if (!TokenKindIsXML(tt)) { + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_NAME_AFTER_DOT); return NULL; } pn2->pn_op = JSOP_DESCENDANTS; pn2->pn_left = pn; pn2->pn_right = pn3; pn2->pn_pos.begin = pn->pn_pos.begin; - pn2->pn_pos.end = CURRENT_TOKEN(ts).pos.end; + pn2->pn_pos.end = tokenStream.currentToken().pos.end; #endif } else if (tt == TOK_LB) { - pn2 = NewParseNode(PN_BINARY, tc); + pn2 = BinaryNode::create(tc); if (!pn2) return NULL; - pn3 = Expr(cx, ts, tc); + pn3 = expr(); if (!pn3) return NULL; MUST_MATCH_TOKEN(TOK_RB, JSMSG_BRACKET_IN_INDEX); pn2->pn_pos.begin = pn->pn_pos.begin; - pn2->pn_pos.end = CURRENT_TOKEN(ts).pos.end; + pn2->pn_pos.end = tokenStream.currentToken().pos.end; /* * Optimize o['p'] to o.p by rewriting pn2, but avoid rewriting @@ -7255,21 +7154,21 @@ MemberExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, pn2->pn_right = pn3; } while (0); } else if (allowCallSyntax && tt == TOK_LP) { - pn2 = NewParseNode(PN_LIST, tc); + pn2 = ListNode::create(tc); if (!pn2) return NULL; pn2->pn_op = JSOP_CALL; pn = CheckForImmediatelyAppliedLambda(pn); if (pn->pn_op == JSOP_NAME) { - if (pn->pn_atom == cx->runtime->atomState.evalAtom) { + if (pn->pn_atom == context->runtime->atomState.evalAtom) { /* Select JSOP_EVAL and flag tc as heavyweight. 
*/ pn2->pn_op = JSOP_EVAL; tc->flags |= TCF_FUN_HEAVYWEIGHT; } } else if (pn->pn_op == JSOP_GETPROP) { - if (pn->pn_atom == cx->runtime->atomState.applyAtom || - pn->pn_atom == cx->runtime->atomState.callAtom) { + if (pn->pn_atom == context->runtime->atomState.applyAtom || + pn->pn_atom == context->runtime->atomState.callAtom) { /* Select JSOP_APPLY given foo.apply(...). */ pn2->pn_op = JSOP_APPLY; } @@ -7278,16 +7177,16 @@ MemberExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, pn2->initList(pn); pn2->pn_pos.begin = pn->pn_pos.begin; - if (!ArgumentList(cx, ts, tc, pn2)) + if (!argumentList(pn2)) return NULL; if (pn2->pn_count > ARGC_LIMIT) { - JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, + JS_ReportErrorNumber(context, js_GetErrorMessage, NULL, JSMSG_TOO_MANY_FUN_ARGS); return NULL; } - pn2->pn_pos.end = CURRENT_TOKEN(ts).pos.end; + pn2->pn_pos.end = tokenStream.currentToken().pos.end; } else { - js_UngetToken(ts); + tokenStream.ungetToken(); return pn; } @@ -7298,8 +7197,8 @@ MemberExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, return pn; } -static JSParseNode * -BracketedExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::bracketedExpr() { uintN oldflags; JSParseNode *pn; @@ -7311,19 +7210,19 @@ BracketedExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) */ oldflags = tc->flags; tc->flags &= ~TCF_IN_FOR_INIT; - pn = Expr(cx, ts, tc); + pn = expr(); tc->flags = oldflags | (tc->flags & TCF_FUN_FLAGS); return pn; } #if JS_HAS_XML_SUPPORT -static JSParseNode * -EndBracketedExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::endBracketedExpr() { JSParseNode *pn; - pn = BracketedExpr(cx, ts, tc); + pn = bracketedExpr(); if (!pn) return NULL; @@ -7382,37 +7281,36 @@ EndBracketedExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) * We hoist the :: match into callers of QualifiedSuffix, in order to tweak * PropertySelector vs. Identifier pn_arity, pn_op, and other members. 
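
The TCF_IN_FOR_INIT handling in bracketedExpr above is subtle: the flag must be cleared for the bracketed subexpression, yet any function flags the nested parse sets must survive the restore. A minimal standalone sketch of the same idiom, using stand-in names rather than the real TCF_* constants:

```cpp
// Sketch only: FLAG_IN_FOR_INIT and FLAG_FUN_BITS stand in for
// TCF_IN_FOR_INIT and the TCF_FUN_FLAGS mask; exprSketch() stands in for
// JSCompiler::expr().
enum {
    FLAG_IN_FOR_INIT = 0x1,
    FLAG_FUN_BITS    = 0x6
};

struct TreeContextSketch {
    unsigned flags;
};

static int exprSketch(TreeContextSketch *tc)
{
    tc->flags |= 0x2;   // pretend the nested parse set a function flag
    return 1;           // stand-in for a parse-node pointer
}

static int bracketedExprSketch(TreeContextSketch *tc)
{
    unsigned oldflags = tc->flags;
    tc->flags &= ~FLAG_IN_FOR_INIT;     // brackets end the for-init ambiguity
    int pn = exprSketch(tc);
    // Restore the saved flags, but keep function flags set underneath.
    tc->flags = oldflags | (tc->flags & FLAG_FUN_BITS);
    return pn;
}
```
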
*/ -static JSParseNode * -PropertySelector(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::propertySelector() { JSParseNode *pn; - pn = NewParseNode(PN_NULLARY, tc); + pn = NullaryNode::create(tc); if (!pn) return NULL; if (pn->pn_type == TOK_STAR) { pn->pn_type = TOK_ANYNAME; pn->pn_op = JSOP_ANYNAME; - pn->pn_atom = cx->runtime->atomState.starAtom; + pn->pn_atom = context->runtime->atomState.starAtom; } else { JS_ASSERT(pn->pn_type == TOK_NAME); pn->pn_op = JSOP_QNAMEPART; pn->pn_arity = PN_NAME; - pn->pn_atom = CURRENT_TOKEN(ts).t_atom; + pn->pn_atom = tokenStream.currentToken().t_atom; pn->pn_cookie = FREE_UPVAR_COOKIE; } return pn; } -static JSParseNode * -QualifiedSuffix(JSContext *cx, JSTokenStream *ts, JSParseNode *pn, - JSTreeContext *tc) +JSParseNode * +JSCompiler::qualifiedSuffix(JSParseNode *pn) { JSParseNode *pn2, *pn3; - JSTokenType tt; + TokenKind tt; - JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_DBLCOLON); - pn2 = NewNameNode(cx, NULL, tc); + JS_ASSERT(tokenStream.currentToken().type == TOK_DBLCOLON); + pn2 = NameNode::create(NULL, tc); if (!pn2) return NULL; @@ -7420,27 +7318,27 @@ QualifiedSuffix(JSContext *cx, JSTokenStream *ts, JSParseNode *pn, if (pn->pn_op == JSOP_QNAMEPART) pn->pn_op = JSOP_NAME; - ts->flags |= TSF_KEYWORD_IS_NAME; - tt = js_GetToken(cx, ts); - ts->flags &= ~TSF_KEYWORD_IS_NAME; + tokenStream.flags |= TSF_KEYWORD_IS_NAME; + tt = tokenStream.getToken(); + tokenStream.flags &= ~TSF_KEYWORD_IS_NAME; if (tt == TOK_STAR || tt == TOK_NAME) { - /* Inline and specialize PropertySelector for JSOP_QNAMECONST. */ + /* Inline and specialize propertySelector for JSOP_QNAMECONST. */ pn2->pn_op = JSOP_QNAMECONST; pn2->pn_pos.begin = pn->pn_pos.begin; pn2->pn_atom = (tt == TOK_STAR) - ? cx->runtime->atomState.starAtom - : CURRENT_TOKEN(ts).t_atom; + ? context->runtime->atomState.starAtom + : tokenStream.currentToken().t_atom; pn2->pn_expr = pn; pn2->pn_cookie = FREE_UPVAR_COOKIE; return pn2; } if (tt != TOK_LB) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_SYNTAX_ERROR); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_SYNTAX_ERROR); return NULL; } - pn3 = EndBracketedExpr(cx, ts, tc); + pn3 = endBracketedExpr(); if (!pn3) return NULL; @@ -7453,43 +7351,43 @@ QualifiedSuffix(JSContext *cx, JSTokenStream *ts, JSParseNode *pn, return pn2; } -static JSParseNode * -QualifiedIdentifier(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::qualifiedIdentifier() { JSParseNode *pn; - pn = PropertySelector(cx, ts, tc); + pn = propertySelector(); if (!pn) return NULL; - if (js_MatchToken(cx, ts, TOK_DBLCOLON)) { + if (tokenStream.matchToken(TOK_DBLCOLON)) { /* Hack for bug 496316. Slowing down E4X won't make it go away, alas. 
*/ tc->flags |= TCF_FUN_HEAVYWEIGHT; - pn = QualifiedSuffix(cx, ts, pn, tc); + pn = qualifiedSuffix(pn); } return pn; } -static JSParseNode * -AttributeIdentifier(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::attributeIdentifier() { JSParseNode *pn, *pn2; - JSTokenType tt; + TokenKind tt; - JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_AT); - pn = NewParseNode(PN_UNARY, tc); + JS_ASSERT(tokenStream.currentToken().type == TOK_AT); + pn = UnaryNode::create(tc); if (!pn) return NULL; pn->pn_op = JSOP_TOATTRNAME; - ts->flags |= TSF_KEYWORD_IS_NAME; - tt = js_GetToken(cx, ts); - ts->flags &= ~TSF_KEYWORD_IS_NAME; + tokenStream.flags |= TSF_KEYWORD_IS_NAME; + tt = tokenStream.getToken(); + tokenStream.flags &= ~TSF_KEYWORD_IS_NAME; if (tt == TOK_STAR || tt == TOK_NAME) { - pn2 = QualifiedIdentifier(cx, ts, tc); + pn2 = qualifiedIdentifier(); } else if (tt == TOK_LB) { - pn2 = EndBracketedExpr(cx, ts, tc); + pn2 = endBracketedExpr(); } else { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_SYNTAX_ERROR); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_SYNTAX_ERROR); return NULL; } if (!pn2) @@ -7501,14 +7399,14 @@ AttributeIdentifier(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) /* * Make a TOK_LC unary node whose pn_kid is an expression. */ -static JSParseNode * -XMLExpr(JSContext *cx, JSTokenStream *ts, JSBool inTag, JSTreeContext *tc) +JSParseNode * +JSCompiler::xmlExpr(JSBool inTag) { JSParseNode *pn, *pn2; uintN oldflag; - JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_LC); - pn = NewParseNode(PN_UNARY, tc); + JS_ASSERT(tokenStream.currentToken().type == TOK_LC); + pn = UnaryNode::create(tc); if (!pn) return NULL; @@ -7518,14 +7416,14 @@ XMLExpr(JSContext *cx, JSTokenStream *ts, JSBool inTag, JSTreeContext *tc) * within text contained in an element, but outside of any start, end, or * point tag. */ - oldflag = ts->flags & TSF_XMLTAGMODE; - ts->flags &= ~TSF_XMLTAGMODE; - pn2 = Expr(cx, ts, tc); + oldflag = tokenStream.flags & TSF_XMLTAGMODE; + tokenStream.flags &= ~TSF_XMLTAGMODE; + pn2 = expr(); if (!pn2) return NULL; MUST_MATCH_TOKEN(TOK_RC, JSMSG_CURLY_IN_XML_EXPR); - ts->flags |= oldflag; + tokenStream.flags |= oldflag; pn->pn_kid = pn2; pn->pn_op = inTag ? JSOP_XMLTAGEXPR : JSOP_XMLELTEXPR; return pn; @@ -7537,16 +7435,13 @@ XMLExpr(JSContext *cx, JSTokenStream *ts, JSBool inTag, JSTreeContext *tc) * parse tree to XML, we preserve a TOK_XMLSPACE node only if it's the sole * child of a container tag. */ -static JSParseNode * -XMLAtomNode(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::xmlAtomNode() { - JSParseNode *pn; - JSToken *tp; - - pn = NewParseNode(PN_NULLARY, tc); + JSParseNode *pn = NullaryNode::create(tc); if (!pn) return NULL; - tp = &CURRENT_TOKEN(ts); + Token *tp = tokenStream.mutableCurrentToken(); pn->pn_op = tp->t_op; pn->pn_atom = tp->t_atom; if (tp->type == TOK_XMLPI) @@ -7566,22 +7461,22 @@ XMLAtomNode(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) * If PN_LIST or PN_NULLARY, pn_type will be TOK_XMLNAME; if PN_UNARY, pn_type * will be TOK_LC. 
*/ -static JSParseNode * -XMLNameExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) +JSParseNode * +JSCompiler::xmlNameExpr() { JSParseNode *pn, *pn2, *list; - JSTokenType tt; + TokenKind tt; pn = list = NULL; do { - tt = CURRENT_TOKEN(ts).type; + tt = tokenStream.currentToken().type; if (tt == TOK_LC) { - pn2 = XMLExpr(cx, ts, JS_TRUE, tc); + pn2 = xmlExpr(JS_TRUE); if (!pn2) return NULL; } else { JS_ASSERT(tt == TOK_XMLNAME); - pn2 = XMLAtomNode(cx, ts, tc); + pn2 = xmlAtomNode(); if (!pn2) return NULL; } @@ -7590,7 +7485,7 @@ XMLNameExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) pn = pn2; } else { if (!list) { - list = NewParseNode(PN_LIST, tc); + list = ListNode::create(tc); if (!list) return NULL; list->pn_type = TOK_XMLNAME; @@ -7602,9 +7497,9 @@ XMLNameExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) pn->pn_pos.end = pn2->pn_pos.end; pn->append(pn2); } - } while ((tt = js_GetToken(cx, ts)) == TOK_XMLNAME || tt == TOK_LC); + } while ((tt = tokenStream.getToken()) == TOK_XMLNAME || tt == TOK_LC); - js_UngetToken(ts); + tokenStream.ungetToken(); return pn; } @@ -7633,31 +7528,30 @@ XMLNameExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc) * PN_LIST, pn_type will be tagtype. If PN_UNARY, pn_type will be TOK_LC and * we parsed exactly one expression. */ -static JSParseNode * -XMLTagContent(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, - JSTokenType tagtype, JSAtom **namep) +JSParseNode * +JSCompiler::xmlTagContent(TokenKind tagtype, JSAtom **namep) { JSParseNode *pn, *pn2, *list; - JSTokenType tt; + TokenKind tt; - pn = XMLNameExpr(cx, ts, tc); + pn = xmlNameExpr(); if (!pn) return NULL; *namep = (pn->pn_arity == PN_NULLARY) ? pn->pn_atom : NULL; list = NULL; - while (js_MatchToken(cx, ts, TOK_XMLSPACE)) { - tt = js_GetToken(cx, ts); + while (tokenStream.matchToken(TOK_XMLSPACE)) { + tt = tokenStream.getToken(); if (tt != TOK_XMLNAME && tt != TOK_LC) { - js_UngetToken(ts); + tokenStream.ungetToken(); break; } - pn2 = XMLNameExpr(cx, ts, tc); + pn2 = xmlNameExpr(); if (!pn2) return NULL; if (!list) { - list = NewParseNode(PN_LIST, tc); + list = ListNode::create(tc); if (!list) return NULL; list->pn_type = tagtype; @@ -7669,19 +7563,19 @@ XMLTagContent(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, if (!XML_FOLDABLE(pn2)) pn->pn_xflags |= PNX_CANTFOLD; - js_MatchToken(cx, ts, TOK_XMLSPACE); + tokenStream.matchToken(TOK_XMLSPACE); MUST_MATCH_TOKEN(TOK_ASSIGN, JSMSG_NO_ASSIGN_IN_XML_ATTR); - js_MatchToken(cx, ts, TOK_XMLSPACE); + tokenStream.matchToken(TOK_XMLSPACE); - tt = js_GetToken(cx, ts); + tt = tokenStream.getToken(); if (tt == TOK_XMLATTR) { - pn2 = XMLAtomNode(cx, ts, tc); + pn2 = xmlAtomNode(); } else if (tt == TOK_LC) { - pn2 = XMLExpr(cx, ts, JS_TRUE, tc); + pn2 = xmlExpr(JS_TRUE); pn->pn_xflags |= PNX_CANTFOLD; } else { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_BAD_XML_ATTR_VALUE); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_BAD_XML_ATTR_VALUE); return NULL; } if (!pn2) @@ -7693,63 +7587,58 @@ XMLTagContent(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, return pn; } -#define XML_CHECK_FOR_ERROR_AND_EOF(tt,result) \ - JS_BEGIN_MACRO \ - if ((tt) <= TOK_EOF) { \ - if ((tt) == TOK_EOF) { \ - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, \ - JSMSG_END_OF_XML_SOURCE); \ - } \ - return result; \ - } \ +#define XML_CHECK_FOR_ERROR_AND_EOF(tt,result) \ + JS_BEGIN_MACRO \ + if ((tt) <= TOK_EOF) { \ + if ((tt) == TOK_EOF) { \ + ReportCompileErrorNumber(context, 
&tokenStream, NULL, JSREPORT_ERROR,         \
+                                     JSMSG_END_OF_XML_SOURCE);              \
+        }                                                                   \
+        return result;                                                      \
+    }                                                                       \
     JS_END_MACRO

-static JSParseNode *
-XMLElementOrList(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
-                 JSBool allowList);
-
 /*
  * Consume XML element tag content, including the TOK_XMLETAGO (</) sequence
  * that precedes the end tag.
  */
-static JSBool
-XMLElementContent(JSContext *cx, JSTokenStream *ts, JSParseNode *pn,
-                  JSTreeContext *tc)
+JSBool
+JSCompiler::xmlElementContent(JSParseNode *pn)
 {
-    JSTokenType tt;
+    TokenKind tt;
     JSParseNode *pn2;
     JSAtom *textAtom;

-    ts->flags &= ~TSF_XMLTAGMODE;
+    tokenStream.flags &= ~TSF_XMLTAGMODE;
     for (;;) {
-        ts->flags |= TSF_XMLTEXTMODE;
-        tt = js_GetToken(cx, ts);
-        ts->flags &= ~TSF_XMLTEXTMODE;
+        tokenStream.flags |= TSF_XMLTEXTMODE;
+        tt = tokenStream.getToken();
+        tokenStream.flags &= ~TSF_XMLTEXTMODE;
         XML_CHECK_FOR_ERROR_AND_EOF(tt, JS_FALSE);

         JS_ASSERT(tt == TOK_XMLSPACE || tt == TOK_XMLTEXT);
-        textAtom = CURRENT_TOKEN(ts).t_atom;
+        textAtom = tokenStream.currentToken().t_atom;
         if (textAtom) {
             /* Non-zero-length XML text scanned. */
-            pn2 = XMLAtomNode(cx, ts, tc);
+            pn2 = xmlAtomNode();
             if (!pn2)
                 return JS_FALSE;
             pn->pn_pos.end = pn2->pn_pos.end;
             pn->append(pn2);
         }

-        ts->flags |= TSF_OPERAND;
-        tt = js_GetToken(cx, ts);
-        ts->flags &= ~TSF_OPERAND;
+        tokenStream.flags |= TSF_OPERAND;
+        tt = tokenStream.getToken();
+        tokenStream.flags &= ~TSF_OPERAND;
         XML_CHECK_FOR_ERROR_AND_EOF(tt, JS_FALSE);
         if (tt == TOK_XMLETAGO)
             break;

         if (tt == TOK_LC) {
-            pn2 = XMLExpr(cx, ts, JS_FALSE, tc);
+            pn2 = xmlExpr(JS_FALSE);
             pn->pn_xflags |= PNX_CANTFOLD;
         } else if (tt == TOK_XMLSTAGO) {
-            pn2 = XMLElementOrList(cx, ts, tc, JS_FALSE);
+            pn2 = xmlElementOrList(JS_FALSE);
             if (pn2) {
                 pn2->pn_xflags &= ~PNX_XMLROOT;
                 pn->pn_xflags |= pn2->pn_xflags;
@@ -7757,7 +7646,7 @@ XMLElementContent(JSContext *cx, JSTokenStream *ts, JSParseNode *pn,
         } else {
             JS_ASSERT(tt == TOK_XMLCDATA || tt == TOK_XMLCOMMENT ||
                       tt == TOK_XMLPI);
-            pn2 = XMLAtomNode(cx, ts, tc);
+            pn2 = xmlAtomNode();
         }

         if (!pn2)
             return JS_FALSE;
@@ -7765,31 +7654,30 @@ XMLElementContent(JSContext *cx, JSTokenStream *ts, JSParseNode *pn,
         pn->append(pn2);
     }

-    JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_XMLETAGO);
-    ts->flags |= TSF_XMLTAGMODE;
+    JS_ASSERT(tokenStream.currentToken().type == TOK_XMLETAGO);
+    tokenStream.flags |= TSF_XMLTAGMODE;
     return JS_TRUE;
 }

 /*
  * Return a PN_LIST node containing an XML or XMLList Initialiser.
  */
-static JSParseNode *
-XMLElementOrList(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
-                 JSBool allowList)
+JSParseNode *
+JSCompiler::xmlElementOrList(JSBool allowList)
 {
     JSParseNode *pn, *pn2, *list;
-    JSTokenType tt;
+    TokenKind tt;
     JSAtom *startAtom, *endAtom;

-    JS_CHECK_RECURSION(cx, return NULL);
+    JS_CHECK_RECURSION(context, return NULL);

-    JS_ASSERT(CURRENT_TOKEN(ts).type == TOK_XMLSTAGO);
-    pn = NewParseNode(PN_LIST, tc);
+    JS_ASSERT(tokenStream.currentToken().type == TOK_XMLSTAGO);
+    pn = ListNode::create(tc);
     if (!pn)
         return NULL;

-    ts->flags |= TSF_XMLTAGMODE;
-    tt = js_GetToken(cx, ts);
+    tokenStream.flags |= TSF_XMLTAGMODE;
+    tt = tokenStream.getToken();
     if (tt == TOK_ERROR)
         return NULL;

@@ -7797,12 +7685,12 @@ XMLElementOrList(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
         /*
          * XMLElement. Append the tag and its contents, if any, to pn.
          */
-        pn2 = XMLTagContent(cx, ts, tc, TOK_XMLSTAGO, &startAtom);
+        pn2 = xmlTagContent(TOK_XMLSTAGO, &startAtom);
         if (!pn2)
             return NULL;
-        js_MatchToken(cx, ts, TOK_XMLSPACE);
+        tokenStream.matchToken(TOK_XMLSPACE);

-        tt = js_GetToken(cx, ts);
+        tt = tokenStream.getToken();
         if (tt == TOK_XMLPTAGC) {
             /* Point tag (/>): recycle pn if pn2 is a list of tag contents.
*/ if (pn2->pn_type == TOK_XMLSTAGO) { @@ -7821,11 +7709,11 @@ XMLElementOrList(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, } else { /* We had better have a tag-close (>) at this point. */ if (tt != TOK_XMLTAGC) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_BAD_XML_TAG_SYNTAX); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_BAD_XML_TAG_SYNTAX); return NULL; } - pn2->pn_pos.end = CURRENT_TOKEN(ts).pos.end; + pn2->pn_pos.end = tokenStream.currentToken().pos.end; /* Make sure pn2 is a TOK_XMLSTAGO list containing tag contents. */ if (pn2->pn_type != TOK_XMLSTAGO) { @@ -7833,7 +7721,7 @@ XMLElementOrList(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, if (!XML_FOLDABLE(pn2)) pn->pn_xflags |= PNX_CANTFOLD; pn2 = pn; - pn = NewParseNode(PN_LIST, tc); + pn = ListNode::create(tc); if (!pn) return NULL; } @@ -7847,41 +7735,39 @@ XMLElementOrList(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc, pn->pn_xflags |= PNX_XMLROOT; /* Get element contents and delimiting end-tag-open sequence. */ - if (!XMLElementContent(cx, ts, pn, tc)) + if (!xmlElementContent(pn)) return NULL; - tt = js_GetToken(cx, ts); + tt = tokenStream.getToken(); XML_CHECK_FOR_ERROR_AND_EOF(tt, NULL); if (tt != TOK_XMLNAME && tt != TOK_LC) { - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_BAD_XML_TAG_SYNTAX); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_BAD_XML_TAG_SYNTAX); return NULL; } /* Parse end tag; check mismatch at compile-time if we can. */ - pn2 = XMLTagContent(cx, ts, tc, TOK_XMLETAGO, &endAtom); + pn2 = xmlTagContent(TOK_XMLETAGO, &endAtom); if (!pn2) return NULL; if (pn2->pn_type == TOK_XMLETAGO) { /* Oops, end tag has attributes! */ - js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR, - JSMSG_BAD_XML_TAG_SYNTAX); + ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR, + JSMSG_BAD_XML_TAG_SYNTAX); return NULL; } if (endAtom && startAtom && endAtom != startAtom) { JSString *str = ATOM_TO_STRING(startAtom); /* End vs. start tag name mismatch: point to the tag name. */ - js_ReportCompileErrorNumber(cx, ts, pn2, - JSREPORT_UC | JSREPORT_ERROR, - JSMSG_XML_TAG_NAME_MISMATCH, - str->chars()); + ReportCompileErrorNumber(context, &tokenStream, pn2, JSREPORT_UC | JSREPORT_ERROR, + JSMSG_XML_TAG_NAME_MISMATCH, str->chars()); return NULL; } /* Make a TOK_XMLETAGO list with pn2 as its single child. 
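
The compile-time mismatch check in the hunk above (endAtom && startAtom && endAtom != startAtom) works because tag names are interned atoms: equal spellings yield the same pointer. A minimal sketch of why one pointer compare suffices, with a hypothetical intern table standing in for SpiderMonkey's JSAtom machinery:

```cpp
#include <cassert>
#include <set>
#include <string>

// Hypothetical intern table; equal spellings map to one canonical object.
static const std::string *intern(const std::string &s)
{
    static std::set<std::string> table;
    return &*table.insert(s).first;
}

int main()
{
    const std::string *startAtom = intern("widget");
    const std::string *endAtom = intern("widget");
    assert(startAtom == endAtom);                  // same name, same atom
    assert(intern("widget") != intern("gadget"));  // mismatch is one compare
    return 0;
}
```
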
*/
         JS_ASSERT(pn2->pn_type == TOK_XMLNAME || pn2->pn_type == TOK_LC);
-        list = NewParseNode(PN_LIST, tc);
+        list = ListNode::create(tc);
         if (!list)
             return NULL;
         list->pn_type = TOK_XMLETAGO;
@@ -7892,7 +7778,7 @@ XMLElementOrList(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
             pn->pn_xflags |= PNX_CANTFOLD;
         }

-        js_MatchToken(cx, ts, TOK_XMLSPACE);
+        tokenStream.matchToken(TOK_XMLSPACE);
         MUST_MATCH_TOKEN(TOK_XMLTAGC, JSMSG_BAD_XML_TAG_SYNTAX);
     }

@@ -7904,24 +7790,23 @@ XMLElementOrList(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
         pn->pn_op = JSOP_TOXMLLIST;
         pn->makeEmpty();
         pn->pn_xflags |= PNX_XMLROOT;
-        if (!XMLElementContent(cx, ts, pn, tc))
+        if (!xmlElementContent(pn))
             return NULL;

         MUST_MATCH_TOKEN(TOK_XMLTAGC, JSMSG_BAD_XML_LIST_SYNTAX);
     } else {
-        js_ReportCompileErrorNumber(cx, ts, NULL, JSREPORT_ERROR,
-                                    JSMSG_BAD_XML_NAME_SYNTAX);
+        ReportCompileErrorNumber(context, &tokenStream, NULL, JSREPORT_ERROR,
+                                 JSMSG_BAD_XML_NAME_SYNTAX);
         return NULL;
     }

-    pn->pn_pos.end = CURRENT_TOKEN(ts).pos.end;
-    ts->flags &= ~TSF_XMLTAGMODE;
+    pn->pn_pos.end = tokenStream.currentToken().pos.end;
+    tokenStream.flags &= ~TSF_XMLTAGMODE;
     return pn;
 }

-static JSParseNode *
-XMLElementOrListRoot(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
-                     JSBool allowList)
+JSParseNode *
+JSCompiler::xmlElementOrListRoot(JSBool allowList)
 {
     uint32 oldopts;
     JSParseNode *pn;

@@ -7932,9 +7817,9 @@ XMLElementOrListRoot(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
      * to end of line (used in script tags to hide content from old browsers
      * that don't recognize <script>).
      */
-    oldopts = JS_SetOptions(cx, cx->options | JSOPTION_XML);
-    pn = XMLElementOrList(cx, ts, tc, allowList);
-    JS_SetOptions(cx, oldopts);
+    oldopts = JS_SetOptions(context, context->options | JSOPTION_XML);
+    pn = xmlElementOrList(allowList);
+    JS_SetOptions(context, oldopts);
     return pn;
 }

diff --git a/js/src/jsscan.h b/js/src/jsscan.h
--- a/js/src/jsscan.h
+++ b/js/src/jsscan.h
+    /*
+     * To handle the hard case of contiguous HTML comments, we want to clear the
+     * TSF_DIRTYINPUT flag at the end of each such comment. But we'd rather not
+     * scan for --> within every //-style comment unless we have to. So we set
+     * TSF_IN_HTML_COMMENT when a <!-- is scanned in an HTML begin comment, and
+     * clear it (and TSF_DIRTYINPUT) when we scan --> either on a clean line, or
+     * only if (flags & TSF_IN_HTML_COMMENT), in a //-style comment.
+     *
+     * This still works as before given a malformed comment hiding hack such as:
+     *
+     *    <script>
+     *      <!-- comment hiding hack #1
+     *      code goes here
+     *      // --> oops, markup for script-unaware browsers goes here!
+     *    </script>
+     *
+     * It does not cope with malformed comment hiding hacks where --> is hidden
+     * by C-style comments, or on a dirty line. Such cases are already broken.
+     */
+    TSF_IN_HTML_COMMENT = 0x2000,
+
+    /* Ignore keywords and return TOK_NAME instead to the parser. */
+    TSF_KEYWORD_IS_NAME = 0x4000,
+
+    /* Tokenize as appropriate for strict mode code. */
+    TSF_STRICT_MODE_CODE = 0x8000
+};
+
 #define t_op            u.s.op
 #define t_reflags       u.reflags
 #define t_atom          u.s.atom
 #define t_atom2         u.p.atom2
 #define t_dval          u.dval

-typedef struct JSTokenBuf {
-    jschar              *base;      /* base of line or stream buffer */
-    jschar              *limit;     /* limit for quick bounds check */
-    jschar              *ptr;       /* next char to get, or slot to use */
-} JSTokenBuf;
+const size_t LINE_LIMIT = 256;      /* logical line buffer size limit
+                                       -- physical line length is unlimited */

-#define JS_LINE_LIMIT   256         /* logical line buffer size limit --
-                                       physical line length is unlimited */
-#define NTOKENS         4           /* 1 current + 2 lookahead, rounded */
-#define NTOKENS_MASK    (NTOKENS-1) /* to power of 2 to avoid divmod by 3 */
+class TokenStream
+{
+    static const size_t ntokens = 4;    /* 1 current + 2 lookahead, rounded
+                                           to power of 2 to avoid divmod by 3 */
+    static const uintN ntokensMask = ntokens - 1;
+
+  public:
+    /*
+     * To construct a TokenStream, first call the constructor, which is
+     * infallible, then call |init|, which can fail. To destroy a TokenStream,
+     * first call |close| then call the destructor. If |init| fails, do not call
+     * |close|.
+     *
+     * This class uses JSContext.tempPool to allocate internal buffers. The
+     * caller should JS_ARENA_MARK before calling |init| and JS_ARENA_RELEASE
+     * after calling |close|.
+     */
+    TokenStream(JSContext *);

-struct JSTokenStream {
-    JSToken             tokens[NTOKENS];/* circular token buffer */
+    /*
+     * Create a new token stream, either from an input buffer or from a file.
+     * Return false on file-open or memory-allocation failure.
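
The ntokens/ntokensMask constants introduced above drive the cursor arithmetic in the getToken/ungetToken/peekToken bodies that follow: four slots (1 current + 2 lookahead, rounded up to a power of two) let a bit-mask replace a modulo. A self-contained sketch of that ring-buffer scheme; the names and the scan callback are stand-ins, not the real TokenStream API:

```cpp
#include <cassert>
#include <cstddef>

struct TokenRingSketch {
    static const std::size_t ntokens = 4;
    static const unsigned mask = ntokens - 1;

    int tokens[ntokens];
    unsigned cursor;      // slot holding the current token
    unsigned lookahead;   // tokens already scanned but pushed back

    TokenRingSketch() : cursor(0), lookahead(0) {}

    int get(int (*scan)()) {
        if (lookahead != 0) {              // reuse a pushed-back token
            lookahead--;
            cursor = (cursor + 1) & mask;
            return tokens[cursor];
        }
        cursor = (cursor + 1) & mask;      // scan a fresh token into the ring
        tokens[cursor] = scan();
        return tokens[cursor];
    }

    void unget() {
        assert(lookahead < mask);          // at most ntokens - 1 of pushback
        lookahead++;
        cursor = (cursor - 1) & mask;      // unsigned wraparound is well defined
    }

    int peek(int (*scan)()) {
        if (lookahead != 0)
            return tokens[(cursor + lookahead) & mask];
        int tt = get(scan);
        unget();
        return tt;
    }
};
```
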
+     */
+    bool init(const jschar *base, size_t length, FILE *fp, const char *filename, uintN lineno);
+    void close();
+    ~TokenStream() {}
+
+    /* Accessors. */
+    JSContext *getContext() const { return cx; }
+    bool onCurrentLine(const TokenPos &pos) const { return lineno == pos.end.lineno; }
+    const Token &currentToken() const { return tokens[cursor]; }
+    const Token &getTokenAt(size_t index) const {
+        JS_ASSERT(index < ntokens);
+        return tokens[index];
+    }
+    const JSCharBuffer &getTokenbuf() const { return tokenbuf; }
+    const char *getFilename() const { return filename; }
+    uintN getLineno() const { return lineno; }
+
+    /* Mutators. */
+    Token *mutableCurrentToken() { return &tokens[cursor]; }
+    bool reportCompileErrorNumberVA(JSParseNode *pn, uintN flags, uintN errorNumber, va_list ap);
+
+    /*
+     * Get the next token from the stream, make it the current token, and
+     * return its kind.
+     */
+    TokenKind getToken() {
+        /* Check for a pushed-back token resulting from mismatching lookahead. */
+        while (lookahead != 0) {
+            JS_ASSERT(!(flags & TSF_XMLTEXTMODE));
+            lookahead--;
+            cursor = (cursor + 1) & ntokensMask;
+            TokenKind tt = currentToken().type;
+            if (tt != TOK_EOL || (flags & TSF_NEWLINES))
+                return tt;
+        }
+
+        /* If there was a fatal error, keep returning TOK_ERROR. */
+        if (flags & TSF_ERROR)
+            return TOK_ERROR;
+
+        return getTokenInternal();
+    }
+
+    Token *getMutableTokenAt(size_t index) {
+        JS_ASSERT(index < ntokens);
+        return &tokens[index];
+    }
+
+    /*
+     * Push the last scanned token back into the stream.
+     */
+    void ungetToken() {
+        JS_ASSERT(lookahead < ntokensMask);
+        lookahead++;
+        cursor = (cursor - 1) & ntokensMask;
+    }
+
+    TokenKind peekToken() {
+        if (lookahead != 0) {
+            return tokens[(cursor + lookahead) & ntokensMask].type;
+        }
+        TokenKind tt = getToken();
+        ungetToken();
+        return tt;
+    }
+
+    TokenKind peekTokenSameLine() {
+        if (!onCurrentLine(currentToken().pos))
+            return TOK_EOL;
+        flags |= TSF_NEWLINES;
+        TokenKind tt = peekToken();
+        flags &= ~TSF_NEWLINES;
+        return tt;
+    }
+
+    /*
+     * Get the next token from the stream if its kind is |tt|.
+     */
+    JSBool matchToken(TokenKind tt) {
+        if (getToken() == tt)
+            return JS_TRUE;
+        ungetToken();
+        return JS_FALSE;
+    }
+
+  private:
+    typedef struct TokenBuf {
+        jschar              *base;      /* base of line or stream buffer */
+        jschar              *limit;     /* limit for quick bounds check */
+        jschar              *ptr;       /* next char to get, or slot to use */
+    } TokenBuf;
+
+    TokenKind getTokenInternal();   /* doesn't check for pushback or error flag.
*/ + int32 getChar(); + void ungetChar(int32 c); + Token *newToken(ptrdiff_t adjust); + int32 getUnicodeEscape(); + JSBool peekChars(intN n, jschar *cp); + JSBool getXMLEntity(); + + JSBool matchChar(int32 expect) { + int32 c = getChar(); + if (c == expect) + return JS_TRUE; + ungetChar(c); + return JS_FALSE; + } + + int32 peekChar() { + int32 c = getChar(); + ungetChar(c); + return c; + } + + void skipChars(intN n) { + while (--n >= 0) + getChar(); + } + + JSContext * const cx; + Token tokens[ntokens];/* circular token buffer */ uintN cursor; /* index of last parsed token */ uintN lookahead; /* count of lookahead tokens */ + uintN lineno; /* current line number */ uintN ungetpos; /* next free char slot in ungetbuf */ jschar ungetbuf[6]; /* at most 6, for \uXXXX lookahead */ - uintN flags; /* flags -- see below */ + public: + uintN flags; /* flags -- see above */ + private: uint32 linelen; /* physical linebuf segment length */ uint32 linepos; /* linebuf offset in physical line */ - JSTokenBuf linebuf; /* line buffer for diagnostics */ - JSTokenBuf userbuf; /* user input buffer if !file */ + TokenBuf linebuf; /* line buffer for diagnostics */ + + TokenBuf userbuf; /* user input buffer if !file */ const char *filename; /* input filename or null */ FILE *file; /* stdio stream if reading from file */ JSSourceHandler listener; /* callback for source; eg debugger */ @@ -278,82 +466,16 @@ struct JSTokenStream { jschar *saveEOL; /* save next end of line in userbuf, to optimize for very long lines */ JSCharBuffer tokenbuf; /* current token string buffer */ - - /* - * To construct a JSTokenStream, first call the constructor, which is - * infallible, then call |init|, which can fail. To destroy a JSTokenStream, - * first call |close| then call the destructor. If |init| fails, do not call - * |close|. - * - * This class uses JSContext.tempPool to allocate internal buffers. The - * caller should JS_ARENA_MARK before calling |init| and JS_ARENA_RELEASE - * after calling |close|. - */ - JSTokenStream(JSContext *); - - /* - * Create a new token stream, either from an input buffer or from a file. - * Return false on file-open or memory-allocation failure. - */ - bool init(JSContext *, const jschar *base, size_t length, - FILE *fp, const char *filename, uintN lineno); - - void close(JSContext *); - ~JSTokenStream() {} }; -#define CURRENT_TOKEN(ts) ((ts)->tokens[(ts)->cursor]) -#define ON_CURRENT_LINE(ts,pos) ((ts)->lineno == (pos).end.lineno) - -/* JSTokenStream flags */ -#define TSF_ERROR 0x01 /* fatal error while compiling */ -#define TSF_EOF 0x02 /* hit end of file */ -#define TSF_NEWLINES 0x04 /* tokenize newlines */ -#define TSF_OPERAND 0x08 /* looking for operand, not operator */ -#define TSF_NLFLAG 0x20 /* last linebuf ended with \n */ -#define TSF_CRFLAG 0x40 /* linebuf would have ended with \r */ -#define TSF_DIRTYLINE 0x80 /* non-whitespace since start of line */ -#define TSF_OWNFILENAME 0x100 /* ts->filename is malloc'd */ -#define TSF_XMLTAGMODE 0x200 /* scanning within an XML tag in E4X */ -#define TSF_XMLTEXTMODE 0x400 /* scanning XMLText terminal from E4X */ -#define TSF_XMLONLYMODE 0x800 /* don't scan {expr} within text/tag */ - -/* Flag indicating unexpected end of input, i.e. TOK_EOF not at top-level. */ -#define TSF_UNEXPECTED_EOF 0x1000 - -/* - * To handle the hard case of contiguous HTML comments, we want to clear the - * TSF_DIRTYINPUT flag at the end of each such comment. But we'd rather not - * scan for --> within every //-style comment unless we have to. 
So we set
- * TSF_IN_HTML_COMMENT when a <!-- is scanned in an HTML begin comment, and
- * clear it (and TSF_DIRTYINPUT) when we scan --> either on a clean line, or
- * only if (ts->flags & TSF_IN_HTML_COMMENT), in a //-style comment.
- *
- * This still works as before given a malformed comment hiding hack such as:
- *
- *    <script>
- *      <!-- comment hiding hack #1
- *      code goes here
- *      // --> oops, markup for script-unaware browsers goes here!
- *    </script>
- *
- * It does not cope with malformed comment hiding hacks where --> is hidden
- * by C-style comments, or on a dirty line. Such cases are already broken.
- */
-#define TSF_IN_HTML_COMMENT 0x2000
-
-/* Ignore keywords and return TOK_NAME instead to the parser. */
-#define TSF_KEYWORD_IS_NAME 0x4000
-
-/* Tokenize as appropriate for strict mode code. */
-#define TSF_STRICT_MODE_CODE 0x8000
+} /* namespace js */

 /* Unicode separators that are treated as line terminators, in addition to \n, \r */
 #define LINE_SEPARATOR  0x2028
 #define PARA_SEPARATOR  0x2029

 extern void
-js_CloseTokenStream(JSContext *cx, JSTokenStream *ts);
+js_CloseTokenStream(JSContext *cx, js::TokenStream *ts);

 extern JS_FRIEND_API(int)
 js_fgets(char *buf, int size, FILE *file);

@@ -362,7 +484,7 @@ js_fgets(char *buf, int size, FILE *file);
  * If the given char array forms JavaScript keyword, return corresponding
  * token. Otherwise return TOK_EOF.
  */
-extern JSTokenType
+extern js::TokenKind
 js_CheckKeyword(const jschar *chars, size_t length);

 /*
@@ -378,69 +500,44 @@ typedef void (*JSMapKeywordFun)(const char *);
 extern JSBool
 js_IsIdentifier(JSString *str);

+/*
+ * Steal one JSREPORT_* bit (see jsapi.h) to tell that arguments to the error
+ * message have const jschar* type, not const char*.
+ */
+#define JSREPORT_UC 0x100
+
+namespace js {
+
 /*
  * Report a compile-time error by its number. Return true for a warning, false
  * for an error. When pn is not null, use it to report error's location.
  * Otherwise use ts, which must not be null.
  */
 bool
-js_ReportCompileErrorNumber(JSContext *cx, JSTokenStream *ts, JSParseNode *pn,
-                            uintN flags, uintN errorNumber, ...);
+ReportCompileErrorNumber(JSContext *cx, TokenStream *ts, JSParseNode *pn, uintN flags,
+                         uintN errorNumber, ...);

 /*
  * Report a condition that should elicit a warning with JSOPTION_STRICT,
  * or an error if ts or tc is handling strict mode code. This function
- * defers to js_ReportCompileErrorNumber to do the real work. Either tc
+ * defers to ReportCompileErrorNumber to do the real work. Either tc
  * or ts may be NULL, if there is no tree context or token stream state
  * whose strictness should affect the report.
  *
- * One could have js_ReportCompileErrorNumber recognize the
+ * One could have ReportCompileErrorNumber recognize the
  * JSREPORT_STRICT_MODE_ERROR flag instead of having a separate function
  * like this one. However, the strict mode code flag we need to test is
  * in the JSTreeContext structure for that code; we would have to change
- * the ~120 js_ReportCompileErrorNumber calls to pass the additional
+ * the ~120 ReportCompileErrorNumber calls to pass the additional
  * argument, even though many of those sites would never use it. Using
  * ts's TSF_STRICT_MODE_CODE flag instead of tc's would be brittle: at some
  * points ts's flags don't correspond to those of the tc relevant to the
  * error.
  */
 bool
-js_ReportStrictModeError(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
-                         JSParseNode *pn, uintN errorNumber, ...);
+ReportStrictModeError(JSContext *cx, TokenStream *ts, JSTreeContext *tc, JSParseNode *pn,
+                      uintN errorNumber, ...);

-/*
- * Steal one JSREPORT_* bit (see jsapi.h) to tell that arguments to the error
- * message have const jschar* type, not const char*.
- */
-#define JSREPORT_UC 0x100
-
-/*
- * Look ahead one token and return its type.
- */
-extern JSTokenType
-js_PeekToken(JSContext *cx, JSTokenStream *ts);
-
-extern JSTokenType
-js_PeekTokenSameLine(JSContext *cx, JSTokenStream *ts);
-
-/*
- * Get the next token from ts.
- */
-extern JSTokenType
-js_GetToken(JSContext *cx, JSTokenStream *ts);
-
-/*
- * Push back the last scanned token onto ts.
- */
-extern void
-js_UngetToken(JSTokenStream *ts);
-
-/*
- * Get the next token from ts if its type is tt.
- */
-extern JSBool
-js_MatchToken(JSContext *cx, JSTokenStream *ts, JSTokenType tt);
-
-JS_END_EXTERN_C
+} /* namespace js */

 #endif /* jsscan_h___ */
diff --git a/js/src/jsscope.cpp b/js/src/jsscope.cpp
index 076bbbf90be..482ca4231e7 100644
--- a/js/src/jsscope.cpp
+++ b/js/src/jsscope.cpp
@@ -55,8 +55,10 @@
 #include "jsatom.h"
 #include "jscntxt.h"
 #include "jsdbgapi.h"
+#include "jsfun.h"      /* for JS_ARGS_LENGTH_MAX */
 #include "jslock.h"
 #include "jsnum.h"
+#include "jsobj.h"
 #include "jsscope.h"
 #include "jsstr.h"
 #include "jstracer.h"
@@ -104,7 +106,7 @@ js_GetMutableScope(JSContext *cx, JSObject *obj)
      * Compile-time block objects each have their own scope, created at
      * birth, and runtime clone of a block objects are never mutated.
      */
-    JS_ASSERT(STOBJ_GET_CLASS(obj) != &js_BlockClass);
+    JS_ASSERT(obj->getClass() != &js_BlockClass);
     newscope = JSScope::create(cx, scope->ops, obj->getClass(), obj, scope->shape);
     if (!newscope)
         return NULL;
@@ -114,8 +116,8 @@ js_GetMutableScope(JSContext *cx, JSObject *obj)
     JS_ASSERT(JS_IS_SCOPE_LOCKED(cx, newscope));
     obj->map = newscope;

-    JS_ASSERT(newscope->freeslot == JSSLOT_FREE(STOBJ_GET_CLASS(obj)));
-    clasp = STOBJ_GET_CLASS(obj);
+    JS_ASSERT(newscope->freeslot == JSSLOT_FREE(obj->getClass()));
+    clasp = obj->getClass();
     if (clasp->reserveSlots) {
         /*
          * FIXME: Here we change OBJ_SCOPE(obj)->freeslot without changing
@@ -124,8 +126,8 @@ js_GetMutableScope(JSContext *cx, JSObject *obj)
          * js_AddProperty. See bug 535416.
          */
         freeslot = JSSLOT_FREE(clasp) + clasp->reserveSlots(cx, obj);
-        if (freeslot > STOBJ_NSLOTS(obj))
-            freeslot = STOBJ_NSLOTS(obj);
+        if (freeslot > obj->numSlots())
+            freeslot = obj->numSlots();
         if (newscope->freeslot < freeslot)
             newscope->freeslot = freeslot;
     }
@@ -279,11 +281,43 @@ JSScope::destroy(JSContext *cx)
 bool
 JSScope::initRuntimeState(JSContext *cx)
 {
-    cx->runtime->emptyBlockScope = cx->create<JSEmptyScope>(cx, &js_ObjectOps,
-                                                            &js_BlockClass);
-    JS_ASSERT(cx->runtime->emptyBlockScope->nrefs == 2);
-    cx->runtime->emptyBlockScope->nrefs = 1;
-    return !!cx->runtime->emptyBlockScope;
+    JSRuntime *rt = cx->runtime;
+
+    rt->emptyArgumentsScope = cx->create<JSEmptyScope>(cx, &js_ObjectOps, &js_ArgumentsClass);
+    if (!rt->emptyArgumentsScope)
+        return false;
+    JS_ASSERT(rt->emptyArgumentsScope->nrefs == 2);
+    rt->emptyArgumentsScope->nrefs = 1;
+
+    /*
+     * NewArguments allocates dslots to have enough room for the argc of the
+     * particular arguments object being created.
+     *
+     * Thus we fake freeslot in the shared empty scope for the many unmutated
+     * arguments objects so that, until and unless a scope property is defined
+     * on a particular arguments object, it can share the runtime-wide empty
+     * scope with other arguments objects, whatever their initial argc values.
+     *
+     * This allows assertions that the arg slot being got or set by a fast path
+     * is less than freeslot to succeed. As the shared emptyArgumentsScope is
+     * never mutated, it's safe to pretend to have all the slots possible.
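
A compilable illustration of the freeslot trick the comment above describes, with stand-in constants in place of JS_INITIAL_NSLOTS and JS_ARGS_LENGTH_MAX (these are not the real values): a shared, never-mutated empty scope claims room for the largest possible argc, so a "slot < freeslot" assertion holds for every arguments object sharing it.

```cpp
#include <cassert>

static const unsigned INITIAL_NSLOTS  = 5;     // stand-in for JS_INITIAL_NSLOTS
static const unsigned ARGS_LENGTH_MAX = 500;   // stand-in for JS_ARGS_LENGTH_MAX

struct EmptyScopeSketch {
    unsigned freeslot;
    // Pretend to own every slot any arguments object could ever need.
    EmptyScopeSketch() : freeslot(INITIAL_NSLOTS + ARGS_LENGTH_MAX) {}
};

static int getArgFastPath(const EmptyScopeSketch &shared, unsigned argSlot)
{
    // Holds for any argc <= ARGS_LENGTH_MAX without mutating the shared scope.
    assert(argSlot < shared.freeslot);
    return 0;   // stand-in for the actual slot load
}
```
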
+     *
+     * Note how the fast paths in jsops.cpp for JSOP_LENGTH and JSOP_GETELEM
+     * bypass resolution of scope properties for length and element indices on
+     * arguments objects. This helps ensure that any arguments object needing
+     * its own mutable scope (with unique shape) is a rare event.
+     */
+    rt->emptyArgumentsScope->freeslot = JS_INITIAL_NSLOTS + JS_ARGS_LENGTH_MAX;
+
+    rt->emptyBlockScope = cx->create<JSEmptyScope>(cx, &js_ObjectOps, &js_BlockClass);
+    if (!rt->emptyBlockScope) {
+        rt->emptyArgumentsScope->drop(cx);
+        rt->emptyArgumentsScope = NULL;
+        return false;
+    }
+    JS_ASSERT(rt->emptyBlockScope->nrefs == 2);
+    rt->emptyBlockScope->nrefs = 1;
+    return true;
 }

 /* static */
@@ -291,6 +325,10 @@ void
 JSScope::finishRuntimeState(JSContext *cx)
 {
     JSRuntime *rt = cx->runtime;
+    if (rt->emptyArgumentsScope) {
+        rt->emptyArgumentsScope->drop(cx);
+        rt->emptyArgumentsScope = NULL;
+    }
     if (rt->emptyBlockScope) {
         rt->emptyBlockScope->drop(cx);
         rt->emptyBlockScope = NULL;
@@ -452,498 +490,6 @@ JSScope::changeTable(JSContext *cx, int change)
     return true;
 }

-static JSDHashNumber
-js_HashScopeProperty(JSDHashTable *table, const void *key)
-{
-    const JSScopeProperty *sprop = (const JSScopeProperty *)key;
-    return sprop->hash();
-}
-
-static JSBool
-js_MatchScopeProperty(JSDHashTable *table,
-                      const JSDHashEntryHdr *hdr,
-                      const void *key)
-{
-    const JSPropertyTreeEntry *entry = (const JSPropertyTreeEntry *)hdr;
-    const JSScopeProperty *sprop = entry->child;
-    const JSScopeProperty *kprop = (const JSScopeProperty *)key;
-
-    return sprop->matches(kprop);
-}
-
-static const JSDHashTableOps PropertyTreeHashOps = {
-    JS_DHashAllocTable,
-    JS_DHashFreeTable,
-    js_HashScopeProperty,
-    js_MatchScopeProperty,
-    JS_DHashMoveEntryStub,
-    JS_DHashClearEntryStub,
-    JS_DHashFinalizeStub,
-    NULL
-};
-
-/*
- * A property tree node on rt->propertyFreeList overlays the following prefix
- * struct on JSScopeProperty.
- */
-typedef struct FreeNode {
-    jsid                id;
-    JSScopeProperty     *next;
-    JSScopeProperty     **prevp;
-} FreeNode;
-
-#define FREENODE(sprop) ((FreeNode *) (sprop))
-
-#define FREENODE_INSERT(list, sprop)                                          \
-    JS_BEGIN_MACRO                                                            \
-        FREENODE(sprop)->next = (list);                                       \
-        FREENODE(sprop)->prevp = &(list);                                     \
-        if (list)                                                             \
-            FREENODE(list)->prevp = &FREENODE(sprop)->next;                   \
-        (list) = (sprop);                                                     \
-    JS_END_MACRO
-
-#define FREENODE_REMOVE(sprop)                                                \
-    JS_BEGIN_MACRO                                                            \
-        *FREENODE(sprop)->prevp = FREENODE(sprop)->next;                      \
-        if (FREENODE(sprop)->next)                                            \
-            FREENODE(FREENODE(sprop)->next)->prevp = FREENODE(sprop)->prevp;  \
-    JS_END_MACRO
-
-/* NB: Called with rt->gcLock held.
*/ -static JSScopeProperty * -NewScopeProperty(JSRuntime *rt) -{ - JSScopeProperty *sprop; - - sprop = rt->propertyFreeList; - if (sprop) { - FREENODE_REMOVE(sprop); - } else { - JS_ARENA_ALLOCATE_CAST(sprop, JSScopeProperty *, - &rt->propertyArenaPool, - sizeof(JSScopeProperty)); - if (!sprop) - return NULL; - } - - JS_RUNTIME_METER(rt, livePropTreeNodes); - JS_RUNTIME_METER(rt, totalPropTreeNodes); - return sprop; -} - -#define CHUNKY_KIDS_TAG ((jsuword)1) -#define KIDS_IS_CHUNKY(kids) ((jsuword)(kids) & CHUNKY_KIDS_TAG) -#define KIDS_TO_CHUNK(kids) ((PropTreeKidsChunk *) \ - ((jsuword)(kids) & ~CHUNKY_KIDS_TAG)) -#define CHUNK_TO_KIDS(chunk) ((JSScopeProperty *) \ - ((jsuword)(chunk) | CHUNKY_KIDS_TAG)) -#define MAX_KIDS_PER_CHUNK 10 -#define CHUNK_HASH_THRESHOLD 30 - -typedef struct PropTreeKidsChunk PropTreeKidsChunk; - -struct PropTreeKidsChunk { - JSScopeProperty *kids[MAX_KIDS_PER_CHUNK]; - JSDHashTable *table; - PropTreeKidsChunk *next; -}; - -static PropTreeKidsChunk * -NewPropTreeKidsChunk(JSRuntime *rt) -{ - PropTreeKidsChunk *chunk; - - chunk = (PropTreeKidsChunk *) js_calloc(sizeof *chunk); - if (!chunk) - return NULL; - JS_ASSERT(((jsuword)chunk & CHUNKY_KIDS_TAG) == 0); - JS_RUNTIME_METER(rt, propTreeKidsChunks); - return chunk; -} - -static void -DestroyPropTreeKidsChunk(JSRuntime *rt, PropTreeKidsChunk *chunk) -{ - JS_RUNTIME_UNMETER(rt, propTreeKidsChunks); - if (chunk->table) - JS_DHashTableDestroy(chunk->table); - js_free(chunk); -} - -/* NB: Called with rt->gcLock held. */ -static bool -InsertPropertyTreeChild(JSRuntime *rt, JSScopeProperty *parent, - JSScopeProperty *child, PropTreeKidsChunk *sweptChunk) -{ - JSDHashTable *table; - JSPropertyTreeEntry *entry; - JSScopeProperty **childp, *kids, *sprop; - PropTreeKidsChunk *chunk, **chunkp; - uintN i; - - JS_ASSERT(!parent || child->parent != parent); - JS_ASSERT(!JSVAL_IS_NULL(child->id)); - - if (!parent) { - table = &rt->propertyTreeHash; - entry = (JSPropertyTreeEntry *) - JS_DHashTableOperate(table, child, JS_DHASH_ADD); - if (!entry) - return false; - childp = &entry->child; - sprop = *childp; - if (!sprop) { - *childp = child; - } else { - /* - * A "Duplicate child" case. - * - * We can't do away with child, as at least one live scope entry - * still points at it. What's more, that scope's lastProp chains - * through an ancestor line to reach child, and js_Enumerate and - * others count on this linkage. We must leave child out of the - * hash table, and not require it to be there when we eventually - * GC it (see RemovePropertyTreeChild, below). - * - * It is necessary to leave the duplicate child out of the hash - * table to preserve entry uniqueness. It is safe to leave the - * child out of the hash table (unlike the duplicate child cases - * below), because the child's parent link will be null, which - * can't dangle. 
- */ - JS_ASSERT(sprop != child && sprop->matches(child)); - JS_RUNTIME_METER(rt, duplicatePropTreeNodes); - } - } else { - JS_ASSERT(!JSVAL_IS_NULL(parent->id)); - childp = &parent->kids; - kids = *childp; - if (kids) { - if (KIDS_IS_CHUNKY(kids)) { - chunk = KIDS_TO_CHUNK(kids); - - table = chunk->table; - if (table) { - entry = (JSPropertyTreeEntry *) - JS_DHashTableOperate(table, child, JS_DHASH_ADD); - if (!entry) - return false; - if (!entry->child) { - entry->child = child; - while (chunk->next) - chunk = chunk->next; - for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) { - childp = &chunk->kids[i]; - sprop = *childp; - if (!sprop) - goto insert; - } - chunkp = &chunk->next; - goto new_chunk; - } - } - - do { - for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) { - childp = &chunk->kids[i]; - sprop = *childp; - if (!sprop) - goto insert; - - JS_ASSERT(sprop != child); - if (sprop->matches(child)) { - /* - * Duplicate child, see comment above. In this - * case, we must let the duplicate be inserted at - * this level in the tree, so we keep iterating, - * looking for an empty slot in which to insert. - */ - JS_ASSERT(sprop != child); - JS_RUNTIME_METER(rt, duplicatePropTreeNodes); - } - } - chunkp = &chunk->next; - } while ((chunk = *chunkp) != NULL); - - new_chunk: - if (sweptChunk) { - chunk = sweptChunk; - } else { - chunk = NewPropTreeKidsChunk(rt); - if (!chunk) - return false; - } - *chunkp = chunk; - childp = &chunk->kids[0]; - } else { - sprop = kids; - JS_ASSERT(sprop != child); - if (sprop->matches(child)) { - /* - * Duplicate child, see comment above. Once again, we - * must let duplicates created by deletion pile up in a - * kids-chunk-list, in order to find them when sweeping - * and thereby avoid dangling parent pointers. - */ - JS_RUNTIME_METER(rt, duplicatePropTreeNodes); - } - if (sweptChunk) { - chunk = sweptChunk; - } else { - chunk = NewPropTreeKidsChunk(rt); - if (!chunk) - return false; - } - parent->kids = CHUNK_TO_KIDS(chunk); - chunk->kids[0] = sprop; - childp = &chunk->kids[1]; - } - } - insert: - *childp = child; - } - - child->parent = parent; - return true; -} - -/* NB: Called with rt->gcLock held. */ -static PropTreeKidsChunk * -RemovePropertyTreeChild(JSRuntime *rt, JSScopeProperty *child) -{ - PropTreeKidsChunk *freeChunk; - JSScopeProperty *parent, *kids, *kid; - JSDHashTable *table; - PropTreeKidsChunk *list, *chunk, **chunkp, *lastChunk; - uintN i, j; - JSPropertyTreeEntry *entry; - - freeChunk = NULL; - parent = child->parent; - if (!parent) { - /* - * Don't remove child if it is not in rt->propertyTreeHash, but only - * matches a root child in the table that has compatible members. See - * the "Duplicate child" comments in InsertPropertyTreeChild, above. 
- */ - table = &rt->propertyTreeHash; - } else { - JS_ASSERT(!JSVAL_IS_NULL(parent->id)); - kids = parent->kids; - if (KIDS_IS_CHUNKY(kids)) { - list = chunk = KIDS_TO_CHUNK(kids); - chunkp = &list; - table = chunk->table; - - do { - for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) { - if (chunk->kids[i] == child) { - lastChunk = chunk; - if (!lastChunk->next) { - j = i + 1; - } else { - j = 0; - do { - chunkp = &lastChunk->next; - lastChunk = *chunkp; - } while (lastChunk->next); - } - for (; j < MAX_KIDS_PER_CHUNK; j++) { - if (!lastChunk->kids[j]) - break; - } - --j; - if (chunk != lastChunk || j > i) - chunk->kids[i] = lastChunk->kids[j]; - lastChunk->kids[j] = NULL; - if (j == 0) { - *chunkp = NULL; - if (!list) - parent->kids = NULL; - freeChunk = lastChunk; - } - goto out; - } - } - - chunkp = &chunk->next; - } while ((chunk = *chunkp) != NULL); - } else { - table = NULL; - kid = kids; - if (kid == child) - parent->kids = NULL; - } - } - -out: - if (table) { - entry = (JSPropertyTreeEntry *) - JS_DHashTableOperate(table, child, JS_DHASH_LOOKUP); - - if (entry->child == child) - JS_DHashTableRawRemove(table, &entry->hdr); - } - return freeChunk; -} - -static JSDHashTable * -HashChunks(PropTreeKidsChunk *chunk, uintN n) -{ - JSDHashTable *table; - uintN i; - JSScopeProperty *sprop; - JSPropertyTreeEntry *entry; - - table = JS_NewDHashTable(&PropertyTreeHashOps, NULL, - sizeof(JSPropertyTreeEntry), - JS_DHASH_DEFAULT_CAPACITY(n + 1)); - if (!table) - return NULL; - do { - for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) { - sprop = chunk->kids[i]; - if (!sprop) - break; - entry = (JSPropertyTreeEntry *) - JS_DHashTableOperate(table, sprop, JS_DHASH_ADD); - entry->child = sprop; - } - } while ((chunk = chunk->next) != NULL); - return table; -} - -/* - * Called without cx->runtime->gcLock held. This function acquires that lock - * only when inserting a new child. Thus there may be races to find or add a - * node that result in duplicates. We expect such races to be rare! - * - * We use rt->gcLock, not rt->rtLock, to avoid nesting the former inside the - * latter in js_GenerateShape below. - */ -JSScopeProperty * -js_GetPropertyTreeChild(JSContext *cx, JSScopeProperty *parent, - const JSScopeProperty &child) -{ - JSRuntime *rt; - JSDHashTable *table; - JSPropertyTreeEntry *entry; - JSScopeProperty *sprop; - PropTreeKidsChunk *chunk; - uintN i, n; - - rt = cx->runtime; - if (!parent) { - JS_LOCK_GC(rt); - - table = &rt->propertyTreeHash; - entry = (JSPropertyTreeEntry *) - JS_DHashTableOperate(table, &child, JS_DHASH_ADD); - if (!entry) - goto out_of_memory; - - sprop = entry->child; - if (sprop) - goto out; - } else { - JS_ASSERT(!JSVAL_IS_NULL(parent->id)); - - /* - * Because chunks are appended at the end and never deleted except by - * the GC, we can search without taking the runtime's GC lock. We may - * miss a matching sprop added by another thread, and make a duplicate - * one, but that is an unlikely, therefore small, cost. The property - * tree has extremely low fan-out below its root in popular embeddings - * with real-world workloads. - * - * Patterns such as defining closures that capture a constructor's - * environment as getters or setters on the new object that is passed - * in as |this| can significantly increase fan-out below the property - * tree root -- see bug 335700 for details. 
- */ - entry = NULL; - sprop = parent->kids; - if (sprop) { - if (KIDS_IS_CHUNKY(sprop)) { - chunk = KIDS_TO_CHUNK(sprop); - - table = chunk->table; - if (table) { - JS_LOCK_GC(rt); - entry = (JSPropertyTreeEntry *) - JS_DHashTableOperate(table, &child, JS_DHASH_LOOKUP); - sprop = entry->child; - if (sprop) - goto out; - goto locked_not_found; - } - - n = 0; - do { - for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) { - sprop = chunk->kids[i]; - if (!sprop) { - n += i; - if (n >= CHUNK_HASH_THRESHOLD) { - chunk = KIDS_TO_CHUNK(parent->kids); - if (!chunk->table) { - table = HashChunks(chunk, n); - JS_LOCK_GC(rt); - if (!table) - goto out_of_memory; - if (chunk->table) - JS_DHashTableDestroy(table); - else - chunk->table = table; - goto locked_not_found; - } - } - goto not_found; - } - - if (sprop->matches(&child)) - return sprop; - } - n += MAX_KIDS_PER_CHUNK; - } while ((chunk = chunk->next) != NULL); - } else { - if (sprop->matches(&child)) - return sprop; - } - } - - not_found: - JS_LOCK_GC(rt); - } - -locked_not_found: - sprop = NewScopeProperty(rt); - if (!sprop) - goto out_of_memory; - - new(sprop) JSScopeProperty(child.id, child.rawGetter, child.rawSetter, child.slot, - child.attrs, child.flags, child.shortid); - sprop->parent = sprop->kids = NULL; - sprop->shape = js_GenerateShape(cx, true); - - if (!parent) { - entry->child = sprop; - } else { - if (!InsertPropertyTreeChild(rt, parent, sprop, NULL)) - goto out_of_memory; - } - - out: - JS_UNLOCK_GC(rt); - return sprop; - - out_of_memory: - JS_UNLOCK_GC(rt); - JS_ReportOutOfMemory(cx); - return NULL; -} - /* * Get or create a property-tree or dictionary child property of parent, which * must be lastProp if inDictionaryMode(), else parent must be one of lastProp @@ -988,7 +534,7 @@ JSScope::getChildProperty(JSContext *cx, JSScopeProperty *parent, return NULL; } - JSScopeProperty *sprop = js_GetPropertyTreeChild(cx, parent, child); + JSScopeProperty *sprop = JS_PROPERTY_TREE(cx).getChild(cx, parent, shape, child); if (sprop) { JS_ASSERT(sprop->parent == parent); if (parent == lastProp) { @@ -1092,13 +638,9 @@ JSScopeProperty * JSScope::newDictionaryProperty(JSContext *cx, const JSScopeProperty &child, JSScopeProperty **childp) { - JS_LOCK_GC(cx->runtime); - JSScopeProperty *dprop = NewScopeProperty(cx->runtime); - JS_UNLOCK_GC(cx->runtime); - if (!dprop) { - JS_ReportOutOfMemory(cx); + JSScopeProperty *dprop = JS_PROPERTY_TREE(cx).newScopeProperty(cx); + if (!dprop) return NULL; - } new (dprop) JSScopeProperty(child.id, child.rawGetter, child.rawSetter, child.slot, child.attrs, child.flags | JSScopeProperty::IN_DICTIONARY, @@ -1600,7 +1142,7 @@ JSScope::clear(JSContext *cx) JSEmptyScope *emptyScope; uint32 newShape; if (proto && - OBJ_IS_NATIVE(proto) && + proto->isNative() && (emptyScope = OBJ_SCOPE(proto)->emptyScope) && emptyScope->clasp == clasp) { newShape = emptyScope->shape; @@ -1612,12 +1154,6 @@ JSScope::clear(JSContext *cx) JS_ATOMIC_INCREMENT(&cx->runtime->propertyRemovals); } -void -JSScope::brandingShapeChange(JSContext *cx, uint32 slot, jsval v) -{ - generateOwnShape(cx); -} - void JSScope::deletingShapeChange(JSContext *cx, JSScopeProperty *sprop) { @@ -1677,12 +1213,6 @@ JSScope::protoShapeChange(JSContext *cx) generateOwnShape(cx); } -void -JSScope::sealingShapeChange(JSContext *cx) -{ - generateOwnShape(cx); -} - void JSScope::shadowingShapeChange(JSContext *cx, JSScopeProperty *sprop) { @@ -1770,428 +1300,3 @@ JSScopeProperty::trace(JSTracer *trc) js_CallGCMarker(trc, methodObject(), JSTRACE_OBJECT); } } - -#ifdef DEBUG - 
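
The removed js_GetPropertyTreeChild's comment describes its locking discipline: search the ever-growing kid structures without the GC lock, take the lock only to insert, and tolerate the rare duplicate created by a racing inserter rather than lock every lookup. A minimal sketch of that discipline; all names and types are stand-ins, not SpiderMonkey API:

```cpp
#include <atomic>
#include <mutex>

struct NodeSketch {
    int key;
    NodeSketch *next;
};

NodeSketch *getOrAdd(std::atomic<NodeSketch *> &head, std::mutex &gcLock, int key)
{
    // Unlocked scan: the list only ever grows, so traversal stays safe.
    for (NodeSketch *n = head.load(std::memory_order_acquire); n; n = n->next) {
        if (n->key == key)
            return n;                           // hit without taking the lock
    }
    std::lock_guard<std::mutex> guard(gcLock);  // miss: lock only to insert
    NodeSketch *fresh = new NodeSketch;
    fresh->key = key;
    fresh->next = head.load(std::memory_order_acquire);
    head.store(fresh, std::memory_order_release);  // publish fully-built node
    // A racing thread may have inserted an equal node between the unlocked
    // scan and the lock; like the original, we accept that rare duplicate.
    return fresh;
}
```
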
-static void
-MeterKidCount(JSBasicStats *bs, uintN nkids)
-{
-    JS_BASIC_STATS_ACCUM(bs, nkids);
-    bs->hist[JS_MIN(nkids, 10)]++;
-}
-
-static void
-MeterPropertyTree(JSBasicStats *bs, JSScopeProperty *node)
-{
-    uintN i, nkids;
-    JSScopeProperty *kids, *kid;
-    PropTreeKidsChunk *chunk;
-
-    nkids = 0;
-    kids = node->kids;
-    if (kids) {
-        if (KIDS_IS_CHUNKY(kids)) {
-            for (chunk = KIDS_TO_CHUNK(kids); chunk; chunk = chunk->next) {
-                for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
-                    kid = chunk->kids[i];
-                    if (!kid)
-                        break;
-                    MeterPropertyTree(bs, kid);
-                    nkids++;
-                }
-            }
-        } else {
-            MeterPropertyTree(bs, kids);
-            nkids = 1;
-        }
-    }
-
-    MeterKidCount(bs, nkids);
-}
-
-static JSDHashOperator
-js_MeterPropertyTree(JSDHashTable *table, JSDHashEntryHdr *hdr, uint32 number,
-                     void *arg)
-{
-    JSPropertyTreeEntry *entry = (JSPropertyTreeEntry *)hdr;
-    JSBasicStats *bs = (JSBasicStats *)arg;
-
-    MeterPropertyTree(bs, entry->child);
-    return JS_DHASH_NEXT;
-}
-
-void
-JSScopeProperty::dump(JSContext *cx, FILE *fp)
-{
-    JS_ASSERT(!JSVAL_IS_NULL(id));
-
-    jsval idval = ID_TO_VALUE(id);
-    if (JSVAL_IS_INT(idval)) {
-        fprintf(fp, "[%ld]", (long) JSVAL_TO_INT(idval));
-    } else {
-        JSString *str;
-        if (JSVAL_IS_STRING(idval)) {
-            str = JSVAL_TO_STRING(idval);
-        } else {
-            JS_ASSERT(JSVAL_IS_OBJECT(idval));
-            str = js_ValueToString(cx, idval);
-            fputs("object ", fp);
-        }
-        if (!str)
-            fputs("<error>", fp);
-        else
-            js_FileEscapedString(fp, str, '"');
-    }
-
-    fprintf(fp, " g/s %p/%p slot %u attrs %x ",
-            JS_FUNC_TO_DATA_PTR(void *, rawGetter),
-            JS_FUNC_TO_DATA_PTR(void *, rawSetter),
-            slot, attrs);
-    if (attrs) {
-        int first = 1;
-        fputs("(", fp);
-#define DUMP_ATTR(name, display) if (attrs & JSPROP_##name) fputs(" " #display + first, fp), first = 0
-        DUMP_ATTR(ENUMERATE, enumerate);
-        DUMP_ATTR(READONLY, readonly);
-        DUMP_ATTR(PERMANENT, permanent);
-        DUMP_ATTR(GETTER, getter);
-        DUMP_ATTR(SETTER, setter);
-        DUMP_ATTR(SHARED, shared);
-#undef  DUMP_ATTR
-        fputs(") ", fp);
-    }
-
-    fprintf(fp, "flags %x ", flags);
-    if (flags) {
-        int first = 1;
-        fputs("(", fp);
-#define DUMP_FLAG(name, display) if (flags & name) fputs(" " #display + first, fp), first = 0
-        DUMP_FLAG(ALIAS, alias);
-        DUMP_FLAG(HAS_SHORTID, has_shortid);
-        DUMP_FLAG(METHOD, method);
-        DUMP_FLAG(MARK, mark);
-        DUMP_FLAG(SHAPE_REGEN, shape_regen);
-        DUMP_FLAG(IN_DICTIONARY, in_dictionary);
-#undef  DUMP_FLAG
-        fputs(") ", fp);
-    }
-
-    fprintf(fp, "shortid %d\n", shortid);
-}
-
-void
-JSScopeProperty::dumpSubtree(JSContext *cx, int level, FILE *fp)
-{
-    fprintf(fp, "%*sid ", level, "");
-    dump(cx, fp);
-
-    if (kids) {
-        ++level;
-        if (KIDS_IS_CHUNKY(kids)) {
-            PropTreeKidsChunk *chunk = KIDS_TO_CHUNK(kids);
-            do {
-                for (uintN i = 0; i < MAX_KIDS_PER_CHUNK; i++) {
-                    JSScopeProperty *kid = chunk->kids[i];
-                    if (!kid)
-                        break;
-                    JS_ASSERT(kid->parent == this);
-                    kid->dumpSubtree(cx, level, fp);
-                }
-            } while ((chunk = chunk->next) != NULL);
-        } else {
-            JSScopeProperty *kid = kids;
-            JS_ASSERT(kid->parent == this);
-            kid->dumpSubtree(cx, level, fp);
-        }
-    }
-}
-
-#endif /* DEBUG */
-
-void
-js_SweepScopeProperties(JSContext *cx)
-{
-    JSRuntime *rt = cx->runtime;
-    JSArena **ap, *a;
-    JSScopeProperty *limit, *sprop, *parent, *kids, *kid;
-    uintN liveCount;
-    PropTreeKidsChunk *chunk, *nextChunk, *freeChunk;
-    uintN i;
-
-#ifdef DEBUG
-    JSBasicStats bs;
-    uint32 livePropCapacity = 0, totalLiveCount = 0;
-    static FILE *logfp;
-    if (!logfp) {
-        if (const char *filename = getenv("JS_PROPTREE_STATFILE"))
-            logfp = fopen(filename, "w");
-    }
-
-    if (logfp) {
JS_BASIC_STATS_INIT(&bs); - MeterKidCount(&bs, rt->propertyTreeHash.entryCount); - JS_DHashTableEnumerate(&rt->propertyTreeHash, js_MeterPropertyTree, &bs); - - double props, nodes, mean, sigma; - - props = rt->liveScopePropsPreSweep; - nodes = rt->livePropTreeNodes; - JS_ASSERT(nodes == bs.sum); - mean = JS_MeanAndStdDevBS(&bs, &sigma); - - fprintf(logfp, - "props %g nodes %g beta %g meankids %g sigma %g max %u\n", - props, nodes, nodes / props, mean, sigma, bs.max); - - JS_DumpHistogram(&bs, logfp); - } -#endif - - ap = &rt->propertyArenaPool.first.next; - while ((a = *ap) != NULL) { - limit = (JSScopeProperty *) a->avail; - liveCount = 0; - for (sprop = (JSScopeProperty *) a->base; sprop < limit; sprop++) { - /* If the id is null, sprop is already on the freelist. */ - if (sprop->id == JSVAL_NULL) - continue; - - /* - * If the mark bit is set, sprop is alive, so clear the mark bit - * and continue the while loop. - * - * Regenerate sprop->shape if it hasn't already been refreshed - * during the mark phase, when live scopes' lastProp members are - * followed to update both scope->shape and lastProp->shape. - */ - if (sprop->marked()) { - sprop->clearMark(); - if (rt->gcRegenShapes) { - if (sprop->hasRegenFlag()) - sprop->clearRegenFlag(); - else - sprop->shape = js_RegenerateShapeForGC(cx); - } - liveCount++; - continue; - } - - if (!sprop->inDictionary()) { - /* Ok, sprop is garbage to collect: unlink it from its parent. */ - freeChunk = RemovePropertyTreeChild(rt, sprop); - - /* - * Take care to reparent all sprop's kids to their grandparent. - * InsertPropertyTreeChild can potentially fail for two reasons: - * - * 1. If parent is null, insertion into the root property hash - * table may fail. We are forced to leave the kid out of the - * table (as can already happen with duplicates) but ensure - * that the kid's parent pointer is set to null. - * - * 2. If parent is non-null, allocation of a new KidsChunk can - * fail. To prevent this from happening, we allow sprops's own - * chunks to be reused by the grandparent, which removes the - * need for InsertPropertyTreeChild to malloc a new KidsChunk. - * - * If sprop does not have chunky kids, then we rely on the - * RemovePropertyTreeChild call above (which removed sprop from - * its parent) either leaving one free entry, or else returning - * the now-unused chunk to us so we can reuse it. - * - * We also require the grandparent to have either no kids or else - * chunky kids. A single non-chunky kid would force a new chunk to - * be malloced in some cases (if sprop had a single non-chunky - * kid, or a multiple of MAX_KIDS_PER_CHUNK kids). Note that - * RemovePropertyTreeChild never converts a single-entry chunky - * kid back to a non-chunky kid, so we are assured of correct - * behaviour. - */ - kids = sprop->kids; - if (kids) { - sprop->kids = NULL; - parent = sprop->parent; - - /* The grandparent must have either no kids or chunky kids. */ - JS_ASSERT(!parent || !parent->kids || - KIDS_IS_CHUNKY(parent->kids)); - if (KIDS_IS_CHUNKY(kids)) { - chunk = KIDS_TO_CHUNK(kids); - do { - nextChunk = chunk->next; - chunk->next = NULL; - for (i = 0; i < MAX_KIDS_PER_CHUNK; i++) { - kid = chunk->kids[i]; - if (!kid) - break; - JS_ASSERT(kid->parent == sprop); - - /* - * Clear a space in the kids array for possible - * re-use by InsertPropertyTreeChild. - */ - chunk->kids[i] = NULL; - if (!InsertPropertyTreeChild(rt, parent, kid, chunk)) { - /* - * This can happen only if we failed to add an - * entry to the root property hash table. 
- */ - JS_ASSERT(!parent); - kid->parent = NULL; - } - } - if (!chunk->kids[0]) { - /* The chunk wasn't reused, so we must free it. */ - DestroyPropTreeKidsChunk(rt, chunk); - } - } while ((chunk = nextChunk) != NULL); - } else { - kid = kids; - if (!InsertPropertyTreeChild(rt, parent, kid, freeChunk)) { - /* - * This can happen only if we failed to add an entry - * to the root property hash table. - */ - JS_ASSERT(!parent); - kid->parent = NULL; - } - } - } - - if (freeChunk && !freeChunk->kids[0]) { - /* The chunk wasn't reused, so we must free it. */ - DestroyPropTreeKidsChunk(rt, freeChunk); - } - } - - /* Clear id so we know (above) that sprop is on the freelist. */ - sprop->id = JSVAL_NULL; - FREENODE_INSERT(rt->propertyFreeList, sprop); - JS_RUNTIME_UNMETER(rt, livePropTreeNodes); - } - - /* If a contains no live properties, return it to the malloc heap. */ - if (liveCount == 0) { - for (sprop = (JSScopeProperty *) a->base; sprop < limit; sprop++) - FREENODE_REMOVE(sprop); - JS_ARENA_DESTROY(&rt->propertyArenaPool, a, ap); - } else { -#ifdef DEBUG - livePropCapacity += limit - (JSScopeProperty *) a->base; - totalLiveCount += liveCount; -#endif - ap = &a->next; - } - } - -#ifdef DEBUG - if (logfp) { - fprintf(logfp, - "\nProperty tree stats for gcNumber %lu\n", - (unsigned long) rt->gcNumber); - - fprintf(logfp, "arenautil %g%%\n", - (totalLiveCount && livePropCapacity) - ? (totalLiveCount * 100.0) / livePropCapacity - : 0.0); - -#define RATE(f1, f2) (((double)js_scope_stats.f1 / js_scope_stats.f2) * 100.0) - - fprintf(logfp, - "Scope search stats:\n" - " searches: %6u\n" - " hits: %6u %5.2f%% of searches\n" - " misses: %6u %5.2f%%\n" - " hashes: %6u %5.2f%%\n" - " steps: %6u %5.2f%% %5.2f%% of hashes\n" - " stepHits: %6u %5.2f%% %5.2f%%\n" - " stepMisses: %6u %5.2f%% %5.2f%%\n" - " tableAllocFails %6u\n" - " toDictFails %6u\n" - " wrapWatchFails %6u\n" - " adds: %6u\n" - " addFails: %6u\n" - " puts: %6u\n" - " redundantPuts: %6u\n" - " putFails: %6u\n" - " changes: %6u\n" - " changeFails: %6u\n" - " compresses: %6u\n" - " grows: %6u\n" - " removes: %6u\n" - " removeFrees: %6u\n" - " uselessRemoves: %6u\n" - " shrinks: %6u\n", - js_scope_stats.searches, - js_scope_stats.hits, RATE(hits, searches), - js_scope_stats.misses, RATE(misses, searches), - js_scope_stats.hashes, RATE(hashes, searches), - js_scope_stats.steps, RATE(steps, searches), RATE(steps, hashes), - js_scope_stats.stepHits, - RATE(stepHits, searches), RATE(stepHits, hashes), - js_scope_stats.stepMisses, - RATE(stepMisses, searches), RATE(stepMisses, hashes), - js_scope_stats.tableAllocFails, - js_scope_stats.toDictFails, - js_scope_stats.wrapWatchFails, - js_scope_stats.adds, - js_scope_stats.addFails, - js_scope_stats.puts, - js_scope_stats.redundantPuts, - js_scope_stats.putFails, - js_scope_stats.changes, - js_scope_stats.changeFails, - js_scope_stats.compresses, - js_scope_stats.grows, - js_scope_stats.removes, - js_scope_stats.removeFrees, - js_scope_stats.uselessRemoves, - js_scope_stats.shrinks); - -#undef RATE - - fflush(logfp); - } - - if (const char *filename = getenv("JS_PROPTREE_DUMPFILE")) { - char pathname[1024]; - JS_snprintf(pathname, sizeof pathname, "%s.%lu", filename, (unsigned long)rt->gcNumber); - FILE *dumpfp = fopen(pathname, "w"); - if (dumpfp) { - JSPropertyTreeEntry *pte, *end; - - pte = (JSPropertyTreeEntry *) rt->propertyTreeHash.entryStore; - end = pte + JS_DHASH_TABLE_SIZE(&rt->propertyTreeHash); - while (pte < end) { - if (pte->child) - pte->child->dumpSubtree(cx, 0, dumpfp); - pte++; - } - 
fclose(dumpfp); - } - } -#endif /* DEBUG */ -} - -bool -js_InitPropertyTree(JSRuntime *rt) -{ - if (!JS_DHashTableInit(&rt->propertyTreeHash, &PropertyTreeHashOps, NULL, - sizeof(JSPropertyTreeEntry), JS_DHASH_MIN_SIZE)) { - rt->propertyTreeHash.ops = NULL; - return false; - } - JS_InitArenaPool(&rt->propertyArenaPool, "properties", - 256 * sizeof(JSScopeProperty), sizeof(void *), NULL); - return true; -} - -void -js_FinishPropertyTree(JSRuntime *rt) -{ - if (rt->propertyTreeHash.ops) { - JS_DHashTableFinish(&rt->propertyTreeHash); - rt->propertyTreeHash.ops = NULL; - } - JS_FinishArenaPool(&rt->propertyArenaPool); -} diff --git a/js/src/jsscope.h b/js/src/jsscope.h index 24efc0e5f3b..3b387530f67 100644 --- a/js/src/jsscope.h +++ b/js/src/jsscope.h @@ -48,10 +48,13 @@ #endif #include "jstypes.h" +#include "jscntxt.h" #include "jslock.h" #include "jsobj.h" #include "jsprvtd.h" #include "jspubtd.h" +#include "jspropertycache.h" +#include "jspropertytree.h" #ifdef _MSC_VER #pragma warning(push) @@ -204,6 +207,10 @@ JS_BEGIN_EXTERN_C * is added that crosses the threshold of 6 or more entries for hashing, we use * linear search from scope->lastProp to find a given id, and save on the space * overhead of a hash table. + * + * See jspropertytree.{h,cpp} for the actual PropertyTree implementation. This + * file contains object property map (historical misnomer: "scope" AKA JSScope) + * and property tree node ("sprop", JSScopeProperty) declarations. */ struct JSEmptyScope; @@ -361,12 +368,10 @@ struct JSScope : public JSObjectMap void trace(JSTracer *trc); - void brandingShapeChange(JSContext *cx, uint32 slot, jsval v); void deletingShapeChange(JSContext *cx, JSScopeProperty *sprop); bool methodShapeChange(JSContext *cx, JSScopeProperty *sprop, jsval toval); bool methodShapeChange(JSContext *cx, uint32 slot, jsval toval); void protoShapeChange(JSContext *cx); - void sealingShapeChange(JSContext *cx); void shadowingShapeChange(JSContext *cx, JSScopeProperty *sprop); /* By definition, hashShift = JS_DHASH_BITS - log2(capacity). */ @@ -400,8 +405,11 @@ struct JSScope : public JSObjectMap * sealed. */ bool sealed() { return flags & SEALED; } - void setSealed() { + + void seal(JSContext *cx) { JS_ASSERT(!isSharedEmpty()); + JS_ASSERT(!sealed()); + generateOwnShape(cx); flags |= SEALED; } @@ -411,7 +419,15 @@ struct JSScope : public JSObjectMap * evolves whenever a function value changes. 
*/ bool branded() { JS_ASSERT(!generic()); return flags & BRANDED; } - void setBranded() { flags |= BRANDED; } + + bool brand(JSContext *cx, uint32 slot, jsval v) { + JS_ASSERT(!branded()); + generateOwnShape(cx); + if (js_IsPropertyCacheDisabled(cx)) // check for rt->shapeGen overflow + return false; + flags |= BRANDED; + return true; + } bool generic() { return flags & GENERIC; } void setGeneric() { flags |= GENERIC; } @@ -554,23 +570,54 @@ js_CastAsObjectJSVal(JSPropertyOp op) return OBJECT_TO_JSVAL(JS_FUNC_TO_DATA_PTR(JSObject *, op)); } +namespace js { +class PropertyTree; +} + struct JSScopeProperty { friend struct JSScope; - friend void js_SweepScopeProperties(JSContext *cx); - friend JSScopeProperty * js_GetPropertyTreeChild(JSContext *cx, JSScopeProperty *parent, - const JSScopeProperty &child); + friend class js::PropertyTree; + friend JSDHashOperator js::RemoveNodeIfDead(JSDHashTable *table, JSDHashEntryHdr *hdr, + uint32 number, void *arg); + friend void js::SweepScopeProperties(JSContext *cx); jsid id; /* int-tagged jsval/untagged JSAtom* */ -private: - JSPropertyOp rawGetter; /* getter and setter hooks or objects */ - JSPropertyOp rawSetter; /* getter is JSObject* and setter is 0 + + private: + union { + JSPropertyOp rawGetter; /* getter and setter hooks or objects */ + JSScopeProperty *next; /* next node in freelist */ + }; + + union { + JSPropertyOp rawSetter; /* getter is JSObject* and setter is 0 if sprop->isMethod() */ -public: + JSScopeProperty **prevp; /* pointer to previous node's next, or + pointer to head of freelist */ + }; + + void insertFree(JSScopeProperty *&list) { + id = JSVAL_NULL; + next = list; + prevp = &list; + if (list) + list->prevp = &next; + list = this; + } + + void removeFree() { + JS_ASSERT(JSVAL_IS_NULL(id)); + *prevp = next; + if (next) + next->prevp = prevp; + } + + public: uint32 slot; /* abstract index in object slots */ + private: uint8 attrs; /* attributes, see jsapi.h JSPROP_* */ -private: uint8 flags; /* flags, see below for defines */ -public: + public: int16 shortid; /* tinyid, or local arg/var index */ JSScopeProperty *parent; /* parent node, reverse for..in order */ union { @@ -583,21 +630,25 @@ public: }; uint32 shape; /* property cache shape identifier */ -private: - /* Implementation-private bits stored in sprop->flags. */ + private: + /* + * Implementation-private bits stored in sprop->flags. See public: enum {} + * flags further below, which were allocated FCFS over time, so interleave + * with these bits. + */ enum { /* GC mark flag. */ - MARK = 0x01, + MARK = 0x01, /* * Set during a shape-regenerating GC if the shape has already been * regenerated. Unlike JSScope::SHAPE_REGEN, this does not toggle with - * each GC. js_SweepScopeProperties clears it. + * each GC. js::SweepScopeProperties clears it. */ - SHAPE_REGEN = 0x08, + SHAPE_REGEN = 0x08, /* Property stored in per-object dictionary, not shared property tree. */ - IN_DICTIONARY = 0x20 + IN_DICTIONARY = 0x20 }; JSScopeProperty(jsid id, JSPropertyOp getter, JSPropertyOp setter, uint32 slot, @@ -621,13 +672,13 @@ private: bool inDictionary() const { return (flags & IN_DICTIONARY) != 0; } -public: + public: /* Public bits stored in sprop->flags. 
*/ enum { - ALIAS = 0x02, - HAS_SHORTID = 0x04, - METHOD = 0x10, - PUBLIC_FLAGS = ALIAS | HAS_SHORTID | METHOD + ALIAS = 0x02, + HAS_SHORTID = 0x04, + METHOD = 0x10, + PUBLIC_FLAGS = ALIAS | HAS_SHORTID | METHOD }; uintN getFlags() const { return flags & PUBLIC_FLAGS; } @@ -647,30 +698,30 @@ public: JSPropertyOp getter() const { return rawGetter; } bool hasDefaultGetter() const { return !rawGetter; } JSPropertyOp getterOp() const { - JS_ASSERT(!(attrs & JSPROP_GETTER)); + JS_ASSERT(!hasGetterValue()); return rawGetter; } JSObject *getterObject() const { - JS_ASSERT(attrs & JSPROP_GETTER); + JS_ASSERT(hasGetterValue()); return js_CastAsObject(rawGetter); } jsval getterValue() const { - JS_ASSERT(attrs & JSPROP_GETTER); + JS_ASSERT(hasGetterValue()); return rawGetter ? js_CastAsObjectJSVal(rawGetter) : JSVAL_VOID; } JSPropertyOp setter() const { return rawSetter; } bool hasDefaultSetter() const { return !rawSetter; } JSPropertyOp setterOp() const { - JS_ASSERT(!(attrs & JSPROP_SETTER)); + JS_ASSERT(!hasSetterValue()); return rawSetter; } JSObject *setterObject() const { - JS_ASSERT((attrs & JSPROP_SETTER) && rawSetter); + JS_ASSERT(hasSetterValue() && rawSetter); return js_CastAsObject(rawSetter); } jsval setterValue() const { - JS_ASSERT(attrs & JSPROP_SETTER); + JS_ASSERT(hasSetterValue()); return rawSetter ? js_CastAsObjectJSVal(rawSetter) : JSVAL_VOID; } @@ -682,16 +733,30 @@ public: bool get(JSContext* cx, JSObject* obj, JSObject *pobj, jsval* vp); bool set(JSContext* cx, JSObject* obj, jsval* vp); + inline bool isSharedPermanent() const; + void trace(JSTracer *trc); - bool configurable() { return (attrs & JSPROP_PERMANENT) == 0; } - bool enumerable() { return (attrs & JSPROP_ENUMERATE) != 0; } - bool writable() { return (attrs & JSPROP_READONLY) == 0; } + bool hasSlot() const { return (attrs & JSPROP_SHARED) == 0; } - bool isDataDescriptor() { + uint8 attributes() const { return attrs; } + bool configurable() const { return (attrs & JSPROP_PERMANENT) == 0; } + bool enumerable() const { return (attrs & JSPROP_ENUMERATE) != 0; } + bool writable() const { + // JS_ASSERT(isDataDescriptor()); + return (attrs & JSPROP_READONLY) == 0; + } + bool hasGetterValue() const { return attrs & JSPROP_GETTER; } + bool hasSetterValue() const { return attrs & JSPROP_SETTER; } + + bool hasDefaultGetterOrIsMethod() const { + return hasDefaultGetter() || isMethod(); + } + + bool isDataDescriptor() const { return (attrs & (JSPROP_SETTER | JSPROP_GETTER)) == 0; } - bool isAccessorDescriptor() { + bool isAccessorDescriptor() const { return (attrs & (JSPROP_SETTER | JSPROP_GETTER)) != 0; } @@ -818,12 +883,6 @@ JSScope::insertDictionaryProperty(JSScopeProperty *sprop, JSScopeProperty **chil #define SLOT_IN_SCOPE(slot,scope) ((slot) < (scope)->freeslot) #define SPROP_HAS_VALID_SLOT(sprop,scope) SLOT_IN_SCOPE((sprop)->slot, scope) -#define SPROP_HAS_STUB_GETTER(sprop) ((sprop)->hasDefaultGetter()) -#define SPROP_HAS_STUB_SETTER(sprop) ((sprop)->hasDefaultSetter()) - -#define SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop) \ - (SPROP_HAS_STUB_GETTER(sprop) || (sprop)->isMethod()) - #ifndef JS_THREADSAFE # define js_GenerateShape(cx, gcLocked) js_GenerateShape (cx) #endif @@ -890,16 +949,22 @@ JSScope::search(jsid id, bool adding) inline bool JSScope::canProvideEmptyScope(JSObjectOps *ops, JSClass *clasp) { + /* + * An empty scope cannot provide another empty scope, or wrongful two-level + * prototype shape sharing ensues -- see bug 497789. 
+ */ + if (!object) + return false; return this->ops == ops && (!emptyScope || emptyScope->clasp == clasp); } inline bool JSScopeProperty::get(JSContext* cx, JSObject* obj, JSObject *pobj, jsval* vp) { - JS_ASSERT(!SPROP_HAS_STUB_GETTER(this)); JS_ASSERT(!JSVAL_IS_NULL(this->id)); + JS_ASSERT(!hasDefaultGetter()); - if (attrs & JSPROP_GETTER) { + if (hasGetterValue()) { JS_ASSERT(!isMethod()); jsval fval = getterValue(); return js_InternalGetOrSet(cx, obj, id, fval, JSACC_READ, 0, 0, vp); @@ -919,7 +984,7 @@ JSScopeProperty::get(JSContext* cx, JSObject* obj, JSObject *pobj, jsval* vp) * objects. XPConnect objects don't expect the hook to be called here, * but with objects do. */ - if (STOBJ_GET_CLASS(obj) == &js_WithClass) + if (obj->getClass() == &js_WithClass) obj = obj->map->ops->thisObject(cx, obj); return getterOp()(cx, obj, SPROP_USERID(this), vp); } @@ -927,7 +992,7 @@ JSScopeProperty::get(JSContext* cx, JSObject* obj, JSObject *pobj, jsval* vp) inline bool JSScopeProperty::set(JSContext* cx, JSObject* obj, jsval* vp) { - JS_ASSERT_IF(SPROP_HAS_STUB_SETTER(this), attrs & JSPROP_GETTER); + JS_ASSERT_IF(hasDefaultSetter(), hasGetterValue()); if (attrs & JSPROP_SETTER) { jsval fval = setterValue(); @@ -938,14 +1003,16 @@ JSScopeProperty::set(JSContext* cx, JSObject* obj, jsval* vp) return !!js_ReportGetterOnlyAssignment(cx); /* See the comment in JSScopeProperty::get as to why we can check for With. */ - if (STOBJ_GET_CLASS(obj) == &js_WithClass) + if (obj->getClass() == &js_WithClass) obj = obj->map->ops->thisObject(cx, obj); return setterOp()(cx, obj, SPROP_USERID(this), vp); } -/* Macro for common expression to test for shared permanent attributes. */ -#define SPROP_IS_SHARED_PERMANENT(sprop) \ - ((~(sprop)->attrs & (JSPROP_SHARED | JSPROP_PERMANENT)) == 0) +inline bool +JSScopeProperty::isSharedPermanent() const +{ + return (~attrs & (JSPROP_SHARED | JSPROP_PERMANENT)) == 0; +} extern JSScope * js_GetMutableScope(JSContext *cx, JSObject *obj); @@ -953,15 +1020,6 @@ js_GetMutableScope(JSContext *cx, JSObject *obj); extern void js_TraceId(JSTracer *trc, jsid id); -extern void -js_SweepScopeProperties(JSContext *cx); - -extern bool -js_InitPropertyTree(JSRuntime *rt); - -extern void -js_FinishPropertyTree(JSRuntime *rt); - JS_END_EXTERN_C #ifdef _MSC_VER diff --git a/js/src/jsscopeinlines.h b/js/src/jsscopeinlines.h index ba0fbc71935..395bb20028a 100644 --- a/js/src/jsscopeinlines.h +++ b/js/src/jsscopeinlines.h @@ -163,6 +163,7 @@ JSScope::trace(JSTracer *trc) JSContext *cx = trc->context; JSScopeProperty *sprop = lastProp; uint8 regenFlag = cx->runtime->gcRegenShapesScopeFlag; + if (IS_GC_MARKING_TRACER(trc) && cx->runtime->gcRegenShapes && !hasRegenFlag(regenFlag)) { /* * Either this scope has its own shape, which must be regenerated, or @@ -184,14 +185,20 @@ JSScope::trace(JSTracer *trc) shape = newShape; flags ^= JSScope::SHAPE_REGEN; - /* Also regenerate the shapes of empty scopes, in case they are not shared. */ - for (JSScope *empty = emptyScope; - empty && !empty->hasRegenFlag(regenFlag); - empty = empty->emptyScope) { - empty->shape = js_RegenerateShapeForGC(cx); - empty->flags ^= JSScope::SHAPE_REGEN; + /* Also regenerate the shapes of this scope's empty scope, if there is one. 
*/ + JSScope *empty = emptyScope; + if (empty) { + JS_ASSERT(!empty->emptyScope); + if (!empty->hasRegenFlag(regenFlag)) { + uint32 newEmptyShape = js_RegenerateShapeForGC(cx); + + JS_PROPERTY_TREE(cx).emptyShapeChange(empty->shape, newEmptyShape); + empty->shape = newEmptyShape; + empty->flags ^= JSScope::SHAPE_REGEN; + } } } + if (sprop) { JS_ASSERT(hasProperty(sprop)); diff --git a/js/src/jsscript.cpp b/js/src/jsscript.cpp index 01aa5877adf..db7a69a339c 100644 --- a/js/src/jsscript.cpp +++ b/js/src/jsscript.cpp @@ -94,7 +94,6 @@ js_XDRScript(JSXDRState *xdr, JSScript **scriptp, bool needMutableScript, uint32 length, lineno, nslots, magic; uint32 natoms, nsrcnotes, ntrynotes, nobjects, nupvars, nregexps, i; uint32 prologLength, version; - JSTempValueRooter tvr; JSPrincipals *principals; uint32 encodeable; JSBool filenameWasSaved; @@ -212,6 +211,8 @@ js_XDRScript(JSXDRState *xdr, JSScript **scriptp, bool needMutableScript, if (!JS_XDRUint32(xdr, &nregexps)) return JS_FALSE; + AutoScriptRooter tvr(cx, NULL); + if (xdr->mode == JSXDR_DECODE) { script = js_NewScript(cx, length, nsrcnotes, natoms, nobjects, nupvars, nregexps, ntrynotes); @@ -225,7 +226,7 @@ js_XDRScript(JSXDRState *xdr, JSScript **scriptp, bool needMutableScript, /* If we know nsrcnotes, we allocated space for notes in script. */ notes = script->notes(); *scriptp = script; - JS_PUSH_TEMP_ROOT_SCRIPT(cx, script, &tvr); + tvr.setScript(script); } /* @@ -311,7 +312,7 @@ js_XDRScript(JSXDRState *xdr, JSScript **scriptp, bool needMutableScript, JSObject **objp = &script->objects()->vector[i]; uint32 isBlock; if (xdr->mode == JSXDR_ENCODE) { - JSClass *clasp = STOBJ_GET_CLASS(*objp); + JSClass *clasp = (*objp)->getClass(); JS_ASSERT(clasp == &js_FunctionClass || clasp == &js_BlockClass); isBlock = (clasp == &js_BlockClass) ? 1 : 0; @@ -368,13 +369,10 @@ js_XDRScript(JSXDRState *xdr, JSScript **scriptp, bool needMutableScript, } xdr->script = oldscript; - if (xdr->mode == JSXDR_DECODE) - JS_POP_TEMP_ROOT(cx, &tvr); return JS_TRUE; error: if (xdr->mode == JSXDR_DECODE) { - JS_POP_TEMP_ROOT(cx, &tvr); if (script->filename && !filenameWasSaved) { cx->free((void *) script->filename); script->filename = NULL; @@ -831,7 +829,7 @@ js_NewScript(JSContext *cx, uint32 length, uint32 nsrcnotes, uint32 natoms, script = (JSScript *) cx->malloc(size); if (!script) return NULL; - memset(script, 0, sizeof(JSScript)); + PodZero(script); script->length = length; script->version = cx->version; @@ -1006,7 +1004,7 @@ js_NewScriptFromCG(JSContext *cx, JSCodeGenerator *cg) script->nfixed = (uint16) nfixed; js_InitAtomMap(cx, &script->atomMap, &cg->atomList); - filename = cg->compiler->tokenStream.filename; + filename = cg->compiler->tokenStream.getFilename(); if (filename) { script->filename = js_SaveScriptFilename(cx, filename); if (!script->filename) @@ -1014,8 +1012,7 @@ js_NewScriptFromCG(JSContext *cx, JSCodeGenerator *cg) } script->lineno = cg->firstLine; if (script->nfixed + cg->maxStackDepth >= JS_BIT(16)) { - js_ReportCompileErrorNumber(cx, CG_TS(cg), NULL, JSREPORT_ERROR, - JSMSG_NEED_DIET, "script"); + ReportCompileErrorNumber(cx, CG_TS(cg), NULL, JSREPORT_ERROR, JSMSG_NEED_DIET, "script"); goto bad; } script->nslots = script->nfixed + cg->maxStackDepth; @@ -1139,10 +1136,10 @@ js_DestroyScript(JSContext *cx, JSScript *script) * regenerating shapes, so we don't have to purge fragments if the GC is * currently running. 
* - * JS_THREADSAFE note: js_PurgePropertyCacheForScript purges only the - * current thread's property cache, so a script not owned by a function - * or object, which hands off lifetime management for that script to the - * GC, must be used by only one thread over its lifetime. + * JS_THREADSAFE note: The code below purges only the current thread's + * property cache, so a script not owned by a function or object, which + * hands off lifetime management for that script to the GC, must be used by + * only one thread over its lifetime. * * This should be an API-compatible change, since a script is never safe * against premature GC if shared among threads without a rooted object @@ -1158,7 +1155,7 @@ js_DestroyScript(JSContext *cx, JSScript *script) JSStackFrame *fp = js_GetTopStackFrame(cx); if (!(fp && (fp->flags & JSFRAME_EVAL))) { - js_PurgePropertyCacheForScript(cx, script); + JS_PROPERTY_CACHE(cx).purgeForScript(script); #ifdef CHECK_SCRIPT_OWNER JS_ASSERT(script->owner == cx->thread); diff --git a/js/src/jsscriptinlines.h b/js/src/jsscriptinlines.h index fdd3bf51df1..0ac77a4871f 100644 --- a/js/src/jsscriptinlines.h +++ b/js/src/jsscriptinlines.h @@ -50,7 +50,7 @@ inline JSFunction * JSScript::getFunction(size_t index) { JSObject *funobj = getObject(index); - JS_ASSERT(HAS_FUNCTION_CLASS(funobj)); + JS_ASSERT(funobj->isFunction()); JS_ASSERT(funobj == (JSObject *) funobj->getPrivate()); JSFunction *fun = (JSFunction *) funobj; JS_ASSERT(FUN_INTERPRETED(fun)); @@ -63,7 +63,7 @@ JSScript::getRegExp(size_t index) JSObjectArray *arr = regexps(); JS_ASSERT((uint32) index < arr->length); JSObject *obj = arr->vector[index]; - JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_RegExpClass); + JS_ASSERT(obj->getClass() == &js_RegExpClass); return obj; } diff --git a/js/src/jsstr.cpp b/js/src/jsstr.cpp index 50ee03aaaf4..67c64858f67 100644 --- a/js/src/jsstr.cpp +++ b/js/src/jsstr.cpp @@ -63,7 +63,7 @@ #include "jsbool.h" #include "jsbuiltins.h" #include "jscntxt.h" -#include "jsversion.h" +#include "jsfun.h" /* for JS_ARGS_LENGTH_MAX */ #include "jsgc.h" #include "jsinterp.h" #include "jslock.h" @@ -76,6 +76,7 @@ #include "jsstr.h" #include "jsbit.h" #include "jsvector.h" +#include "jsversion.h" #include "jsstrinlines.h" using namespace js; @@ -104,7 +105,7 @@ MinimizeDependentStrings(JSString *str, int level, JSString **basep) } while (base->isDependent()); } length = str->dependentLength(); - str->reinitDependent(base, start, length); + str->initDependent(base, start, length); } *basep = base; return start; @@ -187,7 +188,7 @@ js_ConcatStrings(JSContext *cx, JSString *left, JSString *right) /* Morph left into a dependent string if we realloc'd its buffer. 
 */
     if (ldep) {
-        ldep->reinitDependent(str, 0, ln);
+        ldep->initDependent(str, 0, ln);
 #ifdef DEBUG
         {
             JSRuntime *rt = cx->runtime;
@@ -219,7 +220,7 @@ js_UndependString(JSContext *cx, JSString *str)
         js_strncpy(s, str->dependentChars(), n);
         s[n] = 0;
-        str->reinitFlat(s, n);
+        str->initFlat(s, n);
 
 #ifdef DEBUG
     {
@@ -1095,6 +1096,71 @@ js_BoyerMooreHorspool(const jschar *text, jsuint textlen,
     return -1;
 }
 
+namespace {
+
+struct MemCmp {
+    typedef jsuint Extent;
+    static JS_ALWAYS_INLINE Extent computeExtent(const jschar *, jsuint patlen) {
+        return (patlen - 1) * sizeof(jschar);
+    }
+    static JS_ALWAYS_INLINE bool match(const jschar *p, const jschar *t, Extent extent) {
+        return memcmp(p, t, extent) == 0;
+    }
+};
+
+struct ManualCmp {
+    typedef const jschar *Extent;
+    static JS_ALWAYS_INLINE Extent computeExtent(const jschar *pat, jsuint patlen) {
+        return pat + patlen;
+    }
+    static JS_ALWAYS_INLINE bool match(const jschar *p, const jschar *t, Extent extent) {
+        for (; p != extent; ++p, ++t) {
+            if (*p != *t)
+                return false;
+        }
+        return true;
+    }
+};
+
+}
+
+template <class InnerMatch>
+static jsint
+Duff(const jschar *text, jsuint textlen, const jschar *pat, jsuint patlen)
+{
+    JS_ASSERT(patlen > 0 && textlen > 0);
+    const jschar *textend = text + textlen - (patlen - 1);
+    const jschar p0 = *pat;
+    const jschar *const patNext = pat + 1;
+    const typename InnerMatch::Extent extent = InnerMatch::computeExtent(pat, patlen);
+    uint8 fixup;
+
+    const jschar *t = text;
+    switch ((textend - t) & 7) {
+        do {
+          case 0: if (*t++ == p0) { fixup = 8; goto match; }
+          case 7: if (*t++ == p0) { fixup = 7; goto match; }
+          case 6: if (*t++ == p0) { fixup = 6; goto match; }
+          case 5: if (*t++ == p0) { fixup = 5; goto match; }
+          case 4: if (*t++ == p0) { fixup = 4; goto match; }
+          case 3: if (*t++ == p0) { fixup = 3; goto match; }
+          case 2: if (*t++ == p0) { fixup = 2; goto match; }
+          case 1: if (*t++ == p0) { fixup = 1; goto match; }
+            continue;
+            do {
+                if (*t++ == p0) {
+                  match:
+                    if (!InnerMatch::match(patNext, t, extent))
+                        goto failed_match;
+                    return t - text - 1;
+                }
+              failed_match:;
+            } while (--fixup > 0);
+        } while(t != textend);
+    }
+    return -1;
+}
+
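The split between MemCmp and ManualCmp is a compile-time policy: Duff's outer loop only scans for the pattern's first character, and the InnerMatch parameter decides how the remaining patlen - 1 characters are verified. A rough standalone sketch of the same shape, simplified to char and without the 8-way unrolling (all names here are illustrative, not from the patch):

#include <string.h>

// Two inner-match policies, mirroring MemCmp/ManualCmp above.
struct UseMemcmp {
    static bool match(const char *p, const char *t, size_t n) {
        return memcmp(p, t, n) == 0;      // one call, SIMD-friendly in most libcs
    }
};

struct UseLoop {
    static bool match(const char *p, const char *t, size_t n) {
        for (size_t i = 0; i < n; i++)    // no call overhead for tiny patterns
            if (p[i] != t[i])
                return false;
        return true;
    }
};

// Outer loop: find the first char, then let the policy verify the rest.
template <class Inner>
static int
Find(const char *text, size_t textlen, const char *pat, size_t patlen)
{
    if (patlen == 0 || textlen < patlen)
        return -1;
    for (size_t i = 0; i + patlen <= textlen; i++) {
        if (text[i] == pat[0] && Inner::match(pat + 1, text + i + 1, patlen - 1))
            return int(i);
    }
    return -1;
}

Selection then happens once, at the call site, exactly as StringMatch below does it: large patterns amortize the memcmp call, small ones favor the inline loop (the 128-character cutoff is the patch's, not a general constant).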
 static JS_ALWAYS_INLINE jsint
 StringMatch(const jschar *text, jsuint textlen,
             const jschar *pat, jsuint patlen)
@@ -1104,7 +1170,7 @@ StringMatch(const jschar *text, jsuint textlen,
     if (textlen < patlen)
         return -1;
 
-#if defined(__i386__) || defined(__i386)
+#if defined(__i386__) || defined(_M_IX86) || defined(__i386)
     /*
      * Given enough registers, the unrolled loop below is faster than the
      * following loop. 32-bit x86 does not have enough registers.
@@ -1138,50 +1204,18 @@ StringMatch(const jschar *text, jsuint textlen,
         return index;
     }
 
-    const jschar *textend = text + textlen - (patlen - 1);
-    const jschar *patend = pat + patlen;
-    const jschar p0 = *pat;
-    const jschar *patNext = pat + 1;
-    uint8 fixup;
-
-#if __APPLE__ && __GNUC__ && __i386__
     /*
-     * It is critical that |t| is kept in a register. The version of gcc we use
-     * to build on 32-bit Mac does not realize this. See bug 526173.
+     * For big patterns with large potential overlap we want the SIMD-optimized
+     * speed of memcmp. For small patterns, a simple loop is faster.
+     *
+     * FIXME: Linux memcmp performance is sad and the manual loop is faster.
      */
-    register const jschar *t asm("esi") = text;
-#else
-    const jschar *t = text;
+    return
+#if !defined(__linux__)
+           patlen > 128 ? Duff<MemCmp>(text, textlen, pat, patlen)
+                        :
 #endif
-
-    /* Credit: Duff */
-    switch ((textend - text) & 7) {
-        do {
-          case 0: if (*t++ == p0) { fixup = 8; goto match; }
-          case 7: if (*t++ == p0) { fixup = 7; goto match; }
-          case 6: if (*t++ == p0) { fixup = 6; goto match; }
-          case 5: if (*t++ == p0) { fixup = 5; goto match; }
-          case 4: if (*t++ == p0) { fixup = 4; goto match; }
-          case 3: if (*t++ == p0) { fixup = 3; goto match; }
-          case 2: if (*t++ == p0) { fixup = 2; goto match; }
-          case 1: if (*t++ == p0) { fixup = 1; goto match; }
-            continue;
-            do {
-                if (*t++ == p0) {
-                  match:
-                    for (const jschar *p1 = patNext, *t1 = t;
-                         p1 != patend;
-                         ++p1, ++t1) {
-                        if (*p1 != *t1)
-                            goto failed_match;
-                    }
-                    return t - text - 1;
-                }
-              failed_match:;
-            } while (--fixup > 0);
-        } while(t != textend);
-    }
-    return -1;
+           Duff<ManualCmp>(text, textlen, pat, patlen);
 }
 
 static JSBool
@@ -1603,7 +1637,7 @@ str_match(JSContext *cx, uintN argc, jsval *vp)
     if (!g.normalizeRegExp(false, 1, argc, vp))
         return false;
 
-    JSAutoTempValueRooter array(cx, JSVAL_NULL);
+    AutoValueRooter array(cx, JSVAL_NULL);
     if (!DoMatch(cx, vp, str, g, MatchCallback, array.addr(), MATCH_ARGS))
         return false;
 
@@ -2020,7 +2054,7 @@ static jsint
 find_split(JSContext *cx, JSString *str, JSRegExp *re, jsint *ip,
            JSSubString *sep)
 {
-    jsint i, j, k;
+    jsint i;
     size_t length;
     jschar *chars;
 
@@ -2106,119 +2140,104 @@ find_split(JSContext *cx, JSString *str, JSRegExp *re, jsint *ip,
      * occurrence of all of sep's chars. If we find them, return the index of
      * the first separator char. Otherwise, return length.
      */
-    j = 0;
-    while ((size_t)(k = i + j) < length) {
-        if (chars[k] == sep->chars[j]) {
-            if ((size_t)++j == sep->length)
-                return i;
-        } else {
-            i++;
-            j = 0;
-        }
-    }
-    return k;
+    jsint match = StringMatch(chars + i, length - i, sep->chars, sep->length);
+    return match == -1 ? length : match + i;
 }
 
 static JSBool
 str_split(JSContext *cx, uintN argc, jsval *vp)
 {
-    JSString *str, *sub;
-    JSObject *arrayobj;
-    jsval v;
-    JSBool ok, limited;
-    JSRegExp *re;
-    JSSubString *sep, tmp;
-    jsdouble d;
-    jsint i, j;
-    uint32 len, limit;
-
+    JSString *str;
     NORMALIZE_THIS(cx, vp, str);
 
-    arrayobj = js_NewArrayObject(cx, 0, NULL);
-    if (!arrayobj)
-        return JS_FALSE;
-    *vp = OBJECT_TO_JSVAL(arrayobj);
-
-    if (argc == 0) {
-        v = STRING_TO_JSVAL(str);
-        ok = arrayobj->setProperty(cx, INT_TO_JSID(0), &v);
-    } else {
-        if (VALUE_IS_REGEXP(cx, vp[2])) {
-            re = (JSRegExp *) JSVAL_TO_OBJECT(vp[2])->getPrivate();
-            sep = &tmp;
-
-            /* Set a magic value so we can detect a successful re match. */
-            sep->chars = NULL;
-            sep->length = 0;
-        } else {
-            JSString *str2 = js_ValueToString(cx, vp[2]);
-            if (!str2)
-                return JS_FALSE;
-            vp[2] = STRING_TO_JSVAL(str2);
-
-            /*
-             * Point sep at a local copy of str2's header because find_split
-             * will modify sep->length.
-             */
-            str2->getCharsAndLength(tmp.chars, tmp.length);
-            sep = &tmp;
-            re = NULL;
-        }
-
-        /* Use the second argument as the split limit, if given. */
-        limited = (argc > 1) && !JSVAL_IS_VOID(vp[3]);
-        limit = 0; /* Avoid warning. */
-        if (limited) {
-            d = js_ValueToNumber(cx, &vp[3]);
-            if (JSVAL_IS_NULL(vp[3]))
-                return JS_FALSE;
-
-            /* Clamp limit between 0 and 1 + string length. 
*/ - limit = js_DoubleToECMAUint32(d); - if (limit > str->length()) - limit = 1 + str->length(); - } - - len = i = 0; - while ((j = find_split(cx, str, re, &i, sep)) >= 0) { - if (limited && len >= limit) - break; - sub = js_NewDependentString(cx, str, i, (size_t)(j - i)); - if (!sub) - return JS_FALSE; - v = STRING_TO_JSVAL(sub); - if (!JS_SetElement(cx, arrayobj, len, &v)) - return JS_FALSE; - len++; - - /* - * Imitate perl's feature of including parenthesized substrings - * that matched part of the delimiter in the new array, after the - * split substring that was delimited. - */ - if (re && sep->chars) { - uintN num; - JSSubString *parsub; - - for (num = 0; num < cx->regExpStatics.parenCount; num++) { - if (limited && len >= limit) - break; - parsub = REGEXP_PAREN_SUBSTRING(&cx->regExpStatics, num); - sub = js_NewStringCopyN(cx, parsub->chars, parsub->length); - if (!sub) - return JS_FALSE; - v = STRING_TO_JSVAL(sub); - if (!JS_SetElement(cx, arrayobj, len, &v)) - return JS_FALSE; - len++; - } - sep->chars = NULL; - } - i = j + sep->length; - } - ok = (j != -2); + jsval v = STRING_TO_JSVAL(str); + JSObject *aobj = js_NewArrayObject(cx, 1, &v); + if (!aobj) + return false; + *vp = OBJECT_TO_JSVAL(aobj); + return true; } - return ok; + + JSRegExp *re; + JSSubString *sep, tmp; + if (VALUE_IS_REGEXP(cx, vp[2])) { + re = (JSRegExp *) JSVAL_TO_OBJECT(vp[2])->getPrivate(); + sep = &tmp; + + /* Set a magic value so we can detect a successful re match. */ + sep->chars = NULL; + sep->length = 0; + } else { + JSString *str2 = js_ValueToString(cx, vp[2]); + if (!str2) + return false; + vp[2] = STRING_TO_JSVAL(str2); + + /* + * Point sep at a local copy of str2's header because find_split + * will modify sep->length. + */ + str2->getCharsAndLength(tmp.chars, tmp.length); + sep = &tmp; + re = NULL; + } + + /* Use the second argument as the split limit, if given. */ + uint32 limit = 0; /* Avoid warning. */ + bool limited = (argc > 1) && !JSVAL_IS_VOID(vp[3]); + if (limited) { + jsdouble d = js_ValueToNumber(cx, &vp[3]); + if (JSVAL_IS_NULL(vp[3])) + return false; + + /* Clamp limit between 0 and 1 + string length. */ + limit = js_DoubleToECMAUint32(d); + if (limit > str->length()) + limit = 1 + str->length(); + } + + AutoValueVector splits(cx); + + jsint i, j; + uint32 len = i = 0; + while ((j = find_split(cx, str, re, &i, sep)) >= 0) { + if (limited && len >= limit) + break; + + JSString *sub = js_NewDependentString(cx, str, i, size_t(j - i)); + if (!sub || !splits.push(sub)) + return false; + len++; + + /* + * Imitate perl's feature of including parenthesized substrings that + * matched part of the delimiter in the new array, after the split + * substring that was delimited. + */ + if (re && sep->chars) { + for (uintN num = 0; num < cx->regExpStatics.parenCount; num++) { + if (limited && len >= limit) + break; + JSSubString *parsub = REGEXP_PAREN_SUBSTRING(&cx->regExpStatics, num); + sub = js_NewStringCopyN(cx, parsub->chars, parsub->length); + if (!sub || !splits.push(sub)) + return false; + len++; + } + sep->chars = NULL; + } + i = j + sep->length; + } + + if (j == -2) + return false; + + JSObject *aobj = js_NewArrayObject(cx, splits.length(), splits.buffer()); + if (!aobj) + return false; + *vp = OBJECT_TO_JSVAL(aobj); + return true; } #if JS_HAS_PERL_SUBSTR @@ -2744,7 +2763,7 @@ __attribute__ ((aligned (8))) #define O24(c) 0x32, O4(c) #define O25(c) 0x32, O5(c) -/* +/* * Array starts with 100, 101, 102... (0x31 0x30 0x30 0x00 for 100\0) * 100, 101, 102 also share the pointers to 0, 1, 2 ... 
* 110, 111, 112 also share the pointers to 10, 11, 12... @@ -3024,66 +3043,6 @@ static JSFunctionSpec string_static_methods[] = { JS_FS_END }; -static JSHashNumber -js_hash_string_pointer(const void *key) -{ - return (JSHashNumber)JS_PTR_TO_UINT32(key) >> JSVAL_TAGBITS; -} - -JSBool -js_InitRuntimeStringState(JSContext *cx) -{ - JSRuntime *rt; - - rt = cx->runtime; - rt->emptyString = ATOM_TO_STRING(rt->atomState.emptyAtom); - return JS_TRUE; -} - -JSBool -js_InitDeflatedStringCache(JSRuntime *rt) -{ - JSHashTable *cache; - - /* Initialize string cache */ - JS_ASSERT(!rt->deflatedStringCache); - cache = JS_NewHashTable(8, js_hash_string_pointer, - JS_CompareValues, JS_CompareValues, - NULL, NULL); - if (!cache) - return JS_FALSE; - rt->deflatedStringCache = cache; - -#ifdef JS_THREADSAFE - JS_ASSERT(!rt->deflatedStringCacheLock); - rt->deflatedStringCacheLock = JS_NEW_LOCK(); - if (!rt->deflatedStringCacheLock) - return JS_FALSE; -#endif - return JS_TRUE; -} - -void -js_FinishRuntimeStringState(JSContext *cx) -{ - cx->runtime->emptyString = NULL; -} - -void -js_FinishDeflatedStringCache(JSRuntime *rt) -{ - if (rt->deflatedStringCache) { - JS_HashTableDestroy(rt->deflatedStringCache); - rt->deflatedStringCache = NULL; - } -#ifdef JS_THREADSAFE - if (rt->deflatedStringCacheLock) { - JS_DESTROY_LOCK(rt->deflatedStringCacheLock); - rt->deflatedStringCacheLock = NULL; - } -#endif -} - JSObject * js_InitStringClass(JSContext *cx, JSObject *obj) { @@ -3273,26 +3232,6 @@ js_NewStringCopyZ(JSContext *cx, const jschar *s) return str; } -void -js_PurgeDeflatedStringCache(JSRuntime *rt, JSString *str) -{ - JSHashNumber hash; - JSHashEntry *he, **hep; - - hash = js_hash_string_pointer(str); - JS_ACQUIRE_LOCK(rt->deflatedStringCacheLock); - hep = JS_HashTableRawLookup(rt->deflatedStringCache, hash, str); - he = *hep; - if (he) { -#ifdef DEBUG - rt->deflatedStringCacheBytes -= str->length(); -#endif - js_free(he->value); - JS_HashTableRawRemove(rt->deflatedStringCache, hep, he); - } - JS_RELEASE_LOCK(rt->deflatedStringCacheLock); -} - JS_FRIEND_API(const char *) js_ValueToPrintable(JSContext *cx, jsval v, JSValueToStringFun v2sfun) { @@ -3384,7 +3323,7 @@ js_ValueToSource(JSContext *cx, jsval v) } JSAtom *atom = cx->runtime->atomState.toSourceAtom; - JSAutoTempValueRooter tvr(cx, JSVAL_NULL); + AutoValueRooter tvr(cx, JSVAL_NULL); if (!js_TryMethod(cx, JSVAL_TO_OBJECT(v), atom, 0, NULL, tvr.addr())) return NULL; return js_ValueToString(cx, tvr.value()); @@ -3576,20 +3515,26 @@ js_DeflateString(JSContext *cx, const jschar *chars, size_t nchars) return bytes; } +size_t +js_GetDeflatedStringLength(JSContext *cx, const jschar *chars, size_t nchars) +{ + if (!js_CStringsAreUTF8) + return nchars; + + return js_GetDeflatedUTF8StringLength(cx, chars, nchars); +} + /* * May be called with null cx through js_GetStringBytes, see below. 
*/ size_t -js_GetDeflatedStringLength(JSContext *cx, const jschar *chars, size_t nchars) +js_GetDeflatedUTF8StringLength(JSContext *cx, const jschar *chars, size_t nchars) { size_t nbytes; const jschar *end; uintN c, c2; char buffer[10]; - if (!js_CStringsAreUTF8) - return nchars; - nbytes = nchars; for (end = chars + nchars; chars != end; chars++) { c = *chars; @@ -3627,10 +3572,7 @@ JSBool js_DeflateStringToBuffer(JSContext *cx, const jschar *src, size_t srclen, char *dst, size_t *dstlenp) { - size_t dstlen, i, origDstlen, utf8Len; - jschar c, c2; - uint32 v; - uint8 utf8buf[6]; + size_t dstlen, i; dstlen = *dstlenp; if (!js_CStringsAreUTF8) { @@ -3649,6 +3591,19 @@ js_DeflateStringToBuffer(JSContext *cx, const jschar *src, size_t srclen, return JS_TRUE; } + return js_DeflateStringToUTF8Buffer(cx, src, srclen, dst, dstlenp); +} + +JSBool +js_DeflateStringToUTF8Buffer(JSContext *cx, const jschar *src, size_t srclen, + char *dst, size_t *dstlenp) +{ + size_t dstlen, i, origDstlen, utf8Len; + jschar c, c2; + uint32 v; + uint8 utf8buf[6]; + + dstlen = *dstlenp; origDstlen = dstlen; while (srclen) { c = *src++; @@ -3705,8 +3660,7 @@ JSBool js_InflateStringToBuffer(JSContext *cx, const char *src, size_t srclen, jschar *dst, size_t *dstlenp) { - size_t dstlen, i, origDstlen, offset, j, n; - uint32 v; + size_t dstlen, i; if (!js_CStringsAreUTF8) { if (dst) { @@ -3727,6 +3681,16 @@ js_InflateStringToBuffer(JSContext *cx, const char *src, size_t srclen, return JS_TRUE; } + return js_InflateUTF8StringToBuffer(cx, src, srclen, dst, dstlenp); +} + +JSBool +js_InflateUTF8StringToBuffer(JSContext *cx, const char *src, size_t srclen, + jschar *dst, size_t *dstlenp) +{ + size_t dstlen, origDstlen, offset, j, n; + uint32 v; + dstlen = dst ? *dstlenp : (size_t) -1; origDstlen = dstlen; offset = 0; @@ -3803,42 +3767,161 @@ bufferTooSmall: return JS_FALSE; } -JSBool -js_SetStringBytes(JSContext *cx, JSString *str, char *bytes, size_t length) +namespace js { + +DeflatedStringCache::DeflatedStringCache() { - JSRuntime *rt; - JSHashTable *cache; - JSBool ok; - JSHashNumber hash; - JSHashEntry **hep; - - rt = cx->runtime; - JS_ACQUIRE_LOCK(rt->deflatedStringCacheLock); - - cache = rt->deflatedStringCache; - hash = js_hash_string_pointer(str); - hep = JS_HashTableRawLookup(cache, hash, str); - JS_ASSERT(*hep == NULL); - ok = JS_HashTableRawAdd(cache, hep, hash, str, bytes) != NULL; - if (ok) { - str->setDeflated(); -#ifdef DEBUG - rt->deflatedStringCacheBytes += length; +#ifdef JS_THREADSAFE + lock = NULL; #endif +} + +bool +DeflatedStringCache::init() +{ +#ifdef JS_THREADSAFE + JS_ASSERT(!lock); + lock = JS_NEW_LOCK(); + if (!lock) + return false; +#endif + + /* + * Make room for 2K deflated strings that a typical browser session + * creates. + */ + return map.init(2048); +} + +DeflatedStringCache::~DeflatedStringCache() +{ +#ifdef JS_THREADSAFE + if (lock) + JS_DESTROY_LOCK(lock); +#endif +} + +void +DeflatedStringCache::sweep(JSContext *cx) +{ + /* + * We must take a lock even during the GC as JS_GetStringBytes() can be + * called outside the request. + */ + JS_ACQUIRE_LOCK(lock); + + for (Map::Enum e(map); !e.empty(); e.popFront()) { + JSString *str = e.front().key; + if (js_IsAboutToBeFinalized(str)) { + char *bytes = e.front().value; + e.removeFront(); + + /* + * We cannot use cx->free here as bytes may come from the + * embedding that calls JS_NewString(cx, bytes, length). Those + * bytes may not be allocated via js_malloc and may not have + * space for the background free list. 
+             */
+            js_free(bytes);
+        }
     }
-    JS_RELEASE_LOCK(rt->deflatedStringCacheLock);
+    JS_RELEASE_LOCK(lock);
+}
+
+void
+DeflatedStringCache::remove(JSString *str)
+{
+    JS_ACQUIRE_LOCK(lock);
+
+    Map::Ptr p = map.lookup(str);
+    if (p) {
+        js_free(p->value);
+        map.remove(p);
+    }
+
+    JS_RELEASE_LOCK(lock);
+}
+
+bool
+DeflatedStringCache::setBytes(JSContext *cx, JSString *str, char *bytes)
+{
+    JS_ACQUIRE_LOCK(lock);
+
+    Map::AddPtr p = map.lookupForAdd(str);
+    JS_ASSERT(!p);
+    bool ok = map.add(p, str, bytes);
+
+    JS_RELEASE_LOCK(lock);
+
+    if (!ok)
+        js_ReportOutOfMemory(cx);
     return ok;
 }
 
+char *
+DeflatedStringCache::getBytes(JSContext *cx, JSString *str)
+{
+    JS_ACQUIRE_LOCK(lock);
+
+    char *bytes;
+    do {
+        Map::AddPtr p = map.lookupForAdd(str);
+        if (p) {
+            bytes = p->value;
+            break;
+        }
+#ifdef JS_THREADSAFE
+        unsigned generation = map.generation();
+        JS_RELEASE_LOCK(lock);
+#endif
+        bytes = js_DeflateString(cx, str->chars(), str->length());
+        if (!bytes)
+            return NULL;
+#ifdef JS_THREADSAFE
+        JS_ACQUIRE_LOCK(lock);
+        if (generation != map.generation()) {
+            p = map.lookupForAdd(str);
+            if (p) {
+                /* Some other thread has asked for str bytes. */
+                if (cx)
+                    cx->free(bytes);
+                else
+                    js_free(bytes);
+                bytes = p->value;
+                break;
+            }
+        }
+#endif
+        if (!map.add(p, str, bytes)) {
+            JS_RELEASE_LOCK(lock);
+            if (cx) {
+                cx->free(bytes);
+                js_ReportOutOfMemory(cx);
+            } else {
+                js_free(bytes);
+            }
+            return NULL;
+        }
+    } while (false);
+
+    JS_ASSERT(bytes);
+
+    /* Try to catch failure to JS_ShutDown between runtime epochs. */
+    JS_ASSERT_IF(!js_CStringsAreUTF8 && *bytes != (char) str->chars()[0],
+                 *bytes == '\0' && str->empty());
+
+    JS_RELEASE_LOCK(lock);
+    return bytes;
+}
+
+} /* namespace js */
+
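getBytes above is a double-checked cache fill: the lock is dropped around the expensive js_DeflateString call, and HashMap::generation(), which changes whenever the table is modified, tells the caller whether a re-lookup is needed after reacquiring the lock. A compilable sketch of the same protocol, with std::mutex and std::unordered_map standing in for JSLock and js::HashMap (all names illustrative, not the patch's API):

#include <mutex>
#include <unordered_map>

class ByteCache {
    std::mutex lock;
    unsigned generation;                       // bumped on every insert
    std::unordered_map<const void *, char *> map;

    static char *deflate(const void *) {       // stand-in for js_DeflateString
        return new char[8]();
    }

  public:
    ByteCache() : generation(0) {}

    char *getBytes(const void *str) {
        std::unique_lock<std::mutex> guard(lock);
        auto p = map.find(str);
        if (p != map.end())
            return p->second;

        unsigned gen = generation;
        guard.unlock();                        // do the slow work unlocked
        char *bytes = deflate(str);
        if (!bytes)
            return nullptr;
        guard.lock();

        if (gen != generation) {               // table changed while unlocked:
            p = map.find(str);
            if (p != map.end()) {              // another thread won the race;
                delete[] bytes;                // discard our copy, use theirs
                return p->second;
            }
        }
        ++generation;
        map.emplace(str, bytes);
        return bytes;
    }
};

The generation check is what keeps the unlock safe: if nothing touched the table, the earlier failed lookup is still valid and no second lookup is needed.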
 const char *
 js_GetStringBytes(JSContext *cx, JSString *str)
 {
     JSRuntime *rt;
-    JSHashTable *cache;
     char *bytes;
-    JSHashNumber hash;
-    JSHashEntry *he, **hep;
 
     if (JSString::isUnitString(str)) {
 #ifdef IS_LITTLE_ENDIAN
@@ -3868,50 +3951,7 @@ js_GetStringBytes(JSContext *cx, JSString *str)
         rt = js_GetGCStringRuntime(str);
     }
 
-#ifdef JS_THREADSAFE
-    if (!rt->deflatedStringCacheLock) {
-        /*
-         * Called from last GC (see js_DestroyContext), after runtime string
-         * state has been finalized. We have no choice but to leak here.
-         */
-        return js_DeflateString(NULL, str->chars(), str->length());
-    }
-#endif
-
-    JS_ACQUIRE_LOCK(rt->deflatedStringCacheLock);
-
-    cache = rt->deflatedStringCache;
-    hash = js_hash_string_pointer(str);
-    hep = JS_HashTableRawLookup(cache, hash, str);
-    he = *hep;
-    if (he) {
-        bytes = (char *) he->value;
-
-        /* Try to catch failure to JS_ShutDown between runtime epochs. */
-        if (!js_CStringsAreUTF8) {
-            JS_ASSERT_IF(*bytes != (char) str->chars()[0],
-                         *bytes == '\0' && str->empty());
-        }
-    } else {
-        bytes = js_DeflateString(cx, str->chars(), str->length());
-        if (bytes) {
-            if (JS_HashTableRawAdd(cache, hep, hash, str, bytes)) {
-#ifdef DEBUG
-                rt->deflatedStringCacheBytes += str->length();
-#endif
-                str->setDeflated();
-            } else {
-                if (cx)
-                    cx->free(bytes);
-                else
-                    js_free(bytes);
-                bytes = NULL;
-            }
-        }
-    }
-
-    JS_RELEASE_LOCK(rt->deflatedStringCacheLock);
-    return bytes;
+    return rt->deflatedStringCache->getBytes(cx, str);
 }
 
 /*
diff --git a/js/src/jsstr.h b/js/src/jsstr.h
index 4ce0f577ea2..392e1c9f4a5 100644
--- a/js/src/jsstr.h
+++ b/js/src/jsstr.h
@@ -51,6 +51,7 @@
 #include <ctype.h>
 #include "jspubtd.h"
 #include "jsprvtd.h"
+#include "jshashtable.h"
 #include "jslock.h"
 
 JS_BEGIN_EXTERN_C
@@ -87,10 +88,6 @@ JS_STATIC_ASSERT(JS_BITS_PER_WORD >= 32);
  * A flat string with the ATOMIZED flag means that the string is hashed as
  * an atom. This flag is used to avoid re-hashing the already-atomized string.
  *
- * Any string with the DEFLATED flag means that the string has an entry in the
- * deflated string cache. The GC uses this flag to optimize string finalization
- * and avoid an expensive cache lookup for strings that were never deflated.
- *
 * When the DEPENDENT flag is set, the string depends on characters of another
 * string strongly referenced by the mBase field. The base member may point to
 * another dependent string if chars() has not been called yet.
@@ -124,7 +121,6 @@ struct JSString {
     static const size_t DEPENDENT = JSSTRING_BIT(1);
     static const size_t MUTABLE   = JSSTRING_BIT(2);
     static const size_t ATOMIZED  = JSSTRING_BIT(3);
-    static const size_t DEFLATED  = JSSTRING_BIT(4);
 
     inline bool hasFlag(size_t flag) const {
         return (mFlags & flag) != 0;
@@ -145,14 +141,6 @@ struct JSString {
         return !isDependent();
     }
 
-    inline bool isDeflated() const {
-        return hasFlag(DEFLATED);
-    }
-
-    inline void setDeflated() {
-        JS_ATOMIC_SET_MASK(&mFlags, DEFLATED);
-    }
-
     inline bool isMutable() const {
         return !isDependent() && hasFlag(MUTABLE);
     }
@@ -201,18 +189,6 @@ struct JSString {
         return length();
     }
 
-    /*
-     * Special flat string initializer that preserves the DEFLATED flag.
-     * Use this method when reinitializing an existing string which may be
-     * hashed to its deflated bytes. Newborn strings must use initFlat.
-     */
-    void reinitFlat(jschar *chars, size_t length) {
-        mLength = length;
-        mOffset = 0;
-        mFlags = mFlags & DEFLATED;
-        mChars = chars;
-    }
-
     /*
      * Methods to manipulate atomized and mutable flags of flat strings. It is
     * safe to use these without extra locking due to the following properties:
@@ -264,15 +240,6 @@ struct JSString {
         mBase = bstr;
     }
 
-    /* See JSString::reinitFlat. */
-    inline void reinitDependent(JSString *bstr, size_t off, size_t len) {
-        JS_ASSERT(len <= MAX_LENGTH);
-        mLength = len;
-        mOffset = off;
-        mFlags = DEPENDENT | (mFlags & DEFLATED);
-        mBase = bstr;
-    }
-
     inline JSString *dependentBase() const {
         JS_ASSERT(isDependent());
         return mBase;
@@ -503,19 +470,6 @@ JS_ISSPACE(jschar c)
 #define JS7_UNHEX(c)    (uintN)(JS7_ISDEC(c) ? (c) - '0' : 10 + tolower(c) - 'a')
 #define JS7_ISLET(c)    ((c) < 128 && isalpha(c))
 
-/* Initialize per-runtime string state for the first context in the runtime. */
-extern JSBool
-js_InitRuntimeStringState(JSContext *cx);
-
-extern JSBool
-js_InitDeflatedStringCache(JSRuntime *rt);
-
-extern void
-js_FinishRuntimeStringState(JSContext *cx);
-
-extern void
-js_FinishDeflatedStringCache(JSRuntime *rt);
-
 /* Initialize the String class, returning its prototype object. */
 extern JSClass js_StringClass;
 
@@ -663,35 +617,50 @@
 js_DeflateString(JSContext *cx, const jschar *chars, size_t length);
 
 /*
  * Inflate bytes to JS chars into a buffer. 'chars' must be large enough for
  * 'length' jschars. The buffer is NOT null-terminated. The destination length
  * must be initialized with the buffer size and will contain on return the
- * number of copied chars.
+ * number of copied chars. Conversion behavior depends on js_CStringsAreUTF8.
  */
 extern JSBool
 js_InflateStringToBuffer(JSContext *cx, const char *bytes, size_t length,
                          jschar *chars, size_t *charsLength);
 
 /*
- * Get number of bytes in the deflated sequence of characters.
+ * Same as js_InflateStringToBuffer, but always treats 'bytes' as UTF-8.
+ */
+extern JSBool
+js_InflateUTF8StringToBuffer(JSContext *cx, const char *bytes, size_t length,
+                             jschar *chars, size_t *charsLength);
+
+/*
+ * Get number of bytes in the deflated sequence of characters. Behavior depends
+ * on js_CStringsAreUTF8.
 */
 extern size_t
 js_GetDeflatedStringLength(JSContext *cx, const jschar *chars,
                            size_t charsLength);
 
+/*
+ * Same as js_GetDeflatedStringLength, but always treats the result as UTF-8.
+ */
+extern size_t
+js_GetDeflatedUTF8StringLength(JSContext *cx, const jschar *chars,
+                               size_t charsLength);
+
 /*
  * Deflate JS chars to bytes into a buffer. 'bytes' must be large enough for
  * 'length' chars. The buffer is NOT null-terminated. The destination length
  * must be initialized with the buffer size and will contain on return the
- * number of copied bytes.
+ * number of copied bytes. Conversion behavior depends on js_CStringsAreUTF8.
 */
 extern JSBool
 js_DeflateStringToBuffer(JSContext *cx, const jschar *chars,
                          size_t charsLength, char *bytes, size_t *length);
 
 /*
- * Associate bytes with str in the deflated string cache, returning true on
- * successful association, false on out of memory.
+ * Same as js_DeflateStringToBuffer, but always treats 'bytes' as UTF-8.
 */
 extern JSBool
-js_SetStringBytes(JSContext *cx, JSString *str, char *bytes, size_t length);
+js_DeflateStringToUTF8Buffer(JSContext *cx, const jschar *chars,
+                             size_t charsLength, char *bytes, size_t *length);
 
 /*
  * Find or create a deflated string cache entry for str that contains its
@@ -700,10 +669,6 @@
 extern const char *
 js_GetStringBytes(JSContext *cx, JSString *str);
 
-/* Remove a deflated string cache entry associated with str if any. */
-extern void
-js_PurgeDeflatedStringCache(JSRuntime *rt, JSString *str);
-
 /* Export a few natives and a helper to other files in SpiderMonkey. */
 extern JSBool
 js_str_escape(JSContext *cx, JSObject *obj, uintN argc, jsval *argv,
@@ -750,4 +715,57 @@ js_String(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval);
 
 JS_END_EXTERN_C
 
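StringPtrHasher, in the class added below, hashes raw JSString addresses after shifting out the alignment bits: GC-allocated strings sit on sizeof(JSString) boundaries, so the low log2(sizeof(JSString)) bits of every key are always zero and would waste entropy in a power-of-two-sized table. A minimal sketch of that trick, with a hypothetical 16-byte Cell in place of JSString:

#include <stdint.h>

struct Cell { char pad[16]; };   // stand-in for a GC thing on 16-byte cells

// The low 4 bits of every Cell address are zero by construction; shifting
// them out moves real entropy into the low bits the hash table indexes on.
static uint32_t HashPtr(const Cell *p)
{
    uintptr_t bits = reinterpret_cast<uintptr_t>(p);
    const unsigned kAlignLog = 4;             // log2(sizeof(Cell))
    return uint32_t(bits >> kAlignLog);
}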
+namespace js {
+
+class DeflatedStringCache {
+  public:
+    DeflatedStringCache();
+    bool init();
+    ~DeflatedStringCache();
+
+    void sweep(JSContext *cx);
+    void remove(JSString *str);
+    bool setBytes(JSContext *cx, JSString *str, char *bytes);
+
+  private:
+    struct StringPtrHasher
+    {
+        typedef JSString *Lookup;
+
+        static uint32 hash(JSString *str) {
+            /*
+             * We hash only GC-allocated Strings. They are aligned on
+             * sizeof(JSString) boundary so we can improve hashing by stripping
+             * initial zeros.
+             */
+            const jsuword ALIGN_LOG = tl::FloorLog2<sizeof(JSString)>::result;
+            JS_STATIC_ASSERT(sizeof(JSString) == (size_t(1) << ALIGN_LOG));
+
+            jsuword ptr = reinterpret_cast<jsuword>(str);
+            jsuword key = ptr >> ALIGN_LOG;
+            JS_ASSERT((key << ALIGN_LOG) == ptr);
+            return uint32(key);
+        }
+
+        static bool match(JSString *s1, JSString *s2) {
+            return s1 == s2;
+        }
+    };
+
+    typedef HashMap<JSString *, char *, StringPtrHasher, SystemAllocPolicy> Map;
+
+    /* cx is NULL when the caller is JS_GetStringBytes(JSString *). */
+    char *getBytes(JSContext *cx, JSString *str);
+
+    friend const char *
+    ::js_GetStringBytes(JSContext *cx, JSString *str);
+
+    Map map;
+#ifdef JS_THREADSAFE
+    JSLock *lock;
+#endif
+};
+
+} /* namespace js */
+
 #endif /* jsstr_h___ */
diff --git a/js/src/jstl.h b/js/src/jstl.h
index b0bcc3a3bff..c894767237e 100644
--- a/js/src/jstl.h
+++ b/js/src/jstl.h
@@ -43,6 +43,7 @@
 #include "jsbit.h"
 
 #include <new>
+#include <string.h>
 
 namespace js {
 
@@ -258,44 +259,79 @@ class SystemAllocPolicy
 template <class T>
 class LazilyConstructed
 {
-    char bytes[sizeof(T)];
-    bool constructed;
+    union {
+        uint64 align;
+        char bytes[sizeof(T) + 1];
+    };
+    T &asT() { return *reinterpret_cast<T *>(bytes); }
+    char & constructed() { return bytes[sizeof(T)]; }
 
   public:
-    LazilyConstructed() : constructed(false) {}
-    ~LazilyConstructed() { if (constructed) asT().~T(); }
+    LazilyConstructed() { constructed() = false; }
+    ~LazilyConstructed() { if (constructed()) asT().~T(); }
 
-    bool empty() const { return !constructed; }
+    bool empty() const { return !constructed(); }
 
     void construct() {
-        JS_ASSERT(!constructed);
+        JS_ASSERT(!constructed());
         new(bytes) T();
-        constructed = true;
+        constructed() = true;
     }
 
     template <class T1>
     void construct(const T1 &t1) {
-        JS_ASSERT(!constructed);
+        JS_ASSERT(!constructed());
         new(bytes) T(t1);
-        constructed = true;
+        constructed() = true;
     }
 
     template <class T1, class T2>
     void construct(const T1 &t1, const T2 &t2) {
-        JS_ASSERT(!constructed);
+        JS_ASSERT(!constructed());
         new(bytes) T(t1, t2);
-        constructed = true;
+        constructed() = true;
     }
 
     template <class T1, class T2, class T3>
     void construct(const T1 &t1, const T2 &t2, const T3 &t3) {
-        JS_ASSERT(!constructed);
+        JS_ASSERT(!constructed());
         new(bytes) T(t1, t2, t3);
-        constructed = true;
+        constructed() = true;
     }
 };
 
+template <class T>
+JS_ALWAYS_INLINE static void
+PodZero(T *t)
+{
+    memset(t, 0, sizeof(T));
+}
+
+template <class T>
+JS_ALWAYS_INLINE static void
+PodZero(T *t, size_t nelem)
+{
+    memset(t, 0, nelem * sizeof(T));
+}
+
+/*
+ * Arrays implicitly convert to pointers to their first element, which is
+ * dangerous when combined with the above PodZero definitions. Adding an
+ * overload for arrays is ambiguous, so we need another identifier. The
+ * ambiguous overload is left to catch mistaken uses of PodZero; if you get a
+ * compile error involving PodZero and array types, use PodArrayZero instead.
+ */
+template <class T, size_t N> static void PodZero(T (&)[N]);          /* undefined */
+template <class T, size_t N> static void PodZero(T (&)[N], size_t);  /* undefined */
+
+template <class T, size_t N>
+JS_ALWAYS_INLINE static void
+PodArrayZero(T (&t)[N])
+{
+    memset(t, 0, N * sizeof(T));
+}
+
 } /* namespace js */
 
 #endif /* jstl_h_ */
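The undefined array overloads exist purely to turn a silent bug into a link error. A small sketch of the mistake they catch; Zero here plays the role of an unguarded PodZero (the function name is illustrative):

#include <string.h>

template <class T>
static void Zero(T *t) { memset(t, 0, sizeof(T)); }   // PodZero's shape

int main()
{
    int v[8];
    // Zero(v) compiles without complaint: v decays to int*, T deduces to
    // int, and only sizeof(int) bytes get cleared -- 4 of the 32 intended.
    Zero(v);
    // With the undefined array overloads in scope, PodZero(v) fails to
    // link; PodArrayZero(v) deduces N = 8 and clears the whole array:
    memset(v, 0, sizeof(v));   // what PodArrayZero(v) expands to
    return v[7];
}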
diff --git a/js/src/jstracer.cpp b/js/src/jstracer.cpp
index cc73b1aae4b..f40c5d8e15c 100644
--- a/js/src/jstracer.cpp
+++ b/js/src/jstracer.cpp
@@ -78,7 +78,7 @@
 #include "jstypedarray.h"
 
 #include "jsatominlines.h"
-#include "jsinterpinlines.h"
+#include "jspropertycacheinlines.h"
 #include "jsobjinlines.h"
 #include "jsscopeinlines.h"
 #include "jsscriptinlines.h"
@@ -131,50 +131,47 @@ nanojit::Allocator::postReset() {
     vma->mSize = 0;
 }
 
-void
-StackFilter::getTops(LIns* guard, int& spTop, int& rpTop)
+int
+StackFilter::getTop(LIns* guard)
 {
     VMSideExit* e = (VMSideExit*)guard->record()->exit;
-    spTop = e->sp_adj;
-    rpTop = e->rp_adj;
+    return e->sp_adj;
 }
 
 #if defined NJ_VERBOSE
 void
-LirNameMap::formatGuard(LIns *i, char *out)
+LInsPrinter::formatGuard(InsBuf *buf, LIns *ins)
 {
-    VMSideExit *x;
-
-    x = (VMSideExit *)i->record()->exit;
-    sprintf(out,
+    RefBuf b1, b2;
+    VMSideExit *x = (VMSideExit *)ins->record()->exit;
+    VMPI_snprintf(buf->buf, buf->len,
             "%s: %s %s -> pc=%p imacpc=%p sp%+ld rp%+ld (GuardID=%03d)",
-            formatRef(i),
-            lirNames[i->opcode()],
-            i->oprnd1() ? formatRef(i->oprnd1()) : "",
+            formatRef(&b1, ins),
+            lirNames[ins->opcode()],
+            ins->oprnd1() ? formatRef(&b2, ins->oprnd1()) : "",
             (void *)x->pc,
             (void *)x->imacpc,
             (long int)x->sp_adj,
             (long int)x->rp_adj,
-            i->record()->profGuardID);
+            ins->record()->profGuardID);
 }
 
 void
-LirNameMap::formatGuardXov(LIns *i, char *out)
+LInsPrinter::formatGuardXov(InsBuf *buf, LIns *ins)
 {
-    VMSideExit *x;
-
-    x = (VMSideExit *)i->record()->exit;
-    sprintf(out,
+    RefBuf b1, b2, b3;
+    VMSideExit *x = (VMSideExit *)ins->record()->exit;
+    VMPI_snprintf(buf->buf, buf->len,
             "%s = %s %s, %s -> pc=%p imacpc=%p sp%+ld rp%+ld (GuardID=%03d)",
-            formatRef(i),
-            lirNames[i->opcode()],
-            formatRef(i->oprnd1()),
-            formatRef(i->oprnd2()),
+            formatRef(&b1, ins),
+            lirNames[ins->opcode()],
+            formatRef(&b2, ins->oprnd1()),
+            formatRef(&b3, ins->oprnd2()),
             (void *)x->pc,
             (void *)x->imacpc,
             (long int)x->sp_adj,
             (long int)x->rp_adj,
-            i->record()->profGuardID);
+            ins->record()->profGuardID);
 }
 
 #endif
@@ -197,14 +194,6 @@ using namespace nanojit;
 #define RETURN_IF_XML_A(val) RETURN_VALUE_IF_XML(val, ARECORD_STOP)
 #define RETURN_IF_XML(val)   RETURN_VALUE_IF_XML(val, RECORD_STOP)
 
-/*
- * Never use JSVAL_IS_BOOLEAN because it restricts the value (true, false) and
- * the type. What you want to use is JSVAL_IS_SPECIAL(x) and then handle the
- * undefined case properly (bug 457363).
- */
-#undef JSVAL_IS_BOOLEAN
-#define JSVAL_IS_BOOLEAN(x) JS_STATIC_ASSERT(0)
-
 JS_STATIC_ASSERT(sizeof(TraceType) == 1);
 JS_STATIC_ASSERT(offsetof(TraceNativeStorage, stack_global_buf) % 16 == 0);
 
@@ -610,8 +599,8 @@ FragProfiling_showResults(TraceMonitor* tm)
     uint64_t totCount = 0, cumulCount;
     uint32_t totSE = 0;
     size_t totCodeB = 0, totExitB = 0;
-    memset(topFragID, 0, sizeof(topFragID));
-    memset(topPI,     0, sizeof(topPI));
+    PodArrayZero(topFragID);
+    PodArrayZero(topPI);
     FragStatsMap::Iter iter(*tm->profTab);
     while (iter.next()) {
         uint32_t fragID  = iter.key();
@@ -634,7 +623,7 @@ FragProfiling_showResults(TraceMonitor* tm)
                 break;
         }
         r++;
-        AvmAssert(r >= 0 && r <= N_TOP_BLOCKS);
+        NanoAssert(r >= 0 && r <= N_TOP_BLOCKS);
         /* This entry should be placed at topPI[r], and entries at higher
            numbered slots moved up one. */
         if (r < N_TOP_BLOCKS) {
@@ -848,7 +837,7 @@ TraceRecorder::tprint(const char *format, int count, nanojit::LIns *insa[])
     double *args = (double*) traceMonitor->traceAlloc->alloc(count * sizeof(double));
     for (int i = 0; i < count; ++i) {
         JS_ASSERT(insa[i]);
-        lir->insStorei(insa[i], INS_CONSTPTR(args), sizeof(double) * i);
+        lir->insStorei(insa[i], INS_CONSTPTR(args), sizeof(double) * i, ACC_OTHER);
     }
 
     LIns* args_ins[] = { INS_CONSTPTR(args), INS_CONST(count), INS_CONSTPTR(data) };
@@ -1048,15 +1037,18 @@ GetPromotedType(jsval v)
     if (JSVAL_IS_OBJECT(v)) {
         if (JSVAL_IS_NULL(v))
             return TT_NULL;
-        if (HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v)))
+        if (JSVAL_TO_OBJECT(v)->isFunction())
             return TT_FUNCTION;
         return TT_OBJECT;
     }
+    /* N.B. void is JSVAL_SPECIAL. */
+    if (JSVAL_IS_VOID(v))
+        return TT_VOID;
     uint8_t tag = JSVAL_TAG(v);
     JS_ASSERT(tag == JSVAL_DOUBLE || tag == JSVAL_STRING || tag == JSVAL_SPECIAL);
     JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_DOUBLE) == JSVAL_DOUBLE);
     JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_STRING) == JSVAL_STRING);
-    JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_PSEUDOBOOLEAN) == JSVAL_SPECIAL);
+    JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_SPECIAL) == JSVAL_SPECIAL);
     return TraceType(tag);
 }
 
@@ -1069,15 +1061,18 @@ getCoercedType(jsval v)
     if (JSVAL_IS_OBJECT(v)) {
         if (JSVAL_IS_NULL(v))
             return TT_NULL;
-        if (HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v)))
+        if (JSVAL_TO_OBJECT(v)->isFunction())
            return TT_FUNCTION;
        return TT_OBJECT;
     }
+    /* N.B. void is JSVAL_SPECIAL. */
+    if (JSVAL_IS_VOID(v))
+        return TT_VOID;
     uint8_t tag = JSVAL_TAG(v);
     JS_ASSERT(tag == JSVAL_DOUBLE || tag == JSVAL_STRING || tag == JSVAL_SPECIAL);
     JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_DOUBLE) == JSVAL_DOUBLE);
     JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_STRING) == JSVAL_STRING);
-    JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_PSEUDOBOOLEAN) == JSVAL_SPECIAL);
+    JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_SPECIAL) == JSVAL_SPECIAL);
     return TraceType(tag);
 }
 
@@ -1117,7 +1112,7 @@ GlobalSlotHash(JSContext* cx, unsigned slot)
         fp = fp->down;
 
     HashAccum(h, uintptr_t(fp->script), ORACLE_MASK);
-    HashAccum(h, uintptr_t(OBJ_SHAPE(JS_GetGlobalForObject(cx, fp->scopeChain))), ORACLE_MASK);
+    HashAccum(h, uintptr_t(OBJ_SHAPE(fp->scopeChain->getGlobal())), ORACLE_MASK);
     HashAccum(h, uintptr_t(slot), ORACLE_MASK);
     return int(h);
 }
@@ -1525,9 +1520,10 @@ AssertTreeIsUnique(TraceMonitor* tm, TreeFragment* f)
 #endif
 
 static void
-AttemptCompilation(JSContext *cx, TraceMonitor* tm, JSObject* globalObj, jsbytecode* pc,
-                   uint32 argc)
+AttemptCompilation(JSContext *cx, JSObject* globalObj, jsbytecode* pc, uint32 argc)
 {
+    TraceMonitor *tm = &JS_TRACE_MONITOR(cx);
+
     /* If we already permanently blacklisted the location, undo that. */
     JS_ASSERT(*pc == JSOP_NOP || *pc == JSOP_TRACE || *pc == JSOP_CALL);
     if (*pc == JSOP_NOP)
@@ -1662,12 +1658,6 @@ isPromote(LIns* i)
     return isPromoteInt(i) || isPromoteUint(i);
 }
 
-static bool
-IsConst(LIns* i, int32_t c)
-{
-    return i->isconst() && i->imm32() == c;
-}
-
 /*
  * Determine whether this operand is guaranteed to not overflow the specified
 * integer operation.
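The new TT_VOID arm in GetPromotedType and getCoercedType exists because undefined is encoded as a JSVAL_SPECIAL value, the same tag the booleans use, so the void case must be peeled off before the tag is mapped onto a trace type. A toy model of that dispatch; the tag and payload values here are illustrative, not SpiderMonkey's actual encoding:

#include <stdint.h>

enum Tag { TAG_OBJECT = 0, TAG_INT = 1, TAG_DOUBLE = 2,
           TAG_STRING = 4, TAG_SPECIAL = 6 };
enum TraceType { TT_OBJECT = 0, TT_INT32 = 1, TT_DOUBLE = 2, TT_JSVAL = 3,
                 TT_STRING = 4, TT_NULL = 5, TT_SPECIAL = 6,
                 TT_FUNCTION = 7, TT_VOID = 8 };

struct Val { Tag tag; uint32_t payload; };  // payload 0/1 = false/true,
                                            // 2 = undefined (as in JSVAL_VOID)

static TraceType Promote(const Val &v) {
    if (v.tag == TAG_SPECIAL && v.payload == 2)
        return TT_VOID;                     // undefined must be tested first;
                                            // tag inspection alone can't see it
    return TraceType(v.tag);                // remaining tags map 1:1, which is
                                            // what the static asserts pin down
}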
@@ -1718,40 +1708,13 @@ public: } else if (isFCmpOpcode(v)) { if (isPromoteInt(s0) && isPromoteInt(s1)) { // demote fcmp to cmp - v = LOpcode(v + (LIR_eq - LIR_feq)); + v = f64cmp_to_i32cmp(v); return out->ins2(v, demote(out, s0), demote(out, s1)); } else if (isPromoteUint(s0) && isPromoteUint(s1)) { // uint compare - v = LOpcode(v + (LIR_eq - LIR_feq)); - if (v != LIR_eq) - v = LOpcode(v + (LIR_ult - LIR_lt)); // cmp -> ucmp + v = f64cmp_to_u32cmp(v); return out->ins2(v, demote(out, s0), demote(out, s1)); } - } else if (v == LIR_or && - s0->isop(LIR_lsh) && IsConst(s0->oprnd2(), 16) && - s1->isop(LIR_and) && IsConst(s1->oprnd2(), 0xffff)) { - LIns* msw = s0->oprnd1(); - LIns* lsw = s1->oprnd1(); - LIns* x; - LIns* y; - if (lsw->isop(LIR_add) && - lsw->oprnd1()->isop(LIR_and) && - lsw->oprnd2()->isop(LIR_and) && - IsConst(lsw->oprnd1()->oprnd2(), 0xffff) && - IsConst(lsw->oprnd2()->oprnd2(), 0xffff) && - msw->isop(LIR_add) && - msw->oprnd1()->isop(LIR_add) && - msw->oprnd2()->isop(LIR_rsh) && - msw->oprnd1()->oprnd1()->isop(LIR_rsh) && - msw->oprnd1()->oprnd2()->isop(LIR_rsh) && - IsConst(msw->oprnd2()->oprnd2(), 16) && - IsConst(msw->oprnd1()->oprnd1()->oprnd2(), 16) && - IsConst(msw->oprnd1()->oprnd2()->oprnd2(), 16) && - (x = lsw->oprnd1()->oprnd1()) == msw->oprnd1()->oprnd1()->oprnd1() && - (y = lsw->oprnd2()->oprnd1()) == msw->oprnd1()->oprnd2()->oprnd1() && - lsw == msw->oprnd2()->oprnd1()) { - return out->ins2(LIR_add, x, y); - } } return out->ins2(v, s0, s1); } @@ -1828,7 +1791,7 @@ VisitGlobalSlots(Visitor &visitor, JSContext *cx, JSObject *globalObj, { for (unsigned n = 0; n < ngslots; ++n) { unsigned slot = gslots[n]; - visitor.visitGlobalSlot(&STOBJ_GET_SLOT(globalObj, slot), n, slot); + visitor.visitGlobalSlot(&globalObj->getSlotRef(slot), n, slot); } } @@ -1847,7 +1810,7 @@ template static JS_REQUIRES_STACK JS_ALWAYS_INLINE void VisitGlobalSlots(Visitor &visitor, JSContext *cx, SlotList &gslots) { - VisitGlobalSlots(visitor, cx, JS_GetGlobalForObject(cx, cx->fp->scopeChain), + VisitGlobalSlots(visitor, cx, cx->fp->scopeChain->getGlobal(), gslots.length(), gslots.data()); } @@ -1866,7 +1829,7 @@ static JS_REQUIRES_STACK JS_ALWAYS_INLINE void VisitSlots(Visitor& visitor, JSContext* cx, unsigned callDepth, unsigned ngslots, uint16* gslots) { - VisitSlots(visitor, cx, JS_GetGlobalForObject(cx, cx->fp->scopeChain), + VisitSlots(visitor, cx, cx->fp->scopeChain->getGlobal(), callDepth, ngslots, gslots); } @@ -1884,7 +1847,7 @@ static JS_REQUIRES_STACK JS_ALWAYS_INLINE void VisitSlots(Visitor &visitor, JSContext *cx, unsigned callDepth, const SlotList& slots) { - VisitSlots(visitor, cx, JS_GetGlobalForObject(cx, cx->fp->scopeChain), + VisitSlots(visitor, cx, cx->fp->scopeChain->getGlobal(), callDepth, slots.length(), slots.data()); } @@ -2201,13 +2164,12 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* anchor, VMFragment* frag generatedSpecializedNative(), tempTypeMap(cx) { - JS_ASSERT(globalObj == JS_GetGlobalForObject(cx, cx->fp->scopeChain)); + JS_ASSERT(globalObj == cx->fp->scopeChain->getGlobal()); JS_ASSERT(cx->fp->regs->pc == (jsbytecode*)fragment->ip); fragment->lirbuf = lirbuf; #ifdef DEBUG - LabelMap* labels = new (tempAlloc()) LabelMap(tempAlloc(), &LogController); - lirbuf->names = new (tempAlloc()) LirNameMap(tempAlloc(), labels); + lirbuf->printer = new (tempAlloc()) LInsPrinter(tempAlloc()); #endif /* @@ -2249,11 +2211,13 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* anchor, VMFragment* frag nanojit::LirWriter*& lir = InitConst(this->lir); lir = 
new (tempAlloc()) LirBufWriter(lirbuf, nanojit::AvmCore::config); #ifdef DEBUG - lir = new (tempAlloc()) ValidateWriter(lir, "end of writer pipeline"); + ValidateWriter* validate2; + lir = validate2 = + new (tempAlloc()) ValidateWriter(lir, lirbuf->printer, "end of writer pipeline"); #endif debug_only_stmt( if (LogController.lcbits & LC_TMRecorder) { - lir = new (tempAlloc()) VerboseWriter(tempAlloc(), lir, lirbuf->names, + lir = new (tempAlloc()) VerboseWriter(tempAlloc(), lir, lirbuf->printer, &LogController); } ) @@ -2267,7 +2231,9 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* anchor, VMFragment* frag lir = new (tempAlloc()) ExprFilter(lir); lir = new (tempAlloc()) FuncFilter(lir); #ifdef DEBUG - lir = new (tempAlloc()) ValidateWriter(lir, "start of writer pipeline"); + ValidateWriter* validate1; + lir = validate1 = + new (tempAlloc()) ValidateWriter(lir, lirbuf->printer, "start of writer pipeline"); #endif lir->ins0(LIR_start); @@ -2299,11 +2265,24 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* anchor, VMFragment* frag fragment->loopLabel = entryLabel; }) - lirbuf->sp = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, sp)), "sp"); - lirbuf->rp = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, rp)), "rp"); - InitConst(cx_ins) = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, cx)), "cx"); - InitConst(eos_ins) = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, eos)), "eos"); - InitConst(eor_ins) = addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, eor)), "eor"); + lirbuf->sp = + addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, sp), ACC_OTHER), "sp"); + lirbuf->rp = + addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, rp), ACC_OTHER), "rp"); + InitConst(cx_ins) = + addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, cx), ACC_OTHER), "cx"); + InitConst(eos_ins) = + addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, eos), ACC_OTHER), "eos"); + InitConst(eor_ins) = + addName(lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, eor), ACC_OTHER), "eor"); + +#ifdef DEBUG + // Need to set these up before any stack/rstack loads/stores occur. + validate1->setSp(lirbuf->sp); + validate2->setSp(lirbuf->sp); + validate1->setRp(lirbuf->rp); + validate2->setRp(lirbuf->rp); +#endif /* If we came from exit, we might not have enough global types. */ if (tree->globalSlots->length() > tree->nGlobalTypes()) @@ -2321,7 +2300,10 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* anchor, VMFragment* frag * We poll the operation callback request flag. It is updated asynchronously whenever * the callback is to be invoked. */ - LIns* x = lir->insLoad(LIR_ld, cx_ins, offsetof(JSContext, operationCallbackFlag)); + // XXX: this load is volatile. If bug 545406 (loop-invariant code + // hoisting) is implemented this fact will need to be made explicit. 
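For context on the ACC_LOAD_ANY annotation just below (a sketch; the watchdog-thread framing is an assumption, the patch itself only says the load is volatile): the flag is written asynchronously by another thread, so the recorded load may alias any store and must be re-executed on every loop iteration rather than CSE'd or hoisted.

    // Hypothetical asynchronous writer (e.g. a watchdog thread):
    //     cx->operationCallbackFlag = 1;
    // Recorded reader, once per loop iteration, as emitted below:
    //     LIns* x = lir->insLoad(LIR_ld, cx_ins,
    //                            offsetof(JSContext, operationCallbackFlag),
    //                            ACC_LOAD_ANY);  // may alias anything: no CSE, no hoisting
    //     guard(true, lir->ins_eq0(x), snapshot(TIMEOUT_EXIT));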
+ LIns* x = + lir->insLoad(LIR_ld, cx_ins, offsetof(JSContext, operationCallbackFlag), ACC_LOAD_ANY); guard(true, lir->ins_eq0(x), snapshot(TIMEOUT_EXIT)); } @@ -2331,8 +2313,8 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* anchor, VMFragment* frag */ if (anchor && anchor->exitType == NESTED_EXIT) { LIns* nested_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state, - offsetof(InterpState, outermostTreeExitGuard)), - "outermostTreeExitGuard"); + offsetof(InterpState, outermostTreeExitGuard), + ACC_OTHER), "outermostTreeExitGuard"); guard(true, lir->ins2(LIR_peq, nested_ins, INS_CONSTPTR(innermost)), NESTED_EXIT); } } @@ -2448,7 +2430,7 @@ TraceRecorder::addName(LIns* ins, const char* name) * in adding names otherwise. */ if (LogController.lcbits > 0) - lirbuf->names->addName(ins, name); + lirbuf->printer->lirNameMap->addName(ins, name); #endif return ins; } @@ -2520,7 +2502,7 @@ bool TraceRecorder::isGlobal(jsval* p) const { return ((size_t(p - globalObj->fslots) < JS_INITIAL_NSLOTS) || - (size_t(p - globalObj->dslots) < (STOBJ_NSLOTS(globalObj) - JS_INITIAL_NSLOTS))); + (size_t(p - globalObj->dslots) < (globalObj->numSlots() - JS_INITIAL_NSLOTS))); } /* @@ -2584,7 +2566,7 @@ ValueToNative(JSContext* cx, jsval v, TraceType type, double* slot) switch (type) { case TT_OBJECT: JS_ASSERT(tag == JSVAL_OBJECT); - JS_ASSERT(!JSVAL_IS_NULL(v) && !HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v))); + JS_ASSERT(!JSVAL_IS_NULL(v) && !JSVAL_TO_OBJECT(v)->isFunction()); *(JSObject**)slot = JSVAL_TO_OBJECT(v); debug_only_printf(LC_TMTracer, "object<%p:%s> ", (void*)JSVAL_TO_OBJECT(v), @@ -2631,11 +2613,16 @@ ValueToNative(JSContext* cx, jsval v, TraceType type, double* slot) debug_only_print0(LC_TMTracer, "null "); return; - case TT_PSEUDOBOOLEAN: - /* Watch out for pseudo-booleans. */ + case TT_SPECIAL: JS_ASSERT(tag == JSVAL_SPECIAL); *(JSBool*)slot = JSVAL_TO_SPECIAL(v); - debug_only_printf(LC_TMTracer, "pseudoboolean<%d> ", *(JSBool*)slot); + debug_only_printf(LC_TMTracer, "special<%d> ", *(JSBool*)slot); + return; + + case TT_VOID: + JS_ASSERT(JSVAL_IS_VOID(v)); + *(JSBool*)slot = JSVAL_TO_SPECIAL(JSVAL_VOID); + debug_only_print0(LC_TMTracer, "undefined "); return; case TT_FUNCTION: { @@ -2698,7 +2685,7 @@ TraceMonitor::flush() assembler = new (alloc) Assembler(*codeAlloc, alloc, alloc, core, &LogController, avmplus::AvmCore::config); verbose_only( branches = NULL; ) - memset(&vmfragments[0], 0, FRAGMENT_TABLE_SIZE * sizeof(TreeFragment*)); + PodArrayZero(vmfragments); reFragments = new (alloc) REHashMap(alloc); needFlush = JS_FALSE; @@ -2807,14 +2794,18 @@ NativeToValue(JSContext* cx, jsval& v, TraceType type, double* slot) debug_only_printf(LC_TMTracer, "null<%p> ", (void*)(*(JSObject**)slot)); break; - case TT_PSEUDOBOOLEAN: - /* Watch out for pseudo-booleans. 
*/ + case TT_SPECIAL: v = SPECIAL_TO_JSVAL(*(JSBool*)slot); - debug_only_printf(LC_TMTracer, "boolean<%d> ", *(JSBool*)slot); + debug_only_printf(LC_TMTracer, "special<%d> ", *(JSBool*)slot); + break; + + case TT_VOID: + v = JSVAL_VOID; + debug_only_print0(LC_TMTracer, "undefined "); break; case TT_FUNCTION: { - JS_ASSERT(HAS_FUNCTION_CLASS(*(JSObject**)slot)); + JS_ASSERT((*(JSObject**)slot)->isFunction()); v = OBJECT_TO_JSVAL(*(JSObject**)slot); #ifdef DEBUG JSFunction* fun = GET_FUNCTION_PRIVATE(cx, JSVAL_TO_OBJECT(v)); @@ -3228,7 +3219,7 @@ struct VarClosureTraits static inline uint32 adj_slot(JSStackFrame* fp, uint32 slot) { return 4 + fp->argc + slot; } static inline LIns* adj_slot_lir(LirWriter* lir, LIns* fp_ins, unsigned slot) { - LIns *argc_ins = lir->insLoad(LIR_ld, fp_ins, offsetof(JSStackFrame, argc)); + LIns *argc_ins = lir->insLoad(LIR_ld, fp_ins, offsetof(JSStackFrame, argc), ACC_OTHER); return lir->ins2(LIR_add, lir->insImm(4 + slot), argc_ins); } @@ -3301,13 +3292,11 @@ FlushNativeStackFrame(JSContext* cx, unsigned callDepth, const TraceType* mp, do for (; n != 0; fp = fp->down) { --n; if (fp->argv) { - if (fp->argsobj && - js_GetArgsPrivateNative(JSVAL_TO_OBJECT(fp->argsobj))) { + if (fp->argsobj && GetArgsPrivateNative(JSVAL_TO_OBJECT(fp->argsobj))) JSVAL_TO_OBJECT(fp->argsobj)->setPrivate(fp); - } JS_ASSERT(JSVAL_IS_OBJECT(fp->argv[-1])); - JS_ASSERT(HAS_FUNCTION_CLASS(fp->calleeObject())); + JS_ASSERT(fp->calleeObject()->isFunction()); JS_ASSERT(GET_FUNCTION_PRIVATE(cx, fp->callee()) == fp->fun); if (FUN_INTERPRETED(fp->fun) && @@ -3341,6 +3330,7 @@ TraceRecorder::import(LIns* base, ptrdiff_t offset, jsval* p, TraceType t, const char *prefix, uintN index, JSStackFrame *fp) { LIns* ins; + AccSet accSet = base == lirbuf->sp ? ACC_STACK : ACC_OTHER; if (t == TT_INT32) { /* demoted */ JS_ASSERT(isInt32(*p)); @@ -3350,16 +3340,18 @@ TraceRecorder::import(LIns* base, ptrdiff_t offset, jsval* p, TraceType t, * to see doubles on entry. The first op to use this slot will emit a * f2i cast which will cancel out the i2f we insert here. */ - ins = lir->insLoad(LIR_ld, base, offset); + ins = lir->insLoad(LIR_ld, base, offset, accSet); ins = lir->ins1(LIR_i2f, ins); } else { JS_ASSERT_IF(t != TT_JSVAL, isNumber(*p) == (t == TT_DOUBLE)); if (t == TT_DOUBLE) { - ins = lir->insLoad(LIR_ldf, base, offset); - } else if (t == TT_PSEUDOBOOLEAN) { - ins = lir->insLoad(LIR_ld, base, offset); + ins = lir->insLoad(LIR_ldf, base, offset, accSet); + } else if (t == TT_SPECIAL) { + ins = lir->insLoad(LIR_ld, base, offset, accSet); + } else if (t == TT_VOID) { + ins = INS_VOID(); } else { - ins = lir->insLoad(LIR_ldp, base, offset); + ins = lir->insLoad(LIR_ldp, base, offset, accSet); } } checkForGlobalObjectReallocation(); @@ -3497,20 +3489,20 @@ TraceRecorder::isValidSlot(JSScope* scope, JSScopeProperty* sprop) uint32 setflags = (js_CodeSpec[*cx->fp->regs->pc].format & (JOF_SET | JOF_INCDEC | JOF_FOR)); if (setflags) { - if (!SPROP_HAS_STUB_SETTER(sprop)) + if (!sprop->hasDefaultSetter()) RETURN_VALUE("non-stub setter", false); - if (sprop->attrs & JSPROP_READONLY) + if (!sprop->writable()) RETURN_VALUE("writing to a read-only property", false); } /* This check applies even when setflags == 0. 
 */
-    if (setflags != JOF_SET && !SPROP_HAS_STUB_GETTER(sprop)) {
+    if (setflags != JOF_SET && !sprop->hasDefaultGetter()) {
         JS_ASSERT(!sprop->isMethod());
         RETURN_VALUE("non-stub getter", false);
     }
 
     if (!SPROP_HAS_VALID_SLOT(sprop, scope))
-        RETURN_VALUE("slotless obj property", false);
+        RETURN_VALUE("invalid-slot obj property", false);
 
     return true;
 }
@@ -3520,9 +3512,9 @@ JS_REQUIRES_STACK void
 TraceRecorder::importGlobalSlot(unsigned slot)
 {
     JS_ASSERT(slot == uint16(slot));
-    JS_ASSERT(STOBJ_NSLOTS(globalObj) <= MAX_GLOBAL_SLOTS);
+    JS_ASSERT(globalObj->numSlots() <= MAX_GLOBAL_SLOTS);
 
-    jsval* vp = &STOBJ_GET_SLOT(globalObj, slot);
+    jsval* vp = &globalObj->getSlotRef(slot);
     JS_ASSERT(!known(vp));
 
     /* Add the slot to the list of interned global slots. */
@@ -3554,9 +3546,9 @@ TraceRecorder::lazilyImportGlobalSlot(unsigned slot)
      * If the global object grows too large, alloca in ExecuteTree might fail,
      * so abort tracing on global objects with unreasonably many slots.
      */
-    if (STOBJ_NSLOTS(globalObj) > MAX_GLOBAL_SLOTS)
+    if (globalObj->numSlots() > MAX_GLOBAL_SLOTS)
         return false;
-    jsval* vp = &STOBJ_GET_SLOT(globalObj, slot);
+    jsval* vp = &globalObj->getSlotRef(slot);
     if (known(vp))
         return true; /* we already have it */
     importGlobalSlot(slot);
@@ -3574,7 +3566,7 @@ TraceRecorder::writeBack(LIns* i, LIns* base, ptrdiff_t offset, bool shouldDemot
      */
     if (shouldDemote && isPromoteInt(i))
         i = demote(lir, i);
-    return lir->insStorei(i, base, offset);
+    return lir->insStorei(i, base, offset, (base == lirbuf->sp) ? ACC_STACK : ACC_OTHER);
 }
 
 /* Update the tracker, then issue a write back store. */
@@ -3755,7 +3747,7 @@ public:
         bool isPromote = isPromoteInt(ins);
         if (isPromote && *mTypeMap == TT_DOUBLE) {
             mLir->insStorei(mRecorder.get(vp), mRecorder.eos_ins,
-                            mRecorder.nativeGlobalOffset(vp));
+                            mRecorder.nativeGlobalOffset(vp), ACC_OTHER);
 
             /*
              * Aggressively undo speculation so the inner tree will compile
@@ -3799,7 +3791,7 @@ public:
         bool isPromote = isPromoteInt(ins);
         if (isPromote && *mTypeMap == TT_DOUBLE) {
             mLir->insStorei(mRecorder.get(vp), mLirbuf->sp,
-                            mRecorder.nativespOffset(vp));
+                            mRecorder.nativespOffset(vp), ACC_STACK);
 
             /*
              * Aggressively undo speculation so the inner tree will compile
@@ -3852,14 +3844,17 @@ TraceRecorder::determineSlotType(jsval* vp)
     } else if (JSVAL_IS_OBJECT(*vp)) {
         if (JSVAL_IS_NULL(*vp))
             m = TT_NULL;
-        else if (HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(*vp)))
+        else if (JSVAL_TO_OBJECT(*vp)->isFunction())
             m = TT_FUNCTION;
         else
             m = TT_OBJECT;
+    } else if (JSVAL_IS_VOID(*vp)) {
+        /* N.B. void is JSVAL_SPECIAL. */
+        m = TT_VOID;
     } else {
-        JS_ASSERT(JSVAL_TAG(*vp) == JSVAL_STRING || JSVAL_IS_SPECIAL(*vp));
+        JS_ASSERT(JSVAL_IS_STRING(*vp) || JSVAL_IS_SPECIAL(*vp));
         JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_STRING) == JSVAL_STRING);
-        JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_PSEUDOBOOLEAN) == JSVAL_SPECIAL);
+        JS_STATIC_ASSERT(static_cast<jsvaltag>(TT_SPECIAL) == JSVAL_SPECIAL);
         m = TraceType(JSVAL_TAG(*vp));
     }
     JS_ASSERT(m != TT_INT32 || isInt32(*vp));
@@ -4059,7 +4054,16 @@ TraceRecorder::snapshot(ExitType exitType)
 JS_REQUIRES_STACK GuardRecord*
 TraceRecorder::createGuardRecord(VMSideExit* exit)
 {
+#ifdef JS_JIT_SPEW
+    // For debug builds, place the guard records in a longer lasting
+    // pool. This is because the fragment profiler will look at them
+    // relatively late in the day, after they would have been freed,
+    // in some cases, had they been allocated in traceAlloc().
+    GuardRecord* gr = new (dataAlloc()) GuardRecord();
+#else
+    // The standard place (for production builds).
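The #ifdef above chooses between allocators with different lifetimes. A rough summary of the three pools named in this file (the exact teardown points are inferred, not stated by the patch):

    // tempAlloc()  -- scratch memory, reset after each compilation
    // traceAlloc() -- lives while one recording is in flight; reclaimed when the
    //                 trace is finished or abandoned
    // dataAlloc()  -- lives until the whole TraceMonitor is flushed, which is why
    //                 JS_JIT_SPEW builds park GuardRecords there: the fragment
    //                 profiler reads them well after recording has ended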
GuardRecord* gr = new (traceAlloc()) GuardRecord(); +#endif gr->exit = exit; exit->addGuard(gr); @@ -4230,11 +4234,11 @@ TraceRecorder::compile() } if (tree->maxNativeStackSlots >= MAX_NATIVE_STACK_SLOTS) { debug_only_print0(LC_TMTracer, "Blacklist: excessive stack use.\n"); - Blacklist((jsbytecode*) fragment->root->ip); + Blacklist((jsbytecode*)tree->ip); return ARECORD_STOP; } if (anchor && anchor->exitType != CASE_EXIT) - ++fragment->root->branchCount; + ++tree->branchCount; if (outOfMemory()) return ARECORD_STOP; @@ -4245,26 +4249,25 @@ TraceRecorder::compile() char* label = (char*)js_malloc((filename ? strlen(filename) : 7) + 16); sprintf(label, "%s:%u", filename ? filename : "", js_FramePCToLineNumber(cx, cx->fp)); - lirbuf->names->labels->add(fragment, sizeof(Fragment), 0, label); + lirbuf->printer->addrNameMap->addAddrRange(fragment, sizeof(Fragment), 0, label); js_free(label); #endif Assembler *assm = traceMonitor->assembler; JS_ASSERT(assm->error() == nanojit::None); - assm->compile(fragment, tempAlloc(), /*optimize*/true - verbose_only(, lirbuf->names->labels)); + assm->compile(fragment, tempAlloc(), /*optimize*/true verbose_only(, lirbuf->printer)); if (assm->error() != nanojit::None) { assm->setError(nanojit::None); debug_only_print0(LC_TMTracer, "Blacklisted: error during compilation\n"); - Blacklist((jsbytecode*) fragment->root->ip); + Blacklist((jsbytecode*)tree->ip); return ARECORD_STOP; } if (outOfMemory()) return ARECORD_STOP; - ResetRecordingAttempts(cx, (jsbytecode*) fragment->ip); - ResetRecordingAttempts(cx, (jsbytecode*) fragment->root->ip); + ResetRecordingAttempts(cx, (jsbytecode*)fragment->ip); + ResetRecordingAttempts(cx, (jsbytecode*)tree->ip); if (anchor) { #ifdef NANOJIT_IA32 if (anchor->exitType == CASE_EXIT) @@ -4539,11 +4542,11 @@ JS_REQUIRES_STACK TypeConsensus TraceRecorder::peerTypeStability(SlotMap& slotMap, const void* ip, TreeFragment** pPeer) { /* See if there are any peers that would make this stable */ - TreeFragment* root = fragment->root; - TreeFragment* peer = LookupLoop(traceMonitor, ip, root->globalObj, root->globalShape, root->argc); + JS_ASSERT(fragment->root == tree); + TreeFragment* peer = LookupLoop(traceMonitor, ip, tree->globalObj, tree->globalShape, tree->argc); /* This condition is possible with recursion */ - JS_ASSERT_IF(!peer, fragment->root->ip != ip); + JS_ASSERT_IF(!peer, tree->ip != ip); if (!peer) return TypeConsensus_Bad; bool onlyUndemotes = false; @@ -4600,7 +4603,7 @@ TraceRecorder::closeLoop(SlotMap& slotMap, VMSideExit* exit) if (callDepth != 0) { debug_only_print0(LC_TMTracer, "Blacklisted: stack depth mismatch, possible recursion.\n"); - Blacklist((jsbytecode*) fragment->root->ip); + Blacklist((jsbytecode*)tree->ip); trashSelf = true; return ARECORD_STOP; } @@ -4609,10 +4612,11 @@ TraceRecorder::closeLoop(SlotMap& slotMap, VMSideExit* exit) exit->numStackSlots == tree->nStackTypes); JS_ASSERT_IF(exit->exitType != UNSTABLE_LOOP_EXIT, exit->exitType == RECURSIVE_UNLINKED_EXIT); JS_ASSERT_IF(exit->exitType == RECURSIVE_UNLINKED_EXIT, - exit->recursive_pc != fragment->root->ip); + exit->recursive_pc != tree->ip); + + JS_ASSERT(fragment->root == tree); TreeFragment* peer = NULL; - TreeFragment* root = fragment->root; TypeConsensus consensus = TypeConsensus_Bad; @@ -4620,7 +4624,7 @@ TraceRecorder::closeLoop(SlotMap& slotMap, VMSideExit* exit) consensus = selfTypeStability(slotMap); if (consensus != TypeConsensus_Okay) { const void* ip = exit->exitType == RECURSIVE_UNLINKED_EXIT ? 
- exit->recursive_pc : fragment->root->ip; + exit->recursive_pc : tree->ip; TypeConsensus peerConsensus = peerTypeStability(slotMap, ip, &peer); /* If there was a semblance of a stable peer (even if not linkable), keep the result. */ if (peerConsensus != TypeConsensus_Bad) @@ -4671,7 +4675,7 @@ TraceRecorder::closeLoop(SlotMap& slotMap, VMSideExit* exit) debug_only_printf(LC_TMTracer, "Joining type-unstable trace to target fragment %p.\n", (void*)peer); - peer->dependentTrees.addUnique(fragment->root); + peer->dependentTrees.addUnique(tree); tree->linkedTrees.addUnique(peer); } } else { @@ -4685,7 +4689,7 @@ TraceRecorder::closeLoop(SlotMap& slotMap, VMSideExit* exit) lir->ins1(LIR_plive, lirbuf->state); } - exit->target = fragment->root; + exit->target = tree; fragment->lastIns = lir->insGuard(LIR_x, NULL, createGuardRecord(exit)); } @@ -4693,8 +4697,11 @@ TraceRecorder::closeLoop(SlotMap& slotMap, VMSideExit* exit) debug_only_printf(LC_TMTreeVis, "TREEVIS CLOSELOOP EXIT=%p PEER=%p\n", (void*)exit, (void*)peer); - peer = LookupLoop(traceMonitor, root->ip, root->globalObj, root->globalShape, root->argc); - JS_ASSERT(peer); + JS_ASSERT(LookupLoop(traceMonitor, tree->ip, tree->globalObj, tree->globalShape, tree->argc) == + tree->first); + JS_ASSERT(tree->first); + + peer = tree->first; joinEdgesToEntry(peer); debug_only_stmt(DumpPeerStability(traceMonitor, peer->ip, peer->globalObj, @@ -4702,15 +4709,15 @@ TraceRecorder::closeLoop(SlotMap& slotMap, VMSideExit* exit) debug_only_print0(LC_TMTracer, "updating specializations on dependent and linked trees\n"); - if (fragment->root->code()) - SpecializeTreesToMissingGlobals(cx, globalObj, fragment->root); + if (tree->code()) + SpecializeTreesToMissingGlobals(cx, globalObj, tree); /* * If this is a newly formed tree, and the outer tree has not been compiled yet, we * should try to compile the outer tree again. 
*/ if (outer) - AttemptCompilation(cx, traceMonitor, globalObj, outer, outerArgc); + AttemptCompilation(cx, globalObj, outer, outerArgc); #ifdef JS_JIT_SPEW debug_only_printf(LC_TMMinimal, "Recording completed at %s:%u@%u via closeLoop (FragID=%06u)\n", @@ -4796,7 +4803,7 @@ TraceRecorder::joinEdgesToEntry(TreeFragment* peer_root) /* Build the full typemap for this unstable exit */ FullMapFromExit(typeMap, uexit->exit); /* Check its compatibility against this tree */ - TypeConsensus consensus = TypeMapLinkability(cx, typeMap, fragment->root); + TypeConsensus consensus = TypeMapLinkability(cx, typeMap, tree); JS_ASSERT_IF(consensus == TypeConsensus_Okay, peer != fragment); if (consensus == TypeConsensus_Okay) { debug_only_printf(LC_TMTracer, @@ -4843,9 +4850,11 @@ TraceRecorder::endLoop() JS_REQUIRES_STACK AbortableRecordingStatus TraceRecorder::endLoop(VMSideExit* exit) { + JS_ASSERT(fragment->root == tree); + if (callDepth != 0) { debug_only_print0(LC_TMTracer, "Blacklisted: stack depth mismatch, possible recursion.\n"); - Blacklist((jsbytecode*) fragment->root->ip); + Blacklist((jsbytecode*)tree->ip); trashSelf = true; return ARECORD_STOP; } @@ -4860,11 +4869,13 @@ TraceRecorder::endLoop(VMSideExit* exit) debug_only_printf(LC_TMTreeVis, "TREEVIS ENDLOOP EXIT=%p\n", (void*)exit); - TreeFragment* root = fragment->root; - joinEdgesToEntry(LookupLoop(traceMonitor, root->ip, root->globalObj, - root->globalShape, root->argc)); - debug_only_stmt(DumpPeerStability(traceMonitor, root->ip, root->globalObj, - root->globalShape, root->argc);) + JS_ASSERT(LookupLoop(traceMonitor, tree->ip, tree->globalObj, tree->globalShape, tree->argc) == + tree->first); + + joinEdgesToEntry(tree->first); + + debug_only_stmt(DumpPeerStability(traceMonitor, tree->ip, tree->globalObj, + tree->globalShape, tree->argc);) /* * Note: this must always be done, in case we added new globals on trace @@ -4872,7 +4883,7 @@ TraceRecorder::endLoop(VMSideExit* exit) */ debug_only_print0(LC_TMTracer, "updating specializations on dependent and linked trees\n"); - if (fragment->root->code()) + if (tree->code()) SpecializeTreesToMissingGlobals(cx, globalObj, fragment->root); /* @@ -4880,7 +4891,7 @@ TraceRecorder::endLoop(VMSideExit* exit) * yet, we should try to compile the outer tree again. */ if (outer) - AttemptCompilation(cx, traceMonitor, globalObj, outer, outerArgc); + AttemptCompilation(cx, globalObj, outer, outerArgc); #ifdef JS_JIT_SPEW debug_only_printf(LC_TMMinimal, "Recording completed at %s:%u@%u via endLoop (FragID=%06u)\n", @@ -4945,9 +4956,9 @@ TraceRecorder::prepareTreeCall(TreeFragment* inner) + inner->nativeStackBase; /* plus the inner tree's stack base */ /* We have enough space, so adjust sp and rp to their new level. 
*/ lir->insStorei(lir->ins2(LIR_piadd, lirbuf->sp, INS_CONSTWORD(sp_offset)), - lirbuf->state, offsetof(InterpState, sp)); + lirbuf->state, offsetof(InterpState, sp), ACC_OTHER); lir->insStorei(lir->ins2(LIR_piadd, lirbuf->rp, INS_CONSTWORD(rp_adj)), - lirbuf->state, offsetof(InterpState, rp)); + lirbuf->state, offsetof(InterpState, rp), ACC_OTHER); } /* @@ -4990,7 +5001,7 @@ TraceRecorder::emitTreeCall(TreeFragment* inner, VMSideExit* exit) CallInfo* ci = new (traceAlloc()) CallInfo(); ci->_address = uintptr_t(inner->code()); JS_ASSERT(ci->_address); - ci->_argtypes = ARGSIZE_P | ARGSIZE_P << ARGSIZE_SHIFT; + ci->_typesig = ARGTYPE_P | ARGTYPE_P << ARGTYPE_SHIFT; ci->_isPure = 0; ci->_storeAccSet = ACC_STORE_ANY; ci->_abi = ABI_FASTCALL; @@ -4998,10 +5009,12 @@ TraceRecorder::emitTreeCall(TreeFragment* inner, VMSideExit* exit) ci->_name = "fragment"; #endif LIns* rec = lir->insCall(ci, args); - LIns* lr = lir->insLoad(LIR_ldp, rec, offsetof(GuardRecord, exit)); + LIns* lr = lir->insLoad(LIR_ldp, rec, offsetof(GuardRecord, exit), ACC_OTHER); LIns* nested = lir->insBranch(LIR_jt, lir->ins2i(LIR_eq, - lir->insLoad(LIR_ld, lr, offsetof(VMSideExit, exitType)), + lir->insLoad(LIR_ld, lr, + offsetof(VMSideExit, exitType), + ACC_OTHER), NESTED_EXIT), NULL); @@ -5010,7 +5023,7 @@ TraceRecorder::emitTreeCall(TreeFragment* inner, VMSideExit* exit) * with that guard. If we mismatch on a tree call guard, this will contain the last * non-nested guard we encountered, which is the innermost loop or branch guard. */ - lir->insStorei(lr, lirbuf->state, offsetof(InterpState, lastTreeExitGuard)); + lir->insStorei(lr, lirbuf->state, offsetof(InterpState, lastTreeExitGuard), ACC_OTHER); LIns* done1 = lir->insBranch(LIR_j, NULL, NULL); /* @@ -5022,16 +5035,20 @@ TraceRecorder::emitTreeCall(TreeFragment* inner, VMSideExit* exit) LIns* done2 = lir->insBranch(LIR_jf, lir->ins_peq0(lir->insLoad(LIR_ldp, lirbuf->state, - offsetof(InterpState, lastTreeCallGuard))), + offsetof(InterpState, lastTreeCallGuard), + ACC_OTHER)), NULL); - lir->insStorei(lr, lirbuf->state, offsetof(InterpState, lastTreeCallGuard)); + lir->insStorei(lr, lirbuf->state, offsetof(InterpState, lastTreeCallGuard), ACC_OTHER); lir->insStorei(lir->ins2(LIR_piadd, - lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, rp)), + lir->insLoad(LIR_ldp, lirbuf->state, offsetof(InterpState, rp), + ACC_OTHER), lir->ins_i2p(lir->ins2i(LIR_lsh, - lir->insLoad(LIR_ld, lr, offsetof(VMSideExit, calldepth)), + lir->insLoad(LIR_ld, lr, + offsetof(VMSideExit, calldepth), + ACC_OTHER), sizeof(void*) == 4 ? 2 : 3))), lirbuf->state, - offsetof(InterpState, rpAtLastTreeCall)); + offsetof(InterpState, rpAtLastTreeCall), ACC_OTHER); LIns* label = lir->ins0(LIR_label); done1->setTarget(label); done2->setTarget(label); @@ -5040,7 +5057,7 @@ TraceRecorder::emitTreeCall(TreeFragment* inner, VMSideExit* exit) * Keep updating outermostTreeExit so that InterpState always contains the most recent * side exit. */ - lir->insStorei(lr, lirbuf->state, offsetof(InterpState, outermostTreeExitGuard)); + lir->insStorei(lr, lirbuf->state, offsetof(InterpState, outermostTreeExitGuard), ACC_OTHER); /* Read back all registers, in case the called tree changed any of them. 
*/ #ifdef DEBUG @@ -5065,7 +5082,7 @@ TraceRecorder::emitTreeCall(TreeFragment* inner, VMSideExit* exit) SlotList& gslots = *tree->globalSlots; for (unsigned i = 0; i < gslots.length(); i++) { unsigned slot = gslots[i]; - jsval* vp = &STOBJ_GET_SLOT(globalObj, slot); + jsval* vp = &globalObj->getSlotRef(slot); tracker.set(vp, NULL); } @@ -5092,8 +5109,8 @@ TraceRecorder::emitTreeCall(TreeFragment* inner, VMSideExit* exit) /* Restore sp and rp to their original values (we still have them in a register). */ if (callDepth > 0) { - lir->insStorei(lirbuf->sp, lirbuf->state, offsetof(InterpState, sp)); - lir->insStorei(lirbuf->rp, lirbuf->state, offsetof(InterpState, rp)); + lir->insStorei(lirbuf->sp, lirbuf->state, offsetof(InterpState, sp), ACC_OTHER); + lir->insStorei(lirbuf->rp, lirbuf->state, offsetof(InterpState, rp), ACC_OTHER); } /* @@ -5136,7 +5153,7 @@ JS_REQUIRES_STACK void TraceRecorder::emitIf(jsbytecode* pc, bool cond, LIns* x) { ExitType exitType; - if (IsLoopEdge(pc, (jsbytecode*)fragment->root->ip)) { + if (IsLoopEdge(pc, (jsbytecode*)tree->ip)) { exitType = LOOP_EXIT; /* @@ -5182,7 +5199,7 @@ TraceRecorder::fuseIf(jsbytecode* pc, bool cond, LIns* x) JS_REQUIRES_STACK AbortableRecordingStatus TraceRecorder::checkTraceEnd(jsbytecode *pc) { - if (IsLoopEdge(pc, (jsbytecode*)fragment->root->ip)) { + if (IsLoopEdge(pc, (jsbytecode*)tree->ip)) { /* * If we compile a loop, the trace should have a zero stack balance at * the loop edge. Currently we are parked on a comparison op or @@ -5194,7 +5211,7 @@ TraceRecorder::checkTraceEnd(jsbytecode *pc) bool fused = pc != cx->fp->regs->pc; JSFrameRegs orig = *cx->fp->regs; - cx->fp->regs->pc = (jsbytecode*)fragment->root->ip; + cx->fp->regs->pc = (jsbytecode*)tree->ip; cx->fp->regs->sp -= fused ? 2 : 1; JSContext* localcx = cx; @@ -5208,46 +5225,50 @@ TraceRecorder::checkTraceEnd(jsbytecode *pc) return ARECORD_CONTINUE; } -bool -TraceRecorder::hasMethod(JSObject* obj, jsid id) +RecordingStatus +TraceRecorder::hasMethod(JSObject* obj, jsid id, bool& found) { + found = false; + RecordingStatus status = RECORD_CONTINUE; if (!obj) - return false; + return status; JSObject* pobj; JSProperty* prop; int protoIndex = obj->lookupProperty(cx, id, &pobj, &prop); - if (protoIndex < 0 || !prop) - return false; + if (protoIndex < 0) + return RECORD_ERROR; + if (!prop) + return status; - bool found = false; - if (OBJ_IS_NATIVE(pobj)) { + if (!pobj->isNative()) { + // We can't rely on __iterator__ being present on trace just because + // it's there now, if found in a non-native object. 
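hasMethod() now reports trace-safety separately from the lookup result, so callers can distinguish "not found" from "found, but unsafe to rely on while tracing". A hypothetical caller (illustration only, not code from this patch):

    bool found;
    RecordingStatus rs = hasIteratorMethod(obj, found);
    if (rs != RECORD_CONTINUE)
        return InjectStatus(rs);  // RECORD_STOP or RECORD_ERROR: stop recording cleanly
    if (found) {
        // safe to specialize the trace on obj's __iterator__ method
    }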
+ status = RECORD_STOP; + } else { JSScope* scope = OBJ_SCOPE(pobj); JSScopeProperty* sprop = (JSScopeProperty*) prop; - if (SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop) && - SPROP_HAS_VALID_SLOT(sprop, scope)) { + if (sprop->hasDefaultGetterOrIsMethod() && SPROP_HAS_VALID_SLOT(sprop, scope)) { jsval v = LOCKED_OBJ_GET_SLOT(pobj, sprop->slot); if (VALUE_IS_FUNCTION(cx, v)) { found = true; - if (!scope->generic() && !scope->branded()) { - scope->brandingShapeChange(cx, sprop->slot, v); - scope->setBranded(); - } + if (!scope->generic() && !scope->branded() && !scope->brand(cx, sprop->slot, v)) + status = RECORD_STOP; } } } pobj->dropProperty(cx, prop); - return found; + return status; } -JS_REQUIRES_STACK bool -TraceRecorder::hasIteratorMethod(JSObject* obj) +JS_REQUIRES_STACK RecordingStatus +TraceRecorder::hasIteratorMethod(JSObject* obj, bool& found) { JS_ASSERT(cx->fp->regs->sp + 2 <= cx->fp->slots + cx->fp->script->nslots); - return hasMethod(obj, ATOM_TO_JSID(cx->runtime->atomState.iteratorAtom)); + return hasMethod(obj, ATOM_TO_JSID(cx->runtime->atomState.iteratorAtom), found); } /* @@ -5264,7 +5285,7 @@ CheckGlobalObjectShape(JSContext* cx, TraceMonitor* tm, JSObject* globalObj, return false; } - if (STOBJ_NSLOTS(globalObj) > MAX_GLOBAL_SLOTS) { + if (globalObj->numSlots() > MAX_GLOBAL_SLOTS) { if (tm->recorder) AbortRecording(cx, "too many slots in global object"); return false; @@ -5573,10 +5594,11 @@ SynthesizeSlowNativeFrame(InterpState& state, JSContext *cx, VMSideExit *exit) } static JS_REQUIRES_STACK bool -RecordTree(JSContext* cx, TraceMonitor* tm, TreeFragment* peer, jsbytecode* outer, - uint32 outerArgc, JSObject* globalObj, uint32 globalShape, - SlotList* globalSlots, uint32 argc, RecordReason reason) +RecordTree(JSContext* cx, TreeFragment* peer, jsbytecode* outer, + uint32 outerArgc, SlotList* globalSlots, RecordReason reason) { + TraceMonitor* tm = &JS_TRACE_MONITOR(cx); + /* Try to find an unused peer fragment, or allocate a new one. */ TreeFragment* f = peer; while (f->code() && f->peer) @@ -5589,7 +5611,7 @@ RecordTree(JSContext* cx, TraceMonitor* tm, TreeFragment* peer, jsbytecode* oute const void* localRootIP = f->root->ip; /* Make sure the global type map didn't change on us. */ - if (!CheckGlobalObjectShape(cx, tm, globalObj)) { + if (!CheckGlobalObjectShape(cx, tm, f->globalObj)) { Backoff(cx, (jsbytecode*) localRootIP); return false; } @@ -5748,9 +5770,7 @@ AttemptToStabilizeTree(JSContext* cx, JSObject* globalObj, VMSideExit* exit, jsb if (*(jsbytecode*)from->ip == JSOP_NOP) return false; - return RecordTree(cx, tm, from->first, outer, outerArgc, globalObj, - globalShape, globalSlots, cx->fp->argc, - Record_Branch); + return RecordTree(cx, from->first, outer, outerArgc, globalSlots, Record_Branch); } static JS_REQUIRES_STACK VMFragment* @@ -5886,7 +5906,7 @@ JS_REQUIRES_STACK bool TraceRecorder::recordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount) { #ifdef JS_THREADSAFE - if (OBJ_SCOPE(JS_GetGlobalForObject(cx, cx->fp->scopeChain))->title.ownercx != cx) { + if (OBJ_SCOPE(cx->fp->scopeChain->getGlobal())->title.ownercx != cx) { AbortRecording(cx, "Global object not owned by this context"); return false; /* we stay away from shared global objects */ } @@ -5909,7 +5929,7 @@ TraceRecorder::recordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCall * Make sure the shape of the global object still matches (this might flush * the JIT cache). 
*/ - JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain); + JSObject* globalObj = cx->fp->scopeChain->getGlobal(); uint32 globalShape = -1; SlotList* globalSlots = NULL; if (!CheckGlobalObjectShape(cx, tm, globalObj, &globalShape, &globalSlots)) { @@ -5931,11 +5951,10 @@ TraceRecorder::recordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCall TreeFragment* outerFragment = root; jsbytecode* outer = (jsbytecode*) outerFragment->ip; uint32 outerArgc = outerFragment->argc; - uint32 argc = cx->fp->argc; + JS_ASSERT(cx->fp->argc == first->argc); AbortRecording(cx, "No compatible inner tree"); - return RecordTree(cx, tm, first, outer, outerArgc, globalObj, globalShape, - globalSlots, argc, Record_Branch); + return RecordTree(cx, first, outer, outerArgc, globalSlots, Record_Branch); } return r->attemptTreeCall(f, inlineCallCount) == ARECORD_CONTINUE; @@ -5997,7 +6016,7 @@ TraceRecorder::attemptTreeCall(TreeFragment* f, uintN& inlineCallCount) return ARECORD_ABORTED; } - TreeFragment* outerFragment = fragment->root; + TreeFragment* outerFragment = tree; jsbytecode* outer = (jsbytecode*) outerFragment->ip; switch (lr->exitType) { case RECURSIVE_LOOP_EXIT: @@ -6058,7 +6077,7 @@ IsEntryTypeCompatible(jsval* vp, TraceType* m) switch (*m) { case TT_OBJECT: if (tag == JSVAL_OBJECT && !JSVAL_IS_NULL(*vp) && - !HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(*vp))) { + !JSVAL_TO_OBJECT(*vp)->isFunction()) { return true; } debug_only_printf(LC_TMTracer, "object != tag%u ", tag); @@ -6089,15 +6108,21 @@ IsEntryTypeCompatible(jsval* vp, TraceType* m) return true; debug_only_printf(LC_TMTracer, "null != tag%u ", tag); return false; - case TT_PSEUDOBOOLEAN: - if (tag == JSVAL_SPECIAL) + case TT_SPECIAL: + /* N.B. void is JSVAL_SPECIAL. */ + if (JSVAL_IS_SPECIAL(*vp) && !JSVAL_IS_VOID(*vp)) return true; debug_only_printf(LC_TMTracer, "bool != tag%u ", tag); return false; + case TT_VOID: + if (JSVAL_IS_VOID(*vp)) + return true; + debug_only_printf(LC_TMTracer, "undefined != tag%u ", tag); + return false; default: JS_ASSERT(*m == TT_FUNCTION); if (tag == JSVAL_OBJECT && !JSVAL_IS_NULL(*vp) && - HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(*vp))) { + JSVAL_TO_OBJECT(*vp)->isFunction()) { return true; } debug_only_printf(LC_TMTracer, "fun != tag%u ", tag); @@ -6372,7 +6397,7 @@ ExecuteTrace(JSContext* cx, Fragment* f, InterpState& state) static JS_REQUIRES_STACK JS_ALWAYS_INLINE bool ScopeChainCheck(JSContext* cx, TreeFragment* f) { - JS_ASSERT(f->globalObj == JS_GetGlobalForObject(cx, cx->fp->scopeChain)); + JS_ASSERT(f->globalObj == cx->fp->scopeChain->getGlobal()); /* * The JIT records and expects to execute with two scope-chain @@ -6408,7 +6433,7 @@ ScopeChainCheck(JSContext* cx, TreeFragment* f) } /* Make sure the global object is sane. */ - JS_ASSERT(STOBJ_NSLOTS(f->globalObj) <= MAX_GLOBAL_SLOTS); + JS_ASSERT(f->globalObj->numSlots() <= MAX_GLOBAL_SLOTS); JS_ASSERT(f->nGlobalTypes() == f->globalSlots->length()); JS_ASSERT_IF(f->globalSlots->length() != 0, OBJ_SHAPE(f->globalObj) == f->globalShape); @@ -6451,7 +6476,7 @@ ExecuteTree(JSContext* cx, TreeFragment* f, uintN& inlineCallCount, f->maxNativeStackSlots, f->code()); - debug_only_stmt(uint32 globalSlots = STOBJ_NSLOTS(globalObj);) + debug_only_stmt(uint32 globalSlots = globalObj->numSlots();) debug_only_stmt(*(uint64*)&tm->storage->global()[globalSlots] = 0xdeadbeefdeadbeefLL;) /* Execute trace. 
*/ @@ -6869,7 +6894,7 @@ MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount, RecordReason reason) * Make sure the shape of the global object still matches (this might flush * the JIT cache). */ - JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain); + JSObject* globalObj = cx->fp->scopeChain->getGlobal(); uint32 globalShape = -1; SlotList* globalSlots = NULL; @@ -6909,8 +6934,7 @@ MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount, RecordReason reason) * it will walk the peer list and find us a free slot or allocate a new * tree if needed. */ - bool rv = RecordTree(cx, tm, f->first, NULL, 0, globalObj, globalShape, - globalSlots, argc, reason); + bool rv = RecordTree(cx, f->first, NULL, 0, globalSlots, reason); #ifdef MOZ_TRACEVIS if (!rv) tvso.r = R_FAIL_RECORD_TREE; @@ -7386,7 +7410,7 @@ InitJIT(TraceMonitor *tm) } tm->lastFragID = 0; #else - memset(&LogController, 0, sizeof(LogController)); + PodZero(&LogController); #endif if (!did_we_check_processor_features) { @@ -7435,7 +7459,7 @@ InitJIT(TraceMonitor *tm) verbose_only( tm->branches = NULL; ) #if !defined XP_WIN - debug_only(memset(&jitstats, 0, sizeof(jitstats))); + debug_only(PodZero(&jitstats)); #endif #ifdef JS_JIT_SPEW @@ -7530,7 +7554,7 @@ FinishJIT(TraceMonitor *tm) } #endif - memset(&tm->vmfragments[0], 0, FRAGMENT_TABLE_SIZE * sizeof(TreeFragment*)); + PodArrayZero(tm->vmfragments); if (tm->frameCache) { delete tm->frameCache; @@ -7744,8 +7768,8 @@ JS_REQUIRES_STACK LIns* TraceRecorder::entryScopeChain() const { return lir->insLoad(LIR_ldp, - lir->insLoad(LIR_ldp, cx_ins, offsetof(JSContext, fp)), - offsetof(JSStackFrame, scopeChain)); + lir->insLoad(LIR_ldp, cx_ins, offsetof(JSContext, fp), ACC_OTHER), + offsetof(JSStackFrame, scopeChain), ACC_OTHER); } /* @@ -7839,7 +7863,7 @@ TraceRecorder::scopeChainProp(JSObject* chainHead, jsval*& vp, LIns*& ins, NameR obj2->dropProperty(cx, prop); RETURN_STOP_A("lazy import of global slot failed"); } - vp = &STOBJ_GET_SLOT(obj, sprop->slot); + vp = &obj->getSlotRef(sprop->slot); ins = get(vp); obj2->dropProperty(cx, prop); nr.tracked = true; @@ -7868,7 +7892,7 @@ TraceRecorder::callProp(JSObject* obj, JSProperty* prop, jsid id, jsval*& vp, JSOp op = JSOp(*cx->fp->regs->pc); uint32 setflags = (js_CodeSpec[op].format & (JOF_SET | JOF_INCDEC | JOF_FOR)); - if (setflags && (sprop->attrs & JSPROP_READONLY)) + if (setflags && !sprop->writable()) RETURN_STOP("writing to a read-only property"); uintN slot = uint16(sprop->shortid); @@ -7944,8 +7968,8 @@ TraceRecorder::callProp(JSObject* obj, JSProperty* prop, jsid id, jsval*& vp, // Now assert that our use of sprop->shortid was in fact kosher. 
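The two ACC_OTHER loads below chase JSObject::dslots and then index into it; in C++ terms the emitted LIR pair computes roughly the following (sketch):

    // jsval v = obj->dslots[dslot_index];
    //   ldp  base <- obj  + offsetof(JSObject, dslots)     [ACC_OTHER]
    //   ldp  v    <- base + dslot_index * sizeof(jsval)    [ACC_OTHER]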
JS_ASSERT(sprop->hasShortID()); - LIns* base = lir->insLoad(LIR_ldp, obj_ins, offsetof(JSObject, dslots)); - LIns* val_ins = lir->insLoad(LIR_ldp, base, dslot_index * sizeof(jsval)); + LIns* base = lir->insLoad(LIR_ldp, obj_ins, offsetof(JSObject, dslots), ACC_OTHER); + LIns* val_ins = lir->insLoad(LIR_ldp, base, dslot_index * sizeof(jsval), ACC_OTHER); ins = unbox_jsval(obj->dslots[dslot_index], val_ins, snapshot(BRANCH_EXIT)); } else { ClosureVarInfo* cv = new (traceAlloc()) ClosureVarInfo(); @@ -7981,7 +8005,7 @@ TraceRecorder::callProp(JSObject* obj, JSProperty* prop, jsid id, jsval*& vp, addName(lir->ins2(LIR_eq, call_ins, lir->insImm(type)), "guard(type-stable name access)"), BRANCH_EXIT); - ins = stackLoad(outp, type); + ins = stackLoad(outp, ACC_OTHER, type); } nr.tracked = false; nr.obj = obj; @@ -8173,6 +8197,12 @@ TraceRecorder::alu(LOpcode v, jsdouble v0, jsdouble v1, LIns* s0, LIns* s1) return lir->ins1(LIR_i2f, result); } +LIns* +TraceRecorder::i2f(LIns* i) +{ + return lir->ins1(LIR_i2f, i); +} + LIns* TraceRecorder::f2i(LIns* f) { @@ -8251,8 +8281,12 @@ TraceRecorder::stringify(jsval& v) const CallInfo* ci; if (JSVAL_IS_NUMBER(v)) { ci = &js_NumberToString_ci; + } else if (JSVAL_IS_VOID(v)) { + /* N.B. void is JSVAL_SPECIAL. */ + return INS_ATOM(cx->runtime->atomState.booleanAtoms[2]); } else if (JSVAL_IS_SPECIAL(v)) { - ci = &js_BooleanOrUndefinedToString_ci; + JS_ASSERT(JSVAL_IS_BOOLEAN(v)); + ci = &js_BooleanIntToString_ci; } else { /* * Callers must deal with non-primitive (non-null object) values by @@ -8310,7 +8344,7 @@ TraceRecorder::ifop() lir->ins_eq0(lir->ins2(LIR_feq, v_ins, lir->insImmf(0)))); } else if (JSVAL_IS_STRING(v)) { cond = JSVAL_TO_STRING(v)->length() != 0; - x = lir->insLoad(LIR_ldp, v_ins, offsetof(JSString, mLength)); + x = lir->insLoad(LIR_ldp, v_ins, offsetof(JSString, mLength), ACC_OTHER); } else { JS_NOT_REACHED("ifop"); return ARECORD_STOP; @@ -8338,7 +8372,7 @@ TraceRecorder::tableswitch() /* No need to guard if the condition is constant. */ LIns* v_ins = f2i(get(&v)); - if (v_ins->isconst() || v_ins->isconstq()) + if (v_ins->isconst()) return ARECORD_CONTINUE; jsbytecode* pc = cx->fp->regs->pc; @@ -8375,7 +8409,7 @@ TraceRecorder::tableswitch() LIns* diff = lir->ins2(LIR_sub, v_ins, lir->insImm(low)); LIns* cmp = lir->ins2(LIR_ult, diff, lir->insImm(si->count)); lir->insGuard(LIR_xf, cmp, createGuardRecord(snapshot(DEFAULT_EXIT))); - lir->insStorei(diff, lir->insImmPtr(&si->index), 0); + lir->insStorei(diff, lir->insImmPtr(&si->index), 0, ACC_OTHER); VMSideExit* exit = snapshot(CASE_EXIT); exit->switchInfo = si; LIns* guardIns = lir->insGuard(LIR_xtbl, diff, createGuardRecord(exit)); @@ -8400,7 +8434,7 @@ TraceRecorder::switchop() LIns* v_ins = get(&v); /* No need to guard if the condition is constant. 
*/ - if (v_ins->isconst() || v_ins->isconstq()) + if (v_ins->isImmAny()) return RECORD_CONTINUE; if (isNumber(v)) { jsdouble d = asNumber(v); @@ -8480,7 +8514,7 @@ TraceRecorder::incProp(jsint incr, bool pre) if (slot == SPROP_INVALID_SLOT) RETURN_STOP_A("incProp on invalid slot"); - jsval& v = STOBJ_GET_SLOT(obj, slot); + jsval& v = obj->getSlotRef(slot); CHECK_STATUS_A(inc(v, v_ins, incr, pre)); LIns* dslots_ins = NULL; @@ -8498,7 +8532,7 @@ TraceRecorder::incElem(jsint incr, bool pre) LIns* addr_ins; if (JSVAL_IS_PRIMITIVE(l) || !JSVAL_IS_INT(r) || - !guardDenseArray(JSVAL_TO_OBJECT(l), get(&l))) { + !guardDenseArray(JSVAL_TO_OBJECT(l), get(&l), MISMATCH_EXIT)) { return RECORD_STOP; } @@ -8506,7 +8540,7 @@ TraceRecorder::incElem(jsint incr, bool pre) if (!addr_ins) // if we read a hole, abort return RECORD_STOP; CHECK_STATUS(inc(*vp, v_ins, incr, pre)); - lir->insStorei(box_jsval(*vp, v_ins), addr_ins, 0); + lir->insStorei(box_jsval(*vp, v_ins), addr_ins, 0, ACC_OTHER); return RECORD_CONTINUE; } @@ -8622,14 +8656,18 @@ TraceRecorder::equalityHelper(jsval l, jsval r, LIns* l_ins, LIns* r_ins, */ if (GetPromotedType(l) == GetPromotedType(r)) { - if (JSVAL_TAG(l) == JSVAL_OBJECT || JSVAL_IS_SPECIAL(l)) { - if (JSVAL_TAG(l) == JSVAL_OBJECT && l) { - JSClass *clasp = OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(l)); - if ((clasp->flags & JSCLASS_IS_EXTENDED) && ((JSExtendedClass*) clasp)->equality) - RETURN_STOP_A("Can't trace extended class equality operator"); - } - if (JSVAL_TAG(l) == JSVAL_OBJECT) + if (JSVAL_IS_VOID(l) || JSVAL_IS_NULL(l)) { + cond = true; + if (JSVAL_IS_NULL(l)) op = LIR_peq; + } else if (JSVAL_IS_OBJECT(l)) { + JSClass *clasp = OBJ_GET_CLASS(cx, JSVAL_TO_OBJECT(l)); + if ((clasp->flags & JSCLASS_IS_EXTENDED) && ((JSExtendedClass*) clasp)->equality) + RETURN_STOP_A("Can't trace extended class equality operator"); + op = LIR_peq; + cond = (l == r); + } else if (JSVAL_IS_SPECIAL(l)) { + JS_ASSERT(JSVAL_IS_BOOLEAN(l) && JSVAL_IS_BOOLEAN(r)); cond = (l == r); } else if (JSVAL_IS_STRING(l)) { args[0] = r_ins, args[1] = l_ins; @@ -8641,12 +8679,12 @@ TraceRecorder::equalityHelper(jsval l, jsval r, LIns* l_ins, LIns* r_ins, cond = (asNumber(l) == asNumber(r)); op = LIR_feq; } - } else if (JSVAL_IS_NULL(l) && JSVAL_IS_SPECIAL(r)) { - l_ins = lir->insImm(JSVAL_TO_SPECIAL(JSVAL_VOID)); - cond = (r == JSVAL_VOID); - } else if (JSVAL_IS_SPECIAL(l) && JSVAL_IS_NULL(r)) { - r_ins = lir->insImm(JSVAL_TO_SPECIAL(JSVAL_VOID)); - cond = (l == JSVAL_VOID); + } else if (JSVAL_IS_NULL(l) && JSVAL_IS_VOID(r)) { + l_ins = INS_VOID(); + cond = true; + } else if (JSVAL_IS_VOID(l) && JSVAL_IS_NULL(r)) { + r_ins = INS_VOID(); + cond = true; } else if (isNumber(l) && JSVAL_IS_STRING(r)) { args[0] = r_ins, args[1] = cx_ins; r_ins = lir->insCall(&js_StringToNumber_ci, args); @@ -8658,43 +8696,25 @@ TraceRecorder::equalityHelper(jsval l, jsval r, LIns* l_ins, LIns* r_ins, cond = (js_StringToNumber(cx, JSVAL_TO_STRING(l)) == asNumber(r)); op = LIR_feq; } else { - if (JSVAL_IS_SPECIAL(l)) { - bool isVoid = !!JSVAL_IS_VOID(l); - guard(isVoid, - lir->ins2(LIR_eq, l_ins, INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID))), - BRANCH_EXIT); - if (!isVoid) { - args[0] = l_ins, args[1] = cx_ins; - l_ins = lir->insCall(&js_BooleanOrUndefinedToNumber_ci, args); - l = (l == JSVAL_VOID) - ? 
cx->runtime->NaNValue - : INT_TO_JSVAL(l == JSVAL_TRUE); - return equalityHelper(l, r, l_ins, r_ins, negate, - tryBranchAfterCond, rval); - } - } else if (JSVAL_IS_SPECIAL(r)) { - bool isVoid = !!JSVAL_IS_VOID(r); - guard(isVoid, - lir->ins2(LIR_eq, r_ins, INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID))), - BRANCH_EXIT); - if (!isVoid) { - args[0] = r_ins, args[1] = cx_ins; - r_ins = lir->insCall(&js_BooleanOrUndefinedToNumber_ci, args); - r = (r == JSVAL_VOID) - ? cx->runtime->NaNValue - : INT_TO_JSVAL(r == JSVAL_TRUE); - return equalityHelper(l, r, l_ins, r_ins, negate, - tryBranchAfterCond, rval); - } - } else { - if ((JSVAL_IS_STRING(l) || isNumber(l)) && !JSVAL_IS_PRIMITIVE(r)) { - RETURN_IF_XML_A(r); - return InjectStatus(call_imacro(equality_imacros.any_obj)); - } - if (!JSVAL_IS_PRIMITIVE(l) && (JSVAL_IS_STRING(r) || isNumber(r))) { - RETURN_IF_XML_A(l); - return InjectStatus(call_imacro(equality_imacros.obj_any)); - } + if (JSVAL_IS_BOOLEAN(l)) { + l_ins = i2f(l_ins); + l = INT_TO_JSVAL(l == JSVAL_TRUE); + return equalityHelper(l, r, l_ins, r_ins, negate, + tryBranchAfterCond, rval); + } + if (JSVAL_IS_BOOLEAN(r)) { + r_ins = i2f(r_ins); + r = INT_TO_JSVAL(r == JSVAL_TRUE); + return equalityHelper(l, r, l_ins, r_ins, negate, + tryBranchAfterCond, rval); + } + if ((JSVAL_IS_STRING(l) || isNumber(l)) && !JSVAL_IS_PRIMITIVE(r)) { + RETURN_IF_XML_A(r); + return InjectStatus(call_imacro(equality_imacros.any_obj)); + } + if (!JSVAL_IS_PRIMITIVE(l) && (JSVAL_IS_STRING(r) || isNumber(r))) { + RETURN_IF_XML_A(l); + return InjectStatus(call_imacro(equality_imacros.obj_any)); } l_ins = lir->insImm(0); @@ -8781,7 +8801,10 @@ TraceRecorder::relational(LOpcode op, bool tryBranchAfterCond) LIns* args[] = { l_ins, cx_ins }; switch (JSVAL_TAG(l)) { case JSVAL_SPECIAL: - l_ins = lir->insCall(&js_BooleanOrUndefinedToNumber_ci, args); + if (JSVAL_IS_VOID(l)) + l_ins = lir->insImmf(js_NaN); + else + l_ins = i2f(l_ins); break; case JSVAL_STRING: l_ins = lir->insCall(&js_StringToNumber_ci, args); @@ -8804,7 +8827,10 @@ TraceRecorder::relational(LOpcode op, bool tryBranchAfterCond) LIns* args[] = { r_ins, cx_ins }; switch (JSVAL_TAG(r)) { case JSVAL_SPECIAL: - r_ins = lir->insCall(&js_BooleanOrUndefinedToNumber_ci, args); + if (JSVAL_IS_VOID(r)) + r_ins = lir->insImmf(js_NaN); + else + r_ins = i2f(r_ins); break; case JSVAL_STRING: r_ins = lir->insCall(&js_StringToNumber_ci, args); @@ -8824,13 +8850,12 @@ TraceRecorder::relational(LOpcode op, bool tryBranchAfterCond) } } { - jsval tmp = JSVAL_NULL; - JSAutoTempValueRooter tvr(cx, 1, &tmp); + AutoValueRooter tvr(cx, JSVAL_NULL); - tmp = l; - lnum = js_ValueToNumber(cx, &tmp); - tmp = r; - rnum = js_ValueToNumber(cx, &tmp); + *tvr.addr() = l; + lnum = js_ValueToNumber(cx, tvr.addr()); + *tvr.addr() = r; + rnum = js_ValueToNumber(cx, tvr.addr()); } cond = EvalCmp(op, lnum, rnum); fp = true; @@ -8843,7 +8868,7 @@ TraceRecorder::relational(LOpcode op, bool tryBranchAfterCond) */ if (!fp) { JS_ASSERT(isFCmpOpcode(op)); - op = LOpcode(op + (LIR_eq - LIR_feq)); + op = f64cmp_to_i32cmp(op); } x = lir->ins2(op, l_ins, r_ins); @@ -8936,16 +8961,25 @@ TraceRecorder::binary(LOpcode op) rnum = js_StringToNumber(cx, JSVAL_TO_STRING(r)); rightIsNumber = true; } + /* N.B. void is JSVAL_SPECIAL. 
*/ if (JSVAL_IS_SPECIAL(l)) { - LIns* args[] = { a, cx_ins }; - a = lir->insCall(&js_BooleanOrUndefinedToNumber_ci, args); - lnum = js_BooleanOrUndefinedToNumber(cx, JSVAL_TO_SPECIAL(l)); + if (JSVAL_IS_VOID(l)) { + a = lir->insImmf(js_NaN); + lnum = js_NaN; + } else { + a = i2f(a); + lnum = JSVAL_TO_SPECIAL(l); + } leftIsNumber = true; } if (JSVAL_IS_SPECIAL(r)) { - LIns* args[] = { b, cx_ins }; - b = lir->insCall(&js_BooleanOrUndefinedToNumber_ci, args); - rnum = js_BooleanOrUndefinedToNumber(cx, JSVAL_TO_SPECIAL(r)); + if (JSVAL_IS_VOID(r)) { + b = lir->insImmf(js_NaN); + rnum = js_NaN; + } else { + b = i2f(b); + rnum = JSVAL_TO_SPECIAL(r); + } rightIsNumber = true; } if (leftIsNumber && rightIsNumber) { @@ -9003,7 +9037,7 @@ TraceRecorder::dumpGuardedShapes(const char* prefix) JS_REQUIRES_STACK RecordingStatus TraceRecorder::guardShape(LIns* obj_ins, JSObject* obj, uint32 shape, const char* guardName, - LIns* map_ins, VMSideExit* exit) + VMSideExit* exit) { // Test (with add if missing) for a remembered guard for (obj_ins, obj). GuardedShapeTable::AddPtr p = guardedShapeTable.lookupForAdd(obj_ins); @@ -9021,7 +9055,8 @@ TraceRecorder::guardShape(LIns* obj_ins, JSObject* obj, uint32 shape, const char #endif // Finally, emit the shape guard. - LIns* shape_ins = addName(lir->insLoad(LIR_ld, map_ins, offsetof(JSScope, shape)), "shape"); + LIns* shape_ins = + addName(lir->insLoad(LIR_ld, map(obj_ins), offsetof(JSScope, shape), ACC_OTHER), "shape"); guard(true, addName(lir->ins2i(LIR_eq, shape_ins, shape), guardName), exit); @@ -9055,7 +9090,7 @@ JS_STATIC_ASSERT(offsetof(JSObjectOps, objectMap) == 0); inline LIns* TraceRecorder::map(LIns* obj_ins) { - return addName(lir->insLoad(LIR_ldp, obj_ins, (int) offsetof(JSObject, map)), "map"); + return addName(lir->insLoad(LIR_ldp, obj_ins, (int) offsetof(JSObject, map), ACC_OTHER), "map"); } bool @@ -9080,43 +9115,8 @@ TraceRecorder::map_is_native(JSObjectMap* map, LIns* map_ins, LIns*& ops_ins, si return true; } -JS_REQUIRES_STACK RecordingStatus -TraceRecorder::guardNativePropertyOp(JSObject* aobj, LIns* map_ins) -{ - /* - * Interpreter calls to PROPERTY_CACHE_TEST guard on native object ops - * which is required to use native objects (those whose maps are scopes), - * or even more narrow conditions required because the cache miss case - * will call a particular object-op (js_GetProperty, js_SetProperty). - * - * We parameterize using offsetof and guard on match against the hook at - * the given offset in js_ObjectOps. TraceRecorder::record_JSOP_SETPROP - * guards the js_SetProperty case. - */ - uint32 format = js_CodeSpec[*cx->fp->regs->pc].format; - uint32 mode = JOF_MODE(format); - - // No need to guard native-ness of global object. - JS_ASSERT(OBJ_IS_NATIVE(globalObj)); - if (aobj != globalObj) { - size_t op_offset = offsetof(JSObjectOps, objectMap); - if (mode == JOF_PROP || mode == JOF_VARPROP) { - op_offset = (format & JOF_SET) - ? 
offsetof(JSObjectOps, setProperty) - : offsetof(JSObjectOps, getProperty); - } else { - JS_ASSERT(mode == JOF_NAME); - } - - LIns* ops_ins; - if (!map_is_native(aobj->map, map_ins, ops_ins, op_offset)) - RETURN_STOP("non-native map"); - } - return RECORD_CONTINUE; -} - JS_REQUIRES_STACK AbortableRecordingStatus -TraceRecorder::test_property_cache(JSObject* obj, LIns* obj_ins, JSObject*& obj2, jsuword& pcval) +TraceRecorder::test_property_cache(JSObject* obj, LIns* obj_ins, JSObject*& obj2, PCVal& pcval) { jsbytecode* pc = cx->fp->regs->pc; JS_ASSERT(*pc != JSOP_INITPROP && *pc != JSOP_INITMETHOD && @@ -9132,13 +9132,9 @@ TraceRecorder::test_property_cache(JSObject* obj, LIns* obj_ins, JSObject*& obj2 obj_ins = stobj_get_proto(obj_ins); } - LIns* map_ins = map(obj_ins); - - CHECK_STATUS_A(guardNativePropertyOp(aobj, map_ins)); - JSAtom* atom; - JSPropCacheEntry* entry; - PROPERTY_CACHE_TEST(cx, pc, aobj, obj2, entry, atom); + PropertyCacheEntry* entry; + JS_PROPERTY_CACHE(cx).test(cx, pc, aobj, obj2, entry, atom); if (atom) { // Miss: pre-fill the cache for the interpreter, as well as for our needs. jsid id = ATOM_TO_JSID(atom); @@ -9175,12 +9171,12 @@ TraceRecorder::test_property_cache(JSObject* obj, LIns* obj_ins, JSObject*& obj2 RETURN_ERROR_A("error in js_LookupPropertyWithFlags"); if (prop) { - if (!OBJ_IS_NATIVE(obj2)) { + if (!obj2->isNative()) { obj2->dropProperty(cx, prop); RETURN_STOP_A("property found on non-native object"); } - entry = js_FillPropertyCache(cx, aobj, 0, protoIndex, obj2, - (JSScopeProperty*) prop, false); + entry = JS_PROPERTY_CACHE(cx).fill(cx, aobj, 0, protoIndex, obj2, + (JSScopeProperty*) prop); JS_ASSERT(entry); if (entry == JS_NO_PROP_CACHE_FILL) entry = NULL; @@ -9194,8 +9190,8 @@ TraceRecorder::test_property_cache(JSObject* obj, LIns* obj_ins, JSObject*& obj2 // the global it's assigning does not yet exist, create it. obj2 = obj; - // Use PCVAL_NULL to return "no such property" to our caller. - pcval = PCVAL_NULL; + // Use a null pcval to return "no such property" to our caller. + pcval.setNull(); return ARECORD_CONTINUE; } @@ -9212,93 +9208,68 @@ TraceRecorder::test_property_cache(JSObject* obj, LIns* obj_ins, JSObject*& obj2 JS_ASSERT(cx->requestDepth); #endif - return InjectStatus(guardPropertyCacheHit(obj_ins, map_ins, aobj, obj2, entry, pcval)); + return InjectStatus(guardPropertyCacheHit(obj_ins, aobj, obj2, entry, pcval)); } JS_REQUIRES_STACK RecordingStatus TraceRecorder::guardPropertyCacheHit(LIns* obj_ins, - LIns* map_ins, JSObject* aobj, JSObject* obj2, - JSPropCacheEntry* entry, - jsuword& pcval) + PropertyCacheEntry* entry, + PCVal& pcval) { VMSideExit* exit = snapshot(BRANCH_EXIT); - uint32 vshape = PCVCAP_SHAPE(entry->vcap); + uint32 vshape = entry->vshape(); - // Check for first-level cache hit and guard on kshape if possible. - // Otherwise guard on key object exact match. - if (PCVCAP_TAG(entry->vcap) <= 1) { - // Special case for the global object, which may be aliased to get a property value. - // To catch cross-global property accesses we must check against globalObj identity. - // But a JOF_NAME mode opcode needs no guard, as we ensure the global object's shape - // never changes, and name ops can't reach across a global object ('with' aborts). - if (aobj == globalObj) { - if (entry->adding()) - RETURN_STOP("adding a property to the global object"); + // Special case for the global object, which may be aliased to get a property value. + // To catch cross-global property accesses we must check against globalObj identity. 
+ // But a JOF_NAME mode opcode needs no guard, as we ensure the global object's shape + // never changes, and name ops can't reach across a global object ('with' aborts). + if (aobj == globalObj) { + if (entry->adding()) + RETURN_STOP("adding a property to the global object"); - JSOp op = js_GetOpcode(cx, cx->fp->script, cx->fp->regs->pc); - if (JOF_OPMODE(op) != JOF_NAME) { - guard(true, - addName(lir->ins2(LIR_peq, obj_ins, INS_CONSTOBJ(globalObj)), "guard_global"), - exit); - } - } else { - CHECK_STATUS(guardShape(obj_ins, aobj, entry->kshape, "guard_kshape", map_ins, exit)); - } - - if (entry->adding()) { - LIns *vshape_ins = addName( - lir->insLoad(LIR_ld, - addName(lir->insLoad(LIR_ldp, cx_ins, - offsetof(JSContext, runtime), ACC_READONLY), - "runtime"), - offsetof(JSRuntime, protoHazardShape)), - "protoHazardShape"); - guard(true, - addName(lir->ins2i(LIR_eq, vshape_ins, vshape), "guard_protoHazardShape"), - MISMATCH_EXIT); - } - } else { JSOp op = js_GetOpcode(cx, cx->fp->script, cx->fp->regs->pc); - -#ifdef DEBUG - JSAtom *pcatom; - if (op == JSOP_LENGTH) { - pcatom = cx->runtime->atomState.lengthAtom; - } else { - ptrdiff_t pcoff = (JOF_TYPE(js_CodeSpec[op].format) == JOF_SLOTATOM) ? SLOTNO_LEN : 0; - GET_ATOM_FROM_BYTECODE(cx->fp->script, cx->fp->regs->pc, pcoff, pcatom); - } - JS_ASSERT(entry->kpc == (jsbytecode *) pcatom); - JS_ASSERT(entry->kshape == jsuword(aobj)); -#endif - - // See above comment about globalObj and JOF_NAME. - if (!obj_ins->isconstp() && (aobj != globalObj || JOF_OPMODE(op) != JOF_NAME)) { + if (JOF_OPMODE(op) != JOF_NAME) { guard(true, - addName(lir->ins2(LIR_peq, obj_ins, INS_CONSTOBJ(aobj)), "guard_kobj"), + addName(lir->ins2(LIR_peq, obj_ins, INS_CONSTOBJ(globalObj)), "guard_global"), exit); } + } else { + CHECK_STATUS(guardShape(obj_ins, aobj, entry->kshape, "guard_kshape", exit)); + } + + if (entry->adding()) { + LIns *vshape_ins = addName( + lir->insLoad(LIR_ld, + addName(lir->insLoad(LIR_ldp, cx_ins, offsetof(JSContext, runtime), + ACC_READONLY), + "runtime"), + offsetof(JSRuntime, protoHazardShape), ACC_OTHER), + "protoHazardShape"); + + guard(true, + addName(lir->ins2i(LIR_eq, vshape_ins, vshape), "guard_protoHazardShape"), + MISMATCH_EXIT); } // For any hit that goes up the scope and/or proto chains, we will need to // guard on the shape of the object containing the property. - if (PCVCAP_TAG(entry->vcap) >= 1) { + if (entry->vcapTag() >= 1) { JS_ASSERT(OBJ_SHAPE(obj2) == vshape); if (obj2 == globalObj) RETURN_STOP("hitting the global object via a prototype chain"); LIns* obj2_ins; - if (PCVCAP_TAG(entry->vcap) == 1) { - // Duplicate the special case in PROPERTY_CACHE_TEST. + if (entry->vcapTag() == 1) { + // Duplicate the special case in PropertyCache::test. 
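For orientation, the guards in this function key off the cache entry's vcap tag; the cases handled above and below are roughly the following (semantics inferred from the surrounding code):

    // vcapTag() == 0 : property sits on aobj itself; the kshape or global-identity
    //                  guard already emitted is sufficient
    // vcapTag() == 1 : property sits on aobj's immediate proto; reload the proto
    //                  pointer at run time, null-check it, then shape-guard it
    // vcapTag() >= 2 : holder obj2 is further up the scope/proto chain; bake it in
    //                  as a constant and shape-guard it against the entry's vshape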
obj2_ins = addName(stobj_get_proto(obj_ins), "proto"); guard(false, lir->ins_peq0(obj2_ins), exit); } else { obj2_ins = INS_CONSTOBJ(obj2); } - CHECK_STATUS(guardShape(obj2_ins, obj2, vshape, "guard_vshape", map(obj2_ins), exit)); + CHECK_STATUS(guardShape(obj2_ins, obj2, vshape, "guard_vshape", exit)); } pcval = entry->vword; @@ -9308,15 +9279,15 @@ TraceRecorder::guardPropertyCacheHit(LIns* obj_ins, void TraceRecorder::stobj_set_fslot(LIns *obj_ins, unsigned slot, LIns* v_ins) { - lir->insStorei(v_ins, obj_ins, offsetof(JSObject, fslots) + slot * sizeof(jsval)); + lir->insStorei(v_ins, obj_ins, offsetof(JSObject, fslots) + slot * sizeof(jsval), ACC_OTHER); } void TraceRecorder::stobj_set_dslot(LIns *obj_ins, unsigned slot, LIns*& dslots_ins, LIns* v_ins) { if (!dslots_ins) - dslots_ins = lir->insLoad(LIR_ldp, obj_ins, offsetof(JSObject, dslots)); - lir->insStorei(v_ins, dslots_ins, slot * sizeof(jsval)); + dslots_ins = lir->insLoad(LIR_ldp, obj_ins, offsetof(JSObject, dslots), ACC_OTHER); + lir->insStorei(v_ins, dslots_ins, slot * sizeof(jsval), ACC_OTHER); } void @@ -9333,7 +9304,8 @@ LIns* TraceRecorder::stobj_get_fslot(LIns* obj_ins, unsigned slot) { JS_ASSERT(slot < JS_INITIAL_NSLOTS); - return lir->insLoad(LIR_ldp, obj_ins, offsetof(JSObject, fslots) + slot * sizeof(jsval)); + return lir->insLoad(LIR_ldp, obj_ins, offsetof(JSObject, fslots) + slot * sizeof(jsval), + ACC_OTHER); } LIns* @@ -9348,8 +9320,8 @@ LIns* TraceRecorder::stobj_get_dslot(LIns* obj_ins, unsigned index, LIns*& dslots_ins) { if (!dslots_ins) - dslots_ins = lir->insLoad(LIR_ldp, obj_ins, offsetof(JSObject, dslots)); - return lir->insLoad(LIR_ldp, dslots_ins, index * sizeof(jsval)); + dslots_ins = lir->insLoad(LIR_ldp, obj_ins, offsetof(JSObject, dslots), ACC_OTHER); + return lir->insLoad(LIR_ldp, dslots_ins, index * sizeof(jsval), ACC_OTHER); } LIns* @@ -9407,11 +9379,17 @@ TraceRecorder::unbox_jsval(jsval v, LIns* v_ins, VMSideExit* exit) } switch (JSVAL_TAG(v)) { case JSVAL_SPECIAL: + if (JSVAL_IS_VOID(v)) { + guard(true, lir->ins2(LIR_peq, v_ins, INS_CONSTWORD(JSVAL_VOID)), exit); + return INS_VOID(); + } guard(true, lir->ins2(LIR_peq, lir->ins2(LIR_piand, v_ins, INS_CONSTWORD(JSVAL_TAGMASK)), INS_CONSTWORD(JSVAL_SPECIAL)), exit); + JS_ASSERT(!v_ins->isconstp()); + guard(false, lir->ins2(LIR_peq, v_ins, INS_CONSTWORD(JSVAL_VOID)), exit); return p2i(lir->ins2i(LIR_pursh, v_ins, JSVAL_TAGBITS)); case JSVAL_OBJECT: @@ -9426,10 +9404,11 @@ TraceRecorder::unbox_jsval(jsval v, LIns* v_ins, VMSideExit* exit) INS_CONSTWORD(JSVAL_OBJECT)), exit); - guard(HAS_FUNCTION_CLASS(JSVAL_TO_OBJECT(v)), + guard(JSVAL_TO_OBJECT(v)->isFunction(), lir->ins2(LIR_peq, lir->ins2(LIR_piand, - lir->insLoad(LIR_ldp, v_ins, offsetof(JSObject, classword)), + lir->insLoad(LIR_ldp, v_ins, offsetof(JSObject, classword), + ACC_OTHER), INS_CONSTWORD(~JSSLOT_CLASS_MASK_BITS)), INS_CONSTPTR(&js_FunctionClass)), exit); @@ -9452,20 +9431,21 @@ JS_REQUIRES_STACK RecordingStatus TraceRecorder::getThis(LIns*& this_ins) { /* - * js_ComputeThisForFrame updates cx->fp->argv[-1], so sample it into 'original' first. + * JSStackFrame::getThisObject updates cx->fp->argv[-1], so sample it into 'original' first. 
*/ jsval original = JSVAL_NULL; if (cx->fp->argv) { original = cx->fp->argv[-1]; if (!JSVAL_IS_PRIMITIVE(original) && - guardClass(JSVAL_TO_OBJECT(original), get(&cx->fp->argv[-1]), &js_WithClass, snapshot(MISMATCH_EXIT))) { + guardClass(JSVAL_TO_OBJECT(original), get(&cx->fp->argv[-1]), &js_WithClass, + snapshot(MISMATCH_EXIT), ACC_OTHER)) { RETURN_STOP("can't trace getThis on With object"); } } - JSObject* thisObj = js_ComputeThisForFrame(cx, cx->fp); + JSObject* thisObj = cx->fp->getThisObject(cx); if (!thisObj) - RETURN_ERROR("js_ComputeThisForName failed"); + RETURN_ERROR("fp->getThisObject failed"); /* In global code, bake in the global object as 'this' object. */ if (!cx->fp->callee()) { @@ -9487,7 +9467,7 @@ TraceRecorder::getThis(LIns*& this_ins) * a null value in argv[-1], this trace will only match if we see null at * runtime as well. Bake in the global object as 'this' object, updating * the tracker as well. We can only detect this condition prior to calling - * js_ComputeThisForFrame, since it updates the interpreter's copy of + * JSStackFrame::getThisObject, since it updates the interpreter's copy of * argv[-1]. */ JSClass* clasp = NULL;; @@ -9495,7 +9475,8 @@ TraceRecorder::getThis(LIns*& this_ins) (((clasp = JSVAL_TO_OBJECT(original)->getClass()) == &js_CallClass) || (clasp == &js_BlockClass))) { if (clasp) - guardClass(JSVAL_TO_OBJECT(original), get(&thisv), clasp, snapshot(BRANCH_EXIT)); + guardClass(JSVAL_TO_OBJECT(original), get(&thisv), clasp, snapshot(BRANCH_EXIT), + ACC_OTHER); JS_ASSERT(!JSVAL_IS_PRIMITIVE(thisv)); if (thisObj != globalObj) RETURN_STOP("global object was wrapped while recording"); @@ -9543,13 +9524,13 @@ TraceRecorder::guardClass(JSObject* obj, LIns* obj_ins, JSClass* clasp, VMSideEx JS_REQUIRES_STACK bool TraceRecorder::guardDenseArray(JSObject* obj, LIns* obj_ins, ExitType exitType) { - return guardClass(obj, obj_ins, &js_ArrayClass, snapshot(exitType)); + return guardClass(obj, obj_ins, &js_ArrayClass, snapshot(exitType), ACC_OTHER); } JS_REQUIRES_STACK bool TraceRecorder::guardDenseArray(JSObject* obj, LIns* obj_ins, VMSideExit* exit) { - return guardClass(obj, obj_ins, &js_ArrayClass, exit); + return guardClass(obj, obj_ins, &js_ArrayClass, exit, ACC_OTHER); } JS_REQUIRES_STACK bool @@ -9578,7 +9559,7 @@ TraceRecorder::guardPrototypeHasNoIndexedProperties(JSObject* obj, LIns* obj_ins return RECORD_STOP; while (guardHasPrototype(obj, obj_ins, &obj, &obj_ins, exit)) - CHECK_STATUS(guardShape(obj_ins, obj, OBJ_SHAPE(obj), "guard(shape)", map(obj_ins), exit)); + CHECK_STATUS(guardShape(obj_ins, obj, OBJ_SHAPE(obj), "guard(shape)", exit)); return RECORD_CONTINUE; } @@ -9672,7 +9653,7 @@ TraceRecorder::putActivationObjects() args_ins = lir->insAlloc(sizeof(jsval) * nargs); for (int i = 0; i < nargs; ++i) { LIns* arg_ins = box_jsval(cx->fp->argv[i], get(&cx->fp->argv[i])); - lir->insStorei(arg_ins, args_ins, i * sizeof(jsval)); + lir->insStorei(arg_ins, args_ins, i * sizeof(jsval), ACC_OTHER); } } else { args_ins = INS_CONSTPTR(0); @@ -9691,7 +9672,7 @@ TraceRecorder::putActivationObjects() slots_ins = lir->insAlloc(sizeof(jsval) * nslots); for (int i = 0; i < nslots; ++i) { LIns* slot_ins = box_jsval(cx->fp->slots[i], get(&cx->fp->slots[i])); - lir->insStorei(slot_ins, slots_ins, i * sizeof(jsval)); + lir->insStorei(slot_ins, slots_ins, i * sizeof(jsval), ACC_OTHER); } } else { slots_ins = INS_CONSTPTR(0); @@ -9806,9 +9787,8 @@ TraceRecorder::record_EnterFrame(uintN& inlineCallCount) RETURN_STOP_A("recursion started inlining"); } - TreeFragment* root 
= fragment->root; - TreeFragment* first = LookupLoop(&JS_TRACE_MONITOR(cx), fp->regs->pc, root->globalObj, - root->globalShape, fp->argc); + TreeFragment* first = LookupLoop(&JS_TRACE_MONITOR(cx), fp->regs->pc, tree->globalObj, + tree->globalShape, fp->argc); if (!first) return ARECORD_CONTINUE; TreeFragment* f = findNestedCompatiblePeer(first); @@ -9827,12 +9807,9 @@ TraceRecorder::record_EnterFrame(uintN& inlineCallCount) RETURN_STOP_A("inner recursive tree is blacklisted"); JSContext* _cx = cx; SlotList* globalSlots = tree->globalSlots; - TraceMonitor* tm = traceMonitor; AbortRecording(cx, "trying to compile inner recursive tree"); - if (RecordTree(_cx, tm, first, NULL, 0, first->globalObj, first->globalShape, - globalSlots, _cx->fp->argc, Record_EnterFrame)) { - JS_ASSERT(tm->recorder); - } + JS_ASSERT(_cx->fp->argc == first->argc); + RecordTree(_cx, first, NULL, 0, globalSlots, Record_EnterFrame); break; } } @@ -9842,7 +9819,7 @@ TraceRecorder::record_EnterFrame(uintN& inlineCallCount) * Make sure the shape of the global object still matches (this might * flush the JIT cache). */ - JSObject* globalObj = JS_GetGlobalForObject(cx, cx->fp->scopeChain); + JSObject* globalObj = cx->fp->scopeChain->getGlobal(); uint32 globalShape = -1; SlotList* globalSlots = NULL; if (!CheckGlobalObjectShape(cx, traceMonitor, globalObj, &globalShape, &globalSlots)) @@ -9880,7 +9857,7 @@ TraceRecorder::record_LeaveFrame() JS_REQUIRES_STACK AbortableRecordingStatus TraceRecorder::record_JSOP_PUSH() { - stack(0, INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID))); + stack(0, INS_VOID()); return ARECORD_CONTINUE; } @@ -9893,8 +9870,8 @@ TraceRecorder::record_JSOP_POPV() // Store it in cx->fp->rval. NB: Tricky dependencies. cx->fp is the right // frame because POPV appears only in global and eval code and we don't // trace JSOP_EVAL or leaving the frame where tracing started. - LIns *fp_ins = lir->insLoad(LIR_ldp, cx_ins, offsetof(JSContext, fp)); - lir->insStorei(rval_ins, fp_ins, offsetof(JSStackFrame, rval)); + LIns *fp_ins = lir->insLoad(LIR_ldp, cx_ins, offsetof(JSContext, fp), ACC_OTHER); + lir->insStorei(rval_ins, fp_ins, offsetof(JSStackFrame, rval), ACC_OTHER); return ARECORD_CONTINUE; } @@ -10011,19 +9988,19 @@ TraceRecorder::record_JSOP_ARGUMENTS() LIns* mem_ins = lir->insAlloc(sizeof(jsval)); LIns* br1 = lir->insBranch(LIR_jt, lir->ins_peq0(a_ins), NULL); - lir->insStorei(a_ins, mem_ins, 0); + lir->insStorei(a_ins, mem_ins, 0, ACC_OTHER); LIns* br2 = lir->insBranch(LIR_j, NULL, NULL); LIns* label1 = lir->ins0(LIR_label); br1->setTarget(label1); LIns* call_ins = newArguments(callee_ins); - lir->insStorei(call_ins, mem_ins, 0); + lir->insStorei(call_ins, mem_ins, 0, ACC_OTHER); LIns* label2 = lir->ins0(LIR_label); br2->setTarget(label2); - args_ins = lir->insLoad(LIR_ldp, mem_ins, 0); + args_ins = lir->insLoad(LIR_ldp, mem_ins, 0, ACC_OTHER); } stack(0, args_ins); @@ -10223,7 +10200,7 @@ TraceRecorder::record_JSOP_NOT() } JS_ASSERT(JSVAL_IS_STRING(v)); set(&v, lir->ins_peq0(lir->insLoad(LIR_ldp, get(&v), - offsetof(JSString, mLength)))); + offsetof(JSString, mLength), ACC_OTHER))); return ARECORD_CONTINUE; } @@ -10276,14 +10253,20 @@ TraceRecorder::record_JSOP_NEG() return ARECORD_CONTINUE; } - JS_ASSERT(JSVAL_TAG(v) == JSVAL_STRING || JSVAL_IS_SPECIAL(v)); + if (JSVAL_IS_VOID(v)) { + set(&v, lir->insImmf(js_NaN)); + return ARECORD_CONTINUE; + } - LIns* args[] = { get(&v), cx_ins }; - set(&v, lir->ins1(LIR_fneg, - lir->insCall(JSVAL_IS_STRING(v) - ? 
&js_StringToNumber_ci - : &js_BooleanOrUndefinedToNumber_ci, - args))); + if (JSVAL_IS_STRING(v)) { + LIns* args[] = { get(&v), cx_ins }; + set(&v, lir->ins1(LIR_fneg, + lir->insCall(&js_StringToNumber_ci, args))); + return ARECORD_CONTINUE; + } + + JS_ASSERT(JSVAL_IS_BOOLEAN(v)); + set(&v, lir->ins1(LIR_fneg, i2f(get(&v)))); return ARECORD_CONTINUE; } @@ -10304,14 +10287,19 @@ TraceRecorder::record_JSOP_POS() set(&v, lir->insImmf(0)); return ARECORD_CONTINUE; } + if (JSVAL_IS_VOID(v)) { + set(&v, lir->insImmf(js_NaN)); + return ARECORD_CONTINUE; + } - JS_ASSERT(JSVAL_TAG(v) == JSVAL_STRING || JSVAL_IS_SPECIAL(v)); + if (JSVAL_IS_STRING(v)) { + LIns* args[] = { get(&v), cx_ins }; + set(&v, lir->insCall(&js_StringToNumber_ci, args)); + return ARECORD_CONTINUE; + } - LIns* args[] = { get(&v), cx_ins }; - set(&v, lir->insCall(JSVAL_IS_STRING(v) - ? &js_StringToNumber_ci - : &js_BooleanOrUndefinedToNumber_ci, - args)); + JS_ASSERT(JSVAL_IS_BOOLEAN(v)); + set(&v, i2f(get(&v))); return ARECORD_CONTINUE; } @@ -10386,7 +10374,7 @@ TraceRecorder::getClassPrototype(JSProtoKey key, LIns*& proto_ins) #ifdef DEBUG /* Double-check that a native proto has a matching emptyScope. */ if (key != JSProto_Array) { - JS_ASSERT(OBJ_IS_NATIVE(proto)); + JS_ASSERT(proto->isNative()); JSEmptyScope *emptyScope = OBJ_SCOPE(proto)->emptyScope; JS_ASSERT(emptyScope); JS_ASSERT(JSCLASS_CACHED_PROTO_KEY(emptyScope->clasp) == key); @@ -10482,15 +10470,16 @@ TraceRecorder::propagateFailureToBuiltinStatus(LIns* ok_ins, LIns*& status_ins) lir->ins2i(LIR_and, ok_ins, 1), 1), 1)); - lir->insStorei(status_ins, lirbuf->state, (int) offsetof(InterpState, builtinStatus)); + lir->insStorei(status_ins, lirbuf->state, (int) offsetof(InterpState, builtinStatus), + ACC_OTHER); } JS_REQUIRES_STACK void TraceRecorder::emitNativePropertyOp(JSScope* scope, JSScopeProperty* sprop, LIns* obj_ins, bool setflag, LIns* boxed_ins) { - JS_ASSERT(!(sprop->attrs & (setflag ? JSPROP_SETTER : JSPROP_GETTER))); - JS_ASSERT(setflag ? !SPROP_HAS_STUB_SETTER(sprop) : !SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop)); + JS_ASSERT(setflag ? !sprop->hasSetterValue() : !sprop->hasGetterValue()); + JS_ASSERT(setflag ? !sprop->hasDefaultSetter() : !sprop->hasDefaultGetterOrIsMethod()); enterDeepBailCall(); @@ -10498,18 +10487,18 @@ TraceRecorder::emitNativePropertyOp(JSScope* scope, JSScopeProperty* sprop, LIns // because the getter or setter could end up resizing the object's dslots. // Instead, use a word of stack and root it in nativeVp. LIns* vp_ins = lir->insAlloc(sizeof(jsval)); - lir->insStorei(vp_ins, lirbuf->state, offsetof(InterpState, nativeVp)); - lir->insStorei(INS_CONST(1), lirbuf->state, offsetof(InterpState, nativeVpLen)); + lir->insStorei(vp_ins, lirbuf->state, offsetof(InterpState, nativeVp), ACC_OTHER); + lir->insStorei(INS_CONST(1), lirbuf->state, offsetof(InterpState, nativeVpLen), ACC_OTHER); if (setflag) - lir->insStorei(boxed_ins, vp_ins, 0); + lir->insStorei(boxed_ins, vp_ins, 0, ACC_OTHER); CallInfo* ci = new (traceAlloc()) CallInfo(); ci->_address = uintptr_t(setflag ? 
sprop->setterOp() : sprop->getterOp()); - ci->_argtypes = ARGSIZE_I << (0*ARGSIZE_SHIFT) | - ARGSIZE_P << (1*ARGSIZE_SHIFT) | - ARGSIZE_P << (2*ARGSIZE_SHIFT) | - ARGSIZE_P << (3*ARGSIZE_SHIFT) | - ARGSIZE_P << (4*ARGSIZE_SHIFT); + ci->_typesig = ARGTYPE_I << (0*ARGTYPE_SHIFT) | + ARGTYPE_P << (1*ARGTYPE_SHIFT) | + ARGTYPE_P << (2*ARGTYPE_SHIFT) | + ARGTYPE_P << (3*ARGTYPE_SHIFT) | + ARGTYPE_P << (4*ARGTYPE_SHIFT); ci->_isPure = 0; ci->_storeAccSet = ACC_STORE_ANY; ci->_abi = ABI_CDECL; @@ -10520,21 +10509,20 @@ TraceRecorder::emitNativePropertyOp(JSScope* scope, JSScopeProperty* sprop, LIns LIns* ok_ins = lir->insCall(ci, args); // Cleanup. Immediately clear nativeVp before we might deep bail. - lir->insStorei(INS_NULL(), lirbuf->state, offsetof(InterpState, nativeVp)); + lir->insStorei(INS_NULL(), lirbuf->state, offsetof(InterpState, nativeVp), ACC_OTHER); leaveDeepBailCall(); // Guard that the call succeeded and builtinStatus is still 0. // If the native op succeeds but we deep-bail here, the result value is // lost! Therefore this can only be used for setters of shared properties. // In that case we ignore the result value anyway. - LIns* status_ins = lir->insLoad(LIR_ld, - lirbuf->state, - (int) offsetof(InterpState, builtinStatus)); + LIns* status_ins = lir->insLoad(LIR_ld, lirbuf->state, + (int) offsetof(InterpState, builtinStatus), ACC_OTHER); propagateFailureToBuiltinStatus(ok_ins, status_ins); guard(true, lir->ins_eq0(status_ins), STATUS_EXIT); // Re-load the value--but this is currently unused, so commented out. - //boxed_ins = lir->insLoad(LIR_ldp, vp_ins, 0); + //boxed_ins = lir->insLoad(LIR_ldp, vp_ins, 0, ACC_OTHER); } JS_REQUIRES_STACK RecordingStatus @@ -10562,7 +10550,7 @@ TraceRecorder::emitNativeCall(JSSpecializedNative* sn, uintN argc, LIns* args[], // Immediately unroot the vp as soon we return since we might deep bail next. if (rooted) - lir->insStorei(INS_NULL(), lirbuf->state, offsetof(InterpState, nativeVp)); + lir->insStorei(INS_NULL(), lirbuf->state, offsetof(InterpState, nativeVp), ACC_OTHER); rval_ins = res_ins; switch (JSTN_ERRTYPE(sn)) { @@ -10778,7 +10766,7 @@ TraceRecorder::callNative(uintN argc, JSOp mode) LIns* invokevp_ins = lir->insAlloc(vplen * sizeof(jsval)); // vp[0] is the callee. - lir->insStorei(INS_CONSTVAL(OBJECT_TO_JSVAL(funobj)), invokevp_ins, 0); + lir->insStorei(INS_CONSTVAL(OBJECT_TO_JSVAL(funobj)), invokevp_ins, 0, ACC_OTHER); // Calculate |this|. LIns* this_ins; @@ -10818,14 +10806,15 @@ TraceRecorder::callNative(uintN argc, JSOp mode) */ if (!(fun->flags & JSFUN_FAST_NATIVE)) { if (JSVAL_IS_NULL(vp[1])) { - JSObject* thisObj = js_ComputeThis(cx, JS_FALSE, vp + 2); + JSObject* thisObj = js_ComputeThis(cx, vp + 2); if (!thisObj) RETURN_ERROR("error in js_ComputeGlobalThis"); this_ins = INS_CONSTOBJ(thisObj); } else if (!JSVAL_IS_OBJECT(vp[1])) { RETURN_STOP("slow native(primitive, args)"); } else { - if (guardConstClass(JSVAL_TO_OBJECT(vp[1]), this_ins, &js_WithClass, snapshot(MISMATCH_EXIT))) + if (guardClass(JSVAL_TO_OBJECT(vp[1]), this_ins, &js_WithClass, + snapshot(MISMATCH_EXIT), ACC_READONLY)) RETURN_STOP("can't trace slow native invocation on With object"); this_ins = lir->ins_choose(lir->ins_peq0(stobj_get_parent(this_ins)), @@ -10835,12 +10824,12 @@ TraceRecorder::callNative(uintN argc, JSOp mode) } this_ins = box_jsval(vp[1], this_ins); } - lir->insStorei(this_ins, invokevp_ins, 1 * sizeof(jsval)); + lir->insStorei(this_ins, invokevp_ins, 1 * sizeof(jsval), ACC_OTHER); // Populate argv. 
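(The argv-population loop announced by the comment above continues right after this aside.) emitNativePropertyOp and callNative follow the same rooting discipline seen in these hunks: point state.nativeVp at the stack words before calling out, then null it again immediately, so a deep bail can never observe a stale root. A minimal compilable sketch of that invariant; the struct shape and helper name are assumptions:

#include <cstddef>
#include <cstdio>

struct InterpStateSketch {
    void**   nativeVp;      // GC root: covers nativeVp[0 .. nativeVpLen)
    unsigned nativeVpLen;
};

static bool fakeNative() { return true; }   // stands in for the real native call

template <typename Call>
static bool withRootedVp(InterpStateSketch& state, void** vp, Call call) {
    state.nativeVp    = vp;                 // publish the root
    state.nativeVpLen = 1;
    bool ok = call();
    state.nativeVp    = NULL;               // un-root before anything can deep-bail
    return ok;
}

int main() {
    InterpStateSketch state = { NULL, 0 };
    void* word = NULL;
    printf("%d\n", withRootedVp(state, &word, fakeNative));
    return 0;
}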
for (uintN n = 2; n < 2 + argc; n++) { LIns* i = box_jsval(vp[n], get(&vp[n])); - lir->insStorei(i, invokevp_ins, n * sizeof(jsval)); + lir->insStorei(i, invokevp_ins, n * sizeof(jsval), ACC_OTHER); // For a very long argument list we might run out of LIR space, so // check inside the loop. @@ -10852,7 +10841,7 @@ TraceRecorder::callNative(uintN argc, JSOp mode) if (2 + argc < vplen) { LIns* undef_ins = INS_CONSTWORD(JSVAL_VOID); for (uintN n = 2 + argc; n < vplen; n++) { - lir->insStorei(undef_ins, invokevp_ins, n * sizeof(jsval)); + lir->insStorei(undef_ins, invokevp_ins, n * sizeof(jsval), ACC_OTHER); if (outOfMemory()) RETURN_STOP("out of memory in extra slots"); @@ -10860,7 +10849,7 @@ TraceRecorder::callNative(uintN argc, JSOp mode) } // Set up arguments for the JSNative or JSFastNative. - uint32 types; + uint32 typesig; if (fun->flags & JSFUN_FAST_NATIVE) { if (mode == JSOP_NEW) RETURN_STOP("untraceable fast native constructor"); @@ -10868,10 +10857,10 @@ TraceRecorder::callNative(uintN argc, JSOp mode) args[0] = invokevp_ins; args[1] = lir->insImm(argc); args[2] = cx_ins; - types = ARGSIZE_I << (0*ARGSIZE_SHIFT) | - ARGSIZE_P << (1*ARGSIZE_SHIFT) | - ARGSIZE_I << (2*ARGSIZE_SHIFT) | - ARGSIZE_P << (3*ARGSIZE_SHIFT); + typesig = ARGTYPE_I << (0*ARGTYPE_SHIFT) | + ARGTYPE_P << (1*ARGTYPE_SHIFT) | + ARGTYPE_I << (2*ARGTYPE_SHIFT) | + ARGTYPE_P << (3*ARGTYPE_SHIFT); } else { int32_t offset = (vplen - 1) * sizeof(jsval); native_rval_ins = lir->ins2(LIR_piadd, invokevp_ins, INS_CONSTWORD(offset)); @@ -10880,12 +10869,12 @@ TraceRecorder::callNative(uintN argc, JSOp mode) args[2] = lir->insImm(argc); args[3] = this_ins; args[4] = cx_ins; - types = ARGSIZE_I << (0*ARGSIZE_SHIFT) | - ARGSIZE_P << (1*ARGSIZE_SHIFT) | - ARGSIZE_P << (2*ARGSIZE_SHIFT) | - ARGSIZE_I << (3*ARGSIZE_SHIFT) | - ARGSIZE_P << (4*ARGSIZE_SHIFT) | - ARGSIZE_P << (5*ARGSIZE_SHIFT); + typesig = ARGTYPE_I << (0*ARGTYPE_SHIFT) | + ARGTYPE_P << (1*ARGTYPE_SHIFT) | + ARGTYPE_P << (2*ARGTYPE_SHIFT) | + ARGTYPE_I << (3*ARGTYPE_SHIFT) | + ARGTYPE_P << (4*ARGTYPE_SHIFT) | + ARGTYPE_P << (5*ARGTYPE_SHIFT); } // Generate CallInfo and a JSSpecializedNative structure on the fly. @@ -10897,7 +10886,7 @@ TraceRecorder::callNative(uintN argc, JSOp mode) ci->_isPure = 0; ci->_storeAccSet = ACC_STORE_ANY; ci->_abi = ABI_CDECL; - ci->_argtypes = types; + ci->_typesig = typesig; #ifdef DEBUG ci->_name = JS_GetFunctionName(fun); #endif @@ -10915,8 +10904,8 @@ TraceRecorder::callNative(uintN argc, JSOp mode) // nativeVpLen immediately before emitting the call code. This way we avoid // leaving trace with a bogus nativeVp because we fall off trace while unboxing // values into the stack buffer. - lir->insStorei(INS_CONST(vplen), lirbuf->state, offsetof(InterpState, nativeVpLen)); - lir->insStorei(invokevp_ins, lirbuf->state, offsetof(InterpState, nativeVp)); + lir->insStorei(INS_CONST(vplen), lirbuf->state, offsetof(InterpState, nativeVpLen), ACC_OTHER); + lir->insStorei(invokevp_ins, lirbuf->state, offsetof(InterpState, nativeVp), ACC_OTHER); // argc is the original argc here. It is used to calculate where to place // the return value. 
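This hunk and the emitNativePropertyOp one above both switch from _argtypes/ARGSIZE_* to _typesig/ARGTYPE_*. A standalone sketch of the encoding being built: one small type code per slot, with slot 0 holding the return type. The shift width and code values below are assumptions for illustration, not the real nanojit definitions:

#include <cstdint>
#include <cstdio>

enum ArgType { ARGTYPE_V = 0, ARGTYPE_I = 1, ARGTYPE_P = 2 };
static const unsigned ARGTYPE_SHIFT = 3;                  // bits per slot (assumed)
static const uint32_t ARGTYPE_MASK  = (1u << ARGTYPE_SHIFT) - 1;

static uint32_t packTypesig(const ArgType* slots, unsigned n) {
    uint32_t sig = 0;
    for (unsigned i = 0; i < n; i++)
        sig |= uint32_t(slots[i]) << (i * ARGTYPE_SHIFT);
    return sig;
}

// Mirrors the builtin->returnType() == ARGTYPE_I test made later in the patch.
static ArgType returnType(uint32_t sig) { return ArgType(sig & ARGTYPE_MASK); }

int main() {
    // Fast-native shape from above: returns int, takes (vp, argc, cx).
    const ArgType fastNative[] = { ARGTYPE_I, ARGTYPE_P, ARGTYPE_I, ARGTYPE_P };
    uint32_t sig = packTypesig(fastNative, 4);
    printf("returns int32: %d\n", returnType(sig) == ARGTYPE_I);
    return 0;
}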
@@ -11041,7 +11030,7 @@ TraceRecorder::record_JSOP_TYPEOF() JS_REQUIRES_STACK AbortableRecordingStatus TraceRecorder::record_JSOP_VOID() { - stack(-1, INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID))); + stack(-1, INS_VOID()); return ARECORD_CONTINUE; } @@ -11189,25 +11178,25 @@ TraceRecorder::nativeSet(JSObject* obj, LIns* obj_ins, JSScopeProperty* sprop, * case unboxing would fail and, having called a native setter, we could * not just retry the instruction in the interpreter. */ - JS_ASSERT(SPROP_HAS_STUB_SETTER(sprop) || slot == SPROP_INVALID_SLOT); + JS_ASSERT(sprop->hasDefaultSetter() || slot == SPROP_INVALID_SLOT); // Box the value to be stored, if necessary. LIns* boxed_ins = NULL; - if (!SPROP_HAS_STUB_SETTER(sprop) || (slot != SPROP_INVALID_SLOT && obj != globalObj)) + if (!sprop->hasDefaultSetter() || (slot != SPROP_INVALID_SLOT && obj != globalObj)) boxed_ins = box_jsval(v, v_ins); // Call the setter, if any. - if (!SPROP_HAS_STUB_SETTER(sprop)) + if (!sprop->hasDefaultSetter()) emitNativePropertyOp(scope, sprop, obj_ins, true, boxed_ins); // Store the value, if this property has a slot. if (slot != SPROP_INVALID_SLOT) { JS_ASSERT(SPROP_HAS_VALID_SLOT(sprop, scope)); - JS_ASSERT(!(sprop->attrs & JSPROP_SHARED)); + JS_ASSERT(sprop->hasSlot()); if (obj == globalObj) { if (!lazilyImportGlobalSlot(slot)) RETURN_STOP("lazy import of global slot failed"); - set(&STOBJ_GET_SLOT(obj, slot), v_ins); + set(&obj->getSlotRef(slot), v_ins); } else { LIns* dslots_ins = NULL; stobj_set_slot(obj_ins, slot, dslots_ins, boxed_ins); @@ -11220,7 +11209,7 @@ TraceRecorder::nativeSet(JSObject* obj, LIns* obj_ins, JSScopeProperty* sprop, static JSBool FASTCALL MethodWriteBarrier(JSContext* cx, JSObject* obj, JSScopeProperty* sprop, JSObject* funobj) { - JSAutoTempValueRooter tvr(cx, funobj); + AutoValueRooter tvr(cx, funobj); return OBJ_SCOPE(obj)->methodWriteBarrier(cx, sprop, tvr.value()); } @@ -11228,21 +11217,21 @@ JS_DEFINE_CALLINFO_4(static, BOOL_FAIL, MethodWriteBarrier, CONTEXT, OBJECT, SCO 0, ACC_STORE_ANY) JS_REQUIRES_STACK RecordingStatus -TraceRecorder::setProp(jsval &l, JSPropCacheEntry* entry, JSScopeProperty* sprop, +TraceRecorder::setProp(jsval &l, PropertyCacheEntry* entry, JSScopeProperty* sprop, jsval &v, LIns*& v_ins) { if (entry == JS_NO_PROP_CACHE_FILL) RETURN_STOP("can't trace uncacheable property set"); - JS_ASSERT_IF(PCVCAP_TAG(entry->vcap) >= 1, sprop->attrs & JSPROP_SHARED); - if (!SPROP_HAS_STUB_SETTER(sprop) && sprop->slot != SPROP_INVALID_SLOT) + JS_ASSERT_IF(entry->vcapTag() >= 1, !sprop->hasSlot()); + if (!sprop->hasDefaultSetter() && sprop->slot != SPROP_INVALID_SLOT) RETURN_STOP("can't trace set of property with setter and slot"); - if (sprop->attrs & JSPROP_SETTER) + if (sprop->hasSetterValue()) RETURN_STOP("can't trace JavaScript function setter"); // These two cases are errors and can't be traced. - if (sprop->attrs & JSPROP_GETTER) + if (sprop->hasGetterValue()) RETURN_STOP("can't assign to property with script getter but no setter"); - if (sprop->attrs & JSPROP_READONLY) + if (!sprop->writable()) RETURN_STOP("can't assign to readonly property"); JS_ASSERT(!JSVAL_IS_PRIMITIVE(l)); @@ -11250,7 +11239,7 @@ TraceRecorder::setProp(jsval &l, JSPropCacheEntry* entry, JSScopeProperty* sprop LIns* obj_ins = get(&l); JSScope* scope = OBJ_SCOPE(obj); - JS_ASSERT_IF(entry->vcap == PCVCAP_MAKE(entry->kshape, 0, 0), scope->hasProperty(sprop)); + JS_ASSERT_IF(entry->directHit(), scope->hasProperty(sprop)); // Fast path for CallClass. This is about 20% faster than the general case. 
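(The CallClass fast path resumes immediately after this aside.) The setProp guards above swap raw attribute tests for predicate methods. A compact sketch of the mapping; the JSPROP_* flag values are assumed for illustration, and only the predicate logic is taken from the hunk:

#include <cstdio>

enum {
    JSPROP_READONLY = 0x02,   // values assumed for illustration
    JSPROP_GETTER   = 0x10,
    JSPROP_SETTER   = 0x20,
    JSPROP_SHARED   = 0x40
};

struct ScopePropSketch {
    unsigned attrs;
    bool hasGetterValue() const { return (attrs & JSPROP_GETTER) != 0; }   // was: attrs & JSPROP_GETTER
    bool hasSetterValue() const { return (attrs & JSPROP_SETTER) != 0; }   // was: attrs & JSPROP_SETTER
    bool writable()       const { return (attrs & JSPROP_READONLY) == 0; } // was: !(attrs & JSPROP_READONLY)
    bool hasSlot()        const { return (attrs & JSPROP_SHARED) == 0; }   // was: !(attrs & JSPROP_SHARED)
};

int main() {
    ScopePropSketch shared = { JSPROP_SHARED | JSPROP_SETTER };
    printf("hasSlot=%d hasSetterValue=%d\n", shared.hasSlot(), shared.hasSetterValue());
    return 0;
}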
v_ins = get(&v); @@ -11259,21 +11248,19 @@ TraceRecorder::setProp(jsval &l, JSPropCacheEntry* entry, JSScopeProperty* sprop // Find obj2. If entry->adding(), the TAG bits are all 0. JSObject* obj2 = obj; - for (jsuword i = PCVCAP_TAG(entry->vcap) >> PCVCAP_PROTOBITS; i; i--) + for (jsuword i = entry->scopeIndex(); i; i--) obj2 = obj2->getParent(); - for (jsuword j = PCVCAP_TAG(entry->vcap) & PCVCAP_PROTOMASK; j; j--) + for (jsuword j = entry->protoIndex(); j; j--) obj2 = obj2->getProto(); scope = OBJ_SCOPE(obj2); JS_ASSERT_IF(entry->adding(), obj2 == obj); // Guard before anything else. - LIns* map_ins = map(obj_ins); - CHECK_STATUS(guardNativePropertyOp(obj, map_ins)); - jsuword pcval; - CHECK_STATUS(guardPropertyCacheHit(obj_ins, map_ins, obj, obj2, entry, pcval)); + PCVal pcval; + CHECK_STATUS(guardPropertyCacheHit(obj_ins, obj, obj2, entry, pcval)); JS_ASSERT(scope->object == obj2); JS_ASSERT(scope->hasProperty(sprop)); - JS_ASSERT_IF(obj2 != obj, sprop->attrs & JSPROP_SHARED); + JS_ASSERT_IF(obj2 != obj, !sprop->hasSlot()); /* * Setting a function-valued property might need to rebrand the object, so @@ -11294,7 +11281,7 @@ TraceRecorder::setProp(jsval &l, JSPropCacheEntry* entry, JSScopeProperty* sprop // Add a property to the object if necessary. if (entry->adding()) { - JS_ASSERT(!(sprop->attrs & JSPROP_SHARED)); + JS_ASSERT(sprop->hasSlot()); if (obj == globalObj) RETURN_STOP("adding a property to the global object"); @@ -11352,8 +11339,8 @@ TraceRecorder::setCallProp(JSObject *callobj, LIns *callobj_ins, JSScopeProperty // have a valid shortid; but we don't use it in that case anyway. JS_ASSERT(sprop->hasShortID()); - LIns* base = lir->insLoad(LIR_ldp, callobj_ins, offsetof(JSObject, dslots)); - lir->insStorei(box_jsval(v, v_ins), base, dslot_index * sizeof(jsval)); + LIns* base = lir->insLoad(LIR_ldp, callobj_ins, offsetof(JSObject, dslots), ACC_OTHER); + lir->insStorei(box_jsval(v, v_ins), base, dslot_index * sizeof(jsval), ACC_OTHER); return RECORD_CONTINUE; } @@ -11375,8 +11362,9 @@ TraceRecorder::setCallProp(JSObject *callobj, LIns *callobj_ins, JSScopeProperty // entry frame. In that case, we must store to the native stack area for // that frame. - LIns *fp_ins = lir->insLoad(LIR_ldp, cx_ins, offsetof(JSContext, fp)); - LIns *fpcallobj_ins = lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, callobj)); + LIns *fp_ins = lir->insLoad(LIR_ldp, cx_ins, offsetof(JSContext, fp), ACC_OTHER); + LIns *fpcallobj_ins = lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, callobj), + ACC_OTHER); LIns *br1 = lir->insBranch(LIR_jf, lir->ins2(LIR_peq, fpcallobj_ins, callobj_ins), NULL); // Case 1: storing to native stack area. @@ -11392,11 +11380,11 @@ TraceRecorder::setCallProp(JSObject *callobj, LIns *callobj_ins, JSScopeProperty // Guard that we are not changing the type of the slot we are storing to. 
LIns *callstackBase_ins = lir->insLoad(LIR_ldp, lirbuf->state, - offsetof(InterpState, callstackBase)); - LIns *frameInfo_ins = lir->insLoad(LIR_ldp, callstackBase_ins, 0); - LIns *typemap_ins = lir->ins2(LIR_addp, frameInfo_ins, INS_CONSTWORD(sizeof(FrameInfo))); + offsetof(InterpState, callstackBase), ACC_OTHER); + LIns *frameInfo_ins = lir->insLoad(LIR_ldp, callstackBase_ins, 0, ACC_OTHER); + LIns *typemap_ins = lir->ins2(LIR_piadd, frameInfo_ins, INS_CONSTWORD(sizeof(FrameInfo))); LIns *type_ins = lir->insLoad(LIR_ldzb, - lir->ins2(LIR_addp, typemap_ins, lir->ins_u2p(slot_ins)), 0, + lir->ins2(LIR_piadd, typemap_ins, lir->ins_u2p(slot_ins)), 0, ACC_READONLY); TraceType type = getCoercedType(v); if (type == TT_INT32 && !isPromoteInt(v_ins)) @@ -11408,10 +11396,10 @@ TraceRecorder::setCallProp(JSObject *callobj, LIns *callobj_ins, JSScopeProperty // Store to the native stack slot. LIns *stackBase_ins = lir->insLoad(LIR_ldp, lirbuf->state, - offsetof(InterpState, stackBase)); + offsetof(InterpState, stackBase), ACC_OTHER); LIns *storeValue_ins = isPromoteInt(v_ins) ? demote(lir, v_ins) : v_ins; lir->insStorei(storeValue_ins, - lir->ins2(LIR_addp, stackBase_ins, lir->ins_u2p(offset_ins)), 0); + lir->ins2(LIR_piadd, stackBase_ins, lir->ins_u2p(offset_ins)), 0, ACC_STORE_ANY); LIns *br2 = lir->insBranch(LIR_j, NULL, NULL); // Case 2: calling builtin. @@ -11433,7 +11421,7 @@ TraceRecorder::setCallProp(JSObject *callobj, LIns *callobj_ins, JSScopeProperty } JS_REQUIRES_STACK AbortableRecordingStatus -TraceRecorder::record_SetPropHit(JSPropCacheEntry* entry, JSScopeProperty* sprop) +TraceRecorder::record_SetPropHit(PropertyCacheEntry* entry, JSScopeProperty* sprop) { jsval& r = stackval(-1); jsval& l = stackval(-2); @@ -11460,7 +11448,7 @@ TraceRecorder::enterDeepBailCall() { // Take snapshot for DeepBail and store it in cx->bailExit. VMSideExit* exit = snapshot(DEEP_BAIL_EXIT); - lir->insStorei(INS_CONSTPTR(exit), cx_ins, offsetof(JSContext, bailExit)); + lir->insStorei(INS_CONSTPTR(exit), cx_ins, offsetof(JSContext, bailExit), ACC_OTHER); // Tell nanojit not to discard or defer stack writes before this call. GuardRecord* guardRec = createGuardRecord(exit); @@ -11475,7 +11463,7 @@ JS_REQUIRES_STACK void TraceRecorder::leaveDeepBailCall() { // Keep cx->bailExit null when it's invalid. - lir->insStorei(INS_NULL(), cx_ins, offsetof(JSContext, bailExit)); + lir->insStorei(INS_NULL(), cx_ins, offsetof(JSContext, bailExit), ACC_OTHER); } JS_REQUIRES_STACK void @@ -11484,7 +11472,7 @@ TraceRecorder::finishGetProp(LIns* obj_ins, LIns* vp_ins, LIns* ok_ins, jsval* o // Store the boxed result (and this-object, if JOF_CALLOP) before the // guard. The deep-bail case requires this. If the property get fails, // these slots will be ignored anyway. - LIns* result_ins = lir->insLoad(LIR_ldp, vp_ins, 0); + LIns* result_ins = lir->insLoad(LIR_ldp, vp_ins, 0, ACC_OTHER); set(outp, result_ins); if (js_CodeSpec[*cx->fp->regs->pc].format & JOF_CALLOP) set(outp + 1, obj_ins); @@ -11572,7 +11560,9 @@ TraceRecorder::getPropertyByName(LIns* obj_ins, jsval* idvalp, jsval* outp) // GetPropertyByName can assign to *idvalp, so the tracker has an incorrect // entry for that address. Correct it. (If the value in the address is // never used again, the usual case, Nanojit will kill this load.) - tracker.set(idvalp, lir->insLoad(LIR_ldp, idvalp_ins, 0)); + // The AccSet could be made more precise with some effort (idvalp_ins may + // equal 'sp+k'), but it's not worth it because this case is rare. 
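(The ACC_STACK|ACC_OTHER load that the note above describes follows this aside.) Threading an AccSet through every insLoad/insStorei is the backbone of this patch: each access declares which memory regions it may touch, and the optimizer may reorder or eliminate across two accesses only when their sets are disjoint. A toy model with illustrative flag values; nanojit's real definitions differ:

#include <cstdint>
#include <cstdio>

typedef uint32_t AccSet;
static const AccSet ACC_READONLY  = 1u << 0;   // never stored to while on trace
static const AccSet ACC_STACK     = 1u << 1;   // the native stack area
static const AccSet ACC_RSTACK    = 1u << 2;   // the call-depth (return) stack
static const AccSet ACC_OTHER     = 1u << 3;   // everything else
static const AccSet ACC_STORE_ANY = ACC_STACK | ACC_RSTACK | ACC_OTHER;
static const AccSet ACC_LOAD_ANY  = ACC_STORE_ANY | ACC_READONLY;

// May a store tagged storeSet change what a load tagged loadSet observes?
// Note ACC_READONLY appears in no store set, so such loads can be hoisted.
static bool mayAlias(AccSet loadSet, AccSet storeSet) {
    return (loadSet & storeSet) != 0;
}

int main() {
    printf("%d\n", mayAlias(ACC_READONLY, ACC_STORE_ANY));       // 0: safe to hoist the load
    printf("%d\n", mayAlias(ACC_STACK | ACC_OTHER, ACC_OTHER));  // 1: must keep the order
    return 0;
}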
+ tracker.set(idvalp, lir->insLoad(LIR_ldp, idvalp_ins, 0, ACC_STACK|ACC_OTHER)); finishGetProp(obj_ins, vp_ins, ok_ins, outp); leaveDeepBailCall(); @@ -11584,7 +11574,7 @@ GetPropertyByIndex(JSContext* cx, JSObject* obj, int32 index, jsval* vp) { LeaveTraceIfGlobalObject(cx, obj); - JSAutoTempIdRooter idr(cx); + AutoIdRooter idr(cx); if (!js_Int32ToId(cx, index, idr.addr()) || !obj->getProperty(cx, idr.id(), vp)) { SetBuiltinError(cx); return JS_FALSE; @@ -11681,9 +11671,9 @@ JS_DEFINE_CALLINFO_4(static, BOOL_FAIL, GetPropertyWithNativeGetter, JS_REQUIRES_STACK RecordingStatus TraceRecorder::getPropertyWithNativeGetter(LIns* obj_ins, JSScopeProperty* sprop, jsval* outp) { - JS_ASSERT(!(sprop->attrs & JSPROP_GETTER)); + JS_ASSERT(!sprop->hasGetterValue()); JS_ASSERT(sprop->slot == SPROP_INVALID_SLOT); - JS_ASSERT(!SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop)); + JS_ASSERT(!sprop->hasDefaultGetterOrIsMethod()); // Call GetPropertyWithNativeGetter. See note in getPropertyByName about vp. // FIXME - We should call the getter directly. Using a builtin function for @@ -11747,7 +11737,7 @@ TraceRecorder::record_JSOP_GETELEM() return InjectStatus(getPropertyByName(obj_ins, &idx, &lval)); } - if (obj->getClass() == &js_ArgumentsClass) { + if (obj->isArguments()) { unsigned depth; JSStackFrame *afp = guardArguments(obj, obj_ins, &depth); if (afp) { @@ -11794,7 +11784,9 @@ TraceRecorder::record_JSOP_GETELEM() // is accurate. // Note: this relies on the assumption that we abort on setting an element of // an arguments object in any deeper frame. - LIns* fip_ins = lir->insLoad(LIR_ldp, lirbuf->rp, (callDepth-depth)*sizeof(FrameInfo*)); + LIns* fip_ins = lir->insLoad(LIR_ldp, lirbuf->rp, + (callDepth-depth)*sizeof(FrameInfo*), + ACC_RSTACK); typemap_ins = lir->ins2(LIR_piadd, fip_ins, INS_CONSTWORD(sizeof(FrameInfo) + 2/*callee,this*/ * sizeof(TraceType))); } @@ -11818,7 +11810,11 @@ TraceRecorder::record_JSOP_GETELEM() lir->ins_u2p(lir->ins2(LIR_mul, idx_ins, INS_CONST(sizeof(double))))); - v_ins = stackLoad(argi_addr_ins, type); + // The AccSet could be more precise, but ValidateWriter + // doesn't recognise the complex expression involving 'sp' as + // a STACK access, and it's not worth the effort to be more + // precise because this case is rare. + v_ins = stackLoad(argi_addr_ins, ACC_LOAD_ANY, type); } JS_ASSERT(v_ins); set(&lval, v_ins); @@ -11847,7 +11843,7 @@ TraceRecorder::record_JSOP_GETELEM() jsval* vp; LIns* addr_ins; - guardConstClass(obj, obj_ins, obj->getClass(), snapshot(BRANCH_EXIT)); + guardClass(obj, obj_ins, obj->getClass(), snapshot(BRANCH_EXIT), ACC_READONLY); CHECK_STATUS_A(typedArrayElement(lval, idx, vp, v_ins, addr_ins)); set(&lval, v_ins); if (call) @@ -11908,7 +11904,7 @@ TraceRecorder::initOrSetPropertyByName(LIns* obj_ins, jsval* idvalp, jsval* rval } else { // See note in getPropertyByName about vp. 
LIns* vp_ins = addName(lir->insAlloc(sizeof(jsval)), "vp"); - lir->insStorei(rval_ins, vp_ins, 0); + lir->insStorei(rval_ins, vp_ins, 0, ACC_OTHER); LIns* args[] = {vp_ins, idvalp_ins, obj_ins, cx_ins}; ok_ins = lir->insCall(&SetPropertyByName_ci, args); } @@ -11923,7 +11919,7 @@ SetPropertyByIndex(JSContext* cx, JSObject* obj, int32 index, jsval* vp) { LeaveTraceIfGlobalObject(cx, obj); - JSAutoTempIdRooter idr(cx); + AutoIdRooter idr(cx); if (!js_Int32ToId(cx, index, idr.addr()) || !obj->setProperty(cx, idr.id(), vp)) { SetBuiltinError(cx); return JS_FALSE; @@ -11938,7 +11934,7 @@ InitPropertyByIndex(JSContext* cx, JSObject* obj, int32 index, jsval val) { LeaveTraceIfGlobalObject(cx, obj); - JSAutoTempIdRooter idr(cx); + AutoIdRooter idr(cx); if (!js_Int32ToId(cx, index, idr.addr()) || !obj->defineProperty(cx, idr.id(), val, NULL, NULL, JSPROP_ENUMERATE)) { SetBuiltinError(cx); @@ -11965,7 +11961,7 @@ TraceRecorder::initOrSetPropertyByIndex(LIns* obj_ins, LIns* index_ins, jsval* r } else { // See note in getPropertyByName about vp. LIns* vp_ins = addName(lir->insAlloc(sizeof(jsval)), "vp"); - lir->insStorei(rval_ins, vp_ins, 0); + lir->insStorei(rval_ins, vp_ins, 0, ACC_OTHER); LIns* args[] = {vp_ins, index_ins, obj_ins, cx_ins}; ok_ins = lir->insCall(&SetPropertyByIndex_ci, args); } @@ -12003,16 +11999,16 @@ TraceRecorder::setElem(int lval_spindex, int idx_spindex, int v_spindex) // Fast path: assigning to element of typed array. // Ensure array is a typed array and is the same type as what was written - guardConstClass(obj, obj_ins, obj->getClass(), snapshot(BRANCH_EXIT)); + guardClass(obj, obj_ins, obj->getClass(), snapshot(BRANCH_EXIT), ACC_READONLY); js::TypedArray* tarray = js::TypedArray::fromJSObject(obj); LIns* priv_ins = stobj_get_const_fslot(obj_ins, JSSLOT_PRIVATE); - // The index was on the stack and is therefore a LIR float. Force it to - // be an integer. - idx_ins = makeNumberInt32(idx_ins); - + // The index was on the stack and is therefore a LIR float; force it to + // be an integer. 
+        idx_ins = makeNumberInt32(idx_ins);
+
         // Ensure idx >= 0 && idx < length (by using uint32)
         lir->insGuard(LIR_xf,
                       lir->ins2(LIR_ult,
@@ -12027,80 +12023,85 @@ TraceRecorder::setElem(int lval_spindex, int idx_spindex, int v_spindex)
         LIns* pidx_ins = lir->ins_u2p(idx_ins);
         LIns* addr_ins = 0;

-        if (isNumber(v)) {
-            if (isPromoteInt(v_ins) &&
-                tarray->type != js::TypedArray::TYPE_FLOAT32 &&
-                tarray->type != js::TypedArray::TYPE_FLOAT64) {
-                LIns *v_ins_int = demote(lir, v_ins);
-
-                if (tarray->type == js::TypedArray::TYPE_UINT8_CLAMPED) {
-                    /* Wrap v_ins_int in some magic to clamp it */
-                    v_ins_int = lir->ins_choose(lir->ins2i(LIR_lt, v_ins_int, 0),
-                                                lir->insImm(0),
-                                                lir->ins_choose(lir->ins2i(LIR_gt, v_ins_int, 0xff),
-                                                                lir->insImm(0xff),
-                                                                v_ins_int,
-                                                                avmplus::AvmCore::use_cmov()),
-                                                avmplus::AvmCore::use_cmov());
-                }
-
-                switch (tarray->type) {
-                  case js::TypedArray::TYPE_INT8:
-                  case js::TypedArray::TYPE_UINT8:
-                  case js::TypedArray::TYPE_UINT8_CLAMPED:
-                    addr_ins = lir->ins2(LIR_piadd, data_ins, pidx_ins);
-                    lir->insStore(LIR_stb, v_ins_int, addr_ins, 0);
-                    break;
-                  case js::TypedArray::TYPE_INT16:
-                  case js::TypedArray::TYPE_UINT16:
-                    addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 1));
-                    lir->insStore(LIR_sts, v_ins_int, addr_ins, 0);
-                    break;
-                  case js::TypedArray::TYPE_INT32:
-                  case js::TypedArray::TYPE_UINT32:
-                    addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 2));
-                    lir->insStore(LIR_sti, v_ins_int, addr_ins, 0);
-                    break;
-                  case js::TypedArray::TYPE_FLOAT32:
-                  case js::TypedArray::TYPE_FLOAT64:
-                  default:
-                    JS_NOT_REACHED("Unknown typed array in tracer");
-                }
+        // If v isn't a number, convert it first: null becomes 0, undefined
+        // and non-null objects become NaN, strings go through
+        // js_StringToNumber, and booleans are widened with i2f.
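Before the per-case conversion code that follows, here is a scalar sketch of exactly that coercion; std::strtod stands in for js_StringToNumber, and the enum and struct are made up for illustration:

#include <cstdio>
#include <cstdlib>
#include <limits>

enum ValKind { VK_NUMBER, VK_NULL, VK_VOID, VK_STRING, VK_BOOL, VK_OBJECT };

struct ValSketch {
    ValKind     kind;
    double      num;   // VK_NUMBER
    const char* str;   // VK_STRING
    bool        b;     // VK_BOOL
};

static double toNumberForTypedArray(const ValSketch& v) {
    switch (v.kind) {
      case VK_NUMBER: return v.num;
      case VK_NULL:   return 0;                          // null -> 0
      case VK_STRING: return std::strtod(v.str, NULL);   // the js_StringToNumber path
      case VK_BOOL:   return v.b ? 1 : 0;                // the i2f path
      default:        return std::numeric_limits<double>::quiet_NaN();  // void/objects -> NaN
    }
}

// Scalar twin of the UINT8_CLAMPED store path further down.
static int clampUint8(int v) { return v < 0 ? 0 : (v > 0xff ? 0xff : v); }

int main() {
    ValSketch s = { VK_STRING, 0, "3.5", false };
    printf("%g %d\n", toNumberForTypedArray(s), clampUint8(300));  // prints 3.5 255
    return 0;
}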
+ if (!isNumber(v)) { + if (JSVAL_IS_NULL(v)) { + v_ins = INS_CONST(0); + } else if (JSVAL_IS_VOID(v)) { + v_ins = lir->insImmf(js_NaN); + } else if (JSVAL_IS_STRING(v)) { + LIns* args[] = { v_ins, cx_ins }; + v_ins = lir->insCall(&js_StringToNumber_ci, args); + } else if (JSVAL_IS_SPECIAL(v)) { + JS_ASSERT(JSVAL_IS_BOOLEAN(v)); + v_ins = i2f(v_ins); } else { - switch (tarray->type) { - case js::TypedArray::TYPE_INT8: - case js::TypedArray::TYPE_UINT8: - addr_ins = lir->ins2(LIR_piadd, data_ins, pidx_ins); - lir->insStore(LIR_stb, lir->ins1(LIR_f2i, v_ins), addr_ins, 0); - break; - case js::TypedArray::TYPE_INT16: - case js::TypedArray::TYPE_UINT16: - addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 1)); - lir->insStore(LIR_sts, lir->ins1(LIR_f2i, v_ins), addr_ins, 0); - break; - case js::TypedArray::TYPE_INT32: - case js::TypedArray::TYPE_UINT32: - addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 2)); - lir->insStore(LIR_sti, lir->ins1(LIR_f2i, v_ins), addr_ins, 0); - break; - case js::TypedArray::TYPE_FLOAT32: - addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 2)); - lir->insStore(LIR_st32f, v_ins, addr_ins, 0); - break; - case js::TypedArray::TYPE_FLOAT64: - addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 3)); - lir->insStore(LIR_stfi, v_ins, addr_ins, 0); - break; - case js::TypedArray::TYPE_UINT8_CLAMPED: - addr_ins = lir->ins2(LIR_piadd, data_ins, pidx_ins); - lir->insStore(LIR_stb, lir->insCall(&js_TypedArray_uint8_clamp_double_ci, &v_ins), addr_ins, 0); - break; - default: - JS_NOT_REACHED("Unknown typed array type in tracer"); - } + v_ins = lir->insImmf(js_NaN); } - } else { - RETURN_STOP_A("can't trace setting typed array element to non-number value"); + } + + switch (tarray->type) { + case js::TypedArray::TYPE_INT8: + case js::TypedArray::TYPE_INT16: + case js::TypedArray::TYPE_INT32: + v_ins = f2i(v_ins); + break; + case js::TypedArray::TYPE_UINT8: + case js::TypedArray::TYPE_UINT16: + case js::TypedArray::TYPE_UINT32: + v_ins = f2u(v_ins); + break; + case js::TypedArray::TYPE_UINT8_CLAMPED: + if (isPromoteInt(v_ins)) { + v_ins = demote(lir, v_ins); + v_ins = lir->ins_choose(lir->ins2i(LIR_lt, v_ins, 0), + lir->insImm(0), + lir->ins_choose(lir->ins2i(LIR_gt, v_ins, 0xff), + lir->insImm(0xff), + v_ins, + avmplus::AvmCore::use_cmov()), + avmplus::AvmCore::use_cmov()); + } else { + v_ins = lir->insCall(&js_TypedArray_uint8_clamp_double_ci, &v_ins); + } + break; + case js::TypedArray::TYPE_FLOAT32: + case js::TypedArray::TYPE_FLOAT64: + // Do nothing, this is already a float + break; + default: + JS_NOT_REACHED("Unknown typed array type in tracer"); + } + + switch (tarray->type) { + case js::TypedArray::TYPE_INT8: + case js::TypedArray::TYPE_UINT8_CLAMPED: + case js::TypedArray::TYPE_UINT8: + addr_ins = lir->ins2(LIR_piadd, data_ins, pidx_ins); + lir->insStore(LIR_stb, v_ins, addr_ins, 0, ACC_OTHER); + break; + case js::TypedArray::TYPE_INT16: + case js::TypedArray::TYPE_UINT16: + addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 1)); + lir->insStore(LIR_sts, v_ins, addr_ins, 0, ACC_OTHER); + break; + case js::TypedArray::TYPE_INT32: + case js::TypedArray::TYPE_UINT32: + addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 2)); + lir->insStore(LIR_sti, v_ins, addr_ins, 0, ACC_OTHER); + break; + case js::TypedArray::TYPE_FLOAT32: + addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 2)); + lir->insStore(LIR_st32f, 
v_ins, addr_ins, 0, ACC_OTHER); + break; + case js::TypedArray::TYPE_FLOAT64: + addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 3)); + lir->insStore(LIR_stfi, v_ins, addr_ins, 0, ACC_OTHER); + break; + default: + JS_NOT_REACHED("Unknown typed array type in tracer"); } } else if (JSVAL_TO_INT(idx) < 0 || !obj->isDenseArray()) { CHECK_STATUS_A(initOrSetPropertyByIndex(obj_ins, idx_ins, &v, @@ -12165,16 +12166,16 @@ TraceRecorder::record_JSOP_CALLNAME() LIns* obj_ins = INS_CONSTOBJ(globalObj); JSObject* obj2; - jsuword pcval; + PCVal pcval; CHECK_STATUS_A(test_property_cache(obj, obj_ins, obj2, pcval)); - if (PCVAL_IS_NULL(pcval) || !PCVAL_IS_OBJECT(pcval)) + if (pcval.isNull() || !pcval.isObject()) RETURN_STOP_A("callee is not an object"); - JS_ASSERT(HAS_FUNCTION_CLASS(PCVAL_TO_OBJECT(pcval))); + JS_ASSERT(pcval.toObject()->isFunction()); - stack(0, INS_CONSTOBJ(PCVAL_TO_OBJECT(pcval))); + stack(0, INS_CONSTOBJ(pcval.toObject())); stack(1, obj_ins); return ARECORD_CONTINUE; } @@ -12217,7 +12218,7 @@ TraceRecorder::upvar(JSScript* script, JSUpvarArray* uva, uintN index, jsval& v) JSStackFrame* fp = cx->display[level]; const CallInfo* ci; int32 slot; - if (!fp->fun) { + if (!fp->fun || (fp->flags & JSFRAME_EVAL)) { ci = &GetUpvarStackOnTrace_ci; slot = cookieSlot; } else if (cookieSlot < fp->fun->nargs) { @@ -12245,14 +12246,14 @@ TraceRecorder::upvar(JSScript* script, JSUpvarArray* uva, uintN index, jsval& v) addName(lir->ins2(LIR_eq, call_ins, lir->insImm(type)), "guard(type-stable upvar)"), BRANCH_EXIT); - return stackLoad(outp, type); + return stackLoad(outp, ACC_OTHER, type); } /* * Generate LIR to load a value from the native stack. This method ensures that * the correct LIR load operator is used. */ -LIns* TraceRecorder::stackLoad(LIns* base, uint8 type) +LIns* TraceRecorder::stackLoad(LIns* base, AccSet accSet, uint8 type) { LOpcode loadOp; switch (type) { @@ -12266,7 +12267,8 @@ LIns* TraceRecorder::stackLoad(LIns* base, uint8 type) loadOp = LIR_ldp; break; case TT_INT32: - case TT_PSEUDOBOOLEAN: + case TT_SPECIAL: + case TT_VOID: loadOp = LIR_ld; break; case TT_JSVAL: @@ -12275,7 +12277,7 @@ LIns* TraceRecorder::stackLoad(LIns* base, uint8 type) return NULL; } - LIns* result = lir->insLoad(loadOp, base, 0); + LIns* result = lir->insLoad(loadOp, base, 0, accSet); if (type == TT_INT32) result = lir->ins1(LIR_i2f, result); return result; @@ -12312,8 +12314,8 @@ TraceRecorder::record_JSOP_GETDSLOT() LIns* callee_ins = get(&cx->fp->argv[-2]); unsigned index = GET_UINT16(cx->fp->regs->pc); - LIns* dslots_ins = lir->insLoad(LIR_ldp, callee_ins, offsetof(JSObject, dslots)); - LIns* v_ins = lir->insLoad(LIR_ldp, dslots_ins, index * sizeof(jsval), ACC_READONLY); + LIns* dslots_ins = lir->insLoad(LIR_ldp, callee_ins, offsetof(JSObject, dslots), ACC_OTHER); + LIns* v_ins = lir->insLoad(LIR_ldp, dslots_ins, index * sizeof(jsval), ACC_OTHER); stack(0, unbox_jsval(callee->dslots[index], v_ins, snapshot(BRANCH_EXIT))); return ARECORD_CONTINUE; @@ -12399,14 +12401,14 @@ TraceRecorder::guardCallee(jsval& callee) JS_REQUIRES_STACK JSStackFrame * TraceRecorder::guardArguments(JSObject *obj, LIns* obj_ins, unsigned *depthp) { - JS_ASSERT(obj->getClass() == &js_ArgumentsClass); + JS_ASSERT(obj->isArguments()); JSStackFrame *afp = frameIfInRange(obj, depthp); if (!afp) return NULL; VMSideExit *exit = snapshot(MISMATCH_EXIT); - guardConstClass(obj, obj_ins, &js_ArgumentsClass, exit); + guardClass(obj, obj_ins, &js_ArgumentsClass, exit, ACC_READONLY); LIns* args_ins = 
get(&afp->argsobj); LIns* cmp = lir->ins2(LIR_peq, args_ins, obj_ins); @@ -12426,12 +12428,12 @@ TraceRecorder::interpretedFunctionCall(jsval& fval, JSFunction* fun, uintN argc, * and does not call any TR::record_*CallComplete hook. */ if (fun->u.i.script->isEmpty()) { - LIns* rval_ins = constructing ? stack(-1 - argc) : INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID)); + LIns* rval_ins = constructing ? stack(-1 - argc) : INS_VOID(); stack(-2 - argc, rval_ins); return RECORD_CONTINUE; } - if (JS_GetGlobalForObject(cx, JSVAL_TO_OBJECT(fval)) != globalObj) + if (JSVAL_TO_OBJECT(fval)->getGlobal() != globalObj) RETURN_STOP("JSOP_CALL or JSOP_NEW crosses global scopes"); JSStackFrame* fp = cx->fp; @@ -12470,7 +12472,7 @@ TraceRecorder::interpretedFunctionCall(jsval& fval, JSFunction* fun, uintN argc, fi = traceMonitor->frameCache->memoize(fi); if (!fi) RETURN_STOP("out of memory"); - lir->insStorei(INS_CONSTPTR(fi), lirbuf->rp, callDepth * sizeof(FrameInfo*)); + lir->insStorei(INS_CONSTPTR(fi), lirbuf->rp, callDepth * sizeof(FrameInfo*), ACC_RSTACK); #if defined JS_JIT_SPEW debug_only_printf(LC_TMTracer, "iFC frameinfo=%p, stack=%d, map=", (void*)fi, @@ -12574,14 +12576,14 @@ TraceRecorder::record_JSOP_APPLY() * for apply uses imacros to handle a specific number of arguments. */ if (aobj->isDenseArray()) { - guardDenseArray(aobj, aobj_ins); + guardDenseArray(aobj, aobj_ins, MISMATCH_EXIT); length = jsuint(aobj->fslots[JSSLOT_ARRAY_LENGTH]); guard(true, lir->ins2i(LIR_eq, p2i(stobj_get_fslot(aobj_ins, JSSLOT_ARRAY_LENGTH)), length), BRANCH_EXIT); - } else if (OBJ_GET_CLASS(cx, aobj) == &js_ArgumentsClass) { + } else if (aobj->isArguments()) { unsigned depth; JSStackFrame *afp = guardArguments(aobj, aobj_ins, &depth); if (!afp) @@ -12648,9 +12650,10 @@ TraceRecorder::record_NativeCallComplete() if (JSTN_ERRTYPE(pendingSpecializedNative) == FAIL_STATUS) { /* Keep cx->bailExit null when it's invalid. */ - lir->insStorei(INS_NULL(), cx_ins, (int) offsetof(JSContext, bailExit)); + lir->insStorei(INS_NULL(), cx_ins, (int) offsetof(JSContext, bailExit), ACC_OTHER); - LIns* status = lir->insLoad(LIR_ld, lirbuf->state, (int) offsetof(InterpState, builtinStatus)); + LIns* status = lir->insLoad(LIR_ld, lirbuf->state, + (int) offsetof(InterpState, builtinStatus), ACC_OTHER); if (pendingSpecializedNative == &generatedSpecializedNative) { LIns* ok_ins = v_ins; @@ -12669,7 +12672,7 @@ TraceRecorder::record_NativeCallComplete() * vector for native function calls. The actual return value of the native is a JSBool * indicating the error status. */ - v_ins = lir->insLoad(LIR_ldp, native_rval_ins, 0); + v_ins = lir->insLoad(LIR_ldp, native_rval_ins, 0, ACC_OTHER); if (*pc == JSOP_NEW) { LIns* x = lir->ins_peq0(lir->ins2(LIR_piand, v_ins, INS_CONSTWORD(JSVAL_TAGMASK))); x = lir->ins_choose(x, v_ins, INS_CONSTWORD(0), avmplus::AvmCore::use_cmov()); @@ -12696,7 +12699,7 @@ TraceRecorder::record_NativeCallComplete() } else { /* Convert the result to double if the builtin returns int32. 
*/ if (JSVAL_IS_NUMBER(v) && - (pendingSpecializedNative->builtin->_argtypes & ARGSIZE_MASK_ANY) == ARGSIZE_I) { + pendingSpecializedNative->builtin->returnType() == ARGTYPE_I) { set(&v, lir->ins1(LIR_i2f, v_ins)); } } @@ -12718,7 +12721,7 @@ TraceRecorder::name(jsval*& vp, LIns*& ins, NameResult& nr) uint32 slot; JSObject* obj2; - jsuword pcval; + PCVal pcval; /* * Property cache ensures that we are dealing with an existing property, @@ -12727,7 +12730,7 @@ TraceRecorder::name(jsval*& vp, LIns*& ins, NameResult& nr) CHECK_STATUS_A(test_property_cache(obj, obj_ins, obj2, pcval)); /* Abort if property doesn't exist (interpreter will report an error.) */ - if (PCVAL_IS_NULL(pcval)) + if (pcval.isNull()) RETURN_STOP_A("named property not found"); /* Insist on obj being the directly addressed object. */ @@ -12735,21 +12738,21 @@ TraceRecorder::name(jsval*& vp, LIns*& ins, NameResult& nr) RETURN_STOP_A("name() hit prototype chain"); /* Don't trace getter or setter calls, our caller wants a direct slot. */ - if (PCVAL_IS_SPROP(pcval)) { - JSScopeProperty* sprop = PCVAL_TO_SPROP(pcval); + if (pcval.isSprop()) { + JSScopeProperty* sprop = pcval.toSprop(); if (!isValidSlot(OBJ_SCOPE(obj), sprop)) RETURN_STOP_A("name() not accessing a valid slot"); slot = sprop->slot; } else { - if (!PCVAL_IS_SLOT(pcval)) + if (!pcval.isSlot()) RETURN_STOP_A("PCE is not a slot"); - slot = PCVAL_TO_SLOT(pcval); + slot = pcval.toSlot(); } if (!lazilyImportGlobalSlot(slot)) RETURN_STOP_A("lazy import of global slot failed"); - vp = &STOBJ_GET_SLOT(obj, slot); + vp = &obj->getSlotRef(slot); ins = get(vp); nr.tracked = true; return ARECORD_CONTINUE; @@ -12758,7 +12761,7 @@ TraceRecorder::name(jsval*& vp, LIns*& ins, NameResult& nr) static JSObject* FASTCALL MethodReadBarrier(JSContext* cx, JSObject* obj, JSScopeProperty* sprop, JSObject* funobj) { - JSAutoTempValueRooter tvr(cx, funobj); + AutoValueRooter tvr(cx, funobj); if (!OBJ_SCOPE(obj)->methodReadBarrier(cx, sprop, tvr.addr())) return NULL; @@ -12792,11 +12795,11 @@ TraceRecorder::prop(JSObject* obj, LIns* obj_ins, uint32 *slotp, LIns** v_insp, * and guards the shape for us. */ JSObject* obj2; - jsuword pcval; + PCVal pcval; CHECK_STATUS_A(test_property_cache(obj, obj_ins, obj2, pcval)); /* Check for non-existent property reference, which results in undefined. 
*/ - if (PCVAL_IS_NULL(pcval)) { + if (pcval.isNull()) { if (slotp) RETURN_STOP_A("property not found"); @@ -12808,7 +12811,7 @@ TraceRecorder::prop(JSObject* obj, LIns* obj_ins, uint32 *slotp, LIns** v_insp, RETURN_STOP_A("can't trace through access to undefined property if " "JSClass.getProperty hook isn't stubbed"); } - guardClass(obj, obj_ins, OBJ_GET_CLASS(cx, obj), snapshot(MISMATCH_EXIT)); + guardClass(obj, obj_ins, OBJ_GET_CLASS(cx, obj), snapshot(MISMATCH_EXIT), ACC_OTHER); /* * This trace will be valid as long as neither the object nor any object @@ -12819,17 +12822,15 @@ TraceRecorder::prop(JSObject* obj, LIns* obj_ins, uint32 *slotp, LIns** v_insp, */ VMSideExit* exit = snapshot(BRANCH_EXIT); do { - LIns* map_ins = map(obj_ins); - LIns* ops_ins; - if (map_is_native(obj->map, map_ins, ops_ins)) { - CHECK_STATUS_A(InjectStatus(guardShape(obj_ins, obj, OBJ_SHAPE(obj), "guard(shape)", - map_ins, exit))); + if (obj->isNative()) { + CHECK_STATUS_A(InjectStatus(guardShape(obj_ins, obj, OBJ_SHAPE(obj), + "guard(shape)", exit))); } else if (!guardDenseArray(obj, obj_ins, exit)) { RETURN_STOP_A("non-native object involved in undefined property access"); } } while (guardHasPrototype(obj, obj_ins, &obj, &obj_ins, exit)); - set(outp, INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID))); + set(outp, INS_VOID()); return ARECORD_CONTINUE; } @@ -12837,7 +12838,7 @@ TraceRecorder::prop(JSObject* obj, LIns* obj_ins, uint32 *slotp, LIns** v_insp, } JS_REQUIRES_STACK AbortableRecordingStatus -TraceRecorder::propTail(JSObject* obj, LIns* obj_ins, JSObject* obj2, jsuword pcval, +TraceRecorder::propTail(JSObject* obj, LIns* obj_ins, JSObject* obj2, PCVal pcval, uint32 *slotp, LIns** v_insp, jsval *outp) { const JSCodeSpec& cs = js_CodeSpec[*cx->fp->regs->pc]; @@ -12848,18 +12849,18 @@ TraceRecorder::propTail(JSObject* obj, LIns* obj_ins, JSObject* obj2, jsuword pc uint32 slot; bool isMethod; - if (PCVAL_IS_SPROP(pcval)) { - sprop = PCVAL_TO_SPROP(pcval); + if (pcval.isSprop()) { + sprop = pcval.toSprop(); JS_ASSERT(OBJ_SCOPE(obj2)->hasProperty(sprop)); - if (setflags && !SPROP_HAS_STUB_SETTER(sprop)) + if (setflags && !sprop->hasDefaultSetter()) RETURN_STOP_A("non-stub setter"); - if (setflags && (sprop->attrs & JSPROP_READONLY)) + if (setflags && !sprop->writable()) RETURN_STOP_A("writing to a readonly property"); - if (!SPROP_HAS_STUB_GETTER_OR_IS_METHOD(sprop)) { + if (!sprop->hasDefaultGetterOrIsMethod()) { if (slotp) RETURN_STOP_A("can't trace non-stub getter for this opcode"); - if (sprop->attrs & JSPROP_GETTER) + if (sprop->hasGetterValue()) RETURN_STOP_A("script getter"); if (sprop->slot == SPROP_INVALID_SLOT) return InjectStatus(getPropertyWithNativeGetter(obj_ins, sprop, outp)); @@ -12871,9 +12872,9 @@ TraceRecorder::propTail(JSObject* obj, LIns* obj_ins, JSObject* obj2, jsuword pc isMethod = sprop->isMethod(); JS_ASSERT_IF(isMethod, OBJ_SCOPE(obj2)->hasMethodBarrier()); } else { - if (!PCVAL_IS_SLOT(pcval)) + if (!pcval.isSlot()) RETURN_STOP_A("PCE is not a slot"); - slot = PCVAL_TO_SLOT(pcval); + slot = pcval.toSlot(); sprop = NULL; isMethod = false; } @@ -12902,7 +12903,7 @@ TraceRecorder::propTail(JSObject* obj, LIns* obj_ins, JSObject* obj2, jsuword pc } LIns* dslots_ins = NULL; - LIns* v_ins = unbox_jsval(STOBJ_GET_SLOT(obj, slot), + LIns* v_ins = unbox_jsval(obj->getSlot(slot), stobj_get_slot(obj_ins, slot, dslots_ins), snapshot(BRANCH_EXIT)); @@ -12947,7 +12948,7 @@ TraceRecorder::denseArrayElement(jsval& oval, jsval& ival, jsval*& vp, LIns*& v_ VMSideExit* exit = snapshot(BRANCH_EXIT); /* check 
that the index is within bounds */ - LIns* dslots_ins = lir->insLoad(LIR_ldp, obj_ins, offsetof(JSObject, dslots)); + LIns* dslots_ins = lir->insLoad(LIR_ldp, obj_ins, offsetof(JSObject, dslots), ACC_OTHER); jsuint capacity = js_DenseArrayCapacity(obj); bool within = (jsuint(idx) < jsuint(obj->fslots[JSSLOT_ARRAY_LENGTH]) && jsuint(idx) < capacity); if (!within) { @@ -12975,9 +12976,8 @@ TraceRecorder::denseArrayElement(jsval& oval, jsval& ival, jsval*& vp, LIns*& v_ LIns* br4 = lir->insBranch(LIR_jf, lir->ins2(LIR_pult, pidx_ins, - lir->insLoad(LIR_ldp, - dslots_ins, - -(int)sizeof(jsval))), + lir->insLoad(LIR_ldp, dslots_ins, + -(int)sizeof(jsval), ACC_OTHER)), NULL); lir->insGuard(LIR_x, NULL, createGuardRecord(exit)); LIns* label = lir->ins0(LIR_label); @@ -12990,7 +12990,7 @@ TraceRecorder::denseArrayElement(jsval& oval, jsval& ival, jsval*& vp, LIns*& v_ CHECK_STATUS(guardPrototypeHasNoIndexedProperties(obj, obj_ins, MISMATCH_EXIT)); // Return undefined and indicate that we didn't actually read this (addr_ins). - v_ins = lir->insImm(JSVAL_TO_SPECIAL(JSVAL_VOID)); + v_ins = INS_VOID(); addr_ins = NULL; return RECORD_CONTINUE; } @@ -13018,16 +13018,16 @@ TraceRecorder::denseArrayElement(jsval& oval, jsval& ival, jsval*& vp, LIns*& v_ guard(true, lir->ins2(LIR_pult, pidx_ins, - lir->insLoad(LIR_ldp, dslots_ins, 0 - (int)sizeof(jsval))), + lir->insLoad(LIR_ldp, dslots_ins, 0 - (int)sizeof(jsval), ACC_OTHER)), exit); /* Load the value and guard on its type to unbox it. */ vp = &obj->dslots[jsuint(idx)]; addr_ins = lir->ins2(LIR_piadd, dslots_ins, lir->ins2i(LIR_pilsh, pidx_ins, (sizeof(jsval) == 4) ? 2 : 3)); - v_ins = unbox_jsval(*vp, lir->insLoad(LIR_ldp, addr_ins, 0), exit); + v_ins = unbox_jsval(*vp, lir->insLoad(LIR_ldp, addr_ins, 0, ACC_OTHER), exit); - if (JSVAL_IS_SPECIAL(*vp)) { + if (JSVAL_IS_SPECIAL(*vp) && !JSVAL_IS_VOID(*vp)) { /* * If we read a hole from the array, convert it to undefined and guard * that there are no indexed properties along the prototype chain. @@ -13062,9 +13062,16 @@ TraceRecorder::typedArrayElement(jsval& oval, jsval& ival, jsval*& vp, LIns*& v_ /* priv_ins will load the TypedArray* */ LIns* priv_ins = stobj_get_const_fslot(obj_ins, JSSLOT_PRIVATE); - /* for out-of-range, just let the interpreter handle it */ - if ((jsuint) idx >= tarray->length) - return ARECORD_STOP; + /* for out-of-range, do the same thing that the interpreter does, which is return undefined */ + if ((jsuint) idx >= tarray->length) { + guard(false, + lir->ins2(LIR_ult, + idx_ins, + lir->insLoad(LIR_ld, priv_ins, js::TypedArray::lengthOffset(), ACC_READONLY)), + BRANCH_EXIT); + v_ins = INS_VOID(); + return ARECORD_CONTINUE; + } /* * Ensure idx < length @@ -13079,7 +13086,7 @@ TraceRecorder::typedArrayElement(jsval& oval, jsval& ival, jsval*& vp, LIns*& v_ lir->ins2(LIR_ult, idx_ins, lir->insLoad(LIR_ld, priv_ins, js::TypedArray::lengthOffset(), ACC_READONLY)), - OVERFLOW_EXIT); + BRANCH_EXIT); /* We are now ready to load. Do a different type of load * depending on what type of thing we're loading. 
*/ @@ -13088,36 +13095,36 @@ TraceRecorder::typedArrayElement(jsval& oval, jsval& ival, jsval*& vp, LIns*& v_ switch (tarray->type) { case js::TypedArray::TYPE_INT8: addr_ins = lir->ins2(LIR_piadd, data_ins, pidx_ins); - v_ins = lir->ins1(LIR_i2f, lir->insLoad(LIR_ldsb, addr_ins, 0)); + v_ins = lir->ins1(LIR_i2f, lir->insLoad(LIR_ldsb, addr_ins, 0, ACC_OTHER)); break; case js::TypedArray::TYPE_UINT8: case js::TypedArray::TYPE_UINT8_CLAMPED: addr_ins = lir->ins2(LIR_piadd, data_ins, pidx_ins); - v_ins = lir->ins1(LIR_u2f, lir->insLoad(LIR_ldzb, addr_ins, 0)); + v_ins = lir->ins1(LIR_u2f, lir->insLoad(LIR_ldzb, addr_ins, 0, ACC_OTHER)); break; case js::TypedArray::TYPE_INT16: addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 1)); - v_ins = lir->ins1(LIR_i2f, lir->insLoad(LIR_ldss, addr_ins, 0)); + v_ins = lir->ins1(LIR_i2f, lir->insLoad(LIR_ldss, addr_ins, 0, ACC_OTHER)); break; case js::TypedArray::TYPE_UINT16: addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 1)); - v_ins = lir->ins1(LIR_u2f, lir->insLoad(LIR_ldzs, addr_ins, 0)); + v_ins = lir->ins1(LIR_u2f, lir->insLoad(LIR_ldzs, addr_ins, 0, ACC_OTHER)); break; case js::TypedArray::TYPE_INT32: addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 2)); - v_ins = lir->ins1(LIR_i2f, lir->insLoad(LIR_ld, addr_ins, 0)); + v_ins = lir->ins1(LIR_i2f, lir->insLoad(LIR_ld, addr_ins, 0, ACC_OTHER)); break; case js::TypedArray::TYPE_UINT32: addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 2)); - v_ins = lir->ins1(LIR_u2f, lir->insLoad(LIR_ld, addr_ins, 0)); + v_ins = lir->ins1(LIR_u2f, lir->insLoad(LIR_ld, addr_ins, 0, ACC_OTHER)); break; case js::TypedArray::TYPE_FLOAT32: addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 2)); - v_ins = lir->insLoad(LIR_ld32f, addr_ins, 0); + v_ins = lir->insLoad(LIR_ld32f, addr_ins, 0, ACC_OTHER); break; case js::TypedArray::TYPE_FLOAT64: addr_ins = lir->ins2(LIR_piadd, data_ins, lir->ins2i(LIR_pilsh, pidx_ins, 3)); - v_ins = lir->insLoad(LIR_ldf, addr_ins, 0); + v_ins = lir->insLoad(LIR_ldf, addr_ins, 0, ACC_OTHER); break; default: JS_NOT_REACHED("Unknown typed array type in tracer"); @@ -13439,7 +13446,11 @@ TraceRecorder::record_JSOP_ITER() jsuint flags = cx->fp->regs->pc[1]; - if (hasIteratorMethod(JSVAL_TO_OBJECT(v))) { + bool found; + RecordingStatus status = hasIteratorMethod(JSVAL_TO_OBJECT(v), found); + if (status != RECORD_CONTINUE) + return InjectStatus(status); + if (found) { if (flags == JSITER_ENUMERATE) return InjectStatus(call_imacro(iter_imacros.for_in)); if (flags == (JSITER_ENUMERATE | JSITER_FOREACH)) @@ -13463,7 +13474,7 @@ TraceRecorder::record_JSOP_NEXTITER() JSObject* iterobj = JSVAL_TO_OBJECT(iterobj_val); JSClass* clasp = iterobj->getClass(); LIns* iterobj_ins = get(&iterobj_val); - guardClass(iterobj, iterobj_ins, clasp, snapshot(BRANCH_EXIT)); + guardClass(iterobj, iterobj_ins, clasp, snapshot(BRANCH_EXIT), ACC_OTHER); if (clasp == &js_IteratorClass || clasp == &js_GeneratorClass) return InjectStatus(call_imacro(nextiter_imacros.native_iter_next)); return InjectStatus(call_imacro(nextiter_imacros.custom_iter_next)); @@ -13600,7 +13611,8 @@ TraceRecorder::traverseScopeChain(JSObject *obj, LIns *obj_ins, JSObject *target if (obj->getClass() == &js_CallClass && JSFUN_HEAVYWEIGHT_TEST(js_GetCallObjectFunction(obj)->flags)) { LIns* map_ins = map(obj_ins); - LIns* shape_ins = addName(lir->insLoad(LIR_ld, map_ins, offsetof(JSScope, shape)), + LIns* shape_ins = 
addName(lir->insLoad(LIR_ld, map_ins, offsetof(JSScope, shape), + ACC_OTHER), "obj_shape"); if (!exit) exit = snapshot(BRANCH_EXIT); @@ -13807,7 +13819,7 @@ TraceRecorder::record_JSOP_INSTANCEOF() stack(-2, lir->insCall(&HasInstance_ci, args)); LIns* status_ins = lir->insLoad(LIR_ld, lirbuf->state, - offsetof(InterpState, builtinStatus)); + offsetof(InterpState, builtinStatus), ACC_OTHER); pendingGuardCondition = lir->ins_eq0(status_ins); leaveDeepBailCall(); @@ -14083,7 +14095,7 @@ TraceRecorder::record_JSOP_ARGSUB() JS_REQUIRES_STACK LIns* TraceRecorder::guardArgsLengthNotAssigned(LIns* argsobj_ins) { - // The following implements js_IsOverriddenArgsLength on trace. + // The following implements IsOverriddenArgsLength on trace. // The '2' bit is set if length was overridden. LIns *len_ins = stobj_get_fslot(argsobj_ins, JSSLOT_ARGS_LENGTH); LIns *ovr_ins = lir->ins2(LIR_piand, len_ins, INS_CONSTWORD(2)); @@ -14103,7 +14115,7 @@ TraceRecorder::record_JSOP_ARGCNT() // We also have to check that arguments.length has not been mutated // at record time, because if so we will generate incorrect constant // LIR, which will assert in alu(). - if (cx->fp->argsobj && js_IsOverriddenArgsLength(JSVAL_TO_OBJECT(cx->fp->argsobj))) + if (cx->fp->argsobj && IsOverriddenArgsLength(JSVAL_TO_OBJECT(cx->fp->argsobj))) RETURN_STOP_A("can't trace JSOP_ARGCNT if arguments.length has been modified"); LIns *a_ins = get(&cx->fp->argsobj); if (callDepth == 0) { @@ -14250,7 +14262,7 @@ TraceRecorder::record_JSOP_GETGVAR() if (!lazilyImportGlobalSlot(slot)) RETURN_STOP_A("lazy import of global slot failed"); - stack(0, get(&STOBJ_GET_SLOT(globalObj, slot))); + stack(0, get(&globalObj->getSlotRef(slot))); return ARECORD_CONTINUE; } @@ -14266,7 +14278,7 @@ TraceRecorder::record_JSOP_SETGVAR() if (!lazilyImportGlobalSlot(slot)) RETURN_STOP_A("lazy import of global slot failed"); - set(&STOBJ_GET_SLOT(globalObj, slot), stack(-1)); + set(&globalObj->getSlotRef(slot), stack(-1)); return ARECORD_CONTINUE; } @@ -14283,7 +14295,7 @@ TraceRecorder::record_JSOP_INCGVAR() if (!lazilyImportGlobalSlot(slot)) RETURN_STOP_A("lazy import of global slot failed"); - return InjectStatus(inc(STOBJ_GET_SLOT(globalObj, slot), 1)); + return InjectStatus(inc(globalObj->getSlotRef(slot), 1)); } JS_REQUIRES_STACK AbortableRecordingStatus @@ -14299,7 +14311,7 @@ TraceRecorder::record_JSOP_DECGVAR() if (!lazilyImportGlobalSlot(slot)) RETURN_STOP_A("lazy import of global slot failed"); - return InjectStatus(inc(STOBJ_GET_SLOT(globalObj, slot), -1)); + return InjectStatus(inc(globalObj->getSlotRef(slot), -1)); } JS_REQUIRES_STACK AbortableRecordingStatus @@ -14315,7 +14327,7 @@ TraceRecorder::record_JSOP_GVARINC() if (!lazilyImportGlobalSlot(slot)) RETURN_STOP_A("lazy import of global slot failed"); - return InjectStatus(inc(STOBJ_GET_SLOT(globalObj, slot), 1, false)); + return InjectStatus(inc(globalObj->getSlotRef(slot), 1, false)); } JS_REQUIRES_STACK AbortableRecordingStatus @@ -14331,7 +14343,7 @@ TraceRecorder::record_JSOP_GVARDEC() if (!lazilyImportGlobalSlot(slot)) RETURN_STOP_A("lazy import of global slot failed"); - return InjectStatus(inc(STOBJ_GET_SLOT(globalObj, slot), -1, false)); + return InjectStatus(inc(globalObj->getSlotRef(slot), -1, false)); } JS_REQUIRES_STACK AbortableRecordingStatus @@ -14556,27 +14568,27 @@ TraceRecorder::record_JSOP_CALLPROP() } JSObject* obj2; - jsuword pcval; + PCVal pcval; CHECK_STATUS_A(test_property_cache(obj, obj_ins, obj2, pcval)); - if (PCVAL_IS_OBJECT(pcval)) { - if (PCVAL_IS_NULL(pcval)) + if 
(pcval.isObject()) { + if (pcval.isNull()) RETURN_STOP_A("callprop of missing method"); - JS_ASSERT(HAS_FUNCTION_CLASS(PCVAL_TO_OBJECT(pcval))); + JS_ASSERT(pcval.toObject()->isFunction()); if (JSVAL_IS_PRIMITIVE(l)) { - JSFunction* fun = GET_FUNCTION_PRIVATE(cx, PCVAL_TO_OBJECT(pcval)); + JSFunction* fun = GET_FUNCTION_PRIVATE(cx, pcval.toObject()); if (!PRIMITIVE_THIS_TEST(fun, l)) RETURN_STOP_A("callee does not accept primitive |this|"); } - set(&l, INS_CONSTOBJ(PCVAL_TO_OBJECT(pcval))); + set(&l, INS_CONSTOBJ(pcval.toObject())); } else { if (JSVAL_IS_PRIMITIVE(l)) RETURN_STOP_A("callprop of primitive method"); - JS_ASSERT_IF(PCVAL_IS_SPROP(pcval), !PCVAL_TO_SPROP(pcval)->isMethod()); + JS_ASSERT_IF(pcval.isSprop(), !pcval.toSprop()->isMethod()); AbortableRecordingStatus status = propTail(obj, obj_ins, obj2, pcval, NULL, NULL, &l); if (status != ARECORD_CONTINUE) @@ -14660,7 +14672,7 @@ TraceRecorder::record_JSOP_STOP() JS_ASSERT(fp->thisv == fp->argv[-1]); rval_ins = get(&fp->argv[-1]); } else { - rval_ins = INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID)); + rval_ins = INS_VOID(); } clearCurrentFrameSlotsFromTracker(nativeFrameTracker); return ARECORD_CONTINUE; @@ -14699,7 +14711,7 @@ TraceRecorder::record_JSOP_ENTERBLOCK() JSObject* obj; obj = cx->fp->script->getObject(getFullIndex(0)); - LIns* void_ins = INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID)); + LIns* void_ins = INS_VOID(); for (int i = 0, n = OBJ_BLOCK_COUNT(cx, obj); i < n; i++) stack(i, void_ins); return ARECORD_CONTINUE; @@ -14822,7 +14834,7 @@ TraceRecorder::record_JSOP_CALLGVAR() if (!lazilyImportGlobalSlot(slot)) RETURN_STOP_A("lazy import of global slot failed"); - jsval& v = STOBJ_GET_SLOT(globalObj, slot); + jsval& v = globalObj->getSlotRef(slot); stack(0, get(&v)); stack(1, INS_NULL()); return ARECORD_CONTINUE; @@ -14879,7 +14891,7 @@ CallIteratorNext(JSContext *cx, uintN argc, jsval *vp) static jsval FASTCALL CallIteratorNext_tn(JSContext* cx, jsbytecode* pc, JSObject* iterobj) { - JSAutoTempValueRooter tvr(cx); + AutoValueRooter tvr(cx); JSBool ok = js_CallIteratorNext(cx, iterobj, tvr.addr()); if (!ok) { @@ -14970,14 +14982,14 @@ TraceRecorder::record_JSOP_LENGTH() RETURN_STOP_A("non-string primitive JSOP_LENGTH unsupported"); set(&l, lir->ins1(LIR_i2f, p2i(lir->insLoad(LIR_ldp, get(&l), - offsetof(JSString, mLength))))); + offsetof(JSString, mLength), ACC_OTHER)))); return ARECORD_CONTINUE; } JSObject* obj = JSVAL_TO_OBJECT(l); LIns* obj_ins = get(&l); - if (obj->getClass() == &js_ArgumentsClass) { + if (obj->isArguments()) { unsigned depth; JSStackFrame *afp = guardArguments(obj, obj_ins, &depth); if (!afp) @@ -14985,7 +14997,7 @@ TraceRecorder::record_JSOP_LENGTH() // We must both check at record time and guard at run time that // arguments.length has not been reassigned, redefined or deleted. 
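// A standalone illustration of the check-and-guard discipline described
// above (names are stand-ins; only the '& 2' test is taken from
// guardArgsLengthNotAssigned): the recorder tests the condition once at
// record time, then emits a guard so the trace side-exits if the condition
// later becomes false.
#include <stdint.h>

static inline int
argsLengthOverridden(uintptr_t lenWord)
{
    /* Mirrors the emitted LIR_piand with INS_CONSTWORD(2): bit 2 of the
       JSSLOT_ARGS_LENGTH word is set once arguments.length has been
       reassigned, redefined or deleted. */
    return (lenWord & 2) != 0;
}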
- if (js_IsOverriddenArgsLength(obj)) + if (IsOverriddenArgsLength(obj)) RETURN_STOP_A("can't trace JSOP_ARGCNT if arguments.length has been modified"); LIns* slot_ins = guardArgsLengthNotAssigned(obj_ins); @@ -15004,18 +15016,18 @@ TraceRecorder::record_JSOP_LENGTH() return ARECORD_STOP; } } else { - if (!guardClass(obj, obj_ins, &js_SlowArrayClass, snapshot(BRANCH_EXIT))) + if (!guardClass(obj, obj_ins, &js_SlowArrayClass, snapshot(BRANCH_EXIT), ACC_OTHER)) RETURN_STOP_A("can't trace length property access on non-array"); } v_ins = lir->ins1(LIR_i2f, p2i(stobj_get_fslot(obj_ins, JSSLOT_ARRAY_LENGTH))); } else if (OkToTraceTypedArrays && js_IsTypedArray(obj)) { // Ensure array is a typed array and is the same type as what was written - guardConstClass(obj, obj_ins, obj->getClass(), snapshot(BRANCH_EXIT)); + guardClass(obj, obj_ins, obj->getClass(), snapshot(BRANCH_EXIT), ACC_OTHER); v_ins = lir->ins1(LIR_i2f, lir->insLoad(LIR_ld, stobj_get_const_fslot(obj_ins, JSSLOT_PRIVATE), js::TypedArray::lengthOffset(), ACC_READONLY)); } else { - if (!OBJ_IS_NATIVE(obj)) + if (!obj->isNative()) RETURN_STOP_A("can't trace length property access on non-array, non-native object"); return getProp(obj, obj_ins); } @@ -15097,7 +15109,7 @@ TraceRecorder::record_JSOP_CONCATN() int32_t d = 0; for (jsval *vp = argBase; vp != regs.sp; ++vp, d += sizeof(void *)) { JS_ASSERT(JSVAL_IS_PRIMITIVE(*vp)); - lir->insStorei(stringify(*vp), buf_ins, d); + lir->insStorei(stringify(*vp), buf_ins, d, ACC_OTHER); } /* Perform concatenation using a builtin. */ diff --git a/js/src/jstracer.h b/js/src/jstracer.h index 638cdbc6acf..256e79a8e37 100644 --- a/js/src/jstracer.h +++ b/js/src/jstracer.h @@ -348,9 +348,10 @@ enum TraceType_ TT_JSVAL = 3, /* arbitrary jsval */ TT_STRING = 4, /* pointer to JSString */ TT_NULL = 5, /* null */ - TT_PSEUDOBOOLEAN = 6, /* true, false, or undefined (0, 1, or 2) */ - TT_FUNCTION = 7, /* pointer to JSObject whose class is js_FunctionClass */ - TT_IGNORE = 8 + TT_SPECIAL = 6, /* true, false, hole, or areturn (0, 1, 6, or 8) */ + TT_VOID = 7, /* undefined (2) */ + TT_FUNCTION = 8, /* pointer to JSObject whose class is js_FunctionClass */ + TT_IGNORE = 9 } #if defined(__GNUC__) && defined(USE_TRACE_TYPE_ENUM) __attribute__((packed)) @@ -455,7 +456,7 @@ struct VMSideExit : public nanojit::SideExit uint32 numStackSlotsBelowCurrentFrame; ExitType exitType; uintN lookupFlags; - void* recursive_pc; + jsbytecode* recursive_pc; FrameInfo* recursive_down; unsigned hitcount; unsigned slurpFailSlot; @@ -1096,7 +1097,9 @@ class TraceRecorder VMSideExit* exit); JS_REQUIRES_STACK nanojit::LIns* slurpNullSlot(nanojit::LIns* val_ins, jsval* vp, VMSideExit* exit); - JS_REQUIRES_STACK nanojit::LIns* slurpBoolSlot(nanojit::LIns* val_ins, jsval* vp, + JS_REQUIRES_STACK nanojit::LIns* slurpSpecialSlot(nanojit::LIns* val_ins, jsval* vp, + VMSideExit* exit); + JS_REQUIRES_STACK nanojit::LIns* slurpVoidSlot(nanojit::LIns* val_ins, jsval* vp, VMSideExit* exit); JS_REQUIRES_STACK nanojit::LIns* slurpSlot(nanojit::LIns* val_ins, jsval* vp, VMSideExit* exit); @@ -1149,12 +1152,14 @@ class TraceRecorder JS_REQUIRES_STACK nanojit::LIns* var(unsigned n); JS_REQUIRES_STACK void var(unsigned n, nanojit::LIns* i); JS_REQUIRES_STACK nanojit::LIns* upvar(JSScript* script, JSUpvarArray* uva, uintN index, jsval& v); - nanojit::LIns* stackLoad(nanojit::LIns* addr, uint8 type); + nanojit::LIns* stackLoad(nanojit::LIns* addr, nanojit::AccSet accSet, uint8 type); JS_REQUIRES_STACK nanojit::LIns* stack(int n); JS_REQUIRES_STACK void 
stack(int n, nanojit::LIns* i); JS_REQUIRES_STACK nanojit::LIns* alu(nanojit::LOpcode op, jsdouble v0, jsdouble v1, nanojit::LIns* s0, nanojit::LIns* s1); + + nanojit::LIns* i2f(nanojit::LIns* i); nanojit::LIns* f2i(nanojit::LIns* f); nanojit::LIns* f2u(nanojit::LIns* f); JS_REQUIRES_STACK nanojit::LIns* makeNumberInt32(nanojit::LIns* f); @@ -1190,8 +1195,7 @@ class TraceRecorder JS_REQUIRES_STACK RecordingStatus binary(nanojit::LOpcode op); JS_REQUIRES_STACK RecordingStatus guardShape(nanojit::LIns* obj_ins, JSObject* obj, - uint32 shape, const char* name, - nanojit::LIns* map_ins, VMSideExit* exit); + uint32 shape, const char* name, VMSideExit* exit); #if defined DEBUG_notme && defined XP_UNIX void dumpGuardedShapes(const char* prefix); @@ -1203,15 +1207,12 @@ class TraceRecorder JS_REQUIRES_STACK bool map_is_native(JSObjectMap* map, nanojit::LIns* map_ins, nanojit::LIns*& ops_ins, size_t op_offset = 0); JS_REQUIRES_STACK AbortableRecordingStatus test_property_cache(JSObject* obj, nanojit::LIns* obj_ins, - JSObject*& obj2, jsuword& pcval); - JS_REQUIRES_STACK RecordingStatus guardNativePropertyOp(JSObject* aobj, - nanojit::LIns* map_ins); + JSObject*& obj2, PCVal& pcval); JS_REQUIRES_STACK RecordingStatus guardPropertyCacheHit(nanojit::LIns* obj_ins, - nanojit::LIns* map_ins, - JSObject* aobj, - JSObject* obj2, - JSPropCacheEntry* entry, - jsuword& pcval); + JSObject* aobj, + JSObject* obj2, + PropertyCacheEntry* entry, + PCVal& pcval); void stobj_set_fslot(nanojit::LIns *obj_ins, unsigned slot, nanojit::LIns* v_ins); @@ -1244,7 +1245,7 @@ class TraceRecorder uint32 *slotp, nanojit::LIns** v_insp, jsval* outp); JS_REQUIRES_STACK AbortableRecordingStatus propTail(JSObject* obj, nanojit::LIns* obj_ins, - JSObject* obj2, jsuword pcval, + JSObject* obj2, PCVal pcval, uint32 *slotp, nanojit::LIns** v_insp, jsval* outp); JS_REQUIRES_STACK RecordingStatus denseArrayElement(jsval& oval, jsval& idx, jsval*& vp, @@ -1275,7 +1276,7 @@ class TraceRecorder JS_REQUIRES_STACK RecordingStatus nativeSet(JSObject* obj, nanojit::LIns* obj_ins, JSScopeProperty* sprop, jsval v, nanojit::LIns* v_ins); - JS_REQUIRES_STACK RecordingStatus setProp(jsval &l, JSPropCacheEntry* entry, + JS_REQUIRES_STACK RecordingStatus setProp(jsval &l, PropertyCacheEntry* entry, JSScopeProperty* sprop, jsval &v, nanojit::LIns*& v_ins); JS_REQUIRES_STACK RecordingStatus setCallProp(JSObject *callobj, nanojit::LIns *callobj_ins, @@ -1293,13 +1294,9 @@ class TraceRecorder JS_REQUIRES_STACK nanojit::LIns* box_jsval(jsval v, nanojit::LIns* v_ins); JS_REQUIRES_STACK nanojit::LIns* unbox_jsval(jsval v, nanojit::LIns* v_ins, VMSideExit* exit); JS_REQUIRES_STACK bool guardClass(JSObject* obj, nanojit::LIns* obj_ins, JSClass* clasp, - VMSideExit* exit, - nanojit::AccSet accSet = nanojit::ACC_LOAD_ANY); - bool guardConstClass(JSObject* obj, nanojit::LIns* obj_ins, JSClass* clasp, VMSideExit* exit) { - return guardClass(obj, obj_ins, clasp, exit, nanojit::ACC_READONLY); - } + VMSideExit* exit, nanojit::AccSet accSet); JS_REQUIRES_STACK bool guardDenseArray(JSObject* obj, nanojit::LIns* obj_ins, - ExitType exitType = MISMATCH_EXIT); + ExitType exitType); JS_REQUIRES_STACK bool guardDenseArray(JSObject* obj, nanojit::LIns* obj_ins, VMSideExit* exit); JS_REQUIRES_STACK bool guardHasPrototype(JSObject* obj, nanojit::LIns* obj_ins, @@ -1348,8 +1345,8 @@ class TraceRecorder JS_REQUIRES_STACK void fuseIf(jsbytecode* pc, bool cond, nanojit::LIns* x); JS_REQUIRES_STACK AbortableRecordingStatus checkTraceEnd(jsbytecode* pc); - bool hasMethod(JSObject* 
obj, jsid id); - JS_REQUIRES_STACK bool hasIteratorMethod(JSObject* obj); + RecordingStatus hasMethod(JSObject* obj, jsid id, bool& found); + JS_REQUIRES_STACK RecordingStatus hasIteratorMethod(JSObject* obj, bool& found); JS_REQUIRES_STACK jsatomid getFullIndex(ptrdiff_t pcoff = 0); @@ -1430,8 +1427,8 @@ public: JS_REQUIRES_STACK AbortableRecordingStatus monitorRecording(JSOp op); JS_REQUIRES_STACK AbortableRecordingStatus record_EnterFrame(uintN& inlineCallCount); JS_REQUIRES_STACK AbortableRecordingStatus record_LeaveFrame(); - JS_REQUIRES_STACK AbortableRecordingStatus record_SetPropHit(JSPropCacheEntry* entry, - JSScopeProperty* sprop); + JS_REQUIRES_STACK AbortableRecordingStatus record_SetPropHit(PropertyCacheEntry* entry, + JSScopeProperty* sprop); JS_REQUIRES_STACK AbortableRecordingStatus record_DefLocalFunSetSlot(uint32 slot, JSObject* obj); JS_REQUIRES_STACK AbortableRecordingStatus record_NativeCallComplete(); void forgetGuardedShapesForObject(JSObject* obj); diff --git a/js/src/jstypedarray.cpp b/js/src/jstypedarray.cpp index 5aea95d8834..51ffdd1f8df 100644 --- a/js/src/jstypedarray.cpp +++ b/js/src/jstypedarray.cpp @@ -37,6 +37,8 @@ * * ***** END LICENSE BLOCK ***** */ +#define __STDC_LIMIT_MACROS + #include #include "jstypes.h" @@ -368,12 +370,12 @@ struct uint8_clamped { } inline uint8_clamped& operator= (uint16 x) { - val = (x > 255) ? 255 : 0; + val = (x > 255) ? 255 : uint8(x); return *this; } inline uint8_clamped& operator= (uint32 x) { - val = (x > 255) ? 255 : 0; + val = (x > 255) ? 255 : uint8(x); return *this; } @@ -424,6 +426,15 @@ template<> inline const int TypeIDOfType() { return TypedArray::TYPE_FLOA template<> inline const int TypeIDOfType() { return TypedArray::TYPE_FLOAT64; } template<> inline const int TypeIDOfType() { return TypedArray::TYPE_UINT8_CLAMPED; } +template static inline const bool TypeIsUnsigned() { return false; } +template<> inline const bool TypeIsUnsigned() { return true; } +template<> inline const bool TypeIsUnsigned() { return true; } +template<> inline const bool TypeIsUnsigned() { return true; } + +template static inline const bool TypeIsFloatingPoint() { return false; } +template<> inline const bool TypeIsFloatingPoint() { return true; } +template<> inline const bool TypeIsFloatingPoint() { return true; } + template class TypedArrayTemplate; typedef TypedArrayTemplate Int8Array; @@ -442,6 +453,9 @@ class TypedArrayTemplate { public: typedef TypedArrayTemplate ThisTypeArray; + static const int ArrayTypeID() { return TypeIDOfType(); } + static const bool ArrayTypeIsUnsigned() { return TypeIsUnsigned(); } + static const bool ArrayTypeIsFloatingPoint() { return TypeIsFloatingPoint(); } static JSObjectOps fastObjectOps; static JSObjectMap fastObjectMap; @@ -450,12 +464,12 @@ class TypedArrayTemplate static inline JSClass *slowClass() { - return &TypedArray::slowClasses[TypeIDOfType()]; + return &TypedArray::slowClasses[ArrayTypeID()]; } static inline JSClass *fastClass() { - return &TypedArray::fastClasses[TypeIDOfType()]; + return &TypedArray::fastClasses[ArrayTypeID()]; } static JSObjectOps *getObjectOps(JSContext *cx, JSClass *clasp) @@ -494,7 +508,7 @@ class TypedArrayTemplate return false; if (prop) { - if (OBJ_IS_NATIVE(obj2)) { + if (obj2->isNative()) { sprop = (JSScopeProperty *) prop; if (!js_NativeGet(cx, obj, obj2, sprop, JSGET_METHOD_BARRIER, vp)) return false; @@ -536,12 +550,51 @@ class TypedArrayTemplate if (JSVAL_IS_INT(*vp)) { tarray->setIndex(index, NativeType(JSVAL_TO_INT(*vp))); - } else if (JSVAL_IS_DOUBLE(*vp)) { - 
tarray->setIndex(index, NativeType(*JSVAL_TO_DOUBLE(*vp)));
+            return true;
+        }
+
+        jsdouble d;
+
+        if (JSVAL_IS_DOUBLE(*vp)) {
+            d = *JSVAL_TO_DOUBLE(*vp);
+        } else if (JSVAL_IS_NULL(*vp)) {
+            d = 0.0;
+        } else if (JSVAL_IS_PRIMITIVE(*vp)) {
+            JS_ASSERT(JSVAL_IS_STRING(*vp) || JSVAL_IS_SPECIAL(*vp));
+            if (JSVAL_IS_STRING(*vp)) {
+                // Note that ValueToNumber will always
+                // succeed with a string arg.
+                d = js_ValueToNumber(cx, vp);
+                JS_ASSERT(*vp != JSVAL_NULL);
+            } else if (*vp == JSVAL_VOID) {
+                d = js_NaN;
+            } else {
+                d = (double) JSVAL_TO_BOOLEAN(*vp);
+            }
         } else {
-            jsdouble d;
-            if (JS_ValueToNumber(cx, *vp, &d))
-                tarray->setIndex(index, NativeType(d));
+            // Non-primitive assignments become NaN or 0 (for float/int arrays).
+            d = js_NaN;
+        }
+
+        // If the array is an integer array, we only handle up to
+        // 32-bit ints from this point on. If we want to handle
+        // 64-bit ints, we'll need some changes.
+
+        // Assign based on characteristics of the destination type.
+        if (ArrayTypeIsFloatingPoint()) {
+            tarray->setIndex(index, NativeType(d));
+        } else if (ArrayTypeIsUnsigned()) {
+            JS_ASSERT(sizeof(NativeType) <= 4);
+            uint32 n = js_DoubleToECMAUint32(d);
+            tarray->setIndex(index, NativeType(n));
+        } else if (ArrayTypeID() == TypedArray::TYPE_UINT8_CLAMPED) {
+            // The uint8_clamped type has a special rounding converter
+            // for doubles.
+            tarray->setIndex(index, NativeType(d));
+        } else {
+            JS_ASSERT(sizeof(NativeType) <= 4);
+            int32 n = js_DoubleToECMAInt32(d);
+            tarray->setIndex(index, NativeType(n));
         }
 
         return true;
@@ -815,14 +868,14 @@ class TypedArrayTemplate
     bool
     init(JSContext *cx, uint32 len)
     {
-        type = TypeIDOfType<NativeType>();
+        type = ArrayTypeID();
         return createBufferWithSizeAndCount(cx, sizeof(NativeType), len);
     }
 
     bool
     init(JSContext *cx, JSObject *other, int32 byteOffsetInt = -1, int32 lengthInt = -1)
     {
-        type = TypeIDOfType<NativeType>();
+        type = ArrayTypeID();
 
         //printf ("Constructing with type %d other %p offset %d length %d\n", type, other, byteOffset, length);
 
@@ -867,7 +920,17 @@ class TypedArrayTemplate
             len = (uint32) lengthInt;
         }
 
-        if (boffset + len*sizeof(NativeType) > abuf->byteLength) {
+        // Go slowly and check for overflow (a standalone sketch of this
+        // check follows just below).
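// A standalone sketch of the overflow discipline used here (hypothetical
// helper, not SpiderMonkey API): validate count * elemSize against INT32_MAX
// before computing it, so the later "fits in the buffer" comparison operates
// on a product that is known not to have wrapped. Needing INT32_MAX is why
// this patch defines __STDC_LIMIT_MACROS at the top of jstypedarray.cpp.
#include <stdint.h>

static int
checkedByteLength(uint32_t count, uint32_t elemSize, uint32_t *out)
{
    if (elemSize == 0 || count >= INT32_MAX / elemSize)
        return 0;                /* count * elemSize would leave int32 range */
    *out = count * elemSize;     /* safe: product is below INT32_MAX */
    return 1;
}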
+ uint32 arrayByteLength = len*sizeof(NativeType); + if (uint32(len) >= INT32_MAX / sizeof(NativeType) || + uint32(boffset) >= INT32_MAX - arrayByteLength) + { + JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, + JSMSG_TYPED_ARRAY_BAD_ARGS); + return false; // overflow occurred along the way when calculating boffset+len*sizeof(NativeType) + } + + if (arrayByteLength + boffset > abuf->byteLength) { JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_TYPED_ARRAY_BAD_ARGS); return false; // boffset+len is too big for the arraybuffer @@ -876,7 +939,7 @@ class TypedArrayTemplate buffer = abuf; bufferJS = other; byteOffset = boffset; - byteLength = len * sizeof(NativeType); + byteLength = arrayByteLength; length = len; data = abuf->offsetData(boffset); } else { @@ -924,47 +987,49 @@ class TypedArrayTemplate } protected: + static NativeType + nativeFromValue(JSContext *cx, jsval v) + { + if (JSVAL_IS_INT(v)) + return NativeType(JSVAL_TO_INT(v)); + + if (JSVAL_IS_DOUBLE(v)) + return NativeType(*JSVAL_TO_DOUBLE(v)); + + if (JSVAL_IS_PRIMITIVE(v) && v != JSVAL_HOLE) { + jsdouble dval = js_ValueToNumber(cx, &v); + JS_ASSERT(v != JSVAL_NULL); + return NativeType(dval); + } + + if (ArrayTypeIsFloatingPoint()) + return NativeType(js_NaN); + + return NativeType(int32(0)); + } + bool copyFrom(JSContext *cx, JSObject *ar, jsuint len) { NativeType *dest = static_cast(data); - if (ar->isDenseArray()) { + if (ar->isDenseArray() && js_DenseArrayCapacity(ar) >= len) { JS_ASSERT(ar->fslots[JSSLOT_ARRAY_LENGTH] == (jsval)len); jsval *src = ar->dslots; for (uintN i = 0; i < len; ++i) { jsval v = *src++; - if (JSVAL_IS_INT(v)) { - *dest++ = NativeType(JSVAL_TO_INT(v)); - } else if (JSVAL_IS_DOUBLE(v)) { - *dest++ = NativeType(*JSVAL_TO_DOUBLE(v)); - } else { - jsdouble dval; - if (!JS_ValueToNumber(cx, v, &dval)) - return false; - *dest++ = NativeType(dval); - } + *dest++ = nativeFromValue(cx, v); } } else { // slow path jsval v; for (uintN i = 0; i < len; ++i) { - if (!JS_GetElement(cx, ar, i, &v)) + if (!ar->getProperty(cx, INT_TO_JSID(i), &v)) return false; - - if (JSVAL_IS_INT(v)) { - *dest++ = NativeType(JSVAL_TO_INT(v)); - } else if (JSVAL_IS_DOUBLE(v)) { - *dest++ = NativeType(*JSVAL_TO_DOUBLE(v)); - } else { - jsdouble dval; - if (!JS_ValueToNumber(cx, v, &dval)) - return false; - *dest++ = NativeType(dval); - } + *dest++ = nativeFromValue(cx, v); } } @@ -1042,13 +1107,15 @@ class TypedArrayTemplate bool createBufferWithSizeAndCount(JSContext *cx, uint32 size, uint32 count) { - int32 bytelen = size * count; - if (bytelen / size != count) { + JS_ASSERT(size != 0); + + if (size != 0 && count >= INT32_MAX / size) { JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_NEED_DIET, "size and count"); return false; } + int32 bytelen = size * count; if (!createBufferWithByteLength(cx, bytelen)) return false; @@ -1329,10 +1396,11 @@ js_IsTypedArray(JSObject *obj) JS_FRIEND_API(JSObject *) js_CreateArrayBuffer(JSContext *cx, jsuint nbytes) { - JSAutoTempValueRooter tvr(cx); - js_NewNumberInRootedValue(cx, jsdouble(nbytes), tvr.addr()); + AutoValueRooter tvr(cx); + if (!js_NewNumberInRootedValue(cx, jsdouble(nbytes), tvr.addr())) + return NULL; - JSAutoTempValueRooter rval(cx); + AutoValueRooter rval(cx); if (!ArrayBuffer::class_constructor(cx, cx->globalObject, 1, tvr.addr(), rval.addr())) @@ -1341,7 +1409,7 @@ js_CreateArrayBuffer(JSContext *cx, jsuint nbytes) return JSVAL_TO_OBJECT(rval.value()); } -static inline bool +static inline JSBool TypedArrayConstruct(JSContext *cx, jsint atype, uintN argc, jsval 
*argv, jsval *rv)
 {
     switch (atype) {
@@ -1384,7 +1452,7 @@ js_CreateTypedArray(JSContext *cx, jsint atype, jsuint nelements)
     JS_ASSERT(atype >= 0 && atype < TypedArray::TYPE_MAX);
 
     jsval vals[2];
-    JSAutoTempValueRooter tvr(cx, 2, vals);
+    AutoArrayRooter tvr(cx, JS_ARRAY_LENGTH(vals), vals);
 
     if (!js_NewNumberInRootedValue(cx, jsdouble(nelements), &vals[0]))
         return NULL;
@@ -1401,7 +1469,7 @@ js_CreateTypedArrayWithArray(JSContext *cx, jsint atype, JSObject *arrayArg)
     JS_ASSERT(atype >= 0 && atype < TypedArray::TYPE_MAX);
 
     jsval vals[2];
-    JSAutoTempValueRooter tvr(cx, 2, vals);
+    AutoArrayRooter tvr(cx, JS_ARRAY_LENGTH(vals), vals);
 
     vals[0] = OBJECT_TO_JSVAL(arrayArg);
 
@@ -1417,27 +1485,28 @@ js_CreateTypedArrayWithBuffer(JSContext *cx, jsint atype, JSObject *bufArg,
 {
     JS_ASSERT(atype >= 0 && atype < TypedArray::TYPE_MAX);
     JS_ASSERT(bufArg && ArrayBuffer::fromJSObject(bufArg));
 
-    /* if byteoffset is -1, length must be -1 */
-    JS_ASSERT(length < 0 || byteoffset >= 0);
+    JS_ASSERT_IF(byteoffset < 0, length < 0);
 
     jsval vals[4];
-    JSAutoTempValueRooter tvr(cx, 4, vals);
+    AutoArrayRooter tvr(cx, JS_ARRAY_LENGTH(vals), vals);
 
     int argc = 1;
     vals[0] = OBJECT_TO_JSVAL(bufArg);
 
     if (byteoffset >= 0) {
-        js_NewNumberInRootedValue(cx, jsdouble(byteoffset), &vals[1]);
+        if (!js_NewNumberInRootedValue(cx, jsdouble(byteoffset), &vals[argc]))
+            return NULL;
+        argc++;
     }
 
     if (length >= 0) {
-        js_NewNumberInRootedValue(cx, jsdouble(length), &vals[1]);
+        if (!js_NewNumberInRootedValue(cx, jsdouble(length), &vals[argc]))
+            return NULL;
+        argc++;
     }
 
-    js_NewNumberInRootedValue(cx, jsdouble(byteoffset), &vals[0]);
-
     if (!TypedArrayConstruct(cx, atype, argc, &vals[0], &vals[3]))
         return NULL;
diff --git a/js/src/jstypes.h b/js/src/jstypes.h
index f6b49f49d65..f664ac8fac5 100644
--- a/js/src/jstypes.h
+++ b/js/src/jstypes.h
@@ -267,19 +267,6 @@
 #define JS_BIT(n)       ((JSUint32)1 << (n))
 #define JS_BITMASK(n)   (JS_BIT(n) - 1)
 
-/***********************************************************************
-** MACROS:      JS_PTR_TO_INT32
-**              JS_PTR_TO_UINT32
-**              JS_INT32_TO_PTR
-**              JS_UINT32_TO_PTR
-** DESCRIPTION:
-** Integer to pointer and pointer to integer conversion macros.
-***********************************************************************/
-#define JS_PTR_TO_INT32(x)  ((jsint)((char *)(x) - (char *)0))
-#define JS_PTR_TO_UINT32(x) ((jsuint)((char *)(x) - (char *)0))
-#define JS_INT32_TO_PTR(x)  ((void *)((char *)0 + (jsint)(x)))
-#define JS_UINT32_TO_PTR(x) ((void *)((char *)0 + (jsuint)(x)))
-
 /***********************************************************************
 ** MACROS:      JS_HOWMANY
 **              JS_ROUNDUP
diff --git a/js/src/jsutil.cpp b/js/src/jsutil.cpp
index 679aa5fbca7..ad21141c40e 100644
--- a/js/src/jsutil.cpp
+++ b/js/src/jsutil.cpp
@@ -46,6 +46,7 @@
 #include "jstypes.h"
 #include "jsstdint.h"
 #include "jsutil.h"
+#include "jstl.h"
 
 #ifdef WIN32
 # include
@@ -53,6 +54,8 @@
 # include
 #endif
 
+using namespace js;
+
 /*
  * Checks the assumption that JS_FUNC_TO_DATA_PTR and JS_DATA_TO_FUNC_PTR
  * macros use to implement casts between function and data pointers.
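// A minimal sketch of the assumption named in the comment above (illustrative
// only; the real jsutil.cpp check may differ): function pointers must round-
// trip through a data pointer, which in particular requires the two pointer
// flavors to have the same size.
#include <assert.h>

typedef void (*FuncPtr)(void);
static void dummyFn(void) {}

static void
checkFuncDataPtrAssumption(void)
{
    assert(sizeof(FuncPtr) == sizeof(void *));
    void *asData = (void *) dummyFn;      /* JS_FUNC_TO_DATA_PTR direction */
    FuncPtr back = (FuncPtr) asData;      /* JS_DATA_TO_FUNC_PTR direction */
    assert(back == dummyFn);
}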
@@ -140,7 +143,7 @@ JS_BasicStatsAccum(JSBasicStats *bs, uint32 val) if (newscale != oldscale) { uint32 newhist[11], newbin; - memset(newhist, 0, sizeof newhist); + PodArrayZero(newhist); for (bin = 0; bin <= 10; bin++) { newbin = ValToBin(newscale, BinToVal(oldscale, bin)); newhist[newbin] += bs->hist[bin]; diff --git a/js/src/jsvector.h b/js/src/jsvector.h index 7c051f7005e..0e41079a282 100644 --- a/js/src/jsvector.h +++ b/js/src/jsvector.h @@ -229,6 +229,21 @@ class Vector : AllocPolicy union { BufferPtrs ptrs; char mBuf[sInlineBytes]; + +#if __GNUC__ + /* + * GCC thinks there is a strict aliasing warning since mBuf is a char + * array but we read and write to it as a T array. This is not an error + * since there are no reads and writes to the mBuf memory except those + * that treat it as a T array. Sadly, + * #pragma GCC diagnostic ignore "-Wstrict-aliasing" + * doesn't silence the warning. Type punning is allowed through a union + * of the involved types, so, for now, this error can be silenced by + * adding each offending T to this union. (This won't work for non-POD + * T's, but there don't seem to be any with warnings yet...) + */ + jschar unused1_; +#endif } u; /* Only valid when usingInlineStorage() */ diff --git a/js/src/jsxml.cpp b/js/src/jsxml.cpp index 673b2d3abf9..4bd4107ae89 100644 --- a/js/src/jsxml.cpp +++ b/js/src/jsxml.cpp @@ -70,10 +70,15 @@ #include "jsstaticcheck.h" #include "jsvector.h" +#include "jscntxtinlines.h" +#include "jsobjinlines.h" + #ifdef DEBUG #include /* for #ifdef DEBUG memset calls */ #endif +using namespace js; + /* * NOTES * - in the js shell, you must use the -x command line option, or call @@ -151,9 +156,9 @@ GetSlotString(const JSObject *obj, uint32 slot) JS_ASSERT(slot == JSSLOT_PREFIX || slot == JSSLOT_URI || slot == JSSLOT_LOCAL_NAME); - JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_NamespaceClass.base || - IsQNameClass(STOBJ_GET_CLASS(obj))); - JS_ASSERT_IF(STOBJ_GET_CLASS(obj) == &js_NamespaceClass.base, + JS_ASSERT(obj->getClass() == &js_NamespaceClass.base || + IsQNameClass(obj->getClass())); + JS_ASSERT_IF(obj->getClass() == &js_NamespaceClass.base, slot != JSSLOT_LOCAL_NAME); v = obj->fslots[slot]; @@ -186,7 +191,7 @@ IsDeclared(const JSObject *obj) { jsval v; - JS_ASSERT(STOBJ_GET_CLASS(obj) == &js_NamespaceClass.base); + JS_ASSERT(obj->getClass() == &js_NamespaceClass.base); v = obj->fslots[JSSLOT_DECLARED]; JS_ASSERT(JSVAL_IS_VOID(v) || v == JSVAL_TRUE); return v == JSVAL_TRUE; @@ -222,7 +227,7 @@ namespace_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp) if (!JSVAL_IS_INT(id)) return JS_TRUE; - if (STOBJ_GET_CLASS(obj) != &js_NamespaceClass.base) + if (obj->getClass() != &js_NamespaceClass.base) return JS_TRUE; switch (JSVAL_TO_INT(id)) { @@ -330,7 +335,7 @@ qname_getProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp) if (!JSVAL_IS_INT(id)) return JS_TRUE; - if (STOBJ_GET_CLASS(obj) != &js_QNameClass.base) + if (obj->getClass() != &js_QNameClass.base) return JS_TRUE; switch (JSVAL_TO_INT(id)) { @@ -866,60 +871,10 @@ attr_identity(const void *a, const void *b) return qname_identity(xmla->name, xmlb->name); } -struct JSXMLArrayCursor -{ - JSXMLArray *array; - uint32 index; - JSXMLArrayCursor *next; - JSXMLArrayCursor **prevp; - void *root; - - JSXMLArrayCursor(JSXMLArray *array) - : array(array), index(0), next(array->cursors), prevp(&array->cursors), - root(NULL) - { - if (next) - next->prevp = &next; - array->cursors = this; - } - - ~JSXMLArrayCursor() { disconnect(); } - - void disconnect() { - if (!array) - return; - 
if (next) - next->prevp = prevp; - *prevp = next; - array = NULL; - } - - void *getNext() { - if (!array || index >= array->length) - return NULL; - return root = array->vector[index++]; - } - - void *getCurrent() { - if (!array || index >= array->length) - return NULL; - return root = array->vector[index]; - } -}; - static void XMLArrayCursorTrace(JSTracer *trc, JSXMLArrayCursor *cursor) { - void *root; -#ifdef DEBUG - size_t index = 0; -#endif - - for (; cursor; cursor = cursor->next) { - root = cursor->root; - JS_SET_TRACING_INDEX(trc, "cursor_root", index++); - js_CallValueTracerIfGCThing(trc, (jsval)root); - } + cursor->trace(trc); } /* NB: called with null cx from the GC, via xml_trace => XMLArrayTrim. */ @@ -1314,11 +1269,10 @@ ParseNodeToQName(JSCompiler *jsc, JSParseNode *pn, } if (!uri) { - js_ReportCompileErrorNumber(jsc->context, &jsc->tokenStream, pn, - JSREPORT_ERROR, - JSMSG_BAD_XML_NAMESPACE, - js_ValueToPrintableString(jsc->context, - STRING_TO_JSVAL(prefix))); + ReportCompileErrorNumber(jsc->context, &jsc->tokenStream, pn, + JSREPORT_ERROR, JSMSG_BAD_XML_NAMESPACE, + js_ValueToPrintableString(jsc->context, + STRING_TO_JSVAL(prefix))); return NULL; } @@ -1396,8 +1350,8 @@ ParseNodeToXML(JSCompiler *jsc, JSParseNode *pn, int stackDummy; if (!JS_CHECK_STACK_SIZE(cx, stackDummy)) { - js_ReportCompileErrorNumber(cx, &jsc->tokenStream, pn, JSREPORT_ERROR, - JSMSG_OVER_RECURSED); + ReportCompileErrorNumber(cx, &jsc->tokenStream, pn, JSREPORT_ERROR, + JSMSG_OVER_RECURSED); return NULL; } @@ -1535,11 +1489,10 @@ ParseNodeToXML(JSCompiler *jsc, JSParseNode *pn, /* Enforce "Well-formedness constraint: Unique Att Spec". */ for (pn3 = head; pn3 != pn2; pn3 = pn3->pn_next->pn_next) { if (pn3->pn_atom == pn2->pn_atom) { - js_ReportCompileErrorNumber(cx, &jsc->tokenStream, pn2, - JSREPORT_ERROR, - JSMSG_DUPLICATE_XML_ATTR, - js_ValueToPrintableString(cx, - ATOM_KEY(pn2->pn_atom))); + ReportCompileErrorNumber(cx, &jsc->tokenStream, pn2, + JSREPORT_ERROR, JSMSG_DUPLICATE_XML_ATTR, + js_ValueToPrintableString(cx, + ATOM_KEY(pn2->pn_atom))); goto fail; } } @@ -1646,11 +1599,10 @@ ParseNodeToXML(JSCompiler *jsc, JSParseNode *pn, attrjqn = attrj->name; if (js_EqualStrings(GetURI(attrjqn), GetURI(qn)) && js_EqualStrings(GetLocalName(attrjqn), GetLocalName(qn))) { - js_ReportCompileErrorNumber(cx, &jsc->tokenStream, pn2, - JSREPORT_ERROR, - JSMSG_DUPLICATE_XML_ATTR, - js_ValueToPrintableString(cx, - ATOM_KEY(pn2->pn_atom))); + ReportCompileErrorNumber(cx, &jsc->tokenStream, pn2, + JSREPORT_ERROR, JSMSG_DUPLICATE_XML_ATTR, + js_ValueToPrintableString(cx, + ATOM_KEY(pn2->pn_atom))); goto fail; } } @@ -1687,11 +1639,10 @@ ParseNodeToXML(JSCompiler *jsc, JSParseNode *pn, xml_class = JSXML_CLASS_COMMENT; } else if (pn->pn_type == TOK_XMLPI) { if (IS_XML(str)) { - js_ReportCompileErrorNumber(cx, &jsc->tokenStream, pn, - JSREPORT_ERROR, - JSMSG_RESERVED_ID, - js_ValueToPrintableString(cx, - STRING_TO_JSVAL(str))); + ReportCompileErrorNumber(cx, &jsc->tokenStream, pn, + JSREPORT_ERROR, JSMSG_RESERVED_ID, + js_ValueToPrintableString(cx, + STRING_TO_JSVAL(str))); goto fail; } @@ -1736,8 +1687,7 @@ skip_child: #undef PN2X_SKIP_CHILD syntax: - js_ReportCompileErrorNumber(cx, &jsc->tokenStream, pn, JSREPORT_ERROR, - JSMSG_BAD_XML_MARKUP); + ReportCompileErrorNumber(cx, &jsc->tokenStream, pn, JSREPORT_ERROR, JSMSG_BAD_XML_MARKUP); fail: js_LeaveLocalRootScope(cx); return NULL; @@ -3848,12 +3798,10 @@ GetProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp) JSObject *kidobj, *listobj; JSObject *nameqn; 
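// The recurring rewrite in this file, visible just below: manual
// JS_PUSH_TEMP_ROOT/JS_POP_TEMP_ROOT pairs, which forced single-exit
// "goto out" control flow, become scoped RAII rooters (AutoValueRooter,
// AutoArrayRooter, AutoObjectRooter) whose destructors unroot on every
// return path. A compilable sketch of the idiom, with stand-in types
// rather than the real SpiderMonkey classes:
#include <stddef.h>

struct Ctx;                                   /* stand-in for JSContext    */
void pushTempRoots(Ctx *, void **, size_t);   /* stand-ins for the push/   */
void popTempRoots(Ctx *);                     /* pop temp-root machinery   */

class ScopedRooter {
  public:
    ScopedRooter(Ctx *cx, void **vec, size_t len) : cx_(cx) {
        pushTempRoots(cx_, vec, len);         /* was JS_PUSH_TEMP_ROOT     */
    }
    ~ScopedRooter() {
        popTempRoots(cx_);                    /* was JS_POP_TEMP_ROOT; now
                                                 automatic on early return */
    }
  private:
    Ctx *cx_;
    ScopedRooter(const ScopedRooter &);           /* non-copyable (C++03)  */
    ScopedRooter &operator=(const ScopedRooter &);
};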
jsid funid; - jsval roots[2]; - JSTempValueRooter tvr; xml = (JSXML *) JS_GetInstancePrivate(cx, obj, &js_XMLClass, NULL); if (!xml) - return JS_TRUE; + return true; if (js_IdIsIndex(id, &index)) { if (xml->xml_class != JSXML_CLASS_LIST) { @@ -3870,18 +3818,18 @@ GetProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp) kid = XMLARRAY_MEMBER(&xml->xml_kids, index, JSXML); if (!kid) { *vp = JSVAL_VOID; - return JS_TRUE; + return true; } kidobj = js_GetXMLObject(cx, kid); if (!kidobj) - return JS_FALSE; + return false; *vp = OBJECT_TO_JSVAL(kidobj); } else { *vp = JSVAL_VOID; } } - return JS_TRUE; + return true; } /* @@ -3889,37 +3837,34 @@ GetProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp) */ nameqn = ToXMLName(cx, id, &funid); if (!nameqn) - return JS_FALSE; + return false; if (funid) return GetXMLFunction(cx, obj, funid, vp); - roots[0] = OBJECT_TO_JSVAL(nameqn); - JS_PUSH_TEMP_ROOT(cx, 1, roots, &tvr); + jsval roots[2] = { OBJECT_TO_JSVAL(nameqn), JSVAL_NULL }; + AutoArrayRooter tvr(cx, JS_ARRAY_LENGTH(roots), roots); listobj = js_NewXMLObject(cx, JSXML_CLASS_LIST); - if (listobj) { - roots[1] = OBJECT_TO_JSVAL(listobj); - tvr.count++; + if (!listobj) + return false; - list = (JSXML *) listobj->getPrivate(); - if (!GetNamedProperty(cx, xml, nameqn, list)) { - listobj = NULL; - } else { - /* - * Erratum: ECMA-357 9.1.1.1 misses that [[Append]] sets the - * given list's [[TargetProperty]] to the property that is being - * appended. This means that any use of the internal [[Get]] - * property returns a list which, when used by e.g. [[Insert]] - * duplicates the last element matched by id. See bug 336921. - */ - list->xml_target = xml; - list->xml_targetprop = nameqn; - *vp = OBJECT_TO_JSVAL(listobj); - } - } + roots[1] = OBJECT_TO_JSVAL(listobj); - JS_POP_TEMP_ROOT(cx, &tvr); - return listobj != NULL; + list = (JSXML *) listobj->getPrivate(); + if (!GetNamedProperty(cx, xml, nameqn, list)) + return false; + + /* + * Erratum: ECMA-357 9.1.1.1 misses that [[Append]] sets the + * given list's [[TargetProperty]] to the property that is being + * appended. This means that any use of the internal [[Get]] + * property returns a list which, when used by e.g. [[Insert]] + * duplicates the last element matched by id. See bug 336921. 
+ */ + list->xml_target = xml; + list->xml_targetprop = nameqn; + *vp = OBJECT_TO_JSVAL(listobj); + return true; } static JSXML * @@ -3963,8 +3908,6 @@ PutProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp) { JSBool ok, primitiveAssign; enum { OBJ_ROOT, ID_ROOT, VAL_ROOT }; - jsval roots[3]; - JSTempValueRooter tvr; JSXML *xml, *vxml, *rxml, *kid, *attr, *parent, *copy, *kid2, *match; JSObject *vobj, *nameobj, *attrobj, *parentobj, *kidobj, *copyobj; JSObject *targetprop, *nameqn, *attrqn; @@ -3993,11 +3936,13 @@ PutProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp) ok = js_EnterLocalRootScope(cx); if (!ok) return JS_FALSE; + MUST_FLOW_THROUGH("out"); + jsval roots[3]; roots[OBJ_ROOT] = OBJECT_TO_JSVAL(obj); roots[ID_ROOT] = id; roots[VAL_ROOT] = *vp; - JS_PUSH_TEMP_ROOT(cx, 3, roots, &tvr); + AutoArrayRooter tvr(cx, JS_ARRAY_LENGTH(roots), roots); if (js_IdIsIndex(id, &index)) { if (xml->xml_class != JSXML_CLASS_LIST) { @@ -4584,7 +4529,6 @@ PutProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp) } out: - JS_POP_TEMP_ROOT(cx, &tvr); js_LeaveLocalRootScope(cx); return ok; @@ -4716,38 +4660,34 @@ HasFunctionProperty(JSContext *cx, JSObject *obj, jsid funid, JSBool *found) JSObject *pobj; JSProperty *prop; JSXML *xml; - JSTempValueRooter tvr; - JSBool ok; JS_ASSERT(OBJ_GET_CLASS(cx, obj) == &js_XMLClass); if (!js_LookupProperty(cx, obj, funid, &pobj, &prop)) - return JS_FALSE; + return false; if (prop) { pobj->dropProperty(cx, prop); } else { xml = (JSXML *) obj->getPrivate(); if (HasSimpleContent(xml)) { + AutoObjectRooter tvr(cx); + /* * Search in String.prototype to set found whenever * GetXMLFunction returns existing function. */ - JS_PUSH_TEMP_ROOT_OBJECT(cx, NULL, &tvr); - ok = js_GetClassPrototype(cx, NULL, JSProto_String, - &tvr.u.object); - JS_ASSERT(tvr.u.object); - if (ok) { - ok = js_LookupProperty(cx, tvr.u.object, funid, &pobj, &prop); - if (ok && prop) - pobj->dropProperty(cx, prop); - } - JS_POP_TEMP_ROOT(cx, &tvr); - if (!ok) - return JS_FALSE; + if (!js_GetClassPrototype(cx, NULL, JSProto_String, tvr.addr())) + return false; + + JS_ASSERT(tvr.object()); + if (!js_LookupProperty(cx, tvr.object(), funid, &pobj, &prop)) + return false; + if (prop) + pobj->dropProperty(cx, prop); } } *found = (prop != NULL); - return JS_TRUE; + return true; } /* ECMA-357 9.1.1.6 XML [[HasProperty]] and 9.2.1.5 XMLList [[HasProperty]]. 
*/ @@ -5014,18 +4954,22 @@ xml_enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op, switch (enum_op) { case JSENUMERATE_INIT: if (length == 0) { - cursor = NULL; + *statep = JSVAL_ZERO; } else { cursor = cx->create(&xml->xml_kids); if (!cursor) return JS_FALSE; + *statep = PRIVATE_TO_JSVAL(cursor); } - *statep = PRIVATE_TO_JSVAL(cursor); if (idp) *idp = INT_TO_JSID(length); break; case JSENUMERATE_NEXT: + if (*statep == JSVAL_ZERO) { + *statep = JSVAL_NULL; + break; + } cursor = (JSXMLArrayCursor *) JSVAL_TO_PRIVATE(*statep); if (cursor && cursor->array && (index = cursor->index) < length) { *idp = INT_TO_JSID(index); @@ -5035,9 +4979,11 @@ xml_enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op, /* FALL THROUGH */ case JSENUMERATE_DESTROY: - cursor = (JSXMLArrayCursor *) JSVAL_TO_PRIVATE(*statep); - if (cursor) - cx->destroy(cursor); + if (*statep != JSVAL_ZERO) { + cursor = (JSXMLArrayCursor *) JSVAL_TO_PRIVATE(*statep); + if (cursor) + cx->destroy(cursor); + } *statep = JSVAL_NULL; break; } @@ -5126,7 +5072,7 @@ js_GetXMLMethod(JSContext *cx, JSObject *obj, jsid id, jsval *vp) * As our callers have a bad habit of passing a pointer to an unrooted * local value as vp, we use a proper root here. */ - JSAutoTempValueRooter tvr(cx); + AutoValueRooter tvr(cx); JSBool ok = GetXMLFunction(cx, obj, id, tvr.addr()); *vp = tvr.value(); return ok; @@ -5149,20 +5095,22 @@ js_EnumerateXMLValues(JSContext *cx, JSObject *obj, JSIterateOp enum_op, switch (enum_op) { case JSENUMERATE_INIT: if (length == 0) { - cursor = NULL; + *statep = JSVAL_ZERO; } else { cursor = cx->create(&xml->xml_kids); if (!cursor) return JS_FALSE; + *statep = PRIVATE_TO_JSVAL(cursor); } - *statep = PRIVATE_TO_JSVAL(cursor); - if (idp) - *idp = INT_TO_JSID(length); - if (vp) - *vp = JSVAL_VOID; + JS_ASSERT(!idp); + JS_ASSERT(!vp); break; case JSENUMERATE_NEXT: + if (*statep == JSVAL_ZERO) { + *statep = JSVAL_NULL; + break; + } cursor = (JSXMLArrayCursor *) JSVAL_TO_PRIVATE(*statep); if (cursor && cursor->array && (index = cursor->index) < length) { while (!(kid = XMLARRAY_MEMBER(&xml->xml_kids, index, JSXML))) { @@ -5181,10 +5129,12 @@ js_EnumerateXMLValues(JSContext *cx, JSObject *obj, JSIterateOp enum_op, /* FALL THROUGH */ case JSENUMERATE_DESTROY: - cursor = (JSXMLArrayCursor *) JSVAL_TO_PRIVATE(*statep); - if (cursor) { - destroy: - cx->destroy(cursor); + if (*statep != JSVAL_ZERO) { + cursor = (JSXMLArrayCursor *) JSVAL_TO_PRIVATE(*statep); + if (cursor) { + destroy: + cx->destroy(cursor); + } } *statep = JSVAL_NULL; break; @@ -5311,7 +5261,7 @@ out: return ok; } -/* Use NULL for objectMap so XML objects satisfy OBJ_IS_NATIVE tests. */ +/* Use NULL for objectMap so XML objects satisfy obj->isNative() tests. 
*/ JS_FRIEND_DATA(JSObjectOps) js_XMLObjectOps = { NULL, xml_lookupProperty, xml_defineProperty, @@ -5477,7 +5427,7 @@ xml_attributes(JSContext *cx, uintN argc, jsval *vp) return JS_FALSE; name = OBJECT_TO_JSVAL(qn); - JSAutoTempValueRooter tvr(cx, name); + AutoValueRooter tvr(cx, name); return GetProperty(cx, JS_THIS_OBJECT(cx, vp), name, vp); } @@ -5883,81 +5833,6 @@ xml_hasSimpleContent(JSContext *cx, uintN argc, jsval *vp) return JS_TRUE; } -typedef struct JSTempRootedNSArray { - JSTempValueRooter tvr; - JSXMLArray array; - jsval value; /* extra root for temporaries */ -} JSTempRootedNSArray; - -static void -TraceObjectVector(JSTracer *trc, JSObject **vec, uint32 len) -{ - uint32 i; - JSObject *obj; - - for (i = 0; i < len; i++) { - obj = vec[i]; - if (obj) { - JS_SET_TRACING_INDEX(trc, "vector", i); - js_CallGCMarker(trc, obj, JSTRACE_OBJECT); - } - } -} - -static void -trace_temp_ns_array(JSTracer *trc, JSTempValueRooter *tvr) -{ - JSTempRootedNSArray *tmp = (JSTempRootedNSArray *)tvr; - - TraceObjectVector(trc, - (JSObject **) tmp->array.vector, - tmp->array.length); - XMLArrayCursorTrace(trc, tmp->array.cursors); - JS_CALL_VALUE_TRACER(trc, tmp->value, "temp_ns_array_value"); -} - -static void -InitTempNSArray(JSContext *cx, JSTempRootedNSArray *tmp) -{ - XMLArrayInit(cx, &tmp->array, 0); - tmp->value = JSVAL_NULL; - JS_PUSH_TEMP_ROOT_TRACE(cx, trace_temp_ns_array, &tmp->tvr); -} - -static void -FinishTempNSArray(JSContext *cx, JSTempRootedNSArray *tmp) -{ - JS_ASSERT(tmp->tvr.u.trace == trace_temp_ns_array); - JS_POP_TEMP_ROOT(cx, &tmp->tvr); - XMLArrayFinish(cx, &tmp->array); -} - -/* - * Populate a new JS array with elements of JSTempRootedNSArray.array and - * place the result into rval. rval must point to a rooted location. - */ -static JSBool -TempNSArrayToJSArray(JSContext *cx, JSTempRootedNSArray *tmp, jsval *rval) -{ - JSObject *arrayobj; - uint32 i, n; - JSObject *ns; - - arrayobj = js_NewArrayObject(cx, 0, NULL); - if (!arrayobj) - return JS_FALSE; - *rval = OBJECT_TO_JSVAL(arrayobj); - for (i = 0, n = tmp->array.length; i < n; i++) { - ns = XMLARRAY_MEMBER(&tmp->array, i, JSObject); - if (!ns) - continue; - tmp->value = OBJECT_TO_JSVAL(ns); - if (!arrayobj->setProperty(cx, INT_TO_JSID(i), &tmp->value)) - return JS_FALSE; - } - return JS_TRUE; -} - static JSBool FindInScopeNamespaces(JSContext *cx, JSXML *xml, JSXMLArray *nsarray) { @@ -5999,19 +5874,48 @@ FindInScopeNamespaces(JSContext *cx, JSXML *xml, JSXMLArray *nsarray) return JS_TRUE; } +class AutoNamespaceArray : public js::AutoNamespaces { + public: + AutoNamespaceArray(JSContext *cx) + : js::AutoNamespaces(cx) + { + XMLArrayInit(cx, &array, 0); + } + + ~AutoNamespaceArray() { + XMLArrayFinish(context, &array); + } + + /* + * Populate a new JS array with elements of array and place the result into + * rval. rval must point to a rooted location. 
+ */ + bool toJSArray(jsval *rval) { + JSObject *arrayobj = js_NewArrayObject(context, 0, NULL); + if (!arrayobj) + return false; + *rval = OBJECT_TO_JSVAL(arrayobj); + + AutoValueRooter tvr(context); + for (uint32 i = 0, n = array.length; i < n; i++) { + JSObject *ns = XMLARRAY_MEMBER(&array, i, JSObject); + if (!ns) + continue; + *tvr.addr() = OBJECT_TO_JSVAL(ns); + if (!arrayobj->setProperty(context, INT_TO_JSID(i), tvr.addr())) + return false; + } + return true; + } +}; + static JSBool xml_inScopeNamespaces(JSContext *cx, uintN argc, jsval *vp) { - JSTempRootedNSArray namespaces; - JSBool ok; - NON_LIST_XML_METHOD_PROLOG; - InitTempNSArray(cx, &namespaces); - ok = FindInScopeNamespaces(cx, xml, &namespaces.array) && - TempNSArrayToJSArray(cx, &namespaces, vp); - FinishTempNSArray(cx, &namespaces); - return ok; + AutoNamespaceArray namespaces(cx); + return FindInScopeNamespaces(cx, xml, &namespaces.array) && namespaces.toJSArray(vp); } static JSBool @@ -6111,15 +6015,13 @@ static JSBool xml_namespace(JSContext *cx, uintN argc, jsval *vp) { JSString *prefix, *nsprefix; - JSTempRootedNSArray inScopeNSes; - JSBool ok; jsuint i, length; JSObject *ns; NON_LIST_XML_METHOD_PROLOG; if (argc == 0 && !JSXML_HAS_NAME(xml)) { *vp = JSVAL_NULL; - return JS_TRUE; + return true; } if (argc == 0) { @@ -6127,22 +6029,18 @@ xml_namespace(JSContext *cx, uintN argc, jsval *vp) } else { prefix = js_ValueToString(cx, vp[2]); if (!prefix) - return JS_FALSE; + return false; vp[2] = STRING_TO_JSVAL(prefix); /* local root */ } - InitTempNSArray(cx, &inScopeNSes); - MUST_FLOW_THROUGH("out"); - ok = FindInScopeNamespaces(cx, xml, &inScopeNSes.array); - if (!ok) - goto out; + AutoNamespaceArray inScopeNSes(cx); + if (!FindInScopeNamespaces(cx, xml, &inScopeNSes.array)) + return false; if (!prefix) { ns = GetNamespace(cx, xml->name, &inScopeNSes.array); - if (!ns) { - ok = JS_FALSE; - goto out; - } + if (!ns) + return false; } else { ns = NULL; for (i = 0, length = inScopeNSes.array.length; i < length; i++) { @@ -6157,64 +6055,44 @@ xml_namespace(JSContext *cx, uintN argc, jsval *vp) } *vp = (!ns) ? JSVAL_VOID : OBJECT_TO_JSVAL(ns); - - out: - FinishTempNSArray(cx, &inScopeNSes); - return JS_TRUE; + return true; } static JSBool xml_namespaceDeclarations(JSContext *cx, uintN argc, jsval *vp) { - JSBool ok; - JSTempRootedNSArray ancestors, declared; - JSXML *yml; - uint32 i, n; - JSObject *ns; - NON_LIST_XML_METHOD_PROLOG; if (JSXML_HAS_VALUE(xml)) - return JS_TRUE; + return true; - /* From here, control flow must goto out to finish these arrays. 
*/ - ok = JS_TRUE; - InitTempNSArray(cx, &ancestors); - InitTempNSArray(cx, &declared); - yml = xml; + AutoNamespaceArray ancestors(cx); + AutoNamespaceArray declared(cx); + JSXML *yml = xml; while ((yml = yml->parent) != NULL) { JS_ASSERT(yml->xml_class == JSXML_CLASS_ELEMENT); - for (i = 0, n = yml->xml_namespaces.length; i < n; i++) { - ns = XMLARRAY_MEMBER(&yml->xml_namespaces, i, JSObject); - if (ns && - !XMLARRAY_HAS_MEMBER(&ancestors.array, ns, namespace_match)) { - ok = XMLARRAY_APPEND(cx, &ancestors.array, ns); - if (!ok) - goto out; + for (uint32 i = 0, n = yml->xml_namespaces.length; i < n; i++) { + JSObject *ns = XMLARRAY_MEMBER(&yml->xml_namespaces, i, JSObject); + if (ns && !XMLARRAY_HAS_MEMBER(&ancestors.array, ns, namespace_match)) { + if (!XMLARRAY_APPEND(cx, &ancestors.array, ns)) + return false; } } } - for (i = 0, n = xml->xml_namespaces.length; i < n; i++) { - ns = XMLARRAY_MEMBER(&xml->xml_namespaces, i, JSObject); + for (uint32 i = 0, n = xml->xml_namespaces.length; i < n; i++) { + JSObject *ns = XMLARRAY_MEMBER(&xml->xml_namespaces, i, JSObject); if (!ns) continue; if (!IsDeclared(ns)) continue; if (!XMLARRAY_HAS_MEMBER(&ancestors.array, ns, namespace_match)) { - ok = XMLARRAY_APPEND(cx, &declared.array, ns); - if (!ok) - goto out; + if (!XMLARRAY_APPEND(cx, &declared.array, ns)) + return false; } } - ok = TempNSArrayToJSArray(cx, &declared, vp); - -out: - /* Finishing must be in reverse order of initialization to follow LIFO. */ - FinishTempNSArray(cx, &declared); - FinishTempNSArray(cx, &ancestors); - return ok; + return declared.toJSArray(vp); } static const char js_attribute_str[] = "attribute"; @@ -7247,9 +7125,9 @@ js_TraceXML(JSTracer *trc, JSXML *xml) if (xml->xml_targetprop) JS_CALL_OBJECT_TRACER(trc, xml->xml_targetprop, "targetprop"); } else { - TraceObjectVector(trc, - (JSObject **) xml->xml_namespaces.vector, - xml->xml_namespaces.length); + js::TraceObjectVector(trc, + (JSObject **) xml->xml_namespaces.vector, + xml->xml_namespaces.length); XMLArrayCursorTrace(trc, xml->xml_namespaces.cursors); if (IS_GC_MARKING_TRACER(trc)) XMLArrayTrim(&xml->xml_namespaces); @@ -7282,17 +7160,12 @@ js_FinalizeXML(JSContext *cx, JSXML *xml) JSObject * js_NewXMLObject(JSContext *cx, JSXMLClass xml_class) { - JSXML *xml; - JSObject *obj; - JSTempValueRooter tvr; - - xml = js_NewXML(cx, xml_class); + JSXML *xml = js_NewXML(cx, xml_class); if (!xml) return NULL; - JS_PUSH_TEMP_ROOT_XML(cx, xml, &tvr); - obj = js_GetXMLObject(cx, xml); - JS_POP_TEMP_ROOT(cx, &tvr); - return obj; + + AutoXMLRooter root(cx, xml); + return js_GetXMLObject(cx, xml); } static JSObject * @@ -7412,7 +7285,7 @@ js_InitXMLClass(JSContext *cx, JSObject *obj) JS_ASSERT(prop); sprop = (JSScopeProperty *) prop; JS_ASSERT(SPROP_HAS_VALID_SLOT(sprop, OBJ_SCOPE(pobj))); - cval = OBJ_GET_SLOT(cx, pobj, sprop->slot); + cval = pobj->getSlotMT(cx, sprop->slot); pobj->dropProperty(cx, prop); JS_ASSERT(VALUE_IS_FUNCTION(cx, cval)); @@ -7618,7 +7491,7 @@ js_AddAttributePart(JSContext *cx, JSBool isName, JSString *str, JSString *str2) * Reallocating str (because we know it has no other references) * requires purging any deflated string cached for it. 
*/ - js_PurgeDeflatedStringCache(cx->runtime, str); + cx->runtime->deflatedStringCache->remove(str); } str2->getCharsAndLength(chars2, len2); @@ -7809,48 +7682,35 @@ js_FindXMLProperty(JSContext *cx, jsval nameval, JSObject **objp, jsid *idp) static JSBool GetXMLFunction(JSContext *cx, JSObject *obj, jsid id, jsval *vp) { - JSObject *target; - JSXML *xml; - JSTempValueRooter tvr; - JSBool ok; - JS_ASSERT(OBJECT_IS_XML(cx, obj)); - MUST_FLOW_THROUGH("out"); - JS_PUSH_TEMP_ROOT_OBJECT(cx, NULL, &tvr); - /* * See comments before xml_lookupProperty about the need for the proto * chain lookup. */ - target = obj; + JSObject *target = obj; + AutoObjectRooter tvr(cx); for (;;) { - ok = js_GetProperty(cx, target, id, vp); - if (!ok) - goto out; - if (VALUE_IS_FUNCTION(cx, *vp)) { - ok = JS_TRUE; - goto out; - } + if (!js_GetProperty(cx, target, id, vp)) + return false; + if (VALUE_IS_FUNCTION(cx, *vp)) + return true; target = target->getProto(); if (target == NULL) break; - tvr.u.object = target; + tvr.setObject(target); } - xml = (JSXML *) obj->getPrivate(); - if (HasSimpleContent(xml)) { - /* Search in String.prototype to implement 11.2.2.1 Step 3(f). */ - ok = js_GetClassPrototype(cx, NULL, JSProto_String, &tvr.u.object); - if (!ok) - goto out; - JS_ASSERT(tvr.u.object); - ok = tvr.u.object->getProperty(cx, id, vp); - } + JSXML *xml = (JSXML *) obj->getPrivate(); + if (!HasSimpleContent(xml)) + return true; - out: - JS_POP_TEMP_ROOT(cx, &tvr); - return ok; + /* Search in String.prototype to implement 11.2.2.1 Step 3(f). */ + if (!js_GetClassPrototype(cx, NULL, JSProto_String, tvr.addr())) + return false; + + JS_ASSERT(tvr.object()); + return tvr.object()->getProperty(cx, id, vp); } static JSXML * diff --git a/js/src/jsxml.h b/js/src/jsxml.h index cb7198f5986..d5b20b0c881 100644 --- a/js/src/jsxml.h +++ b/js/src/jsxml.h @@ -63,6 +63,58 @@ struct JSXMLArray { JSXMLArrayCursor *cursors; }; +struct JSXMLArrayCursor +{ + JSXMLArray *array; + uint32 index; + JSXMLArrayCursor *next; + JSXMLArrayCursor **prevp; + void *root; + + JSXMLArrayCursor(JSXMLArray *array) + : array(array), index(0), next(array->cursors), prevp(&array->cursors), + root(NULL) + { + if (next) + next->prevp = &next; + array->cursors = this; + } + + ~JSXMLArrayCursor() { disconnect(); } + + void disconnect() { + if (!array) + return; + if (next) + next->prevp = prevp; + *prevp = next; + array = NULL; + } + + void *getNext() { + if (!array || index >= array->length) + return NULL; + return root = array->vector[index++]; + } + + void *getCurrent() { + if (!array || index >= array->length) + return NULL; + return root = array->vector[index]; + } + + void trace(JSTracer *trc) { +#ifdef DEBUG + size_t index = 0; +#endif + for (JSXMLArrayCursor *cursor = this; cursor; cursor = cursor->next) { + void *root = cursor->root; + JS_SET_TRACING_INDEX(trc, "cursor_root", index++); + js_CallValueTracerIfGCThing(trc, jsval(root)); + } + } +}; + #define JSXML_PRESET_CAPACITY JS_BIT(31) #define JSXML_CAPACITY_MASK JS_BITMASK(31) #define JSXML_CAPACITY(array) ((array)->capacity & JSXML_CAPACITY_MASK) diff --git a/js/src/lirasm/LInsClasses.tbl b/js/src/lirasm/LInsClasses.tbl index 207dbe63a3e..5ef5db6773f 100644 --- a/js/src/lirasm/LInsClasses.tbl +++ b/js/src/lirasm/LInsClasses.tbl @@ -124,7 +124,7 @@ CL_64( LCALL_Q_Q2, 1) // 95% LIR_qcall CL_64( LCALL_Q_Q7, 1) // 96% LIR_qcall CL___( LCALL_F_F3, 1) // 97% LIR_fcall CL___( LCALL_F_F8, 1) // 98% LIR_fcall -CL_64( LCALL_N_IQF, 1) // 99% LIR_icall or LIR_qcall +CL_64( LCALL_V_IQF, 1) // 99% LIR_icall or 
LIR_qcall CL___( LLABEL, 1) //100% LIR_label diff --git a/js/src/lirasm/lirasm.cpp b/js/src/lirasm/lirasm.cpp index ea6da1fbfee..72720ffa7b5 100644 --- a/js/src/lirasm/lirasm.cpp +++ b/js/src/lirasm/lirasm.cpp @@ -89,43 +89,40 @@ struct LasmSideExit : public SideExit { /* LIR SPI implementation */ -void -nanojit::StackFilter::getTops(LIns*, int& spTop, int& rpTop) +int +nanojit::StackFilter::getTop(LIns*) { - spTop = 0; - rpTop = 0; + return 0; } #if defined NJ_VERBOSE void -nanojit::LirNameMap::formatGuard(LIns *i, char *out) +nanojit::LInsPrinter::formatGuard(InsBuf *buf, LIns *ins) { - LasmSideExit *x; - - x = (LasmSideExit *)i->record()->exit; - sprintf(out, + RefBuf b1, b2; + LasmSideExit *x = (LasmSideExit *)ins->record()->exit; + VMPI_snprintf(buf->buf, buf->len, "%s: %s %s -> line=%ld (GuardID=%03d)", - formatRef(i), - lirNames[i->opcode()], - i->oprnd1() ? formatRef(i->oprnd1()) : "", + formatRef(&b1, ins), + lirNames[ins->opcode()], + ins->oprnd1() ? formatRef(&b2, ins->oprnd1()) : "", (long)x->line, - i->record()->profGuardID); + ins->record()->profGuardID); } void -nanojit::LirNameMap::formatGuardXov(LIns *i, char *out) +nanojit::LInsPrinter::formatGuardXov(InsBuf *buf, LIns *ins) { - LasmSideExit *x; - - x = (LasmSideExit *)i->record()->exit; - sprintf(out, + RefBuf b1, b2, b3; + LasmSideExit *x = (LasmSideExit *)ins->record()->exit; + VMPI_snprintf(buf->buf, buf->len, "%s = %s %s, %s -> line=%ld (GuardID=%03d)", - formatRef(i), - lirNames[i->opcode()], - formatRef(i->oprnd1()), - formatRef(i->oprnd2()), + formatRef(&b1, ins), + lirNames[ins->opcode()], + formatRef(&b2, ins->oprnd1()), + formatRef(&b3, ins->oprnd2()), (long)x->line, - i->record()->profGuardID); + ins->record()->profGuardID); } #endif @@ -157,12 +154,15 @@ enum ReturnType { #define FN(name, args) \ {#name, CI(name, args)} -const int I32 = nanojit::ARGSIZE_LO; +const ArgType I32 = nanojit::ARGTYPE_LO; #ifdef NANOJIT_64BIT -const int I64 = nanojit::ARGSIZE_Q; +const ArgType I64 = nanojit::ARGTYPE_Q; #endif -const int F64 = nanojit::ARGSIZE_F; -const int PTR = nanojit::ARGSIZE_P; +const ArgType F64 = nanojit::ARGTYPE_F; +const ArgType PTR = nanojit::ARGTYPE_P; +const ArgType WRD = nanojit::ARGTYPE_P; +const ArgType VD = nanojit::ARGTYPE_V; // "VOID" causes problems on Windows! + enum LirTokenType { NAME, NUMBER, PUNCT, NEWLINE @@ -270,7 +270,6 @@ public: bool lookupFunction(const string &name, CallInfo *&ci); LirBuffer *mLirbuf; - verbose_only( LabelMap *mLabelMap; ) LogControl mLogc; avmplus::AvmCore mCore; Allocator mAlloc; @@ -346,8 +345,8 @@ private: void endFragment(); }; -// Meaning: arg 'm' of 'n' has size 'sz'. -static int argMask(int sz, int m, int n) +// Meaning: arg 'm' of 'n' has type 'ty'. +static int argMask(int ty, int m, int n) { // Order examples, from MSB to LSB: // - 3 args: 000 | 000 | 000 | 000 | 000 | arg1| arg2| arg3| ret @@ -355,13 +354,13 @@ static int argMask(int sz, int m, int n) // If the mask encoding reversed the arg order the 'n' parameter wouldn't // be necessary, as argN would always be in the same place in the // bitfield. - return sz << ((1 + n - m) * ARGSIZE_SHIFT); + return ty << ((1 + n - m) * ARGTYPE_SHIFT); } -// Return value has size 'sz'. -static int retMask(int sz) +// Return value has type 'ty'. 
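// Worked example of the signature encoding built by argMask/retMask (the
// field width below is illustrative; the real ARGTYPE_SHIFT may differ).
// The return type sits in the lowest field and arg1 in the highest, so for
// the 3-arg f_V_IQF call defined later in this patch:
//
//     typesig = argMask(I32, 1, 3) | argMask(I64, 2, 3)
//             | argMask(F64, 3, 3) | retMask(V)
//
// which a decoder can consume one field at a time from the bottom.
enum { DEMO_ARGTYPE_SHIFT = 2 };              /* stand-in field width */

static int demoArgMask(int ty, int m, int n)
{
    /* arg 'm' of 'n': arg1 lands in the highest field, argN just above ret */
    return ty << ((1 + n - m) * DEMO_ARGTYPE_SHIFT);
}

static int demoRetMask(int ty)
{
    return ty;                                /* return type: lowest field */
}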
+static int retMask(int ty) { - return sz; + return ty; } // 'sin' is overloaded on some platforms, so taking its address @@ -375,8 +374,8 @@ double sinFn(double d) { Function functions[] = { FN(puts, argMask(PTR, 1, 1) | retMask(I32)), FN(sin, argMask(F64, 1, 1) | retMask(F64)), - FN(malloc, argMask(PTR, 1, 1) | retMask(PTR)), - FN(free, argMask(PTR, 1, 1) | retMask(I32)) + FN(malloc, argMask(WRD, 1, 1) | retMask(PTR)), + FN(free, argMask(PTR, 1, 1) | retMask(VD)) }; template out @@ -519,13 +518,14 @@ FragmentAssembler::FragmentAssembler(Lirasm &parent, const string &fragmentName, mLir = mBufWriter = new LirBufWriter(mParent.mLirbuf, nanojit::AvmCore::config); #ifdef DEBUG if (optimize) { // don't re-validate if no optimization has taken place - mLir = mValidateWriter2 = new ValidateWriter(mLir, "end of writer pipeline"); + mLir = mValidateWriter2 = + new ValidateWriter(mLir, mFragment->lirbuf->printer, "end of writer pipeline"); } #endif #ifdef DEBUG if (mParent.mVerbose) { mLir = mVerboseWriter = new VerboseWriter(mParent.mAlloc, mLir, - mParent.mLirbuf->names, + mParent.mLirbuf->printer, &mParent.mLogc); } #endif @@ -541,7 +541,8 @@ FragmentAssembler::FragmentAssembler(Lirasm &parent, const string &fragmentName, mLir = mExprFilter = new ExprFilter(mLir); } #ifdef DEBUG - mLir = mValidateWriter1 = new ValidateWriter(mLir, "start of writer pipeline"); + mLir = mValidateWriter1 = + new ValidateWriter(mLir, mFragment->lirbuf->printer, "start of writer pipeline"); #endif mReturnTypeBits = 0; @@ -635,7 +636,7 @@ FragmentAssembler::assemble_load() mTokens[1].find_first_of("0123456789") == 0) { return mLir->insLoad(mOpcode, ref(mTokens[0]), - imm(mTokens[1])); + imm(mTokens[1]), ACC_LOAD_ANY); } bad("immediate offset required for load"); return NULL; // not reached @@ -698,28 +699,28 @@ FragmentAssembler::assemble_call(const string &op) ci->_abi = _abi; - ci->_argtypes = 0; + ci->_typesig = 0; size_t argc = mTokens.size(); for (size_t i = 0; i < argc; ++i) { args[i] = ref(mTokens[mTokens.size() - (i+1)]); - if (args[i]->isF64()) ty = ARGSIZE_F; + if (args[i]->isF64()) ty = ARGTYPE_F; #ifdef NANOJIT_64BIT - else if (args[i]->isI64()) ty = ARGSIZE_Q; + else if (args[i]->isI64()) ty = ARGTYPE_Q; #endif - else ty = ARGSIZE_I; + else ty = ARGTYPE_I; // Nb: i+1 because argMask() uses 1-based arg counting. - ci->_argtypes |= argMask(ty, i+1, argc); + ci->_typesig |= argMask(ty, i+1, argc); } // Select return type from opcode. 
ty = 0; - if (mOpcode == LIR_icall) ty = ARGSIZE_LO; - else if (mOpcode == LIR_fcall) ty = ARGSIZE_F; + if (mOpcode == LIR_icall) ty = ARGTYPE_LO; + else if (mOpcode == LIR_fcall) ty = ARGTYPE_F; #ifdef NANOJIT_64BIT - else if (mOpcode == LIR_qcall) ty = ARGSIZE_Q; + else if (mOpcode == LIR_qcall) ty = ARGTYPE_Q; #endif else nyi("callh"); - ci->_argtypes |= retMask(ty); + ci->_typesig |= retMask(ty); } return mLir->insCall(ci, args); @@ -806,7 +807,7 @@ FragmentAssembler::endFragment() mLir->insGuard(LIR_x, NULL, createGuardRecord(createSideExit())); mParent.mAssm.compile(mFragment, mParent.mAlloc, optimize - verbose_only(, mParent.mLabelMap)); + verbose_only(, mParent.mLirbuf->printer)); if (mParent.mAssm.error() != nanojit::None) { cerr << "error during assembly: "; @@ -960,7 +961,6 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, cons ref(mTokens[0])); break; - case LIR_addp: case LIR_add: case LIR_sub: case LIR_mul: @@ -1060,7 +1060,7 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, cons need(3); ins = mLir->insStore(mOpcode, ref(mTokens[0]), ref(mTokens[1]), - imm(mTokens[2])); + imm(mTokens[2]), ACC_STORE_ANY); break; #if NJ_EXPANDED_LOADSTORE_SUPPORTED @@ -1243,7 +1243,7 @@ static double f_F_F8(double a, double b, double c, double d, } #ifdef NANOJIT_64BIT -static void f_N_IQF(int32_t, uint64_t, double) +static void f_V_IQF(int32_t, uint64_t, double) { return; // no need to do anything } @@ -1291,10 +1291,10 @@ const CallInfo ci_F_F8 = CI(f_F_F8, argMask(F64, 1, 8) | retMask(F64)); #ifdef NANOJIT_64BIT -const CallInfo ci_N_IQF = CI(f_N_IQF, argMask(I32, 1, 3) | +const CallInfo ci_V_IQF = CI(f_V_IQF, argMask(I32, 1, 3) | argMask(I64, 2, 3) | argMask(F64, 3, 3) | - retMask(ARGSIZE_NONE)); + retMask(ARGTYPE_V)); #endif // Generate a random block containing nIns instructions, plus a few more @@ -1351,9 +1351,6 @@ FragmentAssembler::assembleRandomFragment(int nIns) vector I_II_ops; I_II_ops.push_back(LIR_add); -#ifndef NANOJIT_64BIT - I_II_ops.push_back(LIR_iaddp); -#endif I_II_ops.push_back(LIR_sub); I_II_ops.push_back(LIR_mul); #if defined NANOJIT_IA32 || defined NANOJIT_X64 @@ -1370,7 +1367,6 @@ FragmentAssembler::assembleRandomFragment(int nIns) #ifdef NANOJIT_64BIT vector Q_QQ_ops; Q_QQ_ops.push_back(LIR_qiadd); - Q_QQ_ops.push_back(LIR_qaddp); Q_QQ_ops.push_back(LIR_qiand); Q_QQ_ops.push_back(LIR_qior); Q_QQ_ops.push_back(LIR_qxor); @@ -1809,7 +1805,7 @@ FragmentAssembler::assembleRandomFragment(int nIns) vector Ms = rnd(2) ? M4s : M8ps; if (!Ms.empty()) { LIns* base = rndPick(Ms); - ins = mLir->insLoad(rndPick(I_loads), base, rndOffset32(base->size())); + ins = mLir->insLoad(rndPick(I_loads), base, rndOffset32(base->size()), ACC_LOAD_ANY); addOrReplace(Is, ins); n++; } @@ -1820,7 +1816,7 @@ FragmentAssembler::assembleRandomFragment(int nIns) case LLD_Q: if (!M8ps.empty()) { LIns* base = rndPick(M8ps); - ins = mLir->insLoad(rndPick(Q_loads), base, rndOffset64(base->size())); + ins = mLir->insLoad(rndPick(Q_loads), base, rndOffset64(base->size()), ACC_LOAD_ANY); addOrReplace(Qs, ins); n++; } @@ -1830,7 +1826,7 @@ FragmentAssembler::assembleRandomFragment(int nIns) case LLD_F: if (!M8ps.empty()) { LIns* base = rndPick(M8ps); - ins = mLir->insLoad(rndPick(F_loads), base, rndOffset64(base->size())); + ins = mLir->insLoad(rndPick(F_loads), base, rndOffset64(base->size()), ACC_LOAD_ANY); addOrReplace(Fs, ins); n++; } @@ -1840,7 +1836,7 @@ FragmentAssembler::assembleRandomFragment(int nIns) vector Ms = rnd(2) ? 
M4s : M8ps; if (!Ms.empty() && !Is.empty()) { LIns* base = rndPick(Ms); - mLir->insStorei(rndPick(Is), base, rndOffset32(base->size())); + mLir->insStorei(rndPick(Is), base, rndOffset32(base->size()), ACC_STORE_ANY); n++; } break; @@ -1850,7 +1846,7 @@ FragmentAssembler::assembleRandomFragment(int nIns) case LST_Q: if (!M8ps.empty() && !Qs.empty()) { LIns* base = rndPick(M8ps); - mLir->insStorei(rndPick(Qs), base, rndOffset64(base->size())); + mLir->insStorei(rndPick(Qs), base, rndOffset64(base->size()), ACC_STORE_ANY); n++; } break; @@ -1859,7 +1855,7 @@ FragmentAssembler::assembleRandomFragment(int nIns) case LST_F: if (!M8ps.empty() && !Fs.empty()) { LIns* base = rndPick(M8ps); - mLir->insStorei(rndPick(Fs), base, rndOffset64(base->size())); + mLir->insStorei(rndPick(Fs), base, rndOffset64(base->size()), ACC_STORE_ANY); n++; } break; @@ -1924,11 +1920,11 @@ FragmentAssembler::assembleRandomFragment(int nIns) break; #ifdef NANOJIT_64BIT - case LCALL_N_IQF: + case LCALL_V_IQF: if (!Is.empty() && !Qs.empty() && !Fs.empty()) { // Nb: args[] holds the args in reverse order... sigh. LIns* args[3] = { rndPick(Fs), rndPick(Qs), rndPick(Is) }; - ins = mLir->insCall(&ci_N_IQF, args); + ins = mLir->insCall(&ci_V_IQF, args); n++; } break; @@ -1969,8 +1965,7 @@ Lirasm::Lirasm(bool verbose) : #ifdef DEBUG if (mVerbose) { mLogc.lcbits = LC_ReadLIR | LC_Assembly | LC_RegAlloc | LC_Activation; - mLabelMap = new (mAlloc) LabelMap(mAlloc, &mLogc); - mLirbuf->names = new (mAlloc) LirNameMap(mAlloc, mLabelMap); + mLirbuf->printer = new (mAlloc) LInsPrinter(mAlloc); } #endif @@ -1980,8 +1975,9 @@ Lirasm::Lirasm(bool verbose) : #include "nanojit/LIRopcode.tbl" #undef OP___ - mOpMap["alloc"] = mOpMap[PTR_SIZE("ialloc", "qalloc")]; - mOpMap["param"] = mOpMap[PTR_SIZE("iparam", "qparam")]; + // XXX: could add more pointer-sized synonyms here + mOpMap["allocp"] = mOpMap[PTR_SIZE("allocl", "allocq")]; + mOpMap["paramp"] = mOpMap[PTR_SIZE("paraml", "paramq")]; } Lirasm::~Lirasm() diff --git a/js/src/lirasm/tests/add.in b/js/src/lirasm/tests/add.in index 48479f9eb71..35bfa38c53f 100644 --- a/js/src/lirasm/tests/add.in +++ b/js/src/lirasm/tests/add.in @@ -1,4 +1,4 @@ -two = int 2 -three = int 3 -res = add two three -ret res +two = imml 2 +three = imml 3 +res = addl two three +retl res diff --git a/js/src/lirasm/tests/addsub.in b/js/src/lirasm/tests/addsub.in index 2aec24d869d..9752a2fe433 100644 --- a/js/src/lirasm/tests/addsub.in +++ b/js/src/lirasm/tests/addsub.in @@ -1,5 +1,5 @@ -two = int 7 -three = int 3 -targ = add two three -res = sub targ three -ret res +two = imml 7 +three = imml 3 +targ = addl two three +res = subl targ three +retl res diff --git a/js/src/lirasm/tests/call1.in b/js/src/lirasm/tests/call1.in index a42e7396e17..d3e80f06e69 100644 --- a/js/src/lirasm/tests/call1.in +++ b/js/src/lirasm/tests/call1.in @@ -1,12 +1,12 @@ -ptr = alloc 8 -a = int 65 -sti a ptr 0 -b = int 66 -sti b ptr 1 -c = int 67 -sti c ptr 2 -zero = int 0 -sti zero ptr 3 -ss = icall puts cdecl ptr -nn = ge ss zero -ret nn +ptr = allocp 8 +a = imml 65 +stl a ptr 0 +b = imml 66 +stl b ptr 1 +c = imml 67 +stl c ptr 2 +zero = imml 0 +stl zero ptr 3 +ss = calll puts cdecl ptr +nn = gel ss zero +retl nn diff --git a/js/src/lirasm/tests/call2.in b/js/src/lirasm/tests/call2.in index b1a5834f5c9..39357e0ee02 100644 --- a/js/src/lirasm/tests/call2.in +++ b/js/src/lirasm/tests/call2.in @@ -1,5 +1,5 @@ -pi = float 3.14 -half = float 0.5 -halfpi = fmul pi half -res = fcall sin cdecl halfpi -fret res +pi = immd 3.14 +half = immd 0.5 +halfpi = 
muld pi half +res = calld sin cdecl halfpi +retd res diff --git a/js/src/lirasm/tests/f2i.in b/js/src/lirasm/tests/f2i.in index 58452caeebe..08aa61c6fa5 100644 --- a/js/src/lirasm/tests/f2i.in +++ b/js/src/lirasm/tests/f2i.in @@ -1,6 +1,6 @@ -a = alloc 8 -d = float 5.0 -stfi d a 0 -x = ldf a 0 -i = f2i x -ret i +a = allocp 8 +d = immd 5.0 +std d a 0 +x = ldd a 0 +i = d2l x +retl i diff --git a/js/src/lirasm/tests/floatingpoint.in b/js/src/lirasm/tests/floatingpoint.in index d625ac18f82..f2f5b63fd37 100644 --- a/js/src/lirasm/tests/floatingpoint.in +++ b/js/src/lirasm/tests/floatingpoint.in @@ -1,4 +1,4 @@ -pi = float 3.14 -two = float 2.0 -TwoPi = fmul pi two -fret two +pi = immd 3.14 +two = immd 2.0 +TwoPi = muld pi two +retd two diff --git a/js/src/lirasm/tests/fuzz-527178.in b/js/src/lirasm/tests/fuzz-527178.in index 9b210716fff..f6f2e204d32 100644 --- a/js/src/lirasm/tests/fuzz-527178.in +++ b/js/src/lirasm/tests/fuzz-527178.in @@ -1,5 +1,5 @@ -base = alloc 512 -five = int 5 -sti five base 256 -x = ldzs base 256 -ret x +base = allocp 512 +five = imml 5 +stl five base 256 +x = lduw2ul base 256 +retl x diff --git a/js/src/lirasm/tests/loadstore.in b/js/src/lirasm/tests/loadstore.in index 9266ba3cd85..a3356c816f7 100644 --- a/js/src/lirasm/tests/loadstore.in +++ b/js/src/lirasm/tests/loadstore.in @@ -1,9 +1,9 @@ -ptr= alloc 8 -five = int 5 -sti five ptr 0 -three= int 3 -sti three ptr 4 -v= ld ptr 0 -u= ld ptr 4 -res= add u v -ret res +ptr = allocp 8 +five = imml 5 +stl five ptr 0 +three = imml 3 +stl three ptr 4 +v = ldl ptr 0 +u = ldl ptr 4 +res = addl u v +retl res diff --git a/js/src/lirasm/tests/mul_xxx.in b/js/src/lirasm/tests/mul_xxx.in index ec6cefea166..3d20e2c41f8 100644 --- a/js/src/lirasm/tests/mul_xxx.in +++ b/js/src/lirasm/tests/mul_xxx.in @@ -1,13 +1,13 @@ ; 46340 * 46340 < 2^31, and will not overflow. -big = int 46340 +big = imml 46340 ; Because 'big' isn't used after mul, it _may_ get allocated to the same ; register as 'res'. This is the case with the ARM back-end, and that is where ; this test is important as rX=rX*rX isn't possible on ARMv5 without some ; trickery. -res = mulxov big big ; no overflow, so we don't exit here +res = mulxovl big big ; no overflow, so we don't exit here ; Store 'res' so it isn't dead. -m = alloc 4 -sti res m 0 +m = allocp 4 +stl res m 0 x ; we exit here diff --git a/js/src/lirasm/tests/mul_xxy.in b/js/src/lirasm/tests/mul_xxy.in index c890ef83653..629aaa285fb 100644 --- a/js/src/lirasm/tests/mul_xxy.in +++ b/js/src/lirasm/tests/mul_xxy.in @@ -1,14 +1,14 @@ ; 1073741823 * 2 = 0x7ffffffe, and will nearly (but not quite) overflow. -big = int 1073741823 -two = int 2 +big = imml 1073741823 +two = imml 2 ; Because 'big' isn't used after mul, it _may_ get allocated to the same ; register as 'res'. This is the case with the ARM back-end, and that is where ; this test is important as rX=rX*rY isn't possible on ARMv5 without some ; trickery. -res = mulxov big two ; no overflow, so we don't exit here +res = mulxovl big two ; no overflow, so we don't exit here ; Store 'res' so it isn't dead. -m = alloc 4 -sti res m 0 +m = allocp 4 +stl res m 0 x ; we exit here diff --git a/js/src/lirasm/tests/mul_xyy.in b/js/src/lirasm/tests/mul_xyy.in index ae6963dfe31..1011c2c5af8 100644 --- a/js/src/lirasm/tests/mul_xyy.in +++ b/js/src/lirasm/tests/mul_xyy.in @@ -1,11 +1,11 @@ ; 46340 * 46340 < 2^31, and will not overflow. 
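For reference, the lirasm mnemonic renames applied across the .in test updates in this patch, collected from the hunks themselves:

    old      new            old      new
    int      imml           ld       ldl
    float    immd           ldf      ldd
    alloc    allocp         ldzs     lduw2ul
    add      addl           f2i      d2l
    sub      subl           icall    calll
    rsh      rshl           fcall    calld
    fadd     addd           ge       gel
    fmul     muld           mulxov   mulxovl
    sti      stl            ret      retl
    stfi     std            fret     retd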
-big = int 46340 +big = imml 46340 -res = mulxov big big ; no overflow, so we don't exit here +res = mulxovl big big ; no overflow, so we don't exit here ; Ensure that 'big' gets its own register and isn't shared with 'res'. ; Also store 'res' so it isn't dead. -m = alloc 8 -sti big m 0 -sti res m 4 +m = allocp 8 +stl big m 0 +stl res m 4 x ; we exit here diff --git a/js/src/lirasm/tests/mul_xyz.in b/js/src/lirasm/tests/mul_xyz.in index 1107035889a..5ed0d2a4c5a 100644 --- a/js/src/lirasm/tests/mul_xyz.in +++ b/js/src/lirasm/tests/mul_xyz.in @@ -1,13 +1,13 @@ ; 1073741823 * 2 = 0x7ffffffe, and will nearly (but not quite) overflow. -big = int 1073741823 -two = int 2 +big = imml 1073741823 +two = imml 2 -res = mulxov big two ; no overflow, so we don't exit here +res = mulxovl big two ; no overflow, so we don't exit here ; Ensure that 'big' and 'two' get their own registers and ; aren't shared with 'res'. Also store 'res' so it isn't dead. -m = alloc 12 -sti big m 0 -sti two m 4 -sti res m 8 +m = allocp 12 +stl big m 0 +stl two m 4 +stl res m 8 x ; we exit here diff --git a/js/src/lirasm/tests/mulov_xxx.in b/js/src/lirasm/tests/mulov_xxx.in index 66370f5962b..d25a8185f74 100644 --- a/js/src/lirasm/tests/mulov_xxx.in +++ b/js/src/lirasm/tests/mulov_xxx.in @@ -1,14 +1,14 @@ ; 46341 * 46341 >= 2^31, and will overflow. -big = int 46341 +big = imml 46341 ; Because 'big' isn't used after mul, it _may_ get allocated to the same ; register as 'res'. This is the case with the ARM back-end, and that is where ; this test is important as rX=rX*rX isn't possible on ARMv5 without some ; trickery. -res = mulxov big big ; overflow, so we exit here +res = mulxovl big big ; overflow, so we exit here ; Store 'res' so it isn't dead. -m = alloc 4 -sti res m 0 +m = allocp 4 +stl res m 0 x ; we don't exit here diff --git a/js/src/lirasm/tests/mulov_xxy.in b/js/src/lirasm/tests/mulov_xxy.in index b57a5a2ea7e..ef1c0327874 100644 --- a/js/src/lirasm/tests/mulov_xxy.in +++ b/js/src/lirasm/tests/mulov_xxy.in @@ -1,14 +1,14 @@ ; 1073741824 * 2 >= 2^31, and will overflow. -big = int 1073741824 -two = int 2 +big = imml 1073741824 +two = imml 2 ; Because 'big' isn't used after mul, it _may_ get allocated to the same ; register as 'res'. This is the case with the ARM back-end, and that is where ; this test is important as rX=rX*rY isn't possible on ARMv5 without some ; trickery. -res = mulxov big two ; overflow, so we exit here +res = mulxovl big two ; overflow, so we exit here ; Store 'res' so it isn't dead. -m = alloc 4 -sti res m 0 +m = allocp 4 +stl res m 0 x ; we don't exit here diff --git a/js/src/lirasm/tests/mulov_xyy.in b/js/src/lirasm/tests/mulov_xyy.in index 50fe51d174e..8379650e192 100644 --- a/js/src/lirasm/tests/mulov_xyy.in +++ b/js/src/lirasm/tests/mulov_xyy.in @@ -1,11 +1,11 @@ ; 46341 * 46341 >= 2^31, and will overflow. -big = int 46341 +big = imml 46341 -res = mulxov big big ; overflow, so we exit here +res = mulxovl big big ; overflow, so we exit here ; Ensure that 'big' gets its own register and isn't shared with 'res'. ; Also store 'res' so it isn't dead. -m = alloc 8 -sti big m 0 -sti res m 4 +m = allocp 8 +stl big m 0 +stl res m 4 x ; we don't exit here diff --git a/js/src/lirasm/tests/mulov_xyz.in b/js/src/lirasm/tests/mulov_xyz.in index 0975cbae76a..32a4902aa6a 100644 --- a/js/src/lirasm/tests/mulov_xyz.in +++ b/js/src/lirasm/tests/mulov_xyz.in @@ -1,13 +1,13 @@ ; 1073741824 * 2 >= 2^31, and will overflow. 
-big = int 1073741824 -two = int 2 +big = imml 1073741824 +two = imml 2 -res = mulxov big two ; overflow, so we exit here +res = mulxovl big two ; overflow, so we exit here ; Ensure that 'big' and 'two' get their own registers and ; aren't shared with 'res'. Also store 'res' so it isn't dead. -m = alloc 12 -sti big m 0 -sti two m 4 -sti res m 8 +m = allocp 12 +stl big m 0 +stl two m 4 +stl res m 8 x ; we don't exit here diff --git a/js/src/lirasm/tests/multfrag1.in b/js/src/lirasm/tests/multfrag1.in index c2b07e7ace7..1881bac473f 100644 --- a/js/src/lirasm/tests/multfrag1.in +++ b/js/src/lirasm/tests/multfrag1.in @@ -1,27 +1,27 @@ .begin a -ptr = alloc 8 -a = int 65 -sti a ptr 0 -b = int 66 -sti b ptr 1 -c = int 67 -sti c ptr 2 -zero = int 0 -sti zero ptr 3 -ss = icall puts cdecl ptr -nn = ge ss zero -ret nn +ptr = allocp 8 +a = imml 65 +stl a ptr 0 +b = imml 66 +stl b ptr 1 +c = imml 67 +stl c ptr 2 +zero = imml 0 +stl zero ptr 3 +ss = calll puts cdecl ptr +nn = gel ss zero +retl nn .end .begin b -rr = icall a fastcall -ret rr +rr = calll a fastcall +retl rr .end .begin main -ans = icall b fastcall -five = int 5 -res = add five ans -ret res +ans = calll b fastcall +five = imml 5 +res = addl five ans +retl res .end diff --git a/js/src/lirasm/tests/multfrag2.in b/js/src/lirasm/tests/multfrag2.in index cf7553e71e4..6a769a964d6 100644 --- a/js/src/lirasm/tests/multfrag2.in +++ b/js/src/lirasm/tests/multfrag2.in @@ -1,14 +1,14 @@ .begin sinpibytwo -pi = float 3.14 -half = float 0.5 -halfpi = fmul pi half -res = fcall sin cdecl halfpi -fret res +pi = immd 3.14 +half = immd 0.5 +halfpi = muld pi half +res = calld sin cdecl halfpi +retd res .end .begin main -aa = fcall sinpibytwo fastcall -bb = float 5.53 -res = fadd aa bb -fret res +aa = calld sinpibytwo fastcall +bb = immd 5.53 +res = addd aa bb +retd res .end diff --git a/js/src/lirasm/tests/multfrag3.in b/js/src/lirasm/tests/multfrag3.in index c9469d6bbb2..8dba76241b2 100644 --- a/js/src/lirasm/tests/multfrag3.in +++ b/js/src/lirasm/tests/multfrag3.in @@ -1,16 +1,16 @@ ; See bug 541232 for why the params are commented out. 
.begin avg -oneh = int 100 ; should be: p1 = param 0 0 -twoh = int 200 ; should be: p2 = param 1 0 -sum = add oneh twoh ; should be: sum = add p1 p2 -one = int 1 -avg = rsh sum one -ret avg +oneh = imml 100 ; should be: p1 = paramp 0 0 +twoh = imml 200 ; should be: p2 = paramp 1 0 +sum = addl oneh twoh ; should be: sum = addp p1 p2 +one = imml 1 +avg = rshl sum one +retl avg .end .begin main -oneh = int 100 -twoh = int 200 -res = icall avg fastcall twoh oneh -ret res +oneh = imml 100 +twoh = imml 200 +res = calll avg fastcall twoh oneh +retl res .end diff --git a/js/src/nanojit-import-rev b/js/src/nanojit-import-rev index a1fffba9259..6aca08dcf64 100644 --- a/js/src/nanojit-import-rev +++ b/js/src/nanojit-import-rev @@ -1 +1 @@ -4adbf1bbb16cf4751b46a49a4f9c474c0ab0a3b9 +be4f098b6cb32f2c2787ef2842402f2b39e0925d diff --git a/js/src/nanojit/Assembler.cpp b/js/src/nanojit/Assembler.cpp index fa7ce831733..06297fe06f2 100755 --- a/js/src/nanojit/Assembler.cpp +++ b/js/src/nanojit/Assembler.cpp @@ -80,6 +80,7 @@ namespace nanojit , _config(config) { VMPI_memset(&_stats, 0, sizeof(_stats)); + VMPI_memset(lookahead, 0, N_LOOKAHEAD * sizeof(LInsp)); nInit(core); (void)logc; verbose_only( _logc = logc; ) @@ -118,49 +119,49 @@ namespace nanojit NanoAssert(_entries[i] == BAD_ENTRY); } - void AR::validate() - { + void AR::validate() + { static uint32_t validateCounter = 0; - if (++validateCounter >= 100) - { - validateFull(); - validateCounter = 0; - } - else - { - validateQuick(); - } - } + if (++validateCounter >= 100) + { + validateFull(); + validateCounter = 0; + } + else + { + validateQuick(); + } + } #endif - inline void AR::clear() - { - _highWaterMark = 0; + inline void AR::clear() + { + _highWaterMark = 0; NanoAssert(_entries[0] == NULL); #ifdef _DEBUG for (uint32_t i = 1; i < NJ_MAX_STACK_ENTRY; ++i) _entries[i] = BAD_ENTRY; #endif - } + } - bool AR::Iter::next(LIns*& ins, uint32_t& nStackSlots, int32_t& arIndex) - { - while (++_i <= _ar._highWaterMark) - { - if ((ins = _ar._entries[_i]) != NULL) - { - nStackSlots = nStackSlotsFor(ins); - _i += nStackSlots - 1; - arIndex = _i; - return true; - } - } - ins = NULL; - nStackSlots = 0; - arIndex = 0; - return false; - } + bool AR::Iter::next(LIns*& ins, uint32_t& nStackSlots, int32_t& arIndex) + { + while (_i <= _ar._highWaterMark) { + ins = _ar._entries[_i]; + if (ins) { + arIndex = _i; + nStackSlots = nStackSlotsFor(ins); + _i += nStackSlots; + return true; + } + _i++; + } + ins = NULL; + nStackSlots = 0; + arIndex = 0; + return false; + } void Assembler::arReset() { @@ -258,7 +259,7 @@ namespace nanojit * they can just be recalculated w/out any inputs. */ bool Assembler::canRemat(LIns *i) { - return i->isconst() || i->isconstq() || i->isop(LIR_alloc); + return i->isImmAny() || i->isop(LIR_alloc); } void Assembler::codeAlloc(NIns *&start, NIns *&end, NIns *&eip @@ -567,7 +568,7 @@ namespace nanojit int Assembler::findMemFor(LIns *ins) { #if NJ_USES_QUAD_CONSTANTS - NanoAssert(!ins->isconstq()); + NanoAssert(!ins->isconstf()); #endif if (!ins->isInAr()) { uint32_t const arIndex = arReserve(ins); @@ -583,13 +584,13 @@ namespace nanojit Register Assembler::deprecated_prepResultReg(LIns *ins, RegisterMask allow) { #ifdef NANOJIT_IA32 - const bool pop = (allow & rmask(FST0)) && - (!ins->isInReg() || ins->getReg() != FST0); -#else - const bool pop = false; + // We used to have to worry about possibly popping the x87 stack here. + // But this function is no longer used on i386, and this assertion + // ensures that. 
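The AR::Iter::next() rewrite earlier in this hunk scans the activation-record table, where a value occupying k stack slots appears in k consecutive entries and must be reported once, at its first index. A simplified standalone model of that scan (plain ints stand in for LIns pointers, and slotsFor() for nStackSlotsFor()):

#include <cstdio>

static const int HIGH_WATER = 6;
// Slot 0 is unused, as in AR; the 2-slot value 9 appears in two entries.
static int entries[HIGH_WATER + 1] = { 0, 7, 0, 9, 9, 0, 3 };

static int slotsFor(int id) { return (id == 9) ? 2 : 1; }  // stand-in for nStackSlotsFor()

// Mirrors the loop structure of AR::Iter::next(): 'i' always points at
// the start of a value's entries or at an empty slot.
static bool next(int& i, int& id, int& nSlots, int& arIndex) {
    while (i <= HIGH_WATER) {
        id = entries[i];
        if (id) {
            arIndex = i;
            nSlots = slotsFor(id);
            i += nSlots;            // skip past all of this value's entries
            return true;
        }
        i++;
    }
    return false;
}

int main() {
    int i = 1, id, n, idx;
    while (next(i, id, n, idx))
        printf("value %d at index %d (%d slot%s)\n", id, idx, n, n > 1 ? "s" : "");
    return 0;   // reports 7@1, 9@3 (2 slots), 3@6
}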
+ NanoAssert(0); #endif Register r = findRegFor(ins, allow); - deprecated_freeRsrcOf(ins, pop); + deprecated_freeRsrcOf(ins); return r; } @@ -627,37 +628,46 @@ namespace nanojit // which case the restore will have already been generated, so we now // generate the spill (unless the restore was actually a // rematerialize, in which case it's not necessary). - // - // As for 'pop': it's only relevant on i386 and if 'allow' includes - // FST0, in which case we have to pop if 'ins' isn't in FST0 in the - // post-regstate. This could be because 'ins' is unused, 'ins' is in - // a spill slot, or 'ins' is in an XMM register. #ifdef NANOJIT_IA32 + // If 'allow' includes FST0 we have to pop if 'ins' isn't in FST0 in + // the post-regstate. This could be because 'ins' is unused, 'ins' is + // in a spill slot, or 'ins' is in an XMM register. const bool pop = (allow & rmask(FST0)) && (!ins->isInReg() || ins->getReg() != FST0); #else const bool pop = false; #endif Register r = findRegFor(ins, allow); - asm_spilli(ins, pop); + asm_maybe_spill(ins, pop); +#ifdef NANOJIT_IA32 + if (!ins->isInAr() && pop && r == FST0) { + // This can only happen with a LIR_fcall to an impure function + // whose return value was ignored (ie. if ins->isInReg() was false + // prior to the findRegFor() call). + FSTP(FST0); // pop the fpu result since it isn't used + } +#endif return r; } - void Assembler::asm_spilli(LInsp ins, bool pop) + void Assembler::asm_maybe_spill(LInsp ins, bool pop) { int d = ins->isInAr() ? arDisp(ins) : 0; Register r = ins->getReg(); - verbose_only( if (d && (_logc->lcbits & LC_Assembly)) { - setOutputForEOL(" <= spill %s", - _thisfrag->lirbuf->names->formatRef(ins)); } ) - asm_spill(r, d, pop, ins->isN64()); + if (ins->isInAr()) { + verbose_only( RefBuf b; + if (_logc->lcbits & LC_Assembly) { + setOutputForEOL(" <= spill %s", + _thisfrag->lirbuf->printer->formatRef(&b, ins)); } ) + asm_spill(r, d, pop, ins->isN64()); + } } // XXX: This function is error-prone and should be phased out; see bug 513615. 
- void Assembler::deprecated_freeRsrcOf(LIns *ins, bool pop) + void Assembler::deprecated_freeRsrcOf(LIns *ins) { if (ins->isInReg()) { - asm_spilli(ins, pop); + asm_maybe_spill(ins, /*pop*/false); _allocator.retire(ins->getReg()); // free any register associated with entry ins->clearReg(); } @@ -710,9 +720,10 @@ namespace nanojit NanoAssert(!_allocator.isFree(r)); NanoAssert(vic == _allocator.getActive(r)); - verbose_only( if (_logc->lcbits & LC_Assembly) { + verbose_only( RefBuf b; + if (_logc->lcbits & LC_Assembly) { setOutputForEOL(" <= restore %s", - _thisfrag->lirbuf->names->formatRef(vic)); } ) + _thisfrag->lirbuf->printer->formatRef(&b, vic)); } ) asm_restore(vic, r); _allocator.retire(r); @@ -825,7 +836,7 @@ namespace nanojit return jmpTarget; } - void Assembler::compile(Fragment* frag, Allocator& alloc, bool optimize verbose_only(, LabelMap* labels)) + void Assembler::compile(Fragment* frag, Allocator& alloc, bool optimize verbose_only(, LInsPrinter* printer)) { verbose_only( bool anyVerb = (_logc->lcbits & 0xFFFF & ~LC_FragProfile) > 0; @@ -851,7 +862,7 @@ namespace nanojit LirReader br(frag->lastIns); LirFilter* lir = &br; if (optimize) { - StackFilter* sf = new (alloc) StackFilter(lir, alloc, frag->lirbuf->sp, frag->lirbuf->rp); + StackFilter* sf = new (alloc) StackFilter(lir, alloc, frag->lirbuf->sp); lir = sf; } live(lir, alloc, frag, _logc); @@ -872,9 +883,9 @@ namespace nanojit }) // now the the main trunk + verbose_only( RefBuf b; ) verbose_only( if (anyVerb) { - _logc->printf("=== -- Compile trunk %s: begin\n", - labels->format(frag)); + _logc->printf("=== -- Compile trunk %s: begin\n", printer->formatAddr(&b, frag)); }) // Used for debug printing, if needed @@ -898,20 +909,19 @@ namespace nanojit // INITIAL PRINTING verbose_only( if (_logc->lcbits & LC_ReadLIR) { - pp_init = new (alloc) ReverseLister(lir, alloc, frag->lirbuf->names, _logc, + pp_init = new (alloc) ReverseLister(lir, alloc, frag->lirbuf->printer, _logc, "Initial LIR"); lir = pp_init; }) // STACKFILTER if (optimize) { - StackFilter* stackfilter = - new (alloc) StackFilter(lir, alloc, frag->lirbuf->sp, frag->lirbuf->rp); + StackFilter* stackfilter = new (alloc) StackFilter(lir, alloc, frag->lirbuf->sp); lir = stackfilter; } verbose_only( if (_logc->lcbits & LC_AfterSF) { - pp_after_sf = new (alloc) ReverseLister(lir, alloc, frag->lirbuf->names, _logc, + pp_after_sf = new (alloc) ReverseLister(lir, alloc, frag->lirbuf->printer, _logc, "After StackFilter"); lir = pp_after_sf; }) @@ -926,13 +936,12 @@ namespace nanojit ) verbose_only( if (anyVerb) { - _logc->printf("=== -- Compile trunk %s: end\n", - labels->format(frag)); + _logc->printf("=== -- Compile trunk %s: end\n", printer->formatAddr(&b, frag)); }) verbose_only( if (asmVerb) - outputf("## compiling trunk %s", labels->format(frag)); + outputf("## compiling trunk %s", printer->formatAddr(&b, frag)); ) endAssembly(frag); @@ -1200,67 +1209,77 @@ namespace nanojit NanoAssert(_thisfrag->nStaticExits == 0); // The trace must end with one of these opcodes. 
- NanoAssert(reader->pos()->isop(LIR_x) || - reader->pos()->isop(LIR_xtbl) || - reader->pos()->isRet() || - reader->pos()->isLive()); + NanoAssert(reader->finalIns()->isop(LIR_x) || + reader->finalIns()->isop(LIR_xtbl) || + reader->finalIns()->isRet() || + reader->finalIns()->isLive()); InsList pending_lives(alloc); NanoAssert(!error()); - for (LInsp ins = reader->read(); !ins->isop(LIR_start); ins = reader->read()) + + // What's going on here: we're visiting all the LIR instructions in + // the buffer, working strictly backwards in buffer-order, and + // generating machine instructions for them as we go. + // + // For each LIns, we first determine whether it's actually necessary, + // and if not skip it. Otherwise we generate code for it. There are + // two kinds of "necessary" instructions: + // + // - "Statement" instructions, which have side effects. Anything that + // could change control flow or the state of memory. + // + // - "Value" or "expression" instructions, which compute a value based + // only on the operands to the instruction (and, in the case of + // loads, the state of memory). Because we visit instructions in + // reverse order, if some previously visited instruction uses the + // value computed by this instruction, then this instruction will + // already have a register assigned to hold that value. Hence we + // can consult the instruction to detect whether its value is in + // fact used (i.e. not dead). + // + // Note that the backwards code traversal can make register allocation + // confusing. (For example, we restore a value before we spill it!) + // In particular, words like "before" and "after" must be used very + // carefully -- their meaning at regalloc-time is opposite to their + // meaning at run-time. We use the term "pre-regstate" to refer to + // the register allocation state that occurs prior to an instruction's + // execution, and "post-regstate" to refer to the state that occurs + // after an instruction's execution, e.g.: + // + // pre-regstate: ebx(ins) + // instruction: mov eax, ebx // mov dst, src + // post-regstate: eax(ins) + // + // At run-time, the instruction updates the pre-regstate into the + // post-regstate (and these states are the real machine's regstates). + // But when allocating registers, because we go backwards, the + // pre-regstate is constructed from the post-regstate (and these + // regstates are those stored in RegAlloc). + // + // One consequence of generating code backwards is that we tend to + // both spill and restore registers as early (at run-time) as + // possible; this is good for tolerating memory latency. If we + // generated code forwards, we would expect to both spill and restore + // registers as late (at run-time) as possible; this might be better + // for reducing register pressure. + // + // Another thing to note: we provide N_LOOKAHEAD instruction's worth + // of lookahead because it's useful for backends. This is nice and + // easy because once read() gets to the LIR_start at the beginning of + // the buffer it'll just keep regetting it. + + for (int32_t i = 0; i < N_LOOKAHEAD; i++) + lookahead[i] = reader->read(); + + while (!lookahead[0]->isop(LIR_start)) { - /* What's going on here: we're visiting all the LIR instructions - in the buffer, working strictly backwards in buffer-order, and - generating machine instructions for them as we go. 
+ LInsp ins = lookahead[0]; // give it a shorter name for local use + LOpcode op = ins->opcode(); - For each LIns, we first determine whether it's actually - necessary, and if not skip it. Otherwise we generate code for - it. There are two kinds of "necessary" instructions: - - - "Statement" instructions, which have side effects. Anything - that could change control flow or the state of memory. - - - "Value" or "expression" instructions, which compute a value - based only on the operands to the instruction (and, in the - case of loads, the state of memory). Because we visit - instructions in reverse order, if some previously visited - instruction uses the value computed by this instruction, then - this instruction will already have a register assigned to - hold that value. Hence we can consult the instruction to - detect whether its value is in fact used (i.e. not dead). - - Note that the backwards code traversal can make register - allocation confusing. (For example, we restore a value before - we spill it!) In particular, words like "before" and "after" - must be used very carefully -- their meaning at regalloc-time is - opposite to their meaning at run-time. We use the term - "pre-regstate" to refer to the register allocation state that - occurs prior to an instruction's execution, and "post-regstate" - to refer to the state that occurs after an instruction's - execution, e.g.: - - pre-regstate: ebx(ins) - instruction: mov eax, ebx // mov dst, src - post-regstate: eax(ins) - - At run-time, the instruction updates the pre-regstate into the - post-regstate (and these states are the real machine's - regstates). But when allocating registers, because we go - backwards, the pre-regstate is constructed from the - post-regstate (and these regstates are those stored in - RegAlloc). - - One consequence of generating code backwards is that we tend to - both spill and restore registers as early (at run-time) as - possible; this is good for tolerating memory latency. If we - generated code forwards, we would expect to both spill and - restore registers as late (at run-time) as possible; this might - be better for reducing register pressure. - */ bool required = ins->isStmt() || ins->isUsed(); if (!required) - continue; + goto end_of_loop; #ifdef NJ_VERBOSE // Output the post-regstate (registers and/or activation). @@ -1273,8 +1292,7 @@ namespace nanojit printRegState(); #endif - LOpcode op = ins->opcode(); - switch(op) + switch (op) { default: NanoAssertMsgf(false, "unsupported LIR instruction: %d\n", op); @@ -1329,14 +1347,21 @@ namespace nanojit case LIR_int: { countlir_imm(); - asm_int(ins); + asm_immi(ins); break; } - case LIR_float: - CASE64(LIR_quad:) +#ifdef NANOJIT_64BIT + case LIR_quad: { countlir_imm(); - asm_quad(ins); + asm_immq(ins); + break; + } +#endif + case LIR_float: + { + countlir_imm(); + asm_immf(ins); break; } case LIR_param: @@ -1414,7 +1439,6 @@ namespace nanojit case LIR_qursh: case LIR_qirsh: case LIR_qior: - CASE64(LIR_qaddp:) case LIR_qxor: { asm_qbinop(ins); @@ -1423,7 +1447,6 @@ namespace nanojit #endif case LIR_add: - CASE32(LIR_iaddp:) case LIR_sub: case LIR_mul: case LIR_and: @@ -1587,7 +1610,7 @@ namespace nanojit // Out of range indices aren't allowed or checked. // Code after this jtbl instruction is unreachable. 
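The lookahead scheme the new comment describes (window priming before the loop, and the slide at end_of_loop below) is worth seeing in isolation: lookahead[0] is the current instruction, and because the reader keeps returning LIR_start once it reaches it, the tail of the window stays valid to the end. A toy version with integers standing in for LIns and a sentinel for LIR_start:

#include <cstdio>

static const int N_LOOKAHEAD = 3;
static const int SENTINEL    = -1;          // plays the role of LIR_start

struct Reader {                             // yields 5,4,3,2,1 then sticks at SENTINEL
    int next = 5;
    int read() { return next > 0 ? next-- : SENTINEL; }
};

int main() {
    Reader reader;
    int lookahead[N_LOOKAHEAD];

    for (int i = 0; i < N_LOOKAHEAD; i++)   // prime the window
        lookahead[i] = reader.read();

    while (lookahead[0] != SENTINEL) {
        printf("current=%d peek1=%d peek2=%d\n",
               lookahead[0], lookahead[1], lookahead[2]);
        for (int i = 1; i < N_LOOKAHEAD; i++)       // slide the window, as in gen()
            lookahead[i - 1] = lookahead[i];
        lookahead[N_LOOKAHEAD - 1] = reader.read();
    }
    return 0;
}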
releaseRegisters(); - AvmAssert(_allocator.countActive() == 0); + NanoAssert(_allocator.countActive() == 0); uint32_t count = ins->getTableSize(); bool has_back_edges = false; @@ -1598,7 +1621,8 @@ namespace nanojit LabelState *lstate = _labels.get(to); if (lstate) { unionRegisterState(lstate->regs); - asm_output(" %u: [&%s]", i, _thisfrag->lirbuf->names->formatRef(to)); + verbose_only( RefBuf b; ) + asm_output(" %u: [&%s]", i, _thisfrag->lirbuf->printer->formatRef(&b, to)); } else { has_back_edges = true; } @@ -1611,7 +1635,7 @@ namespace nanojit // to reconcile registers. So, frontends *must* insert LIR_regfence at labels of // forward jtbl jumps. Check here to make sure no registers were picked up from // any forward edges. - AvmAssert(_allocator.countActive() == 0); + NanoAssert(_allocator.countActive() == 0); if (has_back_edges) { handleLoopCarriedExprs(pending_lives); @@ -1621,7 +1645,8 @@ namespace nanojit LabelState *lstate = _labels.get(to); if (!lstate) { _labels.add(to, 0, _allocator); - asm_output(" %u: [&%s]", i, _thisfrag->lirbuf->names->formatRef(to)); + verbose_only( RefBuf b; ) + asm_output(" %u: [&%s]", i, _thisfrag->lirbuf->printer->formatRef(&b, to)); } } asm_output("backward edges"); @@ -1656,8 +1681,10 @@ namespace nanojit intersectRegisterState(label->regs); label->addr = _nIns; } - verbose_only( if (_logc->lcbits & LC_Assembly) { - asm_output("[%s]", _thisfrag->lirbuf->names->formatRef(ins)); + verbose_only( + RefBuf b; + if (_logc->lcbits & LC_Assembly) { + asm_output("[%s]", _thisfrag->lirbuf->printer->formatRef(&b, ins)); }) break; } @@ -1792,8 +1819,9 @@ namespace nanojit // field in another machine instruction). // if (_logc->lcbits & LC_Assembly) { - LirNameMap* names = _thisfrag->lirbuf->names; - outputf(" %s", names->formatIns(ins)); + InsBuf b; + LInsPrinter* printer = _thisfrag->lirbuf->printer; + outputf(" %s", printer->formatIns(&b, ins)); if (ins->isGuard() && ins->oprnd1() && ins->oprnd1()->isCmp()) { // Special case: code is generated for guard conditions at // the same time that code is generated for the guard @@ -1803,19 +1831,19 @@ namespace nanojit // the condition *is* used again we'll end up printing it // twice, but that's ok. outputf(" %s # codegen'd with the %s", - names->formatIns(ins->oprnd1()), lirNames[op]); + printer->formatIns(&b, ins->oprnd1()), lirNames[op]); } else if (ins->isCmov()) { // Likewise for cmov conditions. outputf(" %s # codegen'd with the %s", - names->formatIns(ins->oprnd1()), lirNames[op]); + printer->formatIns(&b, ins->oprnd1()), lirNames[op]); } #if defined NANOJIT_IA32 || defined NANOJIT_X64 else if (ins->isop(LIR_mod)) { // There's a similar case when a div feeds into a mod. outputf(" %s # codegen'd with the mod", - names->formatIns(ins->oprnd1())); + printer->formatIns(&b, ins->oprnd1())); } #endif } @@ -1831,6 +1859,11 @@ namespace nanojit // check that all is well (don't check in exit paths since its more complicated) debug_only( pageValidate(); ) debug_only( resourceConsistencyCheck(); ) + + end_of_loop: + for (int32_t i = 1; i < N_LOOKAHEAD; i++) + lookahead[i-1] = lookahead[i]; + lookahead[N_LOOKAHEAD-1] = reader->read(); } } @@ -1885,18 +1918,18 @@ namespace nanojit LIns *ins = p->head; NanoAssert(ins->isLive()); LIns *op1 = ins->oprnd1(); - // must findMemFor even if we're going to findRegFor; loop-carried + // Must findMemFor even if we're going to findRegFor; loop-carried // operands may spill on another edge, and we need them to always // spill to the same place. 
#if NJ_USES_QUAD_CONSTANTS - // exception: if quad constants are true constants, we should - // never call findMemFor on those ops - if (!op1->isconstq()) + // Exception: if float constants are true constants, we should + // never call findMemFor on those ops. + if (!op1->isconstf()) #endif { findMemFor(op1); } - if (! (op1->isconst() || op1->isconstf() || op1->isconstq())) + if (!op1->isImmAny()) findRegFor(op1, ins->isop(LIR_flive) ? FpRegs : GpRegs); } @@ -1934,7 +1967,8 @@ namespace nanojit if (ins) { NanoAssertMsg(!_allocator.isFree(r), "Coding error; register is both free and active! " ); - const char* n = _thisfrag->lirbuf->names->formatRef(ins); + RefBuf b; + const char* n = _thisfrag->lirbuf->printer->formatRef(&b, ins); if (ins->isop(LIR_param) && ins->paramKind()==1 && r == Assembler::savedRegs[ins->paramArg()]) @@ -1963,7 +1997,8 @@ namespace nanojit int32_t arIndex = 0; for (AR::Iter iter(_activation); iter.next(ins, nStackSlots, arIndex); ) { - const char* n = _thisfrag->lirbuf->names->formatRef(ins); + RefBuf b; + const char* n = _thisfrag->lirbuf->printer->formatRef(&b, ins); if (nStackSlots > 1) { VMPI_sprintf(s," %d-%d(%s)", 4*arIndex, 4*(arIndex+nStackSlots-1), n); } @@ -2055,7 +2090,7 @@ namespace nanojit void AR::checkForResourceLeaks() const { for (uint32_t i = 1; i <= _highWaterMark; i++) { - NanoAssertMsgf(_entries[i] == NULL, "frame entry %d wasn't freed\n",-4*i); + NanoAssertMsgf(_entries[i] == NULL, "frame entry %d wasn't freed\n",4*i); } } #endif @@ -2363,35 +2398,6 @@ namespace nanojit } #endif // NJ_VERBOSE - uint32_t CallInfo::_count_args(uint32_t mask) const - { - uint32_t argc = 0; - uint32_t argt = _argtypes; - for (uint32_t i = 0; i < MAXARGS; ++i) { - argt >>= ARGSIZE_SHIFT; - if (!argt) - break; - argc += (argt & mask) != 0; - } - return argc; - } - - uint32_t CallInfo::get_sizes(ArgSize* sizes) const - { - uint32_t argt = _argtypes; - uint32_t argc = 0; - for (uint32_t i = 0; i < MAXARGS; i++) { - argt >>= ARGSIZE_SHIFT; - ArgSize a = ArgSize(argt & ARGSIZE_MASK_ANY); - if (a != ARGSIZE_NONE) { - sizes[argc++] = a; - } else { - break; - } - } - return argc; - } - void LabelStateMap::add(LIns *label, NIns *addr, RegAlloc ®s) { LabelState *st = new (alloc) LabelState(addr, regs); labels.put(label, st); diff --git a/js/src/nanojit/Assembler.h b/js/src/nanojit/Assembler.h index 9f0f0a31048..e0f47e5060c 100644 --- a/js/src/nanojit/Assembler.h +++ b/js/src/nanojit/Assembler.h @@ -136,6 +136,7 @@ namespace nanojit { private: const AR& _ar; + // '_i' points to the start of the entries for an LIns, or to the first NULL entry. 
uint32_t _i; public: inline Iter(const AR& ar) : _ar(ar), _i(1) { } @@ -296,7 +297,7 @@ namespace nanojit Assembler(CodeAlloc& codeAlloc, Allocator& dataAlloc, Allocator& alloc, AvmCore* core, LogControl* logc, const Config& config); void compile(Fragment *frag, Allocator& alloc, bool optimize - verbose_only(, LabelMap*)); + verbose_only(, LInsPrinter*)); void endAssembly(Fragment* frag); void assemble(Fragment* frag, LirFilter* reader); @@ -343,22 +344,22 @@ namespace nanojit void assignSaved(RegAlloc &saved, RegisterMask skip); LInsp findVictim(RegisterMask allow); - Register getBaseReg(LIns *i, int &d, RegisterMask allow); + Register getBaseReg(LIns *ins, int &d, RegisterMask allow); void getBaseReg2(RegisterMask allowValue, LIns* value, Register& rv, RegisterMask allowBase, LIns* base, Register& rb, int &d); #if NJ_USES_QUAD_CONSTANTS const uint64_t* findQuadConstant(uint64_t q); #endif - int findMemFor(LIns* i); - Register findRegFor(LIns* i, RegisterMask allow); + int findMemFor(LIns* ins); + Register findRegFor(LIns* ins, RegisterMask allow); void findRegFor2(RegisterMask allowa, LIns* ia, Register &ra, RegisterMask allowb, LIns *ib, Register &rb); - Register findSpecificRegFor(LIns* i, Register r); - Register findSpecificRegForUnallocated(LIns* i, Register r); - Register deprecated_prepResultReg(LIns *i, RegisterMask allow); - Register prepareResultReg(LIns *i, RegisterMask allow); - void deprecated_freeRsrcOf(LIns *i, bool pop); + Register findSpecificRegFor(LIns* ins, Register r); + Register findSpecificRegForUnallocated(LIns* ins, Register r); + Register deprecated_prepResultReg(LIns *ins, RegisterMask allow); + Register prepareResultReg(LIns *ins, RegisterMask allow); + void deprecated_freeRsrcOf(LIns *ins); void freeResourcesOf(LIns *ins); void evictIfActive(Register r); void evict(LIns* vic); @@ -412,6 +413,13 @@ namespace nanojit NIns* pedanticTop; #endif + + // Instruction lookahead in gen(). lookahead[0] is the current + // instruction. Nb: lookahead[1..N_LOOKAHEAD] may include dead + // instructions, but we won't know that they're dead yet. 
+ static const int N_LOOKAHEAD = 3; + LInsp lookahead[N_LOOKAHEAD]; + AR _activation; RegAlloc _allocator; @@ -422,22 +430,25 @@ namespace nanojit void asm_store32(LOpcode op, LIns *val, int d, LIns *base); void asm_store64(LOpcode op, LIns *val, int d, LIns *base); void asm_restore(LInsp, Register); - void asm_spilli(LInsp i, bool pop); + void asm_maybe_spill(LInsp ins, bool pop); void asm_spill(Register rr, int d, bool pop, bool quad); - void asm_load64(LInsp i); - void asm_ret(LInsp p); - void asm_quad(LInsp i); - void asm_fcond(LInsp i); - void asm_cond(LInsp i); - void asm_arith(LInsp i); - void asm_neg_not(LInsp i); - void asm_load32(LInsp i); - void asm_cmov(LInsp i); - void asm_param(LInsp i); - void asm_int(LInsp i); + void asm_load64(LInsp ins); + void asm_ret(LInsp ins); +#ifdef NANOJIT_64BIT + void asm_immq(LInsp ins); +#endif + void asm_immf(LInsp ins); + void asm_fcond(LInsp ins); + void asm_cond(LInsp ins); + void asm_arith(LInsp ins); + void asm_neg_not(LInsp ins); + void asm_load32(LInsp ins); + void asm_cmov(LInsp ins); + void asm_param(LInsp ins); + void asm_immi(LInsp ins); #if NJ_SOFTFLOAT_SUPPORTED - void asm_qlo(LInsp i); - void asm_qhi(LInsp i); + void asm_qlo(LInsp ins); + void asm_qhi(LInsp ins); void asm_qjoin(LIns *ins); #endif void asm_fneg(LInsp ins); diff --git a/js/src/nanojit/CodeAlloc.cpp b/js/src/nanojit/CodeAlloc.cpp index f1f2d86054a..b95e7e4b5c4 100644 --- a/js/src/nanojit/CodeAlloc.cpp +++ b/js/src/nanojit/CodeAlloc.cpp @@ -153,7 +153,7 @@ namespace nanojit if (verbose) avmplus::AvmLog("free %p-%p %d\n", start, end, (int)blk->size()); - AvmAssert(!blk->isFree); + NanoAssert(!blk->isFree); // coalesce adjacent blocks. bool already_on_avail_list; @@ -275,7 +275,8 @@ extern "C" int cacheflush(char *addr, int nbytes, int cache); #endif #ifdef AVMPLUS_SPARC -#ifdef __linux__ // bugzilla 502369 +// Note: the linux #define provided by the compiler. +#ifdef linux // bugzilla 502369 void sync_instruction_memory(caddr_t v, u_int len) { caddr_t end = v + len; diff --git a/js/src/nanojit/LIR.cpp b/js/src/nanojit/LIR.cpp index 52c4e95f64d..cf029bc41ac 100644 --- a/js/src/nanojit/LIR.cpp +++ b/js/src/nanojit/LIR.cpp @@ -81,6 +81,46 @@ namespace nanojit #endif /* NANOJIT_VERBOSE */ + uint32_t CallInfo::count_args() const + { + uint32_t argc = 0; + uint32_t argt = _typesig; + argt >>= ARGTYPE_SHIFT; // remove retType + while (argt) { + argc++; + argt >>= ARGTYPE_SHIFT; + } + return argc; + } + + uint32_t CallInfo::count_int32_args() const + { + uint32_t argc = 0; + uint32_t argt = _typesig; + argt >>= ARGTYPE_SHIFT; // remove retType + while (argt) { + ArgType a = ArgType(argt & ARGTYPE_MASK); + if (a == ARGTYPE_I || a == ARGTYPE_U) + argc++; + argt >>= ARGTYPE_SHIFT; + } + return argc; + } + + uint32_t CallInfo::getArgTypes(ArgType* argTypes) const + { + uint32_t argc = 0; + uint32_t argt = _typesig; + argt >>= ARGTYPE_SHIFT; // remove retType + while (argt) { + ArgType a = ArgType(argt & ARGTYPE_MASK); + argTypes[argc] = a; + argc++; + argt >>= ARGTYPE_SHIFT; + } + return argc; + } + // implementation #ifdef NJ_VERBOSE void ReverseLister::finish() @@ -96,12 +136,18 @@ namespace nanojit LInsp ReverseLister::read() { - LInsp i = in->read(); - const char* str = _names->formatIns(i); + // This check is necessary to avoid printing the LIR_start multiple + // times due to lookahead in Assembler::gen(). 
+ if (_prevIns && _prevIns->isop(LIR_start)) + return _prevIns; + LInsp ins = in->read(); + InsBuf b; + const char* str = _printer->formatIns(&b, ins); char* cpy = new (_alloc) char[strlen(str)+1]; VMPI_strcpy(cpy, str); _strs.insert(cpy); - return i; + _prevIns = ins; + return ins; } #endif @@ -114,7 +160,7 @@ namespace nanojit // LCompressedBuffer LirBuffer::LirBuffer(Allocator& alloc) : #ifdef NJ_VERBOSE - names(NULL), + printer(NULL), #endif abi(ABI_FASTCALL), state(NULL), param1(NULL), sp(NULL), rp(NULL), _allocator(alloc) @@ -218,7 +264,7 @@ namespace nanojit return ins; } else { // If the displacement is more than 16 bits, put it in a separate instruction. - return insStore(op, val, ins2(LIR_addp, base, insImmWord(d)), 0, accSet); + return insStore(op, val, ins2(LIR_piadd, base, insImmWord(d)), 0, accSet); } } @@ -265,7 +311,7 @@ namespace nanojit // If the displacement is more than 16 bits, put it in a separate instruction. // Note that CseFilter::insLoad() also does this, so this will // only occur if CseFilter has been removed from the pipeline. - return insLoad(op, ins2(LIR_addp, base, insImmWord(d)), 0, accSet); + return insLoad(op, ins2(LIR_piadd, base, insImmWord(d)), 0, accSet); } } @@ -361,20 +407,20 @@ namespace nanojit 0 }; - // Check the invariant: _i never points to a skip. - NanoAssert(_i && !_i->isop(LIR_skip)); + // Check the invariant: _ins never points to a skip. + NanoAssert(_ins && !_ins->isop(LIR_skip)); // Step back one instruction. Use a table lookup rather than a switch // to avoid branch mispredictions. LIR_start is given a special size // of zero so that we don't step back past the start of the block. // (Callers of this function should stop once they see a LIR_start.) - LInsp ret = _i; - _i = (LInsp)(uintptr_t(_i) - insSizes[_i->opcode()]); + LInsp ret = _ins; + _ins = (LInsp)(uintptr_t(_ins) - insSizes[_ins->opcode()]); - // Ensure _i doesn't end up pointing to a skip. - while (_i->isop(LIR_skip)) { - NanoAssert(_i->prevLIns() != _i); - _i = _i->prevLIns(); + // Ensure _ins doesn't end up pointing to a skip. + while (_ins->isop(LIR_skip)) { + NanoAssert(_ins->prevLIns() != _ins); + _ins = _ins->prevLIns(); } return ret; @@ -409,6 +455,30 @@ namespace nanojit } #endif + LOpcode f64cmp_to_i32cmp(LOpcode op) + { + switch (op) { + case LIR_feq: return LIR_eq; + case LIR_flt: return LIR_lt; + case LIR_fgt: return LIR_gt; + case LIR_fle: return LIR_le; + case LIR_fge: return LIR_ge; + default: NanoAssert(0); return LIR_skip; + } + } + + LOpcode f64cmp_to_u32cmp(LOpcode op) + { + switch (op) { + case LIR_feq: return LIR_eq; + case LIR_flt: return LIR_ult; + case LIR_fgt: return LIR_ugt; + case LIR_fle: return LIR_ule; + case LIR_fge: return LIR_uge; + default: NanoAssert(0); return LIR_skip; + } + } + // This is never called, but that's ok because it contains only static // assertions. 
void LIns::staticSanityCheck() @@ -452,11 +522,6 @@ namespace nanojit (offsetof(LInsSti, ins) - offsetof(LInsSti, oprnd_2)) ); } - LIns* LirWriter::ins2i(LOpcode v, LIns* oprnd1, int32_t imm) - { - return ins2(v, oprnd1, insImm(imm)); - } - bool insIsS16(LInsp i) { if (i->isconst()) { @@ -483,13 +548,13 @@ namespace nanojit #endif #if NJ_SOFTFLOAT_SUPPORTED case LIR_qlo: - if (oprnd->isconstq()) + if (oprnd->isconstf()) return insImm(oprnd->imm64_0()); if (oprnd->isop(LIR_qjoin)) return oprnd->oprnd1(); break; case LIR_qhi: - if (oprnd->isconstq()) + if (oprnd->isconstf()) return insImm(oprnd->imm64_1()); if (oprnd->isop(LIR_qjoin)) return oprnd->oprnd2(); @@ -509,7 +574,7 @@ namespace nanojit return out->ins2(LIR_sub, oprnd->oprnd2(), oprnd->oprnd1()); goto involution; case LIR_fneg: - if (oprnd->isconstq()) + if (oprnd->isconstf()) return insImmf(-oprnd->imm64f()); if (oprnd->isop(LIR_fsub)) return out->ins2(LIR_fsub, oprnd->oprnd2(), oprnd->oprnd1()); @@ -519,7 +584,7 @@ namespace nanojit return insImmf(oprnd->imm32()); break; case LIR_f2i: - if (oprnd->isconstq()) + if (oprnd->isconstf()) return insImm(int32_t(oprnd->imm64f())); break; case LIR_u2f: @@ -640,7 +705,7 @@ namespace nanojit ; } } - else if (oprnd1->isconstq() && oprnd2->isconstq()) + else if (oprnd1->isconstf() && oprnd2->isconstf()) { double c1 = oprnd1->imm64f(); double c2 = oprnd2->imm64f(); @@ -672,7 +737,6 @@ namespace nanojit LIns* t; switch (v) { case LIR_add: - CASE32(LIR_iaddp:) case LIR_mul: case LIR_fadd: case LIR_fmul: @@ -733,7 +797,6 @@ namespace nanojit if (c == 0) { switch (v) { case LIR_add: - CASE32(LIR_iaddp:) case LIR_or: case LIR_xor: case LIR_sub: @@ -920,34 +983,6 @@ namespace nanojit return out->insLoad(op, base, off, accSet); } - LIns* LirWriter::ins_eq0(LIns* oprnd1) - { - return ins2i(LIR_eq, oprnd1, 0); - } - - LIns* LirWriter::ins_peq0(LIns* oprnd1) - { - return ins2(LIR_peq, oprnd1, insImmWord(0)); - } - - LIns* LirWriter::ins_i2p(LIns* intIns) - { -#ifdef NANOJIT_64BIT - return ins1(LIR_i2q, intIns); -#else - return intIns; -#endif - } - - LIns* LirWriter::ins_u2p(LIns* uintIns) - { -#ifdef NANOJIT_64BIT - return ins1(LIR_u2q, uintIns); -#else - return uintIns; -#endif - } - LIns* LirWriter::insStorei(LIns* value, LIns* base, int32_t d, AccSet accSet) { // Determine which kind of store should be used for 'value' based on @@ -965,36 +1000,13 @@ namespace nanojit return insStore(op, value, base, d, accSet); } -#if NJ_SOFTFLOAT_SUPPORTED - LIns* LirWriter::qjoin(LInsp lo, LInsp hi) - { - return ins2(LIR_qjoin, lo, hi); - } -#endif - - LIns* LirWriter::insImmWord(intptr_t value) - { -#ifdef NANOJIT_64BIT - return insImmq(value); -#else - return insImm(value); -#endif - } - - LIns* LirWriter::insImmPtr(const void *ptr) - { -#ifdef NANOJIT_64BIT - return insImmq((uint64_t)ptr); -#else - return insImm((int32_t)ptr); -#endif - } - LIns* LirWriter::ins_choose(LIns* cond, LIns* iftrue, LIns* iffalse, bool use_cmov) { - // if not a conditional, make it implicitly an ==0 test (then flop results) - if (!cond->isCmp()) - { + // 'cond' must be a conditional, unless it has been optimized to 0 or + // 1. In that case make it an ==0 test and flip the branches. It'll + // get constant-folded by ExprFilter subsequently. 
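ExprFilter's role, visible in the isconstf() folding hunks above (LIR_fneg and LIR_f2i of a known double become immediates) and assumed by the ins_choose() comment below, is to fold constant operands at LIR-construction time. A toy writer-pipeline stage showing the same pattern; the types and method names are illustrative, not nanojit's LirWriter API:

#include <cstdio>
#include <memory>
#include <vector>

struct Ins {
    bool isConst;
    double value;        // meaningful when isConst
    const char* op;      // "imm" or "neg"
};

struct Writer {
    std::vector<std::unique_ptr<Ins>> buf;
    virtual ~Writer() {}
    virtual Ins* insImm(double v) {
        buf.push_back(std::unique_ptr<Ins>(new Ins{true, v, "imm"}));
        return buf.back().get();
    }
    virtual Ins* insNeg(Ins*) {
        buf.push_back(std::unique_ptr<Ins>(new Ins{false, 0.0, "neg"}));
        return buf.back().get();
    }
};

// The filter sits in front of another writer and folds where it can,
// the way ExprFilter folds LIR_fneg of a constant into insImmf(-c).
struct FoldFilter : Writer {
    Writer* out;
    explicit FoldFilter(Writer* w) : out(w) {}
    Ins* insImm(double v) override { return out->insImm(v); }
    Ins* insNeg(Ins* a) override {
        if (a->isConst)
            return out->insImm(-a->value);   // fold: emit an immediate instead
        return out->insNeg(a);
    }
};

int main() {
    Writer base;
    FoldFilter f(&base);
    Ins* k = f.insImm(3.0);
    Ins* n = f.insNeg(k);                    // folded to the immediate -3.0
    printf("%s %g\n", n->op, n->value);      // prints: imm -3
    return 0;
}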
+ if (!cond->isCmp()) { + NanoAssert(cond->isconst()); cond = ins_eq0(cond); LInsp tmp = iftrue; iftrue = iffalse; @@ -1050,56 +1062,50 @@ namespace nanojit using namespace avmplus; - StackFilter::StackFilter(LirFilter *in, Allocator& alloc, LInsp sp, LInsp rp) - : LirFilter(in), sp(sp), rp(rp), spStk(alloc), rpStk(alloc), spTop(0), rpTop(0) + StackFilter::StackFilter(LirFilter *in, Allocator& alloc, LInsp sp) + : LirFilter(in), sp(sp), stk(alloc), top(0) {} - bool StackFilter::ignoreStore(LInsp ins, int top, BitSet* stk) - { - bool ignore = false; - int d = ins->disp() >> 2; - if (d >= top) { - ignore = true; - } else { - d = top - d; - if (ins->oprnd1()->isN64()) { - // storing 8 bytes - if (stk->get(d) && stk->get(d-1)) { - ignore = true; - } else { - stk->set(d); - stk->set(d-1); - } - } - else { - // storing 4 bytes - NanoAssert(ins->oprnd1()->isI32()); - if (stk->get(d)) { - ignore = true; - } else { - stk->set(d); - } - } - } - return ignore; - } - + // If we see a sequence like this: + // + // sti sp[0] + // ... + // sti sp[0] + // + // where '...' contains no guards, we can remove the first store. Also, + // because stack entries are eight bytes each (we check this), if we have + // this: + // + // stfi sp[0] + // ... + // sti sp[0] + // + // we can again remove the first store -- even though the second store + // doesn't clobber the high four bytes -- because we know the entire value + // stored by the first store is dead. + // LInsp StackFilter::read() { - for (;;) - { - LInsp i = in->read(); - if (i->isStore()) - { - LInsp base = i->oprnd2(); + for (;;) { + LInsp ins = in->read(); + if (ins->isStore()) { + LInsp base = ins->oprnd2(); if (base == sp) { - if (ignoreStore(i, spTop, &spStk)) - continue; + // 'disp' must be eight-aligned because each stack entry is 8 bytes. + NanoAssert((ins->disp() & 0x7) == 0); - } else if (base == rp) { - if (ignoreStore(i, rpTop, &rpStk)) + int d = ins->disp() >> 3; + if (d >= top) { continue; + } else { + d = top - d; + if (stk.get(d)) { + continue; + } else { + stk.set(d); + } + } } } /* @@ -1107,16 +1113,13 @@ namespace nanojit * going to be wrong. Unfortunately there doesn't seem to be an easy way to detect * such branches. Just do not create any. 
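The comment above gives the whole dead-store scheme: reading the trace backwards, a store to an 8-byte sp slot is dead if an already-seen store covers the same slot, and coverage must be forgotten at guards, since the guarded exit may observe the stack. A compact model of that filter (it omits the check of disp against the frame top recorded at the last guard):

#include <cstdio>
#include <set>

struct Event { bool isGuard; int disp; };   // disp: byte offset of an sp store

int main() {
    // Execution order: st sp[0]; st sp[8]; guard; st sp[0]; st sp[0].
    // We read it backwards, as the assembler does.
    Event backwards[] = {
        {false, 0}, {false, 0}, {true, 0}, {false, 8}, {false, 0}
    };

    std::set<int> covered;                  // slots (disp >> 3) already stored to
    for (const Event& e : backwards) {
        if (e.isGuard) {
            covered.clear();                // stores before a guard stay live
            printf("guard (reset coverage)\n");
        } else if (!covered.insert(e.disp >> 3).second) {
            printf("st sp[%d] -> dead, removed\n", e.disp);
        } else {
            printf("st sp[%d] -> kept\n", e.disp);
        }
    }
    return 0;
}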
*/ - else if (i->isGuard()) - { - spStk.reset(); - rpStk.reset(); - getTops(i, spTop, rpTop); - spTop >>= 2; - rpTop >>= 2; + else if (ins->isGuard()) { + stk.reset(); + top = getTop(ins); + top >>= 3; } - return i; + return ins; } } @@ -1173,24 +1176,31 @@ namespace nanojit m_list[kind] = new (alloc) LInsp[m_cap[kind]]; } clear(); - m_find[LInsImm] = &LInsHashSet::findImm; - m_find[LInsImmq] = PTR_SIZE(NULL, &LInsHashSet::findImmq); - m_find[LInsImmf] = &LInsHashSet::findImmf; - m_find[LIns1] = &LInsHashSet::find1; - m_find[LIns2] = &LInsHashSet::find2; - m_find[LIns3] = &LInsHashSet::find3; - m_find[LInsLoad] = &LInsHashSet::findLoad; - m_find[LInsCall] = &LInsHashSet::findCall; + m_find[LInsImm] = &LInsHashSet::findImm; + m_find[LInsImmq] = PTR_SIZE(NULL, &LInsHashSet::findImmq); + m_find[LInsImmf] = &LInsHashSet::findImmf; + m_find[LIns1] = &LInsHashSet::find1; + m_find[LIns2] = &LInsHashSet::find2; + m_find[LIns3] = &LInsHashSet::find3; + m_find[LInsCall] = &LInsHashSet::findCall; + m_find[LInsLoadReadOnly] = &LInsHashSet::findLoadReadOnly; + m_find[LInsLoadStack] = &LInsHashSet::findLoadStack; + m_find[LInsLoadRStack] = &LInsHashSet::findLoadRStack; + m_find[LInsLoadOther] = &LInsHashSet::findLoadOther; + m_find[LInsLoadMultiple] = &LInsHashSet::findLoadMultiple; + } + + void LInsHashSet::clear(LInsHashKind kind) { + VMPI_memset(m_list[kind], 0, sizeof(LInsp)*m_cap[kind]); + m_used[kind] = 0; } void LInsHashSet::clear() { for (LInsHashKind kind = LInsFirst; kind <= LInsLast; kind = nextKind(kind)) { - VMPI_memset(m_list[kind], 0, sizeof(LInsp)*m_cap[kind]); - m_used[kind] = 0; + clear(kind); } } - inline uint32_t LInsHashSet::hashImm(int32_t a) { return _hashfinish(_hash32(0,a)); } @@ -1218,10 +1228,15 @@ namespace nanojit return _hashfinish(_hashptr(hash, c)); } - inline uint32_t LInsHashSet::hashLoad(LOpcode op, LInsp a, int32_t d) { + NanoStaticAssert(sizeof(AccSet) == 1); // required for hashLoad to work properly + + // Nb: no need to hash the load's AccSet because each region's loads go in + // a different hash table. + inline uint32_t LInsHashSet::hashLoad(LOpcode op, LInsp a, int32_t d, AccSet accSet) { uint32_t hash = _hash8(0,uint8_t(op)); hash = _hashptr(hash, a); - return _hashfinish(_hash32(hash, d)); + hash = _hash32(hash, d); + return _hashfinish(_hash8(hash, accSet)); } inline uint32_t LInsHashSet::hashCall(const CallInfo *ci, uint32_t argc, LInsp args[]) { @@ -1243,11 +1258,12 @@ namespace nanojit LInsp ins = oldlist[i]; if (!ins) continue; uint32_t j = (this->*find)(ins); + NanoAssert(!m_list[kind][j]); m_list[kind][j] = ins; } } - LInsp LInsHashSet::add(LInsHashKind kind, LInsp ins, uint32_t k) + void LInsHashSet::add(LInsHashKind kind, LInsp ins, uint32_t k) { NanoAssert(!m_list[kind][k]); m_used[kind]++; @@ -1255,20 +1271,21 @@ namespace nanojit if ((m_used[kind] * 4) >= (m_cap[kind] * 3)) { // load factor of 0.75 grow(kind); } - return ins; } LInsp LInsHashSet::findImm(int32_t a, uint32_t &k) { LInsHashKind kind = LInsImm; const uint32_t bitmask = m_cap[kind] - 1; - uint32_t hash = hashImm(a) & bitmask; + k = hashImm(a) & bitmask; uint32_t n = 1; - LInsp ins; - while ((ins = m_list[kind][hash]) != NULL && - (ins->imm32() != a)) - { + while (true) { + LInsp ins = m_list[kind][k]; + if (!ins) + return NULL; NanoAssert(ins->isconst()); + if (ins->imm32() == a) + return ins; // Quadratic probe: h(k,i) = h(k) + 0.5i + 0.5i^2, which gives the // sequence h(k), h(k)+1, h(k)+3, h(k)+6, h+10, ... 
This is a // good sequence for 2^n-sized tables as the values h(k,i) for i @@ -1276,11 +1293,9 @@ namespace nanojit // See http://portal.acm.org/citation.cfm?id=360737 and // http://en.wikipedia.org/wiki/Quadratic_probing (fetched // 06-Nov-2009) for more details. - hash = (hash + n) & bitmask; + k = (k + n) & bitmask; n += 1; } - k = hash; - return ins; } uint32_t LInsHashSet::findImm(LInsp ins) @@ -1295,18 +1310,18 @@ namespace nanojit { LInsHashKind kind = LInsImmq; const uint32_t bitmask = m_cap[kind] - 1; - uint32_t hash = hashImmq(a) & bitmask; + k = hashImmq(a) & bitmask; uint32_t n = 1; - LInsp ins; - while ((ins = m_list[kind][hash]) != NULL && - (ins->imm64() != a)) - { + while (true) { + LInsp ins = m_list[kind][k]; + if (!ins) + return NULL; NanoAssert(ins->isconstq()); - hash = (hash + n) & bitmask; + if (ins->imm64() == a) + return ins; + k = (k + n) & bitmask; n += 1; } - k = hash; - return ins; } uint32_t LInsHashSet::findImmq(LInsp ins) @@ -1321,18 +1336,18 @@ namespace nanojit { LInsHashKind kind = LInsImmf; const uint32_t bitmask = m_cap[kind] - 1; - uint32_t hash = hashImmq(a) & bitmask; + k = hashImmq(a) & bitmask; uint32_t n = 1; - LInsp ins; - while ((ins = m_list[kind][hash]) != NULL && - (ins->imm64() != a)) - { + while (true) { + LInsp ins = m_list[kind][k]; + if (!ins) + return NULL; NanoAssert(ins->isconstf()); - hash = (hash + n) & bitmask; + if (ins->imm64() == a) + return ins; + k = (k + n) & bitmask; n += 1; } - k = hash; - return ins; } uint32_t LInsHashSet::findImmf(LInsp ins) @@ -1346,17 +1361,17 @@ namespace nanojit { LInsHashKind kind = LIns1; const uint32_t bitmask = m_cap[kind] - 1; - uint32_t hash = hash1(op,a) & bitmask; + k = hash1(op, a) & bitmask; uint32_t n = 1; - LInsp ins; - while ((ins = m_list[kind][hash]) != NULL && - (ins->opcode() != op || ins->oprnd1() != a)) - { - hash = (hash + n) & bitmask; + while (true) { + LInsp ins = m_list[kind][k]; + if (!ins) + return NULL; + if (ins->isop(op) && ins->oprnd1() == a) + return ins; + k = (k + n) & bitmask; n += 1; } - k = hash; - return ins; } uint32_t LInsHashSet::find1(LInsp ins) @@ -1370,17 +1385,17 @@ namespace nanojit { LInsHashKind kind = LIns2; const uint32_t bitmask = m_cap[kind] - 1; - uint32_t hash = hash2(op,a,b) & bitmask; + k = hash2(op, a, b) & bitmask; uint32_t n = 1; - LInsp ins; - while ((ins = m_list[kind][hash]) != NULL && - (ins->opcode() != op || ins->oprnd1() != a || ins->oprnd2() != b)) - { - hash = (hash + n) & bitmask; + while (true) { + LInsp ins = m_list[kind][k]; + if (!ins) + return NULL; + if (ins->isop(op) && ins->oprnd1() == a && ins->oprnd2() == b) + return ins; + k = (k + n) & bitmask; n += 1; } - k = hash; - return ins; } uint32_t LInsHashSet::find2(LInsp ins) @@ -1394,17 +1409,17 @@ namespace nanojit { LInsHashKind kind = LIns3; const uint32_t bitmask = m_cap[kind] - 1; - uint32_t hash = hash3(op,a,b,c) & bitmask; + k = hash3(op, a, b, c) & bitmask; uint32_t n = 1; - LInsp ins; - while ((ins = m_list[kind][hash]) != NULL && - (ins->opcode() != op || ins->oprnd1() != a || ins->oprnd2() != b || ins->oprnd3() != c)) - { - hash = (hash + n) & bitmask; + while (true) { + LInsp ins = m_list[kind][k]; + if (!ins) + return NULL; + if (ins->isop(op) && ins->oprnd1() == a && ins->oprnd2() == b && ins->oprnd3() == c) + return ins; + k = (k + n) & bitmask; n += 1; } - k = hash; - return ins; } uint32_t LInsHashSet::find3(LInsp ins) @@ -1414,27 +1429,57 @@ namespace nanojit return k; } - LInsp LInsHashSet::findLoad(LOpcode op, LInsp a, int32_t d, uint32_t &k) + LInsp 
LInsHashSet::findLoad(LOpcode op, LInsp a, int32_t d, AccSet accSet, LInsHashKind kind, + uint32_t &k) { - LInsHashKind kind = LInsLoad; + (void)accSet; const uint32_t bitmask = m_cap[kind] - 1; - uint32_t hash = hashLoad(op,a,d) & bitmask; + k = hashLoad(op, a, d, accSet) & bitmask; uint32_t n = 1; - LInsp ins; - while ((ins = m_list[kind][hash]) != NULL && - (ins->opcode() != op || ins->oprnd1() != a || ins->disp() != d)) - { - hash = (hash + n) & bitmask; + while (true) { + LInsp ins = m_list[kind][k]; + if (!ins) + return NULL; + NanoAssert(ins->accSet() == accSet); + if (ins->isop(op) && ins->oprnd1() == a && ins->disp() == d) + return ins; + k = (k + n) & bitmask; n += 1; } - k = hash; - return ins; } - uint32_t LInsHashSet::findLoad(LInsp ins) + uint32_t LInsHashSet::findLoadReadOnly(LInsp ins) { uint32_t k; - findLoad(ins->opcode(), ins->oprnd1(), ins->disp(), k); + findLoad(ins->opcode(), ins->oprnd1(), ins->disp(), ins->accSet(), LInsLoadReadOnly, k); + return k; + } + + uint32_t LInsHashSet::findLoadStack(LInsp ins) + { + uint32_t k; + findLoad(ins->opcode(), ins->oprnd1(), ins->disp(), ins->accSet(), LInsLoadStack, k); + return k; + } + + uint32_t LInsHashSet::findLoadRStack(LInsp ins) + { + uint32_t k; + findLoad(ins->opcode(), ins->oprnd1(), ins->disp(), ins->accSet(), LInsLoadRStack, k); + return k; + } + + uint32_t LInsHashSet::findLoadOther(LInsp ins) + { + uint32_t k; + findLoad(ins->opcode(), ins->oprnd1(), ins->disp(), ins->accSet(), LInsLoadOther, k); + return k; + } + + uint32_t LInsHashSet::findLoadMultiple(LInsp ins) + { + uint32_t k; + findLoad(ins->opcode(), ins->oprnd1(), ins->disp(), ins->accSet(), LInsLoadMultiple, k); return k; } @@ -1450,17 +1495,17 @@ namespace nanojit { LInsHashKind kind = LInsCall; const uint32_t bitmask = m_cap[kind] - 1; - uint32_t hash = hashCall(ci, argc, args) & bitmask; + k = hashCall(ci, argc, args) & bitmask; uint32_t n = 1; - LInsp ins; - while ((ins = m_list[kind][hash]) != NULL && - (!ins->isCall() || ins->callInfo() != ci || !argsmatch(ins, argc, args))) - { - hash = (hash + n) & bitmask; + while (true) { + LInsp ins = m_list[kind][k]; + if (!ins) + return NULL; + if (ins->isCall() && ins->callInfo() == ci && argsmatch(ins, argc, args)) + return ins; + k = (k + n) & bitmask; n += 1; } - k = hash; - return ins; } uint32_t LInsHashSet::findCall(LInsp ins) @@ -1500,10 +1545,10 @@ namespace nanojit , maxlive(0) { } - void add(LInsp i, LInsp use) { - if (!i->isconst() && !i->isconstq() && !live.containsKey(i)) { - NanoAssert(size_t(i->opcode()) < sizeof(lirNames) / sizeof(lirNames[0])); - live.put(i,use); + void add(LInsp ins, LInsp use) { + if (!ins->isImmAny() && !live.containsKey(ins)) { + NanoAssert(size_t(ins->opcode()) < sizeof(lirNames) / sizeof(lirNames[0])); + live.put(ins,use); } } @@ -1549,7 +1594,7 @@ namespace nanojit uint32_t exits = 0; int total = 0; if (frag->lirbuf->state) - live.add(frag->lirbuf->state, in->pos()); + live.add(frag->lirbuf->state, in->finalIns()); for (LInsp ins = in->read(); !ins->isop(LIR_start); ins = in->read()) { total++; @@ -1656,7 +1701,6 @@ namespace nanojit CASE64(LIR_qilsh:) CASE64(LIR_qirsh:) CASE64(LIR_qursh:) - case LIR_addp: case LIR_add: case LIR_sub: case LIR_mul: @@ -1717,10 +1761,12 @@ namespace nanojit logc->printf("\n"); // print live exprs, going forwards - LirNameMap *names = frag->lirbuf->names; + LInsPrinter *printer = frag->lirbuf->printer; bool newblock = true; for (Seq* p = live.retired.get(); p != NULL; p = p->tail) { RetiredEntry* e = p->head; + InsBuf ib; + RefBuf rb; 
char livebuf[4000], *s=livebuf; *s = 0; if (!newblock && e->i->isop(LIR_label)) { @@ -1728,7 +1774,7 @@ namespace nanojit } newblock = false; for (Seq* p = e->live; p != NULL; p = p->tail) { - VMPI_strcpy(s, names->formatRef(p->head)); + VMPI_strcpy(s, printer->formatRef(&rb, p->head)); s += VMPI_strlen(s); *s++ = ' '; *s = 0; NanoAssert(s < livebuf+sizeof(livebuf)); @@ -1736,11 +1782,11 @@ namespace nanojit /* If the LIR insn is pretty short, print it and its live-after set on the same line. If not, put live-after set on a new line, suitably indented. */ - const char* insn_text = names->formatIns(e->i); + const char* insn_text = printer->formatIns(&ib, e->i); if (VMPI_strlen(insn_text) >= 30-2) { - logc->printf(" %-30s\n %-30s %s\n", names->formatIns(e->i), "", livebuf); + logc->printf(" %-30s\n %-30s %s\n", insn_text, "", livebuf); } else { - logc->printf(" %-30s %s\n", names->formatIns(e->i), livebuf); + logc->printf(" %-30s %s\n", insn_text, livebuf); } if (e->i->isGuard() || e->i->isBranch() || e->i->isRet()) { @@ -1750,137 +1796,184 @@ namespace nanojit } } - void LirNameMap::addName(LInsp i, const char* name) { - if (!names.containsKey(i)) { - char *copy = new (alloc) char[VMPI_strlen(name)+1]; - VMPI_strcpy(copy, name); + void LirNameMap::addNameWithSuffix(LInsp ins, const char *name, int suffix, + bool ignoreOneSuffix) { + // The lookup may succeed, ie. we may already have a name for this + // instruction. This can happen because of CSE. Eg. if we have this: + // + // ins = addName("foo", insImm(0)) + // + // that assigns the name "foo1" to 'ins'. If we later do this: + // + // ins2 = addName("foo", insImm(0)) + // + // then CSE will cause 'ins' and 'ins2' to be equal. So 'ins2' + // already has a name ("foo1") and there's no need to generate a new + // name "foo2". + // + if (!names.containsKey(ins)) { + const int N = 100; + char name2[N]; + if (suffix == 1 && ignoreOneSuffix) { + VMPI_snprintf(name2, N, "%s", name); // don't add '1' suffix + } else if (VMPI_isdigit(name[VMPI_strlen(name)-1])) { + VMPI_snprintf(name2, N, "%s_%d", name, suffix); // use '_' to avoid confusion + } else { + VMPI_snprintf(name2, N, "%s%d", name, suffix); // normal case + } + + char *copy = new (alloc) char[VMPI_strlen(name2)+1]; + VMPI_strcpy(copy, name2); Entry *e = new (alloc) Entry(copy); - names.put(i, e); + names.put(ins, e); } } - char* LirNameMap::formatAccSet(LInsp ins, bool isLoad, char* buf) { - AccSet accSet = ins->accSet(); + void LirNameMap::addName(LInsp ins, const char* name) { + addNameWithSuffix(ins, name, namecounts.add(name), /*ignoreOneSuffix*/true); + } + + const char* LirNameMap::createName(LInsp ins) { + if (ins->isCall()) { +#if NJ_SOFTFLOAT_SUPPORTED + if (ins->isop(LIR_callh)) { + ins = ins->oprnd1(); // we've presumably seen the other half already + } else +#endif + { + addNameWithSuffix(ins, ins->callInfo()->_name, funccounts.add(ins->callInfo()), + /*ignoreOneSuffix*/false); + } + } else { + addNameWithSuffix(ins, lirNames[ins->opcode()], lircounts.add(ins->opcode()), + /*ignoreOneSuffix*/false); + + } + return names.get(ins)->name; + } + + const char* LirNameMap::lookupName(LInsp ins) + { + Entry* e = names.get(ins); + return e ? 
e->name : NULL; + } + + + char* LInsPrinter::formatAccSet(RefBuf* buf, AccSet accSet) { int i = 0; - if ((isLoad && accSet == ACC_LOAD_ANY) || - (!isLoad && accSet == ACC_STORE_ANY)) - { - // boring, don't bother with a suffix - } else { - buf[i++] = '.'; - if (accSet & ACC_READONLY) { buf[i++] = 'r'; accSet &= ~ACC_READONLY; } - if (accSet & ACC_STACK) { buf[i++] = 's'; accSet &= ~ACC_STACK; } - if (accSet & ACC_OTHER) { buf[i++] = 'o'; accSet &= ~ACC_OTHER; } - // This assertion will fail if we add a new accSet value but - // forget to handle it here. - NanoAssert(accSet == 0); - } - buf[i] = 0; - return buf; + // 'c' is short for "const", because 'r' is used for RSTACK. + if (accSet & ACC_READONLY) { buf->buf[i++] = 'c'; accSet &= ~ACC_READONLY; } + if (accSet & ACC_STACK) { buf->buf[i++] = 's'; accSet &= ~ACC_STACK; } + if (accSet & ACC_RSTACK) { buf->buf[i++] = 'r'; accSet &= ~ACC_RSTACK; } + if (accSet & ACC_OTHER) { buf->buf[i++] = 'o'; accSet &= ~ACC_OTHER; } + // This assertion will fail if we add a new accSet value but + // forget to handle it here. + NanoAssert(accSet == 0); + buf->buf[i] = 0; + NanoAssert(size_t(i) < buf->len); + return buf->buf; } - void LirNameMap::copyName(LInsp i, const char *s, int suffix) { - char s2[200]; - if (VMPI_isdigit(s[VMPI_strlen(s)-1])) { - // if s ends with a digit, add '_' to clarify the suffix - VMPI_sprintf(s2,"%s_%d", s, suffix); - } else { - VMPI_sprintf(s2,"%s%d", s, suffix); - } - addName(i, s2); - } - - void LirNameMap::formatImm(int32_t c, char *buf) { + void LInsPrinter::formatImm(RefBuf* buf, int32_t c) { if (-10000 < c || c < 10000) { - VMPI_sprintf(buf,"%d", c); + VMPI_snprintf(buf->buf, buf->len, "%d", c); } else { #if !defined NANOJIT_64BIT - VMPI_sprintf(buf, "%s", labels->format((void*)c)); + formatAddr(buf, (void*)c); #else - VMPI_sprintf(buf, "0x%x", (unsigned int)c); + VMPI_snprintf(buf->buf, buf->len, "0x%x", (unsigned int)c); #endif } } - void LirNameMap::formatImmq(uint64_t c, char *buf) { + void LInsPrinter::formatImmq(RefBuf* buf, uint64_t c) { if (-10000 < (int64_t)c || c < 10000) { - VMPI_sprintf(buf, "%dLL", (int)c); + VMPI_snprintf(buf->buf, buf->len, "%dLL", (int)c); } else { #if defined NANOJIT_64BIT - VMPI_sprintf(buf, "%s", labels->format((void*)c)); + formatAddr(buf, (void*)c); #else - VMPI_sprintf(buf, "0x%llxLL", c); + VMPI_snprintf(buf->buf, buf->len, "0x%llxLL", c); #endif } } - const char* LirNameMap::formatRef(LIns *ref) + char* LInsPrinter::formatAddr(RefBuf* buf, void* p) { - char buffer[200], *buf=buffer; - buf[0]=0; - if (names.containsKey(ref)) { - const char* name = names.get(ref)->name; - VMPI_strcat(buf, name); + char* name; + int32_t offset; + addrNameMap->lookupAddr(p, name, offset); + + if (name) { + if (offset != 0) { + VMPI_snprintf(buf->buf, buf->len, "%p %s+%d", p, name, offset); + } else { + VMPI_snprintf(buf->buf, buf->len, "%p %s", p, name); + } + } else { + VMPI_snprintf(buf->buf, buf->len, "%p", p); } - else if (ref->isconstf()) { - VMPI_sprintf(buf, "%g", ref->imm64f()); - } - else if (ref->isconstq()) { - formatImmq(ref->imm64(), buf); + + return buf->buf; + } + + char* LInsPrinter::formatRef(RefBuf* buf, LIns *ref) + { + // - If 'ref' already has a name, use it. + // - Otherwise, if it's a constant, use the constant. + // - Otherwise, give it a name and use it. 
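
A standalone sketch of the buffer discipline the new formatXyz() functions follow (RefBufSketch and formatHex are hypothetical stand-ins, not patch code): each formatter writes at most len bytes into the caller-supplied buffer and returns a pointer to it, so two results can be live at once only if the caller supplies two buffers -- which is why formatIns() below declares b1..b4.

    #include <cstdio>

    struct RefBufSketch {
        enum { len = 200 };                 // cf. RefBuf::len in this patch
        char buf[len];
    };

    // Bounded formatting: writes at most RefBufSketch::len bytes, always
    // NUL-terminates, and hands the caller's buffer back for printf-style use.
    static char* formatHex(RefBufSketch* b, unsigned v) {
        snprintf(b->buf, RefBufSketch::len, "0x%x", v);
        return b->buf;
    }

    int main() {
        RefBufSketch b1, b2;                // two live results need two buffers
        printf("%s %s\n", formatHex(&b1, 0xdead), formatHex(&b2, 0xbeef));
        return 0;
    }
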
+ const char* name = lirNameMap->lookupName(ref); + if (name) { + VMPI_snprintf(buf->buf, buf->len, "%s", name); } else if (ref->isconst()) { - formatImm(ref->imm32(), buf); + formatImm(buf, ref->imm32()); + } +#ifdef NANOJIT_64BIT + else if (ref->isconstq()) { + formatImmq(buf, ref->imm64()); + } +#endif + else if (ref->isconstf()) { + VMPI_snprintf(buf->buf, buf->len, "%g", ref->imm64f()); } else { - if (ref->isCall()) { -#if NJ_SOFTFLOAT_SUPPORTED - if (ref->isop(LIR_callh)) { - // we've presumably seen the other half already - ref = ref->oprnd1(); - } else -#endif - { - copyName(ref, ref->callInfo()->_name, funccounts.add(ref->callInfo())); - } - } else { - NanoAssert(size_t(ref->opcode()) < sizeof(lirNames) / sizeof(lirNames[0])); - copyName(ref, lirNames[ref->opcode()], lircounts.add(ref->opcode())); - } - const char* name = names.get(ref)->name; - VMPI_strcat(buf, name); + name = lirNameMap->createName(ref); + VMPI_snprintf(buf->buf, buf->len, "%s", name); } - return labels->dup(buffer); + return buf->buf; } - const char* LirNameMap::formatIns(LIns* i) + char* LInsPrinter::formatIns(InsBuf* buf, LIns* i) { - char sbuf[4096]; - char *s = sbuf; + char *s = buf->buf; + size_t n = buf->len; + RefBuf b1, b2, b3, b4; LOpcode op = i->opcode(); switch (op) { case LIR_int: - VMPI_sprintf(s, "%s = %s %d", formatRef(i), lirNames[op], i->imm32()); + VMPI_snprintf(s, n, "%s = %s %d", formatRef(&b1, i), lirNames[op], i->imm32()); break; case LIR_alloc: - VMPI_sprintf(s, "%s = %s %d", formatRef(i), lirNames[op], i->size()); + VMPI_snprintf(s, n, "%s = %s %d", formatRef(&b1, i), lirNames[op], i->size()); break; #ifdef NANOJIT_64BIT case LIR_quad: - VMPI_sprintf(s, "%s = %s %X:%X", formatRef(i), lirNames[op], + VMPI_snprintf(s, n, "%s = %s %X:%X", formatRef(&b1, i), lirNames[op], i->imm64_1(), i->imm64_0()); break; #endif case LIR_float: - VMPI_sprintf(s, "%s = %s %g", formatRef(i), lirNames[op], i->imm64f()); + VMPI_snprintf(s, n, "%s = %s %g", formatRef(&b1, i), lirNames[op], i->imm64f()); break; case LIR_start: case LIR_regfence: - VMPI_sprintf(s, "%s", lirNames[op]); + VMPI_snprintf(s, n, "%s", lirNames[op]); break; case LIR_icall: @@ -1888,64 +1981,69 @@ namespace nanojit CASE64(LIR_qcall:) { const CallInfo* call = i->callInfo(); int32_t argc = i->argc(); + int32_t m = int32_t(n); // Windows doesn't have 'ssize_t' if (call->isIndirect()) - VMPI_sprintf(s, "%s = %s [%s] ( ", formatRef(i), lirNames[op], formatRef(i->arg(--argc))); + m -= VMPI_snprintf(s, m, "%s = %s.%s [%s] ( ", formatRef(&b1, i), lirNames[op], + formatAccSet(&b2, call->_storeAccSet), + formatRef(&b3, i->arg(--argc))); else - VMPI_sprintf(s, "%s = %s #%s ( ", formatRef(i), lirNames[op], call->_name); + m -= VMPI_snprintf(s, m, "%s = %s.%s #%s ( ", formatRef(&b1, i), lirNames[op], + formatAccSet(&b2, call->_storeAccSet), call->_name); + if (m < 0) break; for (int32_t j = argc - 1; j >= 0; j--) { s += VMPI_strlen(s); - VMPI_sprintf(s, "%s ",formatRef(i->arg(j))); + m -= VMPI_snprintf(s, m, "%s ",formatRef(&b2, i->arg(j))); + if (m < 0) break; } s += VMPI_strlen(s); - VMPI_sprintf(s, ")"); + m -= VMPI_snprintf(s, m, ")"); break; } - case LIR_jtbl: - VMPI_sprintf(s, "%s %s [ ", lirNames[op], formatRef(i->oprnd1())); - for (uint32_t j = 0, n = i->getTableSize(); j < n; j++) { - if (VMPI_strlen(sbuf) + 50 > sizeof(sbuf)) { - s += VMPI_strlen(s); - VMPI_sprintf(s, "... 
"); - break; - } + case LIR_jtbl: { + int32_t m = int32_t(n); // Windows doesn't have 'ssize_t' + m -= VMPI_snprintf(s, m, "%s %s [ ", lirNames[op], formatRef(&b1, i->oprnd1())); + if (m < 0) break; + for (uint32_t j = 0, sz = i->getTableSize(); j < sz; j++) { LIns* target = i->getTarget(j); s += VMPI_strlen(s); - VMPI_sprintf(s, "%s ", target ? formatRef(target) : "unpatched"); + m -= VMPI_snprintf(s, m, "%s ", target ? formatRef(&b2, target) : "unpatched"); + if (m < 0) break; } s += VMPI_strlen(s); - VMPI_sprintf(s, "]"); + m -= VMPI_snprintf(s, m, "]"); break; + } case LIR_param: { uint32_t arg = i->paramArg(); if (!i->paramKind()) { if (arg < sizeof(Assembler::argRegs)/sizeof(Assembler::argRegs[0])) { - VMPI_sprintf(s, "%s = %s %d %s", formatRef(i), lirNames[op], + VMPI_snprintf(s, n, "%s = %s %d %s", formatRef(&b1, i), lirNames[op], arg, gpn(Assembler::argRegs[arg])); } else { - VMPI_sprintf(s, "%s = %s %d", formatRef(i), lirNames[op], arg); + VMPI_snprintf(s, n, "%s = %s %d", formatRef(&b1, i), lirNames[op], arg); } } else { - VMPI_sprintf(s, "%s = %s %d %s", formatRef(i), lirNames[op], + VMPI_snprintf(s, n, "%s = %s %d %s", formatRef(&b1, i), lirNames[op], arg, gpn(Assembler::savedRegs[arg])); } break; } case LIR_label: - VMPI_sprintf(s, "%s:", formatRef(i)); + VMPI_snprintf(s, n, "%s:", formatRef(&b1, i)); break; case LIR_jt: case LIR_jf: - VMPI_sprintf(s, "%s %s -> %s", lirNames[op], formatRef(i->oprnd1()), - i->oprnd2() ? formatRef(i->oprnd2()) : "unpatched"); + VMPI_snprintf(s, n, "%s %s -> %s", lirNames[op], formatRef(&b1, i->oprnd1()), + i->oprnd2() ? formatRef(&b2, i->oprnd2()) : "unpatched"); break; case LIR_j: - VMPI_sprintf(s, "%s -> %s", lirNames[op], - i->oprnd2() ? formatRef(i->oprnd2()) : "unpatched"); + VMPI_snprintf(s, n, "%s -> %s", lirNames[op], + i->oprnd2() ? formatRef(&b1, i->oprnd2()) : "unpatched"); break; case LIR_live: @@ -1954,7 +2052,7 @@ namespace nanojit case LIR_ret: CASE64(LIR_qret:) case LIR_fret: - VMPI_sprintf(s, "%s %s", lirNames[op], formatRef(i->oprnd1())); + VMPI_snprintf(s, n, "%s %s", lirNames[op], formatRef(&b1, i->oprnd1())); break; CASESF(LIR_callh:) @@ -1970,7 +2068,8 @@ namespace nanojit CASE64(LIR_u2q:) CASE64(LIR_q2i:) case LIR_f2i: - VMPI_sprintf(s, "%s = %s %s", formatRef(i), lirNames[op], formatRef(i->oprnd1())); + VMPI_snprintf(s, n, "%s = %s %s", formatRef(&b1, i), lirNames[op], + formatRef(&b2, i->oprnd1())); break; case LIR_x: @@ -1978,17 +2077,16 @@ namespace nanojit case LIR_xf: case LIR_xbarrier: case LIR_xtbl: - formatGuard(i, s); + formatGuard(buf, i); break; case LIR_addxov: case LIR_subxov: case LIR_mulxov: - formatGuardXov(i, s); + formatGuardXov(buf, i); break; case LIR_add: CASE64(LIR_qiadd:) - case LIR_addp: case LIR_sub: case LIR_mul: CASE86(LIR_div:) @@ -2016,25 +2114,20 @@ namespace nanojit case LIR_fle: case LIR_fgt: case LIR_fge: - VMPI_sprintf(s, "%s = %s %s, %s", formatRef(i), lirNames[op], - formatRef(i->oprnd1()), - formatRef(i->oprnd2())); - break; - #if NJ_SOFTFLOAT_SUPPORTED case LIR_qjoin: - VMPI_sprintf(s, "%s (%s), %s", lirNames[op], - formatRef(i->oprnd1()), - formatRef(i->oprnd2())); - break; #endif + VMPI_snprintf(s, n, "%s = %s %s, %s", formatRef(&b1, i), lirNames[op], + formatRef(&b2, i->oprnd1()), + formatRef(&b3, i->oprnd2())); + break; CASE64(LIR_qcmov:) case LIR_cmov: - VMPI_sprintf(s, "%s = %s %s ? %s : %s", formatRef(i), lirNames[op], - formatRef(i->oprnd1()), - formatRef(i->oprnd2()), - formatRef(i->oprnd3())); + VMPI_snprintf(s, n, "%s = %s %s ? 
%s : %s", formatRef(&b1, i), lirNames[op], + formatRef(&b2, i->oprnd1()), + formatRef(&b3, i->oprnd2()), + formatRef(&b4, i->oprnd3())); break; case LIR_ld: @@ -2044,52 +2137,51 @@ namespace nanojit case LIR_ldzs: case LIR_ldsb: case LIR_ldss: - case LIR_ld32f: { - char b[32]; - VMPI_sprintf(s, "%s = %s%s %s[%d]", formatRef(i), lirNames[op], - formatAccSet(i, /*isLoad*/true, b), - formatRef(i->oprnd1()), + case LIR_ld32f: + VMPI_snprintf(s, n, "%s = %s.%s %s[%d]", formatRef(&b1, i), lirNames[op], + formatAccSet(&b2, i->accSet()), + formatRef(&b3, i->oprnd1()), i->disp()); break; - } case LIR_sti: CASE64(LIR_stqi:) case LIR_stfi: case LIR_stb: case LIR_sts: - case LIR_st32f: { - char b[32]; - VMPI_sprintf(s, "%s%s %s[%d] = %s", lirNames[op], - formatAccSet(i, /*isLoad*/false, b), - formatRef(i->oprnd2()), + case LIR_st32f: + VMPI_snprintf(s, n, "%s.%s %s[%d] = %s", lirNames[op], + formatAccSet(&b1, i->accSet()), + formatRef(&b2, i->oprnd2()), i->disp(), - formatRef(i->oprnd1())); + formatRef(&b3, i->oprnd1())); break; - } default: NanoAssertMsgf(0, "Can't handle opcode %s\n", lirNames[op]); break; } - NanoAssert(VMPI_strlen(sbuf) < sizeof(sbuf)-1); - return labels->dup(sbuf); + return buf->buf; } #endif CseFilter::CseFilter(LirWriter *out, Allocator& alloc) - : LirWriter(out) + : LirWriter(out), storesSinceLastLoad(ACC_NONE) { uint32_t kInitialCaps[LInsLast + 1]; - kInitialCaps[LInsImm] = 128; - kInitialCaps[LInsImmq] = PTR_SIZE(0, 16); - kInitialCaps[LInsImmf] = 16; - kInitialCaps[LIns1] = 256; - kInitialCaps[LIns2] = 512; - kInitialCaps[LIns3] = 16; - kInitialCaps[LInsLoad] = 16; - kInitialCaps[LInsCall] = 64; + kInitialCaps[LInsImm] = 128; + kInitialCaps[LInsImmq] = PTR_SIZE(0, 16); + kInitialCaps[LInsImmf] = 16; + kInitialCaps[LIns1] = 256; + kInitialCaps[LIns2] = 512; + kInitialCaps[LIns3] = 16; + kInitialCaps[LInsCall] = 64; + kInitialCaps[LInsLoadReadOnly] = 16; + kInitialCaps[LInsLoadStack] = 16; + kInitialCaps[LInsLoadRStack] = 16; + kInitialCaps[LInsLoadOther] = 16; + kInitialCaps[LInsLoadMultiple] = 16; exprs = new (alloc) LInsHashSet(alloc, kInitialCaps); } @@ -2097,13 +2189,14 @@ namespace nanojit { uint32_t k; LInsp ins = exprs->findImm(imm, k); - if (ins) - return ins; - ins = out->insImm(imm); + if (!ins) { + ins = out->insImm(imm); + exprs->add(LInsImm, ins, k); + } // We assume that downstream stages do not modify the instruction, so // that we can insert 'ins' into slot 'k'. Check this. 
- NanoAssert(ins->opcode() == LIR_int && ins->imm32() == imm); - return exprs->add(LInsImm, ins, k); + NanoAssert(ins->isop(LIR_int) && ins->imm32() == imm); + return ins; } #ifdef NANOJIT_64BIT @@ -2111,11 +2204,12 @@ namespace nanojit { uint32_t k; LInsp ins = exprs->findImmq(q, k); - if (ins) - return ins; - ins = out->insImmq(q); - NanoAssert(ins->opcode() == LIR_quad && ins->imm64() == q); - return exprs->add(LInsImmq, ins, k); + if (!ins) { + ins = out->insImmq(q); + exprs->add(LInsImmq, ins, k); + } + NanoAssert(ins->isop(LIR_quad) && ins->imm64() == q); + return ins; } #endif @@ -2130,85 +2224,126 @@ namespace nanojit } u; u.d = d; LInsp ins = exprs->findImmf(u.u64, k); - if (ins) - return ins; - ins = out->insImmf(d); - NanoAssert(ins->opcode() == LIR_float && ins->imm64() == u.u64); - return exprs->add(LInsImmf, ins, k); + if (!ins) { + ins = out->insImmf(d); + exprs->add(LInsImmf, ins, k); + } + NanoAssert(ins->isop(LIR_float) && ins->imm64() == u.u64); + return ins; } - LIns* CseFilter::ins0(LOpcode v) + LIns* CseFilter::ins0(LOpcode op) { - if (v == LIR_label) + if (op == LIR_label) exprs->clear(); - return out->ins0(v); + return out->ins0(op); } - LIns* CseFilter::ins1(LOpcode v, LInsp a) + LIns* CseFilter::ins1(LOpcode op, LInsp a) { - if (isCseOpcode(v)) { + LInsp ins; + if (isCseOpcode(op)) { uint32_t k; - LInsp ins = exprs->find1(v, a, k); - if (ins) - return ins; - ins = out->ins1(v, a); - NanoAssert(ins->opcode() == v && ins->oprnd1() == a); - return exprs->add(LIns1, ins, k); - } - return out->ins1(v,a); - } - - LIns* CseFilter::ins2(LOpcode v, LInsp a, LInsp b) - { - if (isCseOpcode(v)) { - uint32_t k; - LInsp ins = exprs->find2(v, a, b, k); - if (ins) - return ins; - ins = out->ins2(v, a, b); - NanoAssert(ins->opcode() == v && ins->oprnd1() == a && ins->oprnd2() == b); - return exprs->add(LIns2, ins, k); - } - return out->ins2(v,a,b); - } - - LIns* CseFilter::ins3(LOpcode v, LInsp a, LInsp b, LInsp c) - { - NanoAssert(isCseOpcode(v)); - uint32_t k; - LInsp ins = exprs->find3(v, a, b, c, k); - if (ins) - return ins; - ins = out->ins3(v, a, b, c); - NanoAssert(ins->opcode() == v && ins->oprnd1() == a && ins->oprnd2() == b && - ins->oprnd3() == c); - return exprs->add(LIns3, ins, k); - } - - LIns* CseFilter::insLoad(LOpcode v, LInsp base, int32_t disp, AccSet accSet) - { - if (isS16(disp)) { - // XXX: This condition is overly strict. Bug 517910 will make it better. 
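
The observable effect of the insImm()/insImmq()/insImmf() rewrites above, as a toy self-contained analogue (std::map standing in for LInsHashSet; none of this is patch code): requesting the same constant twice yields the same node, so each immediate is materialised once per fragment.

    #include <cassert>
    #include <map>

    struct Node { int imm; };                 // toy stand-in for LIns

    static std::map<int, Node*> table;        // toy immediates table

    static Node* insImmCse(int imm) {
        Node*& slot = table[imm];             // find, or make an empty slot
        if (!slot) {                          // miss: "emit" once and cache
            slot = new Node();
            slot->imm = imm;
        }
        return slot;                          // hit and miss look the same
    }

    int main() {
        assert(insImmCse(42) == insImmCse(42));   // CSE'd to one node
        assert(insImmCse(7)  != insImmCse(42));
        return 0;
    }
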
- if (accSet == ACC_READONLY) { - uint32_t k; - LInsp ins = exprs->findLoad(v, base, disp, k); - if (ins) - return ins; - ins = out->insLoad(v, base, disp, accSet); - NanoAssert(ins->opcode() == v && ins->oprnd1() == base && ins->disp() == disp); - return exprs->add(LInsLoad, ins, k); + ins = exprs->find1(op, a, k); + if (!ins) { + ins = out->ins1(op, a); + exprs->add(LIns1, ins, k); } - return out->insLoad(v, base, disp, accSet); + } else { + ins = out->ins1(op, a); + } + NanoAssert(ins->isop(op) && ins->oprnd1() == a); + return ins; + } + + LIns* CseFilter::ins2(LOpcode op, LInsp a, LInsp b) + { + LInsp ins; + NanoAssert(isCseOpcode(op)); + uint32_t k; + ins = exprs->find2(op, a, b, k); + if (!ins) { + ins = out->ins2(op, a, b); + exprs->add(LIns2, ins, k); + } + NanoAssert(ins->isop(op) && ins->oprnd1() == a && ins->oprnd2() == b); + return ins; + } + + LIns* CseFilter::ins3(LOpcode op, LInsp a, LInsp b, LInsp c) + { + NanoAssert(isCseOpcode(op)); + uint32_t k; + LInsp ins = exprs->find3(op, a, b, c, k); + if (!ins) { + ins = out->ins3(op, a, b, c); + exprs->add(LIns3, ins, k); + } + NanoAssert(ins->isop(op) && ins->oprnd1() == a && ins->oprnd2() == b && ins->oprnd3() == c); + return ins; + } + + LIns* CseFilter::insLoad(LOpcode op, LInsp base, int32_t disp, AccSet loadAccSet) + { + LInsp ins; + if (isS16(disp)) { + // Clear all loads aliased by stores and calls since the last time + // we were in this function. + if (storesSinceLastLoad != ACC_NONE) { + NanoAssert(!(storesSinceLastLoad & ACC_READONLY)); // can't store to READONLY + if (storesSinceLastLoad & ACC_STACK) { exprs->clear(LInsLoadStack); } + if (storesSinceLastLoad & ACC_RSTACK) { exprs->clear(LInsLoadRStack); } + if (storesSinceLastLoad & ACC_OTHER) { exprs->clear(LInsLoadOther); } + // Loads marked with multiple access regions must be treated + // conservatively -- we always clear all of them. + exprs->clear(LInsLoadMultiple); + storesSinceLastLoad = ACC_NONE; + } + + LInsHashKind kind; + switch (loadAccSet) { + case ACC_READONLY: kind = LInsLoadReadOnly; break; + case ACC_STACK: kind = LInsLoadStack; break; + case ACC_RSTACK: kind = LInsLoadRStack; break; + case ACC_OTHER: kind = LInsLoadOther; break; + default: kind = LInsLoadMultiple; break; + } + + uint32_t k; + ins = exprs->findLoad(op, base, disp, loadAccSet, kind, k); + if (!ins) { + ins = out->insLoad(op, base, disp, loadAccSet); + exprs->add(kind, ins, k); + } + NanoAssert(ins->isop(op) && ins->oprnd1() == base && ins->disp() == disp); + } else { // If the displacement is more than 16 bits, put it in a separate - // instruction. LirBufWriter also does this, we do it here as - // well because CseFilter relies on LirBufWriter not changing - // code. - return insLoad(v, ins2(LIR_addp, base, insImmWord(disp)), 0, accSet); + // instruction. Nb: LirBufWriter also does this, we do it here + // too because CseFilter relies on LirBufWriter not changing code. + ins = insLoad(op, ins2(LIR_piadd, base, insImmWord(disp)), 0, loadAccSet); } + return ins; } - LInsp CseFilter::insGuard(LOpcode v, LInsp c, GuardRecord *gr) + LIns* CseFilter::insStore(LOpcode op, LInsp value, LInsp base, int32_t disp, AccSet accSet) + { + LInsp ins; + if (isS16(disp)) { + storesSinceLastLoad |= accSet; + ins = out->insStore(op, value, base, disp, accSet); + NanoAssert(ins->isop(op) && ins->oprnd1() == value && ins->oprnd2() == base && + ins->disp() == disp && ins->accSet() == accSet); + } else { + // If the displacement is more than 16 bits, put it in a separate + // instruction. 
Nb: LirBufWriter also does this, we do it here + // too because CseFilter relies on LirBufWriter not changing code. + ins = insStore(op, value, ins2(LIR_piadd, base, insImmWord(disp)), 0, accSet); + } + return ins; + } + + LInsp CseFilter::insGuard(LOpcode op, LInsp c, GuardRecord *gr) { // LIR_xt and LIR_xf guards are CSEable. Note that we compare the // opcode and condition when determining if two guards are equivalent @@ -2227,104 +2362,58 @@ namespace nanojit // - The CSE algorithm will always keep guard 1 and remove guard 2 // (not vice versa). The current algorithm does this. // - if (isCseOpcode(v)) { + LInsp ins; + if (isCseOpcode(op)) { // conditional guard uint32_t k; - LInsp ins = exprs->find1(v, c, k); - if (ins) - return 0; - ins = out->insGuard(v, c, gr); - NanoAssert(ins->opcode() == v && ins->oprnd1() == c); - return exprs->add(LIns1, ins, k); + ins = exprs->find1(op, c, k); + if (!ins) { + ins = out->insGuard(op, c, gr); + exprs->add(LIns1, ins, k); + } + } else { + ins = out->insGuard(op, c, gr); } - return out->insGuard(v, c, gr); + NanoAssert(ins->isop(op) && ins->oprnd1() == c); + return ins; } - LInsp CseFilter::insGuardXov(LOpcode v, LInsp a, LInsp b, GuardRecord *gr) + LInsp CseFilter::insGuardXov(LOpcode op, LInsp a, LInsp b, GuardRecord *gr) { // LIR_*xov are CSEable. See CseFilter::insGuard() for details. - NanoAssert(isCseOpcode(v)); + NanoAssert(isCseOpcode(op)); // conditional guard uint32_t k; - LInsp ins = exprs->find2(v, a, b, k); - if (ins) - return ins; - ins = out->insGuardXov(v, a, b, gr); - NanoAssert(ins->opcode() == v && ins->oprnd1() == a && ins->oprnd2() == b); - return exprs->add(LIns2, ins, k); + LInsp ins = exprs->find2(op, a, b, k); + if (!ins) { + ins = out->insGuardXov(op, a, b, gr); + exprs->add(LIns2, ins, k); + } + NanoAssert(ins->isop(op) && ins->oprnd1() == a && ins->oprnd2() == b); + return ins; } LInsp CseFilter::insCall(const CallInfo *ci, LInsp args[]) { + LInsp ins; + uint32_t argc = ci->count_args(); if (ci->_isPure) { NanoAssert(ci->_storeAccSet == ACC_NONE); uint32_t k; - uint32_t argc = ci->count_args(); - LInsp ins = exprs->findCall(ci, argc, args, k); - if (ins) - return ins; - ins = out->insCall(ci, args); - NanoAssert(ins->isCall() && ins->callInfo() == ci && argsmatch(ins, argc, args)); - return exprs->add(LInsCall, ins, k); - } - return out->insCall(ci, args); - } - - LInsp LoadFilter::insLoad(LOpcode v, LInsp base, int32_t disp, AccSet accSet) - { - if (base != sp && base != rp) - { - switch (v) - { - case LIR_ld: - CASE64(LIR_ldq:) - case LIR_ldf: - case LIR_ld32f: - case LIR_ldsb: - case LIR_ldss: - case LIR_ldzb: - case LIR_ldzs: - { - uint32_t k; - LInsp ins = exprs->findLoad(v, base, disp, k); - if (ins) - return ins; - ins = out->insLoad(v, base, disp, accSet); - return exprs->add(LInsLoad, ins, k); - } - default: - // fall thru - break; + ins = exprs->findCall(ci, argc, args, k); + if (!ins) { + ins = out->insCall(ci, args); + exprs->add(LInsCall, ins, k); } + } else { + // We only need to worry about aliasing if !ci->_isPure. 
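
Pulling the new storesSinceLastLoad machinery together, a freestanding mock of the scheme implemented by insStore(), insCall() and insLoad() above (region bit values copied from this patch; MockTables is hypothetical): stores and impure calls merely record which regions became dirty, and the per-region load tables are flushed lazily by the next load.

    typedef unsigned AccSet;
    enum {
        ACC_NONE   = 0,
        ACC_STACK  = 1 << 1,
        ACC_RSTACK = 1 << 2,
        ACC_OTHER  = 1 << 3
    };
    enum LoadKind { LoadStack, LoadRStack, LoadOther, LoadMultiple };

    struct MockTables { void clear(LoadKind) { /* drop that table's entries */ } };

    static MockTables tables;
    static AccSet storesSinceLastLoad = ACC_NONE;

    // Cheap: a store (or an impure call) just accumulates its regions...
    static void onStore(AccSet accSet) { storesSinceLastLoad |= accSet; }

    // ...and the next load pays, clearing only the tables made stale.
    static void beforeLoad() {
        if (storesSinceLastLoad != ACC_NONE) {
            if (storesSinceLastLoad & ACC_STACK)  tables.clear(LoadStack);
            if (storesSinceLastLoad & ACC_RSTACK) tables.clear(LoadRStack);
            if (storesSinceLastLoad & ACC_OTHER)  tables.clear(LoadOther);
            tables.clear(LoadMultiple);  // multi-region loads: always flushed
            storesSinceLastLoad = ACC_NONE;
        }
    }
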
+ storesSinceLastLoad |= ci->_storeAccSet; + ins = out->insCall(ci, args); } - return out->insLoad(v, base, disp, accSet); + NanoAssert(ins->isCall() && ins->callInfo() == ci && argsmatch(ins, argc, args)); + return ins; } - void LoadFilter::clear(LInsp p) - { - if (p != sp && p != rp) - exprs->clear(); - } - - LInsp LoadFilter::insStore(LOpcode op, LInsp v, LInsp b, int32_t d, AccSet accSet) - { - clear(b); - return out->insStore(op, v, b, d, accSet); - } - - LInsp LoadFilter::insCall(const CallInfo *ci, LInsp args[]) - { - if (!ci->_isPure) - exprs->clear(); - return out->insCall(ci, args); - } - - LInsp LoadFilter::ins0(LOpcode op) - { - if (op == LIR_label) - exprs->clear(); - return out->ins0(op); - } #if NJ_SOFTFLOAT_SUPPORTED static double FASTCALL i2f(int32_t i) { return i; } @@ -2340,11 +2429,11 @@ namespace nanojit static int32_t FASTCALL fle(double a, double b) { return a <= b; } static int32_t FASTCALL fge(double a, double b) { return a >= b; } - #define SIG_F_I (ARGSIZE_F | ARGSIZE_I << ARGSIZE_SHIFT*1) - #define SIG_F_U (ARGSIZE_F | ARGSIZE_U << ARGSIZE_SHIFT*1) - #define SIG_F_F (ARGSIZE_F | ARGSIZE_F << ARGSIZE_SHIFT*1) - #define SIG_F_FF (ARGSIZE_F | ARGSIZE_F << ARGSIZE_SHIFT*1 | ARGSIZE_F << ARGSIZE_SHIFT*2) - #define SIG_B_FF (ARGSIZE_B | ARGSIZE_F << ARGSIZE_SHIFT*1 | ARGSIZE_F << ARGSIZE_SHIFT*2) + #define SIG_F_I (ARGTYPE_F | ARGTYPE_I << ARGTYPE_SHIFT*1) + #define SIG_F_U (ARGTYPE_F | ARGTYPE_U << ARGTYPE_SHIFT*1) + #define SIG_F_F (ARGTYPE_F | ARGTYPE_F << ARGTYPE_SHIFT*1) + #define SIG_F_FF (ARGTYPE_F | ARGTYPE_F << ARGTYPE_SHIFT*1 | ARGTYPE_F << ARGTYPE_SHIFT*2) + #define SIG_B_FF (ARGTYPE_B | ARGTYPE_F << ARGTYPE_SHIFT*1 | ARGTYPE_F << ARGTYPE_SHIFT*2) #define SF_CALLINFO(name, typesig) \ static const CallInfo name##_ci = \ @@ -2434,14 +2523,13 @@ namespace nanojit } LIns* SoftFloatFilter::insCall(const CallInfo *ci, LInsp args[]) { - uint32_t argt = ci->_argtypes; - - for (uint32_t i = 0, argsizes = argt >> ARGSIZE_SHIFT; argsizes != 0; i++, argsizes >>= ARGSIZE_SHIFT) + uint32_t nArgs = ci->count_args(); + for (uint32_t i = 0; i < nArgs; i++) args[i] = split(args[i]); - if ((argt & ARGSIZE_MASK_ANY) == ARGSIZE_F) { - // this function returns a double as two 32bit values, so replace - // call with qjoin(qhi(call), call) + if (ci->returnType() == ARGTYPE_F) { + // This function returns a double as two 32bit values, so replace + // call with qjoin(qhi(call), call). 
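
Since the argument iteration above now leans on the 3-bit typesig encoding, here is a runnable sketch of both directions (ARGTYPE_* constants copied from this patch; the decode loop is a hypothetical mirror of getArgTypes(), and works because ARGTYPE_V, value 0, never appears as an argument type):

    #include <stdint.h>
    #include <cstdio>

    static const int      ARGTYPE_SHIFT = 3;
    static const uint32_t ARGTYPE_MASK  = 0x7;
    enum { ARGTYPE_V = 0, ARGTYPE_F = 1, ARGTYPE_I = 2, ARGTYPE_U = 3 };

    int main() {
        // double f(double, double), i.e. SIG_F_FF: return type in the low
        // field, then argument types right-to-left, one per 3-bit step.
        uint32_t sig = ARGTYPE_F
                     | ARGTYPE_F << ARGTYPE_SHIFT*1
                     | ARGTYPE_F << ARGTYPE_SHIFT*2;

        printf("return type = %u\n", sig & ARGTYPE_MASK);     // 1 (ARGTYPE_F)

        int argc = 0;
        for (uint32_t rest = sig >> ARGTYPE_SHIFT; rest != 0;
             rest >>= ARGTYPE_SHIFT) {
            printf("arg type = %u\n", rest & ARGTYPE_MASK);   // right-to-left
            argc++;
        }
        printf("argc = %d\n", argc);                          // 2
        return 0;
    }
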
return split(ci, args); } return out->insCall(ci, args); @@ -2452,11 +2540,11 @@ namespace nanojit #endif /* FEATURE_NANOJIT */ #if defined(NJ_VERBOSE) - LabelMap::LabelMap(Allocator& a, LogControl *logc) - : allocator(a), names(a), logc(logc), end(buf) + AddrNameMap::AddrNameMap(Allocator& a) + : allocator(a), names(a) {} - void LabelMap::add(const void *p, size_t size, size_t align, const char *name) + void AddrNameMap::addAddrRange(const void *p, size_t size, size_t align, const char *name) { if (!this || names.containsKey(p)) return; @@ -2466,45 +2554,28 @@ namespace nanojit names.put(p, e); } - const char *LabelMap::format(const void *p) + void AddrNameMap::lookupAddr(void *p, char*& name, int32_t& offset) { - char b[200]; - const void *start = names.findNear(p); - if (start != NULL) { + if (start) { Entry *e = names.get(start); const void *end = (const char*)start + e->size; - const char *name = e->name; if (p == start) { - VMPI_sprintf(b, "%p %s", p, name); - return dup(b); + name = e->name; + offset = 0; } else if (p > start && p < end) { - int32_t d = int32_t(intptr_t(p)-intptr_t(start)) >> e->align; - VMPI_sprintf(b, "%p %s+%d", p, name, d); - return dup(b); + name = e->name; + offset = int32_t(intptr_t(p)-intptr_t(start)) >> e->align; } else { - VMPI_sprintf(b, "%p", p); - return dup(b); + name = NULL; + offset = 0; } + } else { + name = NULL; + offset = 0; } - VMPI_sprintf(b, "%p", p); - return dup(b); - } - - const char *LabelMap::dup(const char *b) - { - size_t need = VMPI_strlen(b)+1; - NanoAssert(need <= sizeof(buf)); - char *s = end; - end += need; - if (end > buf+sizeof(buf)) { - s = buf; - end = s+need; - } - VMPI_strcpy(s, b); - return s; } // --------------------------------------------------------------- @@ -2558,7 +2629,7 @@ namespace nanojit NanoAssertMsgf(0, "LIR type error (%s): arg %d of '%s' is '%s' " "which has type %s (expected %s)", - _whereInPipeline, i+1, lirNames[op], + whereInPipeline, i+1, lirNames[op], lirNames[args[i]->opcode()], type2string(actual), type2string(formal)); } @@ -2570,15 +2641,16 @@ namespace nanojit { NanoAssertMsgf(0, "LIR structure error (%s): %s %d of '%s' is '%s' (expected %s)", - _whereInPipeline, argDesc, argN, + whereInPipeline, argDesc, argN, lirNames[op], lirNames[arg->opcode()], shouldBeDesc); } - void ValidateWriter::errorAccSetShould(const char* what, AccSet accSet, const char* shouldDesc) + void ValidateWriter::errorAccSet(const char* what, AccSet accSet, const char* shouldDesc) { + RefBuf b; NanoAssertMsgf(0, - "LIR AccSet error (%s): '%s' AccSet is %d; it should %s", - _whereInPipeline, what, accSet, shouldDesc); + "LIR AccSet error (%s): '%s' AccSet is '%s'; %s", + whereInPipeline, what, printer->formatAccSet(&b, accSet), shouldDesc); } void ValidateWriter::checkLInsIsACondOrConst(LOpcode op, int argN, LIns* ins) @@ -2603,17 +2675,60 @@ namespace nanojit errorStructureShouldBe(op, "argument", argN, ins, lirNames[op2]); } - ValidateWriter::ValidateWriter(LirWriter *out, const char* stageName) - : LirWriter(out), _whereInPipeline(stageName) + void ValidateWriter::checkAccSet(LOpcode op, LInsp base, AccSet accSet, AccSet maxAccSet) + { + if (accSet == ACC_NONE) + errorAccSet(lirNames[op], accSet, "it should not equal ACC_NONE"); + + if (accSet & ~maxAccSet) + errorAccSet(lirNames[op], accSet, + "it should not contain bits that aren't in ACC_LOAD_ANY/ACC_STORE_ANY"); + + // Some sanity checking, which is based on the following assumptions: + // - STACK ones should use 'sp' or 'sp+k' as the base. 
(We could look + // for more complex patterns, but that feels dangerous. Better to + // keep it really simple.) + // - RSTACK ones should use 'rp' as the base. + // - READONLY/OTHER ones should not use 'sp'/'sp+k' or 'rp' as the base. + // + // Things that aren't checked: + // - There's no easy way to check if READONLY ones really are read-only. + + bool isStack = base == sp || + (base->isop(LIR_piadd) && base->oprnd1() == sp && base->oprnd2()->isconstp()); + bool isRStack = base == rp; + + switch (accSet) { + case ACC_STACK: + if (!isStack) + errorAccSet(lirNames[op], accSet, "but it's not a stack access"); + break; + + case ACC_RSTACK: + if (!isRStack) + errorAccSet(lirNames[op], accSet, "but it's not an rstack access"); + break; + + case ACC_READONLY: + case ACC_OTHER: + if (isStack) + errorAccSet(lirNames[op], accSet, "but it's a stack access"); + if (isRStack) + errorAccSet(lirNames[op], accSet, "but it's an rstack access"); + break; + + default: + break; + } + } + + ValidateWriter::ValidateWriter(LirWriter *out, LInsPrinter* printer, const char* where) + : LirWriter(out), printer(printer), whereInPipeline(where), sp(0), rp(0) {} LIns* ValidateWriter::insLoad(LOpcode op, LIns* base, int32_t d, AccSet accSet) { - if (accSet == ACC_NONE) - errorAccSetShould(lirNames[op], accSet, "not equal ACC_NONE"); - - if (accSet & ~ACC_LOAD_ANY) - errorAccSetShould(lirNames[op], accSet, "not contain bits that aren't in ACC_LOAD_ANY"); + checkAccSet(op, base, accSet, ACC_LOAD_ANY); int nArgs = 1; LTy formals[1] = { LTy_Ptr }; @@ -2640,11 +2755,7 @@ namespace nanojit LIns* ValidateWriter::insStore(LOpcode op, LIns* value, LIns* base, int32_t d, AccSet accSet) { - if (accSet == ACC_NONE) - errorAccSetShould(lirNames[op], accSet, "not equal ACC_NONE"); - - if (accSet & ~ACC_STORE_ANY) - errorAccSetShould(lirNames[op], accSet, "not contain bits that aren't in ACC_STORE_ANY"); + checkAccSet(op, base, accSet, ACC_STORE_ANY); int nArgs = 2; LTy formals[2] = { LTy_Void, LTy_Ptr }; // LTy_Void is overwritten shortly @@ -2674,7 +2785,7 @@ namespace nanojit typeCheckArgs(op, nArgs, formals, args); - return out->insStore(op, value, base, d); + return out->insStore(op, value, base, d, accSet); } LIns* ValidateWriter::ins0(LOpcode op) @@ -2795,11 +2906,6 @@ namespace nanojit formals[1] = LTy_I32; break; - case LIR_addp: - formals[0] = LTy_Ptr; - formals[1] = LTy_Ptr; - break; - #if NJ_SOFTFLOAT_SUPPORTED case LIR_qjoin: formals[0] = LTy_I32; @@ -2909,34 +3015,34 @@ namespace nanojit LIns* ValidateWriter::insCall(const CallInfo *ci, LIns* args0[]) { - ArgSize sizes[MAXARGS]; - uint32_t nArgs = ci->get_sizes(sizes); + ArgType argTypes[MAXARGS]; + uint32_t nArgs = ci->getArgTypes(argTypes); LTy formals[MAXARGS]; LIns* args[MAXARGS]; // in left-to-right order, unlike args0[] LOpcode op = getCallOpcode(ci); if (ci->_isPure && ci->_storeAccSet != ACC_NONE) - errorAccSetShould(ci->_name, ci->_storeAccSet, "equal ACC_NONE for pure functions"); + errorAccSet(ci->_name, ci->_storeAccSet, "it should be ACC_NONE for pure functions"); if (ci->_storeAccSet & ~ACC_STORE_ANY) - errorAccSetShould(lirNames[op], ci->_storeAccSet, - "not contain bits that aren't in ACC_STORE_ANY"); + errorAccSet(lirNames[op], ci->_storeAccSet, + "it should not contain bits that aren't in ACC_STORE_ANY"); - // This loop iterates over the args from right-to-left (because - // arg() and get_sizes() use right-to-left order), but puts the - // results into formals[] and args[] in left-to-right order so - // that arg numbers in error messages make sense to 
the user. + // This loop iterates over the args from right-to-left (because arg() + // and getArgTypes() use right-to-left order), but puts the results + // into formals[] and args[] in left-to-right order so that arg + // numbers in error messages make sense to the user. for (uint32_t i = 0; i < nArgs; i++) { uint32_t i2 = nArgs - i - 1; // converts right-to-left to left-to-right - switch (sizes[i]) { - case ARGSIZE_I: - case ARGSIZE_U: formals[i2] = LTy_I32; break; + switch (argTypes[i]) { + case ARGTYPE_I: + case ARGTYPE_U: formals[i2] = LTy_I32; break; #ifdef NANOJIT_64BIT - case ARGSIZE_Q: formals[i2] = LTy_I64; break; + case ARGTYPE_Q: formals[i2] = LTy_I64; break; #endif - case ARGSIZE_F: formals[i2] = LTy_F64; break; - default: NanoAssert(0); formals[i2] = LTy_Void; break; + case ARGTYPE_F: formals[i2] = LTy_F64; break; + default: NanoAssertMsgf(0, "%d %s\n", argTypes[i],ci->_name); formals[i2] = LTy_Void; break; } args[i2] = args0[i]; } diff --git a/js/src/nanojit/LIR.h b/js/src/nanojit/LIR.h index a2a19da8fea..c2c3712e888 100644 --- a/js/src/nanojit/LIR.h +++ b/js/src/nanojit/LIR.h @@ -60,18 +60,228 @@ namespace nanojit # define PTR_SIZE(a,b) a #endif - // pointer op aliases - LIR_ldp = PTR_SIZE(LIR_ld, LIR_ldq), + // Pointer-sized synonyms. + + LIR_paramp = PTR_SIZE(LIR_paraml, LIR_paramq), + + LIR_allocp = PTR_SIZE(LIR_allocl, LIR_allocq), + + LIR_retp = PTR_SIZE(LIR_retl, LIR_retq), + + LIR_livep = PTR_SIZE(LIR_livel, LIR_liveq), + + LIR_ldp = PTR_SIZE(LIR_ldl, LIR_ldq), + + LIR_stp = PTR_SIZE(LIR_stl, LIR_stq), + + LIR_callp = PTR_SIZE(LIR_calll, LIR_callq), + + LIR_eqp = PTR_SIZE(LIR_eql, LIR_eqq), + LIR_ltp = PTR_SIZE(LIR_ltl, LIR_ltq), + LIR_gtp = PTR_SIZE(LIR_gtl, LIR_gtq), + LIR_lep = PTR_SIZE(LIR_lel, LIR_leq), + LIR_gep = PTR_SIZE(LIR_gel, LIR_geq), + LIR_ltup = PTR_SIZE(LIR_ltul, LIR_ltuq), + LIR_gtup = PTR_SIZE(LIR_gtul, LIR_gtuq), + LIR_leup = PTR_SIZE(LIR_leul, LIR_leuq), + LIR_geup = PTR_SIZE(LIR_geul, LIR_geuq), + + LIR_addp = PTR_SIZE(LIR_addl, LIR_addq), + + LIR_andp = PTR_SIZE(LIR_andl, LIR_andq), + LIR_orp = PTR_SIZE(LIR_orl, LIR_orq), + LIR_xorp = PTR_SIZE(LIR_xorl, LIR_xorq), + + LIR_lshp = PTR_SIZE(LIR_lshl, LIR_lshq), + LIR_rshp = PTR_SIZE(LIR_rshl, LIR_rshq), + LIR_rshup = PTR_SIZE(LIR_rshul, LIR_rshuq), + + LIR_cmovp = PTR_SIZE(LIR_cmovl, LIR_cmovq), + + // XXX: temporary synonyms for old opcode names and old pointer-sized + // synonyms, for the Great Opcode Renaming transition period (bug + // 504506). Those in comments have not changed and so don't need a + // temporary synonym. 
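
The pointer-sized synonyms listed below all hang off one trick; a freestanding sketch of the PTR_SIZE idiom with toy opcode names (the real macro and opcodes are defined earlier in this enum): a single portable name resolves at compile time, so front ends can emit pointer-sized LIR without scattering #ifdefs.

    // Same idiom as PTR_SIZE above, with toy names to keep it self-contained.
    #ifdef NANOJIT_64BIT
    # define PTR_SIZE_SKETCH(a, b) b
    #else
    # define PTR_SIZE_SKETCH(a, b) a
    #endif

    enum ToyOpcode {
        TOY_addl,                                       // 32-bit integer add
        TOY_addq,                                       // 64-bit integer add
        TOY_addp = PTR_SIZE_SKETCH(TOY_addl, TOY_addq)  // pointer-sized add
    };
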
+ + // LIR_start + + // LIR_regfence + + // LIR_skip + +#ifndef NANOJIT_64BIT + LIR_iparam = LIR_paraml, +#else + LIR_qparam = LIR_paramq, +#endif + +#ifndef NANOJIT_64BIT + LIR_ialloc = LIR_allocl, +#else + LIR_qalloc = LIR_allocq, +#endif + + LIR_ret = LIR_retl, +#ifdef NANOJIT_64BIT + LIR_qret = LIR_retq, +#endif + LIR_fret = LIR_retd, + + LIR_live = LIR_livel, +#ifdef NANOJIT_64BIT + LIR_qlive = LIR_liveq, +#endif + LIR_flive = LIR_lived, + + // file + // line + + LIR_ldsb = LIR_ldb2l, + LIR_ldss = LIR_ldw2l, + LIR_ldzb = LIR_ldub2ul, + LIR_ldzs = LIR_lduw2ul, + LIR_ld = LIR_ldl, + // LIR_ldq + LIR_ldf = LIR_ldd, + LIR_ld32f = LIR_lds2d, + + // LIR_stb + LIR_sts = LIR_stw, + LIR_sti = LIR_stl, +#ifdef NANOJIT_64BIT + LIR_stqi = LIR_stq, +#endif + LIR_stfi = LIR_std, + LIR_st32f = LIR_std2s, + + LIR_icall = LIR_calll, +#ifdef NANOJIT_64BIT + LIR_qcall = LIR_callq, +#endif + LIR_fcall = LIR_calld, + + // LIR_j + // LIR_jt + // LIR_jf + // LIR_jtbl + + // LIR_label = LIR_label + + // LIR_x + // LIR_xt + // LIR_xf + // LIR_xtbl + // LIR_xbarrier + + LIR_int = LIR_imml, +#ifdef NANOJIT_64BIT + LIR_quad = LIR_immq, +#endif + LIR_float = LIR_immd, + + LIR_eq = LIR_eql, + LIR_lt = LIR_ltl, + LIR_gt = LIR_gtl, + LIR_le = LIR_lel, + LIR_ge = LIR_gel, + LIR_ult = LIR_ltul, + LIR_ugt = LIR_gtul, + LIR_ule = LIR_leul, + LIR_uge = LIR_geul, + +#ifdef NANOJIT_64BIT + LIR_qeq = LIR_eqq, + LIR_qlt = LIR_ltq, + LIR_qgt = LIR_gtq, + LIR_qle = LIR_leq, + LIR_qge = LIR_geq, + LIR_qult = LIR_ltuq, + LIR_qugt = LIR_gtuq, + LIR_qule = LIR_leuq, + LIR_quge = LIR_geuq, +#endif + + LIR_feq = LIR_eqd, + LIR_flt = LIR_ltd, + LIR_fgt = LIR_gtd, + LIR_fle = LIR_led, + LIR_fge = LIR_ged, + + LIR_neg = LIR_negl, + LIR_add = LIR_addl, + LIR_sub = LIR_subl, + LIR_mul = LIR_mull, +#if defined NANOJIT_IA32 || defined NANOJIT_X64 + LIR_div = LIR_divl, + LIR_mod = LIR_modl, +#endif + + LIR_not = LIR_notl, + LIR_and = LIR_andl, + LIR_or = LIR_orl, + LIR_xor = LIR_xorl, + + LIR_lsh = LIR_lshl, + LIR_rsh = LIR_rshl, + LIR_ush = LIR_rshul, + +#ifdef NANOJIT_64BIT + LIR_qiadd = LIR_addq, + + LIR_qiand = LIR_andq, + LIR_qior = LIR_orq, + LIR_qxor = LIR_xorq, + + LIR_qilsh = LIR_lshq, + LIR_qirsh = LIR_rshq, + LIR_qursh = LIR_rshuq, +#endif + + LIR_fneg = LIR_negd, + LIR_fadd = LIR_addd, + LIR_fsub = LIR_subd, + LIR_fmul = LIR_muld, + LIR_fdiv = LIR_divd, + LIR_fmod = LIR_modd, + + LIR_cmov = LIR_cmovl, +#ifdef NANOJIT_64BIT + LIR_qcmov = LIR_cmovq, +#endif + +#ifdef NANOJIT_64BIT + LIR_i2q = LIR_l2q, + LIR_u2q = LIR_ul2uq, + LIR_q2i = LIR_q2l, +#endif + + LIR_i2f = LIR_l2d, + LIR_u2f = LIR_ul2d, + LIR_f2i = LIR_d2l, + + LIR_addxov = LIR_addxovl, + LIR_subxov = LIR_subxovl, + LIR_mulxov = LIR_mulxovl, + +#if NJ_SOFTFLOAT_SUPPORTED + LIR_qlo = LIR_dlo2l, + LIR_qhi = LIR_dhi2l, + LIR_qjoin = LIR_ll2d, + LIR_callh = LIR_hcalll, +#endif + + LIR_param = PTR_SIZE(LIR_iparam, LIR_qparam), + + LIR_alloc = PTR_SIZE(LIR_ialloc, LIR_qalloc), + + LIR_pret = PTR_SIZE(LIR_ret, LIR_qret), + + LIR_plive = PTR_SIZE(LIR_live, LIR_qlive), + LIR_stpi = PTR_SIZE(LIR_sti, LIR_stqi), - LIR_piadd = PTR_SIZE(LIR_add, LIR_qiadd), - LIR_piand = PTR_SIZE(LIR_and, LIR_qiand), - LIR_pilsh = PTR_SIZE(LIR_lsh, LIR_qilsh), - LIR_pirsh = PTR_SIZE(LIR_rsh, LIR_qirsh), - LIR_pursh = PTR_SIZE(LIR_ush, LIR_qursh), - LIR_pcmov = PTR_SIZE(LIR_cmov, LIR_qcmov), - LIR_pior = PTR_SIZE(LIR_or, LIR_qior), - LIR_pxor = PTR_SIZE(LIR_xor, LIR_qxor), - LIR_addp = PTR_SIZE(LIR_iaddp, LIR_qaddp), + + LIR_pcall = PTR_SIZE(LIR_icall, LIR_qcall), + LIR_peq = PTR_SIZE(LIR_eq, 
LIR_qeq), LIR_plt = PTR_SIZE(LIR_lt, LIR_qlt), LIR_pgt = PTR_SIZE(LIR_gt, LIR_qgt), @@ -81,11 +291,17 @@ namespace nanojit LIR_pugt = PTR_SIZE(LIR_ugt, LIR_qugt), LIR_pule = PTR_SIZE(LIR_ule, LIR_qule), LIR_puge = PTR_SIZE(LIR_uge, LIR_quge), - LIR_alloc = PTR_SIZE(LIR_ialloc, LIR_qalloc), - LIR_pcall = PTR_SIZE(LIR_icall, LIR_qcall), - LIR_param = PTR_SIZE(LIR_iparam, LIR_qparam), - LIR_plive = PTR_SIZE(LIR_live, LIR_qlive), - LIR_pret = PTR_SIZE(LIR_ret, LIR_qret) + LIR_piadd = PTR_SIZE(LIR_add, LIR_qiadd), + + LIR_piand = PTR_SIZE(LIR_and, LIR_qiand), + LIR_pior = PTR_SIZE(LIR_or, LIR_qior), + LIR_pxor = PTR_SIZE(LIR_xor, LIR_qxor), + + LIR_pilsh = PTR_SIZE(LIR_lsh, LIR_qilsh), + LIR_pirsh = PTR_SIZE(LIR_rsh, LIR_qirsh), + LIR_pursh = PTR_SIZE(LIR_ush, LIR_qursh), + + LIR_pcmov = PTR_SIZE(LIR_cmov, LIR_qcmov) }; // 32-bit integer comparisons must be contiguous, as must 64-bit integer @@ -124,14 +340,14 @@ namespace nanojit NanoStaticAssert((LIR_le^1) == LIR_ge && (LIR_ge^1) == LIR_le); NanoStaticAssert((LIR_ult^1) == LIR_ugt && (LIR_ugt^1) == LIR_ult); NanoStaticAssert((LIR_ule^1) == LIR_uge && (LIR_uge^1) == LIR_ule); - + #ifdef NANOJIT_64BIT NanoStaticAssert((LIR_qlt^1) == LIR_qgt && (LIR_qgt^1) == LIR_qlt); NanoStaticAssert((LIR_qle^1) == LIR_qge && (LIR_qge^1) == LIR_qle); NanoStaticAssert((LIR_qult^1) == LIR_qugt && (LIR_qugt^1) == LIR_qult); NanoStaticAssert((LIR_qule^1) == LIR_quge && (LIR_quge^1) == LIR_qule); #endif - + NanoStaticAssert((LIR_flt^1) == LIR_fgt && (LIR_fgt^1) == LIR_flt); NanoStaticAssert((LIR_fle^1) == LIR_fge && (LIR_fge^1) == LIR_fle); @@ -146,25 +362,26 @@ namespace nanojit ABI_CDECL }; - enum ArgSize { - ARGSIZE_NONE = 0, - ARGSIZE_F = 1, // double (64bit) - ARGSIZE_I = 2, // int32_t + // All values must fit into three bits. See CallInfo for details. + enum ArgType { + ARGTYPE_V = 0, // void + ARGTYPE_F = 1, // double (64bit) + ARGTYPE_I = 2, // int32_t + ARGTYPE_U = 3, // uint32_t #ifdef NANOJIT_64BIT - ARGSIZE_Q = 3, // uint64_t + ARGTYPE_Q = 4, // uint64_t #endif - ARGSIZE_U = 6, // uint32_t - ARGSIZE_MASK_ANY = 7, - ARGSIZE_MASK_INT = 2, - ARGSIZE_SHIFT = 3, // aliases - ARGSIZE_P = PTR_SIZE(ARGSIZE_I, ARGSIZE_Q), // pointer - ARGSIZE_LO = ARGSIZE_I, // int32_t - ARGSIZE_B = ARGSIZE_I, // bool - ARGSIZE_V = ARGSIZE_NONE // void + ARGTYPE_P = PTR_SIZE(ARGTYPE_I, ARGTYPE_Q), // pointer + ARGTYPE_LO = ARGTYPE_I, // int32_t + ARGTYPE_B = ARGTYPE_I // bool }; + // In _typesig, each entry is three bits. + static const int ARGTYPE_SHIFT = 3; + static const int ARGTYPE_MASK = 0x7; + enum IndirectCall { CALL_INDIRECT = 0 }; @@ -209,7 +426,10 @@ namespace nanojit // A load from a READONLY region will never alias with any stores. // // - STACK: the stack. Stack loads/stores can usually be easily - // identified because they use SP as the stack pointer. + // identified because they use SP as the base pointer. + // + // - RSTACK: the return stack. Return stack loads/stores can usually be + // easily identified because they use RP as the base pointer. // // - OTHER: all other regions of memory. // @@ -228,7 +448,7 @@ namespace nanojit // ------------------------------------------- // The LIR generator must mark each load/store with an "access region // set", which is a set of one or more access regions. This indicates - // which parts of LIR-accessible memory the load/store may touch. + // which parts of LIR-accessible memory the load/store may touch. 
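
For example (hypothetical writer pipeline 'w', with 'sp' the stack pointer, 'obj' a heap pointer and 'val' some value; the insLoad()/insStore() signatures are the ones declared in this patch):

    LIns* spill = w->insLoad(LIR_ldp, sp,  8,  ACC_STACK);     // stack slot
    LIns* field = w->insLoad(LIR_ld,  obj, 12, ACC_OTHER);     // heap field
    LIns* shape = w->insLoad(LIR_ldp, obj, 0,  ACC_READONLY);  // immutable word

    // A later ACC_OTHER store invalidates only 'field' for CSE purposes;
    // 'spill' and 'shape' stay available.
    w->insStore(LIR_sti, val, obj, 12, ACC_OTHER);
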
// // The LIR generator must also mark each function called from LIR with an // access region set for memory stored to by the function. (We could also @@ -258,6 +478,14 @@ namespace nanojit // true for the store set of a function.) // // Such imprecision is safe but may reduce optimisation opportunities. + // + // Optimisations that use access region info + // ----------------------------------------- + // Currently only CseFilter uses this, and only for determining whether + // loads can be CSE'd. Note that CseFilter treats loads that are marked + // with a single access region precisely, but all loads marked with + // multiple access regions get lumped together. So if you can't mark a + // load with a single access region, you might as well use ACC_LOAD_ANY. //----------------------------------------------------------------------- // An access region set is represented as a bitset. Nb: this restricts us @@ -266,11 +494,13 @@ namespace nanojit // The access regions. Note that because of the bitset representation // these constants are also valid (singleton) AccSet values. If you add - // new ones please update ACC_ALL_WRITABLE and LirNameMap::formatAccSet(). + // new ones please update ACC_ALL_STORABLE and formatAccSet() and + // CseFilter. // static const AccSet ACC_READONLY = 1 << 0; // 0000_0001b static const AccSet ACC_STACK = 1 << 1; // 0000_0010b - static const AccSet ACC_OTHER = 1 << 2; // 0000_0100b + static const AccSet ACC_RSTACK = 1 << 2; // 0000_0100b + static const AccSet ACC_OTHER = 1 << 3; // 0000_1000b // Some common (non-singleton) access region sets. ACC_NONE does not make // sense for loads or stores (which must access at least one region), it @@ -278,50 +508,39 @@ namespace nanojit // // A convention that's worth using: use ACC_LOAD_ANY/ACC_STORE_ANY for // cases that you're unsure about or haven't considered carefully. Use - // ACC_ALL/ACC_ALL_WRITABLE for cases that you have considered carefully. + // ACC_ALL/ACC_ALL_STORABLE for cases that you have considered carefully. // That way it's easy to tell which ones have been considered and which // haven't. static const AccSet ACC_NONE = 0x0; - static const AccSet ACC_ALL_WRITABLE = ACC_STACK | ACC_OTHER; - static const AccSet ACC_ALL = ACC_READONLY | ACC_ALL_WRITABLE; + static const AccSet ACC_ALL_STORABLE = ACC_STACK | ACC_RSTACK | ACC_OTHER; + static const AccSet ACC_ALL = ACC_READONLY | ACC_ALL_STORABLE; static const AccSet ACC_LOAD_ANY = ACC_ALL; // synonym - static const AccSet ACC_STORE_ANY = ACC_ALL_WRITABLE; // synonym - + static const AccSet ACC_STORE_ANY = ACC_ALL_STORABLE; // synonym struct CallInfo { + private: + + public: uintptr_t _address; - uint32_t _argtypes:27; // 9 3-bit fields indicating arg type, by ARGSIZE above (including ret type): a1 a2 a3 a4 a5 ret + uint32_t _typesig:27; // 9 3-bit fields indicating arg type, by ARGTYPE above (including ret type): a1 a2 a3 a4 a5 ret AbiKind _abi:3; uint8_t _isPure:1; // _isPure=1 means no side-effects, result only depends on args AccSet _storeAccSet; // access regions stored by the function verbose_only ( const char* _name; ) - uint32_t _count_args(uint32_t mask) const; + uint32_t count_args() const; + uint32_t count_int32_args() const; // Nb: uses right-to-left order, eg. sizes[0] is the size of the right-most arg. 
- uint32_t get_sizes(ArgSize* sizes) const; + uint32_t getArgTypes(ArgType* types) const; - inline ArgSize returnType() const { - return ArgSize(_argtypes & ARGSIZE_MASK_ANY); - } - - // Note that this indexes arguments *backwards*, that is to - // get the Nth arg, you have to ask for index (numargs - N). - // See mozilla bug 525815 for fixing this. - inline ArgSize argType(uint32_t arg) const { - return ArgSize((_argtypes >> (ARGSIZE_SHIFT * (arg+1))) & ARGSIZE_MASK_ANY); + inline ArgType returnType() const { + return ArgType(_typesig & ARGTYPE_MASK); } inline bool isIndirect() const { return _address < 256; } - inline uint32_t count_args() const { - return _count_args(ARGSIZE_MASK_ANY); - } - inline uint32_t count_iargs() const { - return _count_args(ARGSIZE_MASK_INT); - } - // fargs = args - iargs }; /* @@ -345,14 +564,14 @@ namespace nanojit return isCses[op] == 1; } inline bool isRetOpcode(LOpcode op) { - return + return #if defined NANOJIT_64BIT op == LIR_qret || #endif op == LIR_ret || op == LIR_fret; } inline bool isCmovOpcode(LOpcode op) { - return + return #if defined NANOJIT_64BIT op == LIR_qcmov || #endif @@ -408,14 +627,14 @@ namespace nanojit inline LOpcode getCallOpcode(const CallInfo* ci) { LOpcode op = LIR_pcall; switch (ci->returnType()) { - case ARGSIZE_NONE: op = LIR_pcall; break; - case ARGSIZE_I: - case ARGSIZE_U: op = LIR_icall; break; - case ARGSIZE_F: op = LIR_fcall; break; + case ARGTYPE_V: op = LIR_pcall; break; + case ARGTYPE_I: + case ARGTYPE_U: op = LIR_icall; break; + case ARGTYPE_F: op = LIR_fcall; break; #ifdef NANOJIT_64BIT - case ARGSIZE_Q: op = LIR_qcall; break; + case ARGTYPE_Q: op = LIR_qcall; break; #endif - default: NanoAssert(0); break; + default: NanoAssert(0); break; } return op; } @@ -424,6 +643,8 @@ namespace nanojit #ifdef NANOJIT_64BIT LOpcode i32cmp_to_i64cmp(LOpcode op); #endif + LOpcode f64cmp_to_i32cmp(LOpcode op); + LOpcode f64cmp_to_u32cmp(LOpcode op); // Array holding the 'repKind' field from LIRopcode.tbl. extern const uint8_t repKinds[]; @@ -683,7 +904,7 @@ namespace nanojit // For loads/stores. inline int32_t disp() const; - inline int32_t accSet() const; + inline AccSet accSet() const; // For LInsSk. inline LIns* prevLIns() const; @@ -814,24 +1035,22 @@ namespace nanojit isop(LIR_xbarrier) || isop(LIR_xtbl) || isop(LIR_addxov) || isop(LIR_subxov) || isop(LIR_mulxov); } - // True if the instruction is a 32-bit or smaller constant integer. + // True if the instruction is a 32-bit integer immediate. bool isconst() const { return isop(LIR_int); } - // True if the instruction is a 32-bit or smaller constant integer and - // has the value val when treated as a 32-bit signed integer. + // True if the instruction is a 32-bit integer immediate and + // has the value 'val' when treated as a 32-bit signed integer. bool isconstval(int32_t val) const { return isconst() && imm32()==val; } - // True if the instruction is a constant quad value. - bool isconstq() const { - return #ifdef NANOJIT_64BIT - isop(LIR_quad) || -#endif - isop(LIR_float); + // True if the instruction is a 64-bit integer immediate. + bool isconstq() const { + return isop(LIR_quad); } - // True if the instruction is a constant pointer value. +#endif + // True if the instruction is a pointer-sized integer immediate. bool isconstp() const { #ifdef NANOJIT_64BIT @@ -840,10 +1059,22 @@ namespace nanojit return isconst(); #endif } - // True if the instruction is a constant float value. + // True if the instruction is a 64-bit float immediate. 
        bool isconstf() const {
            return isop(LIR_float);
        }
+       // True if the instruction is a 64-bit integer or float immediate.
+       bool isconstqf() const {
+           return
+#ifdef NANOJIT_64BIT
+               isconstq() ||
+#endif
+               isconstf();
+       }
+       // True if the instruction is an immediate of any type.
+       bool isImmAny() const {
+           return isconst() || isconstqf();
+       }

        bool isBranch() const {
            return isop(LIR_jt) || isop(LIR_jf) || isop(LIR_j) || isop(LIR_jtbl);
@@ -867,9 +1098,9 @@ namespace nanojit
            return retType() == LTy_F64;
        }
        bool isN64() const {
-           return
+           return
#ifdef NANOJIT_64BIT
-               isI64() ||
+               isI64() ||
#endif
                isF64();
        }
@@ -888,7 +1119,7 @@ namespace nanojit
        // Note, this assumes that loads will never fault and hence cannot
        // affect the control flow.
        bool isStmt() {
-           NanoAssert(!isop(LIR_start) && !isop(LIR_skip));
+           NanoAssert(!isop(LIR_skip));
            // All instructions with Void retType are statements, as are calls
            // to impure functions.
            if (isCall())
@@ -1163,7 +1394,8 @@ namespace nanojit
        clearArIndex();
        lastWord.opcode = opcode;
        toLInsLd()->oprnd_1 = val;
-       toLInsLd()->disp = d;
+       NanoAssert(d == int16_t(d));
+       toLInsLd()->disp = int16_t(d);
        toLInsLd()->accSet = accSet;
        NanoAssert(isLInsLd());
    }
@@ -1173,7 +1405,8 @@ namespace nanojit
        lastWord.opcode = opcode;
        toLInsSti()->oprnd_1 = val;
        toLInsSti()->oprnd_2 = base;
-       toLInsSti()->disp = d;
+       NanoAssert(d == int16_t(d));
+       toLInsSti()->disp = int16_t(d);
        toLInsSti()->accSet = accSet;
        NanoAssert(isLInsSti());
    }
@@ -1293,7 +1526,7 @@ namespace nanojit
        }
    }

-   int32_t LIns::accSet() const {
+   AccSet LIns::accSet() const {
        if (isLInsSti()) {
            return toLInsSti()->accSet;
        } else {
@@ -1312,13 +1545,14 @@ namespace nanojit
    inline int32_t LIns::imm32() const { NanoAssert(isconst()); return toLInsI()->imm32; }

-   inline int32_t LIns::imm64_0() const { NanoAssert(isconstq()); return toLInsN64()->imm64_0; }
-   inline int32_t LIns::imm64_1() const { NanoAssert(isconstq()); return toLInsN64()->imm64_1; }
+   inline int32_t LIns::imm64_0() const { NanoAssert(isconstqf()); return toLInsN64()->imm64_0; }
+   inline int32_t LIns::imm64_1() const { NanoAssert(isconstqf()); return toLInsN64()->imm64_1; }
    uint64_t LIns::imm64() const {
-       NanoAssert(isconstq());
+       NanoAssert(isconstqf());
        return (uint64_t(toLInsN64()->imm64_1) << 32) | uint32_t(toLInsN64()->imm64_0);
    }
    double LIns::imm64f() const {
+       NanoAssert(isconstf());
        union {
            double f;
            uint64_t q;
@@ -1439,71 +1673,91 @@ namespace nanojit
        LIns* ins_choose(LIns* cond, LIns* iftrue, LIns* iffalse, bool use_cmov);

        // Inserts an integer comparison to 0
-       LIns* ins_eq0(LIns* oprnd1);
+       LIns* ins_eq0(LIns* oprnd1) {
+           return ins2i(LIR_eq, oprnd1, 0);
+       }

        // Inserts a pointer comparison to 0
-       LIns* ins_peq0(LIns* oprnd1);
+       LIns* ins_peq0(LIns* oprnd1) {
+           return ins2(LIR_peq, oprnd1, insImmWord(0));
+       }

        // Inserts a binary operation where the second operand is an
        // integer immediate.
-       LIns* ins2i(LOpcode op, LIns *oprnd1, int32_t);
+       LIns* ins2i(LOpcode v, LIns* oprnd1, int32_t imm) {
+           return ins2(v, oprnd1, insImm(imm));
+       }

#if NJ_SOFTFLOAT_SUPPORTED
-       LIns* qjoin(LInsp lo, LInsp hi);
+       LIns* qjoin(LInsp lo, LInsp hi) {
+           return ins2(LIR_qjoin, lo, hi);
+       }
#endif
-       LIns* insImmPtr(const void *ptr);
-       LIns* insImmWord(intptr_t ptr);
+       LIns* insImmPtr(const void *ptr) {
+#ifdef NANOJIT_64BIT
+           return insImmq((uint64_t)ptr);
+#else
+           return insImm((int32_t)ptr);
+#endif
+       }

-       // Sign or zero extend integers to native integers. On 32-bit this is a no-op.
- LIns* ins_i2p(LIns* intIns); - LIns* ins_u2p(LIns* uintIns); + LIns* insImmWord(intptr_t value) { +#ifdef NANOJIT_64BIT + return insImmq(value); +#else + return insImm(value); +#endif + } + + // Sign-extend integers to native integers. On 32-bit this is a no-op. + LIns* ins_i2p(LIns* intIns) { +#ifdef NANOJIT_64BIT + return ins1(LIR_i2q, intIns); +#else + return intIns; +#endif + } + + // Zero-extend integers to native integers. On 32-bit this is a no-op. + LIns* ins_u2p(LIns* uintIns) { + #ifdef NANOJIT_64BIT + return ins1(LIR_u2q, uintIns); + #else + return uintIns; + #endif + } // Chooses LIR_sti or LIR_stqi based on size of value. LIns* insStorei(LIns* value, LIns* base, int32_t d, AccSet accSet); - - // Insert a load/store with the most pessimistic region access info, which is always safe. - LIns* insLoad(LOpcode op, LIns* base, int32_t d) { - return insLoad(op, base, d, ACC_LOAD_ANY); - } - LIns* insStore(LOpcode op, LIns* value, LIns* base, int32_t d) { - return insStore(op, value, base, d, ACC_STORE_ANY); - } - LIns* insStorei(LIns* value, LIns* base, int32_t d) { - return insStorei(value, base, d, ACC_STORE_ANY); - } }; #ifdef NJ_VERBOSE extern const char* lirNames[]; - /** - * map address ranges to meaningful names. - */ - class LabelMap + // Maps address ranges to meaningful names. + class AddrNameMap { Allocator& allocator; class Entry { public: Entry(int) : name(0), size(0), align(0) {} - Entry(char *n, size_t s, size_t a) : name(n),size(s),align(a) {} + Entry(char *n, size_t s, size_t a) : name(n), size(s), align(a) {} char* name; size_t size:29, align:3; }; - TreeMap names; - LogControl *logc; - char buf[5000], *end; - void formatAddr(const void *p, char *buf); + TreeMap names; // maps code regions to names public: - LabelMap(Allocator& allocator, LogControl* logc); - void add(const void *p, size_t size, size_t align, const char *name); - const char *dup(const char *); - const char *format(const void *p); + AddrNameMap(Allocator& allocator); + void addAddrRange(const void *p, size_t size, size_t align, const char *name); + void lookupAddr(void *p, char*& name, int32_t& offset); }; + // Maps LIR instructions to meaningful names. class LirNameMap { + private: Allocator& alloc; template @@ -1519,8 +1773,12 @@ namespace nanojit return c; } }; + CountMap lircounts; CountMap funccounts; + CountMap namecounts; + + void addNameWithSuffix(LInsp i, const char *s, int suffix, bool ignoreOneSuffix); class Entry { @@ -1529,41 +1787,77 @@ namespace nanojit Entry(char* n) : name(n) {} char* name; }; + HashMap names; - void formatImm(int32_t c, char *buf); - void formatImmq(uint64_t c, char *buf); public: - LabelMap *labels; - LirNameMap(Allocator& alloc, LabelMap *lm) + LirNameMap(Allocator& alloc) : alloc(alloc), lircounts(alloc), funccounts(alloc), - names(alloc), - labels(lm) + namecounts(alloc), + names(alloc) {} - void addName(LInsp i, const char *s); - void copyName(LInsp i, const char *s, int suffix); - char* formatAccSet(LInsp ins, bool isLoad, char* buf); - const char *formatRef(LIns *ref); - const char *formatIns(LInsp i); - void formatGuard(LInsp i, char *buf); - void formatGuardXov(LInsp i, char *buf); + void addName(LInsp ins, const char *s); // gives 'ins' a special name + const char* createName(LInsp ins); // gives 'ins' a generic name + const char* lookupName(LInsp ins); + }; + + // We use big buffers for cases where we need to fit a whole instruction, + // and smaller buffers for all the others. 
+    // enough, but for safety the formatXyz() functions check and won't exceed
+    // those limits.
+    class InsBuf {
+    public:
+        static const size_t len = 1000;
+        char buf[len];
+    };
+    class RefBuf {
+    public:
+        static const size_t len = 200;
+        char buf[len];
+    };
+
+    class LInsPrinter
+    {
+    private:
+        Allocator& alloc;
+
+        void formatImm(RefBuf* buf, int32_t c);
+        void formatImmq(RefBuf* buf, uint64_t c);
+        void formatGuard(InsBuf* buf, LInsp ins);
+        void formatGuardXov(InsBuf* buf, LInsp ins);
+
+    public:
+        LInsPrinter(Allocator& alloc)
+            : alloc(alloc)
+        {
+            addrNameMap = new (alloc) AddrNameMap(alloc);
+            lirNameMap = new (alloc) LirNameMap(alloc);
+        }
+
+        char *formatAddr(RefBuf* buf, void* p);
+        char *formatRef(RefBuf* buf, LInsp ref);
+        char *formatIns(InsBuf* buf, LInsp ins);
+        char *formatAccSet(RefBuf* buf, AccSet accSet);
+
+        AddrNameMap* addrNameMap;
+        LirNameMap* lirNameMap;
    };

    class VerboseWriter : public LirWriter
    {
        InsList code;
-        LirNameMap* names;
+        LInsPrinter* printer;
        LogControl* logc;
        const char* const prefix;
        bool const always_flush;
    public:
-        VerboseWriter(Allocator& alloc, LirWriter *out,
-                      LirNameMap* names, LogControl* logc, const char* prefix = "", bool always_flush = false)
-            : LirWriter(out), code(alloc), names(names), logc(logc), prefix(prefix), always_flush(always_flush)
+        VerboseWriter(Allocator& alloc, LirWriter *out, LInsPrinter* printer, LogControl* logc,
+                      const char* prefix = "", bool always_flush = false)
            : LirWriter(out), code(alloc), printer(printer), logc(logc), prefix(prefix), always_flush(always_flush)
        {}

        LInsp add(LInsp i) {
@@ -1584,9 +1878,10 @@
        void flush()
        {
            if (!code.isEmpty()) {
+                InsBuf b;
                int32_t count = 0;
                for (Seq<LIns*>* p = code.get(); p != NULL; p = p->tail) {
-                    logc->printf("%s %s\n",prefix,names->formatIns(p->head));
+                    logc->printf("%s %s\n", prefix, printer->formatIns(&b, p->head));
                    count++;
                }
                code.clear();
@@ -1674,23 +1969,35 @@
        // We divide instruction kinds into groups for the use of LInsHashSet.
        // LIns0 isn't present because we don't need to record any 0-ary
        // instructions.
-        LInsImm = 0,
-        LInsImmq = 1, // only occurs on 64-bit platforms
-        LInsImmf = 2,
-        LIns1 = 3,
-        LIns2 = 4,
-        LIns3 = 5,
-        LInsLoad = 6,
-        LInsCall = 7,
+        LInsImm          = 0,
+        LInsImmq         = 1,   // only occurs on 64-bit platforms
+        LInsImmf         = 2,
+        LIns1            = 3,
+        LIns2            = 4,
+        LIns3            = 5,
+        LInsCall         = 6,
+
+        // Loads are special.  We group them by access region:  one table for
+        // each region, and then a catch-all table for any loads marked with
+        // multiple regions.  This arrangement makes the removal of
+        // invalidated loads fast -- eg. we can invalidate all STACK loads by
+        // just clearing the LInsLoadStack table.  The disadvantage is that
+        // loads marked with multiple regions must be invalidated
+        // conservatively, eg. if any intervening stores occur.  But loads
+        // marked with multiple regions should be rare.
+        LInsLoadReadOnly = 7,
+        LInsLoadStack    = 8,
+        LInsLoadRStack   = 9,
+        LInsLoadOther    = 10,
+        LInsLoadMultiple = 11,

        LInsFirst = 0,
-        LInsLast = 7,
+        LInsLast = 11,
        // need a value after "last" to outsmart compilers that will insist last+1 is impossible
-        LInsInvalid = 8
+        LInsInvalid = 12
    };
    #define nextKind(kind) LInsHashKind(kind+1)

-    // @todo, this could be replaced by a generic HashMap or HashSet, if we had one
    class LInsHashSet
    {
        // Must be a power of 2.
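To make the region-table scheme above concrete, here is a sketch of the store-side invalidation it enables. This is an illustration only, not code from the patch; the table kinds are the ones declared above, and the ACC_* region flags are assumed names, matching the AccSet catch-alls (ACC_LOAD_ANY, ACC_STORE_ANY) seen elsewhere in this patch:

    // Called by the CSE filter when a store to the regions in 'stored' occurs.
    void invalidateLoadsAfterStore(AccSet stored) {
        // A store to one region only kills the cached loads of that region.
        if (stored & ACC_STACK)  clear(LInsLoadStack);
        if (stored & ACC_RSTACK) clear(LInsLoadRStack);
        if (stored & ACC_OTHER)  clear(LInsLoadOther);
        // Loads marked with multiple regions all share one catch-all table,
        // so they are invalidated conservatively on any store.
        clear(LInsLoadMultiple);
        // LInsLoadReadOnly is never cleared; read-only loads cannot be
        // invalidated by stores, which is the payoff of segregating them.
    }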
@@ -1707,14 +2014,15 @@ namespace nanojit
        uint32_t m_used[LInsLast + 1];
        typedef uint32_t (LInsHashSet::*find_t)(LInsp);
        find_t m_find[LInsLast + 1];
+        Allocator& alloc;

        static uint32_t hashImm(int32_t);
-        static uint32_t hashImmq(uint64_t);     // not NANOJIT_64BIT only used by findImmf()
-        static uint32_t hash1(LOpcode v, LInsp);
-        static uint32_t hash2(LOpcode v, LInsp, LInsp);
-        static uint32_t hash3(LOpcode v, LInsp, LInsp, LInsp);
-        static uint32_t hashLoad(LOpcode v, LInsp, int32_t);
+        static uint32_t hashImmq(uint64_t);     // not NANOJIT_64BIT-only -- used by findImmf()
+        static uint32_t hash1(LOpcode op, LInsp);
+        static uint32_t hash2(LOpcode op, LInsp, LInsp);
+        static uint32_t hash3(LOpcode op, LInsp, LInsp, LInsp);
+        static uint32_t hashLoad(LOpcode op, LInsp, int32_t, AccSet);
        static uint32_t hashCall(const CallInfo *call, uint32_t argc, LInsp args[]);

        // These private versions are used after an LIns has been created;
@@ -1727,8 +2035,12 @@
        uint32_t find1(LInsp ins);
        uint32_t find2(LInsp ins);
        uint32_t find3(LInsp ins);
-        uint32_t findLoad(LInsp ins);
        uint32_t findCall(LInsp ins);
+        uint32_t findLoadReadOnly(LInsp ins);
+        uint32_t findLoadStack(LInsp ins);
+        uint32_t findLoadRStack(LInsp ins);
+        uint32_t findLoadOther(LInsp ins);
+        uint32_t findLoadMultiple(LInsp ins);

        void grow(LInsHashKind kind);
@@ -1745,19 +2057,22 @@
        LInsp find1(LOpcode v, LInsp a, uint32_t &k);
        LInsp find2(LOpcode v, LInsp a, LInsp b, uint32_t &k);
        LInsp find3(LOpcode v, LInsp a, LInsp b, LInsp c, uint32_t &k);
-        LInsp findLoad(LOpcode v, LInsp a, int32_t b, uint32_t &k);
+        LInsp findLoad(LOpcode v, LInsp a, int32_t b, AccSet accSet, LInsHashKind kind,
+                       uint32_t &k);
        LInsp findCall(const CallInfo *call, uint32_t argc, LInsp args[], uint32_t &k);

        // 'k' is the index found by findXYZ().
-        LInsp add(LInsHashKind kind, LInsp ins, uint32_t k);
+        void add(LInsHashKind kind, LInsp ins, uint32_t k);

-        void clear();
+        void clear();               // clears all tables
+        void clear(LInsHashKind);   // clears one table
    };

    class CseFilter: public LirWriter
    {
    private:
        LInsHashSet* exprs;
+        AccSet storesSinceLastLoad;     // regions stored to since the last load

    public:
        CseFilter(LirWriter *out, Allocator&);
@@ -1771,7 +2086,8 @@
        LIns* ins1(LOpcode v, LInsp);
        LIns* ins2(LOpcode v, LInsp, LInsp);
        LIns* ins3(LOpcode v, LInsp, LInsp, LInsp);
-        LIns* insLoad(LOpcode op, LInsp cond, int32_t d, AccSet accSet);
+        LIns* insLoad(LOpcode op, LInsp base, int32_t d, AccSet accSet);
+        LIns* insStore(LOpcode op, LInsp value, LInsp base, int32_t d, AccSet accSet);
        LIns* insCall(const CallInfo *call, LInsp args[]);
        LIns* insGuard(LOpcode op, LInsp cond, GuardRecord *gr);
        LIns* insGuardXov(LOpcode op, LInsp a, LInsp b, GuardRecord *gr);
@@ -1785,7 +2101,7 @@
        uintptr_t makeRoom(size_t szB);    // make room for an instruction
        debug_only (void validate() const;)
-        verbose_only(LirNameMap* names;)
+        verbose_only(LInsPrinter* printer;)

        int32_t insCount();
        size_t byteCount();
@@ -1856,21 +2172,25 @@
        LirFilter(LirFilter *in) : in(in) {}
        virtual ~LirFilter(){}

+        // It's crucial that once this reaches the LIR_start at the beginning
+        // of the buffer, it keeps returning that LIR_start LIns on any
+        // subsequent calls.
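+        //
+        // Concretely, that contract is what lets a caller strip a whole
+        // stream with the usual backwards-reading idiom (a sketch; any
+        // LirFilter works here):
+        //
+        //     for (LInsp ins = filter->read(); !ins->isop(LIR_start);
+        //          ins = filter->read()) {
+        //         // ... process ins ...
+        //     }
+        //
+        // A filter that over-reads past LIR_start still sees LIR_start
+        // rather than running off the front of the buffer.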
virtual LInsp read() { return in->read(); } - virtual LInsp pos() { - return in->pos(); + virtual LInsp finalIns() { + return in->finalIns(); } }; // concrete class LirReader : public LirFilter { - LInsp _i; // next instruction to be read; invariant: is never a skip + LInsp _ins; // next instruction to be read; invariant: is never a skip + LInsp _finalIns; // final instruction in the stream; ie. the first one to be read public: - LirReader(LInsp i) : LirFilter(0), _i(i) + LirReader(LInsp ins) : LirFilter(0), _ins(ins), _finalIns(ins) { // The last instruction for a fragment shouldn't be a skip. // (Actually, if the last *inserted* instruction exactly fills up @@ -1879,7 +2199,7 @@ namespace nanojit // cross-chunk link. But the last *inserted* instruction is what // is recorded and used to initialise each LirReader, and that is // what is seen here, and therefore this assertion holds.) - NanoAssert(i && !i->isop(LIR_skip)); + NanoAssert(ins && !ins->isop(LIR_skip)); } virtual ~LirReader() {} @@ -1887,61 +2207,28 @@ namespace nanojit // Invariant: never returns a skip. LInsp read(); - // Returns next instruction. Invariant: never returns a skip. - LInsp pos() { - return _i; + LInsp finalIns() { + return _finalIns; } }; verbose_only(void live(LirFilter* in, Allocator& alloc, Fragment* frag, LogControl*);) + // WARNING: StackFilter assumes that all stack entries are eight bytes. + // Some of its optimisations aren't valid if that isn't true. See + // StackFilter::read() for more details. class StackFilter: public LirFilter { LInsp sp; - LInsp rp; - BitSet spStk; - BitSet rpStk; - int spTop; - int rpTop; - void getTops(LInsp br, int& spTop, int& rpTop); + BitSet stk; + int top; + int getTop(LInsp br); public: - StackFilter(LirFilter *in, Allocator& alloc, LInsp sp, LInsp rp); - bool ignoreStore(LInsp ins, int top, BitSet* stk); + StackFilter(LirFilter *in, Allocator& alloc, LInsp sp); LInsp read(); }; - // eliminate redundant loads by watching for stores & mutator calls - class LoadFilter: public LirWriter - { - public: - LInsp sp, rp; - LInsHashSet* exprs; - - void clear(LInsp p); - - public: - LoadFilter(LirWriter *out, Allocator& alloc) - : LirWriter(out), sp(NULL), rp(NULL) - { - uint32_t kInitialCaps[LInsLast + 1]; - kInitialCaps[LInsImm] = 1; - kInitialCaps[LInsImmq] = 1; - kInitialCaps[LInsImmf] = 1; - kInitialCaps[LIns1] = 1; - kInitialCaps[LIns2] = 1; - kInitialCaps[LIns3] = 1; - kInitialCaps[LInsLoad] = 64; - kInitialCaps[LInsCall] = 1; - exprs = new (alloc) LInsHashSet(alloc, kInitialCaps); - } - - LInsp ins0(LOpcode); - LInsp insLoad(LOpcode op, LInsp base, int32_t disp, AccSet accSet); - LInsp insStore(LOpcode op, LInsp value, LInsp base, int32_t disp, AccSet accSet); - LInsp insCall(const CallInfo *call, LInsp args[]); - }; - struct SoftFloatOps { const CallInfo* opmap[LIR_sentinel]; @@ -1985,19 +2272,26 @@ namespace nanojit class ValidateWriter : public LirWriter { private: - const char* _whereInPipeline; + LInsPrinter* printer; + const char* whereInPipeline; const char* type2string(LTy type); void typeCheckArgs(LOpcode op, int nArgs, LTy formals[], LIns* args[]); void errorStructureShouldBe(LOpcode op, const char* argDesc, int argN, LIns* arg, const char* shouldBeDesc); - void errorAccSetShould(const char* what, AccSet accSet, const char* shouldDesc); + void errorAccSet(const char* what, AccSet accSet, const char* shouldDesc); void checkLInsHasOpcode(LOpcode op, int argN, LIns* ins, LOpcode op2); void checkLInsIsACondOrConst(LOpcode op, int argN, LIns* ins); void 
checkLInsIsNull(LOpcode op, int argN, LIns* ins);
+        void checkAccSet(LOpcode op, LInsp base, AccSet accSet, AccSet maxAccSet);
+
+        LInsp sp, rp;

    public:
-        ValidateWriter(LirWriter* out, const char* stageName);
+        ValidateWriter(LirWriter* out, LInsPrinter* printer, const char* where);
+        void setSp(LInsp ins) { sp = ins; }
+        void setRp(LInsp ins) { rp = ins; }
+
        LIns* insLoad(LOpcode op, LIns* base, int32_t d, AccSet accSet);
        LIns* insStore(LOpcode op, LIns* value, LIns* base, int32_t d, AccSet accSet);
        LIns* ins0(LOpcode v);
@@ -2036,19 +2330,21 @@
    class ReverseLister : public LirFilter
    {
        Allocator&   _alloc;
-        LirNameMap*  _names;
+        LInsPrinter* _printer;
        const char*  _title;
        StringList   _strs;
        LogControl*  _logc;
+        LIns*        _prevIns;
    public:
        ReverseLister(LirFilter* in, Allocator& alloc,
-                      LirNameMap* names, LogControl* logc, const char* title)
+                      LInsPrinter* printer, LogControl* logc, const char* title)
            : LirFilter(in)
            , _alloc(alloc)
-            , _names(names)
+            , _printer(printer)
            , _title(title)
            , _strs(alloc)
            , _logc(logc)
+            , _prevIns(NULL)
        {}

        void finish();
diff --git a/js/src/nanojit/LIRopcode.tbl b/js/src/nanojit/LIRopcode.tbl
index 91b71530829..6528e7acf9f 100644
--- a/js/src/nanojit/LIRopcode.tbl
+++ b/js/src/nanojit/LIRopcode.tbl
@@ -39,6 +39,9 @@
 * ***** END LICENSE BLOCK ***** */

/*
+ * This file is best viewed with 128 columns:
+12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678
+ *
 * Definitions of LIR opcodes.  If you need to allocate an opcode, look
 * for one defined using OP_UN() and claim it.
 *
@@ -47,8 +50,8 @@
 * #define OPxyz(op, number, repKind, retType) ...
 *
 * Selected arguments can then be used within the macro expansions.
- * - op         Bytecode name, token-pasted after "LIR_" to form an LOpcode.
- * - number     Bytecode number, used as the LOpcode enum value.
+ * - op         Opcode name, token-pasted after "LIR_" to form an LOpcode.
+ * - number     Opcode number, used as the LOpcode enum value.
 * - repKind    Indicates how the instruction is represented in memory;  XYZ
 *              corresponds to LInsXYZ and LRK_XYZ.
 * - retType    Type (LTy) of the value returned by the instruction.
@@ -56,11 +59,22 @@
 *              can, -1 if things are more complicated -- in which case
 *              isCseOpcode() shouldn't be called on this opcode.
 *
- * This file is best viewed with 128 columns:
-12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678
+ * Opcodes use type-indicator suffixes that are based on the Intel asm ones:
+ *   - 'b': "byte", ie. 8-bit integer
+ *   - 'w': "word", ie. 16-bit integer [*]
+ *   - 'l': "long", ie. 32-bit integer
+ *   - 'q': "quad", ie. 64-bit integer
+ *   - 'u': "unsigned", used as a prefix on integer type-indicators when necessary
+ *   - 's': "single", ie. 32-bit floating point value
+ *   - 'd': "double", ie. 64-bit floating point value
+ *   - 'p': "pointer", ie. a long on 32-bit machines, a quad on 64-bit machines
 *
- * Aliases for pointer-sized operations that choose 32-bit or 64-bit instructions
- * are given in the LOpcode enum in LIR.h just after including LIRopcodes.tbl.
+ * [*] This meaning of "word" is used in direct relation to LIR types.  But
+ *     you'll probably see it elsewhere (eg. in comments) where it has the
+ *     sense of "pointer-sized" word.
+ *
+ * 'p' opcodes are all aliases of long and quad opcodes; they're given in
+ * LIR.h and chosen according to the platform pointer size.
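+ *
+ * Worked examples of the suffix scheme, all taken from the table below:
+ *   - addl    = add long (32-bit integer add)
+ *   - addq    = add quad (64-bit integer add; 64-bit platforms only)
+ *   - addd    = add double
+ *   - ldub2ul = load unsigned byte, zero-extend to unsigned long
+ *   - l2q     = sign-extend long to quad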
* * Certain opcodes aren't supported on all platforms, so OPxyz must be one of * the following: @@ -95,201 +109,238 @@ # define OP_86(a, b, c, d, e) OP_UN(b) #endif - -// Special operations. +//--------------------------------------------------------------------------- +// Miscellaneous operations +//--------------------------------------------------------------------------- OP___(start, 0, Op0, Void, 0) // start of a fragment -OP___(regfence, 1, Op0, Void, 0) // register fence, no register allocation is allowed across this meta instruction -OP___(skip, 2, Sk, Void, 0) // used to link code chunks -// Impure operations. -OP___(ldsb, 3, Ld, I32, -1) // 8-bit integer load, sign-extend to 32-bit -OP___(ldss, 4, Ld, I32, -1) // 16-bit integer load, sign-extend to 32-bit -OP___(ldzb, 5, Ld, I32, -1) // 8-bit integer load, zero-extend to 32-bit -OP___(ldzs, 6, Ld, I32, -1) // 16-bit integer load, zero-extend to 32-bit -OP_32(iaddp, 7, Op2, I32, 0) // 32-bit integer addition for temporary pointer calculations -OP_32(iparam, 8, P, I32, 0) // load a parameter (32-bit register or stack location) -OP___(stb, 9, Sti, Void, 0) // 8-bit integer store -OP___(ld, 10, Ld, I32, -1) // 32-bit integer load -OP_32(ialloc, 11, I, I32, 0) // allocate some stack space (result is a 32-bit address) -OP___(sti, 12, Sti, Void, 0) // 32-bit integer store -OP___(ret, 13, Op1, Void, 0) // return a 32-bit integer -OP___(live, 14, Op1, Void, 0) // extend live range of a 32-bit integer -OP___(flive, 15, Op1, Void, 0) // extend live range of a 64-bit float -OP___(icall, 16, C, I32, -1) // subroutine call returning a 32-bit value -OP___(sts, 17, Sti, Void, 0) // 16-bit integer store +// A register fence causes no code to be generated, but it affects register +// allocation so that no registers are live when it is reached. +OP___(regfence, 1, Op0, Void, 0) -OP___(x, 18, Op2, Void, 0) // exit always +OP___(skip, 2, Sk, Void, 0) // links code chunks -// Branches. 'jt' and 'jf' must be adjacent so that (op ^ 1) gives the -// opposite one. Static assertions in LIR.h check this requirement. -OP___(j, 19, Op2, Void, 0) // jump always -OP___(jt, 20, Op2, Void, 0) // jump if true -OP___(jf, 21, Op2, Void, 0) // jump if false -OP___(label, 22, Op0, Void, 0) // a jump target (no machine code is emitted for this) -OP___(jtbl, 23, Jtbl, Void, 0) // jump to address in table +OP_32(paraml, 3, P, I32, 0) // load a long parameter (register or stack location) +OP_64(paramq, 4, P, I64, 0) // load a quad parameter (register or stack location) -OP___(int, 24, I, I32, 1) // constant 32-bit integer -OP___(cmov, 25, Op3, I32, 1) // conditional move -// LIR_callh is a hack that's only used on 32-bit platforms that use SoftFloat. -// Its operand is always a LIR_icall, but one that specifies a function that -// returns a 64-bit float. It indicates that the 64-bit float return value is -// being returned via two 32-bit integer registers. The result is always used -// as the second operand of a LIR_qjoin. -OP_SF(callh, 26, Op1, I32, 1) +OP_32(allocl, 5, I, I32, 0) // allocate stack space (result is a long address) +OP_64(allocq, 6, I, I64, 0) // allocate stack space (result is a quad address) -// 64-bit float comparisons. Their order must be preserved so that, except for -// 'feq', (op ^ 1) gives the opposite one (eg. flt ^ 1 == fgt). They must also -// remain contiguous so that opcode range checking works correctly. -// Static assertions in LIR.h check these requirements. 
-OP___(feq, 27, Op2, I32, 1) // floating-point equality -OP___(flt, 28, Op2, I32, 1) // floating-point less-than -OP___(fgt, 29, Op2, I32, 1) // floating-point greater-than -OP___(fle, 30, Op2, I32, 1) // floating-point less-than-or-equal -OP___(fge, 31, Op2, I32, 1) // floating-point greater-than-or-equal +OP___(retl, 7, Op1, Void, 0) // return a long +OP_64(retq, 8, Op1, Void, 0) // return a quad +OP___(retd, 9, Op1, Void, 0) // return a double +OP___(livel, 10, Op1, Void, 0) // extend live range of a long +OP_64(liveq, 11, Op1, Void, 0) // extend live range of a quad +OP___(lived, 12, Op1, Void, 0) // extend live range of a double + +OP___(file, 13, Op1, Void, 0) // source filename for debug symbols +OP___(line, 14, Op1, Void, 0) // source line number for debug symbols + +OP_UN(15) +OP_UN(16) + +//--------------------------------------------------------------------------- +// Loads and stores +//--------------------------------------------------------------------------- +OP___(ldb2l, 17, Ld, I32, -1) // load byte and sign-extend to a long +OP___(ldw2l, 18, Ld, I32, -1) // load word and sign-extend to a long +OP___(ldub2ul, 19, Ld, I32, -1) // load unsigned byte and zero-extend to an unsigned long +OP___(lduw2ul, 20, Ld, I32, -1) // load unsigned word and zero-extend to an unsigned long +OP___(ldl, 21, Ld, I32, -1) // load long +OP_64(ldq, 22, Ld, I64, -1) // load quad +OP___(ldd, 23, Ld, F64, -1) // load double +OP___(lds2d, 24, Ld, F64, -1) // load single and extend to a double + +OP___(stb, 25, Sti, Void, 0) // store byte +OP___(stw, 26, Sti, Void, 0) // store word +OP___(stl, 27, Sti, Void, 0) // store long +OP_64(stq, 28, Sti, Void, 0) // store quad +OP___(std, 29, Sti, Void, 0) // store double +OP___(std2s, 30, Sti, Void, 0) // store double as a single (losing precision) + +OP_UN(31) OP_UN(32) -OP_UN(33) -OP_UN(34) -OP___(neg, 35, Op1, I32, 1) // 32-bit integer negation -OP___(add, 36, Op2, I32, 1) // 32-bit integer addition -OP___(sub, 37, Op2, I32, 1) // 32-bit integer subtraction -OP___(mul, 38, Op2, I32, 1) // 32-bit integer multiplication -OP_86(div, 39, Op2, I32, 1) // 32-bit integer division -// LIR_mod is a hack. It's only used on i386/X64. The operand is the result -// of a LIR_div because on i386/X64 div and mod results are computed by the -// same instruction. -OP_86(mod, 40, Op1, I32, 1) // 32-bit integer modulus +//--------------------------------------------------------------------------- +// Calls +//--------------------------------------------------------------------------- +OP___(calll, 33, C, I32, -1) // call subroutine that returns a long +OP_64(callq, 34, C, I64, -1) // call subroutine that returns a quad +OP___(calld, 35, C, F64, -1) // call subroutine that returns a double -OP___(and, 41, Op2, I32, 1) // 32-bit bitwise AND -OP___(or, 42, Op2, I32, 1) // 32-bit bitwise OR -OP___(xor, 43, Op2, I32, 1) // 32-bit bitwise XOR -OP___(not, 44, Op1, I32, 1) // 32-bit bitwise NOT -OP___(lsh, 45, Op2, I32, 1) // 32-bit left shift -OP___(rsh, 46, Op2, I32, 1) // 32-bit right shift with sign-extend (>>) -OP___(ush, 47, Op2, I32, 1) // 32-bit unsigned right shift (>>>) +OP_UN(36) -// Conditional guards. 'xt' and 'xf' must be adjacent so that (op ^ 1) gives -// the opposite one. Static assertions in LIR.h check this requirement. 
-OP___(xt, 48, Op2, Void, 1) // exit if true (0x30 0011 0000)
-OP___(xf, 49, Op2, Void, 1) // exit if false (0x31 0011 0001)
+//---------------------------------------------------------------------------
+// Branches and labels
+//---------------------------------------------------------------------------
+// 'jt' and 'jf' must be adjacent so that (op ^ 1) gives the opposite one.
+// Static assertions in LIR.h check this requirement.
+OP___(j, 37, Op2, Void, 0) // jump always
+OP___(jt, 38, Op2, Void, 0) // jump if true
+OP___(jf, 39, Op2, Void, 0) // jump if false
+OP___(jtbl, 40, Jtbl, Void, 0) // jump to address in table

-OP_SF(qlo, 50, Op1, I32, 1) // get the low 32 bits of a 64-bit value
-OP_SF(qhi, 51, Op1, I32, 1) // get the high 32 bits of a 64-bit value
+OP___(label, 41, Op0, Void, 0) // a jump target (no machine code is emitted for this)
+
+OP_UN(42)
+
+//---------------------------------------------------------------------------
+// Guards
+//---------------------------------------------------------------------------
+// 'xt' and 'xf' must be adjacent so that (op ^ 1) gives the opposite one.
+// Static assertions in LIR.h check this requirement.
+OP___(x, 43, Op2, Void, 0) // exit always
+OP___(xt, 44, Op2, Void, 1) // exit if true
+OP___(xf, 45, Op2, Void, 1) // exit if false
+OP___(xtbl, 46, Op2, Void, 0) // exit via indirect jump
+// A LIR_xbarrier causes no code to be generated, but it acts like a never-taken
+// guard in that it inhibits certain optimisations, such as dead stack store
+// elimination.
+OP___(xbarrier, 47, Op2, Void, 0)
+
+OP_UN(48)
+
+//---------------------------------------------------------------------------
+// Immediates
+//---------------------------------------------------------------------------
+OP___(imml, 49, I, I32, 1) // long immediate
+OP_64(immq, 50, N64, I64, 1) // quad immediate
+OP___(immd, 51, N64, F64, 1) // double immediate

OP_UN(52)
-OP_UN(53)

-OP___(addxov, 54, Op3, I32, 1) // 32-bit integer addition; exit if overflow occurred, result is valid on either path
-OP___(subxov, 55, Op3, I32, 1) // 32-bit integer subtraction; exit if overflow occurred, result is valid on either path
-OP___(mulxov, 56, Op3, I32, 1) // 32-bit integer multiplication; exit if overflow occurred, result is valid on either path

+//---------------------------------------------------------------------------
+// Comparisons
+//---------------------------------------------------------------------------
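+// A note on the (op ^ 1) pattern used below: each lt/gt and le/ge pair
+// differs only in the low opcode bit, so negating a comparison is a single
+// XOR.  For example, with the numbering below:
+//
+//     LIR_ltl ^ 1 == LIR_gtl     (54 ^ 1 == 55)
+//     LIR_lel ^ 1 == LIR_gel     (56 ^ 1 == 57)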
-// 32-bit integer comparisons. Their order must be preserved so that, except
-// for 'eq', (op ^ 1) gives the opposite one (eg. lt ^ 1 == gt). They must
-// also remain contiguous so that opcode range checking works correctly.
-// Static assertions in LIR.h check these requirements.
-OP___(eq, 57, Op2, I32, 1) // integer equality
-OP___(lt, 58, Op2, I32, 1) // signed integer less-than (0x38 0011 1000)
-OP___(gt, 59, Op2, I32, 1) // signed integer greater-than (0x39 0011 1001)
-OP___(le, 60, Op2, I32, 1) // signed integer less-than-or-equal (0x3A 0011 1010)
-OP___(ge, 61, Op2, I32, 1) // signed integer greater-than-or-equal (0x3B 0011 1011)
-OP___(ult, 62, Op2, I32, 1) // unsigned integer less-than (0x3C 0011 1100)
-OP___(ugt, 63, Op2, I32, 1) // unsigned integer greater-than (0x3D 0011 1101)
-OP___(ule, 64, Op2, I32, 1) // unsigned integer less-than-or-equal (0x3E 0011 1110)
-OP___(uge, 65, Op2, I32, 1) // unsigned integer greater-than-or-equal (0x3F 0011 1111)
+// Within each size group, order must be preserved so that, except for eq*, (op
+// ^ 1) gives the opposite one (eg. lt ^ 1 == gt). eq* must have odd numbers
+// for this to work. They must also remain contiguous so that opcode range
+// checking works correctly. Static assertions in LIR.h check these
+// requirements.
+OP___(eql, 53, Op2, I32, 1) // long equality
+OP___(ltl, 54, Op2, I32, 1) // signed long less-than
+OP___(gtl, 55, Op2, I32, 1) // signed long greater-than
+OP___(lel, 56, Op2, I32, 1) // signed long less-than-or-equal
+OP___(gel, 57, Op2, I32, 1) // signed long greater-than-or-equal
+OP___(ltul, 58, Op2, I32, 1) // unsigned long less-than
+OP___(gtul, 59, Op2, I32, 1) // unsigned long greater-than
+OP___(leul, 60, Op2, I32, 1) // unsigned long less-than-or-equal
+OP___(geul, 61, Op2, I32, 1) // unsigned long greater-than-or-equal

-OP___(file, 66, Op1, Void, 0) // source filename for debug symbols
-OP___(line, 67, Op1, Void, 0) // source line number for debug symbols
+OP_UN(62)

-OP___(xbarrier, 68, Op2, Void, 0) // memory barrier; doesn't exit, but flushes all values to the stack
-OP___(xtbl, 69, Op2, Void, 0) // exit via indirect jump
+OP_64(eqq, 63, Op2, I32, 1) // quad equality
+OP_64(ltq, 64, Op2, I32, 1) // signed quad less-than
+OP_64(gtq, 65, Op2, I32, 1) // signed quad greater-than
+OP_64(leq, 66, Op2, I32, 1) // signed quad less-than-or-equal
+OP_64(geq, 67, Op2, I32, 1) // signed quad greater-than-or-equal
+OP_64(ltuq, 68, Op2, I32, 1) // unsigned quad less-than
+OP_64(gtuq, 69, Op2, I32, 1) // unsigned quad greater-than
+OP_64(leuq, 70, Op2, I32, 1) // unsigned quad less-than-or-equal
+OP_64(geuq, 71, Op2, I32, 1) // unsigned quad greater-than-or-equal

-OP_64(qlive, 70, Op1, Void, 0) // extend live range of a 64-bit integer
+OP_UN(72)

-OP_64(qaddp, 71, Op2, I64, 0) // 64-bit integer addition for temp pointer calculations
-OP_64(qparam, 72, P, I64, 0) // load a parameter (64bit register or stack location)
+OP___(eqd, 73, Op2, I32, 1) // double equality
+OP___(ltd, 74, Op2, I32, 1) // double less-than
+OP___(gtd, 75, Op2, I32, 1) // double greater-than
+OP___(led, 76, Op2, I32, 1) // double less-than-or-equal
+OP___(ged, 77, Op2, I32, 1) // double greater-than-or-equal

-OP___(ldf, 73, Ld, F64, -1) // 64-bit float load
-OP_64(ldq, 74, Ld, I64, -1) // 64-bit integer load
+OP_UN(78)

-OP_64(qalloc, 75, I, I64, 0) // allocate some stack space (result is a 64-bit address)
+//---------------------------------------------------------------------------
+// Arithmetic
+//---------------------------------------------------------------------------
+OP___(negl, 79, Op1, I32, 1) // negate long
+OP___(addl, 80, Op2, I32, 1) // add long
+OP___(subl, 81, Op2, I32, 1) // subtract long
+OP___(mull, 82, Op2, I32, 1) // multiply long
+OP_86(divl, 83, Op2, I32, 1) // divide long
+// LIR_modl is a hack.
It's only used on i386/X64. The operand is the result +// of a LIR_divl because on i386/X64 div and mod results are computed by the +// same instruction. +OP_86(modl, 84, Op1, I32, 1) // modulo long -OP_64(stqi, 76, Sti, Void, 0) // 64-bit integer store +OP___(notl, 85, Op1, I32, 1) // bitwise-NOT long +OP___(andl, 86, Op2, I32, 1) // bitwise-AND long +OP___(orl, 87, Op2, I32, 1) // bitwise-OR long +OP___(xorl, 88, Op2, I32, 1) // bitwise-XOR long -OP___(st32f, 77, Sti, Void, 0) // store 64-bit float as a 32-bit float (dropping precision) -OP___(ld32f, 78, Ld, F64, -1) // load 32-bit float and widen to 64-bit float +OP___(lshl, 89, Op2, I32, 1) // left shift long +OP___(rshl, 90, Op2, I32, 1) // right shift long (>>) +OP___(rshul, 91, Op2, I32, 1) // right shift unsigned long (>>>) -OP___(fcall, 79, C, F64, -1) // subroutine call returning 64-bit float value -OP_64(qcall, 80, C, I64, -1) // subroutine call returning 64-bit integer value +OP_64(addq, 92, Op2, I64, 1) // add quad -OP___(stfi, 81, Sti, Void, 0) // 64-bit float store +OP_64(andq, 93, Op2, I64, 1) // bitwise-AND quad +OP_64(orq, 94, Op2, I64, 1) // bitwise-OR quad +OP_64(xorq, 95, Op2, I64, 1) // bitwise-XOR quad -OP___(fret, 82, Op1, Void, 0) // return a 64-bit float -OP_64(qret, 83, Op1, Void, 0) // return a 64-bit integer +OP_64(lshq, 96, Op2, I64, 1) // left shift quad; 2nd operand is a long +OP_64(rshq, 97, Op2, I64, 1) // right shift quad; 2nd operand is a long +OP_64(rshuq, 98, Op2, I64, 1) // right shift unsigned quad; 2nd operand is a long -OP_UN(84) -OP_UN(85) -OP_UN(86) -OP_UN(87) - -OP_64(quad, 88, N64, I64, 1) // 64-bit integer constant value -OP_64(qcmov, 89, Op3, I64, 1) // 64-bit conditional move - -OP_64(i2q, 90, Op1, I64, 1) // sign-extend i32 to i64 -OP_64(u2q, 91, Op1, I64, 1) // zero-extend u32 to u64 -OP___(i2f, 92, Op1, F64, 1) // convert a signed 32-bit integer to a float -OP___(u2f, 93, Op1, F64, 1) // convert an unsigned 32-bit integer to a float -OP___(f2i, 94, Op1, I32, 1) // f2i conversion, no exception raised, platform rounding rules. - -OP_UN(95) -OP_UN(96) -OP_UN(97) -OP_UN(98) - -OP___(fneg, 99, Op1, F64, 1) // floating-point negation -OP___(fadd, 100, Op2, F64, 1) // floating-point addition -OP___(fsub, 101, Op2, F64, 1) // floating-point subtraction -OP___(fmul, 102, Op2, F64, 1) // floating-point multiplication -OP___(fdiv, 103, Op2, F64, 1) // floating-point division -// LIR_fmod is just a place-holder opcode, ie. the back-ends cannot generate +OP___(negd, 99, Op1, F64, 1) // negate double +OP___(addd, 100, Op2, F64, 1) // add double +OP___(subd, 101, Op2, F64, 1) // subtract double +OP___(muld, 102, Op2, F64, 1) // multiply double +OP___(divd, 103, Op2, F64, 1) // divide double +// LIR_modd is just a place-holder opcode, ie. the back-ends cannot generate // code for it. It's used in TraceMonkey briefly but is always demoted to a -// LIR_mod or converted to a function call before Nanojit has to do anything +// LIR_modl or converted to a function call before Nanojit has to do anything // serious with it. 
-OP___(fmod, 104, Op2, F64, 1) // floating-point modulus
+OP___(modd, 104, Op2, F64, 1) // modulo double

-OP_64(qiand, 105, Op2, I64, 1) // 64-bit bitwise AND
-OP_64(qior, 106, Op2, I64, 1) // 64-bit bitwise OR
-OP_64(qxor, 107, Op2, I64, 1) // 64-bit bitwise XOR
+OP___(cmovl, 105, Op3, I32, 1) // conditional move long
+OP_64(cmovq, 106, Op3, I64, 1) // conditional move quad
+
+OP_UN(107)
OP_UN(108)

-OP_64(qilsh, 109, Op2, I64, 1) // 64-bit left shift; 2nd operand is a 32-bit integer
-OP_64(qirsh, 110, Op2, I64, 1) // 64-bit signed right shift; 2nd operand is a 32-bit integer
-OP_64(qursh, 111, Op2, I64, 1) // 64-bit unsigned right shift; 2nd operand is a 32-bit integer
-OP_64(qiadd, 112, Op2, I64, 1) // 64-bit bitwise ADD
-OP_UN(113)
+//---------------------------------------------------------------------------
+// Conversions
+//---------------------------------------------------------------------------
+OP_64(l2q, 109, Op1, I64, 1) // sign-extend long to quad
+OP_64(ul2uq, 110, Op1, I64, 1) // zero-extend unsigned long to unsigned quad
+OP_64(q2l, 111, Op1, I32, 1) // truncate quad to long (removes the high 32 bits)

-OP_SF(qjoin, 114, Op2, F64, 1) // join two 32-bit values (1st arg is low bits, 2nd is high)
-OP_64(q2i, 115, Op1, I32, 1) // truncate i64 to i32
+OP___(l2d, 112, Op1, F64, 1) // convert long to double
+OP___(ul2d, 113, Op1, F64, 1) // convert unsigned long to double
+OP___(d2l, 114, Op1, I32, 1) // convert double to long (no exceptions raised, platform rounding rules)

+OP_UN(115)
OP_UN(116)
-OP_UN(117)

-OP___(float, 118, N64, F64, 1) // 64-bit float constant value
+//---------------------------------------------------------------------------
+// Overflow arithmetic
+//---------------------------------------------------------------------------
+// These all exit if overflow occurred. The result is valid on either path.
+OP___(addxovl, 117, Op3, I32, 1) // add long and exit on overflow
+OP___(subxovl, 118, Op3, I32, 1) // subtract long and exit on overflow
+OP___(mulxovl, 119, Op3, I32, 1) // multiply long and exit on overflow

-// 64-bit integer comparisons. Their order must be preserved so that, except
-// for 'qeq', (op ^ 1) gives the opposite one (eg. qlt ^ 1 == qgt). They must
-// also remain contiguous so that opcode range checking works correctly.
-// Static assertions in LIR.h check these requirements.
-OP_64(qeq, 119, Op2, I32, 1) // integer equality -OP_64(qlt, 120, Op2, I32, 1) // signed integer less-than (0x78 0111 1000) -OP_64(qgt, 121, Op2, I32, 1) // signed integer greater-than (0x79 0111 1001) -OP_64(qle, 122, Op2, I32, 1) // signed integer less-than-or-equal (0x7A 0111 1010) -OP_64(qge, 123, Op2, I32, 1) // signed integer greater-than-or-equal (0x7B 0111 1011) -OP_64(qult, 124, Op2, I32, 1) // unsigned integer less-than (0x7C 0111 1100) -OP_64(qugt, 125, Op2, I32, 1) // unsigned integer greater-than (0x7D 0111 1101) -OP_64(qule, 126, Op2, I32, 1) // unsigned integer less-than-or-equal (0x7E 0111 1110) -OP_64(quge, 127, Op2, I32, 1) // unsigned integer greater-than-or-equal (0x7F 0111 1111) +OP_UN(120) +//--------------------------------------------------------------------------- +// SoftFloat +//--------------------------------------------------------------------------- +OP_SF(dlo2l, 121, Op1, I32, 1) // get the low 32 bits of a double as a long +OP_SF(dhi2l, 122, Op1, I32, 1) // get the high 32 bits of a double as a long +OP_SF(ll2d, 123, Op2, F64, 1) // join two longs (1st arg is low bits, 2nd is high) + +// LIR_hcalll is a hack that's only used on 32-bit platforms that use +// SoftFloat. Its operand is always a LIR_calll, but one that specifies a +// function that returns a double. It indicates that the double result is +// returned via two 32-bit integer registers. The result is always used as the +// second operand of a LIR_ll2d. +OP_SF(hcalll, 124, Op1, I32, 1) + +OP_UN(125) +OP_UN(126) +OP_UN(127) #undef OP_UN #undef OP_32 diff --git a/js/src/nanojit/NativeARM.cpp b/js/src/nanojit/NativeARM.cpp index 2cd25386920..423f6dd4439 100644 --- a/js/src/nanojit/NativeARM.cpp +++ b/js/src/nanojit/NativeARM.cpp @@ -597,19 +597,19 @@ Assembler::genEpilogue() * alignment. */ void -Assembler::asm_arg(ArgSize sz, LInsp arg, Register& r, int& stkd) +Assembler::asm_arg(ArgType ty, LInsp arg, Register& r, int& stkd) { // The stack pointer must always be at least aligned to 4 bytes. NanoAssert((stkd & 3) == 0); - if (sz == ARGSIZE_F) { + if (ty == ARGTYPE_F) { // This task is fairly complex and so is delegated to asm_arg_64. asm_arg_64(arg, r, stkd); } else { - NanoAssert(sz == ARGSIZE_I || sz == ARGSIZE_U); + NanoAssert(ty == ARGTYPE_I || ty == ARGTYPE_U); // pre-assign registers R0-R3 for arguments (if they fit) if (r < R4) { - asm_regarg(sz, arg, r); + asm_regarg(ty, arg, r); r = nextreg(r); } else { asm_stkarg(arg, stkd); @@ -620,7 +620,7 @@ Assembler::asm_arg(ArgSize sz, LInsp arg, Register& r, int& stkd) // Encode a 64-bit floating-point argument using the appropriate ABI. // This function operates in the same way as asm_arg, except that it will only -// handle arguments where (ArgSize)sz == ARGSIZE_F. +// handle arguments where (ArgType)ty == ARGTYPE_F. void Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd) { @@ -665,8 +665,8 @@ Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd) if (_config.arm_vfp) { FMRRD(ra, rb, fp_reg); } else { - asm_regarg(ARGSIZE_LO, arg->oprnd1(), ra); - asm_regarg(ARGSIZE_LO, arg->oprnd2(), rb); + asm_regarg(ARGTYPE_LO, arg->oprnd1(), ra); + asm_regarg(ARGTYPE_LO, arg->oprnd2(), rb); } #ifndef NJ_ARM_EABI @@ -699,7 +699,7 @@ Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd) // Without VFP, we can simply use asm_regarg and asm_stkarg to // encode the two 32-bit words as we don't need to load from a VFP // register. 
- asm_regarg(ARGSIZE_LO, arg->oprnd1(), ra); + asm_regarg(ARGTYPE_LO, arg->oprnd1(), ra); asm_stkarg(arg->oprnd2(), 0); stkd += 4; } @@ -720,10 +720,10 @@ Assembler::asm_arg_64(LInsp arg, Register& r, int& stkd) } void -Assembler::asm_regarg(ArgSize sz, LInsp p, Register r) +Assembler::asm_regarg(ArgType ty, LInsp p, Register r) { NanoAssert(deprecated_isKnownReg(r)); - if (sz & ARGSIZE_MASK_INT) + if (ty == ARGTYPE_I || ty == ARGTYPE_U) { // arg goes in specific register if (p->isconst()) { @@ -752,7 +752,7 @@ Assembler::asm_regarg(ArgSize sz, LInsp p, Register r) } else { - NanoAssert(sz == ARGSIZE_F); + NanoAssert(ty == ARGTYPE_F); // fpu argument in register - should never happen since FPU // args are converted to two 32-bit ints on ARM NanoAssert(false); @@ -848,10 +848,10 @@ Assembler::asm_call(LInsp ins) evictScratchRegsExcept(0); - const CallInfo* call = ins->callInfo(); - ArgSize sizes[MAXARGS]; - uint32_t argc = call->get_sizes(sizes); - bool indirect = call->isIndirect(); + const CallInfo* ci = ins->callInfo(); + ArgType argTypes[MAXARGS]; + uint32_t argc = ci->getArgTypes(argTypes); + bool indirect = ci->isIndirect(); // If we aren't using VFP, assert that the LIR operation is an integer // function call. @@ -862,12 +862,9 @@ Assembler::asm_call(LInsp ins) // See comments above for more details as to why this is necessary here // for floating point calls, but not for integer calls. if (_config.arm_vfp && ins->isUsed()) { - // Determine the size (and type) of the instruction result. - ArgSize rsize = (ArgSize)(call->_argtypes & ARGSIZE_MASK_ANY); - // If the result size is a floating-point value, treat the result // specially, as described previously. - if (rsize == ARGSIZE_F) { + if (ci->returnType() == ARGTYPE_F) { Register rr = ins->deprecated_getReg(); NanoAssert(ins->opcode() == LIR_fcall); @@ -875,7 +872,7 @@ Assembler::asm_call(LInsp ins) if (!deprecated_isKnownReg(rr)) { int d = deprecated_disp(ins); NanoAssert(d != 0); - deprecated_freeRsrcOf(ins, false); + deprecated_freeRsrcOf(ins); // The result doesn't have a register allocated, so store the // result (in R0,R1) directly to its stack slot. @@ -902,7 +899,7 @@ Assembler::asm_call(LInsp ins) // interlock in the "long" branch sequence by manually loading the // target address into LR ourselves before setting up the parameters // in other registers. - BranchWithLink((NIns*)call->_address); + BranchWithLink((NIns*)ci->_address); } else { // Indirect call: we assign the address arg to LR since it's not // used for regular arguments, and is otherwise scratch since it's @@ -917,7 +914,7 @@ Assembler::asm_call(LInsp ins) } else { BLX(LR); } - asm_regarg(ARGSIZE_LO, ins->arg(--argc), LR); + asm_regarg(ARGTYPE_LO, ins->arg(--argc), LR); } // Encode the arguments, starting at R0 and with an empty argument stack. @@ -930,7 +927,7 @@ Assembler::asm_call(LInsp ins) // in reverse order. uint32_t i = argc; while(i--) { - asm_arg(sizes[i], ins->arg(i), r, stkd); + asm_arg(argTypes[i], ins->arg(i), r, stkd); } if (stkd > max_out_args) { @@ -1192,7 +1189,7 @@ Assembler::asm_qjoin(LIns *ins) // okay if r gets recycled. 
r = findRegFor(lo, GpRegs);
    STR(r, FP, d);

-    deprecated_freeRsrcOf(ins, false);  // if we had a reg in use, emit a ST to flush it to mem
+    deprecated_freeRsrcOf(ins);         // if we had a reg in use, emit a ST to flush it to mem
}

void
@@ -1279,28 +1276,27 @@
Assembler::asm_spill(Register rr, int d, bool pop, bool quad)
{
    (void) pop;
    (void) quad;
-    if (d) {
-        if (_config.arm_vfp && IsFpReg(rr)) {
-            if (isS8(d >> 2)) {
-                FSTD(rr, FP, d);
-            } else {
-                FSTD(rr, IP, 0);
-                asm_add_imm(IP, FP, d);
-            }
+    NanoAssert(d);
+    if (_config.arm_vfp && IsFpReg(rr)) {
+        if (isS8(d >> 2)) {
+            FSTD(rr, FP, d);
        } else {
-            NIns merged;
-            STR(rr, FP, d);
-            // See if we can merge this store into an immediately following one,
-            // one, by creating or extending a STM instruction.
-            if (/* is it safe to poke _nIns[1] ? */
-                does_next_instruction_exist(_nIns, codeStart, codeEnd,
-                                            exitStart, exitEnd)
-                && /* can we merge _nIns[0] into _nIns[1] ? */
-                do_peep_2_1(&merged, _nIns[0], _nIns[1])) {
-                _nIns[1] = merged;
-                _nIns++;
-                verbose_only( asm_output("merge next into STMDB"); )
-            }
+            FSTD(rr, IP, 0);
+            asm_add_imm(IP, FP, d);
+        }
+    } else {
+        NIns merged;
+        STR(rr, FP, d);
+        // See if we can merge this store into an immediately following one
+        // by creating or extending a STM instruction.
+        if (/* is it safe to poke _nIns[1] ? */
+            does_next_instruction_exist(_nIns, codeStart, codeEnd,
+                                        exitStart, exitEnd)
+            && /* can we merge _nIns[0] into _nIns[1] ? */
+            do_peep_2_1(&merged, _nIns[0], _nIns[1])) {
+            _nIns[1] = merged;
+            _nIns++;
+            verbose_only( asm_output("merge next into STMDB"); )
        }
    }
}
@@ -1320,7 +1316,7 @@ Assembler::asm_load64(LInsp ins)
    Register rb = findRegFor(base, GpRegs);
    NanoAssert(IsGpReg(rb));
-    deprecated_freeRsrcOf(ins, false);
+    deprecated_freeRsrcOf(ins);

    //outputf("--- load64: Finished register allocation.");

@@ -1399,7 +1395,7 @@ Assembler::asm_store64(LOpcode op, LInsp value, int dr, LInsp base)
    if (_config.arm_vfp) {
        Register rb = findRegFor(base, GpRegs);

-        if (value->isconstq()) {
+        if (value->isconstf()) {
            underrunProtect(LD32_size*2 + 8);

            // XXX use another reg, get rid of dependency
@@ -1432,9 +1428,9 @@
            // if it's a constant, make sure our baseReg/baseOffset location
            // has the right value
-            if (value->isconstq()) {
+            if (value->isconstf()) {
                underrunProtect(4*4);
-                asm_quad_nochk(rv, value->imm64_0(), value->imm64_1());
+                asm_immf_nochk(rv, value->imm64_0(), value->imm64_1());
            }
        } else {
            int da = findMemFor(value);
@@ -1448,7 +1444,7 @@
    if (_config.arm_vfp) {
        Register rb = findRegFor(base, GpRegs);

-        if (value->isconstq()) {
+        if (value->isconstf()) {
            underrunProtect(LD32_size*2 + 8);

            // XXX use another reg, get rid of dependency
@@ -1483,9 +1479,9 @@
            // if it's a constant, make sure our baseReg/baseOffset location
            // has the right value
-            if (value->isconstq()) {
+            if (value->isconstf()) {
                underrunProtect(4*4);
-                asm_quad_nochk(rv, value->imm64_0(), value->imm64_1());
+                asm_immf_nochk(rv, value->imm64_0(), value->imm64_1());
            }
        } else {
            NanoAssertMsg(0, "st32f not supported with non-VFP, fix me");
@@ -1499,10 +1495,10 @@
    //asm_output(">>> store64");
}

-// stick a quad into register rr, where p points to the two
+// Stick a float into register rr, where p points to the two
// 32-bit parts of the quad, optinally also
storing at FP+d void -Assembler::asm_quad_nochk(Register rr, int32_t imm64_0, int32_t imm64_1) +Assembler::asm_immf_nochk(Register rr, int32_t imm64_0, int32_t imm64_1) { // We're not going to use a slot, because it might be too far // away. Instead, we're going to stick a branch in the stream to @@ -1524,20 +1520,21 @@ Assembler::asm_quad_nochk(Register rr, int32_t imm64_0, int32_t imm64_1) } void -Assembler::asm_quad(LInsp ins) +Assembler::asm_immf(LInsp ins) { - //asm_output(">>> asm_quad"); + //asm_output(">>> asm_immf"); int d = deprecated_disp(ins); Register rr = ins->deprecated_getReg(); - deprecated_freeRsrcOf(ins, false); + deprecated_freeRsrcOf(ins); if (_config.arm_vfp && deprecated_isKnownReg(rr)) { - asm_spill(rr, d, false, true); + if (d) + asm_spill(rr, d, false, true); underrunProtect(4*4); - asm_quad_nochk(rr, ins->imm64_0(), ins->imm64_1()); + asm_immf_nochk(rr, ins->imm64_0(), ins->imm64_1()); } else { NanoAssert(d); // asm_mmq might spill a reg, so don't call it; @@ -1550,7 +1547,7 @@ Assembler::asm_quad(LInsp ins) asm_ld_imm(IP, ins->imm64_0()); } - //asm_output("<<< asm_quad"); + //asm_output("<<< asm_immf"); } void @@ -2373,7 +2370,7 @@ Assembler::asm_arith(LInsp ins) // trace-tests.js so it is very unlikely to be worthwhile implementing it. if (rhs->isconst() && op != LIR_mul && op != LIR_mulxov) { - if ((op == LIR_add || op == LIR_iaddp || op == LIR_addxov) && lhs->isop(LIR_ialloc)) { + if ((op == LIR_add || op == LIR_addxov) && lhs->isop(LIR_ialloc)) { // Add alloc+const. The result should be the address of the // allocated space plus a constant. Register rs = deprecated_prepResultReg(ins, allow); @@ -2387,7 +2384,6 @@ Assembler::asm_arith(LInsp ins) switch (op) { - case LIR_iaddp: case LIR_add: asm_add_imm(rr, ra, imm32); break; case LIR_addxov: asm_add_imm(rr, ra, imm32, 1); break; case LIR_sub: asm_sub_imm(rr, ra, imm32); break; @@ -2424,7 +2420,6 @@ Assembler::asm_arith(LInsp ins) const Register SBZ = (Register)0; switch (op) { - case LIR_iaddp: case LIR_add: ADDs(rr, ra, rb, 0); break; case LIR_addxov: ADDs(rr, ra, rb, 1); break; case LIR_sub: SUBs(rr, ra, rb, 0); break; @@ -2676,7 +2671,7 @@ Assembler::asm_param(LInsp ins) } void -Assembler::asm_int(LInsp ins) +Assembler::asm_immi(LInsp ins) { Register rr = deprecated_prepResultReg(ins, GpRegs); asm_ld_imm(rr, ins->imm32()); diff --git a/js/src/nanojit/NativeARM.h b/js/src/nanojit/NativeARM.h index 230fc16b2ed..b00e60119f8 100644 --- a/js/src/nanojit/NativeARM.h +++ b/js/src/nanojit/NativeARM.h @@ -219,15 +219,15 @@ verbose_only( extern const char* shiftNames[]; ) void underrunProtect(int bytes); \ void nativePageReset(); \ void nativePageSetup(); \ - void asm_quad_nochk(Register, int32_t, int32_t); \ - void asm_regarg(ArgSize, LInsp, Register); \ + void asm_immf_nochk(Register, int32_t, int32_t); \ + void asm_regarg(ArgType, LInsp, Register); \ void asm_stkarg(LInsp p, int stkd); \ void asm_cmpi(Register, int32_t imm); \ void asm_ldr_chk(Register d, Register b, int32_t off, bool chk); \ void asm_cmp(LIns *cond); \ void asm_fcmp(LIns *cond); \ void asm_ld_imm(Register d, int32_t imm, bool chk = true); \ - void asm_arg(ArgSize sz, LInsp arg, Register& r, int& stkd); \ + void asm_arg(ArgType ty, LInsp arg, Register& r, int& stkd); \ void asm_arg_64(LInsp arg, Register& r, int& stkd); \ void asm_add_imm(Register rd, Register rn, int32_t imm, int stat = 0); \ void asm_sub_imm(Register rd, Register rn, int32_t imm, int stat = 0); \ diff --git a/js/src/nanojit/NativeMIPS.cpp b/js/src/nanojit/NativeMIPS.cpp index 
f0e3251b1bd..cf9f79d25a4 100644 --- a/js/src/nanojit/NativeMIPS.cpp +++ b/js/src/nanojit/NativeMIPS.cpp @@ -361,7 +361,7 @@ namespace nanojit void Assembler::asm_store_imm64(LIns *value, int dr, Register rbase) { - NanoAssert(value->isconstq()); + NanoAssert(value->isconstf()); int32_t msw = value->imm64_1(); int32_t lsw = value->imm64_0(); @@ -389,10 +389,10 @@ namespace nanojit } } - void Assembler::asm_regarg(ArgSize sz, LInsp p, Register r) + void Assembler::asm_regarg(ArgType ty, LInsp p, Register r) { NanoAssert(deprecated_isKnownReg(r)); - if (sz & ARGSIZE_MASK_INT) { + if (ty == ARGTYPE_I || ty == ARGTYPE_U) { // arg goes in specific register if (p->isconst()) asm_li(r, p->imm32()); @@ -464,7 +464,7 @@ namespace nanojit // Encode a 64-bit floating-point argument using the appropriate ABI. // This function operates in the same way as asm_arg, except that it will only - // handle arguments where (ArgSize)sz == ARGSIZE_F. + // handle arguments where (ArgType)ty == ARGTYPE_F. void Assembler::asm_arg_64(LInsp arg, Register& r, Register& fr, int& stkd) { @@ -530,7 +530,7 @@ namespace nanojit default: BADOPCODE(op); } - + TAG("asm_store32(value=%p{%s}, dr=%d, base=%p{%s})", value, lirNames[value->opcode()], dr, base, lirNames[base->opcode()]); } @@ -590,7 +590,7 @@ namespace nanojit SW(r, d+mswoff(), FP); r = findRegFor(lo, GpRegs); // okay if r gets recycled. SW(r, d+lswoff(), FP); - deprecated_freeRsrcOf(ins, false); // if we had a reg in use, flush it to mem + deprecated_freeRsrcOf(ins); // if we had a reg in use, flush it to mem TAG("asm_qjoin(ins=%p{%s})", ins, lirNames[ins->opcode()]); } @@ -635,22 +635,23 @@ namespace nanojit TAG("asm_fneg(ins=%p{%s})", ins, lirNames[ins->opcode()]); } - void Assembler::asm_quad(LIns *ins) + void Assembler::asm_immf(LIns *ins) { int d = deprecated_disp(ins); Register rr = ins->deprecated_getReg(); - deprecated_freeRsrcOf(ins, false); + deprecated_freeRsrcOf(ins); if (cpu_has_fpu && deprecated_isKnownReg(rr)) { - asm_spill(rr, d, false, true); + if (d) + asm_spill(rr, d, false, true); asm_li_d(rr, ins->imm64_1(), ins->imm64_0()); } else { NanoAssert(d); asm_store_imm64(ins, d, FP); } - TAG("asm_quad(ins=%p{%s})", ins, lirNames[ins->opcode()]); + TAG("asm_immf(ins=%p{%s})", ins, lirNames[ins->opcode()]); } void @@ -678,7 +679,7 @@ namespace nanojit Register rbase = findRegFor(base, GpRegs); NanoAssert(IsGpReg(rbase)); - deprecated_freeRsrcOf(ins, false); + deprecated_freeRsrcOf(ins); if (cpu_has_fpu && deprecated_isKnownReg(rd)) { NanoAssert(IsFpReg(rd)); @@ -764,11 +765,11 @@ namespace nanojit TAG("asm_neg_not(ins=%p{%s})", ins, lirNames[ins->opcode()]); } - void Assembler::asm_int(LIns *ins) + void Assembler::asm_immi(LIns *ins) { Register rr = deprecated_prepResultReg(ins, GpRegs); asm_li(rr, ins->imm32()); - TAG("asm_int(ins=%p{%s})", ins, lirNames[ins->opcode()]); + TAG("asm_immi(ins=%p{%s})", ins, lirNames[ins->opcode()]); } void Assembler::asm_cmov(LIns *ins) @@ -880,7 +881,7 @@ namespace nanojit case LIR_ldzb: // 8-bit integer load, zero-extend to 32-bit asm_ldst(OP_LBU, rres, d, rbase); break; - case LIR_ldzs: // 16-bit integer load, zero-extend to 32-bit + case LIR_ldzs: // 16-bit integer load, zero-extend to 32-bit asm_ldst(OP_LHU, rres, d, rbase); break; case LIR_ldsb: // 8-bit integer load, sign-extend to 32-bit @@ -889,7 +890,7 @@ namespace nanojit case LIR_ldss: // 16-bit integer load, sign-extend to 32-bit asm_ldst(OP_LH, rres, d, rbase); break; - case LIR_ld: // 32-bit integer load + case LIR_ld: // 32-bit integer load 
asm_ldst(OP_LW, rres, d, rbase); break; default: @@ -953,7 +954,6 @@ namespace nanojit // MIPS arith immediate ops sign-extend the imm16 value switch (op) { case LIR_add: - case LIR_iaddp: if (ovreg != deprecated_UnknownReg) SLT(ovreg, rr, ra); ADDIU(rr, ra, rhsc); @@ -1015,7 +1015,6 @@ namespace nanojit switch (op) { case LIR_add: - case LIR_iaddp: if (ovreg != deprecated_UnknownReg) SLT(ovreg,rr,ra); ADDU(rr, ra, rb); @@ -1103,7 +1102,7 @@ namespace nanojit else rbase = findRegFor(base, GpRegs); - if (value->isconstq()) + if (value->isconstf()) asm_store_imm64(value, dr, rbase); else if (!cpu_has_fpu || value->isop(LIR_ldq)) { @@ -1129,7 +1128,7 @@ namespace nanojit } else BADOPCODE(op); - + TAG("asm_store64(value=%p{%s}, dr=%d, base=%p{%s})", value, lirNames[value->opcode()], dr, base, lirNames[base->opcode()]); } @@ -1471,15 +1470,14 @@ namespace nanojit { USE(pop); USE(quad); - if (d) { - if (IsFpReg(rr)) { - NanoAssert(quad); - asm_ldst64(true, rr, d, FP); - } - else { - NanoAssert(!quad); - asm_ldst(OP_SW, rr, d, FP); - } + NanoAssert(d); + if (IsFpReg(rr)) { + NanoAssert(quad); + asm_ldst64(true, rr, d, FP); + } + else { + NanoAssert(!quad); + asm_ldst(OP_SW, rr, d, FP); } TAG("asm_spill(rr=%d, d=%d, pop=%d, quad=%d)", rr, d, pop, quad); } @@ -1504,19 +1502,19 @@ namespace nanojit * - 32-bit arguments are placed in registers and 32-bit aligned * on the stack. */ - void - Assembler::asm_arg(ArgSize sz, LInsp arg, Register& r, Register& fr, int& stkd) + void + Assembler::asm_arg(ArgType ty, LInsp arg, Register& r, Register& fr, int& stkd) { // The stack offset must always be at least aligned to 4 bytes. NanoAssert((stkd & 3) == 0); - if (sz == ARGSIZE_F) { + if (ty == ARGTYPE_F) { // This task is fairly complex and so is delegated to asm_arg_64. asm_arg_64(arg, r, fr, stkd); - } - else if (sz & ARGSIZE_MASK_INT) { + } else { + NanoAssert(ty == ARGTYPE_I || ty == ARGTYPE_U); if (stkd < 16) { - asm_regarg(sz, arg, r); + asm_regarg(ty, arg, r); fr = nextreg(fr); r = nextreg(r); } @@ -1527,16 +1525,11 @@ namespace nanojit fr = r; stkd += 4; } - else { - NanoAssert(sz == ARGSIZE_Q); - // shouldn't have 64 bit int params - NanoAssert(false); - } } void Assembler::asm_call(LInsp ins) - { + { Register rr; LOpcode op = ins->opcode(); @@ -1560,10 +1553,10 @@ namespace nanojit evictScratchRegsExcept(0); - const CallInfo* call = ins->callInfo(); - ArgSize sizes[MAXARGS]; - uint32_t argc = call->get_sizes(sizes); - bool indirect = call->isIndirect(); + const CallInfo* ci = ins->callInfo(); + ArgType argTypes[MAXARGS]; + uint32_t argc = ci->getArgTypes(argTypes); + bool indirect = ci->isIndirect(); // FIXME: Put one of the argument moves into the BDS slot @@ -1574,11 +1567,11 @@ namespace nanojit if (!indirect) // FIXME: If we can tell that we are calling non-PIC // (ie JIT) code, we could call direct instead of using t9 - asm_li(T9, call->_address); + asm_li(T9, ci->_address); else // Indirect call: we assign the address arg to t9 // which matches the o32 ABI for calling functions - asm_regarg(ARGSIZE_P, ins->arg(--argc), T9); + asm_regarg(ARGTYPE_P, ins->arg(--argc), T9); // Encode the arguments, starting at A0 and with an empty argument stack. Register r = A0, fr = FA0; @@ -1589,7 +1582,7 @@ namespace nanojit // Note that we loop through the arguments backwards as LIR specifies them // in reverse order. 
while(argc--)
-            asm_arg(sizes[argc], ins->arg(argc), r, fr, stkd);
+            asm_arg(argTypes[argc], ins->arg(argc), r, fr, stkd);

        if (stkd > max_out_args)
            max_out_args = stkd;
diff --git a/js/src/nanojit/NativeMIPS.h b/js/src/nanojit/NativeMIPS.h
index d31aae3137a..8af897ac467 100644
--- a/js/src/nanojit/NativeMIPS.h
+++ b/js/src/nanojit/NativeMIPS.h
@@ -179,9 +179,9 @@ namespace nanojit
    NIns *asm_branch_near(bool, LIns*, NIns*); \
    void asm_cmp(LOpcode condop, LIns *a, LIns *b, Register cr); \
    void asm_move(Register d, Register s); \
-    void asm_regarg(ArgSize sz, LInsp p, Register r); \
+    void asm_regarg(ArgType ty, LInsp p, Register r); \
    void asm_stkarg(LInsp arg, int stkd); \
-    void asm_arg(ArgSize sz, LInsp arg, Register& r, Register& fr, int& stkd); \
+    void asm_arg(ArgType ty, LInsp arg, Register& r, Register& fr, int& stkd); \
    void asm_arg_64(LInsp arg, Register& r, Register& fr, int& stkd) ;
diff --git a/js/src/nanojit/NativePPC.cpp b/js/src/nanojit/NativePPC.cpp
index c695a0f16fe..dcf91251e64 100644
--- a/js/src/nanojit/NativePPC.cpp
+++ b/js/src/nanojit/NativePPC.cpp
@@ -224,7 +224,7 @@ namespace nanojit
        Register rr = ins->deprecated_getReg();
        if (deprecated_isKnownReg(rr) && (rmask(rr) & FpRegs)) {
            // FPR already assigned, fine, use it
-            deprecated_freeRsrcOf(ins, false);
+            deprecated_freeRsrcOf(ins);
        } else {
            // use a GPR register; its okay to copy doubles with GPR's
            // but *not* okay to copy non-doubles with FPR's
@@ -571,10 +571,10 @@ namespace nanojit
        Register rb = b==a ? ra : findRegFor(b, allow & ~rmask(ra));
        if (isSICmpOpcode(condop)) {
            CMPW(cr, ra, rb);
-        } 
+        }
        else if (isUICmpOpcode(condop)) {
            CMPLW(cr, ra, rb);
-        } 
+        }
#if defined NANOJIT_64BIT
        else if (isSQCmpOpcode(condop)) {
            CMPD(cr, ra, rb);
@@ -625,6 +625,8 @@
            }
            asm_li(r, i->imm32());
        }
+        // XXX: should really rematerialize the isconstf() and isconstq() cases
+        // here; canRemat() assumes they will be rematerialized.
else { d = findMemFor(i); if (IsFpReg(r)) { @@ -641,7 +643,7 @@ namespace nanojit } } - void Assembler::asm_int(LIns *ins) { + void Assembler::asm_immi(LIns *ins) { Register rr = deprecated_prepResultReg(ins, GpRegs); asm_li(rr, ins->imm32()); } @@ -683,8 +685,8 @@ namespace nanojit evictScratchRegsExcept(0); const CallInfo* call = ins->callInfo(); - ArgSize sizes[MAXARGS]; - uint32_t argc = call->get_sizes(sizes); + ArgType argTypes[MAXARGS]; + uint32_t argc = call->getArgTypes(argTypes); bool indirect; if (!(indirect = call->isIndirect())) { @@ -699,7 +701,7 @@ namespace nanojit underrunProtect(8); // underrunProtect might clobber CTR BCTRL(); MTCTR(R11); - asm_regarg(ARGSIZE_P, ins->arg(--argc), R11); + asm_regarg(ARGTYPE_P, ins->arg(--argc), R11); } int param_size = 0; @@ -708,22 +710,23 @@ namespace nanojit Register fr = F1; for(uint32_t i = 0; i < argc; i++) { uint32_t j = argc - i - 1; - ArgSize sz = sizes[j]; + ArgType ty = argTypes[j]; LInsp arg = ins->arg(j); - if (sz & ARGSIZE_MASK_INT) { + NanoAssert(ty != ARGTYPE_V); + if (ty != ARGTYPE_F) { // GP arg if (r <= R10) { - asm_regarg(sz, arg, r); + asm_regarg(ty, arg, r); r = nextreg(r); param_size += sizeof(void*); } else { // put arg on stack TODO(stack_int32); } - } else if (sz == ARGSIZE_F) { + } else { // double if (fr <= F13) { - asm_regarg(sz, arg, fr); + asm_regarg(ty, arg, fr); fr = nextreg(fr); #ifdef NANOJIT_64BIT r = nextreg(r); @@ -735,24 +738,23 @@ namespace nanojit // put arg on stack TODO(stack_double); } - } else { - TODO(ARGSIZE_UNK); } } if (param_size > max_param_size) max_param_size = param_size; } - void Assembler::asm_regarg(ArgSize sz, LInsp p, Register r) + void Assembler::asm_regarg(ArgType ty, LInsp p, Register r) { NanoAssert(r != deprecated_UnknownReg); - if (sz & ARGSIZE_MASK_INT) + NanoAssert(ty != ARGTYPE_V); + if (ty != ARGTYPE_F) { #ifdef NANOJIT_64BIT - if (sz == ARGSIZE_I) { + if (ty == ARGTYPE_I) { // sign extend 32->64 EXTSW(r, r); - } else if (sz == ARGSIZE_U) { + } else if (ty == ARGTYPE_U) { // zero extend 32->64 CLRLDI(r, r, 32); } @@ -785,7 +787,7 @@ namespace nanojit } } } - else if (sz == ARGSIZE_F) { + else { if (p->isUsed()) { Register rp = p->deprecated_getReg(); if (!deprecated_isKnownReg(rp) || !IsFpReg(rp)) { @@ -804,27 +806,23 @@ namespace nanojit findSpecificRegFor(p, r); } } - else { - TODO(ARGSIZE_UNK); - } } void Assembler::asm_spill(Register rr, int d, bool /* pop */, bool quad) { (void)quad; - if (d) { - if (IsFpReg(rr)) { - NanoAssert(quad); - STFD(rr, d, FP); - } - #ifdef NANOJIT_64BIT - else if (quad) { - STD(rr, d, FP); - } - #endif - else { - NanoAssert(!quad); - STW(rr, d, FP); - } + NanoAssert(d); + if (IsFpReg(rr)) { + NanoAssert(quad); + STFD(rr, d, FP); + } + #ifdef NANOJIT_64BIT + else if (quad) { + STD(rr, d, FP); + } + #endif + else { + NanoAssert(!quad); + STW(rr, d, FP); } } @@ -842,9 +840,7 @@ namespace nanojit // ppc arith immediate ops sign-exted the imm16 value switch (op) { case LIR_add: - CASE32(LIR_iaddp:) CASE64(LIR_qiadd:) - CASE64(LIR_qaddp:) ADDI(rr, ra, rhsc); return; case LIR_sub: @@ -891,9 +887,7 @@ namespace nanojit Register rb = rhs==lhs ? 
ra : findRegFor(rhs, GpRegs&~rmask(ra)); switch (op) { CASE64(LIR_qiadd:) - CASE64(LIR_qaddp:) case LIR_add: - CASE32(LIR_iaddp:) ADD(rr, ra, rb); break; CASE64(LIR_qiand:) @@ -1027,13 +1021,52 @@ namespace nanojit } } #endif - - void Assembler::asm_quad(LIns *ins) { + +#ifdef NANOJIT_64BIT + void Assembler::asm_immq(LIns *ins) { + Register r = ins->deprecated_getReg(); + if (deprecated_isKnownReg(r) && (rmask(r) & FpRegs)) { + // FPR already assigned, fine, use it + deprecated_freeRsrcOf(ins); + } else { + // use a GPR register; it's okay to copy doubles with GPR's + // but *not* okay to copy non-doubles with FPR's + r = deprecated_prepResultReg(ins, GpRegs); + } + + if (rmask(r) & FpRegs) { + union { + double d; + struct { + int32_t hi, lo; + } w; + }; + d = ins->imm64f(); + LFD(r, 8, SP); + STW(R0, 12, SP); + asm_li(R0, w.lo); + STW(R0, 8, SP); + asm_li(R0, w.hi); + } + else { + int64_t q = ins->imm64(); + if (isS32(q)) { + asm_li(r, int32_t(q)); + return; + } + RLDIMI(r,R0,32,0); // or 32,32? + asm_li(R0, int32_t(q>>32)); // hi bits into R0 + asm_li(r, int32_t(q)); // lo bits into dest reg + } + } +#endif + + void Assembler::asm_immf(LIns *ins) { #ifdef NANOJIT_64BIT Register r = ins->deprecated_getReg(); if (deprecated_isKnownReg(r) && (rmask(r) & FpRegs)) { // FPR already assigned, fine, use it - deprecated_freeRsrcOf(ins, false); + deprecated_freeRsrcOf(ins); } else { // use a GPR register; its okay to copy doubles with GPR's // but *not* okay to copy non-doubles with FPR's @@ -1172,7 +1205,7 @@ namespace nanojit #else NanoAssert((ins->opcode() == LIR_cmov && iftrue->isI32() && iffalse->isI32())); #endif - + // fixme: we could handle fpu registers here, too, since we're just branching Register rr = deprecated_prepResultReg(ins, GpRegs); findSpecificRegFor(iftrue, rr); @@ -1326,7 +1359,6 @@ namespace nanojit void Assembler::asm_qbinop(LIns *ins) { LOpcode op = ins->opcode(); switch (op) { - case LIR_qaddp: case LIR_qior: case LIR_qiand: case LIR_qursh: diff --git a/js/src/nanojit/NativePPC.h b/js/src/nanojit/NativePPC.h index 6da77812af0..6e609e1fe09 100644 --- a/js/src/nanojit/NativePPC.h +++ b/js/src/nanojit/NativePPC.h @@ -287,7 +287,7 @@ namespace nanojit void nativePageSetup(); \ void br(NIns *addr, int link); \ void br_far(NIns *addr, int link); \ - void asm_regarg(ArgSize, LIns*, Register); \ + void asm_regarg(ArgType, LIns*, Register); \ void asm_li(Register r, int32_t imm); \ void asm_li32(Register r, int32_t imm); \ void asm_li64(Register r, uint64_t imm); \ diff --git a/js/src/nanojit/NativeSparc.cpp b/js/src/nanojit/NativeSparc.cpp index 0bcc0a59ffd..85f41a4f919 100644 --- a/js/src/nanojit/NativeSparc.cpp +++ b/js/src/nanojit/NativeSparc.cpp @@ -161,21 +161,21 @@ namespace nanojit evictScratchRegsExcept(0); - const CallInfo* call = ins->callInfo(); + const CallInfo* ci = ins->callInfo(); underrunProtect(8); NOP(); - ArgSize sizes[MAXARGS]; - uint32_t argc = call->get_sizes(sizes); + ArgType argTypes[MAXARGS]; + uint32_t argc = ci->getArgTypes(argTypes); NanoAssert(ins->isop(LIR_pcall) || ins->isop(LIR_fcall)); verbose_only(if (_logc->lcbits & LC_Assembly) outputf(" %p:", _nIns); ) - bool indirect = call->isIndirect(); + bool indirect = ci->isIndirect(); if (!indirect) { - CALL(call); + CALL(ci); } else { argc--; @@ -189,8 +189,8 @@ namespace nanojit for(int i=0; i<argc; i++) { uint32_t j = argc - i - 1; - ArgSize sz = sizes[j]; + ArgType ty = argTypes[j]; - if (sz == ARGSIZE_F) { + if (ty == ARGTYPE_F) { Register r = findRegFor(ins->arg(j), FpRegs); GPRIndex += 2; offset += 8; @@ -317,12 +317,11 @@ namespace nanojit { underrunProtect(24); (void)quad; - if (d) { - if (rmask(rr) & FpRegs) { - STDF32(rr, d, FP); - } else { - STW32(rr, d, FP); -
} + NanoAssert(d); + if (rmask(rr) & FpRegs) { + STDF32(rr, d, FP); + } else { + STW32(rr, d, FP); } } @@ -359,7 +358,7 @@ namespace nanojit if (dr) asm_mmq(FP, dr, rb, db); - deprecated_freeRsrcOf(ins, false); + deprecated_freeRsrcOf(ins); if (rr != deprecated_UnknownReg) { @@ -384,7 +383,7 @@ namespace nanojit } underrunProtect(48); - if (value->isconstq()) + if (value->isconstf()) { // if a constant 64-bit value just store it now rather than // generating a pointless store/load/store sequence @@ -634,7 +633,7 @@ namespace nanojit } allow &= ~rmask(rb); } - else if ((op == LIR_add||op == LIR_iaddp||op == LIR_addxov) && lhs->isop(LIR_alloc) && rhs->isconst()) { + else if ((op == LIR_add || op == LIR_addxov) && lhs->isop(LIR_alloc) && rhs->isconst()) { // add alloc+const, use lea Register rr = deprecated_prepResultReg(ins, allow); int d = findMemFor(lhs) + rhs->imm32(); @@ -654,7 +653,7 @@ namespace nanojit if (lhs == rhs) rb = ra; - if (op == LIR_add || op == LIR_iaddp || op == LIR_addxov) + if (op == LIR_add || op == LIR_addxov) ADDCC(rr, rb, rr); else if (op == LIR_sub || op == LIR_subxov) SUBCC(rr, rb, rr); @@ -678,11 +677,11 @@ namespace nanojit else { int c = rhs->imm32(); - if (op == LIR_add || op == LIR_iaddp || op == LIR_addxov) { + if (op == LIR_add || op == LIR_addxov) ADDCC(rr, L2, rr); - } else if (op == LIR_sub || op == LIR_subxov) + else if (op == LIR_sub || op == LIR_subxov) SUBCC(rr, L2, rr); - } else if (op == LIR_and) + else if (op == LIR_and) AND(rr, L2, rr); else if (op == LIR_or) OR(rr, L2, rr); @@ -783,8 +782,6 @@ namespace nanojit case LIR_uge: MOVCS (iffalsereg, 1, 0, 0, rr); break; debug_only( default: NanoAssert(0); break; ) } - } else if (op == LIR_qcmov) { - NanoAssert(0); } /*const Register iftruereg =*/ findSpecificRegFor(iftrue, rr); asm_cmp(condval); @@ -797,7 +794,7 @@ namespace nanojit deprecated_prepResultReg(ins, rmask(argRegs[a])); } - void Assembler::asm_int(LInsp ins) + void Assembler::asm_immi(LInsp ins) { underrunProtect(8); Register rr = deprecated_prepResultReg(ins, GpRegs); @@ -808,7 +805,7 @@ namespace nanojit SET32(val, rr); } - void Assembler::asm_quad(LInsp ins) + void Assembler::asm_immf(LInsp ins) { underrunProtect(64); Register rr = ins->deprecated_getReg(); @@ -825,7 +822,7 @@ namespace nanojit // @todo, if we used xor, ldsd, fldz, etc above, we don't need mem here int d = deprecated_disp(ins); - deprecated_freeRsrcOf(ins, false); + deprecated_freeRsrcOf(ins); if (d) { STW32(L2, d+4, FP); diff --git a/js/src/nanojit/NativeX64.cpp b/js/src/nanojit/NativeX64.cpp index 5d5c9b9e1d3..ca9658d6637 100644 --- a/js/src/nanojit/NativeX64.cpp +++ b/js/src/nanojit/NativeX64.cpp @@ -451,7 +451,8 @@ namespace nanojit // XORPD because it's one byte shorter. This is ok because it's only used for // zeroing an XMM register; hence the single argument. // Also note that (unlike most SSE2 instructions) XORPS does not have a prefix, thus emitrr() should be used. 
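+ // The two-register form added below emits a general xorps between two XMM
+ // registers; asm_fneg (later in this patch) uses it to fold the sign-bit
+ // mask held in a temporary FpReg into the result register.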
- void Assembler::XORPS( R r) { emitrr(X64_xorps, r,r); asm_output("xorps %s, %s", RQ(r),RQ(r)); } + void Assembler::XORPS( R r) { emitrr(X64_xorps, r,r); asm_output("xorps %s, %s", RQ(r),RQ(r)); } + void Assembler::XORPS( R l, R r) { emitrr(X64_xorps, l,r); asm_output("xorps %s, %s", RQ(l),RQ(r)); } void Assembler::DIVSD( R l, R r) { emitprr(X64_divsd, l,r); asm_output("divsd %s, %s", RQ(l),RQ(r)); } void Assembler::MULSD( R l, R r) { emitprr(X64_mulsd, l,r); asm_output("mulsd %s, %s", RQ(l),RQ(r)); } void Assembler::ADDSD( R l, R r) { emitprr(X64_addsd, l,r); asm_output("addsd %s, %s", RQ(l),RQ(r)); } @@ -718,8 +719,7 @@ namespace nanojit case LIR_sub: case LIR_subxov: SUBLR8(rr, imm); break; case LIR_xor: XORLR8(rr, imm); break; - case LIR_qiadd: - case LIR_qaddp: ADDQR8(rr, imm); break; + case LIR_qiadd: ADDQR8(rr, imm); break; case LIR_qiand: ANDQR8(rr, imm); break; case LIR_qior: ORQR8( rr, imm); break; case LIR_qxor: XORQR8(rr, imm); break; @@ -734,8 +734,7 @@ namespace nanojit case LIR_sub: case LIR_subxov: SUBLRI(rr, imm); break; case LIR_xor: XORLRI(rr, imm); break; - case LIR_qiadd: - case LIR_qaddp: ADDQRI(rr, imm); break; + case LIR_qiadd: ADDQRI(rr, imm); break; case LIR_qiand: ANDQRI(rr, imm); break; case LIR_qior: ORQRI( rr, imm); break; case LIR_qxor: XORQRI(rr, imm); break; @@ -804,7 +803,7 @@ namespace nanojit // binary op with integer registers void Assembler::asm_arith(LIns *ins) { - Register rr, ra, rb; + Register rr, ra, rb = UnspecifiedReg; // init to shut GCC up switch (ins->opcode()) { case LIR_lsh: case LIR_qilsh: @@ -844,8 +843,7 @@ namespace nanojit case LIR_qxor: XORQRR(rr, rb); break; case LIR_qior: ORQRR(rr, rb); break; case LIR_qiand: ANDQRR(rr, rb); break; - case LIR_qiadd: - case LIR_qaddp: ADDQRR(rr, rb); break; + case LIR_qiadd: ADDQRR(rr, rb); break; } if (rr != ra) MR(rr, ra); @@ -892,11 +890,10 @@ namespace nanojit evictScratchRegsExcept(rmask(rr)); const CallInfo *call = ins->callInfo(); - ArgSize sizes[MAXARGS]; - int argc = call->get_sizes(sizes); + ArgType argTypes[MAXARGS]; + int argc = call->getArgTypes(argTypes); - bool indirect = call->isIndirect(); - if (!indirect) { + if (!call->isIndirect()) { verbose_only(if (_logc->lcbits & LC_Assembly) outputf(" %p:", _nIns); ) @@ -906,18 +903,23 @@ namespace nanojit } else { // can't reach target from here, load imm64 and do an indirect jump CALLRAX(); - asm_quad(RAX, (uint64_t)target, /*canClobberCCs*/true); + asm_immq(RAX, (uint64_t)target, /*canClobberCCs*/true); } + // Call this now so that the arg setup can involve 'rr'. + freeResourcesOf(ins); } else { // Indirect call: we assign the address arg to RAX since it's not // used for regular arguments, and is otherwise scratch since it's // clobberred by the call. - asm_regarg(ARGSIZE_P, ins->arg(--argc), RAX); CALLRAX(); - } - // Call this now so that the arg setup can involve 'rr'. - freeResourcesOf(ins); + // Call this now so that the arg setup can involve 'rr'. + freeResourcesOf(ins); + + // Assign the call address to RAX. Must happen after freeResourcesOf() + // since RAX is usually the return value and will be allocated until that point. 
+ asm_regarg(ARGTYPE_P, ins->arg(--argc), RAX); + } #ifdef _WIN64 int stk_used = 32; // always reserve 32byte shadow area @@ -928,28 +930,28 @@ namespace nanojit int arg_index = 0; for (int i = 0; i < argc; i++) { int j = argc - i - 1; - ArgSize sz = sizes[j]; + ArgType ty = argTypes[j]; LIns* arg = ins->arg(j); - if ((sz & ARGSIZE_MASK_INT) && arg_index < NumArgRegs) { + if ((ty == ARGTYPE_I || ty == ARGTYPE_U || ty == ARGTYPE_Q) && arg_index < NumArgRegs) { // gp arg - asm_regarg(sz, arg, argRegs[arg_index]); + asm_regarg(ty, arg, argRegs[arg_index]); arg_index++; } #ifdef _WIN64 - else if (sz == ARGSIZE_F && arg_index < NumArgRegs) { + else if (ty == ARGTYPE_F && arg_index < NumArgRegs) { // double goes in XMM reg # based on overall arg_index - asm_regarg(sz, arg, Register(XMM0+arg_index)); + asm_regarg(ty, arg, Register(XMM0+arg_index)); arg_index++; } #else - else if (sz == ARGSIZE_F && fr < XMM8) { + else if (ty == ARGTYPE_F && fr < XMM8) { // double goes in next available XMM register - asm_regarg(sz, arg, fr); + asm_regarg(ty, arg, fr); fr = nextreg(fr); } #endif else { - asm_stkarg(sz, arg, stk_used); + asm_stkarg(ty, arg, stk_used); stk_used += sizeof(void*); } } @@ -958,23 +960,25 @@ namespace nanojit max_stk_used = stk_used; } - void Assembler::asm_regarg(ArgSize sz, LIns *p, Register r) { - if (sz == ARGSIZE_I) { + void Assembler::asm_regarg(ArgType ty, LIns *p, Register r) { + if (ty == ARGTYPE_I) { NanoAssert(p->isI32()); if (p->isconst()) { - asm_quad(r, int64_t(p->imm32()), /*canClobberCCs*/true); + asm_immq(r, int64_t(p->imm32()), /*canClobberCCs*/true); return; } // sign extend int32 to int64 MOVSXDR(r, r); - } else if (sz == ARGSIZE_U) { + } else if (ty == ARGTYPE_U) { NanoAssert(p->isI32()); if (p->isconst()) { - asm_quad(r, uint64_t(uint32_t(p->imm32())), /*canClobberCCs*/true); + asm_immq(r, uint64_t(uint32_t(p->imm32())), /*canClobberCCs*/true); return; } // zero extend with 32bit mov, auto-zeros upper 32bits MOVLR(r, r); + } else { + // Do nothing. } /* there is no point in folding an immediate here, because * the argument register must be a scratch register and we're @@ -986,19 +990,22 @@ namespace nanojit findSpecificRegFor(p, r); } - void Assembler::asm_stkarg(ArgSize sz, LIns *p, int stk_off) { + void Assembler::asm_stkarg(ArgType ty, LIns *p, int stk_off) { NanoAssert(isS8(stk_off)); - if (sz & ARGSIZE_MASK_INT) { + if (ty == ARGTYPE_I || ty == ARGTYPE_U || ty == ARGTYPE_Q) { Register r = findRegFor(p, GpRegs); MOVQSPR(stk_off, r); // movq [rsp+d8], r - if (sz == ARGSIZE_I) { + if (ty == ARGTYPE_I) { // extend int32 to int64 NanoAssert(p->isI32()); MOVSXDR(r, r); - } else if (sz == ARGSIZE_U) { + } else if (ty == ARGTYPE_U) { // extend uint32 to uint64 NanoAssert(p->isI32()); MOVLR(r, r); + } else { + NanoAssert(ty == ARGTYPE_Q); + // Do nothing. 
} } else { TODO(asm_stkarg_non_int); @@ -1372,11 +1379,15 @@ namespace nanojit } else if (ins->isconst()) { ins->clearReg(); - asm_int(r, ins->imm32(), /*canClobberCCs*/false); + asm_immi(r, ins->imm32(), /*canClobberCCs*/false); } - else if (ins->isconstq() && IsGpReg(r)) { + else if (ins->isconstq()) { ins->clearReg(); - asm_quad(r, ins->imm64(), /*canClobberCCs*/false); + asm_immq(r, ins->imm64(), /*canClobberCCs*/false); + } + else if (ins->isconstf()) { + ins->clearReg(); + asm_immf(r, ins->imm64(), /*canClobberCCs*/false); } else { int d = findMemFor(ins); @@ -1388,7 +1399,6 @@ namespace nanojit MOVQRM(r, d, FP); } else { NanoAssert(ins->isI32()); - NanoAssert(IsGpReg(r)); MOVLRM(r, d, FP); } } @@ -1583,41 +1593,37 @@ namespace nanojit } } - void Assembler::asm_int(LIns *ins) { + void Assembler::asm_immi(LIns *ins) { Register rr = prepareResultReg(ins, GpRegs); - - asm_int(rr, ins->imm32(), /*canClobberCCs*/true); - + asm_immi(rr, ins->imm32(), /*canClobberCCs*/true); freeResourcesOf(ins); } - void Assembler::asm_int(Register r, int32_t v, bool canClobberCCs) { + void Assembler::asm_immq(LIns *ins) { + Register rr = prepareResultReg(ins, GpRegs); + asm_immq(rr, ins->imm64(), /*canClobberCCs*/true); + freeResourcesOf(ins); + } + + void Assembler::asm_immf(LIns *ins) { + Register r = prepareResultReg(ins, FpRegs); + asm_immf(r, ins->imm64(), /*canClobberCCs*/true); + freeResourcesOf(ins); + } + + void Assembler::asm_immi(Register r, int32_t v, bool canClobberCCs) { + NanoAssert(IsGpReg(r)); if (v == 0 && canClobberCCs) { - if (IsGpReg(r)) { - XORRR(r, r); - } else { - XORPS(r); - } + XORRR(r, r); } else { - NanoAssert(!IsFpReg(r)); MOVI(r, v); } } - void Assembler::asm_quad(LIns *ins) { - uint64_t v = ins->imm64(); - RegisterMask allow = v == 0 ? GpRegs|FpRegs : GpRegs; - Register rr = prepareResultReg(ins, allow); - - asm_quad(rr, v, /*canClobberCCs*/true); - - freeResourcesOf(ins); - } - - void Assembler::asm_quad(Register r, uint64_t v, bool canClobberCCs) { - NanoAssert(v == 0 || IsGpReg(r)); + void Assembler::asm_immq(Register r, uint64_t v, bool canClobberCCs) { + NanoAssert(IsGpReg(r)); if (isU32(v)) { - asm_int(r, int32_t(v), canClobberCCs); + asm_immi(r, int32_t(v), canClobberCCs); } else if (isS32(v)) { // safe for sign-extension 32->64 MOVQI32(r, int32_t(v)); @@ -1630,6 +1636,21 @@ namespace nanojit } } + void Assembler::asm_immf(Register r, uint64_t v, bool canClobberCCs) { + NanoAssert(IsFpReg(r)); + if (v == 0 && canClobberCCs) { + XORPS(r); + } else { + // There's no general way to load an immediate into an XMM reg. + // For non-zero floats the best thing is to put the equivalent + // 64-bit integer into a scratch GpReg and then move it into the + // appropriate FpReg. + Register rt = registerAllocTmp(GpRegs); + MOVQXR(r, rt); + asm_immq(rt, v, canClobberCCs); + } + } + void Assembler::asm_param(LIns *ins) { uint32_t a = ins->paramArg(); uint32_t kind = ins->paramKind(); @@ -1689,6 +1710,8 @@ namespace nanojit // Register clean-up for 2-address style unary ops of the form R = (op) R. // Pairs with beginOp1Regs() and beginOp2Regs(). void Assembler::endOpRegs(LIns* ins, Register rr, Register ra) { + (void) rr; // quell warnings when NanoAssert is compiled out. + LIns* a = ins->oprnd1(); // We're finished with 'ins'. 
@@ -1706,43 +1729,48 @@ namespace nanojit void Assembler::asm_fneg(LIns *ins) { Register rr, ra; - if (isS32((uintptr_t)negateMask) || isTargetWithinS32((NIns*)negateMask)) { - beginOp1Regs(ins, FpRegs, rr, ra); - if (isS32((uintptr_t)negateMask)) { - // builtin code is in bottom or top 2GB addr space, use absolute addressing - XORPSA(rr, (int32_t)(uintptr_t)negateMask); - } else { - // jit code is within +/-2GB of builtin code, use rip-relative - XORPSM(rr, (NIns*)negateMask); - } - if (ra != rr) - asm_nongp_copy(rr,ra); - endOpRegs(ins, rr, ra); - + beginOp1Regs(ins, FpRegs, rr, ra); + if (isS32((uintptr_t)negateMask)) { + // builtin code is in bottom or top 2GB addr space, use absolute addressing + XORPSA(rr, (int32_t)(uintptr_t)negateMask); + } else if (isTargetWithinS32((NIns*)negateMask)) { + // jit code is within +/-2GB of builtin code, use rip-relative + XORPSM(rr, (NIns*)negateMask); } else { // This is just hideous - can't use RIP-relative load, can't use // absolute-address load, and cant move imm64 const to XMM. - // so do it all in a GPR. hrmph. - rr = prepareResultReg(ins, GpRegs); - ra = findRegFor(ins->oprnd1(), GpRegs & ~rmask(rr)); - XORQRR(rr, ra); // xor rr, ra - asm_quad(rr, negateMask[0], /*canClobberCCs*/true); // mov rr, 0x8000000000000000 - freeResourcesOf(ins); + // Solution: move negateMask into a temp GP register, then copy to + // a temp XMM register. + // Nb: we don't want any F64 values to end up in a GpReg, nor any + // I64 values to end up in an FpReg. + // + // # 'gt' is a temporary GpReg; 'rt' is a temporary FpReg. + // # ins->oprnd1() is in 'rr' (FpRegs) + // mov gt, 0x8000000000000000 + // mov rt, gt + // xorps rr, rt + Register rt = registerAllocTmp(FpRegs & ~(rmask(ra)|rmask(rr))); + Register gt = registerAllocTmp(GpRegs); + XORPS(rr, rt); + MOVQXR(rt, gt); + asm_immq(gt, negateMask[0], /*canClobberCCs*/true); } + if (ra != rr) + asm_nongp_copy(rr,ra); + endOpRegs(ins, rr, ra); } void Assembler::asm_spill(Register rr, int d, bool /*pop*/, bool quad) { - if (d) { - if (!IsFpReg(rr)) { - if (quad) - MOVQMR(rr, d, FP); - else - MOVLMR(rr, d, FP); - } else { - // store 64bits from XMM to memory - NanoAssert(quad); - MOVSDMR(rr, d, FP); - } + NanoAssert(d); + if (!IsFpReg(rr)) { + if (quad) + MOVQMR(rr, d, FP); + else + MOVLMR(rr, d, FP); + } else { + // store 64bits from XMM to memory + NanoAssert(quad); + MOVSDMR(rr, d, FP); } } @@ -1874,7 +1902,7 @@ namespace nanojit MR(RSP, RBP); // return value is GuardRecord* - asm_quad(RAX, uintptr_t(lr), /*canClobberCCs*/true); + asm_immq(RAX, uintptr_t(lr), /*canClobberCCs*/true); } void Assembler::nInit(AvmCore*) { @@ -1951,7 +1979,7 @@ namespace nanojit // at this point.
emitr(X64_popr, RAX); // popq %rax emit(X64_inclmRAX); // incl (%rax) - asm_quad(RAX, (uint64_t)pCtr, /*canClobberCCs*/true); // movabsq $pCtr, %rax + asm_immq(RAX, (uint64_t)pCtr, /*canClobberCCs*/true); // movabsq $pCtr, %rax emitr(X64_pushr, RAX); // pushq %rax } ) @@ -1971,7 +1999,7 @@ namespace nanojit // jmp [indexreg*8 + tablereg] JMPXB(indexreg, tablereg); // tablereg <- #table - asm_quad(tablereg, (uint64_t)table, /*canClobberCCs*/true); + asm_immq(tablereg, (uint64_t)table, /*canClobberCCs*/true); } } diff --git a/js/src/nanojit/NativeX64.h b/js/src/nanojit/NativeX64.h index 67fe76cb882..eb98da84c61 100644 --- a/js/src/nanojit/NativeX64.h +++ b/js/src/nanojit/NativeX64.h @@ -207,7 +207,7 @@ namespace nanojit X64_imul = 0xC0AF0F4000000004LL, // 32bit signed mul r *= b X64_imuli = 0xC069400000000003LL, // 32bit signed mul r = b * imm32 X64_imul8 = 0x00C06B4000000004LL, // 32bit signed mul r = b * imm8 - X64_jmpi = 0x0000000025FF0006LL, // jump *0(rip) + X64_jmpi = 0x0000000025FF0006LL, // jump *0(rip) X64_jmp = 0x00000000E9000005LL, // jump near rel32 X64_jmp8 = 0x00EB000000000002LL, // jump near rel8 X64_jo = 0x00000000800F0006LL, // jump near if overflow @@ -392,10 +392,11 @@ namespace nanojit void emitxm_rel(uint64_t op, Register r, NIns* addr64);\ bool isTargetWithinS8(NIns* target);\ bool isTargetWithinS32(NIns* target);\ - void asm_int(Register r, int32_t v, bool canClobberCCs);\ - void asm_quad(Register r, uint64_t v, bool canClobberCCs);\ - void asm_regarg(ArgSize, LIns*, Register);\ - void asm_stkarg(ArgSize, LIns*, int);\ + void asm_immi(Register r, int32_t v, bool canClobberCCs);\ + void asm_immq(Register r, uint64_t v, bool canClobberCCs);\ + void asm_immf(Register r, uint64_t v, bool canClobberCCs);\ + void asm_regarg(ArgType, LIns*, Register);\ + void asm_stkarg(ArgType, LIns*, int);\ void asm_shift(LIns*);\ void asm_shift_imm(LIns*);\ void asm_arith_imm(LIns*);\ @@ -478,6 +479,7 @@ namespace nanojit void MOVSXDR(Register l, Register r);\ void MOVZX8(Register l, Register r);\ void XORPS(Register r);\ + void XORPS(Register l, Register r);\ void DIVSD(Register l, Register r);\ void MULSD(Register l, Register r);\ void ADDSD(Register l, Register r);\ diff --git a/js/src/nanojit/Nativei386.cpp b/js/src/nanojit/Nativei386.cpp index dd4e5c5c175..1572e08855c 100644 --- a/js/src/nanojit/Nativei386.cpp +++ b/js/src/nanojit/Nativei386.cpp @@ -148,7 +148,7 @@ namespace nanojit MR(SP,FP); // return value is GuardRecord* - asm_int(EAX, int(lr), /*canClobberCCs*/true); + asm_immi(EAX, int(lr), /*canClobberCCs*/true); } NIns *Assembler::genEpilogue() @@ -168,7 +168,7 @@ namespace nanojit const CallInfo* call = ins->callInfo(); // must be signed, not unsigned - uint32_t iargs = call->count_iargs(); + uint32_t iargs = call->count_int32_args(); int32_t fargs = call->count_args() - iargs; bool indirect = call->isIndirect(); @@ -237,13 +237,13 @@ namespace nanojit // Pre-assign registers to the first N 4B args based on the calling convention. 
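+ // (Assuming the usual i386 nanojit setup, argRegs holds the fastcall-style
+ // ECX/EDX pair, so at most the first two 4-byte integer args get registers.)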
uint32_t n = 0; - ArgSize sizes[MAXARGS]; - uint32_t argc = call->get_sizes(sizes); + ArgType argTypes[MAXARGS]; + uint32_t argc = call->getArgTypes(argTypes); int32_t stkd = 0; if (indirect) { argc--; - asm_arg(ARGSIZE_P, ins->arg(argc), EAX, stkd); + asm_arg(ARGTYPE_P, ins->arg(argc), EAX, stkd); if (!_config.i386_fixed_esp) stkd = 0; } @@ -251,12 +251,12 @@ for (uint32_t i = 0; i < argc; i++) { uint32_t j = argc-i-1; - ArgSize sz = sizes[j]; + ArgType ty = argTypes[j]; Register r = UnspecifiedReg; - if (n < max_regs && sz != ARGSIZE_F) { + if (n < max_regs && ty != ARGTYPE_F) { r = argRegs[n++]; // tell asm_arg what reg to use } - asm_arg(sz, ins->arg(j), r, stkd); + asm_arg(ty, ins->arg(j), r, stkd); if (!_config.i386_fixed_esp) stkd = 0; } @@ -281,7 +281,7 @@ btr RegAlloc::free[ecx], eax // free &= ~rmask(i) mov r, eax } - #elif defined __SUNPRO_CC + #elif defined __SUNPRO_CC // Workaround for Sun Studio bug on handler embeded asm code. // See bug 544447 for detail. // https://bugzilla.mozilla.org/show_bug.cgi?id=544447 @@ -369,11 +369,11 @@ LEA(r, arDisp(ins), FP); } else if (ins->isconst()) { - asm_int(r, ins->imm32(), /*canClobberCCs*/false); + asm_immi(r, ins->imm32(), /*canClobberCCs*/false); ins->clearReg(); - } else if (ins->isconstq()) { - asm_quad(r, ins->imm64(), ins->imm64f(), /*canClobberCCs*/false); + } else if (ins->isconstf()) { + asm_immf(r, ins->imm64(), ins->imm64f(), /*canClobberCCs*/false); ins->clearReg(); } else if (ins->isop(LIR_param) && ins->paramKind() == 0 && @@ -465,21 +465,14 @@ void Assembler::asm_spill(Register rr, int d, bool pop, bool quad) { (void)quad; - if (d) - { - if (rmask(rr) & GpRegs) { - ST(FP, d, rr); - } else if (rmask(rr) & XmmRegs) { - SSE_STQ(d, FP, rr); - } else { - NanoAssert(rmask(rr) & x87Regs); - FSTQ((pop?1:0), d, FP); - } - } - else if (pop && (rmask(rr) & x87Regs)) - { - // pop the fpu result since it isn't used - FSTP(FST0); + NanoAssert(d); + if (rmask(rr) & GpRegs) { + ST(FP, d, rr); + } else if (rmask(rr) & XmmRegs) { + SSE_STQ(d, FP, rr); + } else { + NanoAssert(rmask(rr) & x87Regs); + FSTQ((pop?1:0), d, FP); } } @@ -501,7 +494,7 @@ // if (ins->isInReg()) { Register rr = ins->getReg(); - asm_spilli(ins, false); // if also in memory in post-state, spill it now + asm_maybe_spill(ins, false); // if also in memory in post-state, spill it now switch (ins->opcode()) { case LIR_ldf: if (rmask(rr) & XmmRegs) { @@ -576,7 +569,7 @@ FST32(pop?1:0, dr, rb); } - } else if (value->isconstq()) { + } else if (value->isconstf()) { STi(rb, dr+4, value->imm64_1()); STi(rb, dr, value->imm64_0()); @@ -749,11 +742,19 @@ // disturb the CCs! Register r = findRegFor(lhs, GpRegs); if (c == 0 && cond->isop(LIR_eq)) { - TEST(r, r); + NanoAssert(N_LOOKAHEAD >= 3); + if ((lhs->isop(LIR_and) || lhs->isop(LIR_or)) && + cond == lookahead[1] && lhs == lookahead[2]) + { + // Do nothing. At run-time, 'lhs' will have just been computed + // by an i386 instruction that sets ZF for us ('and' or + // 'or'), so we don't have to do it ourselves. + } else { + TEST(r, r); // sets ZF according to the value of 'lhs' + } } else { CMPi(r, c); } - } else { Register ra, rb; findRegFor2(GpRegs, lhs, ra, GpRegs, rhs, rb); @@ -848,9 +849,9 @@ LInsp rhs = ins->oprnd2(); // Second special case.
- // XXX: bug 547125: don't need this once LEA is used for LIR_add/LIR_addp in all cases below - if ((op == LIR_add || op == LIR_iaddp) && lhs->isop(LIR_alloc) && rhs->isconst()) { - // LIR_add(LIR_alloc, LIR_int) or LIR_addp(LIR_alloc, LIR_int) -- use lea. + // XXX: bug 547125: don't need this once LEA is used for LIR_add in all cases below + if (op == LIR_add && lhs->isop(LIR_alloc) && rhs->isconst()) { + // LIR_add(LIR_alloc, LIR_int) -- use lea. Register rr = prepareResultReg(ins, GpRegs); int d = findMemFor(lhs) + rhs->imm32(); @@ -912,7 +913,6 @@ namespace nanojit switch (op) { case LIR_add: - case LIR_addp: case LIR_addxov: ADD(rr, rb); break; // XXX: bug 547125: could use LEA for LIR_add case LIR_sub: case LIR_subxov: SUB(rr, rb); break; @@ -934,7 +934,6 @@ namespace nanojit } else { int c = rhs->imm32(); switch (op) { - case LIR_addp: case LIR_add: // this doesn't set cc's, only use it when cc's not required. LEA(rr, c, ra); @@ -1251,16 +1250,16 @@ namespace nanojit freeResourcesOf(ins); } - void Assembler::asm_int(LInsp ins) + void Assembler::asm_immi(LInsp ins) { Register rr = prepareResultReg(ins, GpRegs); - asm_int(rr, ins->imm32(), /*canClobberCCs*/true); + asm_immi(rr, ins->imm32(), /*canClobberCCs*/true); freeResourcesOf(ins); } - void Assembler::asm_int(Register r, int32_t val, bool canClobberCCs) + void Assembler::asm_immi(Register r, int32_t val, bool canClobberCCs) { if (val == 0 && canClobberCCs) XOR(r, r); @@ -1268,15 +1267,15 @@ namespace nanojit LDi(r, val); } - void Assembler::asm_quad(Register r, uint64_t q, double d, bool canClobberCCs) + void Assembler::asm_immf(Register r, uint64_t q, double d, bool canClobberCCs) { - // Quads require non-standard handling. There is no load-64-bit-immediate + // Floats require non-standard handling. There is no load-64-bit-immediate // instruction on i386, so in the general case, we must load it from memory. // This is unlike most other LIR operations which can be computed directly // in a register. We can special-case 0.0 and various other small ints // (1.0 on x87, any int32_t value on SSE2), but for all other values, we // allocate an 8-byte chunk via dataAlloc and load from there. Note that - // this implies that quads never require spill area, since they will always + // this implies that floats never require spill area, since they will always // be rematerialized from const data (or inline instructions in the special cases). if (rmask(r) & XmmRegs) { @@ -1288,7 +1287,7 @@ namespace nanojit Register tr = registerAllocTmp(GpRegs); SSE_CVTSI2SD(r, tr); SSE_XORPDr(r, r); // zero r to ensure no dependency stalls - asm_int(tr, (int)d, canClobberCCs); + asm_immi(tr, (int)d, canClobberCCs); } else { const uint64_t* p = findQuadConstant(q); LDSDm(r, (const double*)p); @@ -1307,13 +1306,13 @@ namespace nanojit } } - void Assembler::asm_quad(LInsp ins) + void Assembler::asm_immf(LInsp ins) { NanoAssert(ins->isconstf()); if (ins->isInReg()) { Register rr = ins->getReg(); NanoAssert(rmask(rr) & FpRegs); - asm_quad(rr, ins->imm64(), ins->imm64f(), /*canClobberCCs*/true); + asm_immf(rr, ins->imm64(), ins->imm64f(), /*canClobberCCs*/true); } else { // Do nothing, will be rematerialized when necessary. } @@ -1384,16 +1383,16 @@ namespace nanojit } } - void Assembler::asm_arg(ArgSize sz, LInsp ins, Register r, int32_t& stkd) + void Assembler::asm_arg(ArgType ty, LInsp ins, Register r, int32_t& stkd) { // If 'r' is known, then that's the register we have to put 'ins' // into. 
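+ // (Note: in the constant case below, asm_immi() rematerializes the
+ // immediate straight into the target register rather than reloading
+ // it from memory or tying up a second register.)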
- if (sz == ARGSIZE_I || sz == ARGSIZE_U) { + if (ty == ARGTYPE_I || ty == ARGTYPE_U) { if (r != UnspecifiedReg) { if (ins->isconst()) { // Rematerialize the constant. - asm_int(r, ins->imm32(), /*canClobberCCs*/true); + asm_immi(r, ins->imm32(), /*canClobberCCs*/true); } else if (ins->isInReg()) { if (r != ins->getReg()) MR(r, ins->getReg()); @@ -1420,7 +1419,7 @@ namespace nanojit } } else { - NanoAssert(sz == ARGSIZE_F); + NanoAssert(ty == ARGTYPE_F); asm_farg(ins, stkd); } } @@ -1562,7 +1561,7 @@ namespace nanojit NanoAssert(FST0 == rr); NanoAssert(!lhs->isInReg() || FST0 == lhs->getReg()); - if (rhs->isconstq()) { + if (rhs->isconstf()) { const uint64_t* p = findQuadConstant(rhs->imm64()); switch (op) { @@ -1889,7 +1888,7 @@ namespace nanojit } else { TEST_AH(mask); FNSTSW_AX(); // requires EAX to be free - if (rhs->isconstq()) + if (rhs->isconstf()) { const uint64_t* p = findQuadConstant(rhs->imm64()); FCOMdm((pop?1:0), (const double*)p); diff --git a/js/src/nanojit/Nativei386.h b/js/src/nanojit/Nativei386.h index c87277aeb27..5bf19769fa3 100644 --- a/js/src/nanojit/Nativei386.h +++ b/js/src/nanojit/Nativei386.h @@ -181,17 +181,17 @@ namespace nanojit void nativePageReset();\ void nativePageSetup();\ void underrunProtect(int);\ - void asm_int(Register r, int32_t val, bool canClobberCCs);\ + void asm_immi(Register r, int32_t val, bool canClobberCCs);\ void asm_stkarg(LInsp p, int32_t& stkd);\ void asm_farg(LInsp, int32_t& stkd);\ - void asm_arg(ArgSize sz, LInsp p, Register r, int32_t& stkd);\ + void asm_arg(ArgType ty, LInsp p, Register r, int32_t& stkd);\ void asm_pusharg(LInsp);\ void asm_fcmp(LIns *cond);\ NIns* asm_fbranch(bool, LIns*, NIns*);\ void asm_cmp(LIns *cond); \ void asm_div_mod(LIns *cond); \ void asm_load(int d, Register r); \ - void asm_quad(Register r, uint64_t q, double d, bool canClobberCCs); + void asm_immf(Register r, uint64_t q, double d, bool canClobberCCs); #define IMM8(i) \ _nIns -= 1; \ @@ -968,23 +968,23 @@ namespace nanojit #define EMMS() do { count_fpu(); FPUc(0x0f77); asm_output("emms"); } while (0) // standard direct call -#define CALL(c) do { \ +#define CALL(ci) do { \ count_call();\ underrunProtect(5); \ - int offset = (c->_address) - ((int)_nIns); \ + int offset = (ci->_address) - ((int)_nIns); \ IMM32( (uint32_t)offset ); \ *(--_nIns) = 0xE8; \ - verbose_only(asm_output("call %s",(c->_name));) \ - debug_only(if ((c->_argtypes & ARGSIZE_MASK_ANY)==ARGSIZE_F) fpu_push();)\ + verbose_only(asm_output("call %s",(ci->_name));) \ + debug_only(if (ci->returnType()==ARGTYPE_F) fpu_push();)\ } while (0) // indirect call thru register -#define CALLr(c,r) do { \ +#define CALLr(ci,r) do { \ count_calli();\ underrunProtect(2);\ ALU(0xff, 2, (r));\ verbose_only(asm_output("call %s",gpn(r));) \ - debug_only(if ((c->_argtypes & ARGSIZE_MASK_ANY)==ARGSIZE_F) fpu_push();)\ + debug_only(if (ci->returnType()==ARGTYPE_F) fpu_push();)\ } while (0) } diff --git a/js/src/nanojit/VMPI.cpp b/js/src/nanojit/VMPI.cpp index b7727960d52..893deb04de7 100644 --- a/js/src/nanojit/VMPI.cpp +++ b/js/src/nanojit/VMPI.cpp @@ -101,7 +101,7 @@ VMPI_setPageProtection(void *address, ULONG attrib; ULONG range = size; ULONG retval = DosQueryMem(address, &range, &attrib); - AvmAssert(retval == 0); + NanoAssert(retval == 0); // exit if this is the start of the next memory object if (attrib & attribFlags) { @@ -111,7 +111,7 @@ VMPI_setPageProtection(void *address, range = size > range ? 
range : size; retval = DosSetMem(address, range, flags); - AvmAssert(retval == 0); + NanoAssert(retval == 0); address = (char*)address + range; size -= range; @@ -140,7 +140,7 @@ void VMPI_setPageProtection(void *address, flags |= PROT_WRITE; } int retval = mprotect((maddr_ptr)beginPage, (unsigned int)sizePaged, flags); - AvmAssert(retval == 0); + NanoAssert(retval == 0); (void)retval; } diff --git a/js/src/nanojit/VMPI.h b/js/src/nanojit/VMPI.h index 63838579180..4938dfcf3e7 100644 --- a/js/src/nanojit/VMPI.h +++ b/js/src/nanojit/VMPI.h @@ -85,6 +85,11 @@ typedef unsigned __int64 uint64_t; #define VMPI_strncat strncat #define VMPI_strcpy strcpy #define VMPI_sprintf sprintf +#ifdef _MSC_VER +# define VMPI_snprintf sprintf_s +#else +# define VMPI_snprintf snprintf +#endif #define VMPI_vfprintf vfprintf #define VMPI_memset memset #define VMPI_isdigit isdigit diff --git a/js/src/nanojit/avmplus.cpp b/js/src/nanojit/avmplus.cpp index 085e740e749..e46b5d20292 100644 --- a/js/src/nanojit/avmplus.cpp +++ b/js/src/nanojit/avmplus.cpp @@ -62,15 +62,7 @@ avmplus::AvmLog(char const *msg, ...) { #ifdef _DEBUG void NanoAssertFail() { - #if defined(WIN32) - DebugBreak(); - exit(3); - #elif defined(XP_OS2) || (defined(__GNUC__) && defined(__i386)) - asm("int $3"); - abort(); - #else - abort(); - #endif + abort(); } #endif diff --git a/js/src/nanojit/avmplus.h b/js/src/nanojit/avmplus.h index fb2a6392bf4..dd3c1a6d202 100644 --- a/js/src/nanojit/avmplus.h +++ b/js/src/nanojit/avmplus.h @@ -90,12 +90,11 @@ void NanoAssertFail(); #endif -#define AvmAssert(x) assert(x) -#define AvmAssertMsg(x, y) -#define AvmDebugLog(x) printf x - #if defined(AVMPLUS_IA32) #if defined(_MSC_VER) + +# define AVMPLUS_HAS_RDTSC 1 + @@ -104,24 +103,35 @@ __declspec(naked) static inline __int64 rdtsc() { __asm { rdtsc; ret; } } + #elif defined(SOLARIS) + +# define AVMPLUS_HAS_RDTSC 1 + static inline unsigned long long rdtsc(void) { unsigned long long int x; asm volatile (".byte 0x0f, 0x31" : "=A" (x)); return x; } + #elif defined(__i386__) + +# define AVMPLUS_HAS_RDTSC 1 + static __inline__ unsigned long long rdtsc(void) { unsigned long long int x; __asm__ volatile (".byte 0x0f, 0x31" : "=A" (x)); return x; } + #endif /* compilers */ #elif defined(__x86_64__) +# define AVMPLUS_HAS_RDTSC 1 + static __inline__ uint64_t rdtsc(void) { unsigned hi, lo; @@ -131,6 +141,8 @@ static __inline__ uint64_t rdtsc(void) #elif defined(_MSC_VER) && defined(_M_AMD64) +# define AVMPLUS_HAS_RDTSC 1 + #include <intrin.h> #pragma intrinsic(__rdtsc) static inline unsigned __int64 rdtsc(void) { @@ -141,6 +153,8 @@ #elif defined(__powerpc__) +# define AVMPLUS_HAS_RDTSC 1 + typedef unsigned long long int unsigned long long; static __inline__ unsigned long long rdtsc(void) { @@ -165,6 +179,10 @@ static __inline__ unsigned long long rdtsc(void) #endif /* architecture */ +#ifndef AVMPLUS_HAS_RDTSC +# define AVMPLUS_HAS_RDTSC 0 +#endif + struct JSContext; #ifdef PERFM @@ -250,10 +268,6 @@ namespace avmplus { * on a set of items or conditions. Class BitSet provides functions * to manipulate individual bits in the vector. * - * Since most vectors are rather small an array of longs is used by - * default to house the value of the bits. If more bits are needed - * then an array is allocated dynamically outside of this object. - * * This object is not optimized for a fixed sized bit vector * it instead allows for dynamically growing the bit vector.
*/ @@ -266,23 +280,19 @@ BitSet() { capacity = kDefaultCapacity; + ar = (long*)calloc(capacity, sizeof(long)); reset(); } ~BitSet() { - if (capacity > kDefaultCapacity) - free(bits.ptr); + free(ar); } void reset() { - if (capacity > kDefaultCapacity) - for(int i=0; i<capacity; i++) - bits.ptr[i] = 0; - else - for(int i=0; i<capacity; i++) - bits.ar[i] = 0; + for (int i = 0; i < capacity; i++) + ar[i] = 0; } void set(int bitNbr) { int index = bitNbr / kUnit; int bit = bitNbr % kUnit; if (index >= capacity) grow(index+1); - if (capacity > kDefaultCapacity) - bits.ptr[index] |= (1<<bit); - else - bits.ar[index] |= (1<<bit); + ar[index] |= (1<<bit); } void clear(int bitNbr) { int index = bitNbr / kUnit; int bit = bitNbr % kUnit; if (index < capacity) { - if (capacity > kDefaultCapacity) - bits.ptr[index] &= ~(1<<bit); - else - bits.ar[index] &= ~(1<<bit); + ar[index] &= ~(1<<bit); } } bool get(int bitNbr) const { int index = bitNbr / kUnit; int bit = bitNbr % kUnit; bool value = false; if (index < capacity) { - if (capacity > kDefaultCapacity) - value = ( bits.ptr[index] & (1<<bit) ) ? true : false; - else - value = ( bits.ar[index] & (1<<bit) ) ? true : false; + value = ( ar[index] & (1<<bit) ) ? true : false; } return value; } private: // Grow the array until at least newCapacity big void grow(int newCapacity) { // create vector that is 2x bigger than requested newCapacity *= 2; - long* newBits = (long*)calloc(1, newCapacity * sizeof(long)); + long* newAr = (long*)calloc(newCapacity, sizeof(long)); // copy the old one - if (capacity > kDefaultCapacity) - for(int i=0; i<capacity; i++) - newBits[i] = bits.ptr[i]; - else - for(int i=0; i<capacity; i++) - newBits[i] = bits.ar[i]; + for (int i = 0; i < capacity; i++) + newAr[i] = ar[i]; // in with the new out with the old - if (capacity > kDefaultCapacity) - free(bits.ptr); + free(ar); - bits.ptr = newBits; + ar = newAr; capacity = newCapacity; } - // by default we use the array, but if the vector - // size grows beyond kDefaultCapacity we allocate - // space dynamically. int capacity; - union - { - long ar[kDefaultCapacity]; - long* ptr; - } - bits; + long* ar; }; } diff --git a/js/src/nanojit/njconfig.h b/js/src/nanojit/njconfig.h index 9b5c3ca5361..ac4f8092099 100644 --- a/js/src/nanojit/njconfig.h +++ b/js/src/nanojit/njconfig.h @@ -52,12 +52,12 @@ namespace nanojit * A struct used to configure the assumptions that Assembler can make when * generating code. The ctor will fill in all fields with the most reasonable * values it can derive from compiler flags and/or runtime detection, but - * the embedder is free to override any or all of them as it sees fit. + * the embedder is free to override any or all of them as it sees fit. * Using the ctor-provided default setup is guaranteed to provide a safe * runtime environment (though perhaps suboptimal in some cases), so an embedder * should replace these values with great care. * - * Note that although many fields are used on only specific architecture(s), + * Note that although many fields are used on only specific architecture(s), * this struct is deliberately declared without ifdef's for them, so (say) ARM-specific * fields are declared everywhere. This reduces build dependencies (so that this * files does not require nanojit.h to be included beforehand) and also reduces @@ -69,29 +69,29 @@ namespace nanojit public: // fills in reasonable default values for all fields. Config(); - + // ARM architecture to assume when generate instructions for (currently, 5 <= arm_arch <= 7) uint8_t arm_arch; // If true, use CSE. uint32_t cseopt:1; - + // Can we use SSE2 instructions? (x86-only) uint32_t i386_sse2:1; - + // Can we use cmov instructions? (x86-only) uint32_t i386_use_cmov:1; - + // Should we use a virtual stack pointer? (x86-only) uint32_t i386_fixed_esp:1; // Whether or not to generate VFP instructions. (ARM only) uint32_t arm_vfp:1; - + // @todo, document me uint32_t arm_show_stats:1; - // If true, use softfloat for all floating point operations, + // If true, use softfloat for all floating point operations, // whether or not an FPU is present.
(ARM only for now, but might also includes MIPS in the future) uint32_t soft_float:1; }; diff --git a/js/src/nanojit/njcpudetect.h b/js/src/nanojit/njcpudetect.h index 6d669003df8..7f8f1471bda 100644 --- a/js/src/nanojit/njcpudetect.h +++ b/js/src/nanojit/njcpudetect.h @@ -62,7 +62,7 @@ // GCC and RealView usually define __ARM_ARCH__ #if defined(__ARM_ARCH__) - + #define NJ_COMPILER_ARM_ARCH __ARM_ARCH__ // ok, try well-known GCC flags ( see http://gcc.gnu.org/onlinedocs/gcc/ARM-Options.html ) @@ -97,7 +97,7 @@ #define NJ_COMPILER_ARM_ARCH _M_ARM #else - + // non-numeric value #define NJ_COMPILER_ARM_ARCH "Unable to determine valid NJ_COMPILER_ARM_ARCH (nanojit only supports ARMv5 or later)" diff --git a/js/src/shell/js.cpp b/js/src/shell/js.cpp index 0d4e4f6d036..90d53ee2988 100644 --- a/js/src/shell/js.cpp +++ b/js/src/shell/js.cpp @@ -476,12 +476,15 @@ Process(JSContext *cx, JSObject *obj, char *filename, JSBool forceTTY) size_t len = 0; /* initialize to avoid warnings */ do { ScheduleWatchdog(cx->runtime, -1); - jsrefcount rc = JS_SuspendRequest(cx); gCanceled = false; errno = 0; - char *line = GetLine(file, startline == lineno ? "js> " : ""); + + char *line; + { + JSAutoSuspendRequest suspended(cx); + line = GetLine(file, startline == lineno ? "js> " : ""); + } if (!line) { - JS_ResumeRequest(cx, rc); if (errno) { JS_ReportError(cx, strerror(errno)); free(buffer); @@ -505,7 +508,6 @@ Process(JSContext *cx, JSObject *obj, char *filename, JSBool forceTTY) if (!newBuf) { free(buffer); free(line); - JS_ResumeRequest(cx, rc); JS_ReportOutOfMemory(cx); return; } @@ -519,7 +521,6 @@ Process(JSContext *cx, JSObject *obj, char *filename, JSBool forceTTY) free(line); } lineno++; - JS_ResumeRequest(cx, rc); if (!ScheduleWatchdog(cx->runtime, gTimeoutInterval)) { hitEOF = JS_TRUE; break; @@ -1820,7 +1821,7 @@ DisassembleValue(JSContext *cx, jsval v, bool lines, bool recursive) JSObjectArray *objects = script->objects(); for (uintN i = 0; i != objects->length; ++i) { JSObject *obj = objects->vector[i]; - if (HAS_FUNCTION_CLASS(obj)) { + if (obj->isFunction()) { putchar('\n'); if (!DisassembleValue(cx, OBJECT_TO_JSVAL(obj), lines, recursive)) { @@ -1927,8 +1928,15 @@ DisassWithSrc(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, /* burn the leading lines */ line2 = JS_PCToLineNumber(cx, script, pc); - for (line1 = 0; line1 < line2 - 1; line1++) - fgets(linebuf, LINE_BUF_LEN, file); + for (line1 = 0; line1 < line2 - 1; line1++) { + char *tmp = fgets(linebuf, LINE_BUF_LEN, file); + if (!tmp) { + JS_ReportError(cx, "failed to read %s fully", + script->filename); + ok = JS_FALSE; + goto bail; + } + } bupline = 0; while (pc < end) { @@ -2772,6 +2780,8 @@ split_getObjectOps(JSContext *cx, JSClass *clasp) if (!split_objectops.thisObject) { memcpy(&split_objectops, &js_ObjectOps, sizeof split_objectops); split_objectops.thisObject = split_thisObject; + split_objectops.call = NULL; + split_objectops.construct = NULL; } return &split_objectops; @@ -2954,7 +2964,6 @@ EvalInContext(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, const jschar *src; size_t srclen; JSBool lazy, split, ok; - jsval v; JSStackFrame *fp; sobj = NULL; @@ -2970,7 +2979,7 @@ EvalInContext(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, } JS_SetOptions(scx, JS_GetOptions(cx)); - JS_BeginRequest(scx); + JS_TransferRequest(cx, scx); src = JS_GetStringChars(str); srclen = JS_GetStringLength(str); split = lazy = JS_FALSE; @@ -2994,12 +3003,12 @@ EvalInContext(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, ok = 
JS_FALSE; goto out; } - v = BOOLEAN_TO_JSVAL(lazy); - ok = JS_SetProperty(cx, sobj, "lazy", &v); + AutoValueRooter root(scx, BOOLEAN_TO_JSVAL(lazy)); + ok = JS_SetProperty(scx, sobj, "lazy", root.addr()); if (!ok) goto out; if (split) - sobj = split_outerObject(cx, sobj); + sobj = split_outerObject(scx, sobj); } if (srclen == 0) { @@ -3009,7 +3018,7 @@ EvalInContext(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, fp = JS_GetScriptedCaller(cx, NULL); JS_SetGlobalObject(scx, sobj); JS_ToggleOptions(scx, JSOPTION_DONT_REPORT_UNCAUGHT); - OBJ_TO_INNER_OBJECT(cx, sobj); + OBJ_TO_INNER_OBJECT(scx, sobj); if (!sobj) { ok = JS_FALSE; goto out; @@ -3019,22 +3028,72 @@ EvalInContext(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, JS_PCToLineNumber(cx, fp->script, fp->regs->pc), rval); - if (!ok) { - if (JS_GetPendingException(scx, &v)) - JS_SetPendingException(cx, v); - else - JS_ReportOutOfMemory(cx); - } } out: - JS_EndRequest(scx); + jsval exceptionValue = JSVAL_NULL; + JSBool exception = !ok && JS_GetPendingException(scx, &exceptionValue); + + JS_TransferRequest(scx, cx); + if (exception) + JS_SetPendingException(cx, exceptionValue); + else if (!ok) + JS_ClearPendingException(cx); + WITH_LOCKED_CONTEXT_LIST( JS_DestroyContextNoGC(scx) ); return ok; } +static JSBool +EvalInFrame(JSContext *cx, uintN argc, jsval *vp) +{ + jsval *argv = JS_ARGV(cx, vp); + if (argc < 2 || + !JSVAL_IS_INT(argv[0]) || + !JSVAL_IS_STRING(argv[1])) { + JS_ReportError(cx, "Invalid arguments to evalInFrame"); + return JS_FALSE; + } + + uint32 upCount = JSVAL_TO_INT(argv[0]); + JSString *str = JSVAL_TO_STRING(argv[1]); + + bool saveCurrent = (argc >= 3 && JSVAL_IS_BOOLEAN(argv[2])) + ? (bool)JSVAL_TO_SPECIAL(argv[2]) + : false; + + JS_ASSERT(cx->fp); + + JSStackFrame *fp = cx->fp; + for (uint32 i = 0; i < upCount; ++i) { + if (!fp->down) + break; + fp = fp->down; + } + + if (!fp->script) { + JS_ReportError(cx, "cannot eval in non-script frame"); + return JS_FALSE; + } + + JSStackFrame *oldfp = NULL; + if (saveCurrent) + oldfp = JS_SaveFrameChain(cx); + + JSBool ok = JS_EvaluateUCInStackFrame(cx, fp, str->chars(), str->length(), + fp->script->filename, + JS_PCToLineNumber(cx, fp->script, + fp->regs->pc), + vp); + + if (saveCurrent) + JS_RestoreFrameChain(cx, oldfp); + + return ok; +} + static JSBool ShapeOf(JSContext *cx, uintN argc, jsval *vp) { @@ -3048,7 +3107,7 @@ ShapeOf(JSContext *cx, uintN argc, jsval *vp) *vp = JSVAL_ZERO; return JS_TRUE; } - if (!OBJ_IS_NATIVE(obj)) { + if (!obj->isNative()) { *vp = INT_TO_JSVAL(-1); return JS_TRUE; } @@ -3094,7 +3153,7 @@ Sleep_fn(JSContext *cx, uintN argc, jsval *vp) if (t_ticks == 0) { JS_YieldRequest(cx); } else { - jsrefcount rc = JS_SuspendRequest(cx); + JSAutoSuspendRequest suspended(cx); PR_Lock(gWatchdogLock); PRIntervalTime to_wakeup = PR_IntervalNow() + t_ticks; for (;;) { @@ -3107,7 +3166,6 @@ Sleep_fn(JSContext *cx, uintN argc, jsval *vp) t_ticks = to_wakeup - now; } PR_Unlock(gWatchdogLock); - JS_ResumeRequest(cx, rc); } return !gCanceled; } @@ -3195,9 +3253,9 @@ Scatter(JSContext *cx, uintN argc, jsval *vp) jsuint n; /* number of threads */ JSObject *inArr; JSObject *arr; + JSObject *global; ScatterData sd; JSBool ok; - jsrefcount rc; sd.lock = NULL; sd.cvar = NULL; @@ -3264,6 +3322,7 @@ Scatter(JSContext *cx, uintN argc, jsval *vp) } } + global = JS_GetGlobalObject(cx); for (i = 1; i < n; i++) { JSContext *newcx; WITH_LOCKED_CONTEXT_LIST( @@ -3271,9 +3330,11 @@ Scatter(JSContext *cx, uintN argc, jsval *vp) ); if (!newcx) goto fail; - 
JS_BeginRequest(newcx); - JS_SetGlobalObject(newcx, JS_GetGlobalObject(cx)); - JS_EndRequest(newcx); + + { + JSAutoTransferRequest transfer(cx, newcx); + JS_SetGlobalObject(newcx, global); + } JS_ClearContextThread(newcx); sd.threads[i].cx = newcx; } @@ -3306,11 +3367,12 @@ Scatter(JSContext *cx, uintN argc, jsval *vp) DoScatteredWork(cx, &sd.threads[0]); - rc = JS_SuspendRequest(cx); - for (i = 1; i < n; i++) { - PR_JoinThread(sd.threads[i].thr); + { + JSAutoSuspendRequest suspended(cx); + for (i = 1; i < n; i++) { + PR_JoinThread(sd.threads[i].thr); + } } - JS_ResumeRequest(cx, rc); success: arr = JS_NewArrayObject(cx, n, sd.results); @@ -3541,7 +3603,9 @@ CancelExecution(JSRuntime *rt) static const char msg[] = "Script runs for too long, terminating.\n"; #if defined(XP_UNIX) && !defined(JS_THREADSAFE) /* It is not safe to call fputs from signals. */ - write(2, msg, sizeof(msg) - 1); + /* Dummy assignment avoids GCC warning on "attribute warn_unused_result" */ + ssize_t dummy = write(2, msg, sizeof(msg) - 1); + (void)dummy; #else fputs(msg, stderr); #endif @@ -3789,6 +3853,7 @@ static JSFunctionSpec shell_functions[] = { JS_FN("getslx", GetSLX, 1,0), JS_FN("toint32", ToInt32, 1,0), JS_FS("evalcx", EvalInContext, 1,0,0), + JS_FN("evalInFrame", EvalInFrame, 2,0), JS_FN("shapeOf", ShapeOf, 1,0), #ifdef MOZ_SHARK JS_FS("startShark", js_StartShark, 0,0,0), @@ -3890,6 +3955,8 @@ static const char *const shell_help_messages[] = { " if (s == '' && !o) return new o with eager standard classes\n" " if (s == 'lazy' && !o) return new o with lazy standard classes\n" " if (s == 'split' && !o) return new split-object o with lazy standard classes", +"evalInFrame(n,str,save) Evaluate 'str' in the nth up frame.\n" +" If 'save' (default false), save the frame chain", "shapeOf(obj) Get the shape of obj (an implementation detail)", #ifdef MOZ_SHARK "startShark() Start a Shark session.\n" @@ -4839,6 +4906,10 @@ main(int argc, char **argv, char **envp) #else if (!JS_InitStandardClasses(cx, glob)) return 1; +#endif +#ifdef JS_HAS_CTYPES + if (!JS_InitCTypesClass(cx, glob)) + return 1; #endif if (!JS_DefineFunctions(cx, glob, shell_functions)) return 1; diff --git a/js/src/tests/js1_5/Regress/jstests.list b/js/src/tests/js1_5/Regress/jstests.list index 23cf2f32b44..d0f2a22420b 100644 --- a/js/src/tests/js1_5/Regress/jstests.list +++ b/js/src/tests/js1_5/Regress/jstests.list @@ -260,6 +260,7 @@ script regress-451884.js script regress-451946.js script regress-452008.js script regress-452170.js +script regress-452189.js script regress-452333.js script regress-452336.js script regress-452346.js @@ -346,6 +347,7 @@ script regress-482421.js script regress-482783.js script regress-483103.js script regress-501124.js +script regress-503860.js script regress-504078.js script regress-506567.js script regress-511859.js diff --git a/js/src/tests/js1_5/Regress/regress-452189.js b/js/src/tests/js1_5/Regress/regress-452189.js new file mode 100644 index 00000000000..1305b6e6baf --- /dev/null +++ b/js/src/tests/js1_5/Regress/regress-452189.js @@ -0,0 +1,24 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ +/* + * Any copyright is dedicated to the Public Domain. 
+ * http://creativecommons.org/licenses/publicdomain/ + * Contributor: Geoff Garen + */ + +var gTestfile = 'regress-452189.js'; +//----------------------------------------------------------------------------- +var BUGNUMBER = 452189; +var summary = "Don't shadow a readonly or setter proto-property"; +var expect = "PASS"; +var actual = "FAIL"; + +function c() { + this.x = 3; +} + + +new c; +Object.prototype.__defineSetter__('x', function(){ actual = expect; }) +new c; + +reportCompare(expect, actual, summary); diff --git a/js/src/tests/js1_5/Regress/regress-503860.js b/js/src/tests/js1_5/Regress/regress-503860.js new file mode 100644 index 00000000000..2ae44e8e627 --- /dev/null +++ b/js/src/tests/js1_5/Regress/regress-503860.js @@ -0,0 +1,25 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ +/* + * Any copyright is dedicated to the Public Domain. + * http://creativecommons.org/licenses/publicdomain/ + * Contributor: Jason Orendorff + */ + +var gTestfile = 'regress-503860.js'; +//----------------------------------------------------------------------------- +var BUGNUMBER = 503860; +var summary = "Don't shadow a readonly or setter proto-property"; +var expect = "PASS"; +var actual = "FAIL"; +var a = {y: 1}; + +function B(){} +B.prototype.__defineSetter__('x', function setx(val) { actual = expect; }); +var b = new B; +b.y = 1; + +var arr = [a, b]; // same shape prior to bug 497789 fix +for each (var obj in arr) + obj.x = 2; // should call b's setter but doesn't + +reportCompare(expect, actual, summary); diff --git a/js/src/tests/js1_8_1/extensions/jstests.list b/js/src/tests/js1_8_1/extensions/jstests.list index de844256d18..8610b773b2f 100644 --- a/js/src/tests/js1_8_1/extensions/jstests.list +++ b/js/src/tests/js1_8_1/extensions/jstests.list @@ -7,7 +7,7 @@ script regress-452498-193.js script regress-452498-196.js script regress-452498-224.js script regress-466905-04.js -script regress-466905-05.js +skip script regress-466905-05.js # no-op in browser, fails in shell - see bug 554793 script regress-477158.js script regress-477187.js script regress-520572.js diff --git a/js/src/tests/js1_8_1/trace/trace-test.js b/js/src/tests/js1_8_1/trace/trace-test.js index 917f21bb537..fb8b3ceb9a2 100644 --- a/js/src/tests/js1_8_1/trace/trace-test.js +++ b/js/src/tests/js1_8_1/trace/trace-test.js @@ -5290,20 +5290,22 @@ test(testConstructorBail); function testNewArrayCount() { + function count(a) { var n = 0; for (var p in a) n++; return n; } var a = []; for (var i = 0; i < 5; i++) a = [0]; - assertEq(a.__count__, 1); + assertEq(count(a), 1); for (var i = 0; i < 5; i++) a = [0, , 2]; - assertEq(a.__count__, 2); + assertEq(count(a), 2); } test(testNewArrayCount); function testNewArrayCount2() { + function count(a) { var n = 0; for (var p in a) n++; return n; } var x = 0; for (var i = 0; i < 10; ++i) - x = new Array(1,2,3).__count__; + x = count(new Array(1,2,3)); return x; } testNewArrayCount2.expected = 3; diff --git a/js/src/tests/js1_8_5/extensions/typedarray.js b/js/src/tests/js1_8_5/extensions/typedarray.js index 81b8a27e31f..0e088c7f96c 100644 --- a/js/src/tests/js1_8_5/extensions/typedarray.js +++ b/js/src/tests/js1_8_5/extensions/typedarray.js @@ -182,7 +182,6 @@ function test() //checkThrows(function() a[-10] = 0); check(function() (a[0] = "10") && (a[0] == 10)); - // check Uint8ClampedArray, which is an extension to this extension a = new Uint8ClampedArray(4); a[0] = 128; @@ -195,6 +194,31 @@ function test() check(function() a[2] == 0); check(function() a[3] == 0); 
+ // check handling of holes and non-numeric values + var x = Array(5); + x[0] = "hello"; + x[1] = { }; + //x[2] is a hole + x[3] = undefined; + x[4] = true; + + a = new Uint8Array(x); + check(function() a[0] == 0); + check(function() a[1] == 0); + check(function() a[2] == 0); + check(function() a[3] == 0); + check(function() a[4] == 1); + + a = new Float32Array(x); + check(function() !(a[0] == a[0])); + check(function() !(a[1] == a[1])); + check(function() !(a[2] == a[2])); + check(function() !(a[3] == a[3])); + check(function() a[4] == 1); + + a = new ArrayBuffer(0x10); + checkThrows(function() new Uint32Array(a, 4, 0x3FFFFFFF)); + print ("done"); reportCompare(0, TestFailCount, "typed array tests"); diff --git a/js/src/tests/js1_8_5/regress/jstests.list b/js/src/tests/js1_8_5/regress/jstests.list index 28c43b28058..0f28b6f0c01 100644 --- a/js/src/tests/js1_8_5/regress/jstests.list +++ b/js/src/tests/js1_8_5/regress/jstests.list @@ -1,4 +1,12 @@ url-prefix ../../jsreftest.html?test=js1_8_5/regress/ +script regress-500528.js fails script regress-533876.js +script regress-541255-0.js +script regress-541255-1.js +script regress-541255-2.js +script regress-541255-3.js +script regress-541255-4.js script regress-541455.js script regress-546615.js +script regress-555246-0.js +fails script regress-555246-1.js diff --git a/js/src/tests/js1_8_5/regress/regress-500528.js b/js/src/tests/js1_8_5/regress/regress-500528.js index a3e1c5b6b30..bf2908e201a 100644 --- a/js/src/tests/js1_8_5/regress/regress-500528.js +++ b/js/src/tests/js1_8_5/regress/regress-500528.js @@ -19,4 +19,4 @@ for each (var obj in [c1, c2]) s += obj.x; assertEq(s, 'ab'); -print(" PASSED! Property cache soundness: objects with the same shape but different prototypes."); +reportCompare(0, 0, "Property cache soundness: objects with the same shape but different prototypes."); diff --git a/js/src/tests/js1_8_5/regress/regress-541255-0.js b/js/src/tests/js1_8_5/regress/regress-541255-0.js new file mode 100644 index 00000000000..6165b6136aa --- /dev/null +++ b/js/src/tests/js1_8_5/regress/regress-541255-0.js @@ -0,0 +1,14 @@ +/* + * Any copyright is dedicated to the Public Domain. + * http://creativecommons.org/licenses/publicdomain/ + * Contributor: Gary Kwong + */ +(function(e) { + eval("\ + [(function() {\ + x.k = function(){}\ + })() \ + for (x in [0])]\ + ") +})(); +reportCompare(0, 0, ""); diff --git a/js/src/tests/js1_8_5/regress/regress-541255-1.js b/js/src/tests/js1_8_5/regress/regress-541255-1.js new file mode 100644 index 00000000000..e7255c67c8b --- /dev/null +++ b/js/src/tests/js1_8_5/regress/regress-541255-1.js @@ -0,0 +1,23 @@ +/* + * Any copyright is dedicated to the Public Domain. + * http://creativecommons.org/licenses/publicdomain/ + * Contributor: Gary Kwong + */ + +function f(e) { + eval("\ + [((function g(o, bbbbbb) {\ + if (aaaaaa = bbbbbb) {\ + return window.r = []\ + }\ + g(aaaaaa, bbbbbb + 1);\ + #3={}\ + })([], 0)) \ + for (window in this) \ + for each(x in [0, 0])\ + ]\ + ") +} +t = 1; +f(); +reportCompare(0, 0, ""); diff --git a/js/src/tests/js1_8_5/regress/regress-541255-2.js b/js/src/tests/js1_8_5/regress/regress-541255-2.js new file mode 100644 index 00000000000..dfb65c6d6e9 --- /dev/null +++ b/js/src/tests/js1_8_5/regress/regress-541255-2.js @@ -0,0 +1,11 @@ +/* + * Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/licenses/publicdomain/ + * Contributors: Gary Kwong and Jason Orendorff + */ + +function f(e) { + eval("[function () { w.r = 0 }() for (w in [0])]") +} +f(0); +reportCompare(0, 0, ""); diff --git a/js/src/tests/js1_8_5/regress/regress-541255-3.js b/js/src/tests/js1_8_5/regress/regress-541255-3.js new file mode 100644 index 00000000000..ec1ab791636 --- /dev/null +++ b/js/src/tests/js1_8_5/regress/regress-541255-3.js @@ -0,0 +1,13 @@ +/* + * Any copyright is dedicated to the Public Domain. + * http://creativecommons.org/licenses/publicdomain/ + * Contributors: Gary Kwong and Jason Orendorff + */ + +function f(y) { + eval("let (z=2, w=y) { (function () { w.p = 7; })(); }"); +} +var x = {}; +f(x); +assertEq(x.p, 7); +reportCompare(0, 0, ""); diff --git a/js/src/tests/js1_8_5/regress/regress-541255-4.js b/js/src/tests/js1_8_5/regress/regress-541255-4.js new file mode 100644 index 00000000000..6113a7f275a --- /dev/null +++ b/js/src/tests/js1_8_5/regress/regress-541255-4.js @@ -0,0 +1,11 @@ +/* + * Any copyright is dedicated to the Public Domain. + * http://creativecommons.org/licenses/publicdomain/ + * Contributors: Gary Kwong and Jason Orendorff + */ + +function f(e) { + eval("[function () { w.r = 0 }() for (w in [0,1,2,3,4,5,6,7,8,9])]") +} +f(0); +reportCompare(0, 0, ""); diff --git a/js/src/tests/js1_8_5/regress/regress-555246-0.js b/js/src/tests/js1_8_5/regress/regress-555246-0.js new file mode 100644 index 00000000000..a79ac62045c --- /dev/null +++ b/js/src/tests/js1_8_5/regress/regress-555246-0.js @@ -0,0 +1,14 @@ +/* + * Any copyright is dedicated to the Public Domain. + * http://creativecommons.org/licenses/publicdomain/ + * Contributor: Jason Orendorff + */ + +if (typeof evalcx == 'function') { + var cx = evalcx(""); + evalcx("function f() { return this; }", cx); + var f = cx.f; + assertEq(f(), cx); +} + +reportCompare(0, 0, ""); diff --git a/js/src/tests/js1_8_5/regress/regress-555246-1.js b/js/src/tests/js1_8_5/regress/regress-555246-1.js new file mode 100644 index 00000000000..af3f3e1205c --- /dev/null +++ b/js/src/tests/js1_8_5/regress/regress-555246-1.js @@ -0,0 +1,12 @@ +/* + * Any copyright is dedicated to the Public Domain. + * http://creativecommons.org/licenses/publicdomain/ + * Contributor: Jason Orendorff + */ + +assertEq(typeof evalcx, "function", "") +var cx = evalcx(""); +evalcx("function f() { return this; }", cx); +f = cx.f; +assertEq(f(), cx); +reportCompare(0, 0, ""); diff --git a/js/src/trace-test/tests/basic/bigLoadStoreDisp.js b/js/src/trace-test/tests/basic/bigLoadStoreDisp.js new file mode 100644 index 00000000000..528ba254b3c --- /dev/null +++ b/js/src/trace-test/tests/basic/bigLoadStoreDisp.js @@ -0,0 +1,25 @@ +// In Nanojit, loads and stores have a maximum displacement of 16-bits. Any +// displacements larger than that should be split off into a separate +// instruction that adds the displacement to the base pointer. This +// program tests if this is done correctly. +// +// x.y ends up having a dslot offset of 79988, because of the 20000 array +// elements before it. If Nanojit incorrectly stores this offset into a +// 16-bit value it will truncate to 14452 (because 79988 - 65536 == 14452). +// This means that the increments in the second loop will be done to one of +// the array elements instead of x.y. And so x.y's final value will be +// (99 + HOTLOOP) instead of 1099. +// +// Note that setting x.y to 99 and checking its value at the end will +// access the correct location because those lines are interpreted. Phew. 
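+// (Same arithmetic in hex, as a sanity check: 79988 is 0x13874, and a 16-bit
+// field keeps only the low half, 0x3874, which is 14452.)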
+ +var x = {} +for (var i = 0; i < 20000; i++) + x[i] = 0; +x.y = 99; // not traced, correctly accessed + +for (var i = 0; i < 1000; ++i) { + x.y++; // traced, will access an array elem if disp was truncated +} +assertEq(x.y, 1099); // not traced, correctly accessed + diff --git a/js/src/trace-test/tests/basic/bug551705.js b/js/src/trace-test/tests/basic/bug551705.js new file mode 100644 index 00000000000..a97d9886875 --- /dev/null +++ b/js/src/trace-test/tests/basic/bug551705.js @@ -0,0 +1,39 @@ +(Function("\ + for each(let x in [\n\ + true,\n\ + (1),\n\ + (1),\n\ + (1),\n\ + (1),\n\ + true,\n\ + true,\n\ + true,\n\ + (1),\n\ + true,\n\ + true,\n\ + (1),\n\ + true,\n\ + true,\n\ + (1),\n\ + (1),\n\ + true,\n\ + true,\n\ + true,\n\ + true\n\ + ]) { \n\ + ((function f(aaaaaa) {\n\ + return aaaaaa.length == 0 ? 0 : aaaaaa[0] + f(aaaaaa.slice(1))\n\ + })([\n\ + x,\n\ + Math.I,\n\ + '',\n\ + null,\n\ + Math.I,\n\ + null,\n\ + new String(),\n\ + new String()\n\ + ]))\n\ +}"))() + +/* Don't assert/crash. */ + diff --git a/js/src/trace-test/tests/basic/bug552196.js b/js/src/trace-test/tests/basic/bug552196.js new file mode 100644 index 00000000000..d5262348515 --- /dev/null +++ b/js/src/trace-test/tests/basic/bug552196.js @@ -0,0 +1,12 @@ +(Function("\ + for (a = 0; a < 5; a++)\n\ + (function f(b) {\n\ + if (b > 0) {\n\ + f(b - 1)\n\ + }\n\ + })\n\ + (3)\n\ +"))() + +/* Don't assert. */ + diff --git a/js/src/trace-test/tests/basic/testBug552248.js b/js/src/trace-test/tests/basic/testBug552248.js new file mode 100644 index 00000000000..936b33a5a15 --- /dev/null +++ b/js/src/trace-test/tests/basic/testBug552248.js @@ -0,0 +1,35 @@ +var a = new Array(); + +function i(save) { + var x = 9; + evalInFrame(0, "a.push(x)", save); + evalInFrame(1, "a.push(z)", save); + evalInFrame(2, "a.push(z)", save); + evalInFrame(3, "a.push(y)", save); + evalInFrame(4, "a.push(x)", save); +} + +function h() { + var z = 5; + evalInFrame(0, "a.push(z)"); + evalInFrame(1, "a.push(y)"); + evalInFrame(2, "a.push(x)"); + evalInFrame(0, "i(false)"); + evalInFrame(0, "a.push(z)", true); + evalInFrame(1, "a.push(y)", true); + evalInFrame(2, "a.push(x)", true); + evalInFrame(0, "i(true)", true); +} + +function g() { + var y = 4; + h(); +} + +function f() { + var x = 3; + g(); +} + +f(); +assertEq(a+'', [5, 4, 3, 9, 5, 5, 4, 3, 5, 4, 3, 9, 5, 5, 4, 3]+''); diff --git a/js/src/trace-test/tests/basic/testBug554043.js b/js/src/trace-test/tests/basic/testBug554043.js new file mode 100644 index 00000000000..a0071aeb800 --- /dev/null +++ b/js/src/trace-test/tests/basic/testBug554043.js @@ -0,0 +1,6 @@ +(function () { + for (var a = 0; a < 5; a++) { + print(-false) + assertEq(-false, -0.0); + } +})() diff --git a/js/src/trace-test/tests/basic/testNewArrayCount.js b/js/src/trace-test/tests/basic/testNewArrayCount.js index dacbf97c4fa..cb7e6d9843c 100644 --- a/js/src/trace-test/tests/basic/testNewArrayCount.js +++ b/js/src/trace-test/tests/basic/testNewArrayCount.js @@ -1,11 +1,12 @@ function testNewArrayCount() { + function count(a) { var n = 0; for (var p in a) n++; return n; } var a = []; for (var i = 0; i < 5; i++) a = [0]; - assertEq(a.__count__, 1); + assertEq(count(a), 1); for (var i = 0; i < 5; i++) a = [0, , 2]; - assertEq(a.__count__, 2); + assertEq(count(a), 2); } testNewArrayCount(); diff --git a/js/src/trace-test/tests/basic/testNewArrayCount2.js b/js/src/trace-test/tests/basic/testNewArrayCount2.js index 5b34332380f..6318e4c25f3 100644 --- a/js/src/trace-test/tests/basic/testNewArrayCount2.js +++ 
b/js/src/trace-test/tests/basic/testNewArrayCount2.js @@ -1,7 +1,8 @@ function testNewArrayCount2() { + function count(a) { var n = 0; for (var p in a) n++; return n; } var x = 0; for (var i = 0; i < 10; ++i) - x = new Array(1,2,3).__count__; + x = count(new Array(1,2,3)); return x; } assertEq(testNewArrayCount2(), 3); diff --git a/js/src/trace-test/tests/basic/testTypedArrays.js b/js/src/trace-test/tests/basic/testTypedArrays.js new file mode 100644 index 00000000000..485d1132c93 --- /dev/null +++ b/js/src/trace-test/tests/basic/testTypedArrays.js @@ -0,0 +1,104 @@ +function testBasicTypedArrays() +{ + var ar, aridx, idx; + + var a = new Uint8Array(16); + var b = new Uint16Array(16); + var c = new Uint32Array(16); + var d = new Int8Array(16); + var e = new Int16Array(16); + var f = new Int32Array(16); + + var g = new Float32Array(16); + var h = new Float64Array(16); + + var iarrays = [ a, b, c, d, e, f ]; + for (aridx = 0; aridx < iarrays.length; ++aridx) { + ar = iarrays[aridx]; + + for (idx = 0; idx < ar.length-4; ++idx) { + ar[idx] = 22; + ar[idx+1] = 12.7; + ar[idx+2] = "99"; + ar[idx+3] = { k: "thing" }; + ar[idx+4] = Infinity; + } + + assertEq(ar[ar.length-5], 22); + assertEq(ar[ar.length-4], 12); + assertEq(ar[ar.length-3], 99); + assertEq(ar[ar.length-2], 0); + assertEq(ar[ar.length-1], 0); + } + + var farrays = [ g, h ]; + for (aridx = 0; aridx < farrays.length; ++aridx) { + ar = farrays[aridx]; + + for (idx = 0; idx < ar.length-4; ++idx) { + ar[idx] = 22; + ar[idx+1] = 12.25; + ar[idx+2] = "99"; + ar[idx+3] = { k: "thing" }; + ar[idx+4] = Infinity; + } + + assertEq(ar[ar.length-5], 22); + assertEq(ar[ar.length-4], 12.25); + assertEq(ar[ar.length-3], 99); + assertEq(!(ar[ar.length-2] == ar[ar.length-2]), true); + assertEq(ar[ar.length-1], Infinity); + } +} + +function testSpecialTypedArrays() +{ + var ar, aridx, idx; + + ar = new Uint8ClampedArray(16); + for (idx = 0; idx < ar.length-4; ++idx) { + ar[idx] = -200; + ar[idx+1] = 127.5; + ar[idx+2] = 987; + ar[idx+3] = Infinity; + ar[idx+4] = "hello world"; + } + + assertEq(ar[ar.length-5], 0); + assertEq(ar[ar.length-4], 128); + assertEq(ar[ar.length-3], 255); + assertEq(ar[ar.length-2], 255); + assertEq(ar[ar.length-1], 0); +} + +function testTypedArrayOther() +{ + var ar = new Int32Array(16); + for (var i = 0; i < ar.length; ++i) { + ar[i] = i; + } + + for (var i = 0; i < ar.length; ++i) { + // deliberate out of bounds access + ar[i-2] = ar[i+2]; + } + + var t = 0; + for (var i = 0; i < ar.length; ++i) { + t += ar[i]; + } + + assertEq(t, 143); +} + +testBasicTypedArrays(); +testSpecialTypedArrays(); +testTypedArrayOther(); + +checkStats({ + // Note! These are all inner tree growing aborts, because we change + // the array type in the inner loop of the tests. This isn't ideal, + // and if we ever fix these to not report as aborts, this should go + // back to 0. + recorderAborted: 5 +}); diff --git a/js/src/vprof/vprof.h b/js/src/vprof/vprof.h index a71eea8245f..0ae030ab194 100644 --- a/js/src/vprof/vprof.h +++ b/js/src/vprof/vprof.h @@ -145,7 +145,7 @@ extern void* _tprof_before_id; (id != 0) ? 
\ _profileEntryValue (id, (int64_t) (v)) \ : \ - profileValue (&id, (char*) (e), -1, (int64_t) (v), ##__VA_ARGS__, NULL) \ + profileValue (&id, (char*) (e), -1, (int64_t) (v), NULL) \ ; \ } diff --git a/js/src/xpconnect/idl/nsIXPConnect.idl b/js/src/xpconnect/idl/nsIXPConnect.idl index 725b94f550c..57793758fda 100644 --- a/js/src/xpconnect/idl/nsIXPConnect.idl +++ b/js/src/xpconnect/idl/nsIXPConnect.idl @@ -65,6 +65,7 @@ [ptr] native JSClassPtr(JSClass); [ptr] native JSObjectPtr(JSObject); [ptr] native JSValPtr(jsval); + native JSPropertyOp(JSPropertyOp); native JSEqualityOp(JSEqualityOp); native JSID(jsid); [ptr] native voidPtrPtr(void*); @@ -396,7 +397,7 @@ interface nsIXPCFunctionThisTranslator : nsISupports { 0xbd, 0xd6, 0x0, 0x0, 0x64, 0x65, 0x73, 0x74 } } %} -[uuid(96540596-c21e-4183-bb47-f6a5c1ad2dba)] +[uuid(0332b12a-8103-4601-aed3-b9933a0d9441)] interface nsIXPConnect : nsISupports { %{ C++ @@ -868,5 +869,5 @@ interface nsIXPConnect : nsISupports #endif %} - [notxpcom] JSClassPtr getNativeWrapperClass(); + [notxpcom] void getNativeWrapperGetPropertyOp(out JSPropertyOp getProperty); }; diff --git a/js/src/xpconnect/shell/xpcshell.cpp b/js/src/xpconnect/shell/xpcshell.cpp index 6e06a356495..282b18a472f 100644 --- a/js/src/xpconnect/shell/xpcshell.cpp +++ b/js/src/xpconnect/shell/xpcshell.cpp @@ -553,6 +553,19 @@ GC(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) return JS_TRUE; } +#ifdef JS_GC_ZEAL +static JSBool +GCZeal(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval) +{ + uint32 zeal; + if (!JS_ValueToECMAUint32(cx, argv[0], &zeal)) + return JS_FALSE; + + JS_SetGCZeal(cx, (PRUint8)zeal); + return JS_TRUE; +} +#endif + #ifdef DEBUG static JSBool @@ -756,6 +769,9 @@ static JSFunctionSpec glob_functions[] = { {"dumpXPC", DumpXPC, 1,0,0}, {"dump", Dump, 1,0,0}, {"gc", GC, 0,0,0}, +#ifdef JS_GC_ZEAL + {"gczeal", GCZeal, 1,0,0}, +#endif {"clear", Clear, 1,0,0}, {"options", Options, 0,0,0}, #ifdef DEBUG diff --git a/js/src/xpconnect/src/XPCChromeObjectWrapper.cpp b/js/src/xpconnect/src/XPCChromeObjectWrapper.cpp index 0efa3d3e181..199742f4812 100644 --- a/js/src/xpconnect/src/XPCChromeObjectWrapper.cpp +++ b/js/src/xpconnect/src/XPCChromeObjectWrapper.cpp @@ -40,7 +40,7 @@ #include "xpcprivate.h" #include "nsDOMError.h" #include "jsdbgapi.h" -#include "jscntxt.h" // For JSAutoTempValueRooter. +#include "jscntxt.h" // For js::AutoValueRooter. 
#include "jsobj.h" #include "XPCNativeWrapper.h" #include "XPCWrapper.h" @@ -290,17 +290,16 @@ WrapObject(JSContext *cx, JSObject *parent, jsval v, jsval *vp) *vp = OBJECT_TO_JSVAL(wrapperObj); - jsval exposedProps = JSVAL_VOID; - JSAutoTempValueRooter tvr(cx, 1, &exposedProps); + js::AutoValueRooter exposedProps(cx, JSVAL_VOID); - if (!GetExposedProperties(cx, JSVAL_TO_OBJECT(v), &exposedProps)) { + if (!GetExposedProperties(cx, JSVAL_TO_OBJECT(v), exposedProps.addr())) { return JS_FALSE; } if (!JS_SetReservedSlot(cx, wrapperObj, XPCWrapper::sWrappedObjSlot, v) || - !JS_SetReservedSlot(cx, wrapperObj, XPCWrapper::sFlagsSlot, - JSVAL_ZERO) || - !JS_SetReservedSlot(cx, wrapperObj, sExposedPropsSlot, exposedProps)) { + !JS_SetReservedSlot(cx, wrapperObj, XPCWrapper::sFlagsSlot, JSVAL_ZERO) || + !JS_SetReservedSlot(cx, wrapperObj, sExposedPropsSlot, + exposedProps.value())) { return JS_FALSE; } @@ -324,7 +323,7 @@ ThrowException(nsresult rv, JSContext *cx) static inline JSObject * GetWrappedJSObject(JSContext *cx, JSObject *obj) { - JSClass *clasp = STOBJ_GET_CLASS(obj); + JSClass *clasp = obj->getClass(); if (!(clasp->flags & JSCLASS_IS_EXTENDED)) { return obj; } @@ -348,7 +347,7 @@ static inline JSObject * GetWrapper(JSObject *obj) { - while (STOBJ_GET_CLASS(obj) != &COWClass.base) { + while (obj->getClass() != &COWClass.base) { obj = obj->getProto(); if (!obj) { break; @@ -710,7 +709,7 @@ XPC_COW_Convert(JSContext *cx, JSObject *obj, JSType type, jsval *vp) return ThrowException(NS_ERROR_FAILURE, cx); } - if (!STOBJ_GET_CLASS(wrappedObj)->convert(cx, wrappedObj, type, vp)) { + if (!wrappedObj->getClass()->convert(cx, wrappedObj, type, vp)) { return JS_FALSE; } @@ -755,7 +754,7 @@ XPC_COW_Equality(JSContext *cx, JSObject *obj, jsval v, JSBool *bp) XPCWrappedNative *me = XPCWrappedNative::GetWrappedNativeOfJSObject(cx, obj); obj = me->GetFlatJSObject(); test = other->GetFlatJSObject(); - return ((JSExtendedClass *)STOBJ_GET_CLASS(obj))-> + return ((JSExtendedClass *)obj->getClass())-> equality(cx, obj, OBJECT_TO_JSVAL(test), bp); } diff --git a/js/src/xpconnect/src/XPCCrossOriginWrapper.cpp b/js/src/xpconnect/src/XPCCrossOriginWrapper.cpp index 9a963148278..4cbdde35b0c 100644 --- a/js/src/xpconnect/src/XPCCrossOriginWrapper.cpp +++ b/js/src/xpconnect/src/XPCCrossOriginWrapper.cpp @@ -40,7 +40,7 @@ #include "xpcprivate.h" #include "nsDOMError.h" #include "jsdbgapi.h" -#include "jscntxt.h" // For JSAutoTempValueRooter. +#include "jscntxt.h" // For js::AutoValueRooter. #include "XPCWrapper.h" #include "nsIDOMWindow.h" #include "nsIDOMWindowCollection.h" @@ -125,7 +125,7 @@ static inline JSObject * GetWrapper(JSObject *obj) { - while (STOBJ_GET_CLASS(obj) != &XPCCrossOriginWrapper::XOWClass.base) { + while (obj->getClass() != &XPCCrossOriginWrapper::XOWClass.base) { obj = obj->getProto(); if (!obj) { break; @@ -354,7 +354,7 @@ WrapObject(JSContext *cx, JSObject *parent, jsval *vp, XPCWrappedNative* wn) JSObject *wrappedObj; if (JSVAL_IS_PRIMITIVE(*vp) || !(wrappedObj = JSVAL_TO_OBJECT(*vp)) || - STOBJ_GET_CLASS(wrappedObj) == &XOWClass.base) { + wrappedObj->getClass() == &XOWClass.base) { return JS_TRUE; } @@ -368,7 +368,7 @@ WrapObject(JSContext *cx, JSObject *parent, jsval *vp, XPCWrappedNative* wn) // The parent must be the inner global object for its scope. 
parent = JS_GetGlobalForObject(cx, parent); - JSClass *clasp = STOBJ_GET_CLASS(parent); + JSClass *clasp = parent->getClass(); if (clasp->flags & JSCLASS_IS_EXTENDED) { JSExtendedClass *xclasp = reinterpret_cast(clasp); if (xclasp->innerObject) { @@ -384,7 +384,7 @@ WrapObject(JSContext *cx, JSObject *parent, jsval *vp, XPCWrappedNative* wn) #ifdef DEBUG_mrbkap_off printf("Wrapping object at %p (%s) [%p]\n", - (void *)wrappedObj, STOBJ_GET_CLASS(wrappedObj)->name, + (void *)wrappedObj, wrappedObj->getClass()->name, (void *)parentScope); #endif @@ -393,7 +393,7 @@ WrapObject(JSContext *cx, JSObject *parent, jsval *vp, XPCWrappedNative* wn) outerObj = map->Find(wrappedObj); if (outerObj) { - NS_ASSERTION(STOBJ_GET_CLASS(outerObj) == &XOWClass.base, + NS_ASSERTION(outerObj->getClass() == &XOWClass.base, "What crazy object are we getting here?"); #ifdef DEBUG_mrbkap_off printf("But found a wrapper in the map %p!\n", (void *)outerObj); @@ -434,7 +434,7 @@ static JSBool IsValFrame(JSObject *obj, jsval v, XPCWrappedNative *wn) { // Fast path for the common case. - if (STOBJ_GET_CLASS(obj)->name[0] != 'W') { + if (obj->getClass()->name[0] != 'W') { return JS_FALSE; } @@ -530,7 +530,7 @@ WrapSameOriginProp(JSContext *cx, JSObject *outerObj, jsval *vp) } JSObject *wrappedObj = JSVAL_TO_OBJECT(*vp); - JSClass *clasp = STOBJ_GET_CLASS(wrappedObj); + JSClass *clasp = wrappedObj->getClass(); if (ClassNeedsXOW(clasp->name)) { return WrapObject(cx, JS_GetGlobalForObject(cx, outerObj), vp); } @@ -682,7 +682,7 @@ XPC_XOW_GetOrSetProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp, return ThrowException(NS_ERROR_NOT_INITIALIZED, cx); } rv = ssm->CheckPropertyAccess(cx, wrappedObj, - STOBJ_GET_CLASS(wrappedObj)->name, + wrappedObj->getClass()->name, id, isSet ? sSecMgrSetProp : sSecMgrGetProp); if (NS_FAILED(rv)) { @@ -805,7 +805,7 @@ XPC_XOW_Enumerate(JSContext *cx, JSObject *obj) static JSObject * GetUXPCObject(JSContext *cx, JSObject *obj) { - NS_ASSERTION(STOBJ_GET_CLASS(obj) == &XOWClass.base, "wrong object"); + NS_ASSERTION(obj->getClass() == &XOWClass.base, "wrong object"); jsval v; if (!JS_GetReservedSlot(cx, obj, sFlagsSlot, &v)) { @@ -830,7 +830,7 @@ GetUXPCObject(JSContext *cx, JSObject *obj) return nsnull; } - JSAutoTempValueRooter tvr(cx, uxpco); + js::AutoValueRooter tvr(cx, uxpco); jsval wrappedObj, parentScope; if (!JS_GetReservedSlot(cx, obj, sWrappedObjSlot, &wrappedObj) || @@ -895,7 +895,7 @@ XPC_XOW_NewResolve(JSContext *cx, JSObject *obj, jsval id, uintN flags, ? sSecMgrSetProp : sSecMgrGetProp; rv = ssm->CheckPropertyAccess(cx, wrappedObj, - STOBJ_GET_CLASS(wrappedObj)->name, + wrappedObj->getClass()->name, id, action); if (NS_FAILED(rv)) { // The security manager threw an exception for us. 
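The STOBJ_GET_CLASS rewrites in this file, and in the wrapper files that follow, are one mechanical substitution applied many times. A minimal sketch of the idiom, assuming the SpiderMonkey-internal headers of this era (the helper name is illustrative):

    // Both forms fetch the object's JSClass; only the spelling changes.
    static JSClass *
    GetClassBothWays(JSObject *obj)
    {
        JSClass *oldForm = STOBJ_GET_CLASS(obj);  // macro form, being retired
        JSClass *newForm = obj->getClass();       // member accessor
        JS_ASSERT(oldForm == newForm);
        return newForm;
    }
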
@@ -975,7 +975,7 @@ XPC_XOW_Convert(JSContext *cx, JSObject *obj, JSType type, jsval *vp) return JS_FALSE; } - if (!STOBJ_GET_CLASS(wrappedObj)->convert(cx, wrappedObj, type, vp)) { + if (!wrappedObj->getClass()->convert(cx, wrappedObj, type, vp)) { return JS_FALSE; } @@ -1111,7 +1111,7 @@ XPC_XOW_HasInstance(JSContext *cx, JSObject *obj, jsval v, JSBool *bp) return JS_FALSE; } - JSClass *clasp = STOBJ_GET_CLASS(iface); + JSClass *clasp = iface->getClass(); *bp = JS_FALSE; if (!clasp->hasInstance) { @@ -1142,7 +1142,7 @@ XPC_XOW_Equality(JSContext *cx, JSObject *obj, jsval v, JSBool *bp) } JSObject *test = JSVAL_TO_OBJECT(v); - if (STOBJ_GET_CLASS(test) == &XOWClass.base) { + if (test->getClass() == &XOWClass.base) { if (!JS_GetReservedSlot(cx, test, sWrappedObjSlot, &v)) { return JS_FALSE; } @@ -1169,7 +1169,7 @@ XPC_XOW_Equality(JSContext *cx, JSObject *obj, jsval v, JSBool *bp) XPCWrappedNative *me = XPCWrappedNative::GetWrappedNativeOfJSObject(cx, obj); obj = me->GetFlatJSObject(); test = other->GetFlatJSObject(); - return ((JSExtendedClass *)STOBJ_GET_CLASS(obj))-> + return ((JSExtendedClass *)obj->getClass())-> equality(cx, obj, OBJECT_TO_JSVAL(test), bp); } @@ -1206,7 +1206,7 @@ XPC_XOW_Iterator(JSContext *cx, JSObject *obj, JSBool keysonly) return nsnull; } - JSAutoTempValueRooter tvr(cx, OBJECT_TO_JSVAL(wrapperIter)); + js::AutoObjectRooter tvr(cx, wrapperIter); // Initialize our XOW. jsval v = OBJECT_TO_JSVAL(wrappedObj); @@ -1260,7 +1260,7 @@ XPC_XOW_toString(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, return ThrowException(NS_ERROR_NOT_INITIALIZED, cx); } rv = ssm->CheckPropertyAccess(cx, wrappedObj, - STOBJ_GET_CLASS(wrappedObj)->name, + wrappedObj->getClass()->name, GetRTStringByIndex(cx, XPCJSRuntime::IDX_TO_STRING), nsIXPCSecurityManager::ACCESS_GET_PROPERTY); } diff --git a/js/src/xpconnect/src/XPCDispObject.cpp b/js/src/xpconnect/src/XPCDispObject.cpp index fdc9a164f64..ab80aab8e13 100644 --- a/js/src/xpconnect/src/XPCDispObject.cpp +++ b/js/src/xpconnect/src/XPCDispObject.cpp @@ -195,7 +195,7 @@ JSBool XPCDispObject::Dispatch(XPCCallContext& ccx, IDispatch * disp, // Scope the lock { // avoid deadlock in case the native method blocks somehow - AutoJSSuspendRequest req(ccx); // scoped suspend of request + JSAutoSuspendRequest req(ccx); // scoped suspend of request // call IDispatch's invoke invokeResult= disp->Invoke( dispID, // IDispatch ID diff --git a/js/src/xpconnect/src/XPCNativeWrapper.cpp b/js/src/xpconnect/src/XPCNativeWrapper.cpp index d0416bbba2c..2a42897a916 100644 --- a/js/src/xpconnect/src/XPCNativeWrapper.cpp +++ b/js/src/xpconnect/src/XPCNativeWrapper.cpp @@ -112,7 +112,33 @@ namespace XPCNativeWrapper { namespace internal { // JS class for XPCNativeWrapper (and this doubles as the constructor // for XPCNativeWrapper for the moment too...) 
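Just below, the single NWClass splits into NW_NoCall_Class and NW_Call_Class, and GetJSClass() grows a bool parameter. A hypothetical caller, mirroring the NATIVE_HAS_FLAG test that GetNewOrUsed and CreateExplicitWrapper use further down:

    // Pick the Call variant only when the wrapped native wants those hooks.
    bool call = NATIVE_HAS_FLAG(wrapper, WantCall) ||
                NATIVE_HAS_FLAG(wrapper, WantConstruct);
    JSClass *clasp = XPCNativeWrapper::GetJSClass(call);
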
-JSExtendedClass NWClass = { +JSExtendedClass NW_NoCall_Class = { + // JSClass (JSExtendedClass.base) initialization + { "XPCNativeWrapper", + JSCLASS_HAS_PRIVATE | JSCLASS_PRIVATE_IS_NSISUPPORTS | + // Our one reserved slot holds a jsint of flag bits + JSCLASS_NEW_RESOLVE | JSCLASS_HAS_RESERVED_SLOTS(1) | + JSCLASS_MARK_IS_TRACE | JSCLASS_IS_EXTENDED | JSCLASS_CONSTRUCT_PROTOTYPE, + XPC_NW_AddProperty, XPC_NW_DelProperty, + XPC_NW_GetProperty, XPC_NW_SetProperty, + XPC_NW_Enumerate, (JSResolveOp)XPC_NW_NewResolve, + XPC_NW_Convert, XPC_NW_Finalize, + nsnull, XPC_NW_CheckAccess, + nsnull, XPC_NW_Construct, + nsnull, XPC_NW_HasInstance, + JS_CLASS_TRACE(XPC_NW_Trace), nsnull + }, + + // JSExtendedClass initialization + XPC_NW_Equality, + nsnull, // outerObject + nsnull, // innerObject + XPC_NW_Iterator, + nsnull, // wrappedObject + JSCLASS_NO_RESERVED_MEMBERS +}; + +JSExtendedClass NW_Call_Class = { // JSClass (JSExtendedClass.base) initialization { "XPCNativeWrapper", JSCLASS_HAS_PRIVATE | JSCLASS_PRIVATE_IS_NSISUPPORTS | @@ -282,10 +308,10 @@ using namespace XPCNativeWrapper; // in the call from XPC_NW_Convert, for example. #define XPC_NW_CALL_HOOK(obj, hook, args) \ - return STOBJ_GET_CLASS(obj)->hook args; + return obj->getClass()->hook args; #define XPC_NW_CAST_HOOK(obj, type, hook, args) \ - return ((type) STOBJ_GET_CLASS(obj)->hook) args; + return ((type) obj->getClass()->hook) args; static JSBool ShouldBypassNativeWrapper(JSContext *cx, JSObject *obj) @@ -329,7 +355,7 @@ ShouldBypassNativeWrapper(JSContext *cx, JSObject *obj) #define XPC_NW_BYPASS_TEST(cx, obj, hook, args) \ XPC_NW_BYPASS_BASE(cx, obj, \ - JSClass *clasp_ = STOBJ_GET_CLASS(obj); \ + JSClass *clasp_ = obj->getClass(); \ return !clasp_->hook || clasp_->hook args; \ ) @@ -400,7 +426,7 @@ EnsureLegalActivity(JSContext *cx, JSObject *obj, (accessType & (sSecMgrSetProp | sSecMgrGetProp)) && (flatObj = wn->GetFlatJSObject())) { rv = ssm->CheckPropertyAccess(cx, flatObj, - STOBJ_GET_CLASS(flatObj)->name, + flatObj->getClass()->name, id, accessType); return NS_SUCCEEDED(rv); } @@ -805,7 +831,7 @@ XPC_NW_CheckAccess(JSContext *cx, JSObject *obj, jsval id, JSObject *wrapperJSObject = wrappedNative->GetFlatJSObject(); - JSClass *clazz = STOBJ_GET_CLASS(wrapperJSObject); + JSClass *clazz = wrapperJSObject->getClass(); return !clazz->checkAccess || clazz->checkAccess(cx, wrapperJSObject, id, mode, vp); } @@ -1105,13 +1131,13 @@ XPC_NW_Iterator(JSContext *cx, JSObject *obj, JSBool keysonly) } JSObject *wrapperIter = - JS_NewObjectWithGivenProto(cx, XPCNativeWrapper::GetJSClass(), nsnull, + JS_NewObjectWithGivenProto(cx, XPCNativeWrapper::GetJSClass(false), nsnull, obj->getParent()); if (!wrapperIter) { return nsnull; } - JSAutoTempValueRooter tvr(cx, OBJECT_TO_JSVAL(wrapperIter)); + js::AutoObjectRooter tvr(cx, wrapperIter); // Initialize our native wrapper. 
XPCWrappedNative *wn = static_cast(JS_GetPrivate(cx, obj)); @@ -1201,7 +1227,7 @@ XPCNativeWrapper::AttachNewConstructorObject(XPCCallContext &ccx, JSObject *aGlobalObject) { JSObject *class_obj = - ::JS_InitClass(ccx, aGlobalObject, nsnull, &internal::NWClass.base, + ::JS_InitClass(ccx, aGlobalObject, nsnull, &internal::NW_Call_Class.base, XPCNativeWrapperCtor, 0, nsnull, nsnull, nsnull, static_functions); if (!class_obj) { @@ -1219,7 +1245,7 @@ XPCNativeWrapper::AttachNewConstructorObject(XPCCallContext &ccx, JSBool found; return ::JS_SetPropertyAttributes(ccx, aGlobalObject, - internal::NWClass.base.name, + internal::NW_Call_Class.base.name, JSPROP_READONLY | JSPROP_PERMANENT, &found); } @@ -1288,7 +1314,9 @@ XPCNativeWrapper::GetNewOrUsed(JSContext *cx, XPCWrappedNative *wrapper, ::JS_LockGCThing(cx, nw_parent); } - obj = ::JS_NewObjectWithGivenProto(cx, GetJSClass(), nsnull, nw_parent); + bool call = NATIVE_HAS_FLAG(wrapper, WantCall) || + NATIVE_HAS_FLAG(wrapper, WantConstruct); + obj = ::JS_NewObjectWithGivenProto(cx, GetJSClass(call), nsnull, nw_parent); if (lock) { ::JS_UnlockGCThing(cx, nw_parent); @@ -1331,8 +1359,10 @@ XPCNativeWrapper::CreateExplicitWrapper(JSContext *cx, printf("Creating new JSObject\n"); #endif + bool call = NATIVE_HAS_FLAG(wrappedNative, WantCall) || + NATIVE_HAS_FLAG(wrappedNative, WantConstruct); JSObject *wrapperObj = - JS_NewObjectWithGivenProto(cx, XPCNativeWrapper::GetJSClass(), nsnull, + JS_NewObjectWithGivenProto(cx, XPCNativeWrapper::GetJSClass(call), nsnull, wrappedNative->GetScope()->GetGlobalJSObject()); if (!wrapperObj) { diff --git a/js/src/xpconnect/src/XPCNativeWrapper.h b/js/src/xpconnect/src/XPCNativeWrapper.h index 74f8b68f130..2bccbca14fe 100644 --- a/js/src/xpconnect/src/XPCNativeWrapper.h +++ b/js/src/xpconnect/src/XPCNativeWrapper.h @@ -44,7 +44,10 @@ class nsIPrincipal; namespace XPCNativeWrapper { -namespace internal { extern JSExtendedClass NWClass; } +namespace internal { + extern JSExtendedClass NW_NoCall_Class; + extern JSExtendedClass NW_Call_Class; +} PRBool AttachNewConstructorObject(XPCCallContext &ccx, JSObject *aGlobalObject); @@ -59,13 +62,14 @@ CreateExplicitWrapper(JSContext *cx, XPCWrappedNative *wrapper, JSBool deep, inline PRBool IsNativeWrapperClass(JSClass *clazz) { - return clazz == &internal::NWClass.base; + return clazz == &internal::NW_NoCall_Class.base || + clazz == &internal::NW_Call_Class.base; } inline PRBool IsNativeWrapper(JSObject *obj) { - return STOBJ_GET_CLASS(obj) == &internal::NWClass.base; + return IsNativeWrapperClass(obj->getClass()); } JSBool @@ -80,9 +84,11 @@ SafeGetWrappedNative(JSObject *obj) } inline JSClass * -GetJSClass() +GetJSClass(bool call) { - return &internal::NWClass.base; + return call + ? &internal::NW_Call_Class.base + : &internal::NW_NoCall_Class.base; } void diff --git a/js/src/xpconnect/src/XPCSafeJSObjectWrapper.cpp b/js/src/xpconnect/src/XPCSafeJSObjectWrapper.cpp index ad89675fd1d..3926f146e29 100644 --- a/js/src/xpconnect/src/XPCSafeJSObjectWrapper.cpp +++ b/js/src/xpconnect/src/XPCSafeJSObjectWrapper.cpp @@ -228,7 +228,7 @@ FindObjectPrincipals(JSContext *cx, JSObject *safeObj, JSObject *innerObj) static inline JSObject * FindSafeObject(JSObject *obj) { - while (STOBJ_GET_CLASS(obj) != &SJOWClass.base) { + while (obj->getClass() != &SJOWClass.base) { obj = obj->getProto(); if (!obj) { @@ -285,7 +285,7 @@ WrapObject(JSContext *cx, JSObject *scope, jsval v, jsval *vp) // with XPCSafeJSObjectWrapper, and never let the eval function // object be directly wrapped. 
- if (STOBJ_GET_CLASS(objToWrap) == &js_ScriptClass || + if (objToWrap->getClass() == &js_ScriptClass || (JS_ObjectIsFunction(cx, objToWrap) && JS_GetFunctionFastNative(cx, JS_ValueToFunction(cx, v)) == XPCWrapper::sEvalNative)) { @@ -406,7 +406,7 @@ WrapJSValue(JSContext *cx, JSObject *obj, jsval val, jsval *rval) // parent we pass in here, the construct hook will ensure we get // the right parent for the wrapper. JSObject *safeObj = JSVAL_TO_OBJECT(*rval); - if (STOBJ_GET_CLASS(safeObj) == &SJOWClass.base && + if (safeObj->getClass() == &SJOWClass.base && JS_GetGlobalForObject(cx, obj) != JS_GetGlobalForObject(cx, safeObj)) { // Check to see if the new object we just wrapped is accessible // from the unsafe object we got the new object through. If not, @@ -534,7 +534,7 @@ XPC_SJOW_DelProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp) NS_STACK_CLASS class SafeCallGuard { public: SafeCallGuard(JSContext *cx, nsIPrincipal *principal) - : cx(cx) { + : cx(cx), tvr(cx) { nsIScriptSecurityManager *ssm = XPCWrapper::GetSecurityManager(); if (ssm) { // Note: We pass null as the target frame pointer because we know that @@ -573,7 +573,7 @@ public: private: JSContext *cx; JSRegExpStatics statics; - JSTempValueRooter tvr; + js::AutoValueRooter tvr; uint32 options; JSStackFrame *fp; }; @@ -752,7 +752,7 @@ XPC_SJOW_CheckAccess(JSContext *cx, JSObject *obj, jsval id, return JS_FALSE; } - JSClass *clazz = STOBJ_GET_CLASS(unsafeObj); + JSClass *clazz = unsafeObj->getClass(); return !clazz->checkAccess || clazz->checkAccess(cx, unsafeObj, id, mode, vp); } @@ -991,7 +991,7 @@ XPC_SJOW_Iterator(JSContext *cx, JSObject *obj, JSBool keysonly) return nsnull; } - JSAutoTempValueRooter tvr(cx, OBJECT_TO_JSVAL(wrapperIter)); + js::AutoValueRooter tvr(cx, OBJECT_TO_JSVAL(wrapperIter)); // Initialize the wrapper. return XPCWrapper::CreateIteratorObj(cx, wrapperIter, obj, unsafeObj, diff --git a/js/src/xpconnect/src/XPCSystemOnlyWrapper.cpp b/js/src/xpconnect/src/XPCSystemOnlyWrapper.cpp index 8cc894da14d..b77c13dbbfa 100644 --- a/js/src/xpconnect/src/XPCSystemOnlyWrapper.cpp +++ b/js/src/xpconnect/src/XPCSystemOnlyWrapper.cpp @@ -40,7 +40,7 @@ #include "xpcprivate.h" #include "nsDOMError.h" #include "jsdbgapi.h" -#include "jscntxt.h" // For JSAutoTempValueRooter. +#include "jscntxt.h" // For js::AutoValueRooter. 
#include "XPCNativeWrapper.h" #include "XPCWrapper.h" @@ -139,7 +139,7 @@ WrapObject(JSContext *cx, JSObject *parent, jsval v, jsval *vp) } *vp = OBJECT_TO_JSVAL(wrapperObj); - JSAutoTempValueRooter tvr(cx, *vp); + js::AutoValueRooter tvr(cx, *vp); if (!JS_SetReservedSlot(cx, wrapperObj, sWrappedObjSlot, v) || !JS_SetReservedSlot(cx, wrapperObj, sFlagsSlot, JSVAL_ZERO)) { @@ -154,7 +154,7 @@ MakeSOW(JSContext *cx, JSObject *obj) { #ifdef DEBUG { - JSClass *clasp = STOBJ_GET_CLASS(obj); + JSClass *clasp = obj->getClass(); NS_ASSERTION(clasp != &SystemOnlyWrapper::SOWClass.base && clasp != &XPCCrossOriginWrapper::XOWClass.base && strcmp(clasp->name, "XPCNativeWrapper"), @@ -268,7 +268,7 @@ using namespace SystemOnlyWrapper; static inline JSObject * GetWrappedJSObject(JSContext *cx, JSObject *obj) { - JSClass *clasp = STOBJ_GET_CLASS(obj); + JSClass *clasp = obj->getClass(); if (!(clasp->flags & JSCLASS_IS_EXTENDED)) { return obj; } @@ -286,7 +286,7 @@ static inline JSObject * GetWrapper(JSObject *obj) { - while (STOBJ_GET_CLASS(obj) != &SOWClass.base) { + while (obj->getClass() != &SOWClass.base) { obj = obj->getProto(); if (!obj) { break; @@ -405,7 +405,7 @@ XPC_SOW_RewrapValue(JSContext *cx, JSObject *wrapperObj, jsval *vp) return XPC_SOW_WrapFunction(cx, wrapperObj, obj, vp); } - if (STOBJ_GET_CLASS(obj) == &SOWClass.base) { + if (obj->getClass() == &SOWClass.base) { // We are extra careful about content-polluted wrappers here. I don't know // if it's possible to reach them through objects that we wrap, but figuring // that out is more expensive (and harder) than simply checking and @@ -430,7 +430,7 @@ XPC_SOW_RewrapValue(JSContext *cx, JSObject *wrapperObj, jsval *vp) static JSBool XPC_SOW_AddProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp) { - NS_ASSERTION(STOBJ_GET_CLASS(obj) == &SOWClass.base, "Wrong object"); + NS_ASSERTION(obj->getClass() == &SOWClass.base, "Wrong object"); jsval resolving; if (!JS_GetReservedSlot(cx, obj, sFlagsSlot, &resolving)) { @@ -482,7 +482,7 @@ XPC_SOW_GetOrSetProperty(JSContext *cx, JSObject *obj, jsval id, jsval *vp, return JS_FALSE; } - JSAutoTempValueRooter tvr(cx, 1, vp); + js::AutoArrayRooter tvr(cx, 1, vp); JSObject *wrappedObj = GetWrappedObject(cx, obj); if (!wrappedObj) { @@ -578,7 +578,7 @@ XPC_SOW_Convert(JSContext *cx, JSObject *obj, JSType type, jsval *vp) return JS_TRUE; } - return STOBJ_GET_CLASS(wrappedObj)->convert(cx, wrappedObj, type, vp); + return wrappedObj->getClass()->convert(cx, wrappedObj, type, vp); } static JSBool @@ -613,7 +613,7 @@ XPC_SOW_HasInstance(JSContext *cx, JSObject *obj, jsval v, JSBool *bp) return JS_TRUE; } - JSClass *clasp = STOBJ_GET_CLASS(iface); + JSClass *clasp = iface->getClass(); *bp = JS_FALSE; if (!clasp->hasInstance) { @@ -659,7 +659,7 @@ XPC_SOW_Equality(JSContext *cx, JSObject *obj, jsval v, JSBool *bp) if (lhs) { // Delegate to our wrapped object if we can. - JSClass *clasp = STOBJ_GET_CLASS(lhs); + JSClass *clasp = lhs->getClass(); if (clasp->flags & JSCLASS_IS_EXTENDED) { JSExtendedClass *xclasp = (JSExtendedClass *) clasp; // NB: JSExtendedClass.equality is a required field. @@ -668,7 +668,7 @@ XPC_SOW_Equality(JSContext *cx, JSObject *obj, jsval v, JSBool *bp) } // We know rhs is non-null. - JSClass *clasp = STOBJ_GET_CLASS(rhs); + JSClass *clasp = rhs->getClass(); if (clasp->flags & JSCLASS_IS_EXTENDED) { JSExtendedClass *xclasp = (JSExtendedClass *) clasp; // NB: JSExtendedClass.equality is a required field. 
@@ -694,7 +694,7 @@ XPC_SOW_Iterator(JSContext *cx, JSObject *obj, JSBool keysonly) return nsnull; } - JSAutoTempValueRooter tvr(cx, OBJECT_TO_JSVAL(wrapperIter)); + js::AutoValueRooter tvr(cx, OBJECT_TO_JSVAL(wrapperIter)); // Initialize our SOW. jsval v = OBJECT_TO_JSVAL(wrappedObj); diff --git a/js/src/xpconnect/src/XPCWrapper.cpp b/js/src/xpconnect/src/XPCWrapper.cpp index 83b39a4e784..909cd6efe2b 100644 --- a/js/src/xpconnect/src/XPCWrapper.cpp +++ b/js/src/xpconnect/src/XPCWrapper.cpp @@ -60,7 +60,7 @@ const PRUint32 sSecMgrGetProp = nsIXPCSecurityManager::ACCESS_GET_PROPERTY; JSObject * Unwrap(JSContext *cx, JSObject *wrapper) { - JSClass *clasp = STOBJ_GET_CLASS(wrapper); + JSClass *clasp = wrapper->getClass(); if (clasp == &XPCCrossOriginWrapper::XOWClass.base) { return UnwrapXOW(cx, wrapper); } @@ -322,7 +322,7 @@ CreateIteratorObj(JSContext *cx, JSObject *tempWrapper, return nsnull; } - JSAutoTempValueRooter tvr(cx, OBJECT_TO_JSVAL(iterObj)); + js::AutoObjectRooter tvr(cx, iterObj); // Do this sooner rather than later to avoid complications in // IteratorFinalize. @@ -337,7 +337,7 @@ CreateIteratorObj(JSContext *cx, JSObject *tempWrapper, // call enumerate, and then re-set the prototype. As we do this, we have // to protec the temporary wrapper from garbage collection. - JSAutoTempValueRooter tvr(cx, tempWrapper); + js::AutoValueRooter tvr(cx, tempWrapper); if (!JS_SetPrototype(cx, iterObj, wrapperObj) || !XPCWrapper::Enumerate(cx, iterObj, wrapperObj) || !JS_SetPrototype(cx, iterObj, tempWrapper)) { @@ -386,7 +386,7 @@ CreateSimpleIterator(JSContext *cx, JSObject *scope, JSBool keysonly, return nsnull; } - JSAutoTempValueRooter tvr(cx, iterObj); + js::AutoValueRooter tvr(cx, iterObj); if (!propertyContainer) { if (!JS_SetReservedSlot(cx, iterObj, 0, PRIVATE_TO_JSVAL(nsnull)) || !JS_SetReservedSlot(cx, iterObj, 1, JSVAL_ZERO) || diff --git a/js/src/xpconnect/src/XPCWrapper.h b/js/src/xpconnect/src/XPCWrapper.h index b3a583186c1..81294b4b533 100644 --- a/js/src/xpconnect/src/XPCWrapper.h +++ b/js/src/xpconnect/src/XPCWrapper.h @@ -294,7 +294,7 @@ MaybePreserveWrapper(JSContext *cx, XPCWrappedNative *wn, uintN flags) inline JSBool IsSecurityWrapper(JSObject *wrapper) { - JSClass *clasp = STOBJ_GET_CLASS(wrapper); + JSClass *clasp = wrapper->getClass(); return (clasp->flags & JSCLASS_IS_EXTENDED) && ((JSExtendedClass*)clasp)->wrappedObject; } @@ -318,7 +318,7 @@ Unwrap(JSContext *cx, JSObject *wrapper); inline JSObject * UnwrapGeneric(JSContext *cx, const JSExtendedClass *xclasp, JSObject *wrapper) { - if (STOBJ_GET_CLASS(wrapper) != &xclasp->base) { + if (wrapper->getClass() != &xclasp->base) { return nsnull; } diff --git a/js/src/xpconnect/src/nsXPConnect.cpp b/js/src/xpconnect/src/nsXPConnect.cpp index a16c88603c9..f8f14e652c7 100644 --- a/js/src/xpconnect/src/nsXPConnect.cpp +++ b/js/src/xpconnect/src/nsXPConnect.cpp @@ -1954,7 +1954,7 @@ nsXPConnect::RestoreWrappedNativePrototype(JSContext * aJSContext, if(NS_FAILED(rv)) return UnexpectedFailure(rv); - if(!IS_PROTO_CLASS(STOBJ_GET_CLASS(protoJSObject))) + if(!IS_PROTO_CLASS(protoJSObject->getClass())) return UnexpectedFailure(NS_ERROR_INVALID_ARG); XPCWrappedNativeScope* scope = @@ -2184,7 +2184,7 @@ nsXPConnect::UpdateXOWs(JSContext* aJSContext, if(!list) return NS_OK; // No wrappers to update. 
- AutoJSRequestWithNoCallContext req(aJSContext); + JSAutoRequest req(aJSContext); Link* cur = list; if(cur->obj && !PerformOp(aJSContext, aWay, cur->obj)) @@ -2546,7 +2546,7 @@ nsXPConnect::GetWrapperForObject(JSContext* aJSContext, JSBool sameOrigin; JSBool sameScope = xpc_SameScope(objectscope, xpcscope, &sameOrigin); JSBool forceXOW = - XPCCrossOriginWrapper::ClassNeedsXOW(STOBJ_GET_CLASS(aObject)->name); + XPCCrossOriginWrapper::ClassNeedsXOW(aObject->getClass()->name); // We can do nothing if: // - We're wrapping a system object @@ -2752,7 +2752,7 @@ nsXPConnect::SetSafeJSContext(JSContext * aSafeJSContext) nsIPrincipal* nsXPConnect::GetPrincipal(JSObject* obj, PRBool allowShortCircuit) const { - NS_ASSERTION(IS_WRAPPER_CLASS(STOBJ_GET_CLASS(obj)), + NS_ASSERTION(IS_WRAPPER_CLASS(obj->getClass()), "What kind of wrapper is this?"); if(IS_WN_WRAPPER_OBJECT(obj)) @@ -2801,10 +2801,14 @@ nsXPConnect::GetPrincipal(JSObject* obj, PRBool allowShortCircuit) const return nsnull; } -NS_IMETHODIMP_(JSClass *) -nsXPConnect::GetNativeWrapperClass() +NS_IMETHODIMP_(void) +nsXPConnect::GetNativeWrapperGetPropertyOp(JSPropertyOp *getPropertyPtr) { - return XPCNativeWrapper::GetJSClass(); + NS_ASSERTION(XPCNativeWrapper::GetJSClass(true)->getProperty == + XPCNativeWrapper::GetJSClass(false)->getProperty, + "Call and NoCall XPCNativeWrapper Class must use the same " + "getProperty hook."); + *getPropertyPtr = XPCNativeWrapper::GetJSClass(true)->getProperty; } /* These are here to be callable from a debugger */ diff --git a/js/src/xpconnect/src/qsgen.py b/js/src/xpconnect/src/qsgen.py index 7d5f651195d..e00b191394f 100644 --- a/js/src/xpconnect/src/qsgen.py +++ b/js/src/xpconnect/src/qsgen.py @@ -835,7 +835,7 @@ def writeQuickStub(f, customMethodCalls, member, stubName, isSetter=False): if isGetter: pthisval = 'vp' elif isSetter: - f.write(" JSAutoTempValueRooter tvr(cx);\n") + f.write(" js::AutoValueRooter tvr(cx);\n") pthisval = 'tvr.addr()' else: pthisval = '&vp[1]' # as above, ok to overwrite vp[1] diff --git a/js/src/xpconnect/src/xpccomponents.cpp b/js/src/xpconnect/src/xpccomponents.cpp index e1995d9aef7..35513279d4b 100644 --- a/js/src/xpconnect/src/xpccomponents.cpp +++ b/js/src/xpconnect/src/xpccomponents.cpp @@ -3221,7 +3221,7 @@ xpc_CreateSandboxObject(JSContext * cx, jsval * vp, nsISupports *prinOrSop) nsnull, nsnull); if (!sandbox) return NS_ERROR_XPC_UNEXPECTED; - JSAutoTempValueRooter tvr(cx, sandbox); + js::AutoValueRooter tvr(cx, sandbox); nsCOMPtr sop(do_QueryInterface(prinOrSop)); @@ -3548,7 +3548,7 @@ xpc_EvalInSandbox(JSContext *cx, JSObject *sandbox, const nsAString& source, const char *filename, PRInt32 lineNo, JSVersion jsVersion, PRBool returnStringOnly, jsval *rval) { - if (STOBJ_GET_CLASS(sandbox) != &SandboxClass) + if (sandbox->getClass() != &SandboxClass) return NS_ERROR_INVALID_ARG; nsIScriptObjectPrincipal *sop = @@ -3594,7 +3594,7 @@ xpc_EvalInSandbox(JSContext *cx, JSObject *sandbox, const nsAString& source, nsresult rv = NS_OK; { - AutoJSRequestWithNoCallContext req(sandcx->GetJSContext()); + JSAutoRequest req(sandcx->GetJSContext()); JSString *str = nsnull; if (!JS_EvaluateUCScriptForPrincipals(sandcx->GetJSContext(), sandbox, jsPrincipals, @@ -3610,9 +3610,7 @@ xpc_EvalInSandbox(JSContext *cx, JSObject *sandbox, const nsAString& source, // Stash the exception in |cx| so we can execute code on // sandcx without a pending exception. 
{ - AutoJSSuspendRequestWithNoCallContext sus(sandcx->GetJSContext()); - AutoJSRequestWithNoCallContext cxreq(cx); - + JSAutoTransferRequest transfer(sandcx->GetJSContext(), cx); JS_SetPendingException(cx, exn); } @@ -3622,8 +3620,7 @@ xpc_EvalInSandbox(JSContext *cx, JSObject *sandbox, const nsAString& source, // exception into a string. str = JS_ValueToString(sandcx->GetJSContext(), exn); - AutoJSSuspendRequestWithNoCallContext sus(sandcx->GetJSContext()); - AutoJSRequestWithNoCallContext cxreq(cx); + JSAutoTransferRequest transfer(sandcx->GetJSContext(), cx); if (str) { // We converted the exception to a string. Use that // as the value exception. diff --git a/js/src/xpconnect/src/xpcconvert.cpp b/js/src/xpconnect/src/xpcconvert.cpp index 03294e4e8a1..eee4378062e 100644 --- a/js/src/xpconnect/src/xpcconvert.cpp +++ b/js/src/xpconnect/src/xpcconvert.cpp @@ -153,7 +153,7 @@ XPCConvert::IsMethodReflectable(const XPTMethodDescriptor& info) JSBool XPCConvert::GetISupportsFromJSObject(JSObject* obj, nsISupports** iface) { - JSClass* jsclass = STOBJ_GET_CLASS(obj); + JSClass* jsclass = obj->getClass(); NS_ASSERTION(jsclass, "obj has no class"); if(jsclass && (jsclass->flags & JSCLASS_HAS_PRIVATE) && @@ -474,7 +474,7 @@ XPCConvert::NativeData2JS(XPCLazyCallContext& lccx, jsval* d, const void* s, #ifdef DEBUG JSObject* jsobj = JSVAL_TO_OBJECT(*d); if(jsobj && !jsobj->getParent()) - NS_ASSERTION(STOBJ_GET_CLASS(jsobj)->flags & JSCLASS_IS_GLOBAL, + NS_ASSERTION(jsobj->getClass()->flags & JSCLASS_IS_GLOBAL, "Why did we recreate this wrapper?"); #endif } @@ -1185,7 +1185,7 @@ XPCConvert::NativeInterface2JSObject(XPCLazyCallContext& lccx, } } - NS_ASSERTION(!flat || IS_WRAPPER_CLASS(STOBJ_GET_CLASS(flat)), + NS_ASSERTION(!flat || IS_WRAPPER_CLASS(flat->getClass()), "What kind of wrapper is this?"); nsresult rv; @@ -1397,7 +1397,7 @@ XPCConvert::NativeInterface2JSObject(XPCLazyCallContext& lccx, } } - const char *name = STOBJ_GET_CLASS(flat)->name; + const char *name = flat->getClass()->name; if(allowNativeWrapper && !(flags & JSFILENAME_SYSTEM) && !JS_IsSystemObject(ccx, flat) && @@ -1582,23 +1582,23 @@ XPCConvert::ConstructException(nsresult rv, const char* message, /********************************/ -class AutoExceptionRestorer : public JSAutoTempValueRooter +class AutoExceptionRestorer { public: AutoExceptionRestorer(JSContext *cx, jsval v) - : JSAutoTempValueRooter(cx, v), - mVal(v) + : mContext(cx), tvr(cx, v) { JS_ClearPendingException(mContext); } ~AutoExceptionRestorer() { - JS_SetPendingException(mContext, mVal); + JS_SetPendingException(mContext, tvr.value()); } private: - jsval mVal; + JSContext * const mContext; + js::AutoValueRooter tvr; }; // static diff --git a/js/src/xpconnect/src/xpcdebug.cpp b/js/src/xpconnect/src/xpcdebug.cpp index fc40f8e4151..b35bbf8819e 100644 --- a/js/src/xpconnect/src/xpcdebug.cpp +++ b/js/src/xpconnect/src/xpcdebug.cpp @@ -426,9 +426,9 @@ static const int tab_width = 2; static void PrintObjectBasics(JSObject* obj) { - if(OBJ_IS_NATIVE(obj)) + if (obj->isNative()) printf("%p 'native' <%s>", - (void *)obj, STOBJ_GET_CLASS(obj)->name); + (void *)obj, obj->getClass()->name); else printf("%p 'host'", (void *)obj); } @@ -450,7 +450,7 @@ static void PrintObject(JSObject* obj, int depth, ObjectPile* pile) return; } - if(!OBJ_IS_NATIVE(obj)) + if(!obj->isNative()) return; JSObject* parent = obj->getParent(); diff --git a/js/src/xpconnect/src/xpclog.cpp b/js/src/xpconnect/src/xpclog.cpp index 2e48c2da076..01060cc0cd0 100644 --- a/js/src/xpconnect/src/xpclog.cpp +++ 
b/js/src/xpconnect/src/xpclog.cpp @@ -157,7 +157,7 @@ LogSlimWrapperNotCreated(JSContext *cx, nsISupports *obj, const char *reason) className ? " for " : "", className ? className : "", reason, obj); if(className) PR_Free(className); - AutoJSRequestWithNoCallContext autoRequest(cx); + JSAutoRequest autoRequest(cx); xpc_DumpJSStack(cx, JS_FALSE, JS_FALSE, JS_FALSE); } #endif diff --git a/js/src/xpconnect/src/xpcprivate.h b/js/src/xpconnect/src/xpcprivate.h index 8f892130d42..30c02384b5b 100644 --- a/js/src/xpconnect/src/xpcprivate.h +++ b/js/src/xpconnect/src/xpcprivate.h @@ -1333,7 +1333,7 @@ xpc_InitWrappedNativeJSOps(); inline JSBool DebugCheckWrapperClass(JSObject* obj) { - NS_ASSERTION(IS_WRAPPER_CLASS(STOBJ_GET_CLASS(obj)), + NS_ASSERTION(IS_WRAPPER_CLASS(obj->getClass()), "Forgot to check if this is a wrapper?"); return JS_TRUE; } @@ -1344,21 +1344,21 @@ DebugCheckWrapperClass(JSObject* obj) // also holds a pointer to its XPCWrappedNativeProto in a reserved slot, we can // check that slot for a non-void value to distinguish between the two. -// Only use these macros if IS_WRAPPER_CLASS(STOBJ_GET_CLASS(obj)) is true. +// Only use these macros if IS_WRAPPER_CLASS(obj->getClass()) is true. #define IS_WN_WRAPPER_OBJECT(obj) \ (DebugCheckWrapperClass(obj) && \ - JSVAL_IS_VOID(STOBJ_GET_SLOT(obj, JSSLOT_START(STOBJ_GET_CLASS(obj))))) + JSVAL_IS_VOID(obj->getSlot(JSSLOT_START(obj->getClass())))) #define IS_SLIM_WRAPPER_OBJECT(obj) \ (DebugCheckWrapperClass(obj) && \ - !JSVAL_IS_VOID(STOBJ_GET_SLOT(obj, JSSLOT_START(STOBJ_GET_CLASS(obj))))) + !JSVAL_IS_VOID(obj->getSlot(JSSLOT_START(obj->getClass())))) -// Use these macros if IS_WRAPPER_CLASS(STOBJ_GET_CLASS(obj)) might be false. -// Avoid calling them if IS_WRAPPER_CLASS(STOBJ_GET_CLASS(obj)) can only be +// Use these macros if IS_WRAPPER_CLASS(obj->getClass()) might be false. +// Avoid calling them if IS_WRAPPER_CLASS(obj->getClass()) can only be // true, as we'd do a redundant call to IS_WRAPPER_CLASS. #define IS_WN_WRAPPER(obj) \ - (IS_WRAPPER_CLASS(STOBJ_GET_CLASS(obj)) && IS_WN_WRAPPER_OBJECT(obj)) + (IS_WRAPPER_CLASS(obj->getClass()) && IS_WN_WRAPPER_OBJECT(obj)) #define IS_SLIM_WRAPPER(obj) \ - (IS_WRAPPER_CLASS(STOBJ_GET_CLASS(obj)) && IS_SLIM_WRAPPER_OBJECT(obj)) + (IS_WRAPPER_CLASS(obj->getClass()) && IS_SLIM_WRAPPER_OBJECT(obj)) // Comes from xpcwrappednativeops.cpp extern void @@ -2241,7 +2241,7 @@ extern JSBool MorphSlimWrapper(JSContext *cx, JSObject *obj); static inline XPCWrappedNativeProto* GetSlimWrapperProto(JSObject *obj) { - jsval v = STOBJ_GET_SLOT(obj, JSSLOT_START(STOBJ_GET_CLASS(obj))); + jsval v = obj->getSlot(JSSLOT_START(obj->getClass())); return static_cast(JSVAL_TO_PRIVATE(v)); } diff --git a/js/src/xpconnect/src/xpcquickstubs.cpp b/js/src/xpconnect/src/xpcquickstubs.cpp index 35200fc9139..92ea4a741ef 100644 --- a/js/src/xpconnect/src/xpcquickstubs.cpp +++ b/js/src/xpconnect/src/xpcquickstubs.cpp @@ -173,7 +173,7 @@ GeneratePropertyOp(JSContext *cx, JSObject *obj, jsval idval, uintN argc, JSObject *funobj = JS_GetFunctionObject(fun); - JSAutoTempValueRooter tvr(cx, OBJECT_TO_JSVAL(funobj)); + js::AutoObjectRooter tvr(cx, funobj); // Unfortunately, we cannot guarantee that JSPropertyOp is aligned. Use a // second object to work around this. @@ -198,7 +198,7 @@ ReifyPropertyOps(JSContext *cx, JSObject *obj, jsval idval, jsid interned_id, { // Generate both getter and setter and stash them in the prototype. 
jsval roots[2] = { JSVAL_NULL, JSVAL_NULL }; - JSAutoTempValueRooter tvr(cx, 2, roots); + js::AutoArrayRooter tvr(cx, JS_ARRAY_LENGTH(roots), roots); uintN attrs = JSPROP_SHARED; JSObject *getterobj; @@ -293,10 +293,10 @@ LookupGetterOrSetter(JSContext *cx, JSBool wantGetter, uintN argc, jsval *vp) ? JS_GetStringBytes(JSVAL_TO_STRING(idval)) : nsnull; if(!name || - !IS_PROTO_CLASS(STOBJ_GET_CLASS(desc.obj)) || + !IS_PROTO_CLASS(desc.obj->getClass()) || (desc.attrs & (JSPROP_GETTER | JSPROP_SETTER)) || !(desc.getter || desc.setter) || - desc.setter == STOBJ_GET_CLASS(desc.obj)->setProperty) + desc.setter == desc.obj->getClass()->setProperty) { JS_SET_RVAL(cx, vp, JSVAL_VOID); return JS_TRUE; @@ -363,7 +363,7 @@ DefineGetterOrSetter(JSContext *cx, uintN argc, JSBool wantGetter, jsval *vp) if(!obj2 || (attrs & (JSPROP_GETTER | JSPROP_SETTER)) || !(getter || setter) || - !IS_PROTO_CLASS(STOBJ_GET_CLASS(obj2))) + !IS_PROTO_CLASS(obj2->getClass())) return forward(cx, argc, vp); // Reify the getter and setter... @@ -504,8 +504,8 @@ GetMemberInfo(JSObject *obj, // but this code often produces a more specific error message, e.g. *ifaceName = "Unknown"; - NS_ASSERTION(IS_WRAPPER_CLASS(STOBJ_GET_CLASS(obj)) || - STOBJ_GET_CLASS(obj) == &XPC_WN_Tearoff_JSClass, + NS_ASSERTION(IS_WRAPPER_CLASS(obj->getClass()) || + obj->getClass() == &XPC_WN_Tearoff_JSClass, "obj must be a wrapper"); XPCWrappedNativeProto *proto; if(IS_SLIM_WRAPPER(obj)) @@ -1093,7 +1093,7 @@ xpc_qsXPCOMObjectToJsval(XPCLazyCallContext &lccx, nsISupports *p, #ifdef DEBUG JSObject* jsobj = JSVAL_TO_OBJECT(*rval); if(jsobj && !jsobj->getParent()) - NS_ASSERTION(STOBJ_GET_CLASS(jsobj)->flags & JSCLASS_IS_GLOBAL, + NS_ASSERTION(jsobj->getClass()->flags & JSCLASS_IS_GLOBAL, "Why did we recreate this wrapper?"); #endif diff --git a/js/src/xpconnect/src/xpcquickstubs.h b/js/src/xpconnect/src/xpcquickstubs.h index de2446cd243..0943aab68a1 100644 --- a/js/src/xpconnect/src/xpcquickstubs.h +++ b/js/src/xpconnect/src/xpcquickstubs.h @@ -321,7 +321,7 @@ struct xpc_qsArgValArray memset(array, 0, N * sizeof(jsval)); } - JSAutoTempValueRooter tvr; + js::AutoArrayRooter tvr; jsval array[N]; }; diff --git a/js/src/xpconnect/src/xpcthreadcontext.cpp b/js/src/xpconnect/src/xpcthreadcontext.cpp index ffab3326684..15da9612972 100644 --- a/js/src/xpconnect/src/xpcthreadcontext.cpp +++ b/js/src/xpconnect/src/xpcthreadcontext.cpp @@ -248,12 +248,12 @@ XPCJSContextStack::GetSafeJSContext(JSContext * *aSafeJSContext) if(xpc && (xpcrt = xpc->GetRuntime()) && (rt = xpcrt->GetJSRuntime())) { + JSObject *glob; mSafeJSContext = JS_NewContext(rt, 8192); if(mSafeJSContext) { // scoped JS Request - AutoJSRequestWithNoCallContext req(mSafeJSContext); - JSObject *glob; + JSAutoRequest req(mSafeJSContext); glob = JS_NewObject(mSafeJSContext, &global_class, NULL, NULL); #ifndef XPCONNECT_STANDALONE @@ -276,23 +276,26 @@ XPCJSContextStack::GetSafeJSContext(JSContext * *aSafeJSContext) // nsCOMPtr or dealt with, or we'll release in the finalize // hook. #endif - if(!glob || NS_FAILED(xpc->InitClasses(mSafeJSContext, glob))) + if(glob && NS_FAILED(xpc->InitClasses(mSafeJSContext, glob))) { - // Explicitly end the request since we are about to kill - // the JSContext that 'req' will try to use when it - // goes out of scope. - req.EndRequest(); - JS_DestroyContext(mSafeJSContext); - mSafeJSContext = nsnull; + glob = nsnull; } - // Save it off so we can destroy it later, even if - // mSafeJSContext has been set to another context - // via SetSafeJSContext. 
If we don't get here, - // then mSafeJSContext must have been set via - // SetSafeJSContext, and we're not responsible for - // destroying the passed-in context. - mOwnSafeJSContext = mSafeJSContext; + } + if(!glob && mSafeJSContext) + { + // Destroy the context outside the scope of JSAutoRequest that + // uses the context in its destructor. + JS_DestroyContext(mSafeJSContext); + mSafeJSContext = nsnull; + } + // Save it off so we can destroy it later, even if + // mSafeJSContext has been set to another context + // via SetSafeJSContext. If we don't get here, + // then mSafeJSContext must have been set via + // SetSafeJSContext, and we're not responsible for + // destroying the passed-in context. + mOwnSafeJSContext = mSafeJSContext; } } diff --git a/js/src/xpconnect/src/xpcwrappedjsclass.cpp b/js/src/xpconnect/src/xpcwrappedjsclass.cpp index 43b75e0aa1b..2d7e244e919 100644 --- a/js/src/xpconnect/src/xpcwrappedjsclass.cpp +++ b/js/src/xpconnect/src/xpcwrappedjsclass.cpp @@ -779,7 +779,7 @@ nsXPCWrappedJSClass::DelegatedQueryInterface(nsXPCWrappedJS* self, PRBool isSystem; rv = secMan->IsSystemPrincipal(objPrin, &isSystem); if((NS_FAILED(rv) || !isSystem) && - !IS_WRAPPER_CLASS(STOBJ_GET_CLASS(selfObj))) + !IS_WRAPPER_CLASS(selfObj->getClass())) { // A content object. nsRefPtr checked = diff --git a/js/src/xpconnect/src/xpcwrappednative.cpp b/js/src/xpconnect/src/xpcwrappednative.cpp index 56536f92252..17bb1b0b31c 100644 --- a/js/src/xpconnect/src/xpcwrappednative.cpp +++ b/js/src/xpconnect/src/xpcwrappednative.cpp @@ -1639,7 +1639,7 @@ XPCWrappedNative::GetWrappedNativeOfJSObject(JSContext* cx, JSObject* funObjParent = funobj->getParent(); NS_ASSERTION(funObjParent, "funobj has no parent"); - JSClass* funObjParentClass = STOBJ_GET_CLASS(funObjParent); + JSClass* funObjParentClass = funObjParent->getClass(); if(IS_PROTO_CLASS(funObjParentClass)) { @@ -1670,7 +1670,7 @@ XPCWrappedNative::GetWrappedNativeOfJSObject(JSContext* cx, { // this is on two lines to make the compiler happy given the goto. JSClass* clazz; - clazz = STOBJ_GET_CLASS(cur); + clazz = cur->getClass(); if(IS_WRAPPER_CLASS(clazz)) { @@ -1727,7 +1727,7 @@ return_tearoff: // If we didn't find a wrapper using the given funobj and obj, try // again with obj's outer object, if it's got one. - JSClass *clazz = STOBJ_GET_CLASS(obj); + JSClass *clazz = obj->getClass(); if((clazz->flags & JSCLASS_IS_EXTENDED) && ((JSExtendedClass*)clazz)->outerObject) @@ -1736,7 +1736,7 @@ return_tearoff: // Protect against infinite recursion through XOWs. 
JSObject *unsafeObj; - clazz = STOBJ_GET_CLASS(outer); + clazz = outer->getClass(); if(clazz == &XPCCrossOriginWrapper::XOWClass.base && (unsafeObj = XPCWrapper::UnwrapXOW(cx, outer))) { @@ -2311,8 +2311,15 @@ XPCWrappedNative::CallMethod(XPCCallContext& ccx, nsISupports* qiresult = nsnull; { - AutoJSSuspendNonMainThreadRequest req(ccx.GetJSContext()); - invokeResult = callee->QueryInterface(*iid, (void**) &qiresult); + if(XPCPerThreadData::IsMainThread(ccx)) + { + invokeResult = callee->QueryInterface(*iid, (void**) &qiresult); + } + else + { + JSAutoSuspendRequest suspended(ccx); + invokeResult = callee->QueryInterface(*iid, (void**) &qiresult); + } } xpcc->SetLastResult(invokeResult); @@ -2722,10 +2729,18 @@ XPCWrappedNative::CallMethod(XPCCallContext& ccx, // do the invoke { - AutoJSSuspendNonMainThreadRequest req(ccx.GetJSContext()); - invokeResult = NS_InvokeByIndex(callee, vtblIndex, - paramCount + wantsOptArgc, - dispatchParams); + uint8 allParamCount = paramCount + wantsOptArgc; + if(XPCPerThreadData::IsMainThread(ccx)) + { + invokeResult = NS_InvokeByIndex(callee, vtblIndex, + allParamCount, dispatchParams); + } + else + { + JSAutoSuspendRequest suspended(ccx); + invokeResult = NS_InvokeByIndex(callee, vtblIndex, + allParamCount, dispatchParams); + } } xpcc->SetLastResult(invokeResult); diff --git a/js/src/xpconnect/src/xpcwrappednativeinfo.cpp b/js/src/xpconnect/src/xpcwrappednativeinfo.cpp index 8e6e3df0b66..519927b1de9 100644 --- a/js/src/xpconnect/src/xpcwrappednativeinfo.cpp +++ b/js/src/xpconnect/src/xpcwrappednativeinfo.cpp @@ -203,19 +203,11 @@ XPCNativeMember::Resolve(XPCCallContext& ccx, XPCNativeInterface* iface) const char *memberName = iface->GetMemberName(ccx, this); - jsrefcount suspendDepth = 0; - if(cx != ccx) { - // Switching contexts, suspend the old and enter the new request. - suspendDepth = JS_SuspendRequest(ccx); - JS_BeginRequest(cx); - } - - JSFunction *fun = JS_NewFunction(cx, callback, argc, flags, nsnull, - memberName); - - if(suspendDepth) { - JS_EndRequest(cx); - JS_ResumeRequest(ccx, suspendDepth); + JSFunction *fun; + // Switching contexts, suspend the old and enter the new request. 
+ { + JSAutoTransferRequest transfer(ccx, cx); + fun = JS_NewFunction(cx, callback, argc, flags, nsnull, memberName); } if(!fun) diff --git a/js/src/xpconnect/src/xpcwrappednativejsops.cpp b/js/src/xpconnect/src/xpcwrappednativejsops.cpp index fb7e1192a64..551ede0a388 100644 --- a/js/src/xpconnect/src/xpcwrappednativejsops.cpp +++ b/js/src/xpconnect/src/xpcwrappednativejsops.cpp @@ -853,7 +853,7 @@ XPC_WN_Equality(JSContext *cx, JSObject *obj, jsval v, JSBool *bp) return Throw(rv, cx); if(!*bp && !JSVAL_IS_PRIMITIVE(v) && - STOBJ_GET_CLASS(JSVAL_TO_OBJECT(v)) == &XPCSafeJSObjectWrapper::SJOWClass.base) + JSVAL_TO_OBJECT(v)->getClass() == &XPCSafeJSObjectWrapper::SJOWClass.base) { v = OBJECT_TO_JSVAL(XPCSafeJSObjectWrapper::GetUnsafeObject(cx, JSVAL_TO_OBJECT(v))); @@ -1327,7 +1327,7 @@ static JSBool XPC_WN_JSOp_Enumerate(JSContext *cx, JSObject *obj, JSIterateOp enum_op, jsval *statep, jsid *idp) { - JSClass *clazz = STOBJ_GET_CLASS(obj); + JSClass *clazz = obj->getClass(); if(!IS_WRAPPER_CLASS(clazz) || clazz == &XPC_WN_NoHelper_JSClass.base) { // obj must be a prototype object or a wrapper w/o a @@ -1517,8 +1517,7 @@ XPC_WN_JSOp_ThisObject(JSContext *cx, JSObject *obj) JSStackFrame *fp; nsIPrincipal *principal = secMan->GetCxSubjectPrincipalAndFrame(cx, &fp); - jsval retval = OBJECT_TO_JSVAL(obj); - JSAutoTempValueRooter atvr(cx, 1, &retval); + js::AutoValueRooter retval(cx, obj); if(principal && fp) { @@ -1535,7 +1534,7 @@ XPC_WN_JSOp_ThisObject(JSContext *cx, JSObject *obj) } nsresult rv = xpc->GetWrapperForObject(cx, obj, scope, principal, flags, - &retval); + retval.addr()); if(NS_FAILED(rv)) { XPCThrower::Throw(rv, cx); @@ -1543,7 +1542,7 @@ XPC_WN_JSOp_ThisObject(JSContext *cx, JSObject *obj) } } - return JSVAL_TO_OBJECT(retval); + return JSVAL_TO_OBJECT(retval.value()); } JSObjectOps * diff --git a/js/src/xpconnect/src/xpcwrappednativescope.cpp b/js/src/xpconnect/src/xpcwrappednativescope.cpp index 85517b4f096..3a990ffab3e 100644 --- a/js/src/xpconnect/src/xpcwrappednativescope.cpp +++ b/js/src/xpconnect/src/xpcwrappednativescope.cpp @@ -241,7 +241,7 @@ XPCWrappedNativeScope::SetGlobal(XPCCallContext& ccx, JSObject* aGlobal) mScriptObjectPrincipal = nsnull; // Now init our script object principal, if the new global has one - const JSClass* jsClass = STOBJ_GET_CLASS(aGlobal); + const JSClass* jsClass = aGlobal->getClass(); if(!(~jsClass->flags & (JSCLASS_HAS_PRIVATE | JSCLASS_PRIVATE_IS_NSISUPPORTS))) { @@ -720,7 +720,7 @@ XPCWrappedNativeScope* GetScopeOfObject(JSObject* obj) { nsISupports* supports; - JSClass* clazz = STOBJ_GET_CLASS(obj); + JSClass* clazz = obj->getClass(); JSBool isWrapper = IS_WRAPPER_CLASS(clazz); if(isWrapper && IS_SLIM_WRAPPER_OBJECT(obj)) diff --git a/js/src/xpconnect/tests/TestXPC.cpp b/js/src/xpconnect/tests/TestXPC.cpp index f4955f45771..7d6bfe5b5f6 100644 --- a/js/src/xpconnect/tests/TestXPC.cpp +++ b/js/src/xpconnect/tests/TestXPC.cpp @@ -562,7 +562,7 @@ TestArgFormatter(JSContext* jscontext, JSObject* glob, nsIXPConnect* xpc) // Prepare an array of arguments for JS_ConvertArguments jsval argv[5]; - JSAutoTempValueRooter tvr(jscontext, 5, argv); + js::AutoArrayRooter tvr(jscontext, JS_ARRAY_LENGTH(argv), argv); if (!PushArguments(jscontext, 5, argv, "s %ip %iv %is s", diff --git a/js/src/xpconnect/tests/mochitest/Makefile.in b/js/src/xpconnect/tests/mochitest/Makefile.in index c1d158b7224..5bb1394f12c 100644 --- a/js/src/xpconnect/tests/mochitest/Makefile.in +++ b/js/src/xpconnect/tests/mochitest/Makefile.in @@ -69,6 +69,7 @@ _TEST_FILES = 
bug500931_helper.html \ test_bug504877.html \ test_bug505915.html \ test_bug517163.html \ + test_bug553407.html \ test_cows.html \ test_frameWrapping.html \ $(NULL) diff --git a/js/src/xpconnect/tests/mochitest/test_bug553407.html b/js/src/xpconnect/tests/mochitest/test_bug553407.html new file mode 100644 index 00000000000..93f90dab60c --- /dev/null +++ b/js/src/xpconnect/tests/mochitest/test_bug553407.html @@ -0,0 +1,30 @@ + + + + + Test for Bug 553407 + + + + + +Mozilla Bug 553407 +

+ + diff --git a/layout/reftests/bugs/reftest.list b/layout/reftests/bugs/reftest.list index 56f8de60687..a1bce194941 100644 --- a/layout/reftests/bugs/reftest.list +++ b/layout/reftests/bugs/reftest.list @@ -1354,7 +1354,8 @@ fails-if(MOZ_WIDGET_TOOLKIT!="cocoa") == 488692-1.html 488692-1-ref.html # needs == 507762-2.html 507762-2-ref.html == 507762-3.html 507762-1-ref.html == 507762-4.html 507762-2-ref.html -== 508908-1.xul 508908-1-ref.xul +# see bug 556124 +fails == 508908-1.xul 508908-1-ref.xul == 508919-1.xhtml 508919-1-ref.xhtml == 509155-1.xhtml 509155-1-ref.xhtml == 512410.html 512410-ref.html diff --git a/modules/plugin/base/src/nsJSNPRuntime.cpp b/modules/plugin/base/src/nsJSNPRuntime.cpp index 8b61cd6c2ae..dfb92957dca 100644 --- a/modules/plugin/base/src/nsJSNPRuntime.cpp +++ b/modules/plugin/base/src/nsJSNPRuntime.cpp @@ -673,35 +673,35 @@ doInvoke(NPObject *npobj, NPIdentifier method, const NPVariant *args, } } - JSTempValueRooter tvr; - JS_PUSH_TEMP_ROOT(cx, 0, jsargs, &tvr); - - // Convert args - for (PRUint32 i = 0; i < argCount; ++i) { - jsargs[i] = NPVariantToJSVal(npp, cx, args + i); - ++tvr.count; - } - jsval v; JSBool ok; - if (ctorCall) { - JSObject *global = ::JS_GetGlobalForObject(cx, npjsobj->mJSObj); - JSObject *newObj = - ::JS_ConstructObjectWithArguments(cx, JS_GET_CLASS(cx, npjsobj->mJSObj), - nsnull, global, argCount, jsargs); + { + js::AutoArrayRooter tvr(cx, 0, jsargs); - if (newObj) { - v = OBJECT_TO_JSVAL(newObj); - ok = JS_TRUE; - } else { - ok = JS_FALSE; + // Convert args + for (PRUint32 i = 0; i < argCount; ++i) { + jsargs[i] = NPVariantToJSVal(npp, cx, args + i); + tvr.changeLength(i + 1); } - } else { - ok = ::JS_CallFunctionValue(cx, npjsobj->mJSObj, fv, argCount, jsargs, &v); - } - JS_POP_TEMP_ROOT(cx, &tvr); + if (ctorCall) { + JSObject *global = ::JS_GetGlobalForObject(cx, npjsobj->mJSObj); + JSObject *newObj = + ::JS_ConstructObjectWithArguments(cx, JS_GET_CLASS(cx, npjsobj->mJSObj), + nsnull, global, argCount, jsargs); + + if (newObj) { + v = OBJECT_TO_JSVAL(newObj); + ok = JS_TRUE; + } else { + ok = JS_FALSE; + } + } else { + ok = ::JS_CallFunctionValue(cx, npjsobj->mJSObj, fv, argCount, jsargs, &v); + } + + } if (jsargs != jsargs_buf) PR_Free(jsargs); @@ -837,7 +837,7 @@ nsJSObjWrapper::NP_SetProperty(NPObject *npobj, NPIdentifier identifier, AutoJSExceptionReporter reporter(cx); jsval v = NPVariantToJSVal(npp, cx, value); - JSAutoTempValueRooter tvr(cx, v); + js::AutoValueRooter tvr(cx, v); if (JSVAL_IS_STRING(id)) { JSString *str = JSVAL_TO_STRING(id); diff --git a/modules/plugin/base/src/nsNPAPIPlugin.cpp b/modules/plugin/base/src/nsNPAPIPlugin.cpp index e54f0fb76e0..263448b56ba 100644 --- a/modules/plugin/base/src/nsNPAPIPlugin.cpp +++ b/modules/plugin/base/src/nsNPAPIPlugin.cpp @@ -1745,7 +1745,7 @@ _evaluate(NPP npp, NPObject* npobj, NPString *script, NPVariant *result) // Root obj and the rval (below). 
diff --git a/modules/plugin/base/src/nsNPAPIPlugin.cpp b/modules/plugin/base/src/nsNPAPIPlugin.cpp
index e54f0fb76e0..263448b56ba 100644
--- a/modules/plugin/base/src/nsNPAPIPlugin.cpp
+++ b/modules/plugin/base/src/nsNPAPIPlugin.cpp
@@ -1745,7 +1745,7 @@ _evaluate(NPP npp, NPObject* npobj, NPString *script, NPVariant *result)

   // Root obj and the rval (below).
   jsval vec[] = { OBJECT_TO_JSVAL(obj), JSVAL_NULL };
-  JSAutoTempValueRooter tvr(cx, NS_ARRAY_LENGTH(vec), vec);
+  js::AutoArrayRooter tvr(cx, NS_ARRAY_LENGTH(vec), vec);
   jsval *rval = &vec[1];

   if (result) {
diff --git a/toolkit/components/Makefile.in b/toolkit/components/Makefile.in
index ffb46b565ae..30102d9b90a 100644
--- a/toolkit/components/Makefile.in
+++ b/toolkit/components/Makefile.in
@@ -69,6 +69,12 @@ PARALLEL_DIRS += \
   viewconfig \
   $(NULL)

+ifdef BUILD_CTYPES
+PARALLEL_DIRS += \
+  ctypes \
+  $(NULL)
+endif
+
 ifneq (,$(filter cocoa, $(MOZ_WIDGET_TOOLKIT)))
 TOOL_DIRS += alerts
 else
diff --git a/js/ctypes/Makefile.in b/toolkit/components/ctypes/Makefile.in
similarity index 84%
rename from js/ctypes/Makefile.in
rename to toolkit/components/ctypes/Makefile.in
index 4b001a0fe29..7ee6c98be47 100644
--- a/js/ctypes/Makefile.in
+++ b/toolkit/components/ctypes/Makefile.in
@@ -36,7 +36,7 @@
 #
 # ***** END LICENSE BLOCK *****

-DEPTH = ../..
+DEPTH = ../../..
 topsrcdir = @top_srcdir@
 srcdir = @srcdir@
 VPATH = @srcdir@
@@ -47,38 +47,19 @@ MODULE = jsctypes
 MODULE_NAME = jsctypes
 GRE_MODULE = 1

-# package the js module whether ctypes is enabled or not.
 EXTRA_JS_MODULES = \
   ctypes.jsm \
   $(NULL)

-ifdef BUILD_CTYPES
-
 LIBRARY_NAME = jsctypes
 LIBXUL_LIBRARY = 1
 EXPORT_LIBRARY = 1
 IS_COMPONENT = 1

 CPPSRCS = \
-  Library.cpp \
-  CTypes.cpp \
   Module.cpp \
   $(NULL)

-LOCAL_INCLUDES = \
-  -Ilibffi/include \
-  $(NULL)
-
-ifeq ($(OS_ARCH),OS2)
-# libffi builds an aout lib on OS/2; convert it to an OMF lib.
-libffi/.libs/libffi.$(LIB_SUFFIX): libffi/.libs/libffi.a
-	emxomf $<
-endif
-
-SHARED_LIBRARY_LIBS = \
-  libffi/.libs/libffi.$(LIB_SUFFIX) \
-  $(NULL)
-
 EXTRA_DSO_LDOPTS += \
   $(MOZ_COMPONENT_LIBS) \
   $(MOZ_JS_LIBS) \
@@ -88,6 +69,4 @@ ifdef ENABLE_TESTS
 DIRS += tests
 endif

-endif
-
 include $(topsrcdir)/config/rules.mk
diff --git a/js/ctypes/Module.cpp b/toolkit/components/ctypes/Module.cpp
similarity index 72%
rename from js/ctypes/Module.cpp
rename to toolkit/components/ctypes/Module.cpp
index c7112400d40..b0f9614154d 100644
--- a/js/ctypes/Module.cpp
+++ b/toolkit/components/ctypes/Module.cpp
@@ -38,8 +38,7 @@
  * ***** END LICENSE BLOCK ***** */

 #include "Module.h"
-#include "Library.h"
-#include "CTypes.h"
+#include "jsapi.h"
 #include "nsIGenericFactory.h"
 #include "nsMemory.h"
@@ -70,29 +69,6 @@ Module::~Module()
 #define XPC_MAP_FLAGS nsIXPCScriptable::WANT_CALL
 #include "xpc_map_end.h"

-NS_IMETHODIMP
-Module::Call(nsIXPConnectWrappedNative* wrapper,
-             JSContext* cx,
-             JSObject* obj,
-             PRUint32 argc,
-             jsval* argv,
-             jsval* vp,
-             PRBool* _retval)
-{
-  JSObject* global = JS_GetGlobalObject(cx);
-  *_retval = Init(cx, global);
-  return NS_OK;
-}
-
-#define CTYPESFN_FLAGS \
-  (JSFUN_FAST_NATIVE | JSPROP_ENUMERATE | JSPROP_READONLY | JSPROP_PERMANENT)
-
-static JSFunctionSpec sModuleFunctions[] = {
-  JS_FN("open", Library::Open, 1, CTYPESFN_FLAGS),
-  JS_FN("cast", CData::Cast, 2, CTYPESFN_FLAGS),
-  JS_FS_END
-};
-
 static JSBool
 SealObjectAndPrototype(JSContext* cx, JSObject* parent, const char* name)
 {
@@ -109,39 +85,37 @@ SealObjectAndPrototype(JSContext* cx, JSObject* parent, const char* name)
   JS_SealObject(cx, prototype, JS_FALSE);
 }

-JSBool
-Module::Init(JSContext* cx, JSObject* aGlobal)
+static JSBool
+InitAndSealCTypesClass(JSContext* cx, JSObject* global)
 {
-  // attach ctypes property to global object
-  JSObject* ctypes = JS_NewObject(cx, NULL, NULL, NULL);
-  if (!ctypes)
+  // Init the ctypes object.
+  if (!JS_InitCTypesClass(cx, global))
     return false;

-  if (!JS_DefineProperty(cx, aGlobal, "ctypes", OBJECT_TO_JSVAL(ctypes),
-                         NULL, NULL, JSPROP_ENUMERATE | JSPROP_READONLY | JSPROP_PERMANENT))
-    return false;
-
-  if (!InitTypeClasses(cx, ctypes))
-    return false;
-
-  // attach API functions
-  if (!JS_DefineFunctions(cx, ctypes, sModuleFunctions))
-    return false;
-
-  // Seal the ctypes object, to prevent modification. (This single object
-  // instance is shared amongst everyone who imports the ctypes module.)
-  if (!JS_SealObject(cx, ctypes, JS_FALSE))
-    return false;
-
-  // Seal up Object, Function, and Array and their prototypes.
-  if (!SealObjectAndPrototype(cx, aGlobal, "Object") ||
-      !SealObjectAndPrototype(cx, aGlobal, "Function") ||
-      !SealObjectAndPrototype(cx, aGlobal, "Array"))
+  // Seal up Object, Function, and Array and their prototypes. (This single
+  // object instance is shared amongst everyone who imports the ctypes module.)
+  if (!SealObjectAndPrototype(cx, global, "Object") ||
+      !SealObjectAndPrototype(cx, global, "Function") ||
+      !SealObjectAndPrototype(cx, global, "Array"))
     return false;

   // Finally, seal the global object, for good measure. (But not recursively;
   // this breaks things.)
-  return JS_SealObject(cx, aGlobal, JS_FALSE);
+  return JS_SealObject(cx, global, JS_FALSE);
+}
+
+NS_IMETHODIMP
+Module::Call(nsIXPConnectWrappedNative* wrapper,
+             JSContext* cx,
+             JSObject* obj,
+             PRUint32 argc,
+             jsval* argv,
+             jsval* vp,
+             PRBool* _retval)
+{
+  JSObject* global = JS_GetGlobalObject(cx);
+  *_retval = InitAndSealCTypesClass(cx, global);
+  return NS_OK;
 }

 }
diff --git a/js/ctypes/Module.h b/toolkit/components/ctypes/Module.h
similarity index 94%
rename from js/ctypes/Module.h
rename to toolkit/components/ctypes/Module.h
index 993c2521bc3..bf77dfe3600 100644
--- a/js/ctypes/Module.h
+++ b/toolkit/components/ctypes/Module.h
@@ -52,9 +52,6 @@ public:

   Module();

-  // Creates the ctypes object and attaches it to the global object.
-  JSBool Init(JSContext* aContext, JSObject* aGlobal);
-
 private:
   ~Module();
 };
diff --git a/js/ctypes/ctypes.jsm b/toolkit/components/ctypes/ctypes.jsm
similarity index 100%
rename from js/ctypes/ctypes.jsm
rename to toolkit/components/ctypes/ctypes.jsm
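With Library.cpp and CTypes.cpp removed from this component, the entire ctypes setup collapses into the single JS_InitCTypesClass() engine entry point used above; chrome script keeps reaching the object through Components.utils.import("resource://gre/modules/ctypes.jsm"). A minimal embedder-side sketch, assuming the JSAPI of this era (InitCTypes is an illustrative wrapper, not code from the patch):

    #include "jsapi.h"

    // JS_InitCTypesClass() defines a `ctypes` object on the global and
    // populates it. The sealing done by InitAndSealCTypesClass() above is
    // policy this component layers on top, not part of the engine call.
    static JSBool
    InitCTypes(JSContext* cx, JSObject* global)
    {
        return JS_InitCTypesClass(cx, global);
    }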
diff --git a/js/ctypes/tests/Makefile.in b/toolkit/components/ctypes/tests/Makefile.in
similarity index 97%
rename from js/ctypes/tests/Makefile.in
rename to toolkit/components/ctypes/tests/Makefile.in
index 69a5162f069..78eb03f143b 100644
--- a/js/ctypes/tests/Makefile.in
+++ b/toolkit/components/ctypes/tests/Makefile.in
@@ -36,7 +36,7 @@
 #
 # ***** END LICENSE BLOCK *****

-DEPTH = ../../..
+DEPTH = ../../../..
 topsrcdir = @top_srcdir@
 srcdir = @srcdir@
 VPATH = @srcdir@
@@ -51,6 +51,10 @@ NO_DIST_INSTALL = 1

 CPPSRCS = jsctypes-test.cpp

+LOCAL_INCLUDES = \
+  -I$(topsrcdir)/js/src/ctypes \
+  $(NULL)
+
 EXTRA_DSO_LDOPTS += \
   $(XPCOM_STANDALONE_GLUE_LDOPTS) \
   $(MOZALLOC_LIB) \
diff --git a/js/ctypes/tests/jsctypes-test.cpp b/toolkit/components/ctypes/tests/jsctypes-test.cpp
similarity index 99%
rename from js/ctypes/tests/jsctypes-test.cpp
rename to toolkit/components/ctypes/tests/jsctypes-test.cpp
index 63d1032b959..ebde2b228c1 100644
--- a/js/ctypes/tests/jsctypes-test.cpp
+++ b/toolkit/components/ctypes/tests/jsctypes-test.cpp
@@ -113,7 +113,7 @@ get_##name##_stats(size_t* align, size_t* size, size_t* nalign, size_t* nsize, \
   offsets[2] = offsetof(nested_##name, c); \
 }

-#include "../typedefs.h"
+#include "typedefs.h"

 #if defined(_WIN32) && !defined(__WIN64)
@@ -156,7 +156,7 @@ sum_many_##name##_stdcall( \
   return a + b + c + d + e + f + g + h + i + j + k + l + m + n + o + p + q + r;\
 }

-#include "../typedefs.h"
+#include "typedefs.h"

 void NS_STDCALL
 test_void_t_stdcall()
diff --git a/js/ctypes/tests/jsctypes-test.h b/toolkit/components/ctypes/tests/jsctypes-test.h
similarity index 99%
rename from js/ctypes/tests/jsctypes-test.h
rename to toolkit/components/ctypes/tests/jsctypes-test.h
index 1ba322fa1b7..df86c200c34 100644
--- a/js/ctypes/tests/jsctypes-test.h
+++ b/toolkit/components/ctypes/tests/jsctypes-test.h
@@ -67,7 +67,7 @@ NS_EXTERN_C
                     size_t* nalign, size_t* nsize, \
                     size_t offsets[]);

-#include "../typedefs.h"
+#include "typedefs.h"

 #if defined(_WIN32) && !defined(__WIN64)
 EXPORT_STDCALL(void) test_void_t_stdcall();
@@ -87,7 +87,7 @@ NS_EXTERN_C
                type, type, type, type, type, type, type, type, type, \
                type, type, type, type, type, type, type, type, type);

-#include "../typedefs.h"
+#include "typedefs.h"

 #endif /* defined(_WIN32) && !defined(__WIN64) */
diff --git a/js/ctypes/tests/unit/test_jsctypes.js.in b/toolkit/components/ctypes/tests/unit/test_jsctypes.js.in
similarity index 99%
rename from js/ctypes/tests/unit/test_jsctypes.js.in
rename to toolkit/components/ctypes/tests/unit/test_jsctypes.js.in
index 19d799ecb94..5d1ad135eb7 100644
--- a/js/ctypes/tests/unit/test_jsctypes.js.in
+++ b/toolkit/components/ctypes/tests/unit/test_jsctypes.js.in
@@ -75,8 +75,8 @@ function run_test()
   // Test ctypes.CType and ctypes.CData are set up correctly.
   run_abstract_class_tests();

-  // open the library with an nsILocalFile
-  let libfile = do_get_file(CTYPES_TEST_LIB);
+  // open the library
+  let libfile = do_get_file(CTYPES_TEST_LIB).path;
   let library = ctypes.open(libfile);

   // Make sure we can call a function in the library.
@@ -193,11 +193,6 @@ function run_test()
   run_closure_tests(library);
   run_variadic_tests(library);

-  // test the string version of ctypes.open() as well
-  let libpath = libfile.path;
-  library = ctypes.open(libpath);
-  run_void_tests(library);
-
   // test library.close
   let test_void_t = library.declare("test_void_t_cdecl", ctypes.default_abi, ctypes.void_t);
   library.close();
@@ -207,7 +202,7 @@ function run_test()
   }, Error);

   // test that library functions throw when bound to other objects
-  library = ctypes.open(libpath);
+  library = ctypes.open(libfile);
   let obj = {};
   obj.declare = library.declare;
   do_check_throws(function () { run_void_tests(obj); }, Error);
diff --git a/toolkit/toolkit-makefiles.sh b/toolkit/toolkit-makefiles.sh
index c8d9eb99d3f..b10a6d66a17 100644
--- a/toolkit/toolkit-makefiles.sh
+++ b/toolkit/toolkit-makefiles.sh
@@ -213,10 +213,6 @@ MAKEFILES_jsdebugger="
   js/jsd/idl/Makefile
 "

-MAKEFILES_jsctypes="
-  js/ctypes/Makefile
-"
-
 MAKEFILES_content="
   content/Makefile
   content/base/Makefile
@@ -774,6 +770,11 @@ MAKEFILES_xulapp="
   toolkit/xre/Makefile
 "

+MAKEFILES_ctypes="
+  toolkit/components/ctypes/Makefile
+  toolkit/components/ctypes/tests/Makefile
+"
+
 MAKEFILES_libpr0n="
   modules/libpr0n/Makefile
   modules/libpr0n/build/Makefile
diff --git a/toolkit/toolkit-tiers.mk b/toolkit/toolkit-tiers.mk
index da2d328edfc..ef897fe0314 100644
--- a/toolkit/toolkit-tiers.mk
+++ b/toolkit/toolkit-tiers.mk
@@ -116,16 +116,9 @@ endif

 tier_platform_dirs += \
 		js/src/xpconnect \
-		js/ctypes \
 		intl/chardet \
 		$(NULL)

-ifdef BUILD_CTYPES
-tier_platform_staticdirs += \
-		js/ctypes/libffi \
-		$(NULL)
-endif
-
 ifdef MOZ_ENABLE_GTK2
 ifdef MOZ_X11
 tier_platform_dirs += widget/src/gtkxtbin
diff --git a/xpinstall/src/nsXPITriggerInfo.cpp b/xpinstall/src/nsXPITriggerInfo.cpp
index 0ef48ccf9d4..0ec4b228469 100644
--- a/xpinstall/src/nsXPITriggerInfo.cpp
+++ b/xpinstall/src/nsXPITriggerInfo.cpp
@@ -247,7 +247,7 @@ XPITriggerEvent::Run()

     // Build arguments into rooted jsval array
     jsval args[2] = { JSVAL_NULL, JSVAL_NULL };
-    JSAutoTempValueRooter tvr(cx, JS_ARRAY_LENGTH(args), args);
+    js::AutoArrayRooter tvr(cx, JS_ARRAY_LENGTH(args), args);

     // args[0] is the URL
     JSString *str = JS_NewUCStringCopyZ(cx,
                       reinterpret_cast<const jschar*>(URL.get()));