Imported Upstream version 4.8.0.309

Former-commit-id: 5f9c6ae75f295e057a7d2971f3a6df4656fa8850
This commit is contained in:
Xamarin Public Jenkins (auto-signing)
2016-11-10 13:04:39 +00:00
parent ee1447783b
commit 94b2861243
4912 changed files with 390737 additions and 49310 deletions

View File

@@ -226,6 +226,8 @@ DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AOT_BUILD_FLAGS = @AOT_BUILD_FLAGS@
AOT_RUN_FLAGS = @AOT_RUN_FLAGS@
API_VER = @API_VER@
AR = @AR@
AS = @AS@
@@ -234,6 +236,11 @@ AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
BOEHM_DEFINES = @BOEHM_DEFINES@
BTLS_ARCH = @BTLS_ARCH@
BTLS_CFLAGS = @BTLS_CFLAGS@
BTLS_CMAKE_ARGS = @BTLS_CMAKE_ARGS@
BTLS_PLATFORM = @BTLS_PLATFORM@
BTLS_ROOT = @BTLS_ROOT@
BUILD_EXEEXT = @BUILD_EXEEXT@
CC = @CC@
CCAS = @CCAS@
@@ -243,6 +250,7 @@ CCDEPMODE = @CCDEPMODE@
CC_FOR_BUILD = @CC_FOR_BUILD@
CFLAGS = @CFLAGS@
CFLAGS_FOR_BUILD = @CFLAGS_FOR_BUILD@
CMAKE = @CMAKE@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
CXX = @CXX@
@@ -263,7 +271,6 @@ ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
ENABLE_PERF_EVENTS = @ENABLE_PERF_EVENTS@
EXEEXT = @EXEEXT@
FGREP = @FGREP@
GDKX11 = @GDKX11@
@@ -278,10 +285,12 @@ HAVE_MSGFMT = @HAVE_MSGFMT@
HOST_CC = @HOST_CC@
INSTALL = @INSTALL@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_MOBILE_STATIC = @INSTALL_MOBILE_STATIC@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
INTL = @INTL@
INVARIANT_AOT_OPTIONS = @INVARIANT_AOT_OPTIONS@
LD = @LD@
LDFLAGS = @LDFLAGS@
LIBC = @LIBC@
@@ -328,6 +337,7 @@ PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
PKG_CONFIG = @PKG_CONFIG@
PLATFORM_AOT_SUFFIX = @PLATFORM_AOT_SUFFIX@
RANLIB = @RANLIB@
SED = @SED@
SET_MAKE = @SET_MAKE@
@@ -337,6 +347,7 @@ SHELL = @SHELL@
SQLITE = @SQLITE@
SQLITE3 = @SQLITE3@
STRIP = @STRIP@
TEST_PROFILE = @TEST_PROFILE@
USE_NLS = @USE_NLS@
VERSION = @VERSION@
VTUNE_CFLAGS = @VTUNE_CFLAGS@

View File

@@ -26,7 +26,13 @@
#elif defined(TARGET_AMD64)
#ifdef HOST_WIN32
/* The Windows x64 ABI defines no "red zone". The ABI states:
"All memory beyond the current address of RSP is considered volatile" */
#define REDZONE_SIZE 0
#else
#define REDZONE_SIZE 128
#endif
#elif defined(TARGET_POWERPC)

View File

@@ -410,7 +410,7 @@ static void
sgen_card_table_clear_cards (void)
{
/*XXX we could do this in 2 ways. using mincore or iterating over all sections/los objects */
sgen_major_collector_iterate_live_block_ranges (clear_cards);
sgen_major_collector_iterate_block_ranges (clear_cards);
sgen_los_iterate_live_block_ranges (clear_cards);
}
@@ -431,7 +431,7 @@ sgen_card_table_scan_remsets (ScanCopyContext ctx)
#ifdef SGEN_HAVE_OVERLAPPING_CARDS
/*FIXME we should have a bit on each block/los object telling if the object have marked cards.*/
/*First we copy*/
sgen_major_collector_iterate_live_block_ranges (move_cards_to_shadow_table);
sgen_major_collector_iterate_block_ranges (move_cards_to_shadow_table);
sgen_los_iterate_live_block_ranges (move_cards_to_shadow_table);
/*Then we clear*/

View File

@@ -8,6 +8,27 @@
* Licensed under the MIT license. See LICENSE file in the project root for full license information.
*/
/*
* Defines
*
* GCObject* copy_object_no_checks (GCObject *obj, SgenGrayQueue *queue)
*
* which allocates new space for `obj`, copies it there, forwards `obj` to its new location,
* and enqueues the copy into `queue`.
*
* To be defined by the includer:
*
* COLLECTOR_SERIAL_ALLOC_FOR_PROMOTION(vt, obj, objsize, has_refs)
*
 * 	Allocates space for promoting object `obj`, with size `objsize`, and initializes the
* vtable with `vt`. `has_refs` indicates whether the object contains references.
*
* collector_pin_object(obj, queue)
*
* Called when no space for `obj` could be allocated. It must pin `obj` and enqueue it into
* `queue` for scanning.
*/
extern guint64 stat_copy_object_called_nursery;
extern guint64 stat_objects_copied_nursery;
@@ -73,3 +94,6 @@ copy_object_no_checks (GCObject *obj, SgenGrayQueue *queue)
return (GCObject *)destination;
}
#undef COLLECTOR_SERIAL_ALLOC_FOR_PROMOTION
#undef collector_pin_object

View File

@@ -149,15 +149,16 @@ static gboolean missing_remsets;
*/
#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj) do { \
if (*(ptr) && sgen_ptr_in_nursery ((char*)*(ptr))) { \
if (!sgen_get_remset ()->find_address ((char*)(ptr)) && !sgen_cement_lookup (*(ptr))) { \
GCVTable __vt = SGEN_LOAD_VTABLE (obj); \
SGEN_LOG (0, "Oldspace->newspace reference %p at offset %zd in object %p (%s.%s) not found in remsets.", *(ptr), (char*)(ptr) - (char*)(obj), (obj), sgen_client_vtable_get_namespace (__vt), sgen_client_vtable_get_name (__vt)); \
binary_protocol_missing_remset ((obj), __vt, (int) ((char*)(ptr) - (char*)(obj)), *(ptr), (gpointer)LOAD_VTABLE(*(ptr)), object_is_pinned (*(ptr))); \
if (!object_is_pinned (*(ptr))) \
missing_remsets = TRUE; \
} \
} \
if (*(ptr) && sgen_ptr_in_nursery ((char*)*(ptr))) { \
if (!sgen_get_remset ()->find_address ((char*)(ptr)) && !sgen_cement_lookup (*(ptr))) { \
GCVTable __vt = SGEN_LOAD_VTABLE (obj); \
gboolean is_pinned = object_is_pinned (*(ptr)); \
SGEN_LOG (0, "Oldspace->newspace reference %p at offset %zd in object %p (%s.%s) not found in remsets%s.", *(ptr), (char*)(ptr) - (char*)(obj), (obj), sgen_client_vtable_get_namespace (__vt), sgen_client_vtable_get_name (__vt), is_pinned ? ", but object is pinned" : ""); \
binary_protocol_missing_remset ((obj), __vt, (int) ((char*)(ptr) - (char*)(obj)), *(ptr), (gpointer)LOAD_VTABLE(*(ptr)), is_pinned); \
if (!is_pinned) \
missing_remsets = TRUE; \
} \
} \
} while (0)
/*
@@ -181,7 +182,7 @@ check_consistency_callback (GCObject *obj, size_t size, void *dummy)
* Assumes the world is stopped.
*/
void
sgen_check_consistency (void)
sgen_check_remset_consistency (void)
{
// Need to add more checks
@@ -196,6 +197,8 @@ sgen_check_consistency (void)
SGEN_LOG (1, "Heap consistency check done.");
if (missing_remsets)
binary_protocol_flush_buffers (TRUE);
if (!binary_protocol_is_enabled ())
g_assert (!missing_remsets);
}
@@ -213,7 +216,7 @@ is_major_or_los_object_marked (GCObject *obj)
#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj) do { \
if (*(ptr) && !sgen_ptr_in_nursery ((char*)*(ptr)) && !is_major_or_los_object_marked ((GCObject*)*(ptr))) { \
if (!sgen_get_remset ()->find_address_with_cards (start, cards, (char*)(ptr))) { \
if (!cards || !sgen_get_remset ()->find_address_with_cards (start, cards, (char*)(ptr))) { \
GCVTable __vt = SGEN_LOAD_VTABLE (obj); \
SGEN_LOG (0, "major->major reference %p at offset %zd in object %p (%s.%s) not found in remsets.", *(ptr), (char*)(ptr) - (char*)(obj), (obj), sgen_client_vtable_get_namespace (__vt), sgen_client_vtable_get_name (__vt)); \
binary_protocol_missing_remset ((obj), __vt, (int) ((char*)(ptr) - (char*)(obj)), *(ptr), (gpointer)LOAD_VTABLE(*(ptr)), object_is_pinned (*(ptr))); \
@@ -240,8 +243,6 @@ check_mod_union_callback (GCObject *obj, size_t size, void *dummy)
else
cards = sgen_get_major_collector ()->get_cardtable_mod_union_for_reference (start);
SGEN_ASSERT (0, cards, "we must have mod union for marked major objects");
#include "sgen-scan-object.h"
}
@@ -250,7 +251,7 @@ sgen_check_mod_union_consistency (void)
{
missing_remsets = FALSE;
major_collector.iterate_objects (ITERATE_OBJECTS_ALL, (IterateObjectCallbackFunc)check_mod_union_callback, (void*)FALSE);
major_collector.iterate_objects (ITERATE_OBJECTS_SWEEP_ALL, (IterateObjectCallbackFunc)check_mod_union_callback, (void*)FALSE);
sgen_los_iterate_objects ((IterateObjectCallbackFunc)check_mod_union_callback, (void*)TRUE);
@@ -426,15 +427,15 @@ missing_remset_spew (char *obj, char **slot)
FIXME Flag missing remsets due to pinning as non fatal
*/
#undef HANDLE_PTR
#define HANDLE_PTR(ptr,obj) do { \
if (*(char**)ptr) { \
#define HANDLE_PTR(ptr,obj) do { \
if (*(char**)ptr) { \
if (!is_valid_object_pointer (*(char**)ptr)) { \
bad_pointer_spew ((char*)obj, (char**)ptr); \
} else if (!sgen_ptr_in_nursery (obj) && sgen_ptr_in_nursery ((char*)*ptr)) { \
if (!sgen_get_remset ()->find_address ((char*)(ptr)) && !sgen_cement_lookup (*(ptr)) && (!allow_missing_pinned || !SGEN_OBJECT_IS_PINNED (*(ptr)))) \
missing_remset_spew ((char*)obj, (char**)ptr); \
} \
} \
bad_pointer_spew ((char*)obj, (char**)ptr); \
} else if (!sgen_ptr_in_nursery (obj) && sgen_ptr_in_nursery ((char*)*ptr)) { \
if (!allow_missing_pinned && !SGEN_OBJECT_IS_PINNED (*(ptr)) && !sgen_get_remset ()->find_address ((char*)(ptr)) && !sgen_cement_lookup (*(ptr))) \
missing_remset_spew ((char*)obj, (char**)ptr); \
} \
} \
} while (0)
static void
@@ -442,7 +443,7 @@ verify_object_pointers_callback (GCObject *obj, size_t size, void *data)
{
char *start = (char*)obj;
gboolean allow_missing_pinned = (gboolean) (size_t) data;
SgenDescriptor desc = sgen_obj_get_descriptor (obj);
SgenDescriptor desc = sgen_obj_get_descriptor_safe (obj);
#include "sgen-scan-object.h"
}

View File

@@ -1 +1 @@
921c94696e63d97030b39de7cc7cb998792fc575
ff078a716739439b233abc2afa39189035ed8c24

View File

@@ -116,16 +116,40 @@ extern guint64 stat_objects_copied_major;
g_error (__VA_ARGS__); \
} } while (0)
#ifndef HOST_WIN32
# define LOG_TIMESTAMP \
do { \
time_t t; \
struct tm tod; \
time(&t); \
localtime_r(&t, &tod); \
strftime(logTime, sizeof(logTime), "%Y-%m-%d %H:%M:%S", &tod); \
} while (0)
#else
# define LOG_TIMESTAMP \
do { \
time_t t; \
struct tm *tod; \
time(&t); \
tod = localtime(&t); \
strftime(logTime, sizeof(logTime), "%F %T", tod); \
} while (0)
#endif
#define SGEN_LOG(level, format, ...) do { \
if (G_UNLIKELY ((level) <= SGEN_MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) { \
mono_gc_printf (gc_debug_file, format "\n", ##__VA_ARGS__); \
char logTime[80]; \
LOG_TIMESTAMP; \
mono_gc_printf (gc_debug_file, "%s " format "\n", logTime, ##__VA_ARGS__); \
} } while (0)
#define SGEN_COND_LOG(level, cond, format, ...) do { \
if (G_UNLIKELY ((level) <= SGEN_MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) { \
if (cond) \
mono_gc_printf (gc_debug_file, format "\n", ##__VA_ARGS__); \
if (G_UNLIKELY ((level) <= SGEN_MAX_DEBUG_LEVEL && (level) <= gc_debug_level)) { \
if (cond) { \
char logTime[80]; \
LOG_TIMESTAMP; \
mono_gc_printf (gc_debug_file, "%s " format "\n", logTime, ##__VA_ARGS__); \
} \
} } while (0)
extern int gc_debug_level;
@@ -263,6 +287,7 @@ sgen_get_nursery_end (void)
List of what each bit on of the vtable gc bits means.
*/
enum {
// When the Java bridge has determined an object is "bridged", it uses these two bits to cache that information.
SGEN_GC_BIT_BRIDGE_OBJECT = 1,
SGEN_GC_BIT_BRIDGE_OPAQUE_OBJECT = 2,
SGEN_GC_BIT_FINALIZER_AWARE = 4,
@@ -428,9 +453,9 @@ void* sgen_alloc_internal_dynamic (size_t size, int type, gboolean assert_on_fai
void sgen_free_internal_dynamic (void *addr, size_t size, int type);
void sgen_pin_stats_enable (void);
void sgen_pin_stats_register_object (GCObject *obj, size_t size);
void sgen_pin_stats_register_object (GCObject *obj, int generation);
void sgen_pin_stats_register_global_remset (GCObject *obj);
void sgen_pin_stats_print_class_stats (void);
void sgen_pin_stats_report (void);
void sgen_sort_addresses (void **array, size_t size);
void sgen_add_to_global_remset (gpointer ptr, GCObject *obj);
@@ -568,12 +593,12 @@ sgen_update_reference (GCObject **p, GCObject *o, gboolean allow_null)
typedef void (*sgen_cardtable_block_callback) (mword start, mword size);
void sgen_major_collector_iterate_live_block_ranges (sgen_cardtable_block_callback callback);
void sgen_major_collector_iterate_block_ranges (sgen_cardtable_block_callback callback);
typedef enum {
ITERATE_OBJECTS_SWEEP = 1,
ITERATE_OBJECTS_NON_PINNED = 2,
ITERATE_OBJECTS_PINNED = 4,
ITERATE_OBJECTS_ALL = ITERATE_OBJECTS_NON_PINNED | ITERATE_OBJECTS_PINNED,
ITERATE_OBJECTS_SWEEP_NON_PINNED = ITERATE_OBJECTS_SWEEP | ITERATE_OBJECTS_NON_PINNED,
ITERATE_OBJECTS_SWEEP_PINNED = ITERATE_OBJECTS_SWEEP | ITERATE_OBJECTS_PINNED,
ITERATE_OBJECTS_SWEEP_ALL = ITERATE_OBJECTS_SWEEP | ITERATE_OBJECTS_NON_PINNED | ITERATE_OBJECTS_PINNED
@@ -622,6 +647,7 @@ struct _SgenMajorCollector {
void (*pin_major_object) (GCObject *obj, SgenGrayQueue *queue);
void (*scan_card_table) (CardTableScanType scan_type, ScanCopyContext ctx);
void (*iterate_live_block_ranges) (sgen_cardtable_block_callback callback);
void (*iterate_block_ranges) (sgen_cardtable_block_callback callback);
void (*update_cardtable_mod_union) (void);
void (*init_to_space) (void);
void (*sweep) (void);
@@ -978,7 +1004,7 @@ GCObject* sgen_alloc_obj_mature (GCVTable vtable, size_t size);
/* Debug support */
void sgen_check_consistency (void);
void sgen_check_remset_consistency (void);
void sgen_check_mod_union_consistency (void);
void sgen_check_major_refs (void);
void sgen_check_whole_heap (gboolean allow_missing_pinning);

View File

@@ -38,6 +38,12 @@ guint64 stat_gray_queue_dequeue_slow_path;
#define STATE_ASSERT(s,v)
#endif
/*
* Whenever we dispose a gray queue, we save its free list. Then, in the next collection,
* we reuse that free list for the new gray queue.
*/
static GrayQueueSection *last_gray_queue_free_list;
void
sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue)
{
@@ -212,48 +218,40 @@ sgen_gray_object_queue_trim_free_list (SgenGrayQueue *queue)
}
void
sgen_gray_object_queue_init (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func)
sgen_gray_object_queue_init (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func, gboolean reuse_free_list)
{
g_assert (sgen_gray_object_queue_is_empty (queue));
memset (queue, 0, sizeof (SgenGrayQueue));
queue->alloc_prepare_func = NULL;
queue->alloc_prepare_data = NULL;
#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
queue->enqueue_check_func = enqueue_check_func;
#endif
if (reuse_free_list) {
queue->free_list = last_gray_queue_free_list;
last_gray_queue_free_list = NULL;
}
}
void
sgen_gray_object_queue_dispose (SgenGrayQueue *queue)
{
SGEN_ASSERT (0, sgen_gray_object_queue_is_empty (queue), "Why are we disposing a gray queue that's not empty?");
/* Free the extra sections allocated during the last collection */
sgen_gray_object_queue_trim_free_list (queue);
}
static void
invalid_prepare_func (SgenGrayQueue *queue)
{
g_assert_not_reached ();
SGEN_ASSERT (0, !last_gray_queue_free_list, "Are we disposing two gray queues after another?");
last_gray_queue_free_list = queue->free_list;
/* just to make sure */
memset (queue, 0, sizeof (SgenGrayQueue));
}
void
sgen_gray_object_queue_init_invalid (SgenGrayQueue *queue)
sgen_gray_queue_set_alloc_prepare (SgenGrayQueue *queue, GrayQueueAllocPrepareFunc alloc_prepare_func)
{
sgen_gray_object_queue_init (queue, NULL);
queue->alloc_prepare_func = invalid_prepare_func;
queue->alloc_prepare_data = NULL;
}
void
sgen_gray_queue_set_alloc_prepare (SgenGrayQueue *queue, GrayQueueAllocPrepareFunc alloc_prepare_func, void *data)
{
SGEN_ASSERT (0, !queue->alloc_prepare_func && !queue->alloc_prepare_data, "Can't set gray queue alloc-prepare twice");
SGEN_ASSERT (0, !queue->alloc_prepare_func, "Can't set gray queue alloc-prepare twice");
queue->alloc_prepare_func = alloc_prepare_func;
queue->alloc_prepare_data = data;
}
void
sgen_gray_object_queue_init_with_alloc_prepare (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func,
GrayQueueAllocPrepareFunc alloc_prepare_func, void *data)
{
sgen_gray_object_queue_init (queue, enqueue_check_func);
sgen_gray_queue_set_alloc_prepare (queue, alloc_prepare_func, data);
}
void
@@ -268,13 +266,6 @@ sgen_gray_object_queue_deinit (SgenGrayQueue *queue)
}
}
void
sgen_gray_object_queue_disable_alloc_prepare (SgenGrayQueue *queue)
{
queue->alloc_prepare_func = NULL;
queue->alloc_prepare_data = NULL;
}
static void
lock_section_queue (SgenSectionGrayQueue *queue)
{

View File

@@ -97,7 +97,6 @@ struct _SgenGrayQueue {
#ifdef SGEN_CHECK_GRAY_OBJECT_ENQUEUE
GrayQueueEnqueueCheckFunc enqueue_check_func;
#endif
void *alloc_prepare_data;
};
typedef struct _SgenSectionGrayQueue SgenSectionGrayQueue;
@@ -130,13 +129,10 @@ GrayQueueEntry sgen_gray_object_dequeue (SgenGrayQueue *queue);
GrayQueueSection* sgen_gray_object_dequeue_section (SgenGrayQueue *queue);
void sgen_gray_object_enqueue_section (SgenGrayQueue *queue, GrayQueueSection *section);
void sgen_gray_object_queue_trim_free_list (SgenGrayQueue *queue);
void sgen_gray_object_queue_init (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func);
void sgen_gray_object_queue_init_invalid (SgenGrayQueue *queue);
void sgen_gray_queue_set_alloc_prepare (SgenGrayQueue *queue, GrayQueueAllocPrepareFunc alloc_prepare_func, void *data);
void sgen_gray_object_queue_init_with_alloc_prepare (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func,
GrayQueueAllocPrepareFunc func, void *data);
void sgen_gray_object_queue_init (SgenGrayQueue *queue, GrayQueueEnqueueCheckFunc enqueue_check_func, gboolean reuse_free_list);
void sgen_gray_object_queue_dispose (SgenGrayQueue *queue);
void sgen_gray_queue_set_alloc_prepare (SgenGrayQueue *queue, GrayQueueAllocPrepareFunc alloc_prepare_func);
void sgen_gray_object_queue_deinit (SgenGrayQueue *queue);
void sgen_gray_object_queue_disable_alloc_prepare (SgenGrayQueue *queue);
void sgen_gray_object_alloc_queue_section (SgenGrayQueue *queue);
void sgen_gray_object_free_queue_section (GrayQueueSection *section);

View File

@@ -323,7 +323,7 @@ sgen_los_free_object (LOSObject *obj)
los_num_objects--;
#ifdef USE_MALLOC
free (obj);
g_free (obj);
#else
if (size > LOS_SECTION_OBJECT_LIMIT) {
int pagesize = mono_pagesize ();
@@ -379,7 +379,7 @@ sgen_los_alloc_large_inner (GCVTable vtable, size_t size)
sgen_ensure_free_space (size, GENERATION_OLD);
#ifdef USE_MALLOC
obj = malloc (size + sizeof (LOSObject));
obj = g_malloc (size + sizeof (LOSObject));
memset (obj, 0, size + sizeof (LOSObject));
#else
if (size > LOS_SECTION_OBJECT_LIMIT) {
@@ -526,12 +526,14 @@ sgen_ptr_is_in_los (char *ptr, char **start)
{
LOSObject *obj;
*start = NULL;
if (start)
*start = NULL;
for (obj = los_object_list; obj; obj = obj->next) {
char *end = (char*)obj->data + sgen_los_object_size (obj);
if (ptr >= (char*)obj->data && ptr < end) {
*start = (char*)obj->data;
if (start)
*start = (char*)obj->data;
return TRUE;
}
}

View File

@@ -117,7 +117,7 @@ COPY_OR_MARK_FUNCTION_NAME (GCObject **ptr, GCObject *obj, SgenGrayQueue *queue)
MS_CALC_MARK_BIT (word, bit, obj);
SGEN_ASSERT (9, !MS_MARK_BIT (block, word, bit), "object %p already marked", obj);
MS_SET_MARK_BIT (block, word, bit);
binary_protocol_mark (obj, (gpointer)LOAD_VTABLE (obj), sgen_safe_object_get_size (obj));
binary_protocol_mark (obj, (gpointer)SGEN_LOAD_VTABLE (obj), sgen_safe_object_get_size (obj));
return FALSE;
#endif

View File

@@ -194,16 +194,23 @@ static SgenArrayList allocated_blocks = SGEN_ARRAY_LIST_INIT (NULL, NULL, NULL,
static void *empty_blocks = NULL;
static size_t num_empty_blocks = 0;
/*
* We can iterate the block list also while sweep is in progress but we
* need to account for blocks that will be checked for sweeping and even
* freed in the process.
*/
#define FOREACH_BLOCK_NO_LOCK(bl) { \
volatile gpointer *slot; \
SGEN_ASSERT (0, !sweep_in_progress (), "Can't iterate blocks while sweep is in progress."); \
SGEN_ARRAY_LIST_FOREACH_SLOT (&allocated_blocks, slot) { \
(bl) = BLOCK_UNTAG (*slot);
(bl) = BLOCK_UNTAG (*slot); \
if (!(bl)) \
continue;
#define FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK(bl,hr) { \
volatile gpointer *slot; \
SGEN_ASSERT (0, !sweep_in_progress (), "Can't iterate blocks while sweep is in progress."); \
SGEN_ARRAY_LIST_FOREACH_SLOT (&allocated_blocks, slot) { \
(bl) = (MSBlockInfo *) (*slot); \
if (!(bl)) \
continue; \
(hr) = BLOCK_IS_TAGGED_HAS_REFERENCES ((bl)); \
(bl) = BLOCK_UNTAG ((bl));
#define END_FOREACH_BLOCK_NO_LOCK } SGEN_ARRAY_LIST_END_FOREACH_SLOT; }
@@ -549,16 +556,6 @@ ms_alloc_block (int size_index, gboolean pinned, gboolean has_references)
add_free_block (free_blocks, size_index, info);
/*
* Adding to the allocated_blocks array is racy with the removal of nulls when
* sweeping. We wait for sweep to finish to avoid that.
*
* The memory barrier here and in `sweep_job_func()` are required because we need
* `allocated_blocks` synchronized between this and the sweep thread.
*/
major_finish_sweep_checking ();
mono_memory_barrier ();
sgen_array_list_add (&allocated_blocks, BLOCK_TAG (info), 0, FALSE);
SGEN_ATOMIC_ADD_P (num_major_sections, 1);
@@ -566,17 +563,41 @@ ms_alloc_block (int size_index, gboolean pinned, gboolean has_references)
}
static gboolean
ptr_is_from_pinned_alloc (char *ptr)
ptr_is_in_major_block (char *ptr, char **start, gboolean *pinned)
{
MSBlockInfo *block;
FOREACH_BLOCK_NO_LOCK (block) {
if (ptr >= MS_BLOCK_FOR_BLOCK_INFO (block) && ptr <= MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE)
return block->pinned;
if (ptr >= MS_BLOCK_FOR_BLOCK_INFO (block) && ptr <= MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) {
int count = MS_BLOCK_FREE / block->obj_size;
int i;
if (start)
*start = NULL;
for (i = 0; i <= count; ++i) {
if (ptr >= (char*)MS_BLOCK_OBJ (block, i) && ptr < (char*)MS_BLOCK_OBJ (block, i + 1)) {
if (start)
*start = (char *)MS_BLOCK_OBJ (block, i);
break;
}
}
if (pinned)
*pinned = block->pinned;
return TRUE;
}
} END_FOREACH_BLOCK_NO_LOCK;
return FALSE;
}
static gboolean
ptr_is_from_pinned_alloc (char *ptr)
{
gboolean pinned;
if (ptr_is_in_major_block (ptr, NULL, &pinned))
return pinned;
return FALSE;
}
static void
ensure_can_access_block_free_list (MSBlockInfo *block)
{
@@ -775,23 +796,9 @@ major_is_object_live (GCObject *obj)
static gboolean
major_ptr_is_in_non_pinned_space (char *ptr, char **start)
{
MSBlockInfo *block;
FOREACH_BLOCK_NO_LOCK (block) {
if (ptr >= MS_BLOCK_FOR_BLOCK_INFO (block) && ptr <= MS_BLOCK_FOR_BLOCK_INFO (block) + MS_BLOCK_SIZE) {
int count = MS_BLOCK_FREE / block->obj_size;
int i;
*start = NULL;
for (i = 0; i <= count; ++i) {
if (ptr >= (char*)MS_BLOCK_OBJ (block, i) && ptr < (char*)MS_BLOCK_OBJ (block, i + 1)) {
*start = (char *)MS_BLOCK_OBJ (block, i);
break;
}
}
return !block->pinned;
}
} END_FOREACH_BLOCK_NO_LOCK;
gboolean pinned;
if (ptr_is_in_major_block (ptr, start, &pinned))
return !pinned;
return FALSE;
}
@@ -865,6 +872,7 @@ major_iterate_objects (IterateObjectsFlags flags, IterateObjectCallbackFunc call
gboolean pinned = flags & ITERATE_OBJECTS_PINNED;
MSBlockInfo *block;
/* No actual sweeping will take place if we are in the middle of a major collection. */
major_finish_sweep_checking ();
FOREACH_BLOCK_NO_LOCK (block) {
int count = MS_BLOCK_FREE / block->obj_size;
@@ -874,26 +882,13 @@ major_iterate_objects (IterateObjectsFlags flags, IterateObjectCallbackFunc call
continue;
if (!block->pinned && !non_pinned)
continue;
if (sweep && lazy_sweep) {
if (sweep && lazy_sweep && !block_is_swept_or_marking (block)) {
sweep_block (block);
SGEN_ASSERT (6, block->state == BLOCK_STATE_SWEPT, "Block must be swept after sweeping");
}
for (i = 0; i < count; ++i) {
void **obj = (void**) MS_BLOCK_OBJ (block, i);
/*
* We've finished sweep checking, but if we're sweeping lazily and
* the flags don't require us to sweep, the block might still need
* sweeping. In that case, we need to consult the mark bits to tell
* us whether an object slot is live.
*/
if (!block_is_swept_or_marking (block)) {
int word, bit;
SGEN_ASSERT (6, !sweep && block->state == BLOCK_STATE_NEED_SWEEPING, "Has sweeping not finished?");
MS_CALC_MARK_BIT (word, bit, obj);
if (!MS_MARK_BIT (block, word, bit))
continue;
}
if (MS_OBJ_ALLOCED (obj, block))
callback ((GCObject*)obj, block->obj_size, data);
}
@@ -1084,19 +1079,6 @@ major_block_is_evacuating (MSBlockInfo *block)
return FALSE;
}
#define LOAD_VTABLE SGEN_LOAD_VTABLE
#define MS_MARK_OBJECT_AND_ENQUEUE_CHECKED(obj,desc,block,queue) do { \
int __word, __bit; \
MS_CALC_MARK_BIT (__word, __bit, (obj)); \
if (!MS_MARK_BIT ((block), __word, __bit) && MS_OBJ_ALLOCED ((obj), (block))) { \
MS_SET_MARK_BIT ((block), __word, __bit); \
if (sgen_gc_descr_has_references (desc)) \
GRAY_OBJECT_ENQUEUE ((queue), (obj), (desc)); \
binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((obj))); \
INC_NUM_MAJOR_OBJECTS_MARKED (); \
} \
} while (0)
#define MS_MARK_OBJECT_AND_ENQUEUE(obj,desc,block,queue) do { \
int __word, __bit; \
MS_CALC_MARK_BIT (__word, __bit, (obj)); \
@@ -1105,7 +1087,7 @@ major_block_is_evacuating (MSBlockInfo *block)
MS_SET_MARK_BIT ((block), __word, __bit); \
if (sgen_gc_descr_has_references (desc)) \
GRAY_OBJECT_ENQUEUE ((queue), (obj), (desc)); \
binary_protocol_mark ((obj), (gpointer)LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((obj))); \
binary_protocol_mark ((obj), (gpointer)SGEN_LOAD_VTABLE ((obj)), sgen_safe_object_get_size ((obj))); \
INC_NUM_MAJOR_OBJECTS_MARKED (); \
} \
} while (0)
@@ -1253,8 +1235,6 @@ mark_pinned_objects_in_block (MSBlockInfo *block, size_t first_entry, size_t las
if (first_entry == last_entry)
return;
block->has_pinned = TRUE;
entry = sgen_pinning_get_entry (first_entry);
end = sgen_pinning_get_entry (last_entry);
@@ -1265,9 +1245,19 @@ mark_pinned_objects_in_block (MSBlockInfo *block, size_t first_entry, size_t las
if (index == last_index)
continue;
obj = MS_BLOCK_OBJ (block, index);
MS_MARK_OBJECT_AND_ENQUEUE_CHECKED (obj, sgen_obj_get_descriptor (obj), block, queue);
if (!MS_OBJ_ALLOCED (obj, block))
continue;
MS_MARK_OBJECT_AND_ENQUEUE (obj, sgen_obj_get_descriptor (obj), block, queue);
sgen_pin_stats_register_object (obj, GENERATION_OLD);
last_index = index;
}
/*
* There might have been potential pinning "pointers" into this block, but none of
* them pointed to occupied slots, in which case we don't have to pin the block.
*/
if (last_index >= 0)
block->has_pinned = TRUE;
}
static inline void
@@ -1315,6 +1305,7 @@ set_block_state (MSBlockInfo *block, gint32 new_state, gint32 expected_state)
{
SGEN_ASSERT (6, block->state == expected_state, "Block state incorrect before set");
block->state = new_state;
binary_protocol_block_set_state (block, MS_BLOCK_SIZE, expected_state, new_state);
}
/*
@@ -1428,6 +1419,8 @@ sweep_start (void)
for (j = 0; j < num_block_obj_sizes; ++j)
free_blocks [j] = NULL;
}
sgen_array_list_remove_nulls (&allocated_blocks);
}
static void sweep_finish (void);
@@ -1588,9 +1581,12 @@ static void
sweep_blocks_job_func (void *thread_data_untyped, SgenThreadPoolJob *job)
{
volatile gpointer *slot;
MSBlockInfo *bl;
SGEN_ARRAY_LIST_FOREACH_SLOT (&allocated_blocks, slot) {
sweep_block (BLOCK_UNTAG (*slot));
bl = BLOCK_UNTAG (*slot);
if (bl)
sweep_block (bl);
} SGEN_ARRAY_LIST_END_FOREACH_SLOT;
mono_memory_write_barrier ();
@@ -1637,8 +1633,6 @@ sweep_job_func (void *thread_data_untyped, SgenThreadPoolJob *job)
}
}
sgen_array_list_remove_nulls (&allocated_blocks);
/*
* Concurrently sweep all the blocks to reduce workload during minor
* pauses where we need certain blocks to be swept. At the start of
@@ -1717,7 +1711,7 @@ static int count_nonpinned_nonref;
static void
count_nonpinned_callback (GCObject *obj, size_t size, void *data)
{
GCVTable vtable = LOAD_VTABLE (obj);
GCVTable vtable = SGEN_LOAD_VTABLE (obj);
if (SGEN_VTABLE_HAS_REFERENCES (vtable))
++count_nonpinned_ref;
@@ -1728,7 +1722,7 @@ count_nonpinned_callback (GCObject *obj, size_t size, void *data)
static void
count_pinned_callback (GCObject *obj, size_t size, void *data)
{
GCVTable vtable = LOAD_VTABLE (obj);
GCVTable vtable = SGEN_LOAD_VTABLE (obj);
if (SGEN_VTABLE_HAS_REFERENCES (vtable))
++count_pinned_ref;
@@ -2227,6 +2221,18 @@ major_print_gc_param_usage (void)
/*
* This callback is used to clear cards, move cards to the shadow table and do counting.
*/
static void
major_iterate_block_ranges (sgen_cardtable_block_callback callback)
{
	MSBlockInfo *block;
	gboolean has_references;
	/* Invoke `callback` with the (start, size) range of every allocated major
	 * block that contains references; reference-free blocks are skipped since
	 * they have no cards worth clearing/moving/counting.
	 * NOTE(review): unlike iterate_live_block_ranges, this variant appears safe
	 * to run while sweep is in progress (null slots are skipped by the
	 * FOREACH macro) — confirm against the macro definition. */
	FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK (block, has_references) {
		if (has_references)
			callback ((mword)MS_BLOCK_FOR_BLOCK_INFO (block), MS_BLOCK_SIZE);
	} END_FOREACH_BLOCK_NO_LOCK;
}
static void
major_iterate_live_block_ranges (sgen_cardtable_block_callback callback)
{
@@ -2426,12 +2432,15 @@ static void
major_scan_card_table (CardTableScanType scan_type, ScanCopyContext ctx)
{
MSBlockInfo *block;
gboolean has_references;
gboolean has_references, was_sweeping, skip_scan;
if (!concurrent_mark)
g_assert (scan_type == CARDTABLE_SCAN_GLOBAL);
major_finish_sweep_checking ();
if (scan_type != CARDTABLE_SCAN_GLOBAL)
SGEN_ASSERT (0, !sweep_in_progress (), "Sweep should be finished when we scan mod union card table");
was_sweeping = sweep_in_progress ();
binary_protocol_major_card_table_scan_start (sgen_timestamp (), scan_type & CARDTABLE_SCAN_MOD_UNION);
FOREACH_BLOCK_HAS_REFERENCES_NO_LOCK (block, has_references) {
#ifdef PREFETCH_CARDS
@@ -2449,8 +2458,36 @@ major_scan_card_table (CardTableScanType scan_type, ScanCopyContext ctx)
if (!has_references)
continue;
skip_scan = FALSE;
scan_card_table_for_block (block, scan_type, ctx);
if (scan_type == CARDTABLE_SCAN_GLOBAL) {
gpointer *card_start = (gpointer*) sgen_card_table_get_card_scan_address ((mword)MS_BLOCK_FOR_BLOCK_INFO (block));
gboolean has_dirty_cards = FALSE;
int i;
for (i = 0; i < CARDS_PER_BLOCK / sizeof(gpointer); i++) {
if (card_start [i]) {
has_dirty_cards = TRUE;
break;
}
}
if (!has_dirty_cards) {
skip_scan = TRUE;
} else {
/*
* After the start of the concurrent collections, blocks change state
* to marking. We should not sweep it in that case. We can't race with
* sweep start since we are in a nursery collection. Also avoid CAS-ing
*/
if (sweep_in_progress ()) {
skip_scan = !ensure_block_is_checked_for_sweeping (__index, TRUE, NULL);
} else if (was_sweeping) {
/* Recheck in case sweep finished after dereferencing the slot */
skip_scan = *sgen_array_list_get_slot (&allocated_blocks, __index) == 0;
}
}
}
if (!skip_scan)
scan_card_table_for_block (block, scan_type, ctx);
} END_FOREACH_BLOCK_NO_LOCK;
binary_protocol_major_card_table_scan_end (sgen_timestamp (), scan_type & CARDTABLE_SCAN_MOD_UNION);
}
@@ -2587,6 +2624,7 @@ sgen_marksweep_init_internal (SgenMajorCollector *collector, gboolean is_concurr
collector->pin_major_object = pin_major_object;
collector->scan_card_table = major_scan_card_table;
collector->iterate_live_block_ranges = major_iterate_live_block_ranges;
collector->iterate_block_ranges = major_iterate_block_ranges;
if (is_concurrent) {
collector->update_cardtable_mod_union = update_cardtable_mod_union;
collector->get_cardtable_mod_union_for_reference = major_get_cardtable_mod_union_for_reference;

View File

@@ -37,6 +37,9 @@ static gboolean do_pin_stats = FALSE;
static PinStatAddress *pin_stat_addresses = NULL;
static size_t pinned_byte_counts [PIN_TYPE_MAX];
static size_t pinned_bytes_in_generation [GENERATION_MAX];
static int pinned_objects_in_generation [GENERATION_MAX];
static SgenPointerQueue pinned_objects = SGEN_POINTER_QUEUE_INIT (INTERNAL_MEM_STATISTICS);
static SgenHashTable pinned_class_hash_table = SGEN_HASH_TABLE_INIT (INTERNAL_MEM_STATISTICS, INTERNAL_MEM_STAT_PINNED_CLASS, sizeof (PinnedClassEntry), g_str_hash, g_str_equal);
@@ -66,6 +69,10 @@ sgen_pin_stats_reset (void)
pin_stat_addresses = NULL;
for (i = 0; i < PIN_TYPE_MAX; ++i)
pinned_byte_counts [i] = 0;
for (i = 0; i < GENERATION_MAX; ++i) {
pinned_bytes_in_generation [i] = 0;
pinned_objects_in_generation [i] = 0;
}
sgen_pointer_queue_clear (&pinned_objects);
sgen_hash_table_clean (&pinned_class_hash_table);
sgen_hash_table_clean (&global_remset_class_hash_table);
@@ -78,6 +85,8 @@ sgen_pin_stats_register_address (char *addr, int pin_type)
PinStatAddress *node;
int pin_type_bit = 1 << pin_type;
if (!do_pin_stats)
return;
while (*node_ptr) {
node = *node_ptr;
if (addr == node->addr) {
@@ -153,13 +162,23 @@ register_vtable (GCVTable vtable, int pin_types)
}
void
sgen_pin_stats_register_object (GCObject *obj, size_t size)
sgen_pin_stats_register_object (GCObject *obj, int generation)
{
int pin_types = 0;
size_t size = 0;
if (binary_protocol_is_enabled ()) {
size = sgen_safe_object_get_size (obj);
pinned_bytes_in_generation [generation] += size;
++pinned_objects_in_generation [generation];
}
if (!do_pin_stats)
return;
if (!size)
size = sgen_safe_object_get_size (obj);
pin_stats_count_object_from_tree (obj, size, pin_stat_addresses, &pin_types);
sgen_pointer_queue_add (&pinned_objects, obj);
@@ -183,12 +202,15 @@ sgen_pin_stats_register_global_remset (GCObject *obj)
}
void
sgen_pin_stats_print_class_stats (void)
sgen_pin_stats_report (void)
{
char *name;
PinnedClassEntry *pinned_entry;
GlobalRemsetClassEntry *remset_entry;
binary_protocol_pin_stats (pinned_objects_in_generation [GENERATION_NURSERY], pinned_bytes_in_generation [GENERATION_NURSERY],
pinned_objects_in_generation [GENERATION_OLD], pinned_bytes_in_generation [GENERATION_OLD]);
if (!do_pin_stats)
return;

View File

@@ -449,6 +449,13 @@ MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY_FLUSH
BEGIN_PROTOCOL_ENTRY4 (binary_protocol_pin_stats, TYPE_INT, objects_pinned_in_nursery, TYPE_SIZE, bytes_pinned_in_nursery, TYPE_INT, objects_pinned_in_major, TYPE_SIZE, bytes_pinned_in_major)
DEFAULT_PRINT ()
IS_ALWAYS_MATCH (TRUE)
MATCH_INDEX (BINARY_PROTOCOL_MATCH)
IS_VTABLE_MATCH (FALSE)
END_PROTOCOL_ENTRY
#undef BEGIN_PROTOCOL_ENTRY0
#undef BEGIN_PROTOCOL_ENTRY1
#undef BEGIN_PROTOCOL_ENTRY2

View File

@@ -262,27 +262,33 @@ binary_protocol_check_file_overflow (void)
*
* The protocol entries that do flush have `FLUSH()` in their definition.
*/
void
gboolean
binary_protocol_flush_buffers (gboolean force)
{
#ifdef HAVE_UNISTD_H
int num_buffers = 0, i;
BinaryProtocolBuffer *header;
BinaryProtocolBuffer *buf;
BinaryProtocolBuffer **bufs;
if (binary_protocol_file == -1)
return;
return FALSE;
if (!force && !try_lock_exclusive ())
return;
return FALSE;
for (buf = binary_protocol_buffers; buf != NULL; buf = buf->next)
header = binary_protocol_buffers;
for (buf = header; buf != NULL; buf = buf->next)
++num_buffers;
bufs = (BinaryProtocolBuffer **)sgen_alloc_internal_dynamic (num_buffers * sizeof (BinaryProtocolBuffer*), INTERNAL_MEM_BINARY_PROTOCOL, TRUE);
for (buf = binary_protocol_buffers, i = 0; buf != NULL; buf = buf->next, i++)
for (buf = header, i = 0; buf != NULL; buf = buf->next, i++)
bufs [i] = buf;
SGEN_ASSERT (0, i == num_buffers, "Binary protocol buffer count error");
/*
* This might be incorrect when forcing, but all bets are off in that case, anyway,
* because we're trying to figure out a bug in the debugger.
*/
binary_protocol_buffers = NULL;
for (i = num_buffers - 1; i >= 0; --i) {
@@ -294,6 +300,8 @@ binary_protocol_flush_buffers (gboolean force)
if (!force)
unlock_exclusive ();
return TRUE;
#endif
}

View File

@@ -154,7 +154,7 @@ enum {
void binary_protocol_init (const char *filename, long long limit);
gboolean binary_protocol_is_enabled (void);
void binary_protocol_flush_buffers (gboolean force);
gboolean binary_protocol_flush_buffers (gboolean force);
#define BEGIN_PROTOCOL_ENTRY0(method) \
void method (void);

View File

@@ -77,7 +77,7 @@ state_is_working_or_enqueued (State state)
return state == STATE_WORKING || state == STATE_WORK_ENQUEUED;
}
void
static void
sgen_workers_ensure_awake (void)
{
State old_state;
@@ -177,7 +177,8 @@ static void
init_private_gray_queue (WorkerData *data)
{
sgen_gray_object_queue_init (&data->private_gray_queue,
sgen_get_major_collector ()->is_concurrent ? concurrent_enqueue_check : NULL);
sgen_get_major_collector ()->is_concurrent ? concurrent_enqueue_check : NULL,
FALSE);
}
static void
@@ -354,10 +355,29 @@ sgen_workers_are_working (void)
return state_is_working_or_enqueued (workers_state);
}
SgenSectionGrayQueue*
sgen_workers_get_distribute_section_gray_queue (void)
void
sgen_workers_assert_gray_queue_is_empty (void)
{
return &workers_distribute_gray_queue;
SGEN_ASSERT (0, sgen_section_gray_queue_is_empty (&workers_distribute_gray_queue), "Why is the workers gray queue not empty?");
}
void
sgen_workers_take_from_queue_and_awake (SgenGrayQueue *queue)
{
gboolean wake = FALSE;
for (;;) {
GrayQueueSection *section = sgen_gray_object_dequeue_section (queue);
if (!section)
break;
sgen_section_gray_queue_enqueue (&workers_distribute_gray_queue, section);
wake = TRUE;
}
if (wake) {
SGEN_ASSERT (0, sgen_concurrent_collection_in_progress (), "Why is there work to take when there's no concurrent collection in progress?");
sgen_workers_ensure_awake ();
}
}
#endif

View File

@@ -20,7 +20,6 @@ struct _WorkerData {
void sgen_workers_init (int num_workers);
void sgen_workers_stop_all_workers (void);
void sgen_workers_start_all_workers (SgenObjectOperations *object_ops, SgenThreadPoolJob *finish_job);
void sgen_workers_ensure_awake (void);
void sgen_workers_init_distribute_gray_queue (void);
void sgen_workers_enqueue_job (SgenThreadPoolJob *job, gboolean enqueue);
void sgen_workers_wait_for_jobs_finished (void);
@@ -30,6 +29,7 @@ void sgen_workers_join (void);
gboolean sgen_workers_have_idle_work (void);
gboolean sgen_workers_all_done (void);
gboolean sgen_workers_are_working (void);
SgenSectionGrayQueue* sgen_workers_get_distribute_section_gray_queue (void);
void sgen_workers_assert_gray_queue_is_empty (void);
void sgen_workers_take_from_queue_and_awake (SgenGrayQueue *queue);
#endif