Imported Upstream version 3.6.0

Former-commit-id: da6be194a6b1221998fc28233f2503bd61dd9d14
Jo Shields
2014-08-13 10:39:27 +01:00
commit a575963da9
50588 changed files with 8155799 additions and 0 deletions


@@ -0,0 +1 @@
EXTRA_DIST = $(srcdir)/*.h


@@ -0,0 +1,446 @@
# Makefile.in generated by automake 1.14.1 from Makefile.am.
# @configure_input@
# Copyright (C) 1994-2013 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
@SET_MAKE@
VPATH = @srcdir@
am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
am__make_running_with_option = \
case $${target_option-} in \
?) ;; \
*) echo "am__make_running_with_option: internal error: invalid" \
"target option '$${target_option-}' specified" >&2; \
exit 1;; \
esac; \
has_opt=no; \
sane_makeflags=$$MAKEFLAGS; \
if $(am__is_gnu_make); then \
sane_makeflags=$$MFLAGS; \
else \
case $$MAKEFLAGS in \
*\\[\ \ ]*) \
bs=\\; \
sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
| sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
esac; \
fi; \
skip_next=no; \
strip_trailopt () \
{ \
flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
}; \
for flg in $$sane_makeflags; do \
test $$skip_next = yes && { skip_next=no; continue; }; \
case $$flg in \
*=*|--*) continue;; \
-*I) strip_trailopt 'I'; skip_next=yes;; \
-*I?*) strip_trailopt 'I';; \
-*O) strip_trailopt 'O'; skip_next=yes;; \
-*O?*) strip_trailopt 'O';; \
-*l) strip_trailopt 'l'; skip_next=yes;; \
-*l?*) strip_trailopt 'l';; \
-[dEDm]) skip_next=yes;; \
-[JT]) skip_next=yes;; \
esac; \
case $$flg in \
*$$target_option*) has_opt=yes; break;; \
esac; \
done; \
test $$has_opt = yes
am__make_dryrun = (target_option=n; $(am__make_running_with_option))
am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkglibexecdir = $(libexecdir)/@PACKAGE@
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
subdir = include/private
DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
$(top_srcdir)/mkinstalldirs
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/libtool.m4 \
$(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \
$(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \
$(top_srcdir)/acinclude.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
AM_V_P = $(am__v_P_@AM_V@)
am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
am__v_P_0 = false
am__v_P_1 = :
AM_V_GEN = $(am__v_GEN_@AM_V@)
am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
am__v_GEN_0 = @echo " GEN " $@;
am__v_GEN_1 =
AM_V_at = $(am__v_at_@AM_V@)
am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
am__v_at_0 = @
am__v_at_1 =
SOURCES =
DIST_SOURCES =
am__can_run_installinfo = \
case $$AM_UPDATE_INFO_DIR in \
n|no|NO) false;; \
*) (install-info --version) >/dev/null 2>&1;; \
esac
am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
AM_CPPFLAGS = @AM_CPPFLAGS@
AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AR = @AR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
CC = @CC@
CCAS = @CCAS@
CCASDEPMODE = @CCASDEPMODE@
CCASFLAGS = @CCASFLAGS@
CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
CXX = @CXX@
CXXCPP = @CXXCPP@
CXXDEPMODE = @CXXDEPMODE@
CXXFLAGS = @CXXFLAGS@
CXXINCLUDES = @CXXINCLUDES@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
DLLTOOL = @DLLTOOL@
DOLT_BASH = @DOLT_BASH@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
EXTRA_TEST_LIBS = @EXTRA_TEST_LIBS@
FGREP = @FGREP@
GC_CFLAGS = @GC_CFLAGS@
GC_VERSION = @GC_VERSION@
GREP = @GREP@
INSTALL = @INSTALL@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
LD = @LD@
LDFLAGS = @LDFLAGS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LIBTOOL = @LIBTOOL@
LIPO = @LIPO@
LN_S = @LN_S@
LTCOMPILE = @LTCOMPILE@
LTCXXCOMPILE = @LTCXXCOMPILE@
LTLIBOBJS = @LTLIBOBJS@
MAINT = @MAINT@
MAKEINFO = @MAKEINFO@
MANIFEST_TOOL = @MANIFEST_TOOL@
MKDIR_P = @MKDIR_P@
MY_CFLAGS = @MY_CFLAGS@
NM = @NM@
NMEDIT = @NMEDIT@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
THREADDLLIBS = @THREADDLLIBS@
UNWINDLIBS = @UNWINDLIBS@
VERSION = @VERSION@
abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
ac_ct_AR = @ac_ct_AR@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
addincludes = @addincludes@
addlibs = @addlibs@
addtests = @addtests@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
bindir = @bindir@
build = @build@
build_alias = @build_alias@
build_cpu = @build_cpu@
build_os = @build_os@
build_vendor = @build_vendor@
builddir = @builddir@
datadir = @datadir@
datarootdir = @datarootdir@
docdir = @docdir@
dvidir = @dvidir@
exec_prefix = @exec_prefix@
host = @host@
host_alias = @host_alias@
host_cpu = @host_cpu@
host_os = @host_os@
host_vendor = @host_vendor@
htmldir = @htmldir@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
localedir = @localedir@
localstatedir = @localstatedir@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
pdfdir = @pdfdir@
prefix = @prefix@
program_transform_name = @program_transform_name@
psdir = @psdir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
target_all = @target_all@
top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
EXTRA_DIST = $(srcdir)/*.h
all: all-am
.SUFFIXES:
$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
&& { if test -f $@; then exit 0; else break; fi; }; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign include/private/Makefile'; \
$(am__cd) $(top_srcdir) && \
$(AUTOMAKE) --foreign include/private/Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
mostlyclean-libtool:
-rm -f *.lo
clean-libtool:
-rm -rf .libs _libs
tags TAGS:
ctags CTAGS:
cscope cscopelist:
distdir: $(DISTFILES)
@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
list='$(DISTFILES)'; \
dist_files=`for file in $$list; do echo $$file; done | \
sed -e "s|^$$srcdirstrip/||;t" \
-e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
case $$dist_files in \
*/*) $(MKDIR_P) `echo "$$dist_files" | \
sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
sort -u` ;; \
esac; \
for file in $$dist_files; do \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
if test -d $$d/$$file; then \
dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
if test -d "$(distdir)/$$file"; then \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
else \
test -f "$(distdir)/$$file" \
|| cp -p $$d/$$file "$(distdir)/$$file" \
|| exit 1; \
fi; \
done
check-am: all-am
check: check-am
all-am: Makefile
installdirs:
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-am
install-strip:
if test -z '$(STRIP)'; then \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
install; \
else \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
"INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
fi
mostlyclean-generic:
clean-generic:
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-am
clean-am: clean-generic clean-libtool mostlyclean-am
distclean: distclean-am
-rm -f Makefile
distclean-am: clean-am distclean-generic
dvi: dvi-am
dvi-am:
html: html-am
html-am:
info: info-am
info-am:
install-data-am:
install-dvi: install-dvi-am
install-dvi-am:
install-exec-am:
install-html: install-html-am
install-html-am:
install-info: install-info-am
install-info-am:
install-man:
install-pdf: install-pdf-am
install-pdf-am:
install-ps: install-ps-am
install-ps-am:
installcheck-am:
maintainer-clean: maintainer-clean-am
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-am
mostlyclean-am: mostlyclean-generic mostlyclean-libtool
pdf: pdf-am
pdf-am:
ps: ps-am
ps-am:
uninstall-am:
.MAKE: install-am install-strip
.PHONY: all all-am check check-am clean clean-generic clean-libtool \
cscopelist-am ctags-am distclean distclean-generic \
distclean-libtool distdir dvi dvi-am html html-am info info-am \
install install-am install-data install-data-am install-dvi \
install-dvi-am install-exec install-exec-am install-html \
install-html-am install-info install-info-am install-man \
install-pdf install-pdf-am install-ps install-ps-am \
install-strip installcheck installcheck-am installdirs \
maintainer-clean maintainer-clean-generic mostlyclean \
mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
tags-am uninstall uninstall-am
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:


@@ -0,0 +1,118 @@
/*
* Copyright (c) 1993-1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
/* Boehm, May 19, 1994 2:23 pm PDT */
# ifndef CORD_POSITION_H
/* The representation of CORD_position. This is private to the */
/* implementation, but the size is known to clients. Also */
/* the implementation of some exported macros relies on it. */
/* Don't use anything defined here and not in cord.h. */
# define MAX_DEPTH 48
/* The maximum depth of a balanced cord + 1. */
/* We don't let cords get deeper than MAX_DEPTH. */
struct CORD_pe {
CORD pe_cord;
size_t pe_start_pos;
};
/* A structure describing an entry on the path from the root */
/* to current position. */
typedef struct CORD_Pos {
size_t cur_pos;
int path_len;
# define CORD_POS_INVALID (0x55555555)
/* path_len == INVALID <==> position invalid */
const char *cur_leaf; /* Current leaf, if it is a string. */
/* If the current leaf is a function, */
/* then this may point to function_buf */
/* containing the next few characters. */
/* Always points to a valid string */
/* containing the current character */
/* unless cur_end is 0. */
size_t cur_start; /* Start position of cur_leaf */
size_t cur_end; /* Ending position of cur_leaf */
/* 0 if cur_leaf is invalid. */
struct CORD_pe path[MAX_DEPTH + 1];
/* path[path_len] is the leaf corresponding to cur_pos */
/* path[0].pe_cord is the cord we point to. */
# define FUNCTION_BUF_SZ 8
char function_buf[FUNCTION_BUF_SZ]; /* Space for next few chars */
/* from function node. */
} CORD_pos[1];
/* Extract the cord from a position: */
CORD CORD_pos_to_cord(CORD_pos p);
/* Extract the current index from a position: */
size_t CORD_pos_to_index(CORD_pos p);
/* Fetch the character located at the given position: */
char CORD_pos_fetch(CORD_pos p);
/* Initialize the position to refer to the given cord and index. */
/* Note that this is the most expensive function on positions: */
void CORD_set_pos(CORD_pos p, CORD x, size_t i);
/* Advance the position to the next character. */
/* P must be initialized and valid. */
/* Invalidates p if past end: */
void CORD_next(CORD_pos p);
/* Move the position to the preceding character. */
/* P must be initialized and valid. */
/* Invalidates p if past beginning: */
void CORD_prev(CORD_pos p);
/* Is the position valid, i.e. inside the cord? */
int CORD_pos_valid(CORD_pos p);
char CORD__pos_fetch(CORD_pos);
void CORD__next(CORD_pos);
void CORD__prev(CORD_pos);
#define CORD_pos_fetch(p) \
(((p)[0].cur_end != 0)? \
(p)[0].cur_leaf[(p)[0].cur_pos - (p)[0].cur_start] \
: CORD__pos_fetch(p))
#define CORD_next(p) \
(((p)[0].cur_pos + 1 < (p)[0].cur_end)? \
(p)[0].cur_pos++ \
: (CORD__next(p), 0))
#define CORD_prev(p) \
(((p)[0].cur_end != 0 && (p)[0].cur_pos > (p)[0].cur_start)? \
(p)[0].cur_pos-- \
: (CORD__prev(p), 0))
#define CORD_pos_to_index(p) ((p)[0].cur_pos)
#define CORD_pos_to_cord(p) ((p)[0].path[0].pe_cord)
#define CORD_pos_valid(p) ((p)[0].path_len != CORD_POS_INVALID)
/* Some grubby stuff for performance-critical friends: */
#define CORD_pos_chars_left(p) ((long)((p)[0].cur_end) - (long)((p)[0].cur_pos))
/* Number of characters in cache. <= 0 ==> none */
#define CORD_pos_advance(p,n) ((p)[0].cur_pos += (n) - 1, CORD_next(p))
/* Advance position by n characters */
/* 0 < n < CORD_pos_chars_left(p) */
#define CORD_pos_cur_char_addr(p) \
(p)[0].cur_leaf + ((p)[0].cur_pos - (p)[0].cur_start)
/* address of current character in cache. */
#endif
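
As a usage illustration (not part of the header), the position API above supports a simple character-by-character walk of a cord, relying only on the documented CORD_set_pos/CORD_pos_valid/CORD_pos_fetch/CORD_next interface; the function name is made up for this sketch.

#include <stdio.h>
#include "cord.h"

/* Print every character of cord x; a minimal usage sketch. */
void CORD_print_sketch(CORD x)
{
    CORD_pos p;                      /* array type, so no & is needed below */

    CORD_set_pos(p, x, 0);           /* the most expensive operation here   */
    while (CORD_pos_valid(p)) {      /* becomes invalid once past the end   */
        putchar(CORD_pos_fetch(p));  /* character at the current index      */
        CORD_next(p);                /* advance; may descend the tree       */
    }
}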


@@ -0,0 +1,68 @@
#ifndef GC_DARWIN_SEMAPHORE_H
#define GC_DARWIN_SEMAPHORE_H
#if !defined(GC_DARWIN_THREADS)
#error darwin_semaphore.h included with GC_DARWIN_THREADS not defined
#endif
/*
This is a very simple semaphore implementation for Darwin. It is
implemented in terms of pthreads calls, so it isn't async-signal-safe.
This isn't a problem because signals aren't used to suspend threads
on Darwin.
*/
typedef struct {
pthread_mutex_t mutex;
pthread_cond_t cond;
int value;
} sem_t;
static int sem_init(sem_t *sem, int pshared, int value) {
int ret;
if(pshared)
GC_abort("sem_init with pshared set");
sem->value = value;
ret = pthread_mutex_init(&sem->mutex,NULL);
if(ret < 0) return -1;
ret = pthread_cond_init(&sem->cond,NULL);
if(ret < 0) return -1;
return 0;
}
static int sem_post(sem_t *sem) {
if(pthread_mutex_lock(&sem->mutex) < 0)
return -1;
sem->value++;
if(pthread_cond_signal(&sem->cond) < 0) {
pthread_mutex_unlock(&sem->mutex);
return -1;
}
if(pthread_mutex_unlock(&sem->mutex) < 0)
return -1;
return 0;
}
static int sem_wait(sem_t *sem) {
if(pthread_mutex_lock(&sem->mutex) < 0)
return -1;
while(sem->value == 0) {
pthread_cond_wait(&sem->cond,&sem->mutex);
}
sem->value--;
if(pthread_mutex_unlock(&sem->mutex) < 0)
return -1;
return 0;
}
static int sem_destroy(sem_t *sem) {
int ret;
ret = pthread_cond_destroy(&sem->cond);
if(ret < 0) return -1;
ret = pthread_mutex_destroy(&sem->mutex);
if(ret < 0) return -1;
return 0;
}
#endif
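
A hedged sketch of how the emulated semaphore above can be used within one process (pshared must be 0); worker and wait_for_worker are illustrative names, not GC code.

#include <pthread.h>

static sem_t done;                    /* the emulated semaphore defined above */

static void *worker(void *arg)
{
    /* ... perform some work ... */
    sem_post(&done);                  /* value++ and signal the waiter        */
    return arg;
}

static void wait_for_worker(void)
{
    pthread_t t;

    sem_init(&done, 0 /* pshared */, 0 /* initial value */);
    pthread_create(&t, NULL, worker, NULL);
    sem_wait(&done);                  /* blocks while value == 0              */
    pthread_join(t, NULL);
    sem_destroy(&done);
}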


@@ -0,0 +1,28 @@
#ifndef GC_DARWIN_STOP_WORLD_H
#define GC_DARWIN_STOP_WORLD_H
#if !defined(GC_DARWIN_THREADS)
#error darwin_stop_world.h included without GC_DARWIN_THREADS defined
#endif
#include <mach/mach.h>
#include <mach/thread_act.h>
struct thread_stop_info {
mach_port_t mach_thread;
int signal;
word last_stop_count; /* GC_last_stop_count value when thread */
/* last successfully handled a suspend */
/* signal. */
ptr_t stack_ptr; /* Valid only when stopped. */
};
struct GC_mach_thread {
thread_act_t thread;
int already_suspended;
};
void GC_darwin_register_mach_handler_thread(mach_port_t thread);
#endif
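
A hedged, illustrative sketch (not from the collector) of how stop-world code might suspend a thread recorded in struct GC_mach_thread, using the Mach calls pulled in above; suspend_one_sketch is an invented name.

/* Suspend one recorded thread unless it was already suspended. */
static kern_return_t suspend_one_sketch(struct GC_mach_thread *t)
{
    if (t -> already_suspended) return KERN_SUCCESS;
    t -> already_suspended = 1;
    return thread_suspend(t -> thread);   /* from <mach/thread_act.h> */
}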


@@ -0,0 +1,175 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
* Copyright (c) 1997 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
/*
* This is mostly an internal header file. Typical clients should
* not use it. Clients that define their own object kinds with
* debugging allocators will probably want to include this, however.
* No attempt is made to keep the namespace clean. This should not be
* included from header files that are frequently included by clients.
*/
#ifndef _DBG_MLC_H
#define _DBG_MLC_H
# define I_HIDE_POINTERS
# include "gc_priv.h"
# ifdef KEEP_BACK_PTRS
# include "gc_backptr.h"
# endif
#ifndef HIDE_POINTER
/* Gc.h was previously included, and hence the I_HIDE_POINTERS */
/* definition had no effect. Repeat the gc.h definitions here to */
/* get them anyway. */
typedef GC_word GC_hidden_pointer;
# define HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
# define REVEAL_POINTER(p) ((GC_PTR)(HIDE_POINTER(p)))
#endif /* HIDE_POINTER */
# define START_FLAG ((word)0xfedcedcb)
# define END_FLAG ((word)0xbcdecdef)
/* Stored both one past the end of user object, and one before */
/* the end of the object as seen by the allocator. */
# if defined(KEEP_BACK_PTRS) || defined(PRINT_BLACK_LIST) \
|| defined(MAKE_BACK_GRAPH)
/* Pointer "source"s that aren't real locations. */
/* Used in oh_back_ptr fields and as "source" */
/* argument to some marking functions. */
# define NOT_MARKED (ptr_t)(0)
# define MARKED_FOR_FINALIZATION (ptr_t)(2)
/* Object was marked because it is finalizable. */
# define MARKED_FROM_REGISTER (ptr_t)(4)
/* Object was marked from a register. Hence the */
/* source of the reference doesn't have an address. */
# endif /* KEEP_BACK_PTRS || PRINT_BLACK_LIST */
/* Object header */
typedef struct {
# if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
/* We potentially keep two different kinds of back */
/* pointers. KEEP_BACK_PTRS stores a single back */
/* pointer in each reachable object to allow reporting */
/* of why an object was retained. MAKE_BACK_GRAPH */
/* builds a graph containing the inverse of all */
/* "points-to" edges including those involving */
/* objects that have just become unreachable. This */
/* allows detection of growing chains of unreachable */
/* objects. It may be possible to eventually combine */
/* both, but for now we keep them separate. Both */
/* kinds of back pointers are hidden using the */
/* following macros. In both cases, the hidden version */
/* is constrained to have a least significant bit of 1, */
/* to allow it to be distinguished from a free list */
/* link. This means the plain version must have an */
/* lsb of 0. */
/* Note that blocks dropped by black-listing will */
/* also have the lsb clear once debugging has */
/* started. */
/* We're careful never to overwrite a value with lsb 0. */
# if ALIGNMENT == 1
/* Fudge back pointer to be even. */
# define HIDE_BACK_PTR(p) HIDE_POINTER(~1 & (GC_word)(p))
# else
# define HIDE_BACK_PTR(p) HIDE_POINTER(p)
# endif
# ifdef KEEP_BACK_PTRS
GC_hidden_pointer oh_back_ptr;
# endif
# ifdef MAKE_BACK_GRAPH
GC_hidden_pointer oh_bg_ptr;
# endif
# if defined(ALIGN_DOUBLE) && \
(defined(KEEP_BACK_PTRS) != defined(MAKE_BACK_GRAPH))
word oh_dummy;
# endif
# endif
GC_CONST char * oh_string; /* object descriptor string */
word oh_int; /* object descriptor integers */
# ifdef NEED_CALLINFO
struct callinfo oh_ci[NFRAMES];
# endif
# ifndef SHORT_DBG_HDRS
word oh_sz; /* Original malloc arg. */
word oh_sf; /* start flag */
# endif /* SHORT_DBG_HDRS */
} oh;
/* The size of the above structure is assumed not to dealign things, */
/* and to be a multiple of the word length. */
#ifdef SHORT_DBG_HDRS
# define DEBUG_BYTES (sizeof (oh))
# define UNCOLLECTABLE_DEBUG_BYTES DEBUG_BYTES
#else
/* Add space for END_FLAG, but use any extra space that was already */
/* added to catch off-the-end pointers. */
/* For uncollectable objects, the extra byte is not added. */
# define UNCOLLECTABLE_DEBUG_BYTES (sizeof (oh) + sizeof (word))
# define DEBUG_BYTES (UNCOLLECTABLE_DEBUG_BYTES - EXTRA_BYTES)
#endif
/* Round bytes to words without adding extra byte at end. */
#define SIMPLE_ROUNDED_UP_WORDS(n) BYTES_TO_WORDS((n) + WORDS_TO_BYTES(1) - 1)
/* ADD_CALL_CHAIN stores a (partial) call chain into an object */
/* header. It may be called with or without the allocation */
/* lock. */
/* PRINT_CALL_CHAIN prints the call chain stored in an object */
/* to stderr. It requires that we do not hold the lock. */
#ifdef SAVE_CALL_CHAIN
# define ADD_CALL_CHAIN(base, ra) GC_save_callers(((oh *)(base)) -> oh_ci)
# define PRINT_CALL_CHAIN(base) GC_print_callers(((oh *)(base)) -> oh_ci)
#else
# ifdef GC_ADD_CALLER
# define ADD_CALL_CHAIN(base, ra) ((oh *)(base)) -> oh_ci[0].ci_pc = (ra)
# define PRINT_CALL_CHAIN(base) GC_print_callers(((oh *)(base)) -> oh_ci)
# else
# define ADD_CALL_CHAIN(base, ra)
# define PRINT_CALL_CHAIN(base)
# endif
#endif
# ifdef GC_ADD_CALLER
# define OPT_RA ra,
# else
# define OPT_RA
# endif
/* Check whether object with base pointer p has debugging info */
/* p is assumed to point to a legitimate object in our part */
/* of the heap. */
#ifdef SHORT_DBG_HDRS
# define GC_has_other_debug_info(p) TRUE
#else
GC_bool GC_has_other_debug_info(/* p */);
#endif
#if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
# define GC_HAS_DEBUG_INFO(p) \
((*((word *)p) & 1) && GC_has_other_debug_info(p))
#else
# define GC_HAS_DEBUG_INFO(p) GC_has_other_debug_info(p)
#endif
/* Store debugging info into p. Return displaced pointer. */
/* Assumes we don't hold allocation lock. */
ptr_t GC_store_debug_info(/* p, sz, string, integer */);
#endif /* _DBG_MLC_H */
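
To make the header layout above concrete, here is a simplified, hedged sketch of how a debugging allocator could fill in an oh record in front of the user data, assuming !SHORT_DBG_HDRS. The real GC_store_debug_info also XORs the guard flags with the result pointer and records back pointers and call chains; all of that is omitted here.

/* Simplified: guard flags stored plainly, no pointer hiding, no lock. */
static ptr_t store_debug_info_sketch(ptr_t base, word sz,
                                     GC_CONST char *string, word integer)
{
    oh *ohdr = (oh *)base;
    word *result = (word *)(ohdr + 1);     /* displaced user pointer    */

    ohdr -> oh_string = string;            /* typically the file name   */
    ohdr -> oh_int = integer;              /* typically the line number */
    ohdr -> oh_sz = sz;                    /* original request size     */
    ohdr -> oh_sf = START_FLAG;            /* guard before the object   */
    result[SIMPLE_ROUNDED_UP_WORDS(sz)] = END_FLAG;  /* guard past it   */
    return (ptr_t)result;
}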


@@ -0,0 +1,233 @@
/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
/* Boehm, July 11, 1995 11:54 am PDT */
# ifndef GC_HEADERS_H
# define GC_HEADERS_H
typedef struct hblkhdr hdr;
# if CPP_WORDSZ != 32 && CPP_WORDSZ < 36
--> Get a real machine.
# endif
/*
* The 2 level tree data structure that is used to find block headers.
* If there are more than 32 bits in a pointer, the top level is a hash
* table.
*
* This defines HDR, GET_HDR, and SET_HDR, the main macros used to
* retrieve and set object headers.
*
* Since 5.0 alpha 5, we can also take advantage of a header lookup
* cache. This is a locally declared direct mapped cache, used inside
* the marker. The HC_GET_HDR macro uses and maintains this
* cache. Assuming we get reasonable hit rates, this shaves a few
* memory references from each pointer validation.
*/
# if CPP_WORDSZ > 32
# define HASH_TL
# endif
/* Define appropriate out-degrees for each of the two tree levels */
# ifdef SMALL_CONFIG
# define LOG_BOTTOM_SZ 11
/* Keep top index size reasonable with smaller blocks. */
# else
# define LOG_BOTTOM_SZ 10
# endif
# ifndef HASH_TL
# define LOG_TOP_SZ (WORDSZ - LOG_BOTTOM_SZ - LOG_HBLKSIZE)
# else
# define LOG_TOP_SZ 11
# endif
# define TOP_SZ (1 << LOG_TOP_SZ)
# define BOTTOM_SZ (1 << LOG_BOTTOM_SZ)
#ifndef SMALL_CONFIG
# define USE_HDR_CACHE
#endif
/* #define COUNT_HDR_CACHE_HITS */
extern hdr * GC_invalid_header; /* header for an imaginary block */
/* containing no objects. */
/* Check whether p and corresponding hhdr point to long or invalid */
/* object. If so, advance hhdr to */
/* beginning of block, or set hhdr to GC_invalid_header. */
#define ADVANCE(p, hhdr, source) \
{ \
hdr * new_hdr = GC_invalid_header; \
p = GC_find_start(p, hhdr, &new_hdr); \
hhdr = new_hdr; \
}
#ifdef USE_HDR_CACHE
# ifdef COUNT_HDR_CACHE_HITS
extern word GC_hdr_cache_hits;
extern word GC_hdr_cache_misses;
# define HC_HIT() ++GC_hdr_cache_hits
# define HC_MISS() ++GC_hdr_cache_misses
# else
# define HC_HIT()
# define HC_MISS()
# endif
typedef struct hce {
word block_addr; /* right shifted by LOG_HBLKSIZE */
hdr * hce_hdr;
} hdr_cache_entry;
# define HDR_CACHE_SIZE 8 /* power of 2 */
# define DECLARE_HDR_CACHE \
hdr_cache_entry hdr_cache[HDR_CACHE_SIZE]
# define INIT_HDR_CACHE BZERO(hdr_cache, sizeof(hdr_cache));
# define HCE(h) hdr_cache + (((word)(h) >> LOG_HBLKSIZE) & (HDR_CACHE_SIZE-1))
# define HCE_VALID_FOR(hce,h) ((hce) -> block_addr == \
((word)(h) >> LOG_HBLKSIZE))
# define HCE_HDR(h) ((hce) -> hce_hdr)
/* Analogous to GET_HDR, except that in the case of large objects, it */
/* returns the header for the object beginning, and updates p. */
/* Returns GC_invalid_header instead of 0. All of this saves a branch */
/* in the fast path. */
# define HC_GET_HDR(p, hhdr, source) \
{ \
hdr_cache_entry * hce = HCE(p); \
if (HCE_VALID_FOR(hce, p)) { \
HC_HIT(); \
hhdr = hce -> hce_hdr; \
} else { \
HC_MISS(); \
GET_HDR(p, hhdr); \
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) { \
ADVANCE(p, hhdr, source); \
} else { \
hce -> block_addr = (word)(p) >> LOG_HBLKSIZE; \
hce -> hce_hdr = hhdr; \
} \
} \
}
#else /* !USE_HDR_CACHE */
# define DECLARE_HDR_CACHE
# define INIT_HDR_CACHE
# define HC_GET_HDR(p, hhdr, source) \
{ \
GET_HDR(p, hhdr); \
if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) { \
ADVANCE(p, hhdr, source); \
} \
}
#endif
typedef struct bi {
hdr * index[BOTTOM_SZ];
/*
* The bottom level index contains one of three kinds of values:
* 0 means we're not responsible for this block,
* or this is a block other than the first one in a free block.
* 1 < (long)X <= MAX_JUMP means the block starts at least
* X * HBLKSIZE bytes before the current address.
* A valid pointer points to a hdr structure. (The above can't be
* valid pointers due to the GET_MEM return convention.)
*/
struct bi * asc_link; /* All indices are linked in */
/* ascending order... */
struct bi * desc_link; /* ... and in descending order. */
word key; /* high order address bits. */
# ifdef HASH_TL
struct bi * hash_link; /* Hash chain link. */
# endif
} bottom_index;
/* extern bottom_index GC_all_nils; - really part of GC_arrays */
/* extern bottom_index * GC_top_index []; - really part of GC_arrays */
/* Each entry points to a bottom_index. */
/* On a 32 bit machine, it points to */
/* the index for a set of high order */
/* bits equal to the index. For longer */
/* addresses, we hash the high order */
/* bits to compute the index in */
/* GC_top_index, and each entry points */
/* to a hash chain. */
/* The last entry in each chain is */
/* GC_all_nils. */
# define MAX_JUMP (HBLKSIZE - 1)
# define HDR_FROM_BI(bi, p) \
((bi)->index[((word)(p) >> LOG_HBLKSIZE) & (BOTTOM_SZ - 1)])
# ifndef HASH_TL
# define BI(p) (GC_top_index \
[(word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE)])
# define HDR_INNER(p) HDR_FROM_BI(BI(p),p)
# ifdef SMALL_CONFIG
# define HDR(p) GC_find_header((ptr_t)(p))
# else
# define HDR(p) HDR_INNER(p)
# endif
# define GET_BI(p, bottom_indx) (bottom_indx) = BI(p)
# define GET_HDR(p, hhdr) (hhdr) = HDR(p)
# define SET_HDR(p, hhdr) HDR_INNER(p) = (hhdr)
# define GET_HDR_ADDR(p, ha) (ha) = &(HDR_INNER(p))
# else /* hash */
/* Hash function for tree top level */
# define TL_HASH(hi) ((hi) & (TOP_SZ - 1))
/* Set bottom_indx to point to the bottom index for address p */
# define GET_BI(p, bottom_indx) \
{ \
register word hi = \
(word)(p) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE); \
register bottom_index * _bi = GC_top_index[TL_HASH(hi)]; \
\
while (_bi -> key != hi && _bi != GC_all_nils) \
_bi = _bi -> hash_link; \
(bottom_indx) = _bi; \
}
# define GET_HDR_ADDR(p, ha) \
{ \
register bottom_index * bi; \
\
GET_BI(p, bi); \
(ha) = &(HDR_FROM_BI(bi, p)); \
}
# define GET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
(hhdr) = *_ha; }
# define SET_HDR(p, hhdr) { register hdr ** _ha; GET_HDR_ADDR(p, _ha); \
*_ha = (hhdr); }
# define HDR(p) GC_find_header((ptr_t)(p))
# endif
/* Is the result a forwarding address to someplace closer to the */
/* beginning of the block or NIL? */
# define IS_FORWARDING_ADDR_OR_NIL(hhdr) ((unsigned long) (hhdr) <= MAX_JUMP)
/* Get an HBLKSIZE aligned address closer to the beginning of the block */
/* h. Assumes hhdr == HDR(h) and IS_FORWARDING_ADDR(hhdr). */
# define FORWARDED_ADDR(h, hhdr) ((struct hblk *)(h) - (unsigned long)(hhdr))
# endif /* GC_HEADERS_H */
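
A hedged sketch of the two-load lookup that the non-hashed HDR/GET_HDR macros above perform on a 32-bit target (GC_top_index is really a member of GC_arrays, as the comments note; the function name is illustrative).

/* Two-level lookup: the top index selects a bottom_index, which holds */
/* the per-block header pointer (or a small forwarding value, or 0).   */
static hdr * lookup_header_sketch(ptr_t p)
{
    bottom_index * bi =
        GC_top_index[(word)p >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE)];
    return HDR_FROM_BI(bi, p);
}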

File diff suppressed because it is too large


@@ -0,0 +1,392 @@
/*
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
* Copyright (c) 2001 by Hewlett-Packard Company. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
/* Private declarations of GC marker data structures and macros */
/*
* Declarations of mark stack. Needed by marker and client supplied mark
* routines. Transitively include gc_priv.h.
* (Note that gc_priv.h should not be included before this, since this
* includes dbg_mlc.h, which wants to include gc_priv.h AFTER defining
* I_HIDE_POINTERS.)
*/
#ifndef GC_PMARK_H
# define GC_PMARK_H
# if defined(KEEP_BACK_PTRS) || defined(PRINT_BLACK_LIST)
# include "dbg_mlc.h"
# endif
# ifndef GC_MARK_H
# include "../gc_mark.h"
# endif
# ifndef GC_PRIVATE_H
# include "gc_priv.h"
# endif
/* The real declarations of the following is in gc_priv.h, so that */
/* we can avoid scanning the following table. */
/*
extern mark_proc GC_mark_procs[MAX_MARK_PROCS];
*/
/*
* Mark descriptor stuff that should remain private for now, mostly
* because it's hard to export WORDSZ without including gcconfig.h.
*/
# define BITMAP_BITS (WORDSZ - GC_DS_TAG_BITS)
# define PROC(descr) \
(GC_mark_procs[((descr) >> GC_DS_TAG_BITS) & (GC_MAX_MARK_PROCS-1)])
# define ENV(descr) \
((descr) >> (GC_DS_TAG_BITS + GC_LOG_MAX_MARK_PROCS))
# define MAX_ENV \
(((word)1 << (WORDSZ - GC_DS_TAG_BITS - GC_LOG_MAX_MARK_PROCS)) - 1)
extern word GC_n_mark_procs;
/* Number of mark stack entries to discard on overflow. */
#define GC_MARK_STACK_DISCARDS (INITIAL_MARK_STACK_SIZE/8)
typedef struct GC_ms_entry {
GC_word * mse_start; /* First word of object */
GC_word mse_descr; /* Descriptor; low order two bits are tags, */
/* identifying the upper 30 bits as one of the */
/* following: */
} mse;
extern word GC_mark_stack_size;
extern mse * GC_mark_stack_limit;
#ifdef PARALLEL_MARK
extern mse * VOLATILE GC_mark_stack_top;
#else
extern mse * GC_mark_stack_top;
#endif
extern mse * GC_mark_stack;
#ifdef PARALLEL_MARK
/*
* Allow multiple threads to participate in the marking process.
* This works roughly as follows:
* The main mark stack never shrinks, but it can grow.
*
* The initiating thread holds the GC lock, and sets GC_help_wanted.
*
* Other threads:
* 1) update helper_count (while holding mark_lock.)
* 2) allocate a local mark stack
* repeatedly:
* 3) Steal a global mark stack entry by atomically replacing
* its descriptor with 0.
* 4) Copy it to the local stack.
* 5) Mark on the local stack until it is empty, or
* it may be profitable to copy it back.
* 6) If necessary, copy local stack to global one,
* holding mark lock.
* 7) Stop when the global mark stack is empty.
* 8) decrement helper_count (holding mark_lock).
*
* This is an experiment to see if we can do something along the lines
* of the University of Tokyo SGC in a less intrusive, though probably
* also less performant, way.
*/
void GC_do_parallel_mark();
/* Initiate parallel marking. */
extern GC_bool GC_help_wanted; /* Protected by mark lock */
extern unsigned GC_helper_count; /* Number of running helpers. */
/* Protected by mark lock */
extern unsigned GC_active_count; /* Number of active helpers. */
/* Protected by mark lock */
/* May increase and decrease */
/* within each mark cycle. But */
/* once it returns to 0, it */
/* stays zero for the cycle. */
/* GC_mark_stack_top is also protected by mark lock. */
extern mse * VOLATILE GC_first_nonempty;
/* Lowest entry on mark stack */
/* that may be nonempty. */
/* Updated only by initiating */
/* thread. */
/*
* GC_notify_all_marker() is used when GC_help_wanted is first set,
* when the last helper becomes inactive,
* when something is added to the global mark stack, and just after
* GC_mark_no is incremented.
* This could be split into multiple CVs (and probably should be to
* scale to really large numbers of processors.)
*/
#endif /* PARALLEL_MARK */
/* Return a pointer to within 1st page of object. */
/* Set *new_hdr_p to corr. hdr. */
#ifdef __STDC__
ptr_t GC_find_start(ptr_t current, hdr *hhdr, hdr **new_hdr_p);
#else
ptr_t GC_find_start();
#endif
mse * GC_signal_mark_stack_overflow GC_PROTO((mse *msp));
# ifdef GATHERSTATS
# define ADD_TO_ATOMIC(sz) GC_atomic_in_use += (sz)
# define ADD_TO_COMPOSITE(sz) GC_composite_in_use += (sz)
# else
# define ADD_TO_ATOMIC(sz)
# define ADD_TO_COMPOSITE(sz)
# endif
/* Push the object obj with corresponding heap block header hhdr onto */
/* the mark stack. */
# define PUSH_OBJ(obj, hhdr, mark_stack_top, mark_stack_limit) \
{ \
register word _descr = (hhdr) -> hb_descr; \
\
if (_descr == 0) { \
ADD_TO_ATOMIC((hhdr) -> hb_sz); \
} else { \
ADD_TO_COMPOSITE((hhdr) -> hb_sz); \
mark_stack_top++; \
if (mark_stack_top >= mark_stack_limit) { \
mark_stack_top = GC_signal_mark_stack_overflow(mark_stack_top); \
} \
mark_stack_top -> mse_start = (obj); \
mark_stack_top -> mse_descr = _descr; \
} \
}
/* Push the contents of current onto the mark stack if it is a valid */
/* ptr to a currently unmarked object. Mark it. */
/* If we assumed a standard-conforming compiler, we could probably */
/* generate the exit_label transparently. */
# define PUSH_CONTENTS(current, mark_stack_top, mark_stack_limit, \
source, exit_label) \
{ \
hdr * my_hhdr; \
ptr_t my_current = current; \
\
GET_HDR(my_current, my_hhdr); \
if (IS_FORWARDING_ADDR_OR_NIL(my_hhdr)) { \
hdr * new_hdr = GC_invalid_header; \
my_current = GC_find_start(my_current, my_hhdr, &new_hdr); \
my_hhdr = new_hdr; \
} \
PUSH_CONTENTS_HDR(my_current, mark_stack_top, mark_stack_limit, \
source, exit_label, my_hhdr); \
exit_label: ; \
}
/* As above, but use header cache for header lookup. */
# define HC_PUSH_CONTENTS(current, mark_stack_top, mark_stack_limit, \
source, exit_label) \
{ \
hdr * my_hhdr; \
ptr_t my_current = current; \
\
HC_GET_HDR(my_current, my_hhdr, source); \
PUSH_CONTENTS_HDR(my_current, mark_stack_top, mark_stack_limit, \
source, exit_label, my_hhdr); \
exit_label: ; \
}
/* Set mark bit, exit if it was already set. */
# ifdef USE_MARK_BYTES
/* Unlike the mark bit case, there is a race here, and we may set */
/* the bit twice in the concurrent case. This can result in the */
/* object being pushed twice. But that's only a performance issue. */
# define SET_MARK_BIT_EXIT_IF_SET(hhdr,displ,exit_label) \
{ \
register VOLATILE char * mark_byte_addr = \
hhdr -> hb_marks + ((displ) >> 1); \
register char mark_byte = *mark_byte_addr; \
\
if (mark_byte) goto exit_label; \
*mark_byte_addr = 1; \
}
# else
# define SET_MARK_BIT_EXIT_IF_SET(hhdr,displ,exit_label) \
{ \
register word * mark_word_addr = hhdr -> hb_marks + divWORDSZ(displ); \
\
OR_WORD_EXIT_IF_SET(mark_word_addr, (word)1 << modWORDSZ(displ), \
exit_label); \
}
# endif /* USE_MARK_BYTES */
/* If the mark bit corresponding to current is not set, set it, and */
/* push the contents of the object on the mark stack. For a small */
/* object we assume that current is the (possibly interior) pointer */
/* to the object. For large objects we assume that current points */
/* to somewhere inside the first page of the object. If */
/* GC_all_interior_pointers is set, it may have been previously */
/* adjusted to make that true. */
# define PUSH_CONTENTS_HDR(current, mark_stack_top, mark_stack_limit, \
source, exit_label, hhdr) \
{ \
int displ; /* Displacement in block; first bytes, then words */ \
int map_entry; \
\
displ = HBLKDISPL(current); \
map_entry = MAP_ENTRY((hhdr -> hb_map), displ); \
displ = BYTES_TO_WORDS(displ); \
if (map_entry > CPP_MAX_OFFSET) { \
if (map_entry == OFFSET_TOO_BIG) { \
map_entry = displ % (hhdr -> hb_sz); \
displ -= map_entry; \
if (displ + (hhdr -> hb_sz) > BYTES_TO_WORDS(HBLKSIZE)) { \
GC_ADD_TO_BLACK_LIST_NORMAL((word)current, source); \
goto exit_label; \
} \
} else { \
GC_ADD_TO_BLACK_LIST_NORMAL((word)current, source); goto exit_label; \
} \
} else { \
displ -= map_entry; \
} \
GC_ASSERT(displ >= 0 && displ < MARK_BITS_PER_HBLK); \
SET_MARK_BIT_EXIT_IF_SET(hhdr, displ, exit_label); \
GC_STORE_BACK_PTR((ptr_t)source, (ptr_t)HBLKPTR(current) \
+ WORDS_TO_BYTES(displ)); \
PUSH_OBJ(((word *)(HBLKPTR(current)) + displ), hhdr, \
mark_stack_top, mark_stack_limit) \
}
#if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
# define PUSH_ONE_CHECKED_STACK(p, source) \
GC_mark_and_push_stack(p, (ptr_t)(source))
#else
# define PUSH_ONE_CHECKED_STACK(p, source) \
GC_mark_and_push_stack(p)
#endif
/*
* Push a single value onto mark stack. Mark from the object pointed to by p.
* Invoke FIXUP_POINTER(p) before any further processing.
* P is considered valid even if it is an interior pointer.
* Previously marked objects are not pushed. Hence we make progress even
* if the mark stack overflows.
*/
# if NEED_FIXUP_POINTER
/* Try both the raw version and the fixed up one. */
# define GC_PUSH_ONE_STACK(p, source) \
if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
&& (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
PUSH_ONE_CHECKED_STACK(p, source); \
} \
FIXUP_POINTER(p); \
if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
&& (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
PUSH_ONE_CHECKED_STACK(p, source); \
}
# else /* !NEED_FIXUP_POINTER */
# define GC_PUSH_ONE_STACK(p, source) \
if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
&& (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
PUSH_ONE_CHECKED_STACK(p, source); \
}
# endif
/*
* As above, but interior pointer recognition as for
* normal heap pointers.
*/
# define GC_PUSH_ONE_HEAP(p,source) \
FIXUP_POINTER(p); \
if ((ptr_t)(p) >= (ptr_t)GC_least_plausible_heap_addr \
&& (ptr_t)(p) < (ptr_t)GC_greatest_plausible_heap_addr) { \
GC_mark_stack_top = GC_mark_and_push( \
(GC_PTR)(p), GC_mark_stack_top, \
GC_mark_stack_limit, (GC_PTR *)(source)); \
}
/* Mark starting at mark stack entry top (incl.) down to */
/* mark stack entry bottom (incl.). Stop after performing */
/* about one page worth of work. Return the new mark stack */
/* top entry. */
mse * GC_mark_from GC_PROTO((mse * top, mse * bottom, mse *limit));
#define MARK_FROM_MARK_STACK() \
GC_mark_stack_top = GC_mark_from(GC_mark_stack_top, \
GC_mark_stack, \
GC_mark_stack + GC_mark_stack_size);
/*
* Mark from one finalizable object using the specified
* mark proc. May not mark the object pointed to by
* real_ptr. That is the job of the caller, if appropriate
*/
# define GC_MARK_FO(real_ptr, mark_proc) \
{ \
(*(mark_proc))(real_ptr); \
while (!GC_mark_stack_empty()) MARK_FROM_MARK_STACK(); \
if (GC_mark_state != MS_NONE) { \
GC_set_mark_bit(real_ptr); \
while (!GC_mark_some((ptr_t)0)) {} \
} \
}
extern GC_bool GC_mark_stack_too_small;
/* We need a larger mark stack. May be */
/* set by client supplied mark routines.*/
typedef int mark_state_t; /* Current state of marking, as follows:*/
/* Used to remember where we are during */
/* concurrent marking. */
/* We say something is dirty if it was */
/* written since the last time we */
/* retrieved dirty bits. We say it's */
/* grungy if it was marked dirty in the */
/* last set of bits we retrieved. */
/* Invariant I: all roots and marked */
/* objects p are either dirty, or point */
/* to objects q that are either marked */
/* or a pointer to q appears in a range */
/* on the mark stack. */
# define MS_NONE 0 /* No marking in progress. I holds. */
/* Mark stack is empty. */
# define MS_PUSH_RESCUERS 1 /* Rescuing objects are currently */
/* being pushed. I holds, except */
/* that grungy roots may point to */
/* unmarked objects, as may marked */
/* grungy objects above scan_ptr. */
# define MS_PUSH_UNCOLLECTABLE 2
/* I holds, except that marked */
/* uncollectable objects above scan_ptr */
/* may point to unmarked objects. */
/* Roots may point to unmarked objects */
# define MS_ROOTS_PUSHED 3 /* I holds, mark stack may be nonempty */
# define MS_PARTIALLY_INVALID 4 /* I may not hold, e.g. because of M.S. */
/* overflow. However marked heap */
/* objects below scan_ptr point to */
/* marked or stacked objects. */
# define MS_INVALID 5 /* I may not hold. */
extern mark_state_t GC_mark_state;
#endif /* GC_PMARK_H */
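
As a hedged illustration of the fast path spelled out by GC_PUSH_ONE_STACK above (ignoring NEED_FIXUP_POINTER): a candidate word found while scanning a stack is range-checked against the plausible heap bounds before the checked push routine is invoked. push_candidate_sketch is an illustrative name only.

static void push_candidate_sketch(ptr_t q, ptr_t source)
{
    /* Cheap filter: anything outside [least, greatest) cannot be a   */
    /* heap pointer, so most stack words are rejected without a       */
    /* header lookup.                                                 */
    if (q >= (ptr_t)GC_least_plausible_heap_addr
        && q < (ptr_t)GC_greatest_plausible_heap_addr) {
        PUSH_ONE_CHECKED_STACK(q, source);
    }
}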

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,12 @@
#ifndef GC_OPENBSD_STOP_WORLD_H
#define GC_OPENBSD_STOP_WORLD_H
#if !defined(GC_OPENBSD_THREADS)
#error openbsd_stop_world.h included without GC_OPENBSD_THREADS defined
#endif
struct thread_stop_info {
ptr_t stack_ptr; /* Valid only when stopped. */
};
#endif


@@ -0,0 +1,26 @@
#ifndef GC_PTHREAD_STOP_WORLD_H
#define GC_PTHREAD_STOP_WORLD_H
struct thread_stop_info {
int signal;
word last_stop_count; /* GC_last_stop_count value when thread */
/* last successfully handled a suspend */
/* signal. */
ptr_t stack_ptr; /* Valid only when stopped. */
#ifdef NACL
/* Grab NACL_GC_REG_STORAGE_SIZE pointers off the stack when going into */
/* a syscall. 20 is more than we need, but it's an overestimate in case */
/* the instrumented function uses any callee-saved registers; they may */
/* be pushed to the stack much earlier. Also, on amd64 'push' puts 8 */
/* bytes on the stack even though our pointers are 4 bytes. */
#ifdef __arm__
/* For ARM we save r4-r8, r10-r12, r14 */
#define NACL_GC_REG_STORAGE_SIZE 9
#else
#define NACL_GC_REG_STORAGE_SIZE 20
#endif
ptr_t reg_storage[NACL_GC_REG_STORAGE_SIZE];
#endif
};
#endif


@@ -0,0 +1,117 @@
#ifndef GC_PTHREAD_SUPPORT_H
#define GC_PTHREAD_SUPPORT_H
# include "private/gc_priv.h"
# if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
&& !defined(GC_WIN32_THREADS)
#if defined(GC_DARWIN_THREADS)
# include "private/darwin_stop_world.h"
#elif defined(GC_OPENBSD_THREADS)
# include "private/openbsd_stop_world.h"
#else
# include "private/pthread_stop_world.h"
#endif
/* We use the allocation lock to protect thread-related data structures. */
/* The set of all known threads. We intercept thread creation and */
/* joins. */
/* Protected by allocation/GC lock. */
/* Some of this should be declared volatile, but that's inconsistent */
/* with some library routine declarations. */
typedef struct GC_Thread_Rep {
struct GC_Thread_Rep * next; /* More recently allocated threads */
/* with a given pthread id come */
/* first. (All but the first are */
/* guaranteed to be dead, but we may */
/* not yet have registered the join.) */
pthread_t id;
#ifdef PLATFORM_ANDROID
pid_t kernel_id;
#endif
/* Extra bookkeeping information the stopping code uses */
struct thread_stop_info stop_info;
short flags;
# define FINISHED 1 /* Thread has exited. */
# define DETACHED 2 /* Thread is intended to be detached. */
# define MAIN_THREAD 4 /* True for the original thread only. */
# define FOREIGN_THREAD 8 /* Will not be de-registered by us */
short thread_blocked; /* Protected by GC lock. */
/* Treated as a boolean value. If set, */
/* thread will acquire GC lock before */
/* doing any pointer manipulations, and */
/* has set its sp value. Thus it does */
/* not need to be sent a signal to stop */
/* it. */
ptr_t stack_end; /* Cold end of the stack. */
ptr_t altstack; /* The start of the altstack if there is one, NULL otherwise */
int altstack_size; /* The size of the altstack if there is one */
ptr_t stack; /* The start of the normal stack */
int stack_size; /* The size of the normal stack */
# ifdef IA64
ptr_t backing_store_end;
ptr_t backing_store_ptr;
# endif
void * status; /* The value returned from the thread. */
/* Used only to avoid premature */
/* reclamation of any data it might */
/* reference. */
# ifdef THREAD_LOCAL_ALLOC
# if CPP_WORDSZ == 64 && defined(ALIGN_DOUBLE)
# define GRANULARITY 16
# define NFREELISTS 49
# else
# define GRANULARITY 8
# define NFREELISTS 65
# endif
/* The ith free list corresponds to size i*GRANULARITY */
# define INDEX_FROM_BYTES(n) ((ADD_SLOP(n) + GRANULARITY - 1)/GRANULARITY)
# define BYTES_FROM_INDEX(i) ((i) * GRANULARITY - EXTRA_BYTES)
# define SMALL_ENOUGH(bytes) (ADD_SLOP(bytes) <= \
(NFREELISTS-1)*GRANULARITY)
ptr_t ptrfree_freelists[NFREELISTS];
ptr_t normal_freelists[NFREELISTS];
# ifdef GC_GCJ_SUPPORT
ptr_t gcj_freelists[NFREELISTS];
# endif
/* Free lists contain either a pointer or a small count */
/* reflecting the number of granules allocated at that */
/* size. */
/* 0 ==> thread-local allocation in use, free list */
/* empty. */
/* > 0, <= DIRECT_GRANULES ==> Using global allocation, */
/* too few objects of this size have been */
/* allocated by this thread. */
/* >= HBLKSIZE => pointer to nonempty free list. */
/* > DIRECT_GRANULES, < HBLKSIZE ==> transition to */
/* local alloc, equivalent to 0. */
# define DIRECT_GRANULES (HBLKSIZE/GRANULARITY)
/* Don't use local free lists for up to this much */
/* allocation. */
# endif
} * GC_thread;
# define THREAD_TABLE_SZ 128 /* Must be power of 2 */
extern volatile GC_thread GC_threads[THREAD_TABLE_SZ];
#ifdef NACL
extern __thread GC_thread gc_thread_self;
#endif
extern GC_bool GC_thr_initialized;
GC_thread GC_lookup_thread(pthread_t id);
void GC_thread_deregister_foreign (void *data);
void GC_stop_init(void);
extern GC_bool GC_in_thread_creation;
/* We may currently be in thread creation or destruction. */
/* Only set to TRUE while allocation lock is held. */
/* When set, it is OK to run GC from unknown thread. */
#endif /* GC_PTHREADS && !GC_SOLARIS_THREADS.... etc */
#endif /* GC_PTHREAD_SUPPORT_H */
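
A hedged sketch, assuming THREAD_LOCAL_ALLOC, of how the per-thread free lists described above are meant to be consulted; the function and its fallback policy are illustrative simplifications, not the real thread-local allocator.

static void * local_malloc_sketch(GC_thread me, size_t bytes)
{
    int i = INDEX_FROM_BYTES(bytes);
    ptr_t head;

    if (!SMALL_ENOUGH(bytes)) return GC_malloc(bytes);   /* too big       */
    head = me -> normal_freelists[i];
    if ((word)head >= HBLKSIZE) {                        /* real pointer  */
        me -> normal_freelists[i] = *(ptr_t *)head;      /* pop the head  */
        return head;
    }
    /* Small count: not enough demand yet.  Keep using the global         */
    /* allocator and remember that one more object of this size was       */
    /* requested by this thread.                                          */
    me -> normal_freelists[i] = head + 1;
    return GC_malloc(bytes);
}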


@@ -0,0 +1,37 @@
#ifdef GC_SOLARIS_THREADS
/* The set of all known threads. We intercept thread creation and */
/* joins. We never actually create detached threads. We allocate all */
/* new thread stacks ourselves. These allow us to maintain this */
/* data structure. */
/* Protected by GC_thr_lock. */
/* Some of this should be declared volatile, but that's inconsistent */
/* with some library routine declarations. In particular, the */
/* definition of cond_t doesn't mention volatile! */
typedef struct GC_Thread_Rep {
struct GC_Thread_Rep * next;
thread_t id;
word flags;
# define FINISHED 1 /* Thread has exited. */
# define DETACHED 2 /* Thread is intended to be detached. */
# define CLIENT_OWNS_STACK 4
/* Stack was supplied by client. */
# define SUSPNDED 8 /* Currently suspended. */
/* SUSPENDED is used in a system header. */
ptr_t stack;
size_t stack_size;
cond_t join_cv;
void * status;
} * GC_thread;
extern GC_thread GC_new_thread(thread_t id);
extern GC_bool GC_thr_initialized;
extern volatile GC_thread GC_threads[];
extern size_t GC_min_stack_sz;
extern size_t GC_page_sz;
extern void GC_thr_init(void);
extern ptr_t GC_stack_alloc(size_t * stack_size);
extern void GC_stack_free(ptr_t stack, size_t size);
# endif /* GC_SOLARIS_THREADS */


@@ -0,0 +1,95 @@
/*
* This is a reimplementation of a subset of the pthread_getspecific/setspecific
* interface. This appears to outperform the standard linuxthreads one
* by a significant margin.
* The major restriction is that each thread may only make a single
* pthread_setspecific call on a single key. (The current data structure
* doesn't really require that. The restriction should be easily removable.)
* We don't currently support the destruction functions, though that
* could be done.
* We also currently assume that only one pthread_setspecific call
* can be executed at a time, though that assumption would be easy to remove
* by adding a lock.
*/
#include <errno.h>
/* Called during key creation or setspecific. */
/* For the GC we already hold lock. */
/* Currently allocated objects leak on thread exit. */
/* That's hard to fix, but OK if we allocate garbage */
/* collected memory. */
#define MALLOC_CLEAR(n) GC_INTERNAL_MALLOC(n, NORMAL)
#define PREFIXED(name) GC_##name
#define TS_CACHE_SIZE 1024
#define CACHE_HASH(n) (((((long)n) >> 8) ^ (long)n) & (TS_CACHE_SIZE - 1))
#define TS_HASH_SIZE 1024
#define HASH(n) (((((long)n) >> 8) ^ (long)n) & (TS_HASH_SIZE - 1))
/* An entry describing a thread-specific value for a given thread. */
/* All such accessible structures preserve the invariant that if either */
/* thread is a valid pthread id or qtid is a valid "quick thread id" */
/* for a thread, then value holds the corresponding thread specific */
/* value. This invariant must be preserved at ALL times, since */
/* asynchronous reads are allowed. */
typedef struct thread_specific_entry {
unsigned long qtid; /* quick thread id, only for cache */
void * value;
struct thread_specific_entry *next;
pthread_t thread;
} tse;
/* We represent each thread-specific datum as two tables. The first is */
/* a cache, indexed by a "quick thread identifier". The "quick" thread */
/* identifier is an easy to compute value, which is guaranteed to */
/* determine the thread, though a thread may correspond to more than */
/* one value. We typically use the address of a page in the stack. */
/* The second is a hash table, indexed by pthread_self(). It is used */
/* only as a backup. */
/* Return the "quick thread id". Default version. Assumes page size, */
/* or at least thread stack separation, is at least 4K. */
/* Must be defined so that it never returns 0. (Page 0 can't really */
/* be part of any stack, since that would make 0 a valid stack pointer.)*/
static __inline__ unsigned long quick_thread_id() {
int dummy;
return (unsigned long)(&dummy) >> 12;
}
#define INVALID_QTID ((unsigned long)0)
#define INVALID_THREADID ((pthread_t)0)
typedef struct thread_specific_data {
tse * volatile cache[TS_CACHE_SIZE];
/* A faster index to the hash table */
tse * hash[TS_HASH_SIZE];
pthread_mutex_t lock;
} tsd;
typedef tsd * PREFIXED(key_t);
extern int PREFIXED(key_create) (tsd ** key_ptr, void (* destructor)(void *));
extern int PREFIXED(setspecific) (tsd * key, void * value);
extern void PREFIXED(remove_specific) (tsd * key);
/* An internal version of getspecific that assumes a cache miss. */
void * PREFIXED(slow_getspecific) (tsd * key, unsigned long qtid,
tse * volatile * cache_entry);
static __inline__ void * PREFIXED(getspecific) (tsd * key) {
long qtid = quick_thread_id();
unsigned hash_val = CACHE_HASH(qtid);
tse * volatile * entry_ptr = key -> cache + hash_val;
tse * entry = *entry_ptr; /* Must be loaded only once. */
if (EXPECT(entry -> qtid == qtid, 1)) {
GC_ASSERT(entry -> thread == pthread_self());
return entry -> value;
}
return PREFIXED(slow_getspecific) (key, qtid, entry_ptr);
}
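
A hedged usage sketch of the interface declared above: with PREFIXED expanding to a GC_ prefix, the calls mirror the pthread TSD API, subject to the one-setspecific-per-thread restriction noted at the top. The key and function names below are illustrative, not from the GC sources.

static tsd * my_key;                  /* i.e. a GC_key_t */

static void tsd_init_sketch(void)
{
    GC_key_create(&my_key, 0);        /* once; destructors are unsupported */
}

static void tsd_per_thread_sketch(void * value)
{
    GC_setspecific(my_key, value);    /* at most once per thread           */
    GC_ASSERT(GC_getspecific(my_key) == value);  /* fast cached lookup     */
}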