Revert "bpo-33608: Factor out a private, per-interpreter _Py_AddPendingCall(). (gh-13714)" (GH-13780)

This reverts commit 6a150bcaeb.
This commit is contained in:
Victor Stinner
2019-06-03 18:14:24 +02:00
committed by GitHub
parent 49a7e34797
commit e225bebc14
10 changed files with 185 additions and 320 deletions

File diff suppressed because it is too large Load Diff

View File

@@ -141,11 +141,9 @@ static void recreate_gil(struct _gil_runtime_state *gil)
}
static void
drop_gil(struct _ceval_runtime_state *ceval_r,
struct _ceval_interpreter_state *ceval_i,
PyThreadState *tstate)
drop_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
{
struct _gil_runtime_state *gil = &ceval_r->gil;
struct _gil_runtime_state *gil = &ceval->gil;
if (!_Py_atomic_load_relaxed(&gil->locked)) {
Py_FatalError("drop_gil: GIL is not locked");
}
@@ -165,12 +163,12 @@ drop_gil(struct _ceval_runtime_state *ceval_r,
MUTEX_UNLOCK(gil->mutex);
#ifdef FORCE_SWITCHING
if (_Py_atomic_load_relaxed(&ceval_r->gil_drop_request) && tstate != NULL) {
if (_Py_atomic_load_relaxed(&ceval->gil_drop_request) && tstate != NULL) {
MUTEX_LOCK(gil->switch_mutex);
/* Not switched yet => wait */
if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
{
RESET_GIL_DROP_REQUEST(ceval_r, ceval_i);
RESET_GIL_DROP_REQUEST(ceval);
/* NOTE: if COND_WAIT does not atomically start waiting when
releasing the mutex, another thread can run through, take
the GIL and drop it again, and reset the condition
@@ -183,19 +181,13 @@ drop_gil(struct _ceval_runtime_state *ceval_r,
}
static void
take_gil(struct _ceval_runtime_state *ceval_r,
PyThreadState *tstate)
take_gil(struct _ceval_runtime_state *ceval, PyThreadState *tstate)
{
if (tstate == NULL) {
Py_FatalError("take_gil: NULL tstate");
}
PyInterpreterState *interp = tstate->interp;
if (interp == NULL) {
Py_FatalError("take_gil: NULL interp");
}
struct _ceval_interpreter_state *ceval_i = &interp->ceval;
struct _gil_runtime_state *gil = &ceval_r->gil;
struct _gil_runtime_state *gil = &ceval->gil;
int err = errno;
MUTEX_LOCK(gil->mutex);
@@ -218,7 +210,7 @@ take_gil(struct _ceval_runtime_state *ceval_r,
_Py_atomic_load_relaxed(&gil->locked) &&
gil->switch_number == saved_switchnum)
{
SET_GIL_DROP_REQUEST(ceval_r);
SET_GIL_DROP_REQUEST(ceval);
}
}
_ready:
@@ -240,11 +232,11 @@ _ready:
COND_SIGNAL(gil->switch_cond);
MUTEX_UNLOCK(gil->switch_mutex);
#endif
if (_Py_atomic_load_relaxed(&ceval_r->gil_drop_request)) {
RESET_GIL_DROP_REQUEST(ceval_r, ceval_i);
if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
RESET_GIL_DROP_REQUEST(ceval);
}
if (tstate->async_exc != NULL) {
_PyEval_SignalAsyncExc(ceval_r, ceval_i);
_PyEval_SignalAsyncExc(ceval);
}
MUTEX_UNLOCK(gil->mutex);

View File

@@ -1147,31 +1147,15 @@ Py_FinalizeEx(void)
return status;
}
/* Get current thread state and interpreter pointer */
PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
PyInterpreterState *interp = tstate->interp;
// Wrap up existing "threading"-module-created, non-daemon threads.
wait_for_thread_shutdown();
// Make any remaining pending calls.
/* XXX For the moment we are going to ignore lingering pending calls.
* We've seen sporadic failures on some of the buildbots during finalization
* with the changes for per-interpreter pending calls (see bpo-33608),
* meaning the previous _PyEval_FinishPendingCalls() call here is
* a trigger, if not responsible.
*
* Ignoring pending calls at this point in the runtime lifecycle
* is okay (for now) for the following reasons:
*
* * pending calls are still not a widely-used feature
* * this only affects runtime finalization, where the process is
* likely to end soon anyway (except for some embedding cases)
*
* See bpo-37127 about resolving the problem. Ultimately the call
* here should be re-enabled.
*/
//_PyEval_FinishPendingCalls(interp);
_Py_FinishPendingCalls(runtime);
/* Get current thread state and interpreter pointer */
PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
PyInterpreterState *interp = tstate->interp;
/* The interpreter is still entirely intact at this point, and the
* exit funcs may be relying on that. In particular, if some thread
@@ -1599,9 +1583,6 @@ Py_EndInterpreter(PyThreadState *tstate)
// Wrap up existing "threading"-module-created, non-daemon threads.
wait_for_thread_shutdown();
// Make any remaining pending calls.
_PyEval_FinishPendingCalls(interp);
call_py_exitfuncs(interp);
if (tstate != interp->tstate_head || tstate->next != NULL)

View File

@@ -218,13 +218,6 @@ PyInterpreterState_New(void)
return NULL;
}
interp->ceval.pending.lock = PyThread_allocate_lock();
if (interp->ceval.pending.lock == NULL) {
PyErr_SetString(PyExc_RuntimeError,
"failed to create interpreter ceval pending mutex");
return NULL;
}
interp->eval_frame = _PyEval_EvalFrameDefault;
#ifdef HAVE_DLOPEN
#if HAVE_DECL_RTLD_NOW
@@ -352,10 +345,6 @@ PyInterpreterState_Delete(PyInterpreterState *interp)
if (interp->id_mutex != NULL) {
PyThread_free_lock(interp->id_mutex);
}
if (interp->ceval.pending.lock != NULL) {
PyThread_free_lock(interp->ceval.pending.lock);
interp->ceval.pending.lock = NULL;
}
PyMem_RawFree(interp);
}
@@ -1025,7 +1014,7 @@ PyThreadState_SetAsyncExc(unsigned long id, PyObject *exc)
p->async_exc = exc;
HEAD_UNLOCK(runtime);
Py_XDECREF(old_exc);
_PyEval_SignalAsyncExc(&runtime->ceval, &interp->ceval);
_PyEval_SignalAsyncExc(&runtime->ceval);
return 1;
}
}
@@ -1455,7 +1444,7 @@ _PyObject_GetCrossInterpreterData(PyObject *obj, _PyCrossInterpreterData *data)
return 0;
}
static int
static void
_release_xidata(void *arg)
{
_PyCrossInterpreterData *data = (_PyCrossInterpreterData *)arg;
@@ -1463,21 +1452,42 @@ _release_xidata(void *arg)
data->free(data->data);
}
Py_XDECREF(data->obj);
PyMem_Free(data);
return 0;
}
static void
_call_in_interpreter(struct _gilstate_runtime_state *gilstate,
PyInterpreterState *interp,
void (*func)(void *), void *arg)
{
/* We would use Py_AddPendingCall() if it weren't specific to the
* main interpreter (see bpo-33608). In the meantime we take a
* naive approach.
*/
PyThreadState *save_tstate = NULL;
if (interp != _PyRuntimeGILState_GetThreadState(gilstate)->interp) {
// XXX Using the "head" thread isn't strictly correct.
PyThreadState *tstate = PyInterpreterState_ThreadHead(interp);
// XXX Possible GILState issues?
save_tstate = _PyThreadState_Swap(gilstate, tstate);
}
func(arg);
// Switch back.
if (save_tstate != NULL) {
_PyThreadState_Swap(gilstate, save_tstate);
}
}
void
_PyCrossInterpreterData_Release(_PyCrossInterpreterData *data)
{
_PyRuntimeState *runtime = &_PyRuntime;
if (data->data == NULL && data->obj == NULL) {
// Nothing to release!
return;
}
// Get the original interpreter.
// Switch to the original interpreter.
PyInterpreterState *interp = _PyInterpreterState_LookUpID(data->interp);
if (interp == NULL) {
// The interpreter was already destroyed.
@@ -1486,28 +1496,10 @@ _PyCrossInterpreterData_Release(_PyCrossInterpreterData *data)
}
return;
}
// XXX There's an ever-so-slight race here...
if (interp->finalizing) {
// XXX Someone leaked some memory...
return;
}
// "Release" the data and/or the object.
_PyCrossInterpreterData *copied = PyMem_Malloc(sizeof(_PyCrossInterpreterData));
if (copied == NULL) {
PyErr_SetString(PyExc_MemoryError,
"Not enough memory to preserve cross-interpreter data");
PyErr_Print();
return;
}
memcpy(copied, data, sizeof(_PyCrossInterpreterData));
PyThreadState *tstate = _PyRuntimeState_GetThreadState(runtime);
int res = _PyEval_AddPendingCall(tstate,
&runtime->ceval, &interp->ceval,
0, _release_xidata, copied);
if (res != 0) {
// XXX Queue full or couldn't get lock. Try again somehow?
}
struct _gilstate_runtime_state *gilstate = &_PyRuntime.gilstate;
_call_in_interpreter(gilstate, interp, _release_xidata, data);
}
PyObject *