@@ -1307,57 +1307,136 @@ yet finished.\n\
 This function is meant for internal and specialized purposes only.\n\
 In most applications `threading.enumerate()` should be used instead.");
 
+
+/* Here we're essentially cleaning up after a thread that finished
+   and has already been deallocated (both the threading.Thread and
+   the tstate).  Thus it will run in a different thread with the
+   same interpreter (via a "pending call"). */
 static int
-release_sentinel(void *wr_raw)
+clean_up_sentinel(void *data)
 {
-    PyObject *wr = _PyObject_CAST(wr_raw);
-    /* Tricky: this function is called when the current thread state
-       is being deleted.  Therefore, only simple C code can safely
-       execute here. */
-    PyObject *obj = PyWeakref_GET_OBJECT(wr);
-    lockobject *lock;
-    if (obj != Py_None) {
-        lock = (lockobject *) obj;
-        if (lock->locked) {
-            PyThread_release_lock(lock->lock_lock);
-            lock->locked = 0;
+    PyObject *lockobj = (PyObject *)data;
+    assert(lockobj != NULL);
+    PyThread_type_lock lock = ((lockobject *)lockobj)->lock_lock;
+
+    /* Wait here until we know for sure that the thread running
+       _PyThreadState_DeleteCurrent() has released the lock. */
+    if (acquire_timed(lock, 0) == PY_LOCK_ACQUIRED) {
+        /* _PyThreadState_DeleteCurrent() finished, so we can proceed. */
+        PyThread_release_lock(lock);
+        ((lockobject *)lockobj)->locked = 0;
+    }
+    else if (((lockobject *)lockobj)->locked == 2) {
+        /* _PyThreadState_DeleteCurrent() is still holding
+           the lock, so we will wait here until it is released.
+           We don't need to hold the GIL while we wait though. */
+        ((lockobject *)lockobj)->locked = 1;
+
+        PyLockStatus r = acquire_timed(lock, -1);
+        // XXX Why do we have to loop?
+        while (r == PY_LOCK_FAILURE) {
+            r = acquire_timed(lock, -1);
         }
+        PyThread_release_lock(lock);
+        ((lockobject *)lockobj)->locked = 0;
     }
+    /* Otherwise the current thread acquired the lock right before
+       its eval loop was interrupted to run this pending call.
+       We can simply let it proceed. */
+
+    /* In all cases, at this point we are done with the lock. */
+    Py_DECREF(lockobj);
+    return 0;
+}
+
+static PyThread_type_lock
+thread_prepare_delete(PyThreadState *tstate)
+{
+    assert(tstate->_threading_thread.pre_delete != NULL);
+    PyThread_type_lock lock = NULL;
+
+    /* Tricky: this function is called when the current thread state
+       is being deleted.  Therefore, only simple C code can safely
+       execute here.  The GIL is still held. */
+
+    PyObject *wr = tstate->_threading_thread.lock_weakref;
+    assert(wr != NULL);
+    PyObject *lockobj = PyWeakref_GET_OBJECT(wr);
+    if (lockobj == Py_None) {
+        /* The thread has already been destroyed, so we can clean up now. */
+        goto done;
+    }
+    if (_PyThreadState_GET() != tstate) {
+        assert(PyThread_get_thread_ident() != tstate->thread_id);
+        /* It must be a daemon thread that was killed during
+         * interp/runtime finalization, so there's nothing to do. */
+        goto done;
+    }
+    assert(((lockobject *)lockobj)->locked == 1);
+    assert(acquire_timed(((lockobject *)lockobj)->lock_lock, 0) == PY_LOCK_FAILURE);
+    /* We cheat a little here to allow clean_up_sentinel() to know
+       that this thread is still holding the lock.  The value will be
+       reset to the normal 0 or 1 as soon as any other thread
+       uses the lock. */
+    ((lockobject *)lockobj)->locked = 2;
+
+    /* We need to prevent the underlying PyThread_type_lock from getting
+       destroyed before we release it in _PyThreadState_DeleteCurrent().
+       However, we don't need the weakref any more. */
+    Py_INCREF(lockobj);
+
+    /* The pending call will be run the next time the GIL is taken
+       by one of this interpreter's threads. */
+    void *data = (void *)lockobj;
+    if (Py_AddPendingCall(clean_up_sentinel, data) < 0) {
+        Py_DECREF(lockobj);
+        /* We otherwise ignore the error.  A non-zero value means
+           there were too many pending calls already queued up.
+           This case is unlikely, and, at worst,
+           we'll just leak the lock.
+        */
+        goto done;
+    }
+
+    lock = ((lockobject *)lockobj)->lock_lock;
+
+done:
     /* Deallocating a weakref with a NULL callback only calls
        PyObject_GC_Del(), which can't call any Python code. */
     Py_DECREF(wr);
-    return 0;
+    tstate->_threading_thread.pre_delete = NULL;
+    tstate->_threading_thread.lock_weakref = NULL;
+    return lock;
 }
 
 static PyObject *
 thread__set_sentinel(PyObject *module, PyObject *Py_UNUSED(ignored))
 {
-    PyObject *wr;
     PyThreadState *tstate = _PyThreadState_GET();
-    lockobject *lock;
 
-    if (tstate->on_delete_data != NULL) {
+    if (tstate->_threading_thread.lock_weakref != NULL) {
         /* We must support the re-creation of the lock from a
            fork()ed child. */
-        assert(tstate->on_delete == &release_sentinel);
-        wr = (PyObject *) tstate->on_delete_data;
-        tstate->on_delete = NULL;
-        tstate->on_delete_data = NULL;
-        Py_DECREF(wr);
-    }
-    lock = newlockobject(module);
-    if (lock == NULL)
+        assert(tstate->_threading_thread.pre_delete == &thread_prepare_delete);
+        tstate->_threading_thread.pre_delete = NULL;
+        Py_DECREF(tstate->_threading_thread.lock_weakref);
+        tstate->_threading_thread.lock_weakref = NULL;
+    }
+
+    PyObject *lockobj = (PyObject *)newlockobject(module);
+    if (lockobj == NULL) {
         return NULL;
+    }
     /* The lock is owned by whoever called _set_sentinel(), but the weakref
        hangs to the thread state. */
-    wr = PyWeakref_NewRef((PyObject *) lock, NULL);
+    PyObject *wr = PyWeakref_NewRef(lockobj, NULL);
     if (wr == NULL) {
-        Py_DECREF(lock);
+        Py_DECREF(lockobj);
         return NULL;
     }
-    tstate->on_delete_data = (void *) wr;
-    tstate->on_delete = &release_sentinel;
-    return (PyObject *) lock;
+    tstate->_threading_thread.pre_delete = &thread_prepare_delete;
+    tstate->_threading_thread.lock_weakref = wr;
+    return lockobj;
 }
 
 PyDoc_STRVAR(_set_sentinel_doc,
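
For readers unfamiliar with pending calls: the sketch below is not part of this commit, but shows the general shape of the mechanism that clean_up_sentinel() is registered through. Py_AddPendingCall() is the public CPython C API used in the diff above; the my_cleanup() and schedule_cleanup() helpers are hypothetical names used only for illustration, and error handling is kept minimal.

#include <Python.h>

/* Callback invoked later, with the GIL held, once the interpreter gets
   around to running its pending calls.  Only simple, non-blocking work
   should happen here. */
static int
my_cleanup(void *arg)
{
    PyObject *obj = (PyObject *)arg;
    /* ... release whatever resource is associated with obj ... */
    Py_DECREF(obj);   /* drop the reference taken in schedule_cleanup() */
    return 0;         /* a non-zero return would report an error */
}

/* Hypothetical helper: hand obj off to a pending call for later cleanup. */
static void
schedule_cleanup(PyObject *obj)
{
    Py_INCREF(obj);   /* keep obj alive until my_cleanup() runs */
    if (Py_AddPendingCall(my_cleanup, obj) < 0) {
        /* The pending-call queue was full; undo the reference and give up,
           mirroring the "at worst we leak" policy in thread_prepare_delete(). */
        Py_DECREF(obj);
    }
}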