ceval_gil.c

#include "Python.h"
#include "pycore_atomic.h"        // _Py_atomic_int
#include "pycore_ceval.h"         // _PyEval_SignalReceived()
#include "pycore_pyerrors.h"      // _PyErr_GetRaisedException()
#include "pycore_pylifecycle.h"   // _PyErr_Print()
#include "pycore_initconfig.h"    // _PyStatus_OK()
#include "pycore_interp.h"        // _Py_RunGC()
#include "pycore_pymem.h"         // _PyMem_IsPtrFreed()

/*
   Notes about the implementation:

   - The GIL is just a boolean variable (locked) whose access is protected
     by a mutex (gil_mutex), and whose changes are signalled by a condition
     variable (gil_cond).  gil_mutex is taken for short periods of time,
     and therefore mostly uncontended.

   - In the GIL-holding thread, the main loop (PyEval_EvalFrameEx) must be
     able to release the GIL on demand by another thread.  A volatile boolean
     variable (gil_drop_request) is used for that purpose, which is checked
     at every turn of the eval loop.  That variable is set after a wait of
     `interval` microseconds on `gil_cond` has timed out.

      [Actually, another volatile boolean variable (eval_breaker) is used
       which ORs several conditions into one.  Volatile booleans are
       sufficient as inter-thread signalling means since Python is run
       on cache-coherent architectures only.]

   - A thread wanting to take the GIL will first let pass a given amount of
     time (`interval` microseconds) before setting gil_drop_request.  This
     encourages a defined switching period, but doesn't enforce it since
     opcodes can take an arbitrary time to execute.

     The `interval` value is available for the user to read and modify
     using the Python API `sys.{get,set}switchinterval()`.

   - When a thread releases the GIL and gil_drop_request is set, that thread
     ensures that another GIL-awaiting thread gets scheduled.
     It does so by waiting on a condition variable (switch_cond) until
     the value of last_holder is changed to something other than its
     own thread state pointer, indicating that another thread was able to
     take the GIL.

     This is meant to prohibit the latency-adverse behaviour on multi-core
     machines where one thread would speculatively release the GIL, but still
     run and end up being the first to re-acquire it, making the "timeslices"
     much longer than expected.
     (Note: this mechanism is enabled with FORCE_SWITCHING above)
*/

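/* For orientation: extension code normally reaches this machinery through
   the public API defined later in this file (PyEval_SaveThread() /
   PyEval_RestoreThread(), usually via the Py_BEGIN_ALLOW_THREADS /
   Py_END_ALLOW_THREADS macros).  A minimal sketch, where do_blocking_io()
   is a hypothetical function that does not touch Python objects:

       PyThreadState *save = PyEval_SaveThread();   // drop_gil() under the hood
       do_blocking_io();                            // other threads can run
       PyEval_RestoreThread(save);                  // take_gil() under the hood
*/
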
// GH-89279: Force inlining by using a macro.
#if defined(_MSC_VER) && SIZEOF_INT == 4
#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) (assert(sizeof((ATOMIC_VAL)->_value) == 4), *((volatile int*)&((ATOMIC_VAL)->_value)))
#else
#define _Py_atomic_load_relaxed_int32(ATOMIC_VAL) _Py_atomic_load_relaxed(ATOMIC_VAL)
#endif

/* This can set eval_breaker to 0 even though gil_drop_request became
   1.  We believe this is all right because the eval loop will release
   the GIL eventually anyway. */
static inline void
COMPUTE_EVAL_BREAKER(PyInterpreterState *interp,
                     struct _ceval_runtime_state *ceval,
                     struct _ceval_state *ceval2)
{
    _Py_atomic_store_relaxed(&ceval2->eval_breaker,
        _Py_atomic_load_relaxed_int32(&ceval2->gil_drop_request)
        | (_Py_atomic_load_relaxed_int32(&ceval->signals_pending)
           && _Py_ThreadCanHandleSignals(interp))
        | (_Py_atomic_load_relaxed_int32(&ceval2->pending.calls_to_do))
        | (_Py_IsMainThread() && _Py_IsMainInterpreter(interp)
           && _Py_atomic_load_relaxed_int32(&ceval->pending_mainthread.calls_to_do))
        | ceval2->pending.async_exc
        | _Py_atomic_load_relaxed_int32(&ceval2->gc_scheduled));
}

static inline void
SET_GIL_DROP_REQUEST(PyInterpreterState *interp)
{
    struct _ceval_state *ceval2 = &interp->ceval;
    _Py_atomic_store_relaxed(&ceval2->gil_drop_request, 1);
    _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1);
}

static inline void
RESET_GIL_DROP_REQUEST(PyInterpreterState *interp)
{
    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
    struct _ceval_state *ceval2 = &interp->ceval;
    _Py_atomic_store_relaxed(&ceval2->gil_drop_request, 0);
    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}

static inline void
SIGNAL_PENDING_CALLS(struct _pending_calls *pending, PyInterpreterState *interp)
{
    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
    struct _ceval_state *ceval2 = &interp->ceval;
    _Py_atomic_store_relaxed(&pending->calls_to_do, 1);
    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}

static inline void
UNSIGNAL_PENDING_CALLS(PyInterpreterState *interp)
{
    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
    struct _ceval_state *ceval2 = &interp->ceval;
    if (_Py_IsMainThread() && _Py_IsMainInterpreter(interp)) {
        _Py_atomic_store_relaxed(&ceval->pending_mainthread.calls_to_do, 0);
    }
    _Py_atomic_store_relaxed(&ceval2->pending.calls_to_do, 0);
    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}

static inline void
SIGNAL_PENDING_SIGNALS(PyInterpreterState *interp, int force)
{
    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
    struct _ceval_state *ceval2 = &interp->ceval;
    _Py_atomic_store_relaxed(&ceval->signals_pending, 1);
    if (force) {
        _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1);
    }
    else {
        /* eval_breaker is not set to 1 if thread_can_handle_signals() is false */
        COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
    }
}

static inline void
UNSIGNAL_PENDING_SIGNALS(PyInterpreterState *interp)
{
    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
    struct _ceval_state *ceval2 = &interp->ceval;
    _Py_atomic_store_relaxed(&ceval->signals_pending, 0);
    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}

static inline void
SIGNAL_ASYNC_EXC(PyInterpreterState *interp)
{
    struct _ceval_state *ceval2 = &interp->ceval;
    ceval2->pending.async_exc = 1;
    _Py_atomic_store_relaxed(&ceval2->eval_breaker, 1);
}

static inline void
UNSIGNAL_ASYNC_EXC(PyInterpreterState *interp)
{
    struct _ceval_runtime_state *ceval = &interp->runtime->ceval;
    struct _ceval_state *ceval2 = &interp->ceval;
    ceval2->pending.async_exc = 0;
    COMPUTE_EVAL_BREAKER(interp, ceval, ceval2);
}

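/* Each of the SIGNAL_ and UNSIGNAL_ helpers above flips exactly one input of
   COMPUTE_EVAL_BREAKER() and then either forces eval_breaker to 1 (signal)
   or recomputes it from all inputs (unsignal), so the eval loop only has a
   single flag to poll. */
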
/*
 * Implementation of the Global Interpreter Lock (GIL).
 */

#include <stdlib.h>
#include <errno.h>

#include "pycore_atomic.h"
#include "condvar.h"

#define MUTEX_INIT(mut) \
    if (PyMUTEX_INIT(&(mut))) { \
        Py_FatalError("PyMUTEX_INIT(" #mut ") failed"); };
#define MUTEX_FINI(mut) \
    if (PyMUTEX_FINI(&(mut))) { \
        Py_FatalError("PyMUTEX_FINI(" #mut ") failed"); };
#define MUTEX_LOCK(mut) \
    if (PyMUTEX_LOCK(&(mut))) { \
        Py_FatalError("PyMUTEX_LOCK(" #mut ") failed"); };
#define MUTEX_UNLOCK(mut) \
    if (PyMUTEX_UNLOCK(&(mut))) { \
        Py_FatalError("PyMUTEX_UNLOCK(" #mut ") failed"); };

#define COND_INIT(cond) \
    if (PyCOND_INIT(&(cond))) { \
        Py_FatalError("PyCOND_INIT(" #cond ") failed"); };
#define COND_FINI(cond) \
    if (PyCOND_FINI(&(cond))) { \
        Py_FatalError("PyCOND_FINI(" #cond ") failed"); };
#define COND_SIGNAL(cond) \
    if (PyCOND_SIGNAL(&(cond))) { \
        Py_FatalError("PyCOND_SIGNAL(" #cond ") failed"); };
#define COND_WAIT(cond, mut) \
    if (PyCOND_WAIT(&(cond), &(mut))) { \
        Py_FatalError("PyCOND_WAIT(" #cond ") failed"); };
#define COND_TIMED_WAIT(cond, mut, microseconds, timeout_result) \
    { \
        int r = PyCOND_TIMEDWAIT(&(cond), &(mut), (microseconds)); \
        if (r < 0) \
            Py_FatalError("PyCOND_WAIT(" #cond ") failed"); \
        if (r) /* 1 == timeout, 2 == impl. can't say, so assume timeout */ \
            timeout_result = 1; \
        else \
            timeout_result = 0; \
    } \


#define DEFAULT_INTERVAL 5000
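/* DEFAULT_INTERVAL is in microseconds: 5000 us == 5 ms, the default value
   reported by sys.getswitchinterval() as 0.005 seconds. */
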
static void _gil_initialize(struct _gil_runtime_state *gil)
{
    _Py_atomic_int uninitialized = {-1};
    gil->locked = uninitialized;
    gil->interval = DEFAULT_INTERVAL;
}

static int gil_created(struct _gil_runtime_state *gil)
{
    if (gil == NULL) {
        return 0;
    }
    return (_Py_atomic_load_explicit(&gil->locked, _Py_memory_order_acquire) >= 0);
}

static void create_gil(struct _gil_runtime_state *gil)
{
    MUTEX_INIT(gil->mutex);
#ifdef FORCE_SWITCHING
    MUTEX_INIT(gil->switch_mutex);
#endif
    COND_INIT(gil->cond);
#ifdef FORCE_SWITCHING
    COND_INIT(gil->switch_cond);
#endif
    _Py_atomic_store_relaxed(&gil->last_holder, 0);
    _Py_ANNOTATE_RWLOCK_CREATE(&gil->locked);
    _Py_atomic_store_explicit(&gil->locked, 0, _Py_memory_order_release);
}

static void destroy_gil(struct _gil_runtime_state *gil)
{
    /* some pthread-like implementations tie the mutex to the cond
     * and must have the cond destroyed first.
     */
    COND_FINI(gil->cond);
    MUTEX_FINI(gil->mutex);
#ifdef FORCE_SWITCHING
    COND_FINI(gil->switch_cond);
    MUTEX_FINI(gil->switch_mutex);
#endif
    _Py_atomic_store_explicit(&gil->locked, -1,
                              _Py_memory_order_release);
    _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
}

#ifdef HAVE_FORK
static void recreate_gil(struct _gil_runtime_state *gil)
{
    _Py_ANNOTATE_RWLOCK_DESTROY(&gil->locked);
    /* XXX should we destroy the old OS resources here? */
    create_gil(gil);
}
#endif

static void
drop_gil(struct _ceval_state *ceval, PyThreadState *tstate)
{
    /* If tstate is NULL, the caller is indicating that we're releasing
       the GIL for the last time in this thread.  This is particularly
       relevant when the current thread state is finalizing or its
       interpreter is finalizing (either may be in an inconsistent
       state).  In that case the current thread will definitely
       never try to acquire the GIL again. */
    // XXX It may be more correct to check tstate->_status.finalizing.
    // XXX assert(tstate == NULL || !tstate->_status.cleared);

    struct _gil_runtime_state *gil = ceval->gil;
    if (!_Py_atomic_load_relaxed(&gil->locked)) {
        Py_FatalError("drop_gil: GIL is not locked");
    }

    /* tstate is allowed to be NULL (early interpreter init) */
    if (tstate != NULL) {
        /* Sub-interpreter support: threads might have been switched
           under our feet using PyThreadState_Swap(). Fix the GIL last
           holder variable so that our heuristics work. */
        _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
    }

    MUTEX_LOCK(gil->mutex);
    _Py_ANNOTATE_RWLOCK_RELEASED(&gil->locked, /*is_write=*/1);
    _Py_atomic_store_relaxed(&gil->locked, 0);
    COND_SIGNAL(gil->cond);
    MUTEX_UNLOCK(gil->mutex);

#ifdef FORCE_SWITCHING
    /* We check tstate first in case we might be releasing the GIL for
       the last time in this thread.  In that case there's a possible
       race with tstate->interp getting deleted after gil->mutex is
       unlocked and before the following code runs, leading to a crash.

       We can use (tstate == NULL) to indicate the thread is done with
       the GIL, and that's the only time we might delete the
       interpreter, so checking tstate first prevents the crash.
       See https://github.com/python/cpython/issues/104341. */
    if (tstate != NULL && _Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
        MUTEX_LOCK(gil->switch_mutex);
        /* Not switched yet => wait */
        if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) == tstate)
        {
            assert(_PyThreadState_CheckConsistency(tstate));
            RESET_GIL_DROP_REQUEST(tstate->interp);
            /* NOTE: if COND_WAIT does not atomically start waiting when
               releasing the mutex, another thread can run through, take
               the GIL and drop it again, and reset the condition
               before we even had a chance to wait for it. */
            COND_WAIT(gil->switch_cond, gil->switch_mutex);
        }
        MUTEX_UNLOCK(gil->switch_mutex);
    }
#endif
}

/* Take the GIL.

   The function saves errno at entry and restores its value at exit.

   tstate must be non-NULL. */
static void
take_gil(PyThreadState *tstate)
{
    int err = errno;

    assert(tstate != NULL);
    /* We shouldn't be using a thread state that isn't viable any more. */
    // XXX It may be more correct to check tstate->_status.finalizing.
    // XXX assert(!tstate->_status.cleared);

    if (_PyThreadState_MustExit(tstate)) {
        /* bpo-39877: If Py_Finalize() has been called and tstate is not the
           thread which called Py_Finalize(), exit the thread immediately.
           This code path can be reached by a daemon thread after Py_Finalize()
           completes.  In this case, tstate is a dangling pointer: it points to
           freed PyThreadState memory. */
        PyThread_exit_thread();
    }

    assert(_PyThreadState_CheckConsistency(tstate));
    PyInterpreterState *interp = tstate->interp;
    struct _ceval_state *ceval = &interp->ceval;
    struct _gil_runtime_state *gil = ceval->gil;

    /* Check that _PyEval_InitThreads() was called to create the lock */
    assert(gil_created(gil));

    MUTEX_LOCK(gil->mutex);

    if (!_Py_atomic_load_relaxed(&gil->locked)) {
        goto _ready;
    }

    int drop_requested = 0;
    while (_Py_atomic_load_relaxed(&gil->locked)) {
        unsigned long saved_switchnum = gil->switch_number;

        unsigned long interval = (gil->interval >= 1 ? gil->interval : 1);
        int timed_out = 0;
        COND_TIMED_WAIT(gil->cond, gil->mutex, interval, timed_out);

        /* If we timed out and no switch occurred in the meantime, it is time
           to ask the GIL-holding thread to drop it. */
        if (timed_out &&
            _Py_atomic_load_relaxed(&gil->locked) &&
            gil->switch_number == saved_switchnum)
        {
            if (_PyThreadState_MustExit(tstate)) {
                MUTEX_UNLOCK(gil->mutex);
                // gh-96387: If the loop issued a drop request in a previous
                // iteration, reset the request.  Otherwise, drop_gil() can
                // block forever waiting for the thread which exited.  Drop
                // requests made by other threads are also reset: those threads
                // may have to issue the drop request again (iterate one more
                // time).
                if (drop_requested) {
                    RESET_GIL_DROP_REQUEST(interp);
                }
                PyThread_exit_thread();
            }
            assert(_PyThreadState_CheckConsistency(tstate));

            SET_GIL_DROP_REQUEST(interp);
            drop_requested = 1;
        }
    }

_ready:
#ifdef FORCE_SWITCHING
    /* This mutex must be taken before modifying gil->last_holder:
       see drop_gil(). */
    MUTEX_LOCK(gil->switch_mutex);
#endif
    /* We now hold the GIL */
    _Py_atomic_store_relaxed(&gil->locked, 1);
    _Py_ANNOTATE_RWLOCK_ACQUIRED(&gil->locked, /*is_write=*/1);

    if (tstate != (PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) {
        _Py_atomic_store_relaxed(&gil->last_holder, (uintptr_t)tstate);
        ++gil->switch_number;
    }

#ifdef FORCE_SWITCHING
    COND_SIGNAL(gil->switch_cond);
    MUTEX_UNLOCK(gil->switch_mutex);
#endif

    if (_PyThreadState_MustExit(tstate)) {
        /* bpo-36475: If Py_Finalize() has been called and tstate is not
           the thread which called Py_Finalize(), exit the thread immediately.

           This code path can be reached by a daemon thread which was waiting
           in take_gil() while the main thread called
           wait_for_thread_shutdown() from Py_Finalize(). */
        MUTEX_UNLOCK(gil->mutex);
        drop_gil(ceval, tstate);
        PyThread_exit_thread();
    }
    assert(_PyThreadState_CheckConsistency(tstate));

    if (_Py_atomic_load_relaxed(&ceval->gil_drop_request)) {
        RESET_GIL_DROP_REQUEST(interp);
    }
    else {
        /* bpo-40010: eval_breaker should be recomputed to be set to 1 if there
           is a pending signal: a signal received by another thread which cannot
           handle signals.

           Note: RESET_GIL_DROP_REQUEST() calls COMPUTE_EVAL_BREAKER(). */
        COMPUTE_EVAL_BREAKER(interp, &_PyRuntime.ceval, ceval);
    }

    /* Don't access tstate if the thread must exit */
    if (tstate->async_exc != NULL) {
        _PyEval_SignalAsyncExc(tstate->interp);
    }

    MUTEX_UNLOCK(gil->mutex);

    errno = err;
}

void _PyEval_SetSwitchInterval(unsigned long microseconds)
{
    PyInterpreterState *interp = _PyInterpreterState_Get();
    struct _gil_runtime_state *gil = interp->ceval.gil;
    assert(gil != NULL);
    gil->interval = microseconds;
}

unsigned long _PyEval_GetSwitchInterval(void)
{
    PyInterpreterState *interp = _PyInterpreterState_Get();
    struct _gil_runtime_state *gil = interp->ceval.gil;
    assert(gil != NULL);
    return gil->interval;
}

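/* These two functions back sys.setswitchinterval() and
   sys.getswitchinterval().  The sys module converts between seconds at the
   Python level and the microseconds stored here, so for example
   sys.setswitchinterval(0.005) ends up storing 5000 in gil->interval;
   the conversion itself lives in the sys module, not in this file. */
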
int
_PyEval_ThreadsInitialized(void)
{
    /* XXX This is only needed for an assert in PyGILState_Ensure(),
     * which currently does not work with subinterpreters.
     * Thus we only use the main interpreter. */
    PyInterpreterState *interp = _PyInterpreterState_Main();
    if (interp == NULL) {
        return 0;
    }
    struct _gil_runtime_state *gil = interp->ceval.gil;
    return gil_created(gil);
}

int
PyEval_ThreadsInitialized(void)
{
    return _PyEval_ThreadsInitialized();
}

static inline int
current_thread_holds_gil(struct _gil_runtime_state *gil, PyThreadState *tstate)
{
    if (((PyThreadState*)_Py_atomic_load_relaxed(&gil->last_holder)) != tstate) {
        return 0;
    }
    return _Py_atomic_load_relaxed(&gil->locked);
}

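/* Since CPython 3.12, a subinterpreter may either share the main
   interpreter's GIL or own a private one (PEP 684, per-interpreter GIL).
   init_shared_gil() and init_own_gil() below set interp->ceval.gil and
   interp->ceval.own_gil accordingly. */
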
static void
init_shared_gil(PyInterpreterState *interp, struct _gil_runtime_state *gil)
{
    assert(gil_created(gil));
    interp->ceval.gil = gil;
    interp->ceval.own_gil = 0;
}

static void
init_own_gil(PyInterpreterState *interp, struct _gil_runtime_state *gil)
{
    assert(!gil_created(gil));
    create_gil(gil);
    assert(gil_created(gil));
    interp->ceval.gil = gil;
    interp->ceval.own_gil = 1;
}

PyStatus
_PyEval_InitGIL(PyThreadState *tstate, int own_gil)
{
    assert(tstate->interp->ceval.gil == NULL);
    int locked;
    if (!own_gil) {
        /* The interpreter will share the main interpreter's instead. */
        PyInterpreterState *main_interp = _PyInterpreterState_Main();
        assert(tstate->interp != main_interp);
        struct _gil_runtime_state *gil = main_interp->ceval.gil;
        init_shared_gil(tstate->interp, gil);
        locked = current_thread_holds_gil(gil, tstate);
    }
    else {
        PyThread_init_thread();
        init_own_gil(tstate->interp, &tstate->interp->_gil);
        locked = 0;
    }
    if (!locked) {
        take_gil(tstate);
    }

    return _PyStatus_OK();
}

void
_PyEval_FiniGIL(PyInterpreterState *interp)
{
    struct _gil_runtime_state *gil = interp->ceval.gil;
    if (gil == NULL) {
        /* It was already finalized (or hasn't been initialized yet). */
        assert(!interp->ceval.own_gil);
        return;
    }
    else if (!interp->ceval.own_gil) {
#ifdef Py_DEBUG
        PyInterpreterState *main_interp = _PyInterpreterState_Main();
        assert(main_interp != NULL && interp != main_interp);
        assert(interp->ceval.gil == main_interp->ceval.gil);
#endif
        interp->ceval.gil = NULL;
        return;
    }

    if (!gil_created(gil)) {
        /* First Py_InitializeFromConfig() call: the GIL doesn't exist
           yet: do nothing. */
        return;
    }

    destroy_gil(gil);
    assert(!gil_created(gil));
    interp->ceval.gil = NULL;
}

void
PyEval_InitThreads(void)
{
    /* Do nothing: kept for backward compatibility */
}

void
_PyEval_Fini(void)
{
#ifdef Py_STATS
    _Py_PrintSpecializationStats(1);
#endif
}

void
PyEval_AcquireLock(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
    _Py_EnsureTstateNotNULL(tstate);

    take_gil(tstate);
}

void
PyEval_ReleaseLock(void)
{
    PyThreadState *tstate = _PyThreadState_GET();
    /* This function must succeed when the current thread state is NULL.
       We therefore avoid PyThreadState_Get() which dumps a fatal error
       in debug mode. */
    struct _ceval_state *ceval = &tstate->interp->ceval;
    drop_gil(ceval, tstate);
}

void
_PyEval_AcquireLock(PyThreadState *tstate)
{
    _Py_EnsureTstateNotNULL(tstate);
    take_gil(tstate);
}

void
_PyEval_ReleaseLock(PyInterpreterState *interp, PyThreadState *tstate)
{
    /* If tstate is NULL then we do not expect the current thread
       to acquire the GIL ever again. */
    assert(tstate == NULL || tstate->interp == interp);
    struct _ceval_state *ceval = &interp->ceval;
    drop_gil(ceval, tstate);
}

void
PyEval_AcquireThread(PyThreadState *tstate)
{
    _Py_EnsureTstateNotNULL(tstate);

    take_gil(tstate);

    if (_PyThreadState_SwapNoGIL(tstate) != NULL) {
        Py_FatalError("non-NULL old thread state");
    }
}

void
PyEval_ReleaseThread(PyThreadState *tstate)
{
    assert(_PyThreadState_CheckConsistency(tstate));

    PyThreadState *new_tstate = _PyThreadState_SwapNoGIL(NULL);
    if (new_tstate != tstate) {
        Py_FatalError("wrong thread state");
    }
    struct _ceval_state *ceval = &tstate->interp->ceval;
    drop_gil(ceval, tstate);
}

#ifdef HAVE_FORK
/* This function is called from PyOS_AfterFork_Child to destroy all threads
   which are not running in the child process, and clear internal locks
   which might be held by those threads. */
PyStatus
_PyEval_ReInitThreads(PyThreadState *tstate)
{
    assert(tstate->interp == _PyInterpreterState_Main());

    struct _gil_runtime_state *gil = tstate->interp->ceval.gil;
    if (!gil_created(gil)) {
        return _PyStatus_OK();
    }
    recreate_gil(gil);

    take_gil(tstate);

    struct _pending_calls *pending = &tstate->interp->ceval.pending;
    if (_PyThread_at_fork_reinit(&pending->lock) < 0) {
        return _PyStatus_ERR("Can't reinitialize pending calls lock");
    }

    /* Destroy all threads except the current one */
    _PyThreadState_DeleteExcept(tstate);
    return _PyStatus_OK();
}
#endif

/* This function is used to signal that async exceptions are waiting to be
   raised. */

void
_PyEval_SignalAsyncExc(PyInterpreterState *interp)
{
    SIGNAL_ASYNC_EXC(interp);
}

PyThreadState *
PyEval_SaveThread(void)
{
    PyThreadState *tstate = _PyThreadState_SwapNoGIL(NULL);
    _Py_EnsureTstateNotNULL(tstate);

    struct _ceval_state *ceval = &tstate->interp->ceval;
    assert(gil_created(ceval->gil));
    drop_gil(ceval, tstate);
    return tstate;
}

void
PyEval_RestoreThread(PyThreadState *tstate)
{
    _Py_EnsureTstateNotNULL(tstate);

    take_gil(tstate);

    _PyThreadState_SwapNoGIL(tstate);
}

/* Mechanism whereby asynchronously executing callbacks (e.g. UNIX
   signal handlers or Mac I/O completion routines) can schedule calls
   to a function to be called synchronously.
   The synchronous function is called with one void* argument.
   It should return 0 for success or -1 for failure -- failure should
   be accompanied by an exception.

   If registration succeeds, the registering function returns 0; if it fails
   (e.g. due to too many pending calls) it returns -1 (without setting
   an exception condition).

   Note that because registration may occur from within signal handlers,
   or other asynchronous events, calling malloc() is unsafe!

   Any thread can schedule pending calls, but only the main thread
   will execute them.
   There is no facility to schedule calls to a particular thread, but
   that should be easy to change, should that ever be required.  In
   that case, the static variables here should go into the python
   threadstate.
*/

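/* Illustrative sketch (not part of this file): a C extension running in a
   worker thread, without holding the GIL, can hand work back to the main
   thread roughly like this; notify_main and my_state are hypothetical names.

       static int
       notify_main(void *arg)
       {
           // Runs later in the main thread, with the GIL held.
           struct my_state *st = (struct my_state *)arg;
           st->done = 1;
           return 0;   // -1 would mean "failed, with an exception set"
       }

       // From any thread:
       if (Py_AddPendingCall(notify_main, &state) < 0) {
           // queue full: the call was not scheduled
       }
*/
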
void
_PyEval_SignalReceived(PyInterpreterState *interp)
{
#ifdef MS_WINDOWS
    // bpo-42296: On Windows, _PyEval_SignalReceived() is called from a signal
    // handler which can run in a thread different than the Python thread, in
    // which case _Py_ThreadCanHandleSignals() is wrong.  Ignore
    // _Py_ThreadCanHandleSignals() and always set eval_breaker to 1.
    //
    // The next eval_frame_handle_pending() call will call
    // _Py_ThreadCanHandleSignals() to recompute eval_breaker.
    int force = 1;
#else
    int force = 0;
#endif
    /* bpo-30703: Function called when the C signal handler of Python gets a
       signal.  We cannot queue a callback using _PyEval_AddPendingCall() since
       that function is not async-signal-safe. */
    SIGNAL_PENDING_SIGNALS(interp, force);
}

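/* The pending-call queue is a fixed-size ring buffer of NPENDINGCALLS slots;
   `first` and `last` are the read and write indices, and the queue counts as
   full when advancing `last` would make it equal to `first` (so at most
   NPENDINGCALLS - 1 calls can be queued at once). */
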
/* Push one item onto the queue while holding the lock. */
static int
_push_pending_call(struct _pending_calls *pending,
                   int (*func)(void *), void *arg)
{
    int i = pending->last;
    int j = (i + 1) % NPENDINGCALLS;
    if (j == pending->first) {
        return -1; /* Queue full */
    }
    pending->calls[i].func = func;
    pending->calls[i].arg = arg;
    pending->last = j;
    return 0;
}

static int
_next_pending_call(struct _pending_calls *pending,
                   int (**func)(void *), void **arg)
{
    int i = pending->first;
    if (i == pending->last) {
        /* Queue empty */
        assert(pending->calls[i].func == NULL);
        return -1;
    }
    *func = pending->calls[i].func;
    *arg = pending->calls[i].arg;
    return i;
}

/* Pop one item off the queue while holding the lock. */
static void
_pop_pending_call(struct _pending_calls *pending,
                  int (**func)(void *), void **arg)
{
    int i = _next_pending_call(pending, func, arg);
    if (i >= 0) {
        pending->calls[i] = (struct _pending_call){0};
        pending->first = (i + 1) % NPENDINGCALLS;
    }
}

/* This implementation is thread-safe.  It allows
   scheduling to be made from any thread, and even from an executing
   callback.
 */

int
_PyEval_AddPendingCall(PyInterpreterState *interp,
                       int (*func)(void *), void *arg,
                       int mainthreadonly)
{
    assert(!mainthreadonly || _Py_IsMainInterpreter(interp));
    struct _pending_calls *pending = &interp->ceval.pending;
    if (mainthreadonly) {
        /* The main thread only exists in the main interpreter. */
        assert(_Py_IsMainInterpreter(interp));
        pending = &_PyRuntime.ceval.pending_mainthread;
    }
    /* Ensure that _PyEval_InitState() was called
       and that _PyEval_FiniState() is not called yet. */
    assert(pending->lock != NULL);

    PyThread_acquire_lock(pending->lock, WAIT_LOCK);
    int result = _push_pending_call(pending, func, arg);
    PyThread_release_lock(pending->lock);

    /* signal main loop */
    SIGNAL_PENDING_CALLS(pending, interp);
    return result;
}

int
Py_AddPendingCall(int (*func)(void *), void *arg)
{
    /* Legacy users of this API will continue to target the main thread
       (of the main interpreter). */
    PyInterpreterState *interp = _PyInterpreterState_Main();
    return _PyEval_AddPendingCall(interp, func, arg, 1);
}

static int
handle_signals(PyThreadState *tstate)
{
    assert(_PyThreadState_CheckConsistency(tstate));
    if (!_Py_ThreadCanHandleSignals(tstate->interp)) {
        return 0;
    }

    UNSIGNAL_PENDING_SIGNALS(tstate->interp);
    if (_PyErr_CheckSignalsTstate(tstate) < 0) {
        /* On failure, re-schedule a call to handle_signals(). */
        SIGNAL_PENDING_SIGNALS(tstate->interp, 0);
        return -1;
    }
    return 0;
}

static inline int
maybe_has_pending_calls(PyInterpreterState *interp)
{
    struct _pending_calls *pending = &interp->ceval.pending;
    if (_Py_atomic_load_relaxed_int32(&pending->calls_to_do)) {
        return 1;
    }
    if (!_Py_IsMainThread() || !_Py_IsMainInterpreter(interp)) {
        return 0;
    }
    pending = &_PyRuntime.ceval.pending_mainthread;
    return _Py_atomic_load_relaxed_int32(&pending->calls_to_do);
}

static int
_make_pending_calls(struct _pending_calls *pending)
{
    /* perform a bounded number of calls, in case of recursion */
    for (int i=0; i<NPENDINGCALLS; i++) {
        int (*func)(void *) = NULL;
        void *arg = NULL;

        /* pop one item off the queue while holding the lock */
        PyThread_acquire_lock(pending->lock, WAIT_LOCK);
        _pop_pending_call(pending, &func, &arg);
        PyThread_release_lock(pending->lock);

        /* having released the lock, perform the callback */
        if (func == NULL) {
            break;
        }
        if (func(arg) != 0) {
            return -1;
        }
    }
    return 0;
}

static int
make_pending_calls(PyInterpreterState *interp)
{
    struct _pending_calls *pending = &interp->ceval.pending;
    struct _pending_calls *pending_main = &_PyRuntime.ceval.pending_mainthread;

    /* Only one thread (per interpreter) may run the pending calls
       at once.  In the same way, we don't do recursive pending calls. */
    PyThread_acquire_lock(pending->lock, WAIT_LOCK);
    if (pending->busy) {
        /* A pending call was added after another thread was already
           handling the pending calls (and had already "unsignaled").
           Once that thread is done, it may have taken care of all the
           pending calls, or there might be some still waiting.
           Regardless, this interpreter's pending calls will stay
           "signaled" until that first thread has finished.  At that
           point the next thread to trip the eval breaker will take
           care of any remaining pending calls.  Until then, though,
           all the interpreter's threads will be tripping the eval
           breaker every time it's checked. */
        PyThread_release_lock(pending->lock);
        return 0;
    }
    pending->busy = 1;
    PyThread_release_lock(pending->lock);

    /* unsignal before starting to call callbacks, so that any callback
       added in-between re-signals */
    UNSIGNAL_PENDING_CALLS(interp);

    if (_make_pending_calls(pending) != 0) {
        pending->busy = 0;
        /* There might not be more calls to make, but we play it safe. */
        SIGNAL_PENDING_CALLS(pending, interp);
        return -1;
    }

    if (_Py_IsMainThread() && _Py_IsMainInterpreter(interp)) {
        if (_make_pending_calls(pending_main) != 0) {
            pending->busy = 0;
            /* There might not be more calls to make, but we play it safe. */
            SIGNAL_PENDING_CALLS(pending_main, interp);
            return -1;
        }
    }

    pending->busy = 0;
    return 0;
}

void
_Py_FinishPendingCalls(PyThreadState *tstate)
{
    assert(PyGILState_Check());
    assert(_PyThreadState_CheckConsistency(tstate));

    if (make_pending_calls(tstate->interp) < 0) {
        PyObject *exc = _PyErr_GetRaisedException(tstate);
        PyErr_BadInternalCall();
        _PyErr_ChainExceptions1(exc);
        _PyErr_Print(tstate);
    }
}

int
_PyEval_MakePendingCalls(PyThreadState *tstate)
{
    int res;

    if (_Py_IsMainThread() && _Py_IsMainInterpreter(tstate->interp)) {
        /* Python signal handler doesn't really queue a callback:
           it only signals that a signal was received,
           see _PyEval_SignalReceived(). */
        res = handle_signals(tstate);
        if (res != 0) {
            return res;
        }
    }

    res = make_pending_calls(tstate->interp);
    if (res != 0) {
        return res;
    }

    return 0;
}

/* Py_MakePendingCalls() is a simple wrapper for the sake
   of backward-compatibility. */
int
Py_MakePendingCalls(void)
{
    assert(PyGILState_Check());

    PyThreadState *tstate = _PyThreadState_GET();
    assert(_PyThreadState_CheckConsistency(tstate));

    /* Only execute pending calls on the main thread. */
    if (!_Py_IsMainThread() || !_Py_IsMainInterpreter(tstate->interp)) {
        return 0;
    }
    return _PyEval_MakePendingCalls(tstate);
}

void
_PyEval_InitState(PyInterpreterState *interp, PyThread_type_lock pending_lock)
{
    _gil_initialize(&interp->_gil);

    struct _pending_calls *pending = &interp->ceval.pending;
    assert(pending->lock == NULL);
    pending->lock = pending_lock;
}

void
_PyEval_FiniState(struct _ceval_state *ceval)
{
    struct _pending_calls *pending = &ceval->pending;
    if (pending->lock != NULL) {
        PyThread_free_lock(pending->lock);
        pending->lock = NULL;
    }
}

/* Handle signals, pending calls, GIL drop request
   and asynchronous exception */
int
_Py_HandlePending(PyThreadState *tstate)
{
    _PyRuntimeState * const runtime = &_PyRuntime;
    struct _ceval_runtime_state *ceval = &runtime->ceval;
    struct _ceval_state *interp_ceval_state = &tstate->interp->ceval;

    /* Pending signals */
    if (_Py_atomic_load_relaxed_int32(&ceval->signals_pending)) {
        if (handle_signals(tstate) != 0) {
            return -1;
        }
    }

    /* Pending calls */
    if (maybe_has_pending_calls(tstate->interp)) {
        if (make_pending_calls(tstate->interp) != 0) {
            return -1;
        }
    }

    /* GC scheduled to run */
    if (_Py_atomic_load_relaxed_int32(&interp_ceval_state->gc_scheduled)) {
        _Py_atomic_store_relaxed(&interp_ceval_state->gc_scheduled, 0);
        COMPUTE_EVAL_BREAKER(tstate->interp, ceval, interp_ceval_state);
        _Py_RunGC(tstate);
    }

    /* GIL drop request */
    if (_Py_atomic_load_relaxed_int32(&interp_ceval_state->gil_drop_request)) {
        /* Give another thread a chance */
        if (_PyThreadState_SwapNoGIL(NULL) != tstate) {
            Py_FatalError("tstate mix-up");
        }
        drop_gil(interp_ceval_state, tstate);

        /* Other threads may run now */

        take_gil(tstate);

        if (_PyThreadState_SwapNoGIL(tstate) != NULL) {
            Py_FatalError("orphan tstate");
        }
    }

    /* Check for asynchronous exception. */
    if (tstate->async_exc != NULL) {
        PyObject *exc = tstate->async_exc;
        tstate->async_exc = NULL;
        UNSIGNAL_ASYNC_EXC(tstate->interp);
        _PyErr_SetNone(tstate, exc);
        Py_DECREF(exc);
        return -1;
    }

    // It is possible that some of the conditions that trigger the eval breaker
    // are set in a different thread than the Python thread.  An example of
    // this is bpo-42296: on Windows, _PyEval_SignalReceived() can be called in
    // a different thread than the Python thread, in which case
    // _Py_ThreadCanHandleSignals() is wrong.  Recompute eval_breaker in the
    // current Python thread with the correct _Py_ThreadCanHandleSignals()
    // value.  This prevents interrupting the eval loop at every instruction
    // when the current Python thread cannot handle signals (when
    // _Py_ThreadCanHandleSignals() is false).
    COMPUTE_EVAL_BREAKER(tstate->interp, ceval, interp_ceval_state);
    return 0;
}
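
/* For context, a rough sketch of how this is driven (the actual code lives in
   the bytecode eval loop in ceval.c, not in this file): at safe points the
   interpreter checks the eval_breaker flag and, when it is set, calls
   _Py_HandlePending(), roughly:

       if (_Py_atomic_load_relaxed_int32(&tstate->interp->ceval.eval_breaker)) {
           if (_Py_HandlePending(tstate) != 0) {
               goto error;
           }
       }

   so every condition OR-ed into eval_breaker ends up being serviced here. */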