thread_pthread.h

#include "pycore_interp.h"    // _PyInterpreterState.threads.stacksize

/* Posix threads interface */

#include <stdlib.h>
#include <string.h>
#if defined(__APPLE__) || defined(HAVE_PTHREAD_DESTRUCTOR)
#define destructor xxdestructor
#endif
#ifndef HAVE_PTHREAD_STUBS
#  include <pthread.h>
#endif
#if defined(__APPLE__) || defined(HAVE_PTHREAD_DESTRUCTOR)
#undef destructor
#endif
#include <signal.h>

#if defined(__linux__)
#   include <sys/syscall.h>     /* syscall(SYS_gettid) */
#elif defined(__FreeBSD__)
#   include <pthread_np.h>      /* pthread_getthreadid_np() */
#elif defined(__OpenBSD__)
#   include <unistd.h>          /* getthrid() */
#elif defined(_AIX)
#   include <sys/thread.h>      /* thread_self() */
#elif defined(__NetBSD__)
#   include <lwp.h>             /* _lwp_self() */
#elif defined(__DragonFly__)
#   include <sys/lwp.h>         /* lwp_gettid() */
#endif

/* The POSIX spec requires that use of pthread_attr_setstacksize
   be conditional on _POSIX_THREAD_ATTR_STACKSIZE being defined. */
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
#ifndef THREAD_STACK_SIZE
#define THREAD_STACK_SIZE       0       /* use default stack size */
#endif

/* The default stack size for new threads on BSD is small enough that
 * we'll get hard crashes instead of 'maximum recursion depth exceeded'
 * exceptions.
 *
 * The default stack sizes below are the empirically determined minimal stack
 * sizes where a simple recursive function doesn't cause a hard crash.
 *
 * For macOS the value of THREAD_STACK_SIZE is determined in configure.ac
 * as it also depends on other configure options such as the chosen sanitizer
 * runtimes.
 */
#if defined(__FreeBSD__) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
#undef  THREAD_STACK_SIZE
#define THREAD_STACK_SIZE       0x400000
#endif
#if defined(_AIX) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
#undef  THREAD_STACK_SIZE
#define THREAD_STACK_SIZE       0x200000
#endif
/* bpo-38852: test_threading.test_recursion_limit() checks that 1000 recursive
   Python calls (default recursion limit) don't crash, but raise a regular
   RecursionError exception. In debug mode, Python function calls allocate
   more memory on the stack, so use a stack of 8 MiB. */
#if defined(__ANDROID__) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
#   ifdef Py_DEBUG
#   undef  THREAD_STACK_SIZE
#   define THREAD_STACK_SIZE    0x800000
#   endif
#endif
#if defined(__VXWORKS__) && defined(THREAD_STACK_SIZE) && THREAD_STACK_SIZE == 0
#undef  THREAD_STACK_SIZE
#define THREAD_STACK_SIZE       0x100000
#endif
/* for safety, ensure a viable minimum stacksize */
#define THREAD_STACK_MIN        0x8000  /* 32 KiB */

#else  /* !_POSIX_THREAD_ATTR_STACKSIZE */
#ifdef THREAD_STACK_SIZE
#error "THREAD_STACK_SIZE defined but _POSIX_THREAD_ATTR_STACKSIZE undefined"
#endif
#endif

/* The POSIX spec says that implementations supporting the sem_*
   family of functions must indicate this by defining
   _POSIX_SEMAPHORES. */
#ifdef _POSIX_SEMAPHORES
/* On FreeBSD 4.x, _POSIX_SEMAPHORES is defined empty, so
   we need to add 0 to make it work there as well. */
#if (_POSIX_SEMAPHORES+0) == -1
#define HAVE_BROKEN_POSIX_SEMAPHORES
#else
#include <semaphore.h>
#include <errno.h>
#endif
#endif

/* Thread sanitizer doesn't currently support sem_clockwait */
#ifdef _Py_THREAD_SANITIZER
#undef HAVE_SEM_CLOCKWAIT
#endif

/* Whether or not to use semaphores directly rather than emulating them with
 * mutexes and condition variables:
 */
#if (defined(_POSIX_SEMAPHORES) && !defined(HAVE_BROKEN_POSIX_SEMAPHORES) && \
     (defined(HAVE_SEM_TIMEDWAIT) || defined(HAVE_SEM_CLOCKWAIT)))
#  define USE_SEMAPHORES
#else
#  undef USE_SEMAPHORES
#endif

/* On platforms that don't use standard POSIX threads pthread_sigmask()
 * isn't present.  DEC threads uses sigprocmask() instead as do most
 * other UNIX International compliant systems that don't have the full
 * pthread implementation.
 */
#if defined(HAVE_PTHREAD_SIGMASK) && !defined(HAVE_BROKEN_PTHREAD_SIGMASK)
#  define SET_THREAD_SIGMASK pthread_sigmask
#else
#  define SET_THREAD_SIGMASK sigprocmask
#endif


/*
 * pthread_cond support
 */

#define condattr_monotonic _PyRuntime.threads._condattr_monotonic.ptr

static void
init_condattr(void)
{
#ifdef CONDATTR_MONOTONIC
# define ca _PyRuntime.threads._condattr_monotonic.val
    // XXX We need to check the return code?
    pthread_condattr_init(&ca);
    // XXX We need to run pthread_condattr_destroy() during runtime fini.
    if (pthread_condattr_setclock(&ca, CLOCK_MONOTONIC) == 0) {
        condattr_monotonic = &ca;  // Use monotonic clock
    }
# undef ca
#endif  // CONDATTR_MONOTONIC
}

int
_PyThread_cond_init(PyCOND_T *cond)
{
    return pthread_cond_init(cond, condattr_monotonic);
}

void
_PyThread_cond_after(long long us, struct timespec *abs)
{
    _PyTime_t timeout = _PyTime_FromMicrosecondsClamp(us);
    _PyTime_t t;
#ifdef CONDATTR_MONOTONIC
    if (condattr_monotonic) {
        t = _PyTime_GetMonotonicClock();
    }
    else
#endif
    {
        t = _PyTime_GetSystemClock();
    }
    t = _PyTime_Add(t, timeout);
    _PyTime_AsTimespec_clamp(t, abs);
}
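
/* Illustrative sketch (not part of this file): a typical caller combines
 * _PyThread_cond_init() and _PyThread_cond_after() so that a timed wait uses
 * CLOCK_MONOTONIC whenever the runtime managed to configure it above.  The
 * variable names below are hypothetical.
 *
 *     pthread_mutex_t mut = PTHREAD_MUTEX_INITIALIZER;
 *     PyCOND_T cond;
 *     _PyThread_cond_init(&cond);                 // monotonic if available
 *
 *     struct timespec deadline;
 *     _PyThread_cond_after(1000000, &deadline);   // now + 1 second (in us)
 *
 *     pthread_mutex_lock(&mut);
 *     int rc = pthread_cond_timedwait(&cond, &mut, &deadline);
 *     // rc == ETIMEDOUT once the absolute deadline has passed
 *     pthread_mutex_unlock(&mut);
 */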

/* A pthread mutex isn't sufficient to model the Python lock type
 * because, according to Draft 5 of the docs (P1003.4a/D5), both of the
 * following are undefined:
 *  -> a thread tries to lock a mutex it already has locked
 *  -> a thread tries to unlock a mutex locked by a different thread
 * pthread mutexes are designed for serializing threads over short pieces
 * of code anyway, so wouldn't be an appropriate implementation of
 * Python's locks regardless.
 *
 * The pthread_lock struct implements a Python lock as a "locked?" bit
 * and a <condition, mutex> pair.  In general, if the bit can be acquired
 * instantly, it is, else the pair is used to block the thread until the
 * bit is cleared.     9 May 1994 tim@ksr.com
 */

typedef struct {
    char             locked; /* 0=unlocked, 1=locked */
    /* a <cond, mutex> pair to handle an acquire of a locked lock */
    pthread_cond_t   lock_released;
    pthread_mutex_t  mut;
} pthread_lock;
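
/* Sketch of the protocol implemented further below (simplified, shown only to
 * illustrate how the <locked, cond, mutex> triple is used; the real code also
 * handles timeouts, signal interruption and error checking):
 *
 *     // acquire: take mut, wait on lock_released until locked == 0
 *     pthread_mutex_lock(&lk->mut);
 *     while (lk->locked) {
 *         pthread_cond_wait(&lk->lock_released, &lk->mut);
 *     }
 *     lk->locked = 1;
 *     pthread_mutex_unlock(&lk->mut);
 *
 *     // release: clear the bit and wake one waiter
 *     pthread_mutex_lock(&lk->mut);
 *     lk->locked = 0;
 *     pthread_cond_signal(&lk->lock_released);
 *     pthread_mutex_unlock(&lk->mut);
 */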

#define CHECK_STATUS(name)  if (status != 0) { perror(name); error = 1; }
#define CHECK_STATUS_PTHREAD(name)  if (status != 0) { fprintf(stderr, \
    "%s: %s\n", name, strerror(status)); error = 1; }

/*
 * Initialization for the current runtime.
 */
static void
PyThread__init_thread(void)
{
    // The library is only initialized once in the process,
    // regardless of how many times the Python runtime is initialized.
    static int lib_initialized = 0;
    if (!lib_initialized) {
        lib_initialized = 1;
#if defined(_AIX) && defined(__GNUC__)
        extern void pthread_init(void);
        pthread_init();
#endif
    }
    init_condattr();
}

/*
 * Thread support.
 */

/* bpo-33015: pythread_callback struct and pythread_wrapper() cast
   "void func(void *)" to "void* func(void *)": always return NULL.

   PyThread_start_new_thread() uses "void func(void *)" type, whereas
   pthread_create() requires a void* return value. */
typedef struct {
    void (*func) (void *);
    void *arg;
} pythread_callback;

static void *
pythread_wrapper(void *arg)
{
    /* copy func and func_arg and free the temporary structure */
    pythread_callback *callback = arg;
    void (*func)(void *) = callback->func;
    void *func_arg = callback->arg;
    PyMem_RawFree(arg);

    func(func_arg);
    return NULL;
}

unsigned long
PyThread_start_new_thread(void (*func)(void *), void *arg)
{
    pthread_t th;
    int status;
#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
    pthread_attr_t attrs;
#endif
#if defined(THREAD_STACK_SIZE)
    size_t tss;
#endif

    if (!initialized)
        PyThread_init_thread();

#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
    if (pthread_attr_init(&attrs) != 0)
        return PYTHREAD_INVALID_THREAD_ID;
#endif
#if defined(THREAD_STACK_SIZE)
    PyThreadState *tstate = _PyThreadState_GET();
    size_t stacksize = tstate ? tstate->interp->threads.stacksize : 0;
    tss = (stacksize != 0) ? stacksize : THREAD_STACK_SIZE;
    if (tss != 0) {
        if (pthread_attr_setstacksize(&attrs, tss) != 0) {
            pthread_attr_destroy(&attrs);
            return PYTHREAD_INVALID_THREAD_ID;
        }
    }
#endif
#if defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
    pthread_attr_setscope(&attrs, PTHREAD_SCOPE_SYSTEM);
#endif

    pythread_callback *callback = PyMem_RawMalloc(sizeof(pythread_callback));

    if (callback == NULL) {
        return PYTHREAD_INVALID_THREAD_ID;
    }

    callback->func = func;
    callback->arg = arg;

    status = pthread_create(&th,
#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
                            &attrs,
#else
                            (pthread_attr_t*)NULL,
#endif
                            pythread_wrapper, callback);

#if defined(THREAD_STACK_SIZE) || defined(PTHREAD_SYSTEM_SCHED_SUPPORTED)
    pthread_attr_destroy(&attrs);
#endif

    if (status != 0) {
        PyMem_RawFree(callback);
        return PYTHREAD_INVALID_THREAD_ID;
    }

    pthread_detach(th);

#if SIZEOF_PTHREAD_T <= SIZEOF_LONG
    return (unsigned long) th;
#else
    return (unsigned long) *(unsigned long *) &th;
#endif
}
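
/* Illustrative usage (hypothetical caller, not part of this file): the thread
 * body has the "void func(void *)" signature that pythread_wrapper() adapts
 * to pthread_create()'s "void *(*)(void *)" expectation.
 *
 *     static void
 *     my_thread_body(void *arg)          // hypothetical worker
 *     {
 *         // ... do work with arg ...
 *     }
 *
 *     unsigned long ident = PyThread_start_new_thread(my_thread_body, ctx);
 *     if (ident == PYTHREAD_INVALID_THREAD_ID) {
 *         // creation failed (attr init, allocation or pthread_create)
 *     }
 */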

/* XXX This implementation is considered (to quote Tim Peters) "inherently
   hosed" because:
     - It does not guarantee the promise that a non-zero integer is returned.
     - The cast to unsigned long is inherently unsafe.
     - It is not clear that the 'volatile' (for AIX?) are any longer necessary.
*/
unsigned long
PyThread_get_thread_ident(void)
{
    volatile pthread_t threadid;
    if (!initialized)
        PyThread_init_thread();
    threadid = pthread_self();
    return (unsigned long) threadid;
}

#ifdef PY_HAVE_THREAD_NATIVE_ID
unsigned long
PyThread_get_thread_native_id(void)
{
    if (!initialized)
        PyThread_init_thread();
#ifdef __APPLE__
    uint64_t native_id;
    (void) pthread_threadid_np(NULL, &native_id);
#elif defined(__linux__)
    pid_t native_id;
    native_id = syscall(SYS_gettid);
#elif defined(__FreeBSD__)
    int native_id;
    native_id = pthread_getthreadid_np();
#elif defined(__OpenBSD__)
    pid_t native_id;
    native_id = getthrid();
#elif defined(_AIX)
    tid_t native_id;
    native_id = thread_self();
#elif defined(__NetBSD__)
    lwpid_t native_id;
    native_id = _lwp_self();
#elif defined(__DragonFly__)
    lwpid_t native_id;
    native_id = lwp_gettid();
#endif
    return (unsigned long) native_id;
}
#endif

void _Py_NO_RETURN
PyThread_exit_thread(void)
{
    if (!initialized)
        exit(0);
#if defined(__wasi__)
    /*
     * wasi-threads doesn't have pthread_exit right now
     * cf. https://github.com/WebAssembly/wasi-threads/issues/7
     */
    abort();
#else
    pthread_exit(0);
#endif
}

#ifdef USE_SEMAPHORES

/*
 * Lock support.
 */

PyThread_type_lock
PyThread_allocate_lock(void)
{
    sem_t *lock;
    int status, error = 0;

    if (!initialized)
        PyThread_init_thread();

    lock = (sem_t *)PyMem_RawMalloc(sizeof(sem_t));

    if (lock) {
        status = sem_init(lock, 0, 1);
        CHECK_STATUS("sem_init");

        if (error) {
            PyMem_RawFree((void *)lock);
            lock = NULL;
        }
    }

    return (PyThread_type_lock)lock;
}

void
PyThread_free_lock(PyThread_type_lock lock)
{
    sem_t *thelock = (sem_t *)lock;
    int status, error = 0;

    (void) error; /* silence unused-but-set-variable warning */

    if (!thelock)
        return;

    status = sem_destroy(thelock);
    CHECK_STATUS("sem_destroy");

    PyMem_RawFree((void *)thelock);
}

/*
 * As of February 2002, Cygwin thread implementations mistakenly report error
 * codes in the return value of the sem_ calls (like the pthread_ functions).
 * Correct implementations return -1 and put the code in errno.  This supports
 * either.
 */
static int
fix_status(int status)
{
    return (status == -1) ? errno : status;
}

PyLockStatus
PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
                            int intr_flag)
{
    PyLockStatus success;
    sem_t *thelock = (sem_t *)lock;
    int status, error = 0;

    (void) error; /* silence unused-but-set-variable warning */

    _PyTime_t timeout;  // relative timeout
    if (microseconds >= 0) {
        // bpo-41710: PyThread_acquire_lock_timed() cannot report timeout
        // overflow to the caller, so clamp the timeout to
        // [_PyTime_MIN, _PyTime_MAX].
        //
        // _PyTime_MAX nanoseconds is around 292.3 years.
        //
        // _thread.Lock.acquire() and _thread.RLock.acquire() raise an
        // OverflowError if microseconds is greater than PY_TIMEOUT_MAX.
        timeout = _PyTime_FromMicrosecondsClamp(microseconds);
    }
    else {
        timeout = _PyTime_FromNanoseconds(-1);
    }

#ifdef HAVE_SEM_CLOCKWAIT
    struct timespec abs_timeout;
    // Local scope for deadline
    {
        _PyTime_t deadline = _PyTime_Add(_PyTime_GetMonotonicClock(), timeout);
        _PyTime_AsTimespec_clamp(deadline, &abs_timeout);
    }
#else
    _PyTime_t deadline = 0;
    if (timeout > 0 && !intr_flag) {
        deadline = _PyDeadline_Init(timeout);
    }
#endif

    while (1) {
        if (timeout > 0) {
#ifdef HAVE_SEM_CLOCKWAIT
            status = fix_status(sem_clockwait(thelock, CLOCK_MONOTONIC,
                                              &abs_timeout));
#else
            _PyTime_t abs_time = _PyTime_Add(_PyTime_GetSystemClock(),
                                             timeout);
            struct timespec ts;
            _PyTime_AsTimespec_clamp(abs_time, &ts);
            status = fix_status(sem_timedwait(thelock, &ts));
#endif
        }
        else if (timeout == 0) {
            status = fix_status(sem_trywait(thelock));
        }
        else {
            status = fix_status(sem_wait(thelock));
        }

        /* Retry if interrupted by a signal, unless the caller wants to be
           notified. */
        if (intr_flag || status != EINTR) {
            break;
        }

        // sem_clockwait() uses an absolute timeout, there is no need
        // to recompute the relative timeout.
#ifndef HAVE_SEM_CLOCKWAIT
        if (timeout > 0) {
            /* wait interrupted by a signal (EINTR): recompute the timeout */
            timeout = _PyDeadline_Get(deadline);
            if (timeout < 0) {
                status = ETIMEDOUT;
                break;
            }
        }
#endif
    }

    /* Don't check the status if we're stopping because of an interrupt. */
    if (!(intr_flag && status == EINTR)) {
        if (timeout > 0) {
            if (status != ETIMEDOUT) {
#ifdef HAVE_SEM_CLOCKWAIT
                CHECK_STATUS("sem_clockwait");
#else
                CHECK_STATUS("sem_timedwait");
#endif
            }
        }
        else if (timeout == 0) {
            if (status != EAGAIN) {
                CHECK_STATUS("sem_trywait");
            }
        }
        else {
            CHECK_STATUS("sem_wait");
        }
    }

    if (status == 0) {
        success = PY_LOCK_ACQUIRED;
    } else if (intr_flag && status == EINTR) {
        success = PY_LOCK_INTR;
    } else {
        success = PY_LOCK_FAILURE;
    }

    return success;
}

void
PyThread_release_lock(PyThread_type_lock lock)
{
    sem_t *thelock = (sem_t *)lock;
    int status, error = 0;

    (void) error; /* silence unused-but-set-variable warning */

    status = sem_post(thelock);
    CHECK_STATUS("sem_post");
}
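
/* Illustrative caller-side sketch (hypothetical, not part of this file):
 * a 500 ms timed acquire (500000 microseconds) and the three possible
 * results.
 *
 *     PyThread_type_lock lk = PyThread_allocate_lock();
 *     PyLockStatus r = PyThread_acquire_lock_timed(lk, 500000, 1);
 *     if (r == PY_LOCK_ACQUIRED) {
 *         // ... critical section ...
 *         PyThread_release_lock(lk);
 *     }
 *     else if (r == PY_LOCK_INTR) {
 *         // interrupted by a signal (only reported when intr_flag != 0)
 *     }
 *     else {
 *         // PY_LOCK_FAILURE: timed out (or an error was reported)
 *     }
 *     PyThread_free_lock(lk);
 */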

#else /* USE_SEMAPHORES */

/*
 * Lock support.
 */

PyThread_type_lock
PyThread_allocate_lock(void)
{
    pthread_lock *lock;
    int status, error = 0;

    if (!initialized)
        PyThread_init_thread();

    lock = (pthread_lock *) PyMem_RawCalloc(1, sizeof(pthread_lock));
    if (lock) {
        lock->locked = 0;

        status = pthread_mutex_init(&lock->mut, NULL);
        CHECK_STATUS_PTHREAD("pthread_mutex_init");
        /* Mark the pthread mutex underlying a Python mutex as
           pure happens-before.  We can't simply mark the
           Python-level mutex as a mutex because it can be
           acquired and released in different threads, which
           will cause errors. */
        _Py_ANNOTATE_PURE_HAPPENS_BEFORE_MUTEX(&lock->mut);

        status = _PyThread_cond_init(&lock->lock_released);
        CHECK_STATUS_PTHREAD("pthread_cond_init");

        if (error) {
            PyMem_RawFree((void *)lock);
            lock = 0;
        }
    }

    return (PyThread_type_lock) lock;
}

void
PyThread_free_lock(PyThread_type_lock lock)
{
    pthread_lock *thelock = (pthread_lock *)lock;
    int status, error = 0;

    (void) error; /* silence unused-but-set-variable warning */

    /* some pthread-like implementations tie the mutex to the cond
     * and must have the cond destroyed first.
     */
    status = pthread_cond_destroy( &thelock->lock_released );
    CHECK_STATUS_PTHREAD("pthread_cond_destroy");

    status = pthread_mutex_destroy( &thelock->mut );
    CHECK_STATUS_PTHREAD("pthread_mutex_destroy");

    PyMem_RawFree((void *)thelock);
}

PyLockStatus
PyThread_acquire_lock_timed(PyThread_type_lock lock, PY_TIMEOUT_T microseconds,
                            int intr_flag)
{
    PyLockStatus success = PY_LOCK_FAILURE;
    pthread_lock *thelock = (pthread_lock *)lock;
    int status, error = 0;

    if (microseconds == 0) {
        status = pthread_mutex_trylock( &thelock->mut );
        if (status != EBUSY) {
            CHECK_STATUS_PTHREAD("pthread_mutex_trylock[1]");
        }
    }
    else {
        status = pthread_mutex_lock( &thelock->mut );
        CHECK_STATUS_PTHREAD("pthread_mutex_lock[1]");
    }
    if (status != 0) {
        goto done;
    }

    if (thelock->locked == 0) {
        success = PY_LOCK_ACQUIRED;
        goto unlock;
    }
    if (microseconds == 0) {
        goto unlock;
    }

    struct timespec abs_timeout;
    if (microseconds > 0) {
        _PyThread_cond_after(microseconds, &abs_timeout);
    }
    // Continue trying until we get the lock

    // mut must be locked by me -- part of the condition protocol
    while (1) {
        if (microseconds > 0) {
            status = pthread_cond_timedwait(&thelock->lock_released,
                                            &thelock->mut, &abs_timeout);
            if (status == 1) {
                break;
            }
            if (status == ETIMEDOUT) {
                break;
            }
            CHECK_STATUS_PTHREAD("pthread_cond_timedwait");
        }
        else {
            status = pthread_cond_wait(
                &thelock->lock_released,
                &thelock->mut);
            CHECK_STATUS_PTHREAD("pthread_cond_wait");
        }

        if (intr_flag && status == 0 && thelock->locked) {
            // We were woken up, but didn't get the lock.  We probably received
            // a signal.  Return PY_LOCK_INTR to allow the caller to handle
            // it and retry.
            success = PY_LOCK_INTR;
            break;
        }

        if (status == 0 && !thelock->locked) {
            success = PY_LOCK_ACQUIRED;
            break;
        }

        // Wait got interrupted by a signal: retry
    }

unlock:
    if (success == PY_LOCK_ACQUIRED) {
        thelock->locked = 1;
    }
    status = pthread_mutex_unlock( &thelock->mut );
    CHECK_STATUS_PTHREAD("pthread_mutex_unlock[1]");

done:
    if (error) {
        success = PY_LOCK_FAILURE;
    }
    return success;
}

void
PyThread_release_lock(PyThread_type_lock lock)
{
    pthread_lock *thelock = (pthread_lock *)lock;
    int status, error = 0;

    (void) error; /* silence unused-but-set-variable warning */

    status = pthread_mutex_lock( &thelock->mut );
    CHECK_STATUS_PTHREAD("pthread_mutex_lock[3]");

    thelock->locked = 0;

    /* wake up someone (anyone, if any) waiting on the lock */
    status = pthread_cond_signal( &thelock->lock_released );
    CHECK_STATUS_PTHREAD("pthread_cond_signal");

    status = pthread_mutex_unlock( &thelock->mut );
    CHECK_STATUS_PTHREAD("pthread_mutex_unlock[3]");
}

#endif /* USE_SEMAPHORES */

int
_PyThread_at_fork_reinit(PyThread_type_lock *lock)
{
    PyThread_type_lock new_lock = PyThread_allocate_lock();
    if (new_lock == NULL) {
        return -1;
    }

    /* bpo-6721, bpo-40089: The old lock can be in an inconsistent state.
       fork() can be called in the middle of an operation on the lock done by
       another thread.  So don't call PyThread_free_lock(*lock).

       Leak memory on purpose.  Don't release the memory either since the
       address of a mutex is relevant.  Putting two mutexes at the same address
       can lead to problems. */

    *lock = new_lock;
    return 0;
}
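
/* Illustrative sketch (hypothetical; the real call sites live elsewhere in
 * CPython): after fork(), the child re-creates locks instead of trying to
 * release ones that may have been held by threads that no longer exist.
 *
 *     static PyThread_type_lock my_lock;   // hypothetical lock
 *
 *     static void
 *     after_fork_child(void)
 *     {
 *         if (_PyThread_at_fork_reinit(&my_lock) < 0) {
 *             // allocation failed; the old (possibly inconsistent) lock
 *             // pointer is left untouched
 *         }
 *     }
 */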

int
PyThread_acquire_lock(PyThread_type_lock lock, int waitflag)
{
    return PyThread_acquire_lock_timed(lock, waitflag ? -1 : 0, /*intr_flag=*/0);
}

/* set the thread stack size.
 * Return 0 if size is valid, -1 if size is invalid,
 * -2 if setting stack size is not supported.
 */
static int
_pythread_pthread_set_stacksize(size_t size)
{
#if defined(THREAD_STACK_SIZE)
    pthread_attr_t attrs;
    size_t tss_min;
    int rc = 0;
#endif

    /* set to default */
    if (size == 0) {
        _PyInterpreterState_GET()->threads.stacksize = 0;
        return 0;
    }

#if defined(THREAD_STACK_SIZE)
#if defined(PTHREAD_STACK_MIN)
    tss_min = PTHREAD_STACK_MIN > THREAD_STACK_MIN ? PTHREAD_STACK_MIN
                                                   : THREAD_STACK_MIN;
#else
    tss_min = THREAD_STACK_MIN;
#endif
    if (size >= tss_min) {
        /* validate stack size by setting thread attribute */
        if (pthread_attr_init(&attrs) == 0) {
            rc = pthread_attr_setstacksize(&attrs, size);
            pthread_attr_destroy(&attrs);
            if (rc == 0) {
                _PyInterpreterState_GET()->threads.stacksize = size;
                return 0;
            }
        }
    }
    return -1;
#else
    return -2;
#endif
}

#define THREAD_SET_STACKSIZE(x) _pythread_pthread_set_stacksize(x)
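
/* Worked example (assuming THREAD_STACK_MIN == 0x8000 as defined above and a
 * hypothetical PTHREAD_STACK_MIN of 16 KiB): tss_min becomes 0x8000, so
 *
 *     THREAD_SET_STACKSIZE(0)        ->  0  (reset to the platform default)
 *     THREAD_SET_STACKSIZE(0x4000)   -> -1  (16 KiB is below tss_min)
 *     THREAD_SET_STACKSIZE(0x100000) ->  0  (1 MiB, accepted and recorded in
 *                                            interp->threads.stacksize,
 *                                            provided pthread_attr_setstacksize
 *                                            accepts the value)
 *
 * On builds without THREAD_STACK_SIZE the function always returns -2.
 */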

/* Thread Local Storage (TLS) API

   This API is DEPRECATED since Python 3.7.  See PEP 539 for details.
*/

/* Issue #25658: On platforms where native TLS key is defined in a way that
   cannot be safely cast to int, PyThread_create_key returns immediately a
   failure status and other TLS functions all are no-ops.  This indicates
   clearly that the old API is not supported on platforms where it cannot be
   used reliably, and that no effort will be made to add such support.

   Note: PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT will be unnecessary after
   removing this API.
*/

int
PyThread_create_key(void)
{
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
    pthread_key_t key;
    int fail = pthread_key_create(&key, NULL);
    if (fail)
        return -1;
    if (key > INT_MAX) {
        /* Issue #22206: handle integer overflow */
        pthread_key_delete(key);
        errno = ENOMEM;
        return -1;
    }
    return (int)key;
#else
    return -1;  /* never return valid key value. */
#endif
}

void
PyThread_delete_key(int key)
{
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
    pthread_key_delete(key);
#endif
}

void
PyThread_delete_key_value(int key)
{
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
    pthread_setspecific(key, NULL);
#endif
}

int
PyThread_set_key_value(int key, void *value)
{
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
    int fail = pthread_setspecific(key, value);
    return fail ? -1 : 0;
#else
    return -1;
#endif
}

void *
PyThread_get_key_value(int key)
{
#ifdef PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT
    return pthread_getspecific(key);
#else
    return NULL;
#endif
}

void
PyThread_ReInitTLS(void)
{
}

/* Thread Specific Storage (TSS) API

   Platform-specific components of TSS API implementation.
*/

int
PyThread_tss_create(Py_tss_t *key)
{
    assert(key != NULL);
    /* If the key has been created, function is silently skipped. */
    if (key->_is_initialized) {
        return 0;
    }

    int fail = pthread_key_create(&(key->_key), NULL);
    if (fail) {
        return -1;
    }
    key->_is_initialized = 1;
    return 0;
}

void
PyThread_tss_delete(Py_tss_t *key)
{
    assert(key != NULL);
    /* If the key has not been created, function is silently skipped. */
    if (!key->_is_initialized) {
        return;
    }

    pthread_key_delete(key->_key);
    /* pthread has not provided the defined invalid value for the key. */
    key->_is_initialized = 0;
}

int
PyThread_tss_set(Py_tss_t *key, void *value)
{
    assert(key != NULL);
    int fail = pthread_setspecific(key->_key, value);
    return fail ? -1 : 0;
}

void *
PyThread_tss_get(Py_tss_t *key)
{
    assert(key != NULL);
    return pthread_getspecific(key->_key);
}
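
/* Illustrative usage of the TSS API (hypothetical caller; Py_tss_NEEDS_INIT
 * is the documented static initializer for Py_tss_t from PEP 539):
 *
 *     static Py_tss_t my_key = Py_tss_NEEDS_INIT;   // hypothetical key
 *
 *     if (PyThread_tss_create(&my_key) != 0) {
 *         // pthread_key_create() failed
 *     }
 *     PyThread_tss_set(&my_key, some_pointer);      // per-thread value
 *     void *value = PyThread_tss_get(&my_key);      // NULL in other threads
 *                                                   // until they set it
 *     PyThread_tss_delete(&my_key);                 // key may be re-created
 */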