pthread.go

// Copyright 2021 The Libc Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package libc // import "modernc.org/libc"

import (
	"runtime"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"

	"modernc.org/libc/errno"
	"modernc.org/libc/pthread"
	"modernc.org/libc/sys/types"
	ctime "modernc.org/libc/time"
)

var (
	mutexes   = map[uintptr]*mutex{}
	mutexesMu sync.Mutex

	threads   = map[int32]*TLS{}
	threadsMu sync.Mutex

	threadKey            pthread.Pthread_key_t
	threadKeyDestructors = map[pthread.Pthread_key_t][]uintptr{} // key: []destructor
	threadsKeysMu        sync.Mutex

	conds   = map[uintptr]*cond{}
	condsMu sync.Mutex
)

// Thread local storage.
type TLS struct {
	errnop uintptr
	pthreadData
	stack stackHeader

	ID                 int32
	reentryGuard       int32 // memgrind
	stackHeaderBalance int32
}

var errno0 int32 // Temp errno for NewTLS

func NewTLS() *TLS {
	return newTLS(false)
}
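
// Note on newTLS below: the TLS is created with errnop pointing at the shared
// errno0 ("Temp errno for NewTLS" above), and only after t.Alloc succeeds is
// errnop switched to a dedicated per-thread int32.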

func newTLS(detached bool) *TLS {
	id := atomic.AddInt32(&tid, 1)
	t := &TLS{ID: id, errnop: uintptr(unsafe.Pointer(&errno0))}
	t.pthreadData.init(t, detached)
	if memgrind {
		atomic.AddInt32(&tlsBalance, 1)
	}
	t.errnop = t.Alloc(int(unsafe.Sizeof(int32(0))))
	*(*int32)(unsafe.Pointer(t.errnop)) = 0
	return t
}

// Pthread specific part of a TLS.
type pthreadData struct {
	done     chan struct{}
	kv       map[pthread.Pthread_key_t]uintptr
	retVal   uintptr
	wait     chan struct{} // cond var interaction
	detached bool
}
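
// The two channels above have distinct roles: done is created only for
// joinable threads and is closed by Xpthread_exit so that Xpthread_join can
// block on it, while wait is a 1-buffered channel that condition variables use
// to wake this thread (see cond.signal and Xpthread_cond_wait).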

func (d *pthreadData) init(t *TLS, detached bool) {
	d.detached = detached
	d.wait = make(chan struct{}, 1)
	if detached {
		return
	}

	d.done = make(chan struct{})
	threadsMu.Lock()
	defer threadsMu.Unlock()
	threads[t.ID] = t
}

func (d *pthreadData) close(t *TLS) {
	threadsMu.Lock()
	defer threadsMu.Unlock()
	delete(threads, t.ID)
}

// int pthread_attr_destroy(pthread_attr_t *attr);
func Xpthread_attr_destroy(t *TLS, pAttr uintptr) int32 {
	return 0
}

// int pthread_attr_setscope(pthread_attr_t *attr, int contentionscope);
func Xpthread_attr_setscope(t *TLS, pAttr uintptr, contentionScope int32) int32 {
	switch contentionScope {
	case pthread.PTHREAD_SCOPE_SYSTEM:
		return 0
	default:
		panic(todo("", contentionScope))
	}
}

// int pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize);
func Xpthread_attr_setstacksize(t *TLS, attr uintptr, stackSize types.Size_t) int32 {
	panic(todo(""))
}

// Go side data of pthread_cond_t.
type cond struct {
	sync.Mutex
	waiters map[*TLS]struct{}
}

func newCond() *cond {
	return &cond{
		waiters: map[*TLS]struct{}{},
	}
}
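
// A cond keeps the set of blocked TLSes; signal sends one token into a
// waiter's 1-buffered wait channel and removes it from the set, and broadcast
// (all == true) does the same for every registered waiter.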

func (c *cond) signal(all bool) int32 {
	if c == nil {
		return errno.EINVAL
	}

	c.Lock()
	defer c.Unlock()
	// The pthread_cond_broadcast() and pthread_cond_signal() functions shall have
	// no effect if there are no threads currently blocked on cond.
	for tls := range c.waiters {
		tls.wait <- struct{}{}
		delete(c.waiters, tls)
		if !all {
			break
		}
	}
	return 0
}

// The pthread_cond_init() function shall initialize the condition variable
// referenced by cond with attributes referenced by attr. If attr is NULL, the
// default condition variable attributes shall be used; the effect is the same
// as passing the address of a default condition variable attributes object.
// Upon successful initialization, the state of the condition variable shall
// become initialized.
//
// If successful, the pthread_cond_destroy() and pthread_cond_init() functions
// shall return zero; otherwise, an error number shall be returned to indicate
// the error.
//
// int pthread_cond_init(pthread_cond_t *restrict cond, const pthread_condattr_t *restrict attr);
func Xpthread_cond_init(t *TLS, pCond, pAttr uintptr) int32 {
	if pCond == 0 {
		return errno.EINVAL
	}

	if pAttr != 0 {
		panic(todo("%#x %#x", pCond, pAttr))
	}

	condsMu.Lock()
	defer condsMu.Unlock()
	conds[pCond] = newCond()
	return 0
}

// int pthread_cond_destroy(pthread_cond_t *cond);
func Xpthread_cond_destroy(t *TLS, pCond uintptr) int32 {
	if pCond == 0 {
		return errno.EINVAL
	}

	condsMu.Lock()
	defer condsMu.Unlock()
	cond := conds[pCond]
	if cond == nil {
		return errno.EINVAL
	}

	cond.Lock()
	defer cond.Unlock()
	if len(cond.waiters) != 0 {
		return errno.EBUSY
	}

	delete(conds, pCond)
	return 0
}

// int pthread_cond_signal(pthread_cond_t *cond);
func Xpthread_cond_signal(t *TLS, pCond uintptr) int32 {
	return condSignal(pCond, false)
}

// int pthread_cond_broadcast(pthread_cond_t *cond);
func Xpthread_cond_broadcast(t *TLS, pCond uintptr) int32 {
	return condSignal(pCond, true)
}
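
// condSignal wakes one waiter (all == false) or all waiters (all == true) of
// the condition variable at pCond. A pCond that was never initialized and
// never waited on has no map entry, so cond is nil and signal reports EINVAL.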

func condSignal(pCond uintptr, all bool) int32 {
	if pCond == 0 {
		return errno.EINVAL
	}

	condsMu.Lock()
	cond := conds[pCond]
	condsMu.Unlock()
	return cond.signal(all)
}

// int pthread_cond_wait(pthread_cond_t *restrict cond, pthread_mutex_t *restrict mutex);
func Xpthread_cond_wait(t *TLS, pCond, pMutex uintptr) int32 {
	if pCond == 0 {
		return errno.EINVAL
	}

	condsMu.Lock()
	cond := conds[pCond]
	if cond == nil { // static initialized condition variables are valid
		cond = newCond()
		conds[pCond] = cond
	}
	cond.Lock()
	cond.waiters[t] = struct{}{}
	cond.Unlock()
	condsMu.Unlock()

	mutexesMu.Lock()
	mu := mutexes[pMutex]
	mutexesMu.Unlock()

	mu.Unlock()
	<-t.wait
	mu.Lock()
	return 0
}
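
// A minimal sketch of the calling pattern Xpthread_cond_wait above serves, in
// terms of this package's own entry points (tls, pMu, pCv and ready are
// hypothetical names, not part of this file):
//
//	Xpthread_mutex_lock(tls, pMu)
//	for !ready {
//		Xpthread_cond_wait(tls, pCv, pMu)
//	}
//	Xpthread_mutex_unlock(tls, pMu)
//
// The waiter registers itself in cond.waiters before releasing the caller's
// mutex, so a signal delivered in that window is parked in the 1-buffered
// t.wait channel rather than lost.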

// int pthread_cond_timedwait(pthread_cond_t *restrict cond, pthread_mutex_t *restrict mutex, const struct timespec *restrict abstime);
func Xpthread_cond_timedwait(t *TLS, pCond, pMutex, pAbsTime uintptr) int32 {
	if pCond == 0 {
		return errno.EINVAL
	}

	condsMu.Lock()
	cond := conds[pCond]
	if cond == nil { // static initialized condition variables are valid
		cond = newCond()
		conds[pCond] = cond
	}
	cond.Lock()
	cond.waiters[t] = struct{}{}
	cond.Unlock()
	condsMu.Unlock()

	mutexesMu.Lock()
	mu := mutexes[pMutex]
	mutexesMu.Unlock()

	deadlineSecs := (*ctime.Timespec)(unsafe.Pointer(pAbsTime)).Ftv_sec
	deadlineNsecs := (*ctime.Timespec)(unsafe.Pointer(pAbsTime)).Ftv_nsec
	deadline := time.Unix(int64(deadlineSecs), int64(deadlineNsecs))
	d := deadline.Sub(time.Now())
	switch {
	case d <= 0:
		// The deadline has already passed; unregister the waiter added above so a
		// later signal is not consumed by a thread that has timed out.
		cond.Lock()
		delete(cond.waiters, t)
		cond.Unlock()
		return errno.ETIMEDOUT
	default:
		to := time.After(d)
		mu.Unlock()
		defer mu.Lock()
		select {
		case <-t.wait:
			return 0
		case <-to:
			cond.Lock()
			defer cond.Unlock()
			delete(cond.waiters, t)
			return errno.ETIMEDOUT
		}
	}
}

// Go side data of pthread_mutex_t.
type mutex struct {
	sync.Mutex
	typ    int // PTHREAD_MUTEX_NORMAL, ...
	wait   sync.Mutex
	id     int32 // owner's t.ID
	cnt    int32
	robust bool
}
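
// How the fields above cooperate for PTHREAD_MUTEX_RECURSIVE: the embedded
// sync.Mutex only guards the bookkeeping, wait is held for as long as some
// thread owns the mutex so that other lockers can block on it, id records the
// owner's TLS ID, and cnt is the recursion depth.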

func newMutex(typ int) *mutex {
	return &mutex{
		typ: typ,
	}
}

func (m *mutex) lock(id int32) int32 {
	if m.robust {
		panic(todo(""))
	}

	// If successful, the pthread_mutex_lock() and pthread_mutex_unlock() functions
	// shall return zero; otherwise, an error number shall be returned to indicate
	// the error.
	switch m.typ {
	case pthread.PTHREAD_MUTEX_NORMAL:
		// If the mutex type is PTHREAD_MUTEX_NORMAL, deadlock detection shall not be
		// provided. Attempting to relock the mutex causes deadlock. If a thread
		// attempts to unlock a mutex that it has not locked or a mutex which is
		// unlocked, undefined behavior results.
		m.Lock()
		m.id = id
		return 0
	case pthread.PTHREAD_MUTEX_RECURSIVE:
		for {
			m.Lock()
			switch m.id {
			case 0:
				m.cnt = 1
				m.id = id
				m.wait.Lock()
				m.Unlock()
				return 0
			case id:
				m.cnt++
				m.Unlock()
				return 0
			}

			m.Unlock()
			m.wait.Lock()
			m.wait.Unlock()
		}
	default:
		panic(todo("", m.typ))
	}
}
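
// Note that for PTHREAD_MUTEX_NORMAL tryLock below does not attempt to take
// the lock at all; it simply reports EBUSY.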

func (m *mutex) tryLock(id int32) int32 {
	if m.robust {
		panic(todo(""))
	}

	switch m.typ {
	case pthread.PTHREAD_MUTEX_NORMAL:
		return errno.EBUSY
	case pthread.PTHREAD_MUTEX_RECURSIVE:
		m.Lock()
		switch m.id {
		case 0:
			m.cnt = 1
			m.id = id
			m.wait.Lock()
			m.Unlock()
			return 0
		case id:
			m.cnt++
			m.Unlock()
			return 0
		}

		m.Unlock()
		return errno.EBUSY
	default:
		panic(todo("", m.typ))
	}
}

func (m *mutex) unlock() int32 {
	if m.robust {
		panic(todo(""))
	}

	// If successful, the pthread_mutex_lock() and pthread_mutex_unlock() functions
	// shall return zero; otherwise, an error number shall be returned to indicate
	// the error.
	switch m.typ {
	case pthread.PTHREAD_MUTEX_NORMAL:
		// If the mutex type is PTHREAD_MUTEX_NORMAL, deadlock detection shall not be
		// provided. Attempting to relock the mutex causes deadlock. If a thread
		// attempts to unlock a mutex that it has not locked or a mutex which is
		// unlocked, undefined behavior results.
		m.id = 0
		m.Unlock()
		return 0
	case pthread.PTHREAD_MUTEX_RECURSIVE:
		m.Lock()
		m.cnt--
		if m.cnt == 0 {
			m.id = 0
			m.wait.Unlock()
		}
		m.Unlock()
		return 0
	default:
		panic(todo("", m.typ))
	}
}

// int pthread_mutex_destroy(pthread_mutex_t *mutex);
func Xpthread_mutex_destroy(t *TLS, pMutex uintptr) int32 {
	mutexesMu.Lock()
	defer mutexesMu.Unlock()
	delete(mutexes, pMutex)
	return 0
}

// int pthread_mutex_lock(pthread_mutex_t *mutex);
func Xpthread_mutex_lock(t *TLS, pMutex uintptr) int32 {
	mutexesMu.Lock()
	mu := mutexes[pMutex]
	if mu == nil { // static initialized mutexes are valid
		mu = newMutex(int(X__ccgo_getMutexType(t, pMutex)))
		mutexes[pMutex] = mu
	}
	mutexesMu.Unlock()
	return mu.lock(t.ID)
}

// int pthread_mutex_trylock(pthread_mutex_t *mutex);
func Xpthread_mutex_trylock(t *TLS, pMutex uintptr) int32 {
	mutexesMu.Lock()
	mu := mutexes[pMutex]
	if mu == nil { // static initialized mutexes are valid
		mu = newMutex(int(X__ccgo_getMutexType(t, pMutex)))
		mutexes[pMutex] = mu
	}
	mutexesMu.Unlock()
	return mu.tryLock(t.ID)
}

// int pthread_mutex_unlock(pthread_mutex_t *mutex);
func Xpthread_mutex_unlock(t *TLS, pMutex uintptr) int32 {
	mutexesMu.Lock()
	defer mutexesMu.Unlock()
	return mutexes[pMutex].unlock()
}

// int pthread_key_create(pthread_key_t *key, void (*destructor)(void*));
func Xpthread_key_create(t *TLS, pKey, destructor uintptr) int32 {
	threadsKeysMu.Lock()
	defer threadsKeysMu.Unlock()
	threadKey++
	r := threadKey
	if destructor != 0 {
		threadKeyDestructors[r] = append(threadKeyDestructors[r], destructor)
	}
	*(*pthread.Pthread_key_t)(unsafe.Pointer(pKey)) = pthread.Pthread_key_t(r)
	return 0
}
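
// Destructors registered above are recorded per key, but running them at
// thread exit is not implemented yet: Xpthread_exit currently panics with a
// TODO when it finds a destructor for a live key value.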

// int pthread_key_delete(pthread_key_t key);
func Xpthread_key_delete(t *TLS, key pthread.Pthread_key_t) int32 {
	if _, ok := t.kv[key]; ok {
		delete(t.kv, key)
		return 0
	}

	panic(todo(""))
}

// void *pthread_getspecific(pthread_key_t key);
func Xpthread_getspecific(t *TLS, key pthread.Pthread_key_t) uintptr {
	return t.kv[key]
}

// int pthread_setspecific(pthread_key_t key, const void *value);
func Xpthread_setspecific(t *TLS, key pthread.Pthread_key_t, value uintptr) int32 {
	if t.kv == nil {
		t.kv = map[pthread.Pthread_key_t]uintptr{}
	}
	t.kv[key] = value
	return 0
}
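
// Xpthread_create below converts the C start_routine pointer into a callable
// Go func value via an unsafe struct conversion, runs it on a fresh goroutine
// with its own TLS, and hands the routine's return value to Xpthread_exit.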

// int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
func Xpthread_create(t *TLS, pThread, pAttr, startRoutine, arg uintptr) int32 {
	fn := (*struct {
		f func(*TLS, uintptr) uintptr
	})(unsafe.Pointer(&struct{ uintptr }{startRoutine})).f
	detached := pAttr != 0 && X__ccgo_pthreadAttrGetDetachState(t, pAttr) == pthread.PTHREAD_CREATE_DETACHED
	tls := newTLS(detached)
	*(*pthread.Pthread_t)(unsafe.Pointer(pThread)) = pthread.Pthread_t(tls.ID)

	go func() {
		Xpthread_exit(tls, fn(tls, arg))
	}()

	return 0
}

// int pthread_detach(pthread_t thread);
func Xpthread_detach(t *TLS, thread pthread.Pthread_t) int32 {
	threadsMu.Lock()
	threads[int32(thread)].detached = true
	threadsMu.Unlock()
	return 0
}

// int pthread_equal(pthread_t t1, pthread_t t2);
func Xpthread_equal(t *TLS, t1, t2 pthread.Pthread_t) int32 {
	return Bool32(t1 == t2)
}

// void pthread_exit(void *value_ptr);
func Xpthread_exit(t *TLS, value uintptr) {
	t.retVal = value

	// At thread exit, if a key value has a non-NULL destructor pointer, and the
	// thread has a non-NULL value associated with that key, the value of the key
	// is set to NULL, and then the function pointed to is called with the
	// previously associated value as its sole argument. The order of destructor
	// calls is unspecified if more than one destructor exists for a thread when it
	// exits.
	for k, v := range t.kv {
		if v == 0 {
			continue
		}

		threadsKeysMu.Lock()
		destructors := threadKeyDestructors[k]
		threadsKeysMu.Unlock()
		for _, destructor := range destructors {
			delete(t.kv, k)
			panic(todo("%#x", destructor)) //TODO call destructor(v)
		}
	}

	switch {
	case t.detached:
		threadsMu.Lock()
		delete(threads, t.ID)
		threadsMu.Unlock()
	default:
		close(t.done)
	}

	runtime.Goexit()
}

// int pthread_join(pthread_t thread, void **value_ptr);
func Xpthread_join(t *TLS, thread pthread.Pthread_t, pValue uintptr) int32 {
	threadsMu.Lock()
	tls := threads[int32(thread)]
	delete(threads, int32(thread))
	threadsMu.Unlock()
	<-tls.done
	if pValue != 0 {
		*(*uintptr)(unsafe.Pointer(pValue)) = tls.retVal
	}
	return 0
}
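
// Joining a thread that was created detached, already joined, or never
// created through Xpthread_create finds no registry entry above, so tls is
// nil and the receive from tls.done panics; POSIX leaves joining a
// non-joinable thread undefined.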

// pthread_t pthread_self(void);
func Xpthread_self(t *TLS) pthread.Pthread_t {
	return pthread.Pthread_t(t.ID)
}