// Copyright 2020 The Libc Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:generate echo package libc > ccgo.go
//go:generate go run generate.go
//go:generate go fmt ./...

// Package libc provides run time support for ccgo generated programs and
// implements selected parts of the C standard library.
package libc // import "modernc.org/libc"

//TODO use O_RDONLY etc. from fcntl header
//TODO use t.Alloc/Free where appropriate

import (
	"bufio"
	crand "crypto/rand"
	"fmt"
	"math"
	mbits "math/bits"
	"math/rand"
	"os"
	"runtime"
	"runtime/debug"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	gotime "time"
	"unsafe"

	"github.com/mattn/go-isatty"
	"modernc.org/libc/errno"
	"modernc.org/libc/stdio"
	"modernc.org/libc/sys/types"
	"modernc.org/libc/time"
	"modernc.org/libc/unistd"
	"modernc.org/mathutil"
)

type (
	// RawMem64 represents the biggest uint64 array the runtime can handle.
	RawMem64 [unsafe.Sizeof(RawMem{}) / unsafe.Sizeof(uint64(0))]uint64
)

var (
	allocMu            sync.Mutex
	environInitialized bool
	isWindows          bool
)

// Keep these outside of the var block, otherwise go generate will miss them.
var Xenviron uintptr
var Xstdin = newFile(nil, unistd.STDIN_FILENO)
var Xstdout = newFile(nil, unistd.STDOUT_FILENO)
var Xstderr = newFile(nil, unistd.STDERR_FILENO)

func setEnviron() {
	SetEnviron(nil, os.Environ())
}

func Environ() uintptr {
	if !environInitialized {
		SetEnviron(nil, os.Environ())
	}
	return Xenviron
}

func EnvironP() uintptr {
	if !environInitialized {
		SetEnviron(nil, os.Environ())
	}
	return uintptr(unsafe.Pointer(&Xenviron))
}

func X___errno_location(t *TLS) uintptr {
	return X__errno_location(t)
}

// int * __errno_location(void);
func X__errno_location(t *TLS) uintptr {
	return t.errnop
}

func Start(main func(*TLS, int32, uintptr) int32) {
	if dmesgs {
		wd, err := os.Getwd()
		dmesg("%v: %v, wd %v, %v", origin(1), os.Args, wd, err)
		defer func() {
			if err := recover(); err != nil {
				dmesg("%v: CRASH: %v\n%s", origin(1), err, debug.Stack())
			}
		}()
	}
	runtime.LockOSThread()
	t := &TLS{errnop: uintptr(unsafe.Pointer(&errno0))}
	argv := Xcalloc(t, 1, types.Size_t((len(os.Args)+1)*int(uintptrSize)))
	if argv == 0 {
		panic("OOM")
	}
	p := argv
	for _, v := range os.Args {
		s := Xcalloc(t, 1, types.Size_t(len(v)+1))
		if s == 0 {
			panic("OOM")
		}
		copy((*RawMem)(unsafe.Pointer(s))[:len(v):len(v)], v)
		*(*uintptr)(unsafe.Pointer(p)) = s
		p += uintptrSize
	}
	SetEnviron(t, os.Environ())
	audit := false
	if memgrind {
		if s := os.Getenv("LIBC_MEMGRIND_START"); s != "0" {
			MemAuditStart()
			audit = true
		}
	}
	t = NewTLS()
	rc := main(t, int32(len(os.Args)), argv)
	exit(t, rc, audit)
}
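
// The sketch below is illustrative only and not part of the package: it shows
// how a ccgo generated program typically enters Start from its Go main, with
// the translated C main having the signature Start expects. The name xmain is
// a hypothetical stand-in for whatever identifier the generator emits.
//
//	func main() {
//		libc.Start(xmain) // xmain: func(*libc.TLS, int32, uintptr) int32
//	}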
func Xexit(t *TLS, status int32) { exit(t, status, false) }

func exit(t *TLS, status int32, audit bool) {
	if len(Covered) != 0 {
		buf := bufio.NewWriter(os.Stdout)
		CoverReport(buf)
		buf.Flush()
	}
	if len(CoveredC) != 0 {
		buf := bufio.NewWriter(os.Stdout)
		CoverCReport(buf)
		buf.Flush()
	}
	for _, v := range atExit {
		v()
	}
	if audit {
		t.Close()
		if tlsBalance != 0 {
			fmt.Fprintf(os.Stderr, "non zero TLS balance: %d\n", tlsBalance)
			status = 1
		}
	}
	X_exit(nil, status)
}

// void _exit(int status);
func X_exit(_ *TLS, status int32) {
	if dmesgs {
		dmesg("%v: EXIT %v", origin(1), status)
	}
	os.Exit(int(status))
}

func SetEnviron(t *TLS, env []string) {
	if environInitialized {
		return
	}
	environInitialized = true
	p := Xcalloc(t, 1, types.Size_t((len(env)+1)*(int(uintptrSize))))
	if p == 0 {
		panic("OOM")
	}
	Xenviron = p
	for _, v := range env {
		s := Xcalloc(t, 1, types.Size_t(len(v)+1))
		if s == 0 {
			panic("OOM")
		}
		copy((*(*RawMem)(unsafe.Pointer(s)))[:len(v):len(v)], v)
		*(*uintptr)(unsafe.Pointer(p)) = s
		p += uintptrSize
	}
}

// void setbuf(FILE *stream, char *buf);
func Xsetbuf(t *TLS, stream, buf uintptr) {
	//TODO panic(todo(""))
}

// size_t confstr(int name, char *buf, size_t len);
func Xconfstr(t *TLS, name int32, buf uintptr, len types.Size_t) types.Size_t {
	panic(todo(""))
}

// int puts(const char *s);
func Xputs(t *TLS, s uintptr) int32 {
	n, err := fmt.Printf("%s\n", GoString(s))
	if err != nil {
		return stdio.EOF
	}
	return int32(n)
}

var (
	randomMu  sync.Mutex
	randomGen = rand.New(rand.NewSource(42))
)

// long int random(void);
func Xrandom(t *TLS) long {
	randomMu.Lock()
	r := randomGen.Int63n(math.MaxInt32 + 1)
	randomMu.Unlock()
	return long(r)
}

func write(b []byte) (int, error) {
	// if dmesgs {
	// 	dmesg("%v: %s", origin(1), b)
	// }
	if _, err := os.Stdout.Write(b); err != nil {
		return -1, err
	}
	return len(b), nil
}

func X__builtin_bzero(t *TLS, s uintptr, n types.Size_t) { Xbzero(t, s, n) }
func X__builtin_abort(t *TLS) { Xabort(t) }
func X__builtin_abs(t *TLS, j int32) int32 { return Xabs(t, j) }
func X__builtin_clz(t *TLS, n uint32) int32 { return int32(mbits.LeadingZeros32(n)) }
func X__builtin_clzl(t *TLS, n ulong) int32 { return int32(mbits.LeadingZeros64(uint64(n))) }
func X__builtin_clzll(t *TLS, n uint64) int32 { return int32(mbits.LeadingZeros64(n)) }
func X__builtin_constant_p_impl() { panic(todo("internal error: should never be called")) }
func X__builtin_copysign(t *TLS, x, y float64) float64 { return Xcopysign(t, x, y) }
func X__builtin_copysignf(t *TLS, x, y float32) float32 { return Xcopysignf(t, x, y) }
func X__builtin_copysignl(t *TLS, x, y float64) float64 { return Xcopysign(t, x, y) }
func X__builtin_exit(t *TLS, status int32) { Xexit(t, status) }
func X__builtin_expect(t *TLS, exp, c long) long { return exp }
func X__builtin_fabs(t *TLS, x float64) float64 { return Xfabs(t, x) }
func X__builtin_fabsf(t *TLS, x float32) float32 { return Xfabsf(t, x) }
func X__builtin_fabsl(t *TLS, x float64) float64 { return Xfabsl(t, x) }
func X__builtin_free(t *TLS, ptr uintptr) { Xfree(t, ptr) }
func X__builtin_getentropy(t *TLS, buf uintptr, n types.Size_t) int32 { return Xgetentropy(t, buf, n) }
func X__builtin_huge_val(t *TLS) float64 { return math.Inf(1) }
func X__builtin_huge_valf(t *TLS) float32 { return float32(math.Inf(1)) }
func X__builtin_inf(t *TLS) float64 { return math.Inf(1) }
func X__builtin_inff(t *TLS) float32 { return float32(math.Inf(1)) }
func X__builtin_infl(t *TLS) float64 { return math.Inf(1) }
func X__builtin_malloc(t *TLS, size types.Size_t) uintptr { return Xmalloc(t, size) }
func X__builtin_memcmp(t *TLS, s1, s2 uintptr, n types.Size_t) int32 { return Xmemcmp(t, s1, s2, n) }
func X__builtin_nan(t *TLS, s uintptr) float64 { return math.NaN() }
func X__builtin_nanf(t *TLS, s uintptr) float32 { return float32(math.NaN()) }
func X__builtin_nanl(t *TLS, s uintptr) float64 { return math.NaN() }
func X__builtin_prefetch(t *TLS, addr, args uintptr) {}
func X__builtin_printf(t *TLS, s, args uintptr) int32 { return Xprintf(t, s, args) }
func X__builtin_strchr(t *TLS, s uintptr, c int32) uintptr { return Xstrchr(t, s, c) }
func X__builtin_strcmp(t *TLS, s1, s2 uintptr) int32 { return Xstrcmp(t, s1, s2) }
func X__builtin_strcpy(t *TLS, dest, src uintptr) uintptr { return Xstrcpy(t, dest, src) }
func X__builtin_strlen(t *TLS, s uintptr) types.Size_t { return Xstrlen(t, s) }
func X__builtin_trap(t *TLS) { Xabort(t) }
func X__isnan(t *TLS, arg float64) int32 { return X__builtin_isnan(t, arg) }
func X__isnanf(t *TLS, arg float32) int32 { return Xisnanf(t, arg) }
func X__isnanl(t *TLS, arg float64) int32 { return Xisnanl(t, arg) }
func Xvfprintf(t *TLS, stream, format, ap uintptr) int32 { return Xfprintf(t, stream, format, ap) }

// int __builtin_popcount (unsigned int x)
func X__builtin_popcount(t *TLS, x uint32) int32 {
	return int32(mbits.OnesCount32(x))
}

// int __builtin_popcountl (unsigned long x)
func X__builtin_popcountl(t *TLS, x ulong) int32 {
	return int32(mbits.OnesCount64(uint64(x)))
}

// char * __builtin___strcpy_chk (char *dest, const char *src, size_t os);
func X__builtin___strcpy_chk(t *TLS, dest, src uintptr, os types.Size_t) uintptr {
	return Xstrcpy(t, dest, src)
}

func X__builtin_mmap(t *TLS, addr uintptr, length types.Size_t, prot, flags, fd int32, offset types.Off_t) uintptr {
	return Xmmap(t, addr, length, prot, flags, fd, offset)
}

// uint16_t __builtin_bswap16 (uint16_t x)
func X__builtin_bswap16(t *TLS, x uint16) uint16 {
	return x<<8 |
		x>>8
}

// uint32_t __builtin_bswap32 (uint32_t x)
func X__builtin_bswap32(t *TLS, x uint32) uint32 {
	return x<<24 |
		x&0xff00<<8 |
		x&0xff0000>>8 |
		x>>24
}

// uint64_t __builtin_bswap64 (uint64_t x)
func X__builtin_bswap64(t *TLS, x uint64) uint64 {
	return x<<56 |
		x&0xff00<<40 |
		x&0xff0000<<24 |
		x&0xff000000<<8 |
		x&0xff00000000>>8 |
		x&0xff0000000000>>24 |
		x&0xff000000000000>>40 |
		x>>56
}
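
// Illustrative sketch, not part of the original file: a quick check of the
// byte-swap helpers above on a known value. The function name and the chosen
// constant are assumptions made for the example only.
func exampleBswap(t *TLS) bool {
	// 0x01020304 byte-swapped is 0x04030201; swapping twice restores the input.
	x := uint32(0x01020304)
	return X__builtin_bswap32(t, x) == 0x04030201 &&
		X__builtin_bswap32(t, X__builtin_bswap32(t, x)) == x
}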
// bool __builtin_add_overflow (type1 a, type2 b, type3 *res)
func X__builtin_add_overflowInt64(t *TLS, a, b int64, res uintptr) int32 {
	r, ovf := mathutil.AddOverflowInt64(a, b)
	*(*int64)(unsafe.Pointer(res)) = r
	return Bool32(ovf)
}

// bool __builtin_add_overflow (type1 a, type2 b, type3 *res)
func X__builtin_add_overflowUint32(t *TLS, a, b uint32, res uintptr) int32 {
	r := a + b
	*(*uint32)(unsafe.Pointer(res)) = r
	return Bool32(r < a)
}

// bool __builtin_add_overflow (type1 a, type2 b, type3 *res)
func X__builtin_add_overflowUint64(t *TLS, a, b uint64, res uintptr) int32 {
	r := a + b
	*(*uint64)(unsafe.Pointer(res)) = r
	return Bool32(r < a)
}

// bool __builtin_sub_overflow (type1 a, type2 b, type3 *res)
func X__builtin_sub_overflowInt64(t *TLS, a, b int64, res uintptr) int32 {
	r, ovf := mathutil.SubOverflowInt64(a, b)
	*(*int64)(unsafe.Pointer(res)) = r
	return Bool32(ovf)
}

// bool __builtin_mul_overflow (type1 a, type2 b, type3 *res)
func X__builtin_mul_overflowInt64(t *TLS, a, b int64, res uintptr) int32 {
	r, ovf := mathutil.MulOverflowInt64(a, b)
	*(*int64)(unsafe.Pointer(res)) = r
	return Bool32(ovf)
}

// bool __builtin_mul_overflow (type1 a, type2 b, type3 *res)
func X__builtin_mul_overflowUint64(t *TLS, a, b uint64, res uintptr) int32 {
	hi, lo := mbits.Mul64(a, b)
	*(*uint64)(unsafe.Pointer(res)) = lo
	return Bool32(hi != 0)
}

// bool __builtin_mul_overflow (type1 a, type2 b, type3 *res)
func X__builtin_mul_overflowUint128(t *TLS, a, b Uint128, res uintptr) int32 {
	r, ovf := a.mulOvf(b)
	*(*Uint128)(unsafe.Pointer(res)) = r
	return Bool32(ovf)
}
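
// Illustrative sketch, not part of the original file: how the unsigned add
// overflow helper above reports wrap-around. The function name and the chosen
// operands are assumptions for the example only.
func exampleAddOverflow(t *TLS) (sum uint32, overflowed bool) {
	var res uint32
	// math.MaxUint32 + 1 wraps to 0, so the helper returns a non-zero flag.
	ovf := X__builtin_add_overflowUint32(t, math.MaxUint32, 1, uintptr(unsafe.Pointer(&res)))
	return res, ovf != 0
}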
func X__builtin_unreachable(t *TLS) {
	fmt.Fprintf(os.Stderr, "unreachable\n")
	os.Stderr.Sync()
	Xexit(t, 1)
}

func X__builtin_snprintf(t *TLS, str uintptr, size types.Size_t, format, args uintptr) int32 {
	return Xsnprintf(t, str, size, format, args)
}

func X__builtin_sprintf(t *TLS, str, format, args uintptr) (r int32) {
	return Xsprintf(t, str, format, args)
}

func X__builtin_memcpy(t *TLS, dest, src uintptr, n types.Size_t) (r uintptr) {
	return Xmemcpy(t, dest, src, n)
}

// void * __builtin___memcpy_chk (void *dest, const void *src, size_t n, size_t os);
func X__builtin___memcpy_chk(t *TLS, dest, src uintptr, n, os types.Size_t) (r uintptr) {
	if os != ^types.Size_t(0) && n < os {
		Xabort(t)
	}
	return Xmemcpy(t, dest, src, n)
}

func X__builtin_memset(t *TLS, s uintptr, c int32, n types.Size_t) uintptr {
	return Xmemset(t, s, c, n)
}

// void * __builtin___memset_chk (void *s, int c, size_t n, size_t os);
func X__builtin___memset_chk(t *TLS, s uintptr, c int32, n, os types.Size_t) uintptr {
	if os < n {
		Xabort(t)
	}
	return Xmemset(t, s, c, n)
}

// size_t __builtin_object_size (const void * ptr, int type)
func X__builtin_object_size(t *TLS, p uintptr, typ int32) types.Size_t {
	return ^types.Size_t(0) //TODO frontend magic
}

var atomicLoadStore16 sync.Mutex

func AtomicLoadNUint16(ptr uintptr, memorder int32) uint16 {
	atomicLoadStore16.Lock()
	r := *(*uint16)(unsafe.Pointer(ptr))
	atomicLoadStore16.Unlock()
	return r
}

func AtomicStoreNUint16(ptr uintptr, val uint16, memorder int32) {
	atomicLoadStore16.Lock()
	*(*uint16)(unsafe.Pointer(ptr)) = val
	atomicLoadStore16.Unlock()
}

// int sprintf(char *str, const char *format, ...);
func Xsprintf(t *TLS, str, format, args uintptr) (r int32) {
	b := printf(format, args)
	r = int32(len(b))
	copy((*RawMem)(unsafe.Pointer(str))[:r:r], b)
	*(*byte)(unsafe.Pointer(str + uintptr(r))) = 0
	return int32(len(b))
}

// int __builtin___sprintf_chk (char *s, int flag, size_t os, const char *fmt, ...);
func X__builtin___sprintf_chk(t *TLS, s uintptr, flag int32, os types.Size_t, format, args uintptr) (r int32) {
	return Xsprintf(t, s, format, args)
}

// void qsort(void *base, size_t nmemb, size_t size, int (*compar)(const void *, const void *));
func Xqsort(t *TLS, base uintptr, nmemb, size types.Size_t, compar uintptr) {
	sort.Sort(&sorter{
		len:  int(nmemb),
		base: base,
		sz:   uintptr(size),
		f: (*struct {
			f func(*TLS, uintptr, uintptr) int32
		})(unsafe.Pointer(&struct{ uintptr }{compar})).f,
		t: t,
	})
}
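
// Illustrative sketch, not part of the original file: Xqsort above recovers a
// Go comparator from the compar uintptr by reinterpreting it as a func value,
// so a Go caller has to produce compar with the inverse conversion. The helper
// name cFuncPtr, the example function, and the comparator are assumptions made
// for this sketch; ccgo generated code normally emits this conversion itself,
// typically for package-level functions.
func cFuncPtr(f func(*TLS, uintptr, uintptr) int32) uintptr {
	return *(*uintptr)(unsafe.Pointer(&struct {
		f func(*TLS, uintptr, uintptr) int32
	}{f}))
}

func exampleQsortInt32(t *TLS, base uintptr, n types.Size_t) {
	cmp := func(tls *TLS, a, b uintptr) int32 {
		// Compare two int32 elements through their addresses.
		x, y := *(*int32)(unsafe.Pointer(a)), *(*int32)(unsafe.Pointer(b))
		switch {
		case x < y:
			return -1
		case x > y:
			return 1
		default:
			return 0
		}
	}
	Xqsort(t, base, n, types.Size_t(unsafe.Sizeof(int32(0))), cFuncPtr(cmp))
	runtime.KeepAlive(cmp) // keep the func value alive across the uintptr round trip
}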
// void __assert_fail(const char * assertion, const char * file, unsigned int line, const char * function);
func X__assert_fail(t *TLS, assertion, file uintptr, line uint32, function uintptr) {
	fmt.Fprintf(os.Stderr, "assertion failure: %s:%d.%s: %s\n", GoString(file), line, GoString(function), GoString(assertion))
	if memgrind {
		fmt.Fprintf(os.Stderr, "%s\n", debug.Stack())
	}
	os.Stderr.Sync()
	Xexit(t, 1)
}

// int vprintf(const char *format, va_list ap);
func Xvprintf(t *TLS, s, ap uintptr) int32 { return Xprintf(t, s, ap) }

// int vsprintf(char *str, const char *format, va_list ap);
func Xvsprintf(t *TLS, str, format, va uintptr) int32 {
	return Xsprintf(t, str, format, va)
}

// int vsnprintf(char *str, size_t size, const char *format, va_list ap);
func Xvsnprintf(t *TLS, str uintptr, size types.Size_t, format, va uintptr) int32 {
	return Xsnprintf(t, str, size, format, va)
}

// int obstack_vprintf (struct obstack *obstack, const char *template, va_list ap)
func Xobstack_vprintf(t *TLS, obstack, template, va uintptr) int32 {
	panic(todo(""))
}

// extern void _obstack_newchunk(struct obstack *, int);
func X_obstack_newchunk(t *TLS, obstack uintptr, length int32) int32 {
	panic(todo(""))
}

// int _obstack_begin (struct obstack *h, _OBSTACK_SIZE_T size, _OBSTACK_SIZE_T alignment, void *(*chunkfun) (size_t), void (*freefun) (void *))
func X_obstack_begin(t *TLS, obstack uintptr, size, alignment int32, chunkfun, freefun uintptr) int32 {
	panic(todo(""))
}

// void obstack_free (struct obstack *h, void *obj)
func Xobstack_free(t *TLS, obstack, obj uintptr) {
	panic(todo(""))
}

// unsigned int sleep(unsigned int seconds);
func Xsleep(t *TLS, seconds uint32) uint32 {
	gotime.Sleep(gotime.Second * gotime.Duration(seconds))
	return 0
}

// size_t strcspn(const char *s, const char *reject);
func Xstrcspn(t *TLS, s, reject uintptr) (r types.Size_t) {
	bits := newBits(256)
	for {
		c := *(*byte)(unsafe.Pointer(reject))
		if c == 0 {
			break
		}
		reject++
		bits.set(int(c))
	}
	for {
		c := *(*byte)(unsafe.Pointer(s))
		if c == 0 || bits.has(int(c)) {
			return r
		}
		s++
		r++
	}
}

// int printf(const char *format, ...);
func Xprintf(t *TLS, format, args uintptr) int32 {
	n, _ := write(printf(format, args))
	return int32(n)
}

// int snprintf(char *str, size_t size, const char *format, ...);
func Xsnprintf(t *TLS, str uintptr, size types.Size_t, format, args uintptr) (r int32) {
	if format == 0 {
		return 0
	}
	b := printf(format, args)
	r = int32(len(b))
	if size == 0 {
		return r
	}
	if len(b)+1 > int(size) {
		b = b[:size-1]
	}
	n := len(b)
	copy((*RawMem)(unsafe.Pointer(str))[:n:n], b)
	*(*byte)(unsafe.Pointer(str + uintptr(n))) = 0
	return r
}
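
// Illustrative sketch, not part of the original file: Xsnprintf truncates the
// output to size-1 bytes, always NUL terminates, and still returns the full
// untruncated length, mirroring C snprintf. It relies on the package's CString
// helper (defined elsewhere in this package); the buffer size of 8 is an
// assumption for the example only.
func exampleSnprintfTruncation(t *TLS) int32 {
	format, err := CString("hello, world") // no conversion verbs, so a zero va_list is fine
	if err != nil {
		panic(err)
	}
	defer Xfree(t, format)
	const bufSize = 8
	buf := Xmalloc(t, bufSize)
	if buf == 0 {
		panic("OOM")
	}
	defer Xfree(t, buf)
	// Returns 12 (the full length) while buf holds "hello, " plus the terminating NUL.
	return Xsnprintf(t, buf, bufSize, format, 0)
}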
// int __builtin___snprintf_chk(char * str, size_t maxlen, int flag, size_t os, const char * format, ...);
func X__builtin___snprintf_chk(t *TLS, str uintptr, maxlen types.Size_t, flag int32, os types.Size_t, format, args uintptr) (r int32) {
	if os != ^types.Size_t(0) && maxlen > os {
		Xabort(t)
	}
	return Xsnprintf(t, str, maxlen, format, args)
}

// int __builtin___vsnprintf_chk (char *s, size_t maxlen, int flag, size_t os, const char *fmt, va_list ap);
func X__builtin___vsnprintf_chk(t *TLS, str uintptr, maxlen types.Size_t, flag int32, os types.Size_t, format, args uintptr) (r int32) {
	if os != ^types.Size_t(0) && maxlen > os {
		Xabort(t)
	}
	return Xsnprintf(t, str, maxlen, format, args)
}

// int abs(int j);
func Xabs(t *TLS, j int32) int32 {
	if j >= 0 {
		return j
	}
	return -j
}

func Xllabs(tls *TLS, a int64) int64 {
	if a >= int64(0) {
		return a
	}
	return -a
}

func X__builtin_isnan(t *TLS, x float64) int32 { return Bool32(math.IsNaN(x)) }
func X__builtin_llabs(tls *TLS, a int64) int64 { return Xllabs(tls, a) }
func Xacos(t *TLS, x float64) float64 { return math.Acos(x) }
func Xacosh(t *TLS, x float64) float64 { return math.Acosh(x) }
func Xasin(t *TLS, x float64) float64 { return math.Asin(x) }
func Xasinh(t *TLS, x float64) float64 { return math.Asinh(x) }
func Xatan(t *TLS, x float64) float64 { return math.Atan(x) }
func Xatan2(t *TLS, x, y float64) float64 { return math.Atan2(x, y) }
func Xatanh(t *TLS, x float64) float64 { return math.Atanh(x) }
func Xceil(t *TLS, x float64) float64 { return math.Ceil(x) }
func Xceilf(t *TLS, x float32) float32 { return float32(math.Ceil(float64(x))) }
func Xcopysign(t *TLS, x, y float64) float64 { return math.Copysign(x, y) }
func Xcopysignf(t *TLS, x, y float32) float32 { return float32(math.Copysign(float64(x), float64(y))) }
func Xcos(t *TLS, x float64) float64 { return math.Cos(x) }
func Xcosf(t *TLS, x float32) float32 { return float32(math.Cos(float64(x))) }
func Xcosh(t *TLS, x float64) float64 { return math.Cosh(x) }
func Xexp(t *TLS, x float64) float64 { return math.Exp(x) }
func Xfabs(t *TLS, x float64) float64 { return math.Abs(x) }
func Xfabsf(t *TLS, x float32) float32 { return float32(math.Abs(float64(x))) }
func Xfloor(t *TLS, x float64) float64 { return math.Floor(x) }
func Xfmod(t *TLS, x, y float64) float64 { return math.Mod(x, y) }
func Xhypot(t *TLS, x, y float64) float64 { return math.Hypot(x, y) }
func Xisnan(t *TLS, x float64) int32 { return X__builtin_isnan(t, x) }
func Xisnanf(t *TLS, x float32) int32 { return Bool32(math.IsNaN(float64(x))) }
func Xisnanl(t *TLS, x float64) int32 { return Bool32(math.IsNaN(x)) } // ccgo has to handle long double as double as Go does not support long double.
func Xldexp(t *TLS, x float64, exp int32) float64 { return math.Ldexp(x, int(exp)) }
func Xlog(t *TLS, x float64) float64 { return math.Log(x) }
func Xlog10(t *TLS, x float64) float64 { return math.Log10(x) }
func Xlog2(t *TLS, x float64) float64 { return math.Log2(x) }
func Xround(t *TLS, x float64) float64 { return math.Round(x) }
func Xsin(t *TLS, x float64) float64 { return math.Sin(x) }
func Xsinf(t *TLS, x float32) float32 { return float32(math.Sin(float64(x))) }
func Xsinh(t *TLS, x float64) float64 { return math.Sinh(x) }
func Xsqrt(t *TLS, x float64) float64 { return math.Sqrt(x) }
func Xtan(t *TLS, x float64) float64 { return math.Tan(x) }
func Xtanh(t *TLS, x float64) float64 { return math.Tanh(x) }
func Xtrunc(t *TLS, x float64) float64 { return math.Trunc(x) }

var nextRand = uint64(1)

// int rand(void);
func Xrand(t *TLS) int32 {
	nextRand = nextRand*1103515245 + 12345
	return int32(uint32(nextRand / (math.MaxUint32 + 1) % math.MaxInt32))
}

func Xpow(t *TLS, x, y float64) float64 {
	r := math.Pow(x, y)
	if x > 0 && r == 1 && y >= -1.0000000000000000715e-18 && y < -1e-30 {
		r = 0.9999999999999999
	}
	return r
}

func Xfrexp(t *TLS, x float64, exp uintptr) float64 {
	f, e := math.Frexp(x)
	*(*int32)(unsafe.Pointer(exp)) = int32(e)
	return f
}

func Xmodf(t *TLS, x float64, iptr uintptr) float64 {
	i, f := math.Modf(x)
	*(*float64)(unsafe.Pointer(iptr)) = i
	return f
}

// char *strncpy(char *dest, const char *src, size_t n)
func Xstrncpy(t *TLS, dest, src uintptr, n types.Size_t) (r uintptr) {
	r = dest
	for c := *(*int8)(unsafe.Pointer(src)); c != 0 && n > 0; n-- {
		*(*int8)(unsafe.Pointer(dest)) = c
		dest++
		src++
		c = *(*int8)(unsafe.Pointer(src))
	}
	for ; uintptr(n) > 0; n-- {
		*(*int8)(unsafe.Pointer(dest)) = 0
		dest++
	}
	return r
}

// char * __builtin___strncpy_chk (char *dest, const char *src, size_t n, size_t os);
func X__builtin___strncpy_chk(t *TLS, dest, src uintptr, n, os types.Size_t) (r uintptr) {
	if n != ^types.Size_t(0) && os < n {
		Xabort(t)
	}
	return Xstrncpy(t, dest, src, n)
}

// int strcmp(const char *s1, const char *s2)
func Xstrcmp(t *TLS, s1, s2 uintptr) int32 {
	for {
		ch1 := *(*byte)(unsafe.Pointer(s1))
		s1++
		ch2 := *(*byte)(unsafe.Pointer(s2))
		s2++
		if ch1 != ch2 || ch1 == 0 || ch2 == 0 {
			return int32(ch1) - int32(ch2)
		}
	}
}

// size_t strlen(const char *s)
func Xstrlen(t *TLS, s uintptr) (r types.Size_t) {
	if s == 0 {
		return 0
	}
	for ; *(*int8)(unsafe.Pointer(s)) != 0; s++ {
		r++
	}
	return r
}

// char *strcat(char *dest, const char *src)
func Xstrcat(t *TLS, dest, src uintptr) (r uintptr) {
	r = dest
	for *(*int8)(unsafe.Pointer(dest)) != 0 {
		dest++
	}
	for {
		c := *(*int8)(unsafe.Pointer(src))
		src++
		*(*int8)(unsafe.Pointer(dest)) = c
		dest++
		if c == 0 {
			return r
		}
	}
}

// char * __builtin___strcat_chk (char *dest, const char *src, size_t os);
func X__builtin___strcat_chk(t *TLS, dest, src uintptr, os types.Size_t) (r uintptr) {
	return Xstrcat(t, dest, src)
}

// int strncmp(const char *s1, const char *s2, size_t n)
func Xstrncmp(t *TLS, s1, s2 uintptr, n types.Size_t) int32 {
	var ch1, ch2 byte
	for ; n != 0; n-- {
		ch1 = *(*byte)(unsafe.Pointer(s1))
		s1++
		ch2 = *(*byte)(unsafe.Pointer(s2))
		s2++
		if ch1 != ch2 {
			return int32(ch1) - int32(ch2)
		}
		if ch1 == 0 {
			return 0
		}
	}
	return 0
}

// char *strcpy(char *dest, const char *src)
func Xstrcpy(t *TLS, dest, src uintptr) (r uintptr) {
	r = dest
	// src0 := src
	for ; ; dest++ {
		c := *(*int8)(unsafe.Pointer(src))
		src++
		*(*int8)(unsafe.Pointer(dest)) = c
		if c == 0 {
			return r
		}
	}
}

// char *strchr(const char *s, int c)
func Xstrchr(t *TLS, s uintptr, c int32) uintptr {
	for {
		ch2 := *(*byte)(unsafe.Pointer(s))
		if ch2 == byte(c) {
			return s
		}
		if ch2 == 0 {
			return 0
		}
		s++
	}
}

// char *strrchr(const char *s, int c)
func Xstrrchr(t *TLS, s uintptr, c int32) (r uintptr) {
	for {
		ch2 := *(*byte)(unsafe.Pointer(s))
		if ch2 == 0 {
			return r
		}
		if ch2 == byte(c) {
			r = s
		}
		s++
	}
}

// void *memset(void *s, int c, size_t n)
func Xmemset(t *TLS, s uintptr, c int32, n types.Size_t) uintptr {
	if n != 0 {
		c := byte(c & 0xff)
		// On platforms where the pointer is not suitably aligned, clear out the
		// first few bytes before switching to word-sized stores.
		bytesBeforeAlignment := s % unsafe.Alignof(uint64(0))
		if bytesBeforeAlignment > uintptr(n) {
			bytesBeforeAlignment = uintptr(n)
		}
		b := (*RawMem)(unsafe.Pointer(s))[:bytesBeforeAlignment:bytesBeforeAlignment]
		n -= types.Size_t(bytesBeforeAlignment)
		for i := range b {
			b[i] = c
		}
		if n >= 8 {
			i64 := uint64(c) + uint64(c)<<8 + uint64(c)<<16 + uint64(c)<<24 + uint64(c)<<32 + uint64(c)<<40 + uint64(c)<<48 + uint64(c)<<56
			b8 := (*RawMem64)(unsafe.Pointer(s + bytesBeforeAlignment))[: n/8 : n/8]
			for i := range b8 {
				b8[i] = i64
			}
		}
		if n%8 != 0 {
			b = (*RawMem)(unsafe.Pointer(s + bytesBeforeAlignment + uintptr(n-n%8)))[: n%8 : n%8]
			for i := range b {
				b[i] = c
			}
		}
	}
	return s
}

// void *memcpy(void *dest, const void *src, size_t n);
func Xmemcpy(t *TLS, dest, src uintptr, n types.Size_t) (r uintptr) {
	if n != 0 {
		copy((*RawMem)(unsafe.Pointer(dest))[:n:n], (*RawMem)(unsafe.Pointer(src))[:n:n])
	}
	return dest
}

// int memcmp(const void *s1, const void *s2, size_t n);
func Xmemcmp(t *TLS, s1, s2 uintptr, n types.Size_t) int32 {
	for ; n != 0; n-- {
		c1 := *(*byte)(unsafe.Pointer(s1))
		s1++
		c2 := *(*byte)(unsafe.Pointer(s2))
		s2++
		if c1 < c2 {
			return -1
		}
		if c1 > c2 {
			return 1
		}
	}
	return 0
}

// void *memchr(const void *s, int c, size_t n);
func Xmemchr(t *TLS, s uintptr, c int32, n types.Size_t) uintptr {
	for ; n != 0; n-- {
		if *(*byte)(unsafe.Pointer(s)) == byte(c) {
			return s
		}
		s++
	}
	return 0
}

// void *memmove(void *dest, const void *src, size_t n);
func Xmemmove(t *TLS, dest, src uintptr, n types.Size_t) uintptr {
	if n == 0 {
		return dest
	}
	copy((*RawMem)(unsafe.Pointer(uintptr(dest)))[:n:n], (*RawMem)(unsafe.Pointer(uintptr(src)))[:n:n])
	return dest
}

// void * __builtin___memmove_chk (void *dest, const void *src, size_t n, size_t os);
func X__builtin___memmove_chk(t *TLS, dest, src uintptr, n, os types.Size_t) uintptr {
	if os != ^types.Size_t(0) && os < n {
		Xabort(t)
	}
	return Xmemmove(t, dest, src, n)
}

// char *getenv(const char *name);
func Xgetenv(t *TLS, name uintptr) uintptr {
	return getenv(Environ(), GoString(name))
}

func getenv(p uintptr, nm string) uintptr {
	for ; ; p += uintptrSize {
		q := *(*uintptr)(unsafe.Pointer(p))
		if q == 0 {
			return 0
		}
		s := GoString(q)
		a := strings.SplitN(s, "=", 2)
		if len(a) != 2 {
			panic(todo("%q %q %q", nm, s, a))
		}
		if a[0] == nm {
			return q + uintptr(len(nm)) + 1
		}
	}
}

// char *strstr(const char *haystack, const char *needle);
func Xstrstr(t *TLS, haystack, needle uintptr) uintptr {
	hs := GoString(haystack)
	nd := GoString(needle)
	if i := strings.Index(hs, nd); i >= 0 {
		r := haystack + uintptr(i)
		return r
	}
	return 0
}

// int putc(int c, FILE *stream);
func Xputc(t *TLS, c int32, fp uintptr) int32 {
	return Xfputc(t, c, fp)
}

// int atoi(const char *nptr);
func Xatoi(t *TLS, nptr uintptr) int32 {
	_, neg, _, n, _ := strToUint64(t, nptr, 10)
	switch {
	case neg:
		return int32(-n)
	default:
		return int32(n)
	}
}

// double atof(const char *nptr);
func Xatof(t *TLS, nptr uintptr) float64 {
	n, _ := strToFloatt64(t, nptr, 64)
	// if dmesgs {
	// 	dmesg("%v: %q: %v", origin(1), GoString(nptr), n)
	// }
	return n
}

// int tolower(int c);
func Xtolower(t *TLS, c int32) int32 {
	if c >= 'A' && c <= 'Z' {
		return c + ('a' - 'A')
	}
	return c
}

// int toupper(int c);
func Xtoupper(t *TLS, c int32) int32 {
	if c >= 'a' && c <= 'z' {
		return c - ('a' - 'A')
	}
	return c
}

// int isatty(int fd);
func Xisatty(t *TLS, fd int32) int32 {
	return Bool32(isatty.IsTerminal(uintptr(fd)))
}

// long atol(const char *nptr);
func Xatol(t *TLS, nptr uintptr) long {
	_, neg, _, n, _ := strToUint64(t, nptr, 10)
	switch {
	case neg:
		return long(-n)
	default:
		return long(n)
	}
}

// time_t mktime(struct tm *tm);
func Xmktime(t *TLS, ptm uintptr) time.Time_t {
	loc := gotime.Local
	if r := getenv(Environ(), "TZ"); r != 0 {
		zone, off := parseZone(GoString(r))
		loc = gotime.FixedZone(zone, off)
	}
	tt := gotime.Date(
		int((*time.Tm)(unsafe.Pointer(ptm)).Ftm_year+1900),
		gotime.Month((*time.Tm)(unsafe.Pointer(ptm)).Ftm_mon+1),
		int((*time.Tm)(unsafe.Pointer(ptm)).Ftm_mday),
		int((*time.Tm)(unsafe.Pointer(ptm)).Ftm_hour),
		int((*time.Tm)(unsafe.Pointer(ptm)).Ftm_min),
		int((*time.Tm)(unsafe.Pointer(ptm)).Ftm_sec),
		0,
		loc,
	)
	(*time.Tm)(unsafe.Pointer(ptm)).Ftm_wday = int32(tt.Weekday())
	(*time.Tm)(unsafe.Pointer(ptm)).Ftm_yday = int32(tt.YearDay() - 1)
	return time.Time_t(tt.Unix())
}
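
// Illustrative sketch, not part of the original file: filling a struct tm in
// C-side memory and converting it with Xmktime. The field values are
// assumptions for the example only; note the usual C conventions of tm_year
// counting from 1900 and tm_mon counting from 0.
func exampleMktime(t *TLS) time.Time_t {
	p := Xcalloc(t, 1, types.Size_t(unsafe.Sizeof(time.Tm{})))
	if p == 0 {
		panic("OOM")
	}
	defer Xfree(t, p)
	tm := (*time.Tm)(unsafe.Pointer(p))
	tm.Ftm_year = 2020 - 1900 // years since 1900
	tm.Ftm_mon = 0            // January
	tm.Ftm_mday = 2
	tm.Ftm_hour = 3
	tm.Ftm_min = 4
	tm.Ftm_sec = 5
	return Xmktime(t, p) // also fills in Ftm_wday and Ftm_yday
}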
// char *strpbrk(const char *s, const char *accept);
func Xstrpbrk(t *TLS, s, accept uintptr) uintptr {
	bits := newBits(256)
	for {
		b := *(*byte)(unsafe.Pointer(accept))
		if b == 0 {
			break
		}
		bits.set(int(b))
		accept++
	}
	for {
		b := *(*byte)(unsafe.Pointer(s))
		if b == 0 {
			return 0
		}
		if bits.has(int(b)) {
			return s
		}
		s++
	}
}

// int strcasecmp(const char *s1, const char *s2);
func Xstrcasecmp(t *TLS, s1, s2 uintptr) int32 {
	for {
		ch1 := *(*byte)(unsafe.Pointer(s1))
		if ch1 >= 'a' && ch1 <= 'z' {
			ch1 = ch1 - ('a' - 'A')
		}
		s1++
		ch2 := *(*byte)(unsafe.Pointer(s2))
		if ch2 >= 'a' && ch2 <= 'z' {
			ch2 = ch2 - ('a' - 'A')
		}
		s2++
		if ch1 != ch2 || ch1 == 0 || ch2 == 0 {
			r := int32(ch1) - int32(ch2)
			return r
		}
	}
}

func Xntohs(t *TLS, netshort uint16) uint16 {
	return uint16((*[2]byte)(unsafe.Pointer(&netshort))[0])<<8 | uint16((*[2]byte)(unsafe.Pointer(&netshort))[1])
}

// uint16_t htons(uint16_t hostshort);
func Xhtons(t *TLS, hostshort uint16) uint16 {
	var a [2]byte
	a[0] = byte(hostshort >> 8)
	a[1] = byte(hostshort)
	return *(*uint16)(unsafe.Pointer(&a))
}

// uint32_t htonl(uint32_t hostlong);
func Xhtonl(t *TLS, hostlong uint32) uint32 {
	var a [4]byte
	a[0] = byte(hostlong >> 24)
	a[1] = byte(hostlong >> 16)
	a[2] = byte(hostlong >> 8)
	a[3] = byte(hostlong)
	return *(*uint32)(unsafe.Pointer(&a))
}
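
// Illustrative sketch, not part of the original file: on a little-endian host
// Xhtons reorders the bytes and Xntohs undoes it, while on a big-endian host
// both are identities, so the round trip always returns the original value.
// The port number is an assumption for the example only.
func exampleByteOrder(t *TLS) bool {
	const port = uint16(8080)
	return Xntohs(t, Xhtons(t, port)) == port
}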
// FILE *fopen(const char *pathname, const char *mode);
func Xfopen(t *TLS, pathname, mode uintptr) uintptr {
	return Xfopen64(t, pathname, mode) //TODO 32 bit
}

func Dmesg(s string, args ...interface{}) {
	if dmesgs {
		dmesg(s, args...)
	}
}

// void sqlite3_log(int iErrCode, const char *zFormat, ...);
func X__ccgo_sqlite3_log(t *TLS, iErrCode int32, zFormat uintptr, args uintptr) {
	// if dmesgs {
	// 	dmesg("%v: iErrCode: %v, msg: %s\n%s", origin(1), iErrCode, printf(zFormat, args), debug.Stack())
	// }
}

// int _IO_putc(int __c, _IO_FILE *__fp);
func X_IO_putc(t *TLS, c int32, fp uintptr) int32 {
	return Xputc(t, c, fp)
}

// int atexit(void (*function)(void));
func Xatexit(t *TLS, function uintptr) int32 {
	AtExit(func() {
		(*struct{ f func(*TLS) })(unsafe.Pointer(&struct{ uintptr }{function})).f(t)
	})
	return 0
}

// int vasprintf(char **strp, const char *fmt, va_list ap);
func Xvasprintf(t *TLS, strp, fmt, ap uintptr) int32 {
	panic(todo(""))
}

func AtomicLoadInt32(addr *int32) (val int32) { return atomic.LoadInt32(addr) }
func AtomicLoadInt64(addr *int64) (val int64) { return atomic.LoadInt64(addr) }
func AtomicLoadUint32(addr *uint32) (val uint32) { return atomic.LoadUint32(addr) }
func AtomicLoadUint64(addr *uint64) (val uint64) { return atomic.LoadUint64(addr) }
func AtomicLoadUintptr(addr *uintptr) (val uintptr) { return atomic.LoadUintptr(addr) }

func AtomicLoadFloat32(addr *float32) (val float32) {
	return math.Float32frombits(atomic.LoadUint32((*uint32)(unsafe.Pointer(addr))))
}

func AtomicLoadFloat64(addr *float64) (val float64) {
	return math.Float64frombits(atomic.LoadUint64((*uint64)(unsafe.Pointer(addr))))
}

func AtomicLoadPInt32(addr uintptr) (val int32) {
	return atomic.LoadInt32((*int32)(unsafe.Pointer(addr)))
}

func AtomicLoadPInt64(addr uintptr) (val int64) {
	return atomic.LoadInt64((*int64)(unsafe.Pointer(addr)))
}

func AtomicLoadPUint32(addr uintptr) (val uint32) {
	return atomic.LoadUint32((*uint32)(unsafe.Pointer(addr)))
}

func AtomicLoadPUint64(addr uintptr) (val uint64) {
	return atomic.LoadUint64((*uint64)(unsafe.Pointer(addr)))
}

func AtomicLoadPUintptr(addr uintptr) (val uintptr) {
	return atomic.LoadUintptr((*uintptr)(unsafe.Pointer(addr)))
}

func AtomicLoadPFloat32(addr uintptr) (val float32) {
	return math.Float32frombits(atomic.LoadUint32((*uint32)(unsafe.Pointer(addr))))
}

func AtomicLoadPFloat64(addr uintptr) (val float64) {
	return math.Float64frombits(atomic.LoadUint64((*uint64)(unsafe.Pointer(addr))))
}

func AtomicStoreInt32(addr *int32, val int32) { atomic.StoreInt32(addr, val) }
func AtomicStoreInt64(addr *int64, val int64) { atomic.StoreInt64(addr, val) }
func AtomicStoreUint32(addr *uint32, val uint32) { atomic.StoreUint32(addr, val) }
func AtomicStoreUint64(addr *uint64, val uint64) { atomic.StoreUint64(addr, val) }
func AtomicStoreUintptr(addr *uintptr, val uintptr) { atomic.StoreUintptr(addr, val) }

func AtomicStoreFloat32(addr *float32, val float32) {
	atomic.StoreUint32((*uint32)(unsafe.Pointer(addr)), math.Float32bits(val))
}

func AtomicStoreFloat64(addr *float64, val float64) {
	atomic.StoreUint64((*uint64)(unsafe.Pointer(addr)), math.Float64bits(val))
}

func AtomicStorePInt32(addr uintptr, val int32) {
	atomic.StoreInt32((*int32)(unsafe.Pointer(addr)), val)
}

func AtomicStorePInt64(addr uintptr, val int64) {
	atomic.StoreInt64((*int64)(unsafe.Pointer(addr)), val)
}

func AtomicStorePUint32(addr uintptr, val uint32) {
	atomic.StoreUint32((*uint32)(unsafe.Pointer(addr)), val)
}

func AtomicStorePUint64(addr uintptr, val uint64) {
	atomic.StoreUint64((*uint64)(unsafe.Pointer(addr)), val)
}

func AtomicStorePUintptr(addr uintptr, val uintptr) {
	atomic.StoreUintptr((*uintptr)(unsafe.Pointer(addr)), val)
}

func AtomicStorePFloat32(addr uintptr, val float32) {
	atomic.StoreUint32((*uint32)(unsafe.Pointer(addr)), math.Float32bits(val))
}

func AtomicStorePFloat64(addr uintptr, val float64) {
	atomic.StoreUint64((*uint64)(unsafe.Pointer(addr)), math.Float64bits(val))
}

func AtomicAddInt32(addr *int32, delta int32) (new int32) { return atomic.AddInt32(addr, delta) }
func AtomicAddInt64(addr *int64, delta int64) (new int64) { return atomic.AddInt64(addr, delta) }
func AtomicAddUint32(addr *uint32, delta uint32) (new uint32) { return atomic.AddUint32(addr, delta) }
func AtomicAddUint64(addr *uint64, delta uint64) (new uint64) { return atomic.AddUint64(addr, delta) }

func AtomicAddUintptr(addr *uintptr, delta uintptr) (new uintptr) {
	return atomic.AddUintptr(addr, delta)
}

func AtomicAddFloat32(addr *float32, delta float32) (new float32) {
	v := AtomicLoadFloat32(addr) + delta
	AtomicStoreFloat32(addr, v)
	return v
}

func AtomicAddFloat64(addr *float64, delta float64) (new float64) {
	v := AtomicLoadFloat64(addr) + delta
	AtomicStoreFloat64(addr, v)
	return v
}

// size_t mbstowcs(wchar_t *dest, const char *src, size_t n);
func Xmbstowcs(t *TLS, dest, src uintptr, n types.Size_t) types.Size_t {
	panic(todo(""))
}

// int mbtowc(wchar_t *pwc, const char *s, size_t n);
func Xmbtowc(t *TLS, pwc, s uintptr, n types.Size_t) int32 {
	panic(todo(""))
}

// size_t __ctype_get_mb_cur_max(void);
func X__ctype_get_mb_cur_max(t *TLS) types.Size_t {
	panic(todo(""))
}

// int wctomb(char *s, wchar_t wc);
func Xwctomb(t *TLS, s uintptr, wc wchar_t) int32 {
	panic(todo(""))
}

// int mblen(const char *s, size_t n);
func Xmblen(t *TLS, s uintptr, n types.Size_t) int32 {
	panic(todo(""))
}

// ssize_t readv(int fd, const struct iovec *iov, int iovcnt);
func Xreadv(t *TLS, fd int32, iov uintptr, iovcnt int32) types.Ssize_t {
	panic(todo(""))
}

// int openpty(int *amaster, int *aslave, char *name,
//
//	const struct termios *termp,
//	const struct winsize *winp);
func Xopenpty(t *TLS, amaster, aslave, name, termp, winp uintptr) int32 {
	panic(todo(""))
}

// pid_t setsid(void);
func Xsetsid(t *TLS) types.Pid_t {
	panic(todo(""))
}

// int pselect(int nfds, fd_set *readfds, fd_set *writefds,
//
//	fd_set *exceptfds, const struct timespec *timeout,
//	const sigset_t *sigmask);
func Xpselect(t *TLS, nfds int32, readfds, writefds, exceptfds, timeout, sigmask uintptr) int32 {
	panic(todo(""))
}

// int kill(pid_t pid, int sig);
func Xkill(t *TLS, pid types.Pid_t, sig int32) int32 {
	panic(todo(""))
}

// int tcsendbreak(int fd, int duration);
func Xtcsendbreak(t *TLS, fd, duration int32) int32 {
	panic(todo(""))
}

// int wcwidth(wchar_t c);
func Xwcwidth(t *TLS, c wchar_t) int32 {
	panic(todo(""))
}

// int clock_gettime(clockid_t clk_id, struct timespec *tp);
func Xclock_gettime(t *TLS, clk_id int32, tp uintptr) int32 {
	panic(todo(""))
}

// AtExit will attempt to run f at process exit. Execution is not guaranteed,
// nor is its ordering with respect to any other handlers registered by AtExit.
func AtExit(f func()) {
	atExitMu.Lock()
	atExit = append(atExit, f)
	atExitMu.Unlock()
}
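
// Illustrative sketch, not part of the original file: registering a Go handler
// directly with AtExit. Handlers run when a ccgo program terminates through
// the exit function above; they are skipped if the process is killed or calls
// X_exit directly.
func exampleAtExit() {
	AtExit(func() {
		fmt.Fprintln(os.Stderr, "process is exiting")
	})
}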
func X__ccgo_dmesg(t *TLS, fmt uintptr, va uintptr) {
	if dmesgs {
		dmesg("%s", printf(fmt, va))
	}
}

// int getentropy(void *buffer, size_t length);
//
// The getentropy() function writes length bytes of high-quality random data
// to the buffer starting at the location pointed to by buffer. The maximum
// permitted value for the length argument is 256.
func Xgetentropy(t *TLS, buffer uintptr, length size_t) int32 {
	const max = 256
	switch {
	case length == 0:
		return 0
	case buffer == 0:
		t.setErrno(errno.EFAULT)
		return -1
	case length > max:
		t.setErrno(errno.EIO)
		return -1
	}
	if _, err := crand.Read((*RawMem)(unsafe.Pointer(buffer))[:length]); err != nil {
		t.setErrno(errno.EIO)
		return -1
	}
	return 0
}
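
// Illustrative sketch, not part of the original file: requesting entropy into
// a C-side buffer. The 32-byte request size is an assumption for the example;
// anything above 256 bytes fails with EIO, as implemented above.
func exampleGetentropy(t *TLS) []byte {
	const n = 32
	buf := Xmalloc(t, n)
	if buf == 0 {
		panic("OOM")
	}
	defer Xfree(t, buf)
	if Xgetentropy(t, buf, n) != 0 {
		return nil // errno has been set to EFAULT or EIO
	}
	// Copy the random bytes to Go-managed memory before freeing the C buffer.
	out := make([]byte, n)
	copy(out, (*RawMem)(unsafe.Pointer(buf))[:n:n])
	return out
}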
// void * reallocarray(void *ptr, size_t nmemb, size_t size);
func Xreallocarray(t *TLS, ptr uintptr, nmemb, size size_t) uintptr {
	hi, lo := mathutil.MulUint128_64(uint64(nmemb), uint64(size))
	if hi != 0 || lo > uint64(unsafe.Sizeof(RawMem{})) {
		t.setErrno(errno.ENOMEM)
		return 0
	}
	return Xrealloc(t, ptr, size_t(lo))
}

// int setjmp(jmp_buf env);
func Xsetjmp(t *TLS, env uintptr) int32 {
	return 0 //TODO
}

// void longjmp(jmp_buf env, int val);
func Xlongjmp(t *TLS, env uintptr, val int32) {
	panic(todo(""))
}

// https://linux.die.net/man/3/_setjmp
//
// The _longjmp() and _setjmp() functions shall be equivalent to longjmp() and
// setjmp(), respectively, with the additional restriction that _longjmp() and
// _setjmp() shall not manipulate the signal mask.

// int _setjmp(jmp_buf env);
func X_setjmp(t *TLS, env uintptr) int32 {
	return 0 //TODO
}

// void _longjmp(jmp_buf env, int val);
func X_longjmp(t *TLS, env uintptr, val int32) {
	panic(todo(""))
}

// unsigned __sync_add_and_fetch_uint32(*unsigned, unsigned)
func X__sync_add_and_fetch_uint32(t *TLS, p uintptr, v uint32) uint32 {
	return atomic.AddUint32((*uint32)(unsafe.Pointer(p)), v)
}

// unsigned __sync_sub_and_fetch_uint32(*unsigned, unsigned)
func X__sync_sub_and_fetch_uint32(t *TLS, p uintptr, v uint32) uint32 {
	return atomic.AddUint32((*uint32)(unsafe.Pointer(p)), -v)
}

// int sched_yield(void);
func Xsched_yield(t *TLS) {
	runtime.Gosched()
}

// int getc(FILE *stream);
func Xgetc(t *TLS, stream uintptr) int32 {
	return Xfgetc(t, stream)
}

// char *fgets(char *s, int size, FILE *stream);
func Xfgets(t *TLS, s uintptr, size int32, stream uintptr) uintptr {
	var b []byte
out:
	for ; size > 0; size-- {
		switch c := Xfgetc(t, stream); c {
		case '\n':
			b = append(b, byte(c))
			break out
		case stdio.EOF:
			break out
		default:
			b = append(b, byte(c))
		}
	}
	if len(b) == 0 {
		return 0
	}
	b = append(b, 0)
	copy((*RawMem)(unsafe.Pointer(s))[:len(b):len(b)], b)
	return s
}

// void bzero(void *s, size_t n);
func Xbzero(t *TLS, s uintptr, n types.Size_t) {
	b := (*RawMem)(unsafe.Pointer(s))[:n]
	for i := range b {
		b[i] = 0
	}
}

// char *rindex(const char *s, int c);
func Xrindex(t *TLS, s uintptr, c int32) uintptr {
	if s == 0 {
		return 0
	}
	var r uintptr
	for {
		c2 := int32(*(*byte)(unsafe.Pointer(s)))
		if c2 == c {
			r = s
		}
		if c2 == 0 {
			return r
		}
		s++
	}
}

// int isascii(int c);
func Xisascii(t *TLS, c int32) int32 {
	return Bool32(c >= 0 && c <= 0x7f)
}

func X__builtin_isunordered(t *TLS, a, b float64) int32 {
	return Bool32(math.IsNaN(a) || math.IsNaN(b))
}