  1. #pragma once
  2. /* Copyright (c) 2005-2008, Google Inc.
  3. * All rights reserved.
  4. *
  5. * Redistribution and use in source and binary forms, with or without
  6. * modification, are permitted provided that the following conditions are
  7. * met:
  8. *
  9. * * Redistributions of source code must retain the above copyright
  10. * notice, this list of conditions and the following disclaimer.
  11. * * Redistributions in binary form must reproduce the above
  12. * copyright notice, this list of conditions and the following disclaimer
  13. * in the documentation and/or other materials provided with the
  14. * distribution.
  15. * * Neither the name of Google Inc. nor the names of its
  16. * contributors may be used to endorse or promote products derived from
  17. * this software without specific prior written permission.
  18. *
  19. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  20. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  21. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  22. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  23. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  24. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  25. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  26. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  27. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  28. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  29. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  30. *
  31. * ---
  32. * Author: Markus Gutschke
  33. */
  34. /* This file includes Linux-specific support functions common to the
  35. * coredumper and the thread lister; primarily, this is a collection
  36. * of direct system calls, and a couple of symbols missing from
  37. * standard header files.
  38. * There are a few options that the including file can set to control
  39. * the behavior of this file:
  40. *
  41. * SYS_CPLUSPLUS:
  42. * The entire header file will normally be wrapped in 'extern "C" { }",
  43. * making it suitable for compilation as both C and C++ source. If you
  44. * do not want to do this, you can set the SYS_CPLUSPLUS macro to inhibit
  45. * the wrapping. N.B. doing so will suppress inclusion of all prerequisite
  46. * system header files, too. It is the caller's responsibility to provide
  47. * the necessary definitions.
  48. *
  49. * SYS_ERRNO:
  50. * All system calls will update "errno" unless overridden by setting the
  51. * SYS_ERRNO macro prior to including this file. SYS_ERRNO should be
  52. * an l-value.
  53. *
  54. * SYS_INLINE:
  55. * New symbols will be defined "static inline", unless overridden by
  56. * the SYS_INLINE macro.
  57. *
  58. * SYS_LINUX_SYSCALL_SUPPORT_H
  59. * This macro is used to avoid multiple inclusions of this header file.
  60. * If you need to include this file more than once, make sure to
  61. * unset SYS_LINUX_SYSCALL_SUPPORT_H before each inclusion.
  62. *
  63. * SYS_PREFIX:
  64. * New system calls will have a prefix of "sys_" unless overridden by
  65. * the SYS_PREFIX macro. Valid values for this macro are [0..9] which
  66. * results in prefixes "sys[0..9]_". It is also possible to set this
  67. * macro to -1, which avoids all prefixes.
  68. *
  69. * This file defines a few internal symbols that all start with "LSS_".
  70. * Do not access these symbols from outside this file. They are not part
  71. * of the supported API.
  72. *
  73. * NOTE: This is a stripped down version of the official opensource
  74. * version of linux_syscall_support.h, which lives at
  75. * http://code.google.com/p/linux-syscall-support/
  76. * It includes only the syscalls that are used in perftools, plus a
  77. * few extra. Here's the breakdown:
  78. * 1) Perftools uses these: grep -rho 'sys_[a-z0-9_A-Z]* *(' src | sort -u
  79. * sys__exit(
  80. * sys_clone(
  81. * sys_close(
  82. * sys_fcntl(
  83. * sys_fstat(
  84. * sys_futex(
  85. * sys_futex1(
  86. * sys_getcpu(
  87. * sys_getdents(
  88. * sys_getppid(
  89. * sys_gettid(
  90. * sys_lseek(
  91. * sys_mmap(
  92. * sys_mremap(
  93. * sys_munmap(
  94. * sys_open(
  95. * sys_pipe(
  96. * sys_prctl(
  97. * sys_ptrace(
  98. * sys_ptrace_detach(
  99. * sys_read(
  100. * sys_sched_yield(
  101. * sys_sigaction(
  102. * sys_sigaltstack(
  103. * sys_sigdelset(
  104. * sys_sigfillset(
  105. * sys_sigprocmask(
  106. * sys_socket(
  107. * sys_stat(
  108. * sys_waitpid(
  109. * 2) These are used as subroutines of the above:
  110. * sys_getpid -- gettid
  111. * sys_kill -- ptrace_detach
  112. * sys_restore -- sigaction
  113. * sys_restore_rt -- sigaction
  114. * sys_socketcall -- socket
  115. * sys_wait4 -- waitpid
  116. * 3) I left these in even though they're not used. They either
  117. * complement the above (write vs read) or are variants (rt_sigaction):
  118. * sys_fstat64
  119. * sys_getdents64
  120. * sys_llseek
  121. * sys_mmap2
  122. * sys_openat
  123. * sys_rt_sigaction
  124. * sys_rt_sigprocmask
  125. * sys_sigaddset
  126. * sys_sigemptyset
  127. * sys_stat64
  128. * sys_write
  129. */
  130. #ifndef SYS_LINUX_SYSCALL_SUPPORT_H
  131. #define SYS_LINUX_SYSCALL_SUPPORT_H
  132. /* We currently only support x86-32, x86-64, ARM, MIPS, and PPC on Linux.
  133. * Porting to other related platforms should not be difficult.
  134. */
  135. #if (defined(__i386__) || defined(__x86_64__) || defined(__arm__) || \
  136. defined(__mips__) || defined(__PPC__)) && defined(__linux)
  137. #ifndef SYS_CPLUSPLUS
  138. #ifdef __cplusplus
  139. /* Some system header files in older versions of gcc neglect to properly
  140. * handle being included from C++. As it appears to be harmless to have
  141. * multiple nested 'extern "C"' blocks, just add another one here.
  142. */
  143. extern "C" {
  144. #endif
  145. #include <errno.h>
  146. #include <signal.h>
  147. #include <stdarg.h>
  148. #include <string.h>
  149. #include <sys/ptrace.h>
  150. #include <sys/resource.h>
  151. #include <sys/time.h>
  152. #include <sys/types.h>
  153. #include <syscall.h>
  154. #include <unistd.h>
  155. #include <linux/unistd.h>
  156. #include <endian.h>
  157. #ifdef __mips__
  158. /* Include definitions of the ABI currently in use. */
  159. #include <sgidefs.h>
  160. #endif
  161. #endif
  162. /* As glibc often provides subtly incompatible data structures (and implicit
  163. * wrapper functions that convert them), we provide our own kernel data
  164. * structures for use by the system calls.
  165. * These structures have been developed by using Linux 2.6.23 headers for
  166. * reference. Note though, we do not care about exact API compatibility
  167. * with the kernel, and in fact the kernel often does not have a single
  168. * API that works across architectures. Instead, we try to mimic the glibc
  169. * API where reasonable, and only guarantee ABI compatibility with the
  170. * kernel headers.
  171. * Most notably, here are a few changes that were made to the structures
  172. * defined by kernel headers:
  173. *
  174. * - we only define structures, but not symbolic names for kernel data
  175. * types. For the latter, we directly use the native C datatype
  176. * (i.e. "unsigned" instead of "mode_t").
  177. * - in a few cases, it is possible to define identical structures for
  178. * both 32bit (e.g. i386) and 64bit (e.g. x86-64) platforms by
  179. * standardizing on the 64bit version of the data types. In particular,
  180. * this means that we use "unsigned" where the 32bit headers say
  181. * "unsigned long".
  182. * - overall, we try to minimize the number of cases where we need to
  183. * conditionally define different structures.
  184. * - the "struct kernel_sigaction" class of structures have been
  185. * modified to more closely mimic glibc's API by introducing an
  186. * anonymous union for the function pointer.
  187. * - a small number of field names had to have an underscore appended to
  188. * them, because glibc defines a global macro by the same name.
  189. */
  190. /* include/linux/dirent.h */
  191. struct kernel_dirent64 {
  192. unsigned long long d_ino;
  193. long long d_off;
  194. unsigned short d_reclen;
  195. unsigned char d_type;
  196. char d_name[256];
  197. };
  198. /* include/linux/dirent.h */
  199. struct kernel_dirent {
  200. long d_ino;
  201. long d_off;
  202. unsigned short d_reclen;
  203. char d_name[256];
  204. };
  205. /* include/linux/time.h */
  206. struct kernel_timespec {
  207. long tv_sec;
  208. long tv_nsec;
  209. };
  210. /* include/linux/time.h */
  211. struct kernel_timeval {
  212. long tv_sec;
  213. long tv_usec;
  214. };
  215. /* include/linux/resource.h */
  216. struct kernel_rusage {
  217. struct kernel_timeval ru_utime;
  218. struct kernel_timeval ru_stime;
  219. long ru_maxrss;
  220. long ru_ixrss;
  221. long ru_idrss;
  222. long ru_isrss;
  223. long ru_minflt;
  224. long ru_majflt;
  225. long ru_nswap;
  226. long ru_inblock;
  227. long ru_oublock;
  228. long ru_msgsnd;
  229. long ru_msgrcv;
  230. long ru_nsignals;
  231. long ru_nvcsw;
  232. long ru_nivcsw;
  233. };
  234. struct siginfo;
  235. #if defined(__i386__) || defined(__arm__) || defined(__PPC__)
  236. /* include/asm-{arm,i386,mips,ppc}/signal.h */
  237. struct kernel_old_sigaction {
  238. union {
  239. void (*sa_handler_)(int);
  240. void (*sa_sigaction_)(int, struct siginfo *, void *);
  241. };
  242. unsigned long sa_mask;
  243. unsigned long sa_flags;
  244. void (*sa_restorer)(void);
  245. } __attribute__((packed,aligned(4)));
  246. #elif (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32)
  247. #define kernel_old_sigaction kernel_sigaction
  248. #endif
  249. /* Some kernel functions (e.g. sigaction() in 2.6.23) require that the
  250. * exactly match the size of the signal set, even though the API was
  251. * intended to be extensible. We define our own KERNEL_NSIG to deal with
  252. * this.
  253. * Please note that glibc provides signals [1.._NSIG-1], whereas the
  254. * kernel (and this header) provides the range [1..KERNEL_NSIG]. The
  255. * actual number of signals is obviously the same, but the constants
  256. * differ by one.
  257. */
  258. #ifdef __mips__
  259. #define KERNEL_NSIG 128
  260. #else
  261. #define KERNEL_NSIG 64
  262. #endif
  263. /* include/asm-{arm,i386,mips,x86_64}/signal.h */
  264. struct kernel_sigset_t {
  265. unsigned long sig[(KERNEL_NSIG + 8*sizeof(unsigned long) - 1)/
  266. (8*sizeof(unsigned long))];
  267. };
  268. /* include/asm-{arm,i386,mips,x86_64,ppc}/signal.h */
  269. struct kernel_sigaction {
  270. #ifdef __mips__
  271. unsigned long sa_flags;
  272. union {
  273. void (*sa_handler_)(int);
  274. void (*sa_sigaction_)(int, struct siginfo *, void *);
  275. };
  276. struct kernel_sigset_t sa_mask;
  277. #else
  278. union {
  279. void (*sa_handler_)(int);
  280. void (*sa_sigaction_)(int, struct siginfo *, void *);
  281. };
  282. unsigned long sa_flags;
  283. void (*sa_restorer)(void);
  284. struct kernel_sigset_t sa_mask;
  285. #endif
  286. };
  287. /* include/asm-{arm,i386,mips,ppc}/stat.h */
  288. #ifdef __mips__
  289. #if _MIPS_SIM == _MIPS_SIM_ABI64
  290. struct kernel_stat {
  291. #else
  292. struct kernel_stat64 {
  293. #endif
  294. unsigned st_dev;
  295. unsigned __pad0[3];
  296. unsigned long long st_ino;
  297. unsigned st_mode;
  298. unsigned st_nlink;
  299. unsigned st_uid;
  300. unsigned st_gid;
  301. unsigned st_rdev;
  302. unsigned __pad1[3];
  303. long long st_size;
  304. unsigned st_atime_;
  305. unsigned st_atime_nsec_;
  306. unsigned st_mtime_;
  307. unsigned st_mtime_nsec_;
  308. unsigned st_ctime_;
  309. unsigned st_ctime_nsec_;
  310. unsigned st_blksize;
  311. unsigned __pad2;
  312. unsigned long long st_blocks;
  313. };
  314. #elif defined __PPC__
  315. struct kernel_stat64 {
  316. unsigned long long st_dev;
  317. unsigned long long st_ino;
  318. unsigned st_mode;
  319. unsigned st_nlink;
  320. unsigned st_uid;
  321. unsigned st_gid;
  322. unsigned long long st_rdev;
  323. unsigned short int __pad2;
  324. long long st_size;
  325. long st_blksize;
  326. long long st_blocks;
  327. long st_atime_;
  328. unsigned long st_atime_nsec_;
  329. long st_mtime_;
  330. unsigned long st_mtime_nsec_;
  331. long st_ctime_;
  332. unsigned long st_ctime_nsec_;
  333. unsigned long __unused4;
  334. unsigned long __unused5;
  335. };
  336. #else
  337. struct kernel_stat64 {
  338. unsigned long long st_dev;
  339. unsigned char __pad0[4];
  340. unsigned __st_ino;
  341. unsigned st_mode;
  342. unsigned st_nlink;
  343. unsigned st_uid;
  344. unsigned st_gid;
  345. unsigned long long st_rdev;
  346. unsigned char __pad3[4];
  347. long long st_size;
  348. unsigned st_blksize;
  349. unsigned long long st_blocks;
  350. unsigned st_atime_;
  351. unsigned st_atime_nsec_;
  352. unsigned st_mtime_;
  353. unsigned st_mtime_nsec_;
  354. unsigned st_ctime_;
  355. unsigned st_ctime_nsec_;
  356. unsigned long long st_ino;
  357. };
  358. #endif
  359. /* include/asm-{arm,i386,mips,x86_64,ppc}/stat.h */
  360. #if defined(__i386__) || defined(__arm__)
  361. struct kernel_stat {
  362. /* The kernel headers suggest that st_dev and st_rdev should be 32bit
  363. * quantities encoding 12bit major and 20bit minor numbers in an interleaved
  364. * format. In reality, we do not see useful data in the top bits. So,
  365. * we'll leave the padding in here, until we find a better solution.
  366. */
  367. unsigned short st_dev;
  368. short pad1;
  369. unsigned st_ino;
  370. unsigned short st_mode;
  371. unsigned short st_nlink;
  372. unsigned short st_uid;
  373. unsigned short st_gid;
  374. unsigned short st_rdev;
  375. short pad2;
  376. unsigned st_size;
  377. unsigned st_blksize;
  378. unsigned st_blocks;
  379. unsigned st_atime_;
  380. unsigned st_atime_nsec_;
  381. unsigned st_mtime_;
  382. unsigned st_mtime_nsec_;
  383. unsigned st_ctime_;
  384. unsigned st_ctime_nsec_;
  385. unsigned __unused4;
  386. unsigned __unused5;
  387. };
  388. #elif defined(__x86_64__)
  389. struct kernel_stat {
  390. unsigned long st_dev;
  391. unsigned long st_ino;
  392. unsigned long st_nlink;
  393. unsigned st_mode;
  394. unsigned st_uid;
  395. unsigned st_gid;
  396. unsigned __pad0;
  397. unsigned long st_rdev;
  398. long st_size;
  399. long st_blksize;
  400. long st_blocks;
  401. unsigned long st_atime_;
  402. unsigned long st_atime_nsec_;
  403. unsigned long st_mtime_;
  404. unsigned long st_mtime_nsec_;
  405. unsigned long st_ctime_;
  406. unsigned long st_ctime_nsec_;
  407. long __unused[3];
  408. };
  409. #elif defined(__PPC__)
  410. struct kernel_stat {
  411. unsigned st_dev;
  412. unsigned long st_ino; // ino_t
  413. unsigned long st_mode; // mode_t
  414. unsigned short st_nlink; // nlink_t
  415. unsigned st_uid; // uid_t
  416. unsigned st_gid; // gid_t
  417. unsigned st_rdev;
  418. long st_size; // off_t
  419. unsigned long st_blksize;
  420. unsigned long st_blocks;
  421. unsigned long st_atime_;
  422. unsigned long st_atime_nsec_;
  423. unsigned long st_mtime_;
  424. unsigned long st_mtime_nsec_;
  425. unsigned long st_ctime_;
  426. unsigned long st_ctime_nsec_;
  427. unsigned long __unused4;
  428. unsigned long __unused5;
  429. };
  430. #elif (defined(__mips__) && _MIPS_SIM != _MIPS_SIM_ABI64)
  431. struct kernel_stat {
  432. unsigned st_dev;
  433. int st_pad1[3];
  434. unsigned st_ino;
  435. unsigned st_mode;
  436. unsigned st_nlink;
  437. unsigned st_uid;
  438. unsigned st_gid;
  439. unsigned st_rdev;
  440. int st_pad2[2];
  441. long st_size;
  442. int st_pad3;
  443. long st_atime_;
  444. long st_atime_nsec_;
  445. long st_mtime_;
  446. long st_mtime_nsec_;
  447. long st_ctime_;
  448. long st_ctime_nsec_;
  449. int st_blksize;
  450. int st_blocks;
  451. int st_pad4[14];
  452. };
  453. #endif
  454. /* Definitions missing from the standard header files */
  455. #ifndef O_DIRECTORY
  456. #if defined(__arm__)
  457. #define O_DIRECTORY 0040000
  458. #else
  459. #define O_DIRECTORY 0200000
  460. #endif
  461. #endif
  462. #ifndef PR_GET_DUMPABLE
  463. #define PR_GET_DUMPABLE 3
  464. #endif
  465. #ifndef PR_SET_DUMPABLE
  466. #define PR_SET_DUMPABLE 4
  467. #endif
  468. #ifndef AT_FDCWD
  469. #define AT_FDCWD (-100)
  470. #endif
  471. #ifndef AT_SYMLINK_NOFOLLOW
  472. #define AT_SYMLINK_NOFOLLOW 0x100
  473. #endif
  474. #ifndef AT_REMOVEDIR
  475. #define AT_REMOVEDIR 0x200
  476. #endif
  477. #ifndef MREMAP_FIXED
  478. #define MREMAP_FIXED 2
  479. #endif
  480. #ifndef SA_RESTORER
  481. #define SA_RESTORER 0x04000000
  482. #endif
  483. #if defined(__i386__)
  484. #ifndef __NR_rt_sigaction
  485. #define __NR_rt_sigaction 174
  486. #define __NR_rt_sigprocmask 175
  487. #endif
  488. #ifndef __NR_stat64
  489. #define __NR_stat64 195
  490. #endif
  491. #ifndef __NR_fstat64
  492. #define __NR_fstat64 197
  493. #endif
  494. #ifndef __NR_getdents64
  495. #define __NR_getdents64 220
  496. #endif
  497. #ifndef __NR_gettid
  498. #define __NR_gettid 224
  499. #endif
  500. #ifndef __NR_futex
  501. #define __NR_futex 240
  502. #endif
  503. #ifndef __NR_openat
  504. #define __NR_openat 295
  505. #endif
  506. #ifndef __NR_getcpu
  507. #define __NR_getcpu 318
  508. #endif
  509. /* End of i386 definitions */
  510. #elif defined(__arm__)
  511. #ifndef __syscall
  512. #if defined(__thumb__) || defined(__ARM_EABI__)
  513. #define __SYS_REG(name) register long __sysreg __asm__("r6") = __NR_##name;
  514. #define __SYS_REG_LIST(regs...) [sysreg] "r" (__sysreg) , ##regs
  515. #define __syscall(name) "swi\t0"
  516. #define __syscall_safe(name) \
  517. "push {r7}\n" \
  518. "mov r7,%[sysreg]\n" \
  519. __syscall(name)"\n" \
  520. "pop {r7}"
  521. #else
  522. #define __SYS_REG(name)
  523. #define __SYS_REG_LIST(regs...) regs
  524. #define __syscall(name) "swi\t" __sys1(__NR_##name) ""
  525. #define __syscall_safe(name) __syscall(name)
  526. #endif
  527. #endif
  528. #ifndef __NR_rt_sigaction
  529. #define __NR_rt_sigaction (__NR_SYSCALL_BASE + 174)
  530. #define __NR_rt_sigprocmask (__NR_SYSCALL_BASE + 175)
  531. #endif
  532. #ifndef __NR_stat64
  533. #define __NR_stat64 (__NR_SYSCALL_BASE + 195)
  534. #endif
  535. #ifndef __NR_fstat64
  536. #define __NR_fstat64 (__NR_SYSCALL_BASE + 197)
  537. #endif
  538. #ifndef __NR_getdents64
  539. #define __NR_getdents64 (__NR_SYSCALL_BASE + 217)
  540. #endif
  541. #ifndef __NR_gettid
  542. #define __NR_gettid (__NR_SYSCALL_BASE + 224)
  543. #endif
  544. #ifndef __NR_futex
  545. #define __NR_futex (__NR_SYSCALL_BASE + 240)
  546. #endif
  547. /* End of ARM definitions */
  548. #elif defined(__x86_64__)
  549. #ifndef __NR_gettid
  550. #define __NR_gettid 186
  551. #endif
  552. #ifndef __NR_futex
  553. #define __NR_futex 202
  554. #endif
  555. #ifndef __NR_getdents64
  556. #define __NR_getdents64 217
  557. #endif
  558. #ifndef __NR_openat
  559. #define __NR_openat 257
  560. #endif
  561. /* End of x86-64 definitions */
  562. #elif defined(__mips__)
  563. #if _MIPS_SIM == _MIPS_SIM_ABI32
  564. #ifndef __NR_rt_sigaction
  565. #define __NR_rt_sigaction (__NR_Linux + 194)
  566. #define __NR_rt_sigprocmask (__NR_Linux + 195)
  567. #endif
  568. #ifndef __NR_stat64
  569. #define __NR_stat64 (__NR_Linux + 213)
  570. #endif
  571. #ifndef __NR_fstat64
  572. #define __NR_fstat64 (__NR_Linux + 215)
  573. #endif
  574. #ifndef __NR_getdents64
  575. #define __NR_getdents64 (__NR_Linux + 219)
  576. #endif
  577. #ifndef __NR_gettid
  578. #define __NR_gettid (__NR_Linux + 222)
  579. #endif
  580. #ifndef __NR_futex
  581. #define __NR_futex (__NR_Linux + 238)
  582. #endif
  583. #ifndef __NR_openat
  584. #define __NR_openat (__NR_Linux + 288)
  585. #endif
  586. #ifndef __NR_fstatat
  587. #define __NR_fstatat (__NR_Linux + 293)
  588. #endif
  589. #ifndef __NR_getcpu
  590. #define __NR_getcpu (__NR_Linux + 312)
  591. #endif
  592. /* End of MIPS (old 32bit API) definitions */
  593. #elif _MIPS_SIM == _MIPS_SIM_ABI64
  594. #ifndef __NR_gettid
  595. #define __NR_gettid (__NR_Linux + 178)
  596. #endif
  597. #ifndef __NR_futex
  598. #define __NR_futex (__NR_Linux + 194)
  599. #endif
  600. #ifndef __NR_openat
  601. #define __NR_openat (__NR_Linux + 247)
  602. #endif
  603. #ifndef __NR_fstatat
  604. #define __NR_fstatat (__NR_Linux + 252)
  605. #endif
  606. #ifndef __NR_getcpu
  607. #define __NR_getcpu (__NR_Linux + 271)
  608. #endif
  609. /* End of MIPS (64bit API) definitions */
  610. #else
  611. #ifndef __NR_gettid
  612. #define __NR_gettid (__NR_Linux + 178)
  613. #endif
  614. #ifndef __NR_futex
  615. #define __NR_futex (__NR_Linux + 194)
  616. #endif
  617. #ifndef __NR_openat
  618. #define __NR_openat (__NR_Linux + 251)
  619. #endif
  620. #ifndef __NR_fstatat
  621. #define __NR_fstatat (__NR_Linux + 256)
  622. #endif
  623. #ifndef __NR_getcpu
  624. #define __NR_getcpu (__NR_Linux + 275)
  625. #endif
  626. /* End of MIPS (new 32bit API) definitions */
  627. #endif
  628. /* End of MIPS definitions */
  629. #elif defined(__PPC__)
  630. #ifndef __NR_rt_sigaction
  631. #define __NR_rt_sigaction 173
  632. #define __NR_rt_sigprocmask 174
  633. #endif
  634. #ifndef __NR_stat64
  635. #define __NR_stat64 195
  636. #endif
  637. #ifndef __NR_fstat64
  638. #define __NR_fstat64 197
  639. #endif
  640. #ifndef __NR_getdents64
  641. #define __NR_getdents64 202
  642. #endif
  643. #ifndef __NR_gettid
  644. #define __NR_gettid 207
  645. #endif
  646. #ifndef __NR_futex
  647. #define __NR_futex 221
  648. #endif
  649. #ifndef __NR_openat
  650. #define __NR_openat 286
  651. #endif
  652. #ifndef __NR_getcpu
  653. #define __NR_getcpu 302
  654. #endif
  655. /* End of powerpc defininitions */
  656. #endif
  657. /* After forking, we must make sure to only call system calls. */
  658. #if __BOUNDED_POINTERS__
  659. #error "Need to port invocations of syscalls for bounded ptrs"
  660. #else
  661. /* The core dumper and the thread lister get executed after threads
  662. * have been suspended. As a consequence, we cannot call any functions
  663. * that acquire locks. Unfortunately, libc wraps most system calls
  664. * (e.g. in order to implement pthread_atfork, and to make calls
  665. * cancellable), which means we cannot call these functions. Instead,
  666. * we have to call syscall() directly.
  667. */
  668. #undef LSS_ERRNO
  669. #ifdef SYS_ERRNO
  670. /* Allow the including file to override the location of errno. This can
  671. * be useful when using clone() with the CLONE_VM option.
  672. */
  673. #define LSS_ERRNO SYS_ERRNO
  674. #else
  675. #define LSS_ERRNO errno
  676. #endif
  677. #undef LSS_INLINE
  678. #ifdef SYS_INLINE
  679. #define LSS_INLINE SYS_INLINE
  680. #else
  681. #define LSS_INLINE static inline
  682. #endif
  683. /* Allow the including file to override the prefix used for all new
  684. * system calls. By default, it will be set to "sys_".
  685. */
  686. #undef LSS_NAME
  687. #ifndef SYS_PREFIX
  688. #define LSS_NAME(name) sys_##name
  689. #elif SYS_PREFIX < 0
  690. #define LSS_NAME(name) name
  691. #elif SYS_PREFIX == 0
  692. #define LSS_NAME(name) sys0_##name
  693. #elif SYS_PREFIX == 1
  694. #define LSS_NAME(name) sys1_##name
  695. #elif SYS_PREFIX == 2
  696. #define LSS_NAME(name) sys2_##name
  697. #elif SYS_PREFIX == 3
  698. #define LSS_NAME(name) sys3_##name
  699. #elif SYS_PREFIX == 4
  700. #define LSS_NAME(name) sys4_##name
  701. #elif SYS_PREFIX == 5
  702. #define LSS_NAME(name) sys5_##name
  703. #elif SYS_PREFIX == 6
  704. #define LSS_NAME(name) sys6_##name
  705. #elif SYS_PREFIX == 7
  706. #define LSS_NAME(name) sys7_##name
  707. #elif SYS_PREFIX == 8
  708. #define LSS_NAME(name) sys8_##name
  709. #elif SYS_PREFIX == 9
  710. #define LSS_NAME(name) sys9_##name
  711. #endif
  /* LSS_RETURN converts the raw kernel return value into the libc
   * convention (-1 + errno on failure) and returns it from the enclosing
   * wrapper. NOTE: the macro's arity differs per architecture -- two
   * arguments (type, res) on i386/x86_64/ARM, but three (type, res, err)
   * on MIPS and PPC, where the kernel reports errors out-of-band.
   */
  712. #undef LSS_RETURN
  713. #if (defined(__i386__) || defined(__x86_64__) || defined(__arm__))
  714. /* Failing system calls return a negative result in the range of
  715. * -1..-4095. These are "errno" values with the sign inverted.
  716. */
  717. #define LSS_RETURN(type, res) \
  718. do { \
  719. if ((unsigned long)(res) >= (unsigned long)(-4095)) { \
  720. LSS_ERRNO = -(res); \
  721. res = -1; \
  722. } \
  723. return (type) (res); \
  724. } while (0)
  725. #elif defined(__mips__)
  726. /* On MIPS, failing system calls return -1, and set errno in a
  727. * separate CPU register.
  728. */
  729. #define LSS_RETURN(type, res, err) \
  730. do { \
  731. if (err) { \
  732. LSS_ERRNO = (res); \
  733. res = -1; \
  734. } \
  735. return (type) (res); \
  736. } while (0)
  737. #elif defined(__PPC__)
  738. /* On PPC, failing system calls return -1, and set errno in a
  739. * separate CPU register. See linux/unistd.h.
  740. */
  /* 0x10000000 is the summary-overflow (SO) bit of cr0 as captured by
   * "mfcr"; the kernel sets it to flag an error return.
   */
  741. #define LSS_RETURN(type, res, err) \
  742. do { \
  743. if (err & 0x10000000 ) { \
  744. LSS_ERRNO = (res); \
  745. res = -1; \
  746. } \
  747. return (type) (res); \
  748. } while (0)
  749. #endif
  750. #if defined(__i386__)
  751. #if defined(NO_FRAME_POINTER) && (100 * __GNUC__ + __GNUC_MINOR__ >= 404)
  752. /* This only works for GCC-4.4 and above -- the first version to use
  753. .cfi directives for dwarf unwind info. */
  754. #define CFI_ADJUST_CFA_OFFSET(adjust) \
  755. ".cfi_adjust_cfa_offset " #adjust "\n"
  756. #else
  757. #define CFI_ADJUST_CFA_OFFSET(adjust) /**/
  758. #endif
  759. /* In PIC mode (e.g. when building shared libraries), gcc for i386
  760. * reserves ebx. Unfortunately, most distributions ship with implementations
  761. * of _syscallX() which clobber ebx.
  762. * Also, most definitions of _syscallX() neglect to mark "memory" as being
  763. * clobbered. This causes problems with compilers, that do a better job
  764. * at optimizing across __asm__ calls.
  765. * So, we just have to redefine all of the _syscallX() macros.
  766. */
  /* Common i386 body for up to 4 arguments: the syscall number goes in
   * %eax, arg1 in %ebx. %ebx is saved/restored around "int $0x80" (and the
   * CFA offset annotated) so the macro stays safe in PIC code; "memory" is
   * clobbered so the compiler does not cache values across the call.
   */
  767. #undef LSS_BODY
  768. #define LSS_BODY(type,args...) \
  769. long __res; \
  770. __asm__ __volatile__("push %%ebx\n" \
  771. CFI_ADJUST_CFA_OFFSET(4) \
  772. "movl %2,%%ebx\n" \
  773. "int $0x80\n" \
  774. "pop %%ebx\n" \
  775. CFI_ADJUST_CFA_OFFSET(-4) \
  776. args \
  777. : "esp", "memory"); \
  778. LSS_RETURN(type,__res)
  779. #undef _syscall0
  780. #define _syscall0(type,name) \
  781. type LSS_NAME(name)(void) { \
  782. long __res; \
  783. __asm__ volatile("int $0x80" \
  784. : "=a" (__res) \
  785. : "0" (__NR_##name) \
  786. : "memory"); \
  787. LSS_RETURN(type,__res); \
  788. }
  789. #undef _syscall1
  790. #define _syscall1(type,name,type1,arg1) \
  791. type LSS_NAME(name)(type1 arg1) { \
  792. LSS_BODY(type, \
  793. : "=a" (__res) \
  794. : "0" (__NR_##name), "ri" ((long)(arg1))); \
  795. }
  796. #undef _syscall2
  797. #define _syscall2(type,name,type1,arg1,type2,arg2) \
  798. type LSS_NAME(name)(type1 arg1,type2 arg2) { \
  799. LSS_BODY(type, \
  800. : "=a" (__res) \
  801. : "0" (__NR_##name),"ri" ((long)(arg1)), "c" ((long)(arg2))); \
  802. }
  803. #undef _syscall3
  804. #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
  805. type LSS_NAME(name)(type1 arg1,type2 arg2,type3 arg3) { \
  806. LSS_BODY(type, \
  807. : "=a" (__res) \
  808. : "0" (__NR_##name), "ri" ((long)(arg1)), "c" ((long)(arg2)), \
  809. "d" ((long)(arg3))); \
  810. }
  811. #undef _syscall4
  812. #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
  813. type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
  814. LSS_BODY(type, \
  815. : "=a" (__res) \
  816. : "0" (__NR_##name), "ri" ((long)(arg1)), "c" ((long)(arg2)), \
  817. "d" ((long)(arg3)),"S" ((long)(arg4))); \
  818. }
  /* _syscall5/_syscall6 cannot reuse LSS_BODY: i386 runs out of spare
   * registers, so the syscall number is supplied as an immediate ("i") and
   * loaded explicitly. For six arguments, arg1 and arg6 travel indirectly
   * through the on-stack struct __s because %ebx and %ebp both have to be
   * saved and loaded by hand.
   */
  819. #undef _syscall5
  820. #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
  821. type5,arg5) \
  822. type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
  823. type5 arg5) { \
  824. long __res; \
  825. __asm__ __volatile__("push %%ebx\n" \
  826. "movl %2,%%ebx\n" \
  827. "movl %1,%%eax\n" \
  828. "int $0x80\n" \
  829. "pop %%ebx" \
  830. : "=a" (__res) \
  831. : "i" (__NR_##name), "ri" ((long)(arg1)), \
  832. "c" ((long)(arg2)), "d" ((long)(arg3)), \
  833. "S" ((long)(arg4)), "D" ((long)(arg5)) \
  834. : "esp", "memory"); \
  835. LSS_RETURN(type,__res); \
  836. }
  837. #undef _syscall6
  838. #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
  839. type5,arg5,type6,arg6) \
  840. type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
  841. type5 arg5, type6 arg6) { \
  842. long __res; \
  843. struct { long __a1; long __a6; } __s = { (long)arg1, (long) arg6 }; \
  844. __asm__ __volatile__("push %%ebp\n" \
  845. "push %%ebx\n" \
  846. "movl 4(%2),%%ebp\n" \
  847. "movl 0(%2), %%ebx\n" \
  848. "movl %1,%%eax\n" \
  849. "int $0x80\n" \
  850. "pop %%ebx\n" \
  851. "pop %%ebp" \
  852. : "=a" (__res) \
  853. : "i" (__NR_##name), "0" ((long)(&__s)), \
  854. "c" ((long)(arg2)), "d" ((long)(arg3)), \
  855. "S" ((long)(arg4)), "D" ((long)(arg5)) \
  856. : "esp", "memory"); \
  857. LSS_RETURN(type,__res); \
  858. }
  /* sys_clone() for i386: start a new task running fn(arg) on child_stack.
   * In the parent, returns the child's pid, or -1 with errno set (EINVAL
   * if fn or child_stack is NULL). The child never returns from here: it
   * calls fn and then _exit()s with fn's return value (the exit syscall
   * number 1 is hard-coded in the asm below). %ebx is saved around the
   * syscall because gcc reserves it in PIC mode.
   */
  859. LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack,
  860. int flags, void *arg, int *parent_tidptr,
  861. void *newtls, int *child_tidptr) {
  862. long __res;
  863. __asm__ __volatile__(/* if (fn == NULL)
  864. * return -EINVAL;
  865. */
  866. "movl %3,%%ecx\n"
  867. "jecxz 1f\n"
  868. /* if (child_stack == NULL)
  869. * return -EINVAL;
  870. */
  871. "movl %4,%%ecx\n"
  872. "jecxz 1f\n"
  873. /* Set up alignment of the child stack:
  874. * child_stack = (child_stack & ~0xF) - 20;
  875. */
  876. "andl $-16,%%ecx\n"
  877. "subl $20,%%ecx\n"
  878. /* Push "arg" and "fn" onto the stack that will be
  879. * used by the child.
  880. */
  881. "movl %6,%%eax\n"
  882. "movl %%eax,4(%%ecx)\n"
  883. "movl %3,%%eax\n"
  884. "movl %%eax,(%%ecx)\n"
  885. /* %eax = syscall(%eax = __NR_clone,
  886. * %ebx = flags,
  887. * %ecx = child_stack,
  888. * %edx = parent_tidptr,
  889. * %esi = newtls,
  890. * %edi = child_tidptr)
  891. * Also, make sure that %ebx gets preserved as it is
  892. * used in PIC mode.
  893. */
  894. "movl %8,%%esi\n"
  895. "movl %7,%%edx\n"
  896. "movl %5,%%eax\n"
  897. "movl %9,%%edi\n"
  898. "pushl %%ebx\n"
  899. "movl %%eax,%%ebx\n"
  900. "movl %2,%%eax\n"
  901. "int $0x80\n"
  902. /* In the parent: restore %ebx
  903. * In the child: move "fn" into %ebx
  904. */
  905. "popl %%ebx\n"
  906. /* if (%eax != 0)
  907. * return %eax;
  908. */
  909. "test %%eax,%%eax\n"
  910. "jnz 1f\n"
  911. /* In the child, now. Terminate frame pointer chain.
  912. */
  913. "movl $0,%%ebp\n"
  914. /* Call "fn". "arg" is already on the stack.
  915. */
  916. "call *%%ebx\n"
  917. /* Call _exit(%ebx). Unfortunately older versions
  918. * of gcc restrict the number of arguments that can
  919. * be passed to asm(). So, we need to hard-code the
  920. * system call number.
  921. */
  922. "movl %%eax,%%ebx\n"
  923. "movl $1,%%eax\n"
  924. "int $0x80\n"
  925. /* Return to parent.
  926. */
  927. "1:\n"
  928. : "=a" (__res)
  929. : "0"(-EINVAL), "i"(__NR_clone),
  930. "m"(fn), "m"(child_stack), "m"(flags), "m"(arg),
  931. "m"(parent_tidptr), "m"(newtls), "m"(child_tidptr)
  932. : "esp", "memory", "ecx", "edx", "esi", "edi");
  933. LSS_RETURN(int, __res);
  934. }
  /* Returns the address of a small trampoline that invokes the
   * rt_sigreturn() syscall; intended for use as the SA_RESTORER of a
   * sigaction. The "call 2f"/"popl" sequence computes the trampoline's
   * address PC-relatively, so this also works in position-independent
   * code. Note the trampoline code itself is embedded inside this
   * function's body (between labels 1 and 2) and is never reached by
   * normal control flow.
   */
  935. LSS_INLINE void (*LSS_NAME(restore_rt)(void))(void) {
  936. /* On i386, the kernel does not know how to return from a signal
  937. * handler. Instead, it relies on user space to provide a
  938. * restorer function that calls the {rt_,}sigreturn() system call.
  939. * Unfortunately, we cannot just reference the glibc version of this
  940. * function, as glibc goes out of its way to make it inaccessible.
  941. */
  942. void (*res)(void);
  943. __asm__ __volatile__("call 2f\n"
  944. "0:.align 16\n"
  945. "1:movl %1,%%eax\n"
  946. "int $0x80\n"
  947. "2:popl %0\n"
  948. "addl $(1b-0b),%0\n"
  949. : "=a" (res)
  950. : "i" (__NR_rt_sigreturn));
  951. return res;
  952. }
  /* Same as restore_rt() above, but for the old (non-RT) sigreturn()
   * syscall. The extra "pop %%eax" in the trampoline discards the signal
   * number that the kernel pushes for old-style signal frames before
   * issuing sigreturn().
   */
  953. LSS_INLINE void (*LSS_NAME(restore)(void))(void) {
  954. /* On i386, the kernel does not know how to return from a signal
  955. * handler. Instead, it relies on user space to provide a
  956. * restorer function that calls the {rt_,}sigreturn() system call.
  957. * Unfortunately, we cannot just reference the glibc version of this
  958. * function, as glibc goes out of its way to make it inaccessible.
  959. */
  960. void (*res)(void);
  961. __asm__ __volatile__("call 2f\n"
  962. "0:.align 16\n"
  963. "1:pop %%eax\n"
  964. "movl %1,%%eax\n"
  965. "int $0x80\n"
  966. "2:popl %0\n"
  967. "addl $(1b-0b),%0\n"
  968. : "=a" (res)
  969. : "i" (__NR_sigreturn));
  970. return res;
  971. }
  972. #elif defined(__x86_64__)
  973. /* There are no known problems with any of the _syscallX() macros
  974. * currently shipping for x86_64, but we still need to be able to define
  975. * our own version so that we can override the location of the errno
  976. * location (e.g. when using the clone() system call with the CLONE_VM
  977. * option).
  978. */
  /* x86-64 convention: syscall number and result in %rax; args 1-3 in
   * %rdi/%rsi/%rdx (selected via the "D"/"S"/"d" constraint letters). The
   * kernel's "syscall" instruction clobbers %rcx and %r11, hence those
   * clobbers on every body below. Args 4-6 live in %r10/%r8/%r9, which
   * have no GCC constraint letters, so _syscall4..6 load them with
   * explicit movq instructions from generic "r" operands.
   */
  979. #undef LSS_BODY
  980. #define LSS_BODY(type,name, ...) \
  981. long __res; \
  982. __asm__ __volatile__("syscall" : "=a" (__res) : "0" (__NR_##name), \
  983. ##__VA_ARGS__ : "r11", "rcx", "memory"); \
  984. LSS_RETURN(type, __res)
  985. #undef _syscall0
  986. #define _syscall0(type,name) \
  987. type LSS_NAME(name)() { \
  988. LSS_BODY(type, name); \
  989. }
  990. #undef _syscall1
  991. #define _syscall1(type,name,type1,arg1) \
  992. type LSS_NAME(name)(type1 arg1) { \
  993. LSS_BODY(type, name, "D" ((long)(arg1))); \
  994. }
  995. #undef _syscall2
  996. #define _syscall2(type,name,type1,arg1,type2,arg2) \
  997. type LSS_NAME(name)(type1 arg1, type2 arg2) { \
  998. LSS_BODY(type, name, "D" ((long)(arg1)), "S" ((long)(arg2))); \
  999. }
  1000. #undef _syscall3
  1001. #define _syscall3(type,name,type1,arg1,type2,arg2,type3,arg3) \
  1002. type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \
  1003. LSS_BODY(type, name, "D" ((long)(arg1)), "S" ((long)(arg2)), \
  1004. "d" ((long)(arg3))); \
  1005. }
  1006. #undef _syscall4
  1007. #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
  1008. type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
  1009. long __res; \
  1010. __asm__ __volatile__("movq %5,%%r10; syscall" : \
  1011. "=a" (__res) : "0" (__NR_##name), \
  1012. "D" ((long)(arg1)), "S" ((long)(arg2)), "d" ((long)(arg3)), \
  1013. "r" ((long)(arg4)) : "r10", "r11", "rcx", "memory"); \
  1014. LSS_RETURN(type, __res); \
  1015. }
  1016. #undef _syscall5
  1017. #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
  1018. type5,arg5) \
  1019. type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
  1020. type5 arg5) { \
  1021. long __res; \
  1022. __asm__ __volatile__("movq %5,%%r10; movq %6,%%r8; syscall" : \
  1023. "=a" (__res) : "0" (__NR_##name), \
  1024. "D" ((long)(arg1)), "S" ((long)(arg2)), "d" ((long)(arg3)), \
  1025. "r" ((long)(arg4)), "r" ((long)(arg5)) : \
  1026. "r8", "r10", "r11", "rcx", "memory"); \
  1027. LSS_RETURN(type, __res); \
  1028. }
  1029. #undef _syscall6
  1030. #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
  1031. type5,arg5,type6,arg6) \
  1032. type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
  1033. type5 arg5, type6 arg6) { \
  1034. long __res; \
  1035. __asm__ __volatile__("movq %5,%%r10; movq %6,%%r8; movq %7,%%r9;" \
  1036. "syscall" : \
  1037. "=a" (__res) : "0" (__NR_##name), \
  1038. "D" ((long)(arg1)), "S" ((long)(arg2)), "d" ((long)(arg3)), \
  1039. "r" ((long)(arg4)), "r" ((long)(arg5)), "r" ((long)(arg6)) : \
  1040. "r8", "r9", "r10", "r11", "rcx", "memory"); \
  1041. LSS_RETURN(type, __res); \
  1042. }
  /* sys_clone() for x86-64: start a new task running fn(arg) on
   * child_stack. In the parent, returns the child's pid, or -1 with errno
   * set (EINVAL if fn or child_stack is NULL). The child never returns:
   * the asm aligns the child stack to 16 bytes, pushes fn and arg onto
   * it, and after the syscall the child pops them, calls fn, and then
   * invokes __NR_exit with fn's return value.
   */
  1043. LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack,
  1044. int flags, void *arg, int *parent_tidptr,
  1045. void *newtls, int *child_tidptr) {
  1046. long __res;
  1047. {
  1048. __asm__ __volatile__(/* if (fn == NULL)
  1049. * return -EINVAL;
  1050. */
  1051. "testq %4,%4\n"
  1052. "jz 1f\n"
  1053. /* if (child_stack == NULL)
  1054. * return -EINVAL;
  1055. */
  1056. "testq %5,%5\n"
  1057. "jz 1f\n"
  1058. /* Set up alignment of the child stack:
  1059. * child_stack = (child_stack & ~0xF) - 16;
  1060. */
  1061. "andq $-16,%5\n"
  1062. "subq $16,%5\n"
  1063. /* Push "arg" and "fn" onto the stack that will be
  1064. * used by the child.
  1065. */
  1066. "movq %7,8(%5)\n"
  1067. "movq %4,0(%5)\n"
  1068. /* %rax = syscall(%rax = __NR_clone,
  1069. * %rdi = flags,
  1070. * %rsi = child_stack,
  1071. * %rdx = parent_tidptr,
  1072. * %r8 = new_tls,
  1073. * %r10 = child_tidptr)
  1074. */
  1075. "movq %2,%%rax\n"
  1076. "movq %9,%%r8\n"
  1077. "movq %10,%%r10\n"
  1078. "syscall\n"
  1079. /* if (%rax != 0)
  1080. * return;
  1081. */
  1082. "testq %%rax,%%rax\n"
  1083. "jnz 1f\n"
  1084. /* In the child. Terminate frame pointer chain.
  1085. */
  1086. "xorq %%rbp,%%rbp\n"
  1087. /* Call "fn(arg)".
  1088. */
  1089. "popq %%rax\n"
  1090. "popq %%rdi\n"
  1091. "call *%%rax\n"
  1092. /* Call _exit(%ebx).
  1093. */
  1094. "movq %%rax,%%rdi\n"
  1095. "movq %3,%%rax\n"
  1096. "syscall\n"
  1097. /* Return to parent.
  1098. */
  1099. "1:\n"
  1100. : "=a" (__res)
  1101. : "0"(-EINVAL), "i"(__NR_clone), "i"(__NR_exit),
  1102. "r"(fn), "S"(child_stack), "D"(flags), "r"(arg),
  1103. "d"(parent_tidptr), "g"(newtls), "g"(child_tidptr)
  1104. : "rsp", "memory", "r8", "r10", "r11", "rcx");
  1105. }
  1106. LSS_RETURN(int, __res);
  1107. }
  /* Returns the address of a small trampoline that invokes the
   * rt_sigreturn() syscall; intended for use as the SA_RESTORER of a
   * sigaction. The "call 2f"/"popq" sequence computes the trampoline's
   * address PC-relatively, so this also works in position-independent
   * code.
   */
  1108. LSS_INLINE void (*LSS_NAME(restore_rt)(void))(void) {
  1109. /* On x86-64, the kernel does not know how to return from
  1110. * a signal handler. Instead, it relies on user space to provide a
  1111. * restorer function that calls the rt_sigreturn() system call.
  1112. * Unfortunately, we cannot just reference the glibc version of this
  1113. * function, as glibc goes out of its way to make it inaccessible.
  1114. */
  1115. void (*res)(void);
  1116. __asm__ __volatile__("call 2f\n"
  1117. "0:.align 16\n"
  1118. "1:movq %1,%%rax\n"
  1119. "syscall\n"
  1120. "2:popq %0\n"
  1121. "addq $(1b-0b),%0\n"
  1122. : "=a" (res)
  1123. : "i" (__NR_rt_sigreturn));
  1124. return res;
  1125. }
  1126. #elif defined(__arm__)
  1127. /* Most definitions of _syscallX() neglect to mark "memory" as being
  1128. * clobbered. This causes problems with compilers, that do a better job
  1129. * at optimizing across __asm__ calls.
  1130. * So, we just have to redefine all of the _syscallX() macros.
  1131. */
  /* LSS_REG binds an expression directly to a specific ARM register. */
  1132. #undef LSS_REG
  1133. #define LSS_REG(r,a) register long __r##r __asm__("r"#r) = (long)a
  1134. /* r0..r3 are scratch registers and not preserved across function
  1135. * calls. We need to first evaluate the first 4 syscall arguments
  1136. * and store them on stack. They must be loaded into r0..r3 after
  1137. * all function calls to avoid r0..r3 being clobbered.
  1138. */
  1139. #undef LSS_SAVE_ARG
  1140. #define LSS_SAVE_ARG(r,a) long __tmp##r = (long)a
  1141. #undef LSS_LOAD_ARG
  1142. #define LSS_LOAD_ARG(r) register long __r##r __asm__("r"#r) = __tmp##r
  /* Common ARM body: __SYS_REG()/__syscall_safe() (defined earlier in
   * this header -- not visible in this excerpt) abstract over the EABI
   * vs. OABI difference in how the syscall number is passed. The kernel's
   * result comes back in r0 and is copied to __res for LSS_RETURN.
   */
  1143. #undef LSS_BODY
  1144. #define LSS_BODY(type, name, args...) \
  1145. long __res_r0 __asm__("r0"); \
  1146. long __res; \
  1147. __SYS_REG(name) \
  1148. __asm__ __volatile__ (__syscall_safe(name) \
  1149. : "=r"(__res_r0) \
  1150. : __SYS_REG_LIST(args) \
  1151. : "lr", "memory"); \
  1152. __res = __res_r0; \
  1153. LSS_RETURN(type, __res)
  1154. #undef _syscall0
  1155. #define _syscall0(type, name) \
  1156. type LSS_NAME(name)() { \
  1157. LSS_BODY(type, name); \
  1158. }
  1159. #undef _syscall1
  1160. #define _syscall1(type, name, type1, arg1) \
  1161. type LSS_NAME(name)(type1 arg1) { \
  1162. /* There is no need for using a volatile temp. */ \
  1163. LSS_REG(0, arg1); \
  1164. LSS_BODY(type, name, "r"(__r0)); \
  1165. }
  1166. #undef _syscall2
  1167. #define _syscall2(type, name, type1, arg1, type2, arg2) \
  1168. type LSS_NAME(name)(type1 arg1, type2 arg2) { \
  1169. LSS_SAVE_ARG(0, arg1); \
  1170. LSS_SAVE_ARG(1, arg2); \
  1171. LSS_LOAD_ARG(0); \
  1172. LSS_LOAD_ARG(1); \
  1173. LSS_BODY(type, name, "r"(__r0), "r"(__r1)); \
  1174. }
  1175. #undef _syscall3
  1176. #define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
  1177. type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \
  1178. LSS_SAVE_ARG(0, arg1); \
  1179. LSS_SAVE_ARG(1, arg2); \
  1180. LSS_SAVE_ARG(2, arg3); \
  1181. LSS_LOAD_ARG(0); \
  1182. LSS_LOAD_ARG(1); \
  1183. LSS_LOAD_ARG(2); \
  1184. LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2)); \
  1185. }
  1186. #undef _syscall4
  1187. #define _syscall4(type, name, type1, arg1, type2, arg2, type3, arg3, \
  1188. type4, arg4) \
  1189. type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
  1190. LSS_SAVE_ARG(0, arg1); \
  1191. LSS_SAVE_ARG(1, arg2); \
  1192. LSS_SAVE_ARG(2, arg3); \
  1193. LSS_SAVE_ARG(3, arg4); \
  1194. LSS_LOAD_ARG(0); \
  1195. LSS_LOAD_ARG(1); \
  1196. LSS_LOAD_ARG(2); \
  1197. LSS_LOAD_ARG(3); \
  1198. LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3)); \
  1199. }
  /* Args 5 and 6 use LSS_REG directly: r4/r5 are callee-saved, so they
   * cannot be clobbered by the argument-evaluation function calls that
   * the LSS_SAVE_ARG/LSS_LOAD_ARG dance guards r0..r3 against.
   */
  1200. #undef _syscall5
  1201. #define _syscall5(type, name, type1, arg1, type2, arg2, type3, arg3, \
  1202. type4, arg4, type5, arg5) \
  1203. type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
  1204. type5 arg5) { \
  1205. LSS_SAVE_ARG(0, arg1); \
  1206. LSS_SAVE_ARG(1, arg2); \
  1207. LSS_SAVE_ARG(2, arg3); \
  1208. LSS_SAVE_ARG(3, arg4); \
  1209. LSS_REG(4, arg5); \
  1210. LSS_LOAD_ARG(0); \
  1211. LSS_LOAD_ARG(1); \
  1212. LSS_LOAD_ARG(2); \
  1213. LSS_LOAD_ARG(3); \
  1214. LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3), \
  1215. "r"(__r4)); \
  1216. }
  1217. #undef _syscall6
  1218. #define _syscall6(type, name, type1, arg1, type2, arg2, type3, arg3, \
  1219. type4, arg4, type5, arg5, type6, arg6) \
  1220. type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
  1221. type5 arg5, type6 arg6) { \
  1222. LSS_SAVE_ARG(0, arg1); \
  1223. LSS_SAVE_ARG(1, arg2); \
  1224. LSS_SAVE_ARG(2, arg3); \
  1225. LSS_SAVE_ARG(3, arg4); \
  1226. LSS_REG(4, arg5); \
  1227. LSS_REG(5, arg6); \
  1228. LSS_LOAD_ARG(0); \
  1229. LSS_LOAD_ARG(1); \
  1230. LSS_LOAD_ARG(2); \
  1231. LSS_LOAD_ARG(3); \
  1232. LSS_BODY(type, name, "r"(__r0), "r"(__r1), "r"(__r2), "r"(__r3), \
  1233. "r"(__r4), "r"(__r5)); \
  1234. }
  /* sys_clone() for ARM: start a new task running fn(arg) on child_stack.
   * In the parent, returns the child's pid, or -1 with errno set (EINVAL
   * if fn or child_stack is NULL). fn and arg are pushed onto the child
   * stack in C before the syscall; the child pops and calls them, then
   * _exit()s with fn's result. r7 is saved/restored around the syscall
   * because it carries the syscall number in the EABI convention. The
   * result is kept in r5 (callee-saved) so it survives to LSS_RETURN.
   */
  1235. LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack,
  1236. int flags, void *arg, int *parent_tidptr,
  1237. void *newtls, int *child_tidptr) {
  1238. long __res __asm__("r5");
  1239. {
  1240. if (fn == NULL || child_stack == NULL) {
  1241. __res = -EINVAL;
  1242. goto clone_exit;
  1243. }
  1244. /* stash first 4 arguments on stack first because we can only load
  1245. * them after all function calls.
  1246. */
  1247. int tmp_flags = flags;
  1248. int * tmp_stack = (int*) child_stack;
  1249. void * tmp_ptid = parent_tidptr;
  1250. void * tmp_tls = newtls;
  1251. int *__ctid __asm__("r4") = child_tidptr;
  1252. /* Push "arg" and "fn" onto the stack that will be
  1253. * used by the child.
  1254. */
  1255. *(--tmp_stack) = (int) arg;
  1256. *(--tmp_stack) = (int) fn;
  1257. /* We must load r0..r3 last after all possible function calls. */
  1258. int __flags __asm__("r0") = tmp_flags;
  1259. void *__stack __asm__("r1") = tmp_stack;
  1260. void *__ptid __asm__("r2") = tmp_ptid;
  1261. void *__tls __asm__("r3") = tmp_tls;
  1262. /* %r0 = syscall(%r0 = flags,
  1263. * %r1 = child_stack,
  1264. * %r2 = parent_tidptr,
  1265. * %r3 = newtls,
  1266. * %r4 = child_tidptr)
  1267. */
  1268. __SYS_REG(clone)
  1269. __asm__ __volatile__(/* %r0 = syscall(%r0 = flags,
  1270. * %r1 = child_stack,
  1271. * %r2 = parent_tidptr,
  1272. * %r3 = newtls,
  1273. * %r4 = child_tidptr)
  1274. */
  1275. "push {r7}\n"
  1276. "mov r7,%1\n"
  1277. __syscall(clone)"\n"
  1278. /* if (%r0 != 0)
  1279. * return %r0;
  1280. */
  1281. "movs %0,r0\n"
  1282. "bne 1f\n"
  1283. /* In the child, now. Call "fn(arg)".
  1284. */
  1285. "ldr r0,[sp, #4]\n"
  1286. "mov lr,pc\n"
  1287. "ldr pc,[sp]\n"
  1288. /* Call _exit(%r0), which never returns. We only
  1289. * need to set r7 for EABI syscall ABI but we do
  1290. * this always to simplify code sharing between
  1291. * old and new syscall ABIs.
  1292. */
  1293. "mov r7,%2\n"
  1294. __syscall(exit)"\n"
  1295. /* Pop r7 from the stack only in the parent.
  1296. */
  1297. "1: pop {r7}\n"
  1298. : "=r" (__res)
  1299. : "r"(__sysreg),
  1300. "i"(__NR_exit), "r"(__stack), "r"(__flags),
  1301. "r"(__ptid), "r"(__tls), "r"(__ctid)
  1302. : "cc", "lr", "memory");
  1303. }
  1304. clone_exit:
  1305. LSS_RETURN(int, __res);
  1306. }
  1307. #elif defined(__mips__)
  /* MIPS syscall convention: the syscall number and result use $2 (v0);
   * the kernel reports success/failure out-of-band in $7 (a3), which is
   * why the three-argument form of LSS_RETURN receives __r7. The o32 ABI
   * additionally passes the 5th/6th arguments on the stack (below a
   * 16-byte argument save area), whereas n32/n64 use registers $8/$9.
   */
  1308. #undef LSS_REG
  1309. #define LSS_REG(r,a) register unsigned long __r##r __asm__("$"#r) = \
  1310. (unsigned long)(a)
  1311. #if _MIPS_SIM == _MIPS_SIM_ABI32
  1312. // See http://sources.redhat.com/ml/libc-alpha/2004-10/msg00050.html
  1313. // or http://www.linux-mips.org/archives/linux-mips/2004-10/msg00142.html
  1314. #define MIPS_SYSCALL_CLOBBERS "$1", "$3", "$8", "$9", "$10", "$11", "$12",\
  1315. "$13", "$14", "$15", "$24", "$25", "memory"
  1316. #else
  1317. #define MIPS_SYSCALL_CLOBBERS "$1", "$3", "$10", "$11", "$12", "$13", \
  1318. "$14", "$15", "$24", "$25", "memory"
  1319. #endif
  /* The r7 parameter is the constraint ("=r" or "+r") used for the $7
   * error register: "+r" when $7 also carries the 4th syscall argument.
   */
  1320. #undef LSS_BODY
  1321. #define LSS_BODY(type,name,r7,...) \
  1322. unsigned long __v0 __asm__("$2") = __NR_##name; \
  1323. __asm__ __volatile__ ("syscall\n" \
  1324. : "=&r"(__v0), r7 (__r7) \
  1325. : "0"(__v0), ##__VA_ARGS__ \
  1326. : MIPS_SYSCALL_CLOBBERS); \
  1327. LSS_RETURN(type, __v0, __r7)
  1328. #undef _syscall0
  1329. #define _syscall0(type, name) \
  1330. type LSS_NAME(name)() { \
  1331. unsigned long __r7 __asm__("$7"); \
  1332. LSS_BODY(type, name, "=r"); \
  1333. }
  1334. #undef _syscall1
  1335. #define _syscall1(type, name, type1, arg1) \
  1336. type LSS_NAME(name)(type1 arg1) { \
  1337. unsigned long __r7 __asm__("$7"); \
  1338. LSS_REG(4, arg1); LSS_BODY(type, name, "=r", "r"(__r4)); \
  1339. }
  1340. #undef _syscall2
  1341. #define _syscall2(type, name, type1, arg1, type2, arg2) \
  1342. type LSS_NAME(name)(type1 arg1, type2 arg2) { \
  1343. unsigned long __r7 __asm__("$7"); \
  1344. LSS_REG(4, arg1); LSS_REG(5, arg2); \
  1345. LSS_BODY(type, name, "=r", "r"(__r4), "r"(__r5)); \
  1346. }
  1347. #undef _syscall3
  1348. #define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3) \
  1349. type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) { \
  1350. unsigned long __r7 __asm__("$7"); \
  1351. LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \
  1352. LSS_BODY(type, name, "=r", "r"(__r4), "r"(__r5), "r"(__r6)); \
  1353. }
  1354. #undef _syscall4
  1355. #define _syscall4(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4) \
  1356. type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
  1357. LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \
  1358. LSS_REG(7, arg4); \
  1359. LSS_BODY(type, name, "+r", "r"(__r4), "r"(__r5), "r"(__r6)); \
  1360. }
  1361. #undef _syscall5
  1362. #if _MIPS_SIM == _MIPS_SIM_ABI32
  1363. /* The old 32bit MIPS system call API passes the fifth and sixth argument
  1364. * on the stack, whereas the new APIs use registers "r8" and "r9".
  1365. */
  1366. #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
  1367. type5,arg5) \
  1368. type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
  1369. type5 arg5) { \
  1370. LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \
  1371. LSS_REG(7, arg4); \
  1372. unsigned long __v0 __asm__("$2"); \
  1373. __asm__ __volatile__ (".set noreorder\n" \
  1374. "lw $2, %6\n" \
  1375. "subu $29, 32\n" \
  1376. "sw $2, 16($29)\n" \
  1377. "li $2, %2\n" \
  1378. "syscall\n" \
  1379. "addiu $29, 32\n" \
  1380. ".set reorder\n" \
  1381. : "=&r"(__v0), "+r" (__r7) \
  1382. : "i" (__NR_##name), "r"(__r4), "r"(__r5), \
  1383. "r"(__r6), "m" ((unsigned long)arg5) \
  1384. : MIPS_SYSCALL_CLOBBERS); \
  1385. LSS_RETURN(type, __v0, __r7); \
  1386. }
  1387. #else
  1388. #define _syscall5(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
  1389. type5,arg5) \
  1390. type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
  1391. type5 arg5) { \
  1392. LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \
  1393. LSS_REG(7, arg4); LSS_REG(8, arg5); \
  1394. LSS_BODY(type, name, "+r", "r"(__r4), "r"(__r5), "r"(__r6), \
  1395. "r"(__r8)); \
  1396. }
  1397. #endif
  1398. #undef _syscall6
  1399. #if _MIPS_SIM == _MIPS_SIM_ABI32
  1400. /* The old 32bit MIPS system call API passes the fifth and sixth argument
  1401. * on the stack, whereas the new APIs use registers "r8" and "r9".
  1402. */
  1403. #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
  1404. type5,arg5,type6,arg6) \
  1405. type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
  1406. type5 arg5, type6 arg6) { \
  1407. LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \
  1408. LSS_REG(7, arg4); \
  1409. unsigned long __v0 __asm__("$2"); \
  1410. __asm__ __volatile__ (".set noreorder\n" \
  1411. "lw $2, %6\n" \
  1412. "lw $8, %7\n" \
  1413. "subu $29, 32\n" \
  1414. "sw $2, 16($29)\n" \
  1415. "sw $8, 20($29)\n" \
  1416. "li $2, %2\n" \
  1417. "syscall\n" \
  1418. "addiu $29, 32\n" \
  1419. ".set reorder\n" \
  1420. : "=&r"(__v0), "+r" (__r7) \
  1421. : "i" (__NR_##name), "r"(__r4), "r"(__r5), \
  1422. "r"(__r6), "r" ((unsigned long)arg5), \
  1423. "r" ((unsigned long)arg6) \
  1424. : MIPS_SYSCALL_CLOBBERS); \
  1425. LSS_RETURN(type, __v0, __r7); \
  1426. }
  1427. #else
  1428. #define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4, \
  1429. type5,arg5,type6,arg6) \
  1430. type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
  1431. type5 arg5,type6 arg6) { \
  1432. LSS_REG(4, arg1); LSS_REG(5, arg2); LSS_REG(6, arg3); \
  1433. LSS_REG(7, arg4); LSS_REG(8, arg5); LSS_REG(9, arg6); \
  1434. LSS_BODY(type, name, "+r", "r"(__r4), "r"(__r5), "r"(__r6), \
  1435. "r"(__r8), "r"(__r9)); \
  1436. }
  1437. #endif
  /* sys_clone() for MIPS: start a new task running fn(arg) on child_stack.
   * In the parent, returns the child's pid, or -1 with errno set (EINVAL
   * if fn or child_stack is NULL; the kernel's error flag comes back in
   * $7/a3). fn and arg are stored on the child stack before the syscall;
   * the child loads fn into $25 (t9, as required for PIC calls via jalr),
   * calls it, and then _exit()s with its result. The stack adjustments
   * differ per ABI (o32 / n32 / n64), hence the preprocessor variants.
   */
  1438. LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack,
  1439. int flags, void *arg, int *parent_tidptr,
  1440. void *newtls, int *child_tidptr) {
  1441. unsigned long __v0 __asm__("$2");
  1442. unsigned long __r7 __asm__("$7") = (unsigned long)newtls;
  1443. {
  1444. int __flags __asm__("$4") = flags;
  1445. void *__stack __asm__("$5") = child_stack;
  1446. void *__ptid __asm__("$6") = parent_tidptr;
  1447. int *__ctid __asm__("$8") = child_tidptr;
  1448. __asm__ __volatile__(
  1449. #if _MIPS_SIM == _MIPS_SIM_ABI32 && _MIPS_SZPTR == 32
  1450. "subu $29,24\n"
  1451. #elif _MIPS_SIM == _MIPS_SIM_NABI32
  1452. "sub $29,16\n"
  1453. #else
  1454. "dsubu $29,16\n"
  1455. #endif
  1456. /* if (fn == NULL || child_stack == NULL)
  1457. * return -EINVAL;
  1458. */
  1459. "li %0,%2\n"
  1460. "beqz %5,1f\n"
  1461. "beqz %6,1f\n"
  1462. /* Push "arg" and "fn" onto the stack that will be
  1463. * used by the child.
  1464. */
  1465. #if _MIPS_SIM == _MIPS_SIM_ABI32 && _MIPS_SZPTR == 32
  1466. "subu %6,32\n"
  1467. "sw %5,0(%6)\n"
  1468. "sw %8,4(%6)\n"
  1469. #elif _MIPS_SIM == _MIPS_SIM_NABI32
  1470. "sub %6,32\n"
  1471. "sw %5,0(%6)\n"
  1472. "sw %8,8(%6)\n"
  1473. #else
  1474. "dsubu %6,32\n"
  1475. "sd %5,0(%6)\n"
  1476. "sd %8,8(%6)\n"
  1477. #endif
  1478. /* $7 = syscall($4 = flags,
  1479. * $5 = child_stack,
  1480. * $6 = parent_tidptr,
  1481. * $7 = newtls,
  1482. * $8 = child_tidptr)
  1483. */
  1484. "li $2,%3\n"
  1485. "syscall\n"
  1486. /* if ($7 != 0)
  1487. * return $2;
  1488. */
  1489. "bnez $7,1f\n"
  1490. "bnez $2,1f\n"
  1491. /* In the child, now. Call "fn(arg)".
  1492. */
  1493. #if _MIPS_SIM == _MIPS_SIM_ABI32 && _MIPS_SZPTR == 32
  1494. "lw $25,0($29)\n"
  1495. "lw $4,4($29)\n"
  1496. #elif _MIPS_SIM == _MIPS_SIM_NABI32
  1497. "lw $25,0($29)\n"
  1498. "lw $4,8($29)\n"
  1499. #else
  1500. "ld $25,0($29)\n"
  1501. "ld $4,8($29)\n"
  1502. #endif
  1503. "jalr $25\n"
  1504. /* Call _exit($2)
  1505. */
  1506. "move $4,$2\n"
  1507. "li $2,%4\n"
  1508. "syscall\n"
  1509. "1:\n"
  1510. #if _MIPS_SIM == _MIPS_SIM_ABI32 && _MIPS_SZPTR == 32
  1511. "addu $29, 24\n"
  1512. #elif _MIPS_SIM == _MIPS_SIM_NABI32
  1513. "add $29, 16\n"
  1514. #else
  1515. "daddu $29,16\n"
  1516. #endif
  1517. : "=&r" (__v0), "=r" (__r7)
  1518. : "i"(-EINVAL), "i"(__NR_clone), "i"(__NR_exit),
  1519. "r"(fn), "r"(__stack), "r"(__flags), "r"(arg),
  1520. "r"(__ptid), "r"(__r7), "r"(__ctid)
  1521. : "$9", "$10", "$11", "$12", "$13", "$14", "$15",
  1522. "$24", "memory");
  1523. }
  1524. LSS_RETURN(int, __v0, __r7);
  1525. }
  1526. #elif defined (__PPC__)
  1527. #undef LSS_LOADARGS_0
  1528. #define LSS_LOADARGS_0(name, dummy...) \
  1529. __sc_0 = __NR_##name
  1530. #undef LSS_LOADARGS_1
  1531. #define LSS_LOADARGS_1(name, arg1) \
  1532. LSS_LOADARGS_0(name); \
  1533. __sc_3 = (unsigned long) (arg1)
  1534. #undef LSS_LOADARGS_2
  1535. #define LSS_LOADARGS_2(name, arg1, arg2) \
  1536. LSS_LOADARGS_1(name, arg1); \
  1537. __sc_4 = (unsigned long) (arg2)
  1538. #undef LSS_LOADARGS_3
  1539. #define LSS_LOADARGS_3(name, arg1, arg2, arg3) \
  1540. LSS_LOADARGS_2(name, arg1, arg2); \
  1541. __sc_5 = (unsigned long) (arg3)
  1542. #undef LSS_LOADARGS_4
  1543. #define LSS_LOADARGS_4(name, arg1, arg2, arg3, arg4) \
  1544. LSS_LOADARGS_3(name, arg1, arg2, arg3); \
  1545. __sc_6 = (unsigned long) (arg4)
  1546. #undef LSS_LOADARGS_5
  1547. #define LSS_LOADARGS_5(name, arg1, arg2, arg3, arg4, arg5) \
  1548. LSS_LOADARGS_4(name, arg1, arg2, arg3, arg4); \
  1549. __sc_7 = (unsigned long) (arg5)
  1550. #undef LSS_LOADARGS_6
  1551. #define LSS_LOADARGS_6(name, arg1, arg2, arg3, arg4, arg5, arg6) \
  1552. LSS_LOADARGS_5(name, arg1, arg2, arg3, arg4, arg5); \
  1553. __sc_8 = (unsigned long) (arg6)
  1554. #undef LSS_ASMINPUT_0
  1555. #define LSS_ASMINPUT_0 "0" (__sc_0)
  1556. #undef LSS_ASMINPUT_1
  1557. #define LSS_ASMINPUT_1 LSS_ASMINPUT_0, "1" (__sc_3)
  1558. #undef LSS_ASMINPUT_2
  1559. #define LSS_ASMINPUT_2 LSS_ASMINPUT_1, "2" (__sc_4)
  1560. #undef LSS_ASMINPUT_3
  1561. #define LSS_ASMINPUT_3 LSS_ASMINPUT_2, "3" (__sc_5)
  1562. #undef LSS_ASMINPUT_4
  1563. #define LSS_ASMINPUT_4 LSS_ASMINPUT_3, "4" (__sc_6)
  1564. #undef LSS_ASMINPUT_5
  1565. #define LSS_ASMINPUT_5 LSS_ASMINPUT_4, "5" (__sc_7)
  1566. #undef LSS_ASMINPUT_6
  1567. #define LSS_ASMINPUT_6 LSS_ASMINPUT_5, "6" (__sc_8)
#undef  LSS_BODY
/* Emit a PowerPC system call with <nr> arguments.  LSS_LOADARGS_<nr>
 * stages the syscall number and arguments in the __sc_* register
 * variables (r0, r3..r8).  After "sc", r3 holds the result and the
 * condition register -- captured into __sc_0 via "mfcr" -- carries the
 * kernel's error indication; LSS_RETURN() (defined elsewhere in this
 * file) decodes the (__sc_ret, __sc_err) pair into a return value or
 * errno.  r9-r12 are declared clobbered since the kernel does not
 * preserve them.
 */
#define LSS_BODY(nr, type, name, args...)                             \
    long __sc_ret, __sc_err;                                          \
    {                                                                 \
        unsigned long __sc_0 __asm__ ("r0");                          \
        unsigned long __sc_3 __asm__ ("r3");                          \
        unsigned long __sc_4 __asm__ ("r4");                          \
        unsigned long __sc_5 __asm__ ("r5");                          \
        unsigned long __sc_6 __asm__ ("r6");                          \
        unsigned long __sc_7 __asm__ ("r7");                          \
        unsigned long __sc_8 __asm__ ("r8");                          \
                                                                      \
        LSS_LOADARGS_##nr(name, args);                                \
        __asm__ __volatile__                                          \
            ("sc\n\t"                                                 \
             "mfcr %0"                                                \
             : "=&r" (__sc_0),                                        \
               "=&r" (__sc_3), "=&r" (__sc_4),                        \
               "=&r" (__sc_5), "=&r" (__sc_6),                        \
               "=&r" (__sc_7), "=&r" (__sc_8)                         \
             : LSS_ASMINPUT_##nr                                      \
             : "cr0", "ctr", "memory",                                \
               "r9", "r10", "r11", "r12");                            \
        __sc_ret = __sc_3;                                            \
        __sc_err = __sc_0;                                            \
    }                                                                 \
    LSS_RETURN(type, __sc_ret, __sc_err)
/* _syscall<n>(type, name, type1, arg1, ...) defines a function
 * LSS_NAME(name) with <n> typed parameters whose body traps into the
 * kernel via LSS_BODY above.  These override any earlier architecture
 * definitions of the same macros.
 */
#undef  _syscall0
#define _syscall0(type, name)                                          \
    type LSS_NAME(name)(void) {                                        \
        LSS_BODY(0, type, name);                                       \
    }
#undef  _syscall1
#define _syscall1(type, name, type1, arg1)                             \
    type LSS_NAME(name)(type1 arg1) {                                  \
        LSS_BODY(1, type, name, arg1);                                 \
    }
#undef  _syscall2
#define _syscall2(type, name, type1, arg1, type2, arg2)                \
    type LSS_NAME(name)(type1 arg1, type2 arg2) {                      \
        LSS_BODY(2, type, name, arg1, arg2);                           \
    }
#undef  _syscall3
#define _syscall3(type, name, type1, arg1, type2, arg2, type3, arg3)   \
    type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3) {          \
        LSS_BODY(3, type, name, arg1, arg2, arg3);                     \
    }
#undef  _syscall4
#define _syscall4(type, name, type1, arg1, type2, arg2, type3, arg3,   \
                  type4, arg4)                                         \
    type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4) { \
        LSS_BODY(4, type, name, arg1, arg2, arg3, arg4);               \
    }
#undef  _syscall5
#define _syscall5(type, name, type1, arg1, type2, arg2, type3, arg3,   \
                  type4, arg4, type5, arg5)                            \
    type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                        type5 arg5) {                                  \
        LSS_BODY(5, type, name, arg1, arg2, arg3, arg4, arg5);         \
    }
#undef  _syscall6
#define _syscall6(type, name, type1, arg1, type2, arg2, type3, arg3,   \
                  type4, arg4, type5, arg5, type6, arg6)               \
    type LSS_NAME(name)(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
                        type5 arg5, type6 arg6) {                      \
        LSS_BODY(6, type, name, arg1, arg2, arg3, arg4, arg5, arg6);   \
    }
/* clone function adapted from glibc 2.3.6 clone.S */
/* TODO(csilvers): consider wrapping some args up in a struct, like we
 * do for i386's _syscall6, so we can compile successfully on gcc 2.95
 */
/* PowerPC clone(): invokes sys_clone, and in the child sets up a fresh
 * stack frame, calls fn(arg), then _exit()s with its return value.
 * Returns the child's pid in the parent (via LSS_RETURN decoding).
 */
LSS_INLINE int LSS_NAME(clone)(int (*fn)(void *), void *child_stack,
                               int flags, void *arg, int *parent_tidptr,
                               void *newtls, int *child_tidptr) {
  long __ret, __err;
  {
    /* Pin each value to the register sys_clone expects (flags in r3,
     * stack in r4, tid pointers/tls in r5-r7); fn and arg sit in r8/r9,
     * outside the kernel's argument registers.
     */
    int   (*__fn)(void *) __asm__ ("r8") = fn;
    void   *__cstack      __asm__ ("r4") = child_stack;
    int     __flags       __asm__ ("r3") = flags;
    void   *__arg         __asm__ ("r9") = arg;
    int    *__ptidptr     __asm__ ("r5") = parent_tidptr;
    void   *__newtls      __asm__ ("r6") = newtls;
    int    *__ctidptr     __asm__ ("r7") = child_tidptr;
    __asm__ __volatile__(
        /* check for fn == NULL
         * and child_stack == NULL
         */
        "cmpwi cr0, %6, 0\n\t"
        "cmpwi cr1, %7, 0\n\t"
        "cror cr0*4+eq, cr1*4+eq, cr0*4+eq\n\t"
        "beq- cr0, 1f\n\t"
        /* set up stack frame for child: 16-byte align, then push a
         * zeroed back-chain word
         */
        "clrrwi %7, %7, 4\n\t"
        "li 0, 0\n\t"
        "stwu 0, -16(%7)\n\t"
        /* fn, child_stack and arg are saved across the syscall in
         * r28, r29 and r27 respectively (all in the clobber list)
         */
        "mr 28, %6\n\t"
        "mr 29, %7\n\t"
        "mr 27, %9\n\t"
        /* syscall */
        "li 0, %4\n\t"
        /* flags already in r3
         * child_stack already in r4
         * ptidptr already in r5
         * newtls already in r6
         * ctidptr already in r7
         */
        "sc\n\t"
        /* Test if syscall was successful: in the child r3 == 0 and
         * cr0.SO is clear
         */
        "cmpwi cr1, 3, 0\n\t"
        "crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t"
        "bne- cr1, 1f\n\t"
        /* Do the function call: fn(arg) via ctr */
        "mtctr 28\n\t"
        "mr 3, 27\n\t"
        "bctrl\n\t"
        /* Call _exit(r3) */
        "li 0, %5\n\t"
        "sc\n\t"
        /* Return to parent */
        "1:\n"
        "mfcr %1\n\t"
        "mr %0, 3\n\t"
        : "=r" (__ret), "=r" (__err)
        : "0" (-1), "1" (EINVAL),
          "i" (__NR_clone), "i" (__NR_exit),
          "r" (__fn), "r" (__cstack), "r" (__flags),
          "r" (__arg), "r" (__ptidptr), "r" (__newtls),
          "r" (__ctidptr)
        : "cr0", "cr1", "memory", "ctr",
          "r0", "r29", "r27", "r28");
    /* NOTE(review): on the fn==NULL/child_stack==NULL path we branch
     * straight to "1:", where mfcr/mr overwrite the "0"(-1)/"1"(EINVAL)
     * initial values -- confirm LSS_RETURN actually reports EINVAL on
     * that path rather than a success value.
     */
  }
  LSS_RETURN(int, __ret, __err);
}
  1702. #endif
/* Generic syscall stubs shared by all architectures that reach this
 * point.  Names with a leading underscore (_exit, _gettid, _mremap) are
 * raw kernel entry points; friendlier wrappers (gettid, mremap, ...)
 * are defined further below.
 */
#define __NR__exit   __NR_exit
#define __NR__gettid __NR_gettid
#define __NR__mremap __NR_mremap
LSS_INLINE _syscall1(int,     close,           int,         f)
LSS_INLINE _syscall1(int,     _exit,           int,         e)
LSS_INLINE _syscall3(int,     fcntl,           int,         f,
                     int,            c, long,   a)
LSS_INLINE _syscall2(int,     fstat,           int,         f,
                     struct kernel_stat*,   b)
LSS_INLINE _syscall4(int,     futex,           int*,        a,
                     int,            o, int,    v,
                     struct kernel_timespec*, t)
LSS_INLINE _syscall3(int,     getdents,        int,         f,
                     struct kernel_dirent*, d, int,    c)
#ifdef __NR_getdents64
LSS_INLINE _syscall3(int,     getdents64,      int,         f,
                     struct kernel_dirent64*, d, int,    c)
#endif
LSS_INLINE _syscall0(pid_t,   getpid)
LSS_INLINE _syscall0(pid_t,   getppid)
LSS_INLINE _syscall0(pid_t,   _gettid)
LSS_INLINE _syscall2(int,     kill,            pid_t,       p,
                     int,            s)
LSS_INLINE _syscall3(off_t,   lseek,           int,         f,
                     off_t,          o, int,    w)
LSS_INLINE _syscall2(int,     munmap,          void*,       s,
                     size_t,         l)
LSS_INLINE _syscall5(void*,   _mremap,         void*,       o,
                     size_t,         os, size_t, ns,
                     unsigned long,  f, void *, a)
LSS_INLINE _syscall3(int,     open,            const char*, p,
                     int,            f, int,    m)
LSS_INLINE _syscall2(int,     prctl,           int,         o,
                     long,           a)
LSS_INLINE _syscall4(long,    ptrace,          int,         r,
                     pid_t,          p, void *, a, void *, d)
LSS_INLINE _syscall3(ssize_t, read,            int,         f,
                     void *,         b, size_t, c)
LSS_INLINE _syscall4(int,     rt_sigaction,    int,         s,
                     const struct kernel_sigaction*, a,
                     struct kernel_sigaction*, o, size_t,   c)
LSS_INLINE _syscall4(int,     rt_sigprocmask,  int,         h,
                     const struct kernel_sigset_t*,  s,
                     struct kernel_sigset_t*,  o, size_t,   c);
LSS_INLINE _syscall0(int,     sched_yield)
LSS_INLINE _syscall2(int,     sigaltstack,     const stack_t*, s,
                     const stack_t*, o)
LSS_INLINE _syscall2(int,     stat,            const char*, f,
                     struct kernel_stat*,   b)
LSS_INLINE _syscall3(ssize_t, write,           int,         f,
                     const void *,   b, size_t, c)
#if defined(__NR_getcpu)
LSS_INLINE _syscall3(long,    getcpu,          unsigned *,  cpu,
                     unsigned *,     node, void *, unused);
#endif
/* On these ABIs socket() is a real syscall rather than a socketcall()
 * sub-function.
 */
#if defined(__x86_64__) || \
    (defined(__mips__) && _MIPS_SIM != _MIPS_SIM_ABI32)
LSS_INLINE _syscall3(int,     socket,          int,         d,
                     int,            t, int,    p)
#endif
#if defined(__x86_64__)
/* x86_64 has a full 64-bit mmap offset, so plain mmap() is used. */
LSS_INLINE _syscall6(void*, mmap,              void*, s,
                     size_t,        l, int,    p,
                     int,           f, int,    d,
                     off64_t,       o)
LSS_INLINE int LSS_NAME(sigaction)(int signum,
                                   const struct kernel_sigaction *act,
                                   struct kernel_sigaction *oldact) {
  /* On x86_64, the kernel requires us to always set our own
   * SA_RESTORER in order to be able to return from a signal handler.
   * This function must have a "magic" signature that the "gdb"
   * (and maybe the kernel?) can recognize.
   */
  if (act != NULL && !(act->sa_flags & SA_RESTORER)) {
    /* Patch a local copy so the caller's struct is left untouched. */
    struct kernel_sigaction a = *act;
    a.sa_flags   |= SA_RESTORER;
    a.sa_restorer = LSS_NAME(restore_rt)();
    return LSS_NAME(rt_sigaction)(signum, &a, oldact,
                                  (KERNEL_NSIG+7)/8);
  } else {
    return LSS_NAME(rt_sigaction)(signum, act, oldact,
                                  (KERNEL_NSIG+7)/8);
  }
}
/* Thin wrapper: rt_sigprocmask() with the kernel's sigset size. */
LSS_INLINE int LSS_NAME(sigprocmask)(int how,
                                     const struct kernel_sigset_t *set,
                                     struct kernel_sigset_t *oldset) {
  return LSS_NAME(rt_sigprocmask)(how, set, oldset, (KERNEL_NSIG+7)/8);
}
#endif
#if defined(__x86_64__) || \
    defined(__arm__) || \
    (defined(__mips__) && _MIPS_SIM != _MIPS_SIM_ABI32)
LSS_INLINE _syscall4(pid_t, wait4,            pid_t, p,
                     int*,          s, int,   o,
                     struct kernel_rusage*, r)
/* waitpid() implemented as wait4() with no rusage reporting. */
LSS_INLINE pid_t LSS_NAME(waitpid)(pid_t pid, int *status, int options){
  return LSS_NAME(wait4)(pid, status, options, 0);
}
#endif
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
LSS_INLINE _syscall4(int, openat, int, d, const char *, p, int, f, int, m)
#endif
  1806. LSS_INLINE int LSS_NAME(sigemptyset)(struct kernel_sigset_t *set) {
  1807. memset(&set->sig, 0, sizeof(set->sig));
  1808. return 0;
  1809. }
  1810. LSS_INLINE int LSS_NAME(sigfillset)(struct kernel_sigset_t *set) {
  1811. memset(&set->sig, -1, sizeof(set->sig));
  1812. return 0;
  1813. }
  1814. LSS_INLINE int LSS_NAME(sigaddset)(struct kernel_sigset_t *set,
  1815. int signum) {
  1816. if (signum < 1 || signum > (int)(8*sizeof(set->sig))) {
  1817. LSS_ERRNO = EINVAL;
  1818. return -1;
  1819. } else {
  1820. set->sig[(signum - 1)/(8*sizeof(set->sig[0]))]
  1821. |= 1UL << ((signum - 1) % (8*sizeof(set->sig[0])));
  1822. return 0;
  1823. }
  1824. }
  1825. LSS_INLINE int LSS_NAME(sigdelset)(struct kernel_sigset_t *set,
  1826. int signum) {
  1827. if (signum < 1 || signum > (int)(8*sizeof(set->sig))) {
  1828. LSS_ERRNO = EINVAL;
  1829. return -1;
  1830. } else {
  1831. set->sig[(signum - 1)/(8*sizeof(set->sig[0]))]
  1832. &= ~(1UL << ((signum - 1) % (8*sizeof(set->sig[0]))));
  1833. return 0;
  1834. }
  1835. }
#if defined(__i386__) || \
    defined(__arm__) || \
    (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32) || defined(__PPC__)
/* 32-bit ABIs: legacy syscall stubs, used directly or as ENOSYS
 * fallbacks by the sigaction()/sigprocmask() wrappers below.
 */
#define __NR__sigaction   __NR_sigaction
#define __NR__sigprocmask __NR_sigprocmask
LSS_INLINE _syscall2(int, fstat64,             int, f,
                     struct kernel_stat64 *, b)
LSS_INLINE _syscall5(int, _llseek,     uint, fd, ulong, hi, ulong, lo,
                     loff_t *, res, uint, wh)
#ifdef __PPC64__
LSS_INLINE _syscall6(void*, mmap,              void*, s,
                     size_t,        l, int,    p,
                     int,           f, int,    d,
                     off_t,         o)
#else
#ifndef __ARM_EABI__
/* Not available on ARM EABI Linux. */
LSS_INLINE _syscall1(void*, mmap,              void*, a)
#endif
/* mmap2() takes the offset in page units, giving 32-bit ABIs a large
 * file offset range.
 */
LSS_INLINE _syscall6(void*, mmap2,             void*, s,
                     size_t,        l, int,    p,
                     int,           f, int,    d,
                     off_t,         o)
#endif
LSS_INLINE _syscall3(int,   _sigaction,        int,   s,
                     const struct kernel_old_sigaction*,  a,
                     struct kernel_old_sigaction*,  o)
LSS_INLINE _syscall3(int,   _sigprocmask,      int,   h,
                     const unsigned long*,          s,
                     unsigned long*,                o)
LSS_INLINE _syscall2(int, stat64,              const char *, p,
                     struct kernel_stat64 *, b)
/* Install a signal handler: try rt_sigaction() first and, on kernels
 * that report ENOSYS, fall back to the legacy _sigaction() syscall,
 * converting between the new and old sigaction layouts in both
 * directions.
 */
LSS_INLINE int LSS_NAME(sigaction)(int signum,
                                   const struct kernel_sigaction *act,
                                   struct kernel_sigaction *oldact) {
  int old_errno = LSS_ERRNO;   /* restored if we take the fallback */
  int rc;
  struct kernel_sigaction a;
  if (act != NULL) {
    a = *act;   /* local copy, so sa_flags/sa_restorer can be patched */
#ifdef __i386__
    /* On i386, the kernel requires us to always set our own
     * SA_RESTORER when using realtime signals. Otherwise, it does not
     * know how to return from a signal handler. This function must have
     * a "magic" signature that the "gdb" (and maybe the kernel?) can
     * recognize.
     * Apparently, a SA_RESTORER is implicitly set by the kernel, when
     * using non-realtime signals.
     *
     * TODO: Test whether ARM needs a restorer
     */
    if (!(a.sa_flags & SA_RESTORER)) {
      a.sa_flags |= SA_RESTORER;
      a.sa_restorer = (a.sa_flags & SA_SIGINFO)
                      ? LSS_NAME(restore_rt)() : LSS_NAME(restore)();
    }
#endif
  }
  rc = LSS_NAME(rt_sigaction)(signum, act ? &a : act, oldact,
                              (KERNEL_NSIG+7)/8);
  if (rc < 0 && LSS_ERRNO == ENOSYS) {
    /* Legacy fallback: repack the request into the old-style struct. */
    struct kernel_old_sigaction oa, ooa, *ptr_a = &oa, *ptr_oa = &ooa;
    if (!act) {
      ptr_a            = NULL;
    } else {
      oa.sa_handler_   = act->sa_handler_;
      memcpy(&oa.sa_mask, &act->sa_mask, sizeof(oa.sa_mask));
#ifndef __mips__
      oa.sa_restorer   = act->sa_restorer;
#endif
      oa.sa_flags      = act->sa_flags;
    }
    if (!oldact) {
      ptr_oa           = NULL;
    }
    LSS_ERRNO = old_errno;   /* hide the transient ENOSYS from callers */
    rc = LSS_NAME(_sigaction)(signum, ptr_a, ptr_oa);
    if (rc == 0 && oldact) {
      /* Widen the old-style result back into *oldact.  Seed it from
       * *act (or zeros) so fields the legacy syscall does not report
       * still hold defined values.
       */
      if (act) {
        memcpy(oldact, act, sizeof(*act));
      } else {
        memset(oldact, 0, sizeof(*oldact));
      }
      oldact->sa_handler_ = ptr_oa->sa_handler_;
      oldact->sa_flags    = ptr_oa->sa_flags;
      memcpy(&oldact->sa_mask, &ptr_oa->sa_mask, sizeof(ptr_oa->sa_mask));
#ifndef __mips__
      oldact->sa_restorer = ptr_oa->sa_restorer;
#endif
    }
  }
  return rc;
}
  1929. LSS_INLINE int LSS_NAME(sigprocmask)(int how,
  1930. const struct kernel_sigset_t *set,
  1931. struct kernel_sigset_t *oldset) {
  1932. int olderrno = LSS_ERRNO;
  1933. int rc = LSS_NAME(rt_sigprocmask)(how, set, oldset, (KERNEL_NSIG+7)/8);
  1934. if (rc < 0 && LSS_ERRNO == ENOSYS) {
  1935. LSS_ERRNO = olderrno;
  1936. if (oldset) {
  1937. LSS_NAME(sigemptyset)(oldset);
  1938. }
  1939. rc = LSS_NAME(_sigprocmask)(how,
  1940. set ? &set->sig[0] : NULL,
  1941. oldset ? &oldset->sig[0] : NULL);
  1942. }
  1943. return rc;
  1944. }
  1945. #endif
#if defined(__PPC__)
/* PowerPC routes the socket API through sys_socketcall().  The
 * LSS_SC_LOADARGS_<n> macros stage the call's arguments in r4..r8; the
 * asm in LSS_SC_BODY spills them into an on-stack array and points r4
 * at it, matching socketcall()'s (opt, arg-array) convention with the
 * sub-function selector in r3.
 */
#undef  LSS_SC_LOADARGS_0
#define LSS_SC_LOADARGS_0(dummy...)
#undef  LSS_SC_LOADARGS_1
#define LSS_SC_LOADARGS_1(arg1)                                       \
        __sc_4  = (unsigned long) (arg1)
#undef  LSS_SC_LOADARGS_2
#define LSS_SC_LOADARGS_2(arg1, arg2)                                 \
        LSS_SC_LOADARGS_1(arg1);                                      \
        __sc_5  = (unsigned long) (arg2)
#undef  LSS_SC_LOADARGS_3
#define LSS_SC_LOADARGS_3(arg1, arg2, arg3)                           \
        LSS_SC_LOADARGS_2(arg1, arg2);                                \
        __sc_6  = (unsigned long) (arg3)
#undef  LSS_SC_LOADARGS_4
#define LSS_SC_LOADARGS_4(arg1, arg2, arg3, arg4)                     \
        LSS_SC_LOADARGS_3(arg1, arg2, arg3);                          \
        __sc_7  = (unsigned long) (arg4)
#undef  LSS_SC_LOADARGS_5
#define LSS_SC_LOADARGS_5(arg1, arg2, arg3, arg4, arg5)               \
        LSS_SC_LOADARGS_4(arg1, arg2, arg3, arg4);                    \
        __sc_8  = (unsigned long) (arg5)
#undef  LSS_SC_BODY
/* NOTE(review): the input list reuses LSS_ASMINPUT_<nr>, whose
 * operands are offset for the direct-syscall layout; for <nr> args the
 * highest staged register (__sc_{nr+3}) is not named as an asm operand
 * and is only expected to survive via its register binding -- confirm
 * against upstream linux_syscall_support.h before changing.
 */
#define LSS_SC_BODY(nr, type, opt, args...)                           \
    long __sc_ret, __sc_err;                                          \
    {                                                                 \
        unsigned long __sc_0 __asm__ ("r0") = __NR_socketcall;        \
        unsigned long __sc_3 __asm__ ("r3") = opt;                    \
        unsigned long __sc_4 __asm__ ("r4");                          \
        unsigned long __sc_5 __asm__ ("r5");                          \
        unsigned long __sc_6 __asm__ ("r6");                          \
        unsigned long __sc_7 __asm__ ("r7");                          \
        unsigned long __sc_8 __asm__ ("r8");                          \
        LSS_SC_LOADARGS_##nr(args);                                   \
        __asm__ __volatile__                                          \
            ("stwu 1, -48(1)\n\t"                                     \
             "stw 4, 20(1)\n\t"                                       \
             "stw 5, 24(1)\n\t"                                       \
             "stw 6, 28(1)\n\t"                                       \
             "stw 7, 32(1)\n\t"                                       \
             "stw 8, 36(1)\n\t"                                       \
             "addi 4, 1, 20\n\t"                                      \
             "sc\n\t"                                                 \
             "mfcr %0"                                                \
             : "=&r" (__sc_0),                                        \
               "=&r" (__sc_3), "=&r" (__sc_4),                        \
               "=&r" (__sc_5), "=&r" (__sc_6),                        \
               "=&r" (__sc_7), "=&r" (__sc_8)                         \
             : LSS_ASMINPUT_##nr                                      \
             : "cr0", "ctr", "memory");                               \
        __sc_ret = __sc_3;                                            \
        __sc_err = __sc_0;                                            \
    }                                                                 \
    LSS_RETURN(type, __sc_ret, __sc_err)
/* socket() as socketcall sub-function 1 (SYS_SOCKET). */
LSS_INLINE int LSS_NAME(socket)(int domain, int type, int protocol) {
  LSS_SC_BODY(3, int, 1, domain, type, protocol);
}
#endif
#if defined(__i386__) || \
    (defined(__arm__) && !defined(__ARM_EABI__)) || \
    (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32)
/* See sys_socketcall in net/socket.c in kernel source.
 * It de-multiplexes on its first arg and unpacks the arglist
 * array in its second arg.
 */
LSS_INLINE _syscall2(long,      socketcall,    int,   c,
                     unsigned long*, a)
/* socket() as socketcall sub-function 1 (SYS_SOCKET). */
LSS_INLINE int LSS_NAME(socket)(int domain, int type, int protocol) {
  unsigned long args[3] = {
    (unsigned long) domain,
    (unsigned long) type,
    (unsigned long) protocol
  };
  return LSS_NAME(socketcall)(1, args);
}
#elif defined(__ARM_EABI__)
/* ARM EABI has a real socket() syscall. */
LSS_INLINE _syscall3(int,       socket,        int,   d,
                     int,   t, int,   p)
#endif
#if defined(__i386__) || defined(__PPC__) || \
    (defined(__mips__) && _MIPS_SIM == _MIPS_SIM_ABI32)
LSS_INLINE _syscall3(pid_t, waitpid,          pid_t, p,
                     int*,              s,    int,   o)
#endif
#if defined(__mips__)
/* sys_pipe() on MIPS has non-standard calling conventions, as it returns
 * both file handles through CPU registers.
 */
LSS_INLINE int LSS_NAME(pipe)(int *p) {
  unsigned long __v0 __asm__("$2") = __NR_pipe;  /* syscall no in, fd[0] out */
  unsigned long __v1 __asm__("$3");              /* fd[1] on success         */
  unsigned long __r7 __asm__("$7");              /* non-zero on failure      */
  __asm__ __volatile__ ("syscall\n"
                        : "=&r"(__v0), "=&r"(__v1), "+r" (__r7)
                        : "0"(__v0)
                        : "$8", "$9", "$10", "$11", "$12",
                          "$13", "$14", "$15", "$24", "memory");
  if (__r7) {
    /* On failure $2 holds the errno value rather than a descriptor. */
    LSS_ERRNO = __v0;
    return -1;
  } else {
    p[0]      = __v0;
    p[1]      = __v1;
    return 0;
  }
}
#else
LSS_INLINE _syscall1(int,     pipe,           int *, p)
#endif
  2054. LSS_INLINE pid_t LSS_NAME(gettid)() {
  2055. pid_t tid = LSS_NAME(_gettid)();
  2056. if (tid != -1) {
  2057. return tid;
  2058. }
  2059. return LSS_NAME(getpid)();
  2060. }
  2061. LSS_INLINE void *LSS_NAME(mremap)(void *old_address, size_t old_size,
  2062. size_t new_size, int flags, ...) {
  2063. va_list ap;
  2064. void *new_address, *rc;
  2065. va_start(ap, flags);
  2066. new_address = va_arg(ap, void *);
  2067. rc = LSS_NAME(_mremap)(old_address, old_size, new_size,
  2068. flags, new_address);
  2069. va_end(ap);
  2070. return rc;
  2071. }
  2072. LSS_INLINE int LSS_NAME(ptrace_detach)(pid_t pid) {
  2073. /* PTRACE_DETACH can sometimes forget to wake up the tracee and it
  2074. * then sends job control signals to the real parent, rather than to
  2075. * the tracer. We reduce the risk of this happening by starting a
  2076. * whole new time slice, and then quickly sending a SIGCONT signal
  2077. * right after detaching from the tracee.
  2078. */
  2079. int rc, err;
  2080. LSS_NAME(sched_yield)();
  2081. rc = LSS_NAME(ptrace)(PTRACE_DETACH, pid, (void *)0, (void *)0);
  2082. err = LSS_ERRNO;
  2083. LSS_NAME(kill)(pid, SIGCONT);
  2084. LSS_ERRNO = err;
  2085. return rc;
  2086. }
  2087. #endif
  2088. #if defined(__cplusplus) && !defined(SYS_CPLUSPLUS)
  2089. }
  2090. #endif
  2091. #endif
  2092. #endif