/*
 * kmp_error.cpp -- KPTS functions for error checking at runtime
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_error.h"
#include "kmp_i18n.h"
#include "kmp_str.h"

/* ------------------------------------------------------------------------ */
#define MIN_STACK 100

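/* Human-readable names for the enum cons_type values, indexed by construct
   type; __kmp_pragma() below uses this table when formatting error
   messages. */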
static char const *cons_text_c[] = {
    "(none)",
    "\"parallel\"",
    "work-sharing", /* this is not called "for" because of lowering of
                       "sections" pragmas */
    "\"ordered\" work-sharing", /* this is not called "for ordered" because of
                                   lowering of "sections" pragmas */
    "\"sections\"",
    "work-sharing", /* this is not called "single" because of lowering of
                       "sections" pragmas */
    "\"critical\"",
    "\"ordered\"", /* in PARALLEL */
    "\"ordered\"", /* in PDO */
    "\"master\"",
    "\"reduce\"",
    "\"barrier\"",
    "\"masked\""};

#define get_src(ident) ((ident) == NULL ? NULL : (ident)->psource)

#define PUSH_MSG(ct, ident)                                                    \
  "\tpushing on stack: %s (%s)\n", cons_text_c[(ct)], get_src((ident))
#define POP_MSG(p)                                                             \
  "\tpopping off stack: %s (%s)\n", cons_text_c[(p)->stack_data[tos].type],    \
      get_src((p)->stack_data[tos].ident)

static int const cons_text_c_num = sizeof(cons_text_c) / sizeof(char const *);

/* --------------- START OF STATIC LOCAL ROUTINES ------------------------- */

static void __kmp_check_null_func(void) { /* nothing to do */
}

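/* Grow a thread's construct stack roughly geometrically (doubling plus a
   constant) and copy the live entries into the new buffer. The old buffer is
   deliberately not freed; see the NOTE at the end of the function. */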
static void __kmp_expand_cons_stack(int gtid, struct cons_header *p) {
  int i;
  struct cons_data *d;

  /* TODO for monitor perhaps? */
  if (gtid < 0)
    __kmp_check_null_func();

  KE_TRACE(10, ("expand cons_stack (%d %d)\n", gtid, __kmp_get_gtid()));

  d = p->stack_data;

  p->stack_size = (p->stack_size * 2) + 100;

  /* TODO free the old data */
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (p->stack_size + 1));

  for (i = p->stack_top; i >= 0; --i)
    p->stack_data[i] = d[i];

  /* NOTE: we do not free the old stack_data */
}

// NOTE: Function returns allocated memory, caller must free it!
static char *__kmp_pragma(int ct, ident_t const *ident) {
  char const *cons = NULL; // Construct name.
  char *file = NULL; // File name.
  char *func = NULL; // Function (routine) name.
  char *line = NULL; // Line number.
  kmp_str_buf_t buffer;
  kmp_msg_t prgm;
  __kmp_str_buf_init(&buffer);
  if (0 < ct && ct < cons_text_c_num) {
    cons = cons_text_c[ct];
  } else {
    KMP_DEBUG_ASSERT(0);
  }
  if (ident != NULL && ident->psource != NULL) {
    char *tail = NULL;
    __kmp_str_buf_print(&buffer, "%s",
                        ident->psource); // Copy source to buffer.
    // Split string in buffer to file, func, and line.
    tail = buffer.str;
    __kmp_str_split(tail, ';', NULL, &tail);
    __kmp_str_split(tail, ';', &file, &tail);
    __kmp_str_split(tail, ';', &func, &tail);
    __kmp_str_split(tail, ';', &line, &tail);
  }
  prgm = __kmp_msg_format(kmp_i18n_fmt_Pragma, cons, file, func, line);
  __kmp_str_buf_free(&buffer);
  return prgm.str;
} // __kmp_pragma

/* ----------------- END OF STATIC LOCAL ROUTINES ------------------------- */

void __kmp_error_construct(kmp_i18n_id_t id, // Message identifier.
                           enum cons_type ct, // Construct type.
                           ident_t const *ident // Construct ident.
) {
  char *construct = __kmp_pragma(ct, ident);
  __kmp_fatal(__kmp_msg_format(id, construct), __kmp_msg_null);
  KMP_INTERNAL_FREE(construct);
}

void __kmp_error_construct2(kmp_i18n_id_t id, // Message identifier.
                            enum cons_type ct, // First construct type.
                            ident_t const *ident, // First construct ident.
                            struct cons_data const *cons // Second construct.
) {
  char *construct1 = __kmp_pragma(ct, ident);
  char *construct2 = __kmp_pragma(cons->type, cons->ident);
  __kmp_fatal(__kmp_msg_format(id, construct1, construct2), __kmp_msg_null);
  KMP_INTERNAL_FREE(construct1);
  KMP_INTERNAL_FREE(construct2);
}

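/* Allocate and initialize a per-thread construct stack. Entry 0 is a ct_none
   sentinel; p_top, w_top and s_top hold the stack indices of the innermost
   parallel, worksharing and sync entries, and each entry's .prev field links
   back to the enclosing entry of the same kind. */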
struct cons_header *__kmp_allocate_cons_stack(int gtid) {
  struct cons_header *p;

  /* TODO for monitor perhaps? */
  if (gtid < 0) {
    __kmp_check_null_func();
  }
  KE_TRACE(10, ("allocate cons_stack (%d)\n", gtid));
  p = (struct cons_header *)__kmp_allocate(sizeof(struct cons_header));
  p->p_top = p->w_top = p->s_top = 0;
  p->stack_data = (struct cons_data *)__kmp_allocate(sizeof(struct cons_data) *
                                                     (MIN_STACK + 1));
  p->stack_size = MIN_STACK;
  p->stack_top = 0;
  p->stack_data[0].type = ct_none;
  p->stack_data[0].prev = 0;
  p->stack_data[0].ident = NULL;
  return p;
}

void __kmp_free_cons_stack(void *ptr) {
  struct cons_header *p = (struct cons_header *)ptr;
  if (p != NULL) {
    if (p->stack_data != NULL) {
      __kmp_free(p->stack_data);
      p->stack_data = NULL;
    }
    __kmp_free(p);
  }
}

#if KMP_DEBUG
static void dump_cons_stack(int gtid, struct cons_header *p) {
  int i;
  int tos = p->stack_top;
  kmp_str_buf_t buffer;
  __kmp_str_buf_init(&buffer);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_str_buf_print(&buffer,
                      "Begin construct stack with %d items for thread %d\n",
                      tos, gtid);
  __kmp_str_buf_print(&buffer, " stack_top=%d { P=%d, W=%d, S=%d }\n", tos,
                      p->p_top, p->w_top, p->s_top);
  for (i = tos; i > 0; i--) {
    struct cons_data *c = &(p->stack_data[i]);
    __kmp_str_buf_print(
        &buffer, " stack_data[%2d] = { %s (%s) %d %p }\n", i,
        cons_text_c[c->type], get_src(c->ident), c->prev, c->name);
  }
  __kmp_str_buf_print(&buffer, "End construct stack for thread %d\n", gtid);
  __kmp_str_buf_print(
      &buffer,
      "+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-\n");
  __kmp_debug_printf("%s", buffer.str);
  __kmp_str_buf_free(&buffer);
}
#endif

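/* Push an entry for a "parallel" construct: the new top of stack becomes the
   new p_top, and .prev records the previous p_top so __kmp_pop_parallel can
   restore it. */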
void __kmp_push_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_push_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  KE_TRACE(100, (PUSH_MSG(ct_parallel, ident)));
  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct_parallel;
  p->stack_data[tos].prev = p->p_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->p_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

void __kmp_check_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_DEBUG_ASSERT(__kmp_threads[gtid]->th.th_cons);
  KE_TRACE(10, ("__kmp_check_workshare (%d %d)\n", gtid, __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size) {
    __kmp_expand_cons_stack(gtid, p);
  }
  if (p->w_top > p->p_top) {
    // We are already in a WORKSHARE construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    // We are already in a SYNC construct for this PARALLEL region.
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}

void __kmp_push_workshare(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_push_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  __kmp_check_workshare(gtid, ct, ident);
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->w_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = NULL;
  p->w_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

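/* Verify that a synchronization construct (ordered, critical, master/masked,
   reduce) is legally nested given the current construct stack. With
   KMP_USE_DYNAMIC_LOCK a lock is identified by the (lck, seq) pair rather
   than by lck alone, hence the two signatures below. */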
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_check_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KE_TRACE(10, ("__kmp_check_sync (gtid=%d)\n", __kmp_get_gtid()));

  if (p->stack_top >= p->stack_size)
    __kmp_expand_cons_stack(gtid, p);

  if (ct == ct_ordered_in_parallel || ct == ct_ordered_in_pdo) {
    if (p->w_top <= p->p_top) {
      /* we are not in a worksharing construct */
#ifdef BUILD_PARALLEL_ORDERED
      /* do not report error messages for PARALLEL ORDERED */
      KMP_ASSERT(ct == ct_ordered_in_parallel);
#else
      __kmp_error_construct(kmp_i18n_msg_CnsBoundToWorksharing, ct, ident);
#endif /* BUILD_PARALLEL_ORDERED */
    } else {
      /* inside a WORKSHARING construct for this PARALLEL region */
      if (!IS_CONS_TYPE_ORDERED(p->stack_data[p->w_top].type)) {
        __kmp_error_construct2(kmp_i18n_msg_CnsNoOrderedClause, ct, ident,
                               &p->stack_data[p->w_top]);
      }
    }
    if (p->s_top > p->p_top && p->s_top > p->w_top) {
      /* inside a sync construct which is inside a worksharing construct */
      int index = p->s_top;
      enum cons_type stack_type;

      stack_type = p->stack_data[index].type;

      if (stack_type == ct_critical ||
          ((stack_type == ct_ordered_in_parallel ||
            stack_type == ct_ordered_in_pdo) &&
           /* C doesn't allow named ordered; ordered in ordered gets error */
           p->stack_data[index].ident != NULL &&
           (p->stack_data[index].ident->flags & KMP_IDENT_KMPC))) {
        /* we are in ORDERED which is inside an ORDERED or CRITICAL construct */
        __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                               &p->stack_data[index]);
      }
    }
  } else if (ct == ct_critical) {
#if KMP_USE_DYNAMIC_LOCK
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck, seq) ==
            gtid) { /* this thread already has lock for this critical section */
#else
    if (lck != NULL &&
        __kmp_get_user_lock_owner(lck) ==
            gtid) { /* this thread already has lock for this critical section */
#endif
      int index = p->s_top;
      struct cons_data cons = {NULL, ct_critical, 0, NULL};
      /* walk up construct stack and try to find critical with matching name */
      while (index != 0 && p->stack_data[index].name != lck) {
        index = p->stack_data[index].prev;
      }
      if (index != 0) {
        /* found match on the stack (may not always because of interleaved
         * critical for Fortran) */
        cons = p->stack_data[index];
      }
      /* we are in CRITICAL which is inside a CRITICAL construct of same name */
      __kmp_error_construct2(kmp_i18n_msg_CnsNestingSameName, ct, ident, &cons);
    }
  } else if (ct == ct_master || ct == ct_masked || ct == ct_reduce) {
    if (p->w_top > p->p_top) {
      /* inside a WORKSHARING construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->w_top]);
    }
    if (ct == ct_reduce && p->s_top > p->p_top) {
      /* inside another SYNC construct for this PARALLEL region */
      __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                             &p->stack_data[p->s_top]);
    }
  }
}

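/* Validate the sync construct via __kmp_check_sync(), then push it. The
   .name field stores the lock, which is how nested "critical" regions with
   the same name are detected by later checks. */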
void
#if KMP_USE_DYNAMIC_LOCK
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck, kmp_uint32 seq )
#else
__kmp_push_sync( int gtid, enum cons_type ct, ident_t const * ident, kmp_user_lock_p lck )
#endif
{
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  KMP_ASSERT(gtid == __kmp_get_gtid());
  KE_TRACE(10, ("__kmp_push_sync (gtid=%d)\n", gtid));
#if KMP_USE_DYNAMIC_LOCK
  __kmp_check_sync(gtid, ct, ident, lck, seq);
#else
  __kmp_check_sync(gtid, ct, ident, lck);
#endif
  KE_TRACE(100, (PUSH_MSG(ct, ident)));
  tos = ++p->stack_top;
  p->stack_data[tos].type = ct;
  p->stack_data[tos].prev = p->s_top;
  p->stack_data[tos].ident = ident;
  p->stack_data[tos].name = lck;
  p->s_top = tos;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */
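/* The __kmp_pop_* routines undo the corresponding pushes: each checks that
   the entry being popped is both on top of the stack and of the expected
   type, reports CnsDetectedEnd / CnsExpectedEnd otherwise, and restores the
   saved p_top / w_top / s_top link. */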
void __kmp_pop_parallel(int gtid, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_parallel (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->p_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct_parallel, ident);
  }
  if (tos != p->p_top || p->stack_data[tos].type != ct_parallel) {
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct_parallel, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->p_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

enum cons_type __kmp_pop_workshare(int gtid, enum cons_type ct,
                                   ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;

  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_workshare (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->w_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }

  if (tos != p->w_top ||
      (p->stack_data[tos].type != ct &&
       // below is the exception to the rule that construct types must match
       !(p->stack_data[tos].type == ct_pdo_ordered && ct == ct_pdo))) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->w_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
  return p->stack_data[p->w_top].type;
}

void __kmp_pop_sync(int gtid, enum cons_type ct, ident_t const *ident) {
  int tos;
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  tos = p->stack_top;
  KE_TRACE(10, ("__kmp_pop_sync (%d %d)\n", gtid, __kmp_get_gtid()));
  if (tos == 0 || p->s_top == 0) {
    __kmp_error_construct(kmp_i18n_msg_CnsDetectedEnd, ct, ident);
  }
  if (tos != p->s_top || p->stack_data[tos].type != ct) {
    __kmp_check_null_func();
    __kmp_error_construct2(kmp_i18n_msg_CnsExpectedEnd, ct, ident,
                           &p->stack_data[tos]);
  }
  KE_TRACE(100, (POP_MSG(p)));
  p->s_top = p->stack_data[tos].prev;
  p->stack_data[tos].type = ct_none;
  p->stack_data[tos].ident = NULL;
  p->stack_top = tos - 1;
  KE_DUMP(1000, dump_cons_stack(gtid, p));
}

/* ------------------------------------------------------------------------ */

void __kmp_check_barrier(int gtid, enum cons_type ct, ident_t const *ident) {
  struct cons_header *p = __kmp_threads[gtid]->th.th_cons;
  KE_TRACE(10, ("__kmp_check_barrier (loc: %p, gtid: %d %d)\n", ident, gtid,
                __kmp_get_gtid()));
  if (ident != 0) {
    __kmp_check_null_func();
  }
  if (p->w_top > p->p_top) {
    /* we are already in a WORKSHARING construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->w_top]);
  }
  if (p->s_top > p->p_top) {
    /* we are already in a SYNC construct for this PARALLEL region */
    __kmp_error_construct2(kmp_i18n_msg_CnsInvalidNesting, ct, ident,
                           &p->stack_data[p->s_top]);
  }
}