gpu_tree.c

/*
 * Copyright 2013 Ecole Normale Superieure
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege,
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */

#include <stdio.h>
#include <string.h>

#include <isl/set.h>
#include <isl/union_set.h>
#include <isl/space.h>

#include "gpu_tree.h"

/* The functions in this file are used to navigate part of a schedule tree
 * that is mapped to blocks.  Initially, this part consists of a linear
 * branch segment with a mark node with name "kernel" on the outer end
 * and a mark node with name "thread" on the inner end.
 * During the mapping to blocks, branching may be introduced, but only
 * one of the elements in each sequence contains the "thread" mark.
 * The filter of this element (and only this filter) contains
 * domain elements identified by the "core" argument of the functions
 * that move down this tree.
 *
 * Synchronization statements have a name that starts with "sync" and
 * a user pointer pointing to the kernel that contains the synchronization.
 * The functions inserting or detecting synchronizations take a ppcg_kernel
 * argument to be able to create or identify such statements.
 * They may also use two fields in this structure, the "core" field
 * to move around in the tree and the "n_sync" field to make sure that
 * each synchronization has a different name (within the kernel).
 */

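/* As an illustration (a sketch only; the statement groupings are
 * hypothetical), the part of the tree navigated by these functions may
 * look as follows after the mapping to blocks has introduced branching:
 *
 *	mark: "kernel"
 *	  band (block loops)
 *	    sequence
 *	      filter (other statements)
 *	      filter ("core" statements)
 *	        band
 *	          mark: "thread"
 *	      filter (other statements)
 *
 * Only the filter that intersects the "core" domain elements leads to the
 * "thread" mark, so it is the branch followed by the functions below that
 * move down the tree.
 */
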
/* Is "node" a mark node with an identifier called "name"?
 */
static int is_marked(__isl_keep isl_schedule_node *node, const char *name)
{
	isl_id *mark;
	int has_name;

	if (!node)
		return -1;

	if (isl_schedule_node_get_type(node) != isl_schedule_node_mark)
		return 0;

	mark = isl_schedule_node_mark_get_id(node);
	if (!mark)
		return -1;

	has_name = !strcmp(isl_id_get_name(mark), name);
	isl_id_free(mark);

	return has_name;
}

/* Is "node" a mark node with an identifier called "kernel"?
 */
int gpu_tree_node_is_kernel(__isl_keep isl_schedule_node *node)
{
	return is_marked(node, "kernel");
}

/* Is "node" a mark node with an identifier called "shared"?
 */
static int node_is_shared(__isl_keep isl_schedule_node *node)
{
	return is_marked(node, "shared");
}

/* Is "node" a mark node with an identifier called "thread"?
 */
static int node_is_thread(__isl_keep isl_schedule_node *node)
{
	return is_marked(node, "thread");
}

/* Insert a mark node with identifier "shared" in front of "node".
 */
static __isl_give isl_schedule_node *insert_shared(
	__isl_take isl_schedule_node *node)
{
	isl_ctx *ctx;
	isl_id *id;

	ctx = isl_schedule_node_get_ctx(node);
	id = isl_id_alloc(ctx, "shared", NULL);
	node = isl_schedule_node_insert_mark(node, id);

	return node;
}

/* Insert a "shared" mark in front of the "thread" mark
 * provided the linear branch between "node" and the "thread" mark
 * does not contain such a "shared" mark already.
 *
 * As a side effect, this function checks that the subtree at "node"
 * actually contains a "thread" mark and that there is no branching
 * in between "node" and this "thread" mark.
 */
__isl_give isl_schedule_node *gpu_tree_insert_shared_before_thread(
	__isl_take isl_schedule_node *node)
{
	int depth0, depth;
	int any_shared = 0;

	if (!node)
		return NULL;

	depth0 = isl_schedule_node_get_tree_depth(node);

	for (;;) {
		int is_thread;
		int n;

		if (!any_shared) {
			any_shared = node_is_shared(node);
			if (any_shared < 0)
				return isl_schedule_node_free(node);
		}
		is_thread = node_is_thread(node);
		if (is_thread < 0)
			return isl_schedule_node_free(node);
		if (is_thread)
			break;

		n = isl_schedule_node_n_children(node);
		if (n == 0)
			isl_die(isl_schedule_node_get_ctx(node),
				isl_error_invalid,
				"no thread marker found",
				return isl_schedule_node_free(node));
		if (n > 1)
			isl_die(isl_schedule_node_get_ctx(node),
				isl_error_invalid,
				"expecting single thread marker",
				return isl_schedule_node_free(node));

		node = isl_schedule_node_child(node, 0);
	}

	if (!any_shared)
		node = insert_shared(node);
	depth = isl_schedule_node_get_tree_depth(node);
	node = isl_schedule_node_ancestor(node, depth - depth0);

	return node;
}

/* Assuming "node" is a filter node, does it correspond to the branch
 * that contains the "thread" mark, i.e., does it contain any elements
 * in "core"?
 */
static int node_is_core(__isl_keep isl_schedule_node *node,
	__isl_keep isl_union_set *core)
{
	int disjoint;
	isl_union_set *filter;

	filter = isl_schedule_node_filter_get_filter(node);
	disjoint = isl_union_set_is_disjoint(filter, core);
	isl_union_set_free(filter);
	if (disjoint < 0)
		return -1;

	return !disjoint;
}

/* Move to the only child of "node" that has the "thread" mark as descendant,
 * where the branch containing this mark is identified by the domain elements
 * in "core".
 *
 * If "node" is not a sequence, then it only has one child and we move
 * to that single child.
 * Otherwise, we check each of the filters in the children, pick
 * the one that corresponds to "core" and return a pointer to the child
 * of the filter node.
 */
static __isl_give isl_schedule_node *core_child(
	__isl_take isl_schedule_node *node, __isl_keep isl_union_set *core)
{
	int i, n;

	if (isl_schedule_node_get_type(node) != isl_schedule_node_sequence)
		return isl_schedule_node_child(node, 0);

	n = isl_schedule_node_n_children(node);
	for (i = 0; i < n; ++i) {
		int is_core;

		node = isl_schedule_node_child(node, i);
		is_core = node_is_core(node, core);
		if (is_core < 0)
			return isl_schedule_node_free(node);
		if (is_core)
			return isl_schedule_node_child(node, 0);

		node = isl_schedule_node_parent(node);
	}

	isl_die(isl_schedule_node_get_ctx(node), isl_error_internal,
		"core child not found", return isl_schedule_node_free(node));
}

/* Move down the branch between "kernel" and "thread" until
 * the "shared" mark is reached, where the branch containing the "shared"
 * mark is identified by the domain elements in "core".
 */
__isl_give isl_schedule_node *gpu_tree_move_down_to_shared(
	__isl_take isl_schedule_node *node, __isl_keep isl_union_set *core)
{
	int is_shared;

	while ((is_shared = node_is_shared(node)) == 0)
		node = core_child(node, core);
	if (is_shared < 0)
		node = isl_schedule_node_free(node);

	return node;
}

/* Move down the branch between "kernel" and "thread" until
 * the "thread" mark is reached, where the branch containing the "thread"
 * mark is identified by the domain elements in "core".
 */
__isl_give isl_schedule_node *gpu_tree_move_down_to_thread(
	__isl_take isl_schedule_node *node, __isl_keep isl_union_set *core)
{
	int is_thread;

	while ((is_thread = node_is_thread(node)) == 0)
		node = core_child(node, core);
	if (is_thread < 0)
		node = isl_schedule_node_free(node);

	return node;
}

/* Move up the tree underneath the "thread" mark until
 * the "thread" mark is reached.
 */
__isl_give isl_schedule_node *gpu_tree_move_up_to_thread(
	__isl_take isl_schedule_node *node)
{
	int is_thread;

	while ((is_thread = node_is_thread(node)) == 0)
		node = isl_schedule_node_parent(node);
	if (is_thread < 0)
		node = isl_schedule_node_free(node);

	return node;
}

/* Move up the tree underneath the "kernel" mark until
 * the "kernel" mark is reached.
 */
__isl_give isl_schedule_node *gpu_tree_move_up_to_kernel(
	__isl_take isl_schedule_node *node)
{
	int is_kernel;

	while ((is_kernel = gpu_tree_node_is_kernel(node)) == 0)
		node = isl_schedule_node_parent(node);
	if (is_kernel < 0)
		node = isl_schedule_node_free(node);

	return node;
}

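/* As a usage sketch (the surrounding code is hypothetical), a caller that
 * starts at the "kernel" mark can descend to the "thread" mark, transform
 * the subtree below it and then return to the mark:
 *
 *	node = gpu_tree_move_down_to_thread(node, kernel->core);
 *	... modify the subtree underneath the "thread" mark ...
 *	node = gpu_tree_move_up_to_kernel(node);
 */
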
/* Move down from the "kernel" mark (or at least a node with schedule
 * depth smaller than or equal to "depth") to a band node at schedule
 * depth "depth".  The "thread" mark is assumed to have a schedule
 * depth greater than or equal to "depth".  The branch containing the
 * "thread" mark is identified by the domain elements in "core".
 *
 * If the desired schedule depth is in the middle of a band node,
 * then the band node is split into two pieces, the second piece
 * at the desired schedule depth.
 */
__isl_give isl_schedule_node *gpu_tree_move_down_to_depth(
	__isl_take isl_schedule_node *node, int depth,
	__isl_keep isl_union_set *core)
{
	int is_shared;
	int is_thread = 0;

	while (node && isl_schedule_node_get_schedule_depth(node) < depth) {
		if (isl_schedule_node_get_type(node) ==
						isl_schedule_node_band) {
			int node_depth, node_dim;

			node_depth = isl_schedule_node_get_schedule_depth(node);
			node_dim = isl_schedule_node_band_n_member(node);
			if (node_depth + node_dim > depth)
				node = isl_schedule_node_band_split(node,
							depth - node_depth);
		}
		node = core_child(node, core);
	}
	while ((is_shared = node_is_shared(node)) == 0 &&
	    (is_thread = node_is_thread(node)) == 0 &&
	    isl_schedule_node_get_type(node) != isl_schedule_node_band)
		node = core_child(node, core);
	if (is_shared < 0 || is_thread < 0)
		node = isl_schedule_node_free(node);

	return node;
}

/* Create a union set containing a single set with a tuple identifier
 * called "syncX" and user pointer equal to "kernel".
 */
static __isl_give isl_union_set *create_sync_domain(struct ppcg_kernel *kernel)
{
	isl_space *space;
	isl_id *id;
	char name[40];

	space = isl_space_set_alloc(kernel->ctx, 0, 0);
	snprintf(name, sizeof(name), "sync%d", kernel->n_sync++);
	id = isl_id_alloc(kernel->ctx, name, kernel);
	space = isl_space_set_tuple_id(space, isl_dim_set, id);

	return isl_union_set_from_set(isl_set_universe(space));
}

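/* For example, if kernel->n_sync is 2 when create_sync_domain is called,
 * the returned union set consists of the single zero-dimensional universe
 * set
 *
 *	{ sync2[] }
 *
 * with "kernel" as the user pointer of its tuple identifier,
 * and kernel->n_sync is incremented to 3.
 */
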
/* Is "id" the identifier of a synchronization statement inside "kernel"?
 * That is, does its name start with "sync" and does its user pointer
 * point to "kernel"?
 */
int gpu_tree_id_is_sync(__isl_keep isl_id *id, struct ppcg_kernel *kernel)
{
	const char *name;

	name = isl_id_get_name(id);
	if (!name)
		return 0;
	else if (strncmp(name, "sync", 4))
		return 0;
	return isl_id_get_user(id) == kernel;
}

/* Does "domain" consist of a single set with a tuple identifier
 * corresponding to a synchronization for "kernel"?
 */
static int domain_is_sync(__isl_keep isl_union_set *domain,
	struct ppcg_kernel *kernel)
{
	int is_sync;
	isl_id *id;
	isl_set *set;

	if (isl_union_set_n_set(domain) != 1)
		return 0;
	set = isl_set_from_union_set(isl_union_set_copy(domain));
	id = isl_set_get_tuple_id(set);
	is_sync = gpu_tree_id_is_sync(id, kernel);
	isl_id_free(id);
	isl_set_free(set);

	return is_sync;
}

/* Does "node" point to a filter selecting a synchronization statement
 * for "kernel"?
 */
static int node_is_sync_filter(__isl_keep isl_schedule_node *node,
	struct ppcg_kernel *kernel)
{
	int is_sync;
	enum isl_schedule_node_type type;
	isl_union_set *domain;

	if (!node)
		return -1;
	type = isl_schedule_node_get_type(node);
	if (type != isl_schedule_node_filter)
		return 0;
	domain = isl_schedule_node_filter_get_filter(node);
	is_sync = domain_is_sync(domain, kernel);
	isl_union_set_free(domain);

	return is_sync;
}

/* Is "node" part of a sequence with a previous synchronization statement
 * for "kernel"?
 * That is, is the parent of "node" a filter such that there is
 * a previous filter that picks out exactly such a synchronization statement?
 */
static int has_preceding_sync(__isl_keep isl_schedule_node *node,
	struct ppcg_kernel *kernel)
{
	int found = 0;

	node = isl_schedule_node_copy(node);
	node = isl_schedule_node_parent(node);
	while (!found && isl_schedule_node_has_previous_sibling(node)) {
		node = isl_schedule_node_previous_sibling(node);
		if (!node)
			break;
		found = node_is_sync_filter(node, kernel);
	}
	if (!node)
		found = -1;
	isl_schedule_node_free(node);

	return found;
}

/* Is "node" part of a sequence with a subsequent synchronization statement
 * for "kernel"?
 * That is, is the parent of "node" a filter such that there is
 * a subsequent filter that picks out exactly such a synchronization statement?
 */
static int has_following_sync(__isl_keep isl_schedule_node *node,
	struct ppcg_kernel *kernel)
{
	int found = 0;

	node = isl_schedule_node_copy(node);
	node = isl_schedule_node_parent(node);
	while (!found && isl_schedule_node_has_next_sibling(node)) {
		node = isl_schedule_node_next_sibling(node);
		if (!node)
			break;
		found = node_is_sync_filter(node, kernel);
	}
	if (!node)
		found = -1;
	isl_schedule_node_free(node);

	return found;
}

/* Does the subtree rooted at "node" (which is a band node) contain
 * any synchronization statement for "kernel" that precedes
 * the core computation of "kernel" (identified by the elements
 * in kernel->core)?
 */
static int has_sync_before_core(__isl_keep isl_schedule_node *node,
	struct ppcg_kernel *kernel)
{
	int has_sync = 0;
	int is_thread;

	node = isl_schedule_node_copy(node);
	while ((is_thread = node_is_thread(node)) == 0) {
		node = core_child(node, kernel->core);
		has_sync = has_preceding_sync(node, kernel);
		if (has_sync < 0 || has_sync)
			break;
	}
	if (is_thread < 0 || !node)
		has_sync = -1;
	isl_schedule_node_free(node);

	return has_sync;
}

/* Does the subtree rooted at "node" (which is a band node) contain
 * any synchronization statement for "kernel" that follows
 * the core computation of "kernel" (identified by the elements
 * in kernel->core)?
 */
static int has_sync_after_core(__isl_keep isl_schedule_node *node,
	struct ppcg_kernel *kernel)
{
	int has_sync = 0;
	int is_thread;

	node = isl_schedule_node_copy(node);
	while ((is_thread = node_is_thread(node)) == 0) {
		node = core_child(node, kernel->core);
		has_sync = has_following_sync(node, kernel);
		if (has_sync < 0 || has_sync)
			break;
	}
	if (is_thread < 0 || !node)
		has_sync = -1;
	isl_schedule_node_free(node);

	return has_sync;
}

/* Insert (or extend) an extension on top of "node" that puts
 * a synchronization node for "kernel" before "node".
 * Return a pointer to the original node in the updated schedule tree.
 */
static __isl_give isl_schedule_node *insert_sync_before(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	isl_union_set *domain;
	isl_schedule_node *graft;

	if (!node)
		return NULL;

	domain = create_sync_domain(kernel);
	graft = isl_schedule_node_from_domain(domain);
	node = isl_schedule_node_graft_before(node, graft);

	return node;
}

/* Insert (or extend) an extension on top of "node" that puts
 * a synchronization node for "kernel" after "node".
 * Return a pointer to the original node in the updated schedule tree.
 */
static __isl_give isl_schedule_node *insert_sync_after(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	isl_union_set *domain;
	isl_schedule_node *graft;

	if (!node)
		return NULL;

	domain = create_sync_domain(kernel);
	graft = isl_schedule_node_from_domain(domain);
	node = isl_schedule_node_graft_after(node, graft);

	return node;
}

/* Insert an extension on top of "node" that puts a synchronization node
 * for "kernel" before "node" unless there already is
 * such a synchronization node.
 */
__isl_give isl_schedule_node *gpu_tree_ensure_preceding_sync(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	int has_sync;

	has_sync = has_preceding_sync(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	return insert_sync_before(node, kernel);
}

/* Insert an extension on top of "node" that puts a synchronization node
 * for "kernel" after "node" unless there already is
 * such a synchronization node.
 */
__isl_give isl_schedule_node *gpu_tree_ensure_following_sync(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	int has_sync;

	has_sync = has_following_sync(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	return insert_sync_after(node, kernel);
}

/* Insert an extension on top of "node" that puts a synchronization node
 * for "kernel" after "node" unless there already is
 * such a synchronization node or unless "node" itself already contains
 * a synchronization node following the core computation of "kernel".
 */
__isl_give isl_schedule_node *gpu_tree_ensure_sync_after_core(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	int has_sync;

	has_sync = has_sync_after_core(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	has_sync = has_following_sync(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	return insert_sync_after(node, kernel);
}

/* Move left in the sequence on top of "node" to a synchronization node
 * for "kernel".
 * If "node" itself contains a synchronization node preceding
 * the core computation of "kernel", then return "node" itself.
 * Otherwise, if "node" does not have a preceding synchronization node,
 * then create one first.
 */
__isl_give isl_schedule_node *gpu_tree_move_left_to_sync(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	int has_sync;
	int is_sync;

	has_sync = has_sync_before_core(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	node = gpu_tree_ensure_preceding_sync(node, kernel);
	node = isl_schedule_node_parent(node);
	while ((is_sync = node_is_sync_filter(node, kernel)) == 0)
		node = isl_schedule_node_previous_sibling(node);
	if (is_sync < 0)
		node = isl_schedule_node_free(node);
	node = isl_schedule_node_child(node, 0);

	return node;
}

/* Move right in the sequence on top of "node" to a synchronization node
 * for "kernel".
 * If "node" itself contains a synchronization node following
 * the core computation of "kernel", then return "node" itself.
 * Otherwise, if "node" does not have a following synchronization node,
 * then create one first.
 */
__isl_give isl_schedule_node *gpu_tree_move_right_to_sync(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	int has_sync;
	int is_sync;

	has_sync = has_sync_after_core(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	node = gpu_tree_ensure_following_sync(node, kernel);
	node = isl_schedule_node_parent(node);
	while ((is_sync = node_is_sync_filter(node, kernel)) == 0)
		node = isl_schedule_node_next_sibling(node);
	if (is_sync < 0)
		node = isl_schedule_node_free(node);
	node = isl_schedule_node_child(node, 0);

	return node;
}
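
/* A usage sketch (the surrounding code is hypothetical): to make sure that
 * the subtree at "node" is separated from the preceding and following parts
 * of the enclosing sequence by synchronization statements for "kernel",
 * a caller could combine the functions above as follows:
 *
 *	node = gpu_tree_ensure_preceding_sync(node, kernel);
 *	node = gpu_tree_ensure_following_sync(node, kernel);
 */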