/*
 * StarPU
 * Copyright (C) INRIA 2008-2009 (see AUTHORS file)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <assert.h>
#include <inttypes.h>
#include <math.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <sys/types.h>

#include <starpu.h>
/* maximum number of tag dependencies recorded per operand */
#define MAXDEPS 4

/* next dynamically-allocated task tag; starts well above the fixed
 * marker tags (42 and 10) hand-picked in main() */
uint64_t current_tag = 1024;
/* total bytes allocated for matrices, only used for the report in main() */
uint64_t used_mem = 0;
/*
 * Strassen:
 *
 *	M1 = (A11 + A22)(B11 + B22)
 *	M2 = (A21 + A22)B11
 *	M3 = A11(B12 - B22)
 *	M4 = A22(B21 - B11)
 *	M5 = (A11 + A12)B22
 *	M6 = (A21 - A11)(B11 + B12)
 *	M7 = (A12 - A22)(B21 + B22)
 *
 *	C11 = M1 + M4 - M5 + M7
 *	C12 = M3 + M5
 *	C21 = M2 + M4
 *	C22 = M1 - M2 + M3 + M6
 *
 * 7 recursive calls to the Strassen algorithm (in each Mi computation)
 * 10+7 temporary buffers (to compute the terms of Mi = Mia x Mib, and to
 * store Mi)
 *
 * complexity:
 *	M(n): multiplication complexity
 *	A(n): add/sub complexity
 *	M(n) = (10 + 8) A(n/2) + 7 M(n/2)
 *
 * NB: we consider fortran ordering (hence we compute M3t = (B12t - B22t)A11t
 * for instance)
 */
/* matrix dimension (square matrices), set with -size */
static unsigned size = 2048;
/* depth of the Strassen recursion, set with -rec */
static unsigned reclevel = 3;
/* set with -no-random (only written in this file — TODO confirm it is read elsewhere) */
static unsigned norandom = 0;
/* use pinned host memory (better CUDA transfers), set with -pin */
static unsigned pin = 0;

/* computation kernels, implemented in a separate compilation unit */
extern void mult_core_codelet(starpu_data_interface_t *descr, __attribute__((unused)) void *arg);
extern void sub_core_codelet(starpu_data_interface_t *descr, __attribute__((unused)) void *arg);
extern void add_core_codelet(starpu_data_interface_t *descr, __attribute__((unused)) void *arg);
extern void self_add_core_codelet(starpu_data_interface_t *descr, __attribute__((unused)) void *arg);
extern void self_sub_core_codelet(starpu_data_interface_t *descr, __attribute__((unused)) void *arg);

#ifdef USE_CUDA
/* CUBLAS implementations of the same kernels */
extern void mult_cublas_codelet(starpu_data_interface_t *descr, __attribute__((unused)) void *arg);
extern void sub_cublas_codelet(starpu_data_interface_t *descr, __attribute__((unused)) void *arg);
extern void add_cublas_codelet(starpu_data_interface_t *descr, __attribute__((unused)) void *arg);
extern void self_add_cublas_codelet(starpu_data_interface_t *descr, __attribute__((unused)) void *arg);
extern void self_sub_cublas_codelet(starpu_data_interface_t *descr, __attribute__((unused)) void *arg);
#endif

/* kernel that performs no computation (used by the cleanup task) */
extern void null_codelet(__attribute__((unused)) starpu_data_interface_t *descr,
		__attribute__((unused)) void *arg);

/* prints the achieved performance for a given matrix size */
extern void display_perf(double timing, unsigned size);
/* history-based performance models, one per kernel, so that the
 * scheduler can predict task durations from past executions */
struct starpu_perfmodel_t strassen_model_mult = {
	.type = HISTORY_BASED,
	.symbol = "strassen_model_mult"
};

struct starpu_perfmodel_t strassen_model_add = {
	.type = HISTORY_BASED,
	.symbol = "strassen_model_add"
};

struct starpu_perfmodel_t strassen_model_sub = {
	.type = HISTORY_BASED,
	.symbol = "strassen_model_sub"
};

struct starpu_perfmodel_t strassen_model_self_add = {
	.type = HISTORY_BASED,
	.symbol = "strassen_model_self_add"
};

struct starpu_perfmodel_t strassen_model_self_sub = {
	.type = HISTORY_BASED,
	.symbol = "strassen_model_self_sub"
};
/* set of tags that must be satisfied before a piece of data is valid */
struct data_deps_t {
	unsigned ndeps;
	starpu_tag_t deps[MAXDEPS];
};

/* state of one level of the Strassen recursion: computes C = A * B */
struct strassen_iter {
	unsigned reclevel;			/* remaining recursion depth */
	struct strassen_iter *children[7];	/* one sub-problem per Mi product */
	starpu_data_handle A, B, C;

	/* temporary buffers */
	/* Mi = Mia * Mib */
	starpu_data_handle Mia_data[7];
	starpu_data_handle Mib_data[7];
	starpu_data_handle Mi_data[7];

	/* input deps: tags after which A (resp. B) holds valid data */
	struct data_deps_t A_deps;
	struct data_deps_t B_deps;
	/* output deps: tags after which C holds the product */
	struct data_deps_t C_deps;
};
/* split a matrix in 2 along one dimension (presumably rows — the filter
 * names suggest block vs. vertical-block partitioning; confirm against
 * the StarPU filter implementation) */
static starpu_filter f =
{
	.filter_func = starpu_block_filter_func,
	.filter_arg = 2
};

/* split a matrix in 2 along the other dimension; applying both filters
 * yields a 2x2 block decomposition */
static starpu_filter f2 =
{
	.filter_func = starpu_vertical_block_filter_func,
	.filter_arg = 2
};
  116. starpu_data_handle allocate_tmp_matrix(unsigned size, unsigned reclevel)
  117. {
  118. starpu_data_handle *data = malloc(sizeof(starpu_data_handle));
  119. float *buffer;
  120. #ifdef USE_CUDA
  121. if (pin) {
  122. starpu_malloc_pinned_if_possible(&buffer, size*size*sizeof(float));
  123. } else
  124. #endif
  125. {
  126. posix_memalign((void **)&buffer, 4096, size*size*sizeof(float));
  127. }
  128. assert(buffer);
  129. used_mem += size*size*sizeof(float);
  130. memset(buffer, 0, size*size*sizeof(float));
  131. starpu_monitor_blas_data(data, 0, (uintptr_t)buffer, size, size, size, sizeof(float));
  132. /* we construct a starpu_filter tree of depth reclevel */
  133. unsigned rec;
  134. for (rec = 0; rec < reclevel; rec++)
  135. starpu_map_filters(*data, 2, &f, &f2);
  136. return *data;
  137. }
/* matrix operations dispatched by compute_add_sub_op() and
 * compute_self_add_sub_op() */
enum operation {
	ADD,
	SUB,
	MULT
};
/* C = A + B (3 buffers: C written, A and B read) */
static starpu_codelet cl_add = {
	.where = ANY,
	.model = &strassen_model_add,
	.core_func = add_core_codelet,
#ifdef USE_CUDA
	.cublas_func = add_cublas_codelet,
#endif
	.nbuffers = 3
};

/* C = A - B */
static starpu_codelet cl_sub = {
	.where = ANY,
	.model = &strassen_model_sub,
	.core_func = sub_core_codelet,
#ifdef USE_CUDA
	.cublas_func = sub_cublas_codelet,
#endif
	.nbuffers = 3
};

/* C = A * B */
static starpu_codelet cl_mult = {
	.where = ANY,
	.model = &strassen_model_mult,
	.core_func = mult_core_codelet,
#ifdef USE_CUDA
	.cublas_func = mult_cublas_codelet,
#endif
	.nbuffers = 3
};
  170. /* C = A op B */
  171. struct starpu_task *compute_add_sub_op(starpu_data_handle C, enum operation op, starpu_data_handle A, starpu_data_handle B)
  172. {
  173. struct starpu_task *task = starpu_task_create();
  174. uint64_t j_tag = current_tag++;
  175. task->buffers[0].state = C;
  176. task->buffers[0].mode = W;
  177. task->buffers[1].state = A;
  178. task->buffers[1].mode = R;
  179. task->buffers[2].state = B;
  180. task->buffers[2].mode = R;
  181. task->callback_func = NULL;
  182. switch (op) {
  183. case ADD:
  184. task->cl = &cl_add;
  185. break;
  186. case SUB:
  187. task->cl = &cl_sub;
  188. break;
  189. case MULT:
  190. task->cl = &cl_mult;
  191. break;
  192. default:
  193. assert(0);
  194. };
  195. task->use_tag = 1;
  196. task->tag_id = j_tag;
  197. return task;
  198. }
/* C = C + A (2 buffers: C read-write, A read) */
static starpu_codelet cl_self_add = {
	.where = ANY,
	.model = &strassen_model_self_add,
	.core_func = self_add_core_codelet,
#ifdef USE_CUDA
	.cublas_func = self_add_cublas_codelet,
#endif
	.nbuffers = 2
};

/* C = C - A */
static starpu_codelet cl_self_sub = {
	.where = ANY,
	.model = &strassen_model_self_sub,
	.core_func = self_sub_core_codelet,
#ifdef USE_CUDA
	.cublas_func = self_sub_cublas_codelet,
#endif
	.nbuffers = 2
};
  217. /* C = C op A */
  218. struct starpu_task *compute_self_add_sub_op(starpu_data_handle C, enum operation op, starpu_data_handle A)
  219. {
  220. struct starpu_task *task = starpu_task_create();
  221. uint64_t j_tag = current_tag++;
  222. task->buffers[0].state = C;
  223. task->buffers[0].mode = RW;
  224. task->buffers[1].state = A;
  225. task->buffers[1].mode = R;
  226. task->callback_func = NULL;
  227. switch (op) {
  228. case ADD:
  229. task->cl = &cl_self_add;
  230. break;
  231. case SUB:
  232. task->cl = &cl_self_sub;
  233. break;
  234. default:
  235. assert(0);
  236. };
  237. task->use_tag = 1;
  238. task->tag_id = j_tag;
  239. return task;
  240. }
/* argument of the cleanup task: once all `tags' are satisfied, the
 * `data' handles (temporary Strassen buffers) may be marked as
 * unimportant */
struct cleanup_arg {
	unsigned ndeps;
	uint64_t tags[8];
	unsigned ndata;
	starpu_data_handle data[32];
};
  247. void cleanup_callback(void *_arg)
  248. {
  249. //fprintf(stderr, "cleanup callback\n");
  250. struct cleanup_arg *arg = _arg;
  251. unsigned i;
  252. for (i = 0; i < arg->ndata; i++)
  253. starpu_advise_if_data_is_important(arg->data[i], 0);
  254. free(arg);
  255. }
/* no-op codelet: the actual cleanup work happens in the task callback */
static starpu_codelet cleanup_codelet = {
	.where = ANY,
	.model = NULL,
	.core_func = null_codelet,
#ifdef USE_CUDA
	.cublas_func = null_codelet,
#endif
	.nbuffers = 0
};
  265. /* this creates a codelet that will tell StarPU that all specified data are not
  266. essential once the tasks corresponding to the task will be performed */
  267. void create_cleanup_task(struct cleanup_arg *cleanup_arg)
  268. {
  269. struct starpu_task *task = starpu_task_create();
  270. uint64_t j_tag = current_tag++;
  271. task->cl = &cleanup_codelet;
  272. task->callback_func = cleanup_callback;
  273. task->callback_arg = cleanup_arg;
  274. task->use_tag = 1;
  275. task->tag_id = j_tag;
  276. starpu_tag_declare_deps_array(j_tag, cleanup_arg->ndeps, cleanup_arg->tags);
  277. starpu_submit_task(task);
  278. }
  279. void strassen_mult(struct strassen_iter *iter)
  280. {
  281. if (iter->reclevel == 0)
  282. {
  283. struct starpu_task *task_mult =
  284. compute_add_sub_op(iter->C, MULT, iter->A, iter->B);
  285. uint64_t tag_mult = task_mult->tag_id;
  286. uint64_t deps_array[10];
  287. unsigned indexA, indexB;
  288. for (indexA = 0; indexA < iter->A_deps.ndeps; indexA++)
  289. {
  290. deps_array[indexA] = iter->A_deps.deps[indexA];
  291. }
  292. for (indexB = 0; indexB < iter->B_deps.ndeps; indexB++)
  293. {
  294. deps_array[indexB+indexA] = iter->B_deps.deps[indexB];
  295. }
  296. starpu_tag_declare_deps_array(tag_mult, indexA+indexB, deps_array);
  297. iter->C_deps.ndeps = 1;
  298. iter->C_deps.deps[0] = tag_mult;
  299. starpu_submit_task(task_mult);
  300. return;
  301. }
  302. starpu_data_handle A11 = get_sub_data(iter->A, 2, 0, 0);
  303. starpu_data_handle A12 = get_sub_data(iter->A, 2, 1, 0);
  304. starpu_data_handle A21 = get_sub_data(iter->A, 2, 0, 1);
  305. starpu_data_handle A22 = get_sub_data(iter->A, 2, 1, 1);
  306. starpu_data_handle B11 = get_sub_data(iter->B, 2, 0, 0);
  307. starpu_data_handle B12 = get_sub_data(iter->B, 2, 1, 0);
  308. starpu_data_handle B21 = get_sub_data(iter->B, 2, 0, 1);
  309. starpu_data_handle B22 = get_sub_data(iter->B, 2, 1, 1);
  310. starpu_data_handle C11 = get_sub_data(iter->C, 2, 0, 0);
  311. starpu_data_handle C12 = get_sub_data(iter->C, 2, 1, 0);
  312. starpu_data_handle C21 = get_sub_data(iter->C, 2, 0, 1);
  313. starpu_data_handle C22 = get_sub_data(iter->C, 2, 1, 1);
  314. unsigned size = starpu_get_blas_nx(A11);
  315. /* M1a = (A11 + A22) */
  316. iter->Mia_data[0] = allocate_tmp_matrix(size, iter->reclevel);
  317. struct starpu_task *task_1a = compute_add_sub_op(iter->Mia_data[0], ADD, A11, A22);
  318. uint64_t tag_1a = task_1a->tag_id;
  319. starpu_tag_declare_deps_array(tag_1a, iter->A_deps.ndeps, iter->A_deps.deps);
  320. starpu_submit_task(task_1a);
  321. /* M1b = (B11 + B22) */
  322. iter->Mib_data[0] = allocate_tmp_matrix(size, iter->reclevel);
  323. struct starpu_task *task_1b = compute_add_sub_op(iter->Mib_data[0], ADD, B11, B22);
  324. uint64_t tag_1b = task_1b->tag_id;
  325. starpu_tag_declare_deps_array(tag_1b, iter->B_deps.ndeps, iter->B_deps.deps);
  326. starpu_submit_task(task_1b);
  327. /* M2a = (A21 + A22) */
  328. iter->Mia_data[1] = allocate_tmp_matrix(size, iter->reclevel);
  329. struct starpu_task *task_2a = compute_add_sub_op(iter->Mia_data[1], ADD, A21, A22);
  330. uint64_t tag_2a = task_2a->tag_id;
  331. starpu_tag_declare_deps_array(tag_2a, iter->A_deps.ndeps, iter->A_deps.deps);
  332. starpu_submit_task(task_2a);
  333. /* M3b = (B12 - B22) */
  334. iter->Mib_data[2] = allocate_tmp_matrix(size, iter->reclevel);
  335. struct starpu_task *task_3b = compute_add_sub_op(iter->Mib_data[2], SUB, B12, B22);
  336. uint64_t tag_3b = task_3b->tag_id;
  337. starpu_tag_declare_deps_array(tag_3b, iter->B_deps.ndeps, iter->B_deps.deps);
  338. starpu_submit_task(task_3b);
  339. /* M4b = (B21 - B11) */
  340. iter->Mib_data[3] = allocate_tmp_matrix(size, iter->reclevel);
  341. struct starpu_task *task_4b = compute_add_sub_op(iter->Mib_data[3], SUB, B21, B11);
  342. uint64_t tag_4b = task_4b->tag_id;
  343. starpu_tag_declare_deps_array(tag_4b, iter->B_deps.ndeps, iter->B_deps.deps);
  344. starpu_submit_task(task_4b);
  345. /* M5a = (A11 + A12) */
  346. iter->Mia_data[4] = allocate_tmp_matrix(size, iter->reclevel);
  347. struct starpu_task *task_5a = compute_add_sub_op(iter->Mia_data[4], ADD, A11, A12);
  348. uint64_t tag_5a = task_5a->tag_id;
  349. starpu_tag_declare_deps_array(tag_5a, iter->A_deps.ndeps, iter->A_deps.deps);
  350. starpu_submit_task(task_5a);
  351. /* M6a = (A21 - A11) */
  352. iter->Mia_data[5] = allocate_tmp_matrix(size, iter->reclevel);
  353. struct starpu_task *task_6a = compute_add_sub_op(iter->Mia_data[5], SUB, A21, A11);
  354. uint64_t tag_6a = task_6a->tag_id;
  355. starpu_tag_declare_deps_array(tag_6a, iter->A_deps.ndeps, iter->A_deps.deps);
  356. starpu_submit_task(task_6a);
  357. /* M6b = (B11 + B12) */
  358. iter->Mib_data[5] = allocate_tmp_matrix(size, iter->reclevel);
  359. struct starpu_task *task_6b = compute_add_sub_op(iter->Mib_data[5], SUB, B11, B12);
  360. uint64_t tag_6b = task_6b->tag_id;
  361. starpu_tag_declare_deps_array(tag_6b, iter->B_deps.ndeps, iter->B_deps.deps);
  362. starpu_submit_task(task_6b);
  363. /* M7a = (A12 - A22) */
  364. iter->Mia_data[6] = allocate_tmp_matrix(size, iter->reclevel);
  365. struct starpu_task *task_7a = compute_add_sub_op(iter->Mia_data[6], SUB, A12, A22);
  366. uint64_t tag_7a = task_7a->tag_id;
  367. starpu_tag_declare_deps_array(tag_7a, iter->A_deps.ndeps, iter->A_deps.deps);
  368. starpu_submit_task(task_7a);
  369. /* M7b = (B21 + B22) */
  370. iter->Mib_data[6] = allocate_tmp_matrix(size, iter->reclevel);
  371. struct starpu_task *task_7b = compute_add_sub_op(iter->Mib_data[6], ADD, B21, B22);
  372. uint64_t tag_7b = task_7b->tag_id;
  373. starpu_tag_declare_deps_array(tag_7b, iter->B_deps.ndeps, iter->B_deps.deps);
  374. starpu_submit_task(task_7b);
  375. iter->Mi_data[0] = allocate_tmp_matrix(size, iter->reclevel);
  376. iter->Mi_data[1] = allocate_tmp_matrix(size, iter->reclevel);
  377. iter->Mi_data[2] = allocate_tmp_matrix(size, iter->reclevel);
  378. iter->Mi_data[3] = allocate_tmp_matrix(size, iter->reclevel);
  379. iter->Mi_data[4] = allocate_tmp_matrix(size, iter->reclevel);
  380. iter->Mi_data[5] = allocate_tmp_matrix(size, iter->reclevel);
  381. iter->Mi_data[6] = allocate_tmp_matrix(size, iter->reclevel);
  382. /* M1 = M1a * M1b */
  383. iter->children[0] = malloc(sizeof(struct strassen_iter));
  384. iter->children[0]->reclevel = iter->reclevel - 1;
  385. iter->children[0]->A_deps.ndeps = 1;
  386. iter->children[0]->A_deps.deps[0] = tag_1a;
  387. iter->children[0]->B_deps.ndeps = 1;
  388. iter->children[0]->B_deps.deps[0] = tag_1b;
  389. iter->children[0]->A = iter->Mia_data[0];
  390. iter->children[0]->B = iter->Mib_data[0];
  391. iter->children[0]->C = iter->Mi_data[0];
  392. strassen_mult(iter->children[0]);
  393. /* M2 = M2a * B11 */
  394. iter->children[1] = malloc(sizeof(struct strassen_iter));
  395. iter->children[1]->reclevel = iter->reclevel - 1;
  396. iter->children[1]->A_deps.ndeps = 1;
  397. iter->children[1]->A_deps.deps[0] = tag_2a;
  398. iter->children[1]->B_deps.ndeps = iter->B_deps.ndeps;
  399. memcpy(iter->children[1]->B_deps.deps, iter->B_deps.deps, iter->B_deps.ndeps*sizeof(uint64_t));
  400. iter->children[1]->A = iter->Mia_data[1];
  401. iter->children[1]->B = B11;
  402. iter->children[1]->C = iter->Mi_data[1];
  403. strassen_mult(iter->children[1]);
  404. /* M3 = A11 * M3b */
  405. iter->children[2] = malloc(sizeof(struct strassen_iter));
  406. iter->children[2]->reclevel = iter->reclevel - 1;
  407. iter->children[2]->A_deps.ndeps = iter->B_deps.ndeps;
  408. memcpy(iter->children[2]->A_deps.deps, iter->A_deps.deps, iter->A_deps.ndeps*sizeof(uint64_t));
  409. iter->children[2]->B_deps.ndeps = 1;
  410. iter->children[2]->B_deps.deps[0] = tag_3b;
  411. iter->children[2]->A = A11;
  412. iter->children[2]->B = iter->Mib_data[2];
  413. iter->children[2]->C = iter->Mi_data[2];
  414. strassen_mult(iter->children[2]);
  415. /* M4 = A22 * M4b */
  416. iter->children[3] = malloc(sizeof(struct strassen_iter));
  417. iter->children[3]->reclevel = iter->reclevel - 1;
  418. iter->children[3]->A_deps.ndeps = iter->B_deps.ndeps;
  419. memcpy(iter->children[3]->A_deps.deps, iter->A_deps.deps, iter->A_deps.ndeps*sizeof(uint64_t));
  420. iter->children[3]->B_deps.ndeps = 1;
  421. iter->children[3]->B_deps.deps[0] = tag_4b;
  422. iter->children[3]->A = A22;
  423. iter->children[3]->B = iter->Mib_data[3];
  424. iter->children[3]->C = iter->Mi_data[3];
  425. strassen_mult(iter->children[3]);
  426. /* M5 = M5a * B22 */
  427. iter->children[4] = malloc(sizeof(struct strassen_iter));
  428. iter->children[4]->reclevel = iter->reclevel - 1;
  429. iter->children[4]->A_deps.ndeps = 1;
  430. iter->children[4]->A_deps.deps[0] = tag_5a;
  431. iter->children[4]->B_deps.ndeps = iter->B_deps.ndeps;
  432. memcpy(iter->children[4]->B_deps.deps, iter->B_deps.deps, iter->B_deps.ndeps*sizeof(uint64_t));
  433. iter->children[4]->A = iter->Mia_data[4];
  434. iter->children[4]->B = B22;
  435. iter->children[4]->C = iter->Mi_data[4];
  436. strassen_mult(iter->children[4]);
  437. /* M6 = M6a * M6b */
  438. iter->children[5] = malloc(sizeof(struct strassen_iter));
  439. iter->children[5]->reclevel = iter->reclevel - 1;
  440. iter->children[5]->A_deps.ndeps = 1;
  441. iter->children[5]->A_deps.deps[0] = tag_6a;
  442. iter->children[5]->B_deps.ndeps = 1;
  443. iter->children[5]->B_deps.deps[0] = tag_6b;
  444. iter->children[5]->A = iter->Mia_data[5];
  445. iter->children[5]->B = iter->Mib_data[5];
  446. iter->children[5]->C = iter->Mi_data[5];
  447. strassen_mult(iter->children[5]);
  448. /* M7 = M7a * M7b */
  449. iter->children[6] = malloc(sizeof(struct strassen_iter));
  450. iter->children[6]->reclevel = iter->reclevel - 1;
  451. iter->children[6]->A_deps.ndeps = 1;
  452. iter->children[6]->A_deps.deps[0] = tag_7a;
  453. iter->children[6]->B_deps.ndeps = 1;
  454. iter->children[6]->B_deps.deps[0] = tag_7b;
  455. iter->children[6]->A = iter->Mia_data[6];
  456. iter->children[6]->B = iter->Mib_data[6];
  457. iter->children[6]->C = iter->Mi_data[6];
  458. strassen_mult(iter->children[6]);
  459. uint64_t *tag_m1 = iter->children[0]->C_deps.deps;
  460. uint64_t *tag_m2 = iter->children[1]->C_deps.deps;
  461. uint64_t *tag_m3 = iter->children[2]->C_deps.deps;
  462. uint64_t *tag_m4 = iter->children[3]->C_deps.deps;
  463. uint64_t *tag_m5 = iter->children[4]->C_deps.deps;
  464. uint64_t *tag_m6 = iter->children[5]->C_deps.deps;
  465. uint64_t *tag_m7 = iter->children[6]->C_deps.deps;
  466. /* C11 = M1 + M4 - M5 + M7 */
  467. struct starpu_task *task_c11_a = compute_self_add_sub_op(C11, ADD, iter->Mi_data[0]);
  468. struct starpu_task *task_c11_b = compute_self_add_sub_op(C11, ADD, iter->Mi_data[3]);
  469. struct starpu_task *task_c11_c = compute_self_add_sub_op(C11, SUB, iter->Mi_data[4]);
  470. struct starpu_task *task_c11_d = compute_self_add_sub_op(C11, ADD, iter->Mi_data[6]);
  471. uint64_t tag_c11_a = task_c11_a->tag_id;
  472. uint64_t tag_c11_b = task_c11_b->tag_id;
  473. uint64_t tag_c11_c = task_c11_c->tag_id;
  474. uint64_t tag_c11_d = task_c11_d->tag_id;
  475. /* C12 = M3 + M5 */
  476. struct starpu_task *task_c12_a = compute_self_add_sub_op(C12, ADD, iter->Mi_data[2]);
  477. struct starpu_task *task_c12_b = compute_self_add_sub_op(C12, ADD, iter->Mi_data[4]);
  478. uint64_t tag_c12_a = task_c12_a->tag_id;
  479. uint64_t tag_c12_b = task_c12_b->tag_id;
  480. /* C21 = M2 + M4 */
  481. struct starpu_task *task_c21_a = compute_self_add_sub_op(C21, ADD, iter->Mi_data[1]);
  482. struct starpu_task *task_c21_b = compute_self_add_sub_op(C21, ADD, iter->Mi_data[3]);
  483. uint64_t tag_c21_a = task_c21_a->tag_id;
  484. uint64_t tag_c21_b = task_c21_b->tag_id;
  485. /* C22 = M1 - M2 + M3 + M6 */
  486. struct starpu_task *task_c22_a = compute_self_add_sub_op(C22, ADD, iter->Mi_data[0]);
  487. struct starpu_task *task_c22_b = compute_self_add_sub_op(C22, SUB, iter->Mi_data[1]);
  488. struct starpu_task *task_c22_c = compute_self_add_sub_op(C22, ADD, iter->Mi_data[3]);
  489. struct starpu_task *task_c22_d = compute_self_add_sub_op(C22, ADD, iter->Mi_data[5]);
  490. uint64_t tag_c22_a = task_c22_a->tag_id;
  491. uint64_t tag_c22_b = task_c22_b->tag_id;
  492. uint64_t tag_c22_c = task_c22_c->tag_id;
  493. uint64_t tag_c22_d = task_c22_d->tag_id;
  494. if (iter->reclevel == 1)
  495. {
  496. starpu_tag_declare_deps(tag_c11_a, 1, tag_m1[0]);
  497. starpu_tag_declare_deps(tag_c11_b, 2, tag_m4[0], tag_c11_a);
  498. starpu_tag_declare_deps(tag_c11_c, 2, tag_m5[0], tag_c11_b);
  499. starpu_tag_declare_deps(tag_c11_d, 2, tag_m7[0], tag_c11_c);
  500. starpu_tag_declare_deps(tag_c12_a, 1, tag_m3[0]);
  501. starpu_tag_declare_deps(tag_c12_b, 2, tag_m5[0], tag_c12_a);
  502. starpu_tag_declare_deps(tag_c21_a, 1, tag_m2[0]);
  503. starpu_tag_declare_deps(tag_c21_b, 2, tag_m4[0], tag_c21_a);
  504. starpu_tag_declare_deps(tag_c22_a, 1, tag_m1[0]);
  505. starpu_tag_declare_deps(tag_c22_b, 2, tag_m2[0], tag_c22_a);
  506. starpu_tag_declare_deps(tag_c22_c, 2, tag_m3[0], tag_c22_b);
  507. starpu_tag_declare_deps(tag_c22_d, 2, tag_m6[0], tag_c22_c);
  508. }
  509. else
  510. {
  511. starpu_tag_declare_deps(tag_c11_a, 4, tag_m1[0], tag_m1[1], tag_m1[2], tag_m1[3]);
  512. starpu_tag_declare_deps(tag_c11_b, 5, tag_m4[0], tag_m4[1], tag_m4[2], tag_m4[3], tag_c11_a);
  513. starpu_tag_declare_deps(tag_c11_c, 5, tag_m5[0], tag_m5[1], tag_m5[2], tag_m5[3], tag_c11_b);
  514. starpu_tag_declare_deps(tag_c11_d, 5, tag_m7[0], tag_m7[1], tag_m7[2], tag_m7[3], tag_c11_c);
  515. starpu_tag_declare_deps(tag_c12_a, 4, tag_m3[0], tag_m3[1], tag_m3[2], tag_m3[3]);
  516. starpu_tag_declare_deps(tag_c12_b, 5, tag_m5[0], tag_m5[1], tag_m5[2], tag_m5[3], tag_c12_a);
  517. starpu_tag_declare_deps(tag_c21_a, 4, tag_m2[0], tag_m2[1], tag_m2[2], tag_m2[3]);
  518. starpu_tag_declare_deps(tag_c21_b, 5, tag_m4[0], tag_m4[1], tag_m4[2], tag_m4[3], tag_c21_a);
  519. starpu_tag_declare_deps(tag_c22_a, 4, tag_m1[0], tag_m1[1], tag_m1[2], tag_m1[3]);
  520. starpu_tag_declare_deps(tag_c22_b, 5, tag_m2[0], tag_m2[1], tag_m2[2], tag_m2[3], tag_c22_a);
  521. starpu_tag_declare_deps(tag_c22_c, 5, tag_m3[0], tag_m3[1], tag_m3[2], tag_m3[3], tag_c22_b);
  522. starpu_tag_declare_deps(tag_c22_d, 5, tag_m6[0], tag_m6[1], tag_m6[2], tag_m6[3], tag_c22_c);
  523. }
  524. starpu_submit_task(task_c11_a);
  525. starpu_submit_task(task_c11_b);
  526. starpu_submit_task(task_c11_c);
  527. starpu_submit_task(task_c11_d);
  528. starpu_submit_task(task_c12_a);
  529. starpu_submit_task(task_c12_b);
  530. starpu_submit_task(task_c21_a);
  531. starpu_submit_task(task_c21_b);
  532. starpu_submit_task(task_c22_a);
  533. starpu_submit_task(task_c22_b);
  534. starpu_submit_task(task_c22_c);
  535. starpu_submit_task(task_c22_d);
  536. iter->C_deps.ndeps = 4;
  537. iter->C_deps.deps[0] = tag_c11_d;
  538. iter->C_deps.deps[1] = tag_c12_b;
  539. iter->C_deps.deps[2] = tag_c21_b;
  540. iter->C_deps.deps[3] = tag_c22_d;
  541. struct cleanup_arg *clean_struct = malloc(sizeof(struct cleanup_arg));
  542. clean_struct->ndeps = 4;
  543. clean_struct->tags[0] = tag_c11_d;
  544. clean_struct->tags[1] = tag_c12_b;
  545. clean_struct->tags[2] = tag_c21_b;
  546. clean_struct->tags[3] = tag_c22_d;
  547. clean_struct->ndata = 17;
  548. clean_struct->data[0] = iter->Mia_data[0];
  549. clean_struct->data[1] = iter->Mib_data[0];
  550. clean_struct->data[2] = iter->Mia_data[1];
  551. clean_struct->data[3] = iter->Mib_data[2];
  552. clean_struct->data[4] = iter->Mib_data[3];
  553. clean_struct->data[5] = iter->Mia_data[4];
  554. clean_struct->data[6] = iter->Mia_data[5];
  555. clean_struct->data[7] = iter->Mib_data[5];
  556. clean_struct->data[8] = iter->Mia_data[6];
  557. clean_struct->data[9] = iter->Mib_data[6];
  558. clean_struct->data[10] = iter->Mi_data[0];
  559. clean_struct->data[11] = iter->Mi_data[1];
  560. clean_struct->data[12] = iter->Mi_data[2];
  561. clean_struct->data[13] = iter->Mi_data[3];
  562. clean_struct->data[14] = iter->Mi_data[4];
  563. clean_struct->data[15] = iter->Mi_data[5];
  564. clean_struct->data[16] = iter->Mi_data[6];
  565. create_cleanup_task(clean_struct);
  566. }
/* kernel that performs no work: used by the timing-marker tasks */
static void dummy_codelet_func(__attribute__((unused))starpu_data_interface_t *descr,
				__attribute__((unused)) void *arg)
{
}
/* no-op codelet for the start/end marker tasks */
static starpu_codelet dummy_codelet = {
	.where = ANY,
	.model = NULL,
	.core_func = dummy_codelet_func,
#ifdef USE_CUDA
	.cublas_func = dummy_codelet_func,
#endif
	.nbuffers = 0
};
  580. static struct starpu_task *dummy_task(uint64_t tag)
  581. {
  582. struct starpu_task *task =starpu_task_create();
  583. task->callback_func = NULL;
  584. task->cl = &dummy_codelet;
  585. task->cl_arg = NULL;
  586. task->use_tag = 1;
  587. task->tag_id = tag;
  588. return task;
  589. }
  590. void parse_args(int argc, char **argv)
  591. {
  592. int i;
  593. for (i = 1; i < argc; i++) {
  594. if (strcmp(argv[i], "-size") == 0) {
  595. char *argptr;
  596. size = strtol(argv[++i], &argptr, 10);
  597. }
  598. if (strcmp(argv[i], "-rec") == 0) {
  599. char *argptr;
  600. reclevel = strtol(argv[++i], &argptr, 10);
  601. }
  602. if (strcmp(argv[i], "-no-random") == 0) {
  603. norandom = 1;
  604. }
  605. if (strcmp(argv[i], "-pin") == 0) {
  606. pin = 1;
  607. }
  608. }
  609. }
  610. int main(int argc, char **argv)
  611. {
  612. starpu_data_handle data_A, data_B, data_C;
  613. float *A, *B, *C;
  614. struct timeval start;
  615. struct timeval end;
  616. parse_args(argc, argv);
  617. starpu_init();
  618. #ifdef USE_CUDA
  619. if (pin) {
  620. starpu_malloc_pinned_if_possible(&A, size*size*sizeof(float));
  621. starpu_malloc_pinned_if_possible(&B, size*size*sizeof(float));
  622. starpu_malloc_pinned_if_possible(&C, size*size*sizeof(float));
  623. } else
  624. #endif
  625. {
  626. posix_memalign((void **)&A, 4096, size*size*sizeof(float));
  627. posix_memalign((void **)&B, 4096, size*size*sizeof(float));
  628. posix_memalign((void **)&C, 4096, size*size*sizeof(float));
  629. }
  630. assert(A);
  631. assert(B);
  632. assert(C);
  633. used_mem += 3*size*size*sizeof(float);
  634. memset(A, 0, size*size*sizeof(float));
  635. memset(B, 0, size*size*sizeof(float));
  636. memset(C, 0, size*size*sizeof(float));
  637. starpu_monitor_blas_data(&data_A, 0, (uintptr_t)A, size, size, size, sizeof(float));
  638. starpu_monitor_blas_data(&data_B, 0, (uintptr_t)B, size, size, size, sizeof(float));
  639. starpu_monitor_blas_data(&data_C, 0, (uintptr_t)C, size, size, size, sizeof(float));
  640. unsigned rec;
  641. for (rec = 0; rec < reclevel; rec++)
  642. {
  643. starpu_map_filters(data_A, 2, &f, &f2);
  644. starpu_map_filters(data_B, 2, &f, &f2);
  645. starpu_map_filters(data_C, 2, &f, &f2);
  646. }
  647. struct strassen_iter iter;
  648. iter.reclevel = reclevel;
  649. iter.A = data_A;
  650. iter.B = data_B;
  651. iter.C = data_C;
  652. iter.A_deps.ndeps = 1;
  653. iter.A_deps.deps[0] = 42;
  654. iter.B_deps.ndeps = 1;
  655. iter.B_deps.deps[0] = 42;
  656. strassen_mult(&iter);
  657. starpu_tag_declare_deps_array(10, iter.C_deps.ndeps, iter.C_deps.deps);
  658. fprintf(stderr, "Using %ld MB of memory\n", used_mem/(1024*1024));
  659. struct starpu_task *task_start = dummy_task(42);
  660. gettimeofday(&start, NULL);
  661. starpu_submit_task(task_start);
  662. struct starpu_task *task_end = dummy_task(10);
  663. task_end->synchronous = 1;
  664. starpu_submit_task(task_end);
  665. gettimeofday(&end, NULL);
  666. starpu_shutdown();
  667. double timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
  668. display_perf(timing, size);
  669. return 0;
  670. }