strassen2.c

/*
 * StarPU
 * Copyright (C) INRIA 2008-2009 (see AUTHORS file)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <assert.h>
#include <math.h>
#include <sys/types.h>
#include <sys/time.h>
#include <pthread.h>
#include <signal.h>

#include <starpu.h>

#define MAXDEPS 4

uint64_t current_tag = 1024;

uint64_t used_mem = 0;
uint64_t used_mem_predicted = 0;

#define MAXREC 7

/* the size consumed by the algorithm should be
 * <= (size)^2 * (predicted_mem[rec] + 1)
 * NB: we don't really need this, but it is useful to avoid allocating
 * thousands of pinned buffers, and as many VMAs, which puts a lot of
 * pressure on Linux */
static unsigned predicted_mem[7] = {
	12, 29, 58, 110, 201, 361, 640
};
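/* A rough derivation of the table above (assuming the allocation pattern used
 * below): A, B and C cost 3 * sizeof(float) = 12 bytes per matrix element, and
 * each recursion level allocates 17 quarter-size temporaries (17 bytes per
 * element of that level's matrix) while recursing 7 times on half the
 * dimension, which gives a geometric factor of 7/4:
 *
 *	predicted_mem[r] ~ 12 + 17 * sum_{k=0}^{r-1} (7/4)^k
 *
 * e.g. r = 2: 12 + 17 * (1 + 7/4) = 58.75, truncated to 58; the "+ 1" applied
 * to predicted_mem[rec] when computing used_mem_predicted absorbs the
 * truncated fraction. */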
static unsigned char *bigbuffer;

/*
 * Strassen:
 *
 *	M1 = (A11 + A22)(B11 + B22)
 *	M2 = (A21 + A22)B11
 *	M3 = A11(B12 - B22)
 *	M4 = A22(B21 - B11)
 *	M5 = (A11 + A12)B22
 *	M6 = (A21 - A11)(B11 + B12)
 *	M7 = (A12 - A22)(B21 + B22)
 *
 *	C11 = M1 + M4 - M5 + M7
 *	C12 = M3 + M5
 *	C21 = M2 + M4
 *	C22 = M1 - M2 + M3 + M6
 *
 * 7 recursive calls to the Strassen algorithm (one per Mi computation)
 * 10 + 7 temporary buffers (to compute the terms of Mi = Mia x Mib, and to store the Mi)
 *
 * complexity:
 *	M(n): multiplication complexity
 *	A(n): add/sub complexity
 *	M(n) = (10 + 8) A(n/2) + 7 M(n/2)
 *
 * NB: we consider Fortran ordering (hence we compute M3t = (B12t - B22t) A11t for instance)
 */
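/* Per recursion level, the code below builds the following task graph with
 * StarPU tags: 10 add/sub tasks produce the Mia/Mib terms, 7 (recursive)
 * multiplications compute the Mi, and 12 in-place accumulations build the
 * four quarters of C (4 for C11, 2 for C12, 2 for C21, 4 for C22), each one
 * chained after the previous accumulation on the same quarter. */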
static unsigned size = 2048;
static unsigned reclevel = 3;
static unsigned norandom = 0;
static unsigned pin = 0;

extern void mult_cpu_codelet(void *descr[], __attribute__((unused)) void *arg);
extern void sub_cpu_codelet(void *descr[], __attribute__((unused)) void *arg);
extern void add_cpu_codelet(void *descr[], __attribute__((unused)) void *arg);
extern void self_add_cpu_codelet(void *descr[], __attribute__((unused)) void *arg);
extern void self_sub_cpu_codelet(void *descr[], __attribute__((unused)) void *arg);

#ifdef STARPU_USE_CUDA
extern void mult_cublas_codelet(void *descr[], __attribute__((unused)) void *arg);
extern void sub_cublas_codelet(void *descr[], __attribute__((unused)) void *arg);
extern void add_cublas_codelet(void *descr[], __attribute__((unused)) void *arg);
extern void self_add_cublas_codelet(void *descr[], __attribute__((unused)) void *arg);
extern void self_sub_cublas_codelet(void *descr[], __attribute__((unused)) void *arg);
#endif

extern void null_codelet(__attribute__((unused)) void *descr[],
			__attribute__((unused)) void *arg);

extern void display_perf(double timing, unsigned size);

struct starpu_perfmodel_t strassen_model_mult = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "strassen_model_mult"
};

struct starpu_perfmodel_t strassen_model_add = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "strassen_model_add"
};

struct starpu_perfmodel_t strassen_model_sub = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "strassen_model_sub"
};

struct starpu_perfmodel_t strassen_model_self_add = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "strassen_model_self_add"
};

struct starpu_perfmodel_t strassen_model_self_sub = {
	.type = STARPU_HISTORY_BASED,
	.symbol = "strassen_model_self_sub"
};

struct data_deps_t {
	unsigned ndeps;
	starpu_tag_t deps[MAXDEPS];
};

struct strassen_iter {
	unsigned reclevel;

	struct strassen_iter *children[7];

	starpu_data_handle A, B, C;

	/* temporary buffers */
	/* Mi = Mia * Mib */
	starpu_data_handle Mia_data[7];
	starpu_data_handle Mib_data[7];
	starpu_data_handle Mi_data[7];

	/* input deps */
	struct data_deps_t A_deps;
	struct data_deps_t B_deps;

	/* output deps */
	struct data_deps_t C_deps;
};
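/* Dependency protocol: the caller fills A_deps/B_deps with the tags that must
 * terminate before A and B may be read; strassen_mult() fills C_deps with the
 * tags that signal that C is fully computed (a single multiplication tag at
 * the leaves, the four final accumulation tags otherwise). */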
static starpu_filter f = {
	.filter_func = starpu_block_filter_func,
	.filter_arg = 2
};

static starpu_filter f2 = {
	.filter_func = starpu_vertical_block_filter_func,
	.filter_arg = 2
};
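/* Mapping f (split in 2 along one dimension) and f2 (split in 2 along the
 * other) on a matrix handle exposes a 2x2 grid of sub-matrices, accessed below
 * with starpu_get_sub_data(handle, 2, x, y); the loops in allocate_tmp_matrix()
 * and main() map them reclevel times so that sub-blocks exist at every
 * recursion level. */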
static float *allocate_tmp_matrix_wrapper(size_t size)
{
	float *buffer;

	buffer = (float *)&bigbuffer[used_mem];

	/* XXX there could be some extra alignment constraints here */
	used_mem += size;

	if (used_mem > used_mem_predicted)
		fprintf(stderr, "used %"PRIu64" predict %"PRIu64"\n", used_mem, used_mem_predicted);

	assert(used_mem <= used_mem_predicted);

	memset(buffer, 0, size);

	return buffer;
}

static starpu_data_handle allocate_tmp_matrix(unsigned size, unsigned reclevel)
{
	starpu_data_handle *data = malloc(sizeof(starpu_data_handle));

	float *buffer;
	buffer = allocate_tmp_matrix_wrapper(size*size*sizeof(float));

	starpu_register_matrix_data(data, 0, (uintptr_t)buffer, size, size, size, sizeof(float));

	/* we construct a starpu_filter tree of depth reclevel */
	unsigned rec;
	for (rec = 0; rec < reclevel; rec++)
		starpu_map_filters(*data, 2, &f, &f2);

	return *data;
}

enum operation {
	ADD,
	SUB,
	MULT
};

static starpu_codelet cl_add = {
	.where = STARPU_CPU|STARPU_CUDA,
	.model = &strassen_model_add,
	.cpu_func = add_cpu_codelet,
#ifdef STARPU_USE_CUDA
	.cuda_func = add_cublas_codelet,
#endif
	.nbuffers = 3
};

static starpu_codelet cl_sub = {
	.where = STARPU_CPU|STARPU_CUDA,
	.model = &strassen_model_sub,
	.cpu_func = sub_cpu_codelet,
#ifdef STARPU_USE_CUDA
	.cuda_func = sub_cublas_codelet,
#endif
	.nbuffers = 3
};

static starpu_codelet cl_mult = {
	.where = STARPU_CPU|STARPU_CUDA,
	.model = &strassen_model_mult,
	.cpu_func = mult_cpu_codelet,
#ifdef STARPU_USE_CUDA
	.cuda_func = mult_cublas_codelet,
#endif
	.nbuffers = 3
};
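/* The two helpers below only create and configure a task: they assign a fresh
 * tag but do not submit the task, so that the caller can declare tag
 * dependencies with starpu_tag_declare_deps*() before calling
 * starpu_submit_task(). */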
/* C = A op B */
struct starpu_task *compute_add_sub_op(starpu_data_handle C, enum operation op, starpu_data_handle A, starpu_data_handle B)
{
	struct starpu_task *task = starpu_task_create();

	uint64_t j_tag = current_tag++;

	task->buffers[0].handle = C;
	task->buffers[0].mode = STARPU_W;
	task->buffers[1].handle = A;
	task->buffers[1].mode = STARPU_R;
	task->buffers[2].handle = B;
	task->buffers[2].mode = STARPU_R;

	task->callback_func = NULL;

	switch (op) {
		case ADD:
			task->cl = &cl_add;
			break;
		case SUB:
			task->cl = &cl_sub;
			break;
		case MULT:
			task->cl = &cl_mult;
			break;
		default:
			assert(0);
	};

	task->use_tag = 1;
	task->tag_id = (starpu_tag_t)j_tag;

	return task;
}

static starpu_codelet cl_self_add = {
	.where = STARPU_CPU|STARPU_CUDA,
	.model = &strassen_model_self_add,
	.cpu_func = self_add_cpu_codelet,
#ifdef STARPU_USE_CUDA
	.cuda_func = self_add_cublas_codelet,
#endif
	.nbuffers = 2
};

static starpu_codelet cl_self_sub = {
	.where = STARPU_CPU|STARPU_CUDA,
	.model = &strassen_model_self_sub,
	.cpu_func = self_sub_cpu_codelet,
#ifdef STARPU_USE_CUDA
	.cuda_func = self_sub_cublas_codelet,
#endif
	.nbuffers = 2
};

/* C = C op A */
struct starpu_task *compute_self_add_sub_op(starpu_data_handle C, enum operation op, starpu_data_handle A)
{
	struct starpu_task *task = starpu_task_create();

	uint64_t j_tag = current_tag++;

	task->buffers[0].handle = C;
	task->buffers[0].mode = STARPU_RW;
	task->buffers[1].handle = A;
	task->buffers[1].mode = STARPU_R;

	task->callback_func = NULL;

	switch (op) {
		case ADD:
			task->cl = &cl_self_add;
			break;
		case SUB:
			task->cl = &cl_self_sub;
			break;
		default:
			assert(0);
	};

	task->use_tag = 1;
	task->tag_id = (starpu_tag_t)j_tag;

	return task;
}
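/* Every output block is zero-initialized (allocate_tmp_matrix_wrapper memsets
 * the backing buffer), so chaining C += Mi / C -= Mi accumulations with the
 * helper above is enough to implement C11 = M1 + M4 - M5 + M7 and the three
 * other combinations. */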
struct cleanup_arg {
	unsigned ndeps;
	starpu_tag_t tags[8];
	unsigned ndata;
	starpu_data_handle data[32];
};

void cleanup_callback(void *_arg)
{
	//fprintf(stderr, "cleanup callback\n");
	struct cleanup_arg *arg = _arg;

	unsigned i;
	for (i = 0; i < arg->ndata; i++)
		starpu_advise_if_data_is_important(arg->data[i], 0);

	free(arg);
}

static starpu_codelet cleanup_codelet = {
	.where = STARPU_CPU|STARPU_CUDA,
	.model = NULL,
	.cpu_func = null_codelet,
#ifdef STARPU_USE_CUDA
	.cuda_func = null_codelet,
#endif
	.nbuffers = 0
};

/* this creates a task that tells StarPU that the specified data are no longer
 * essential once all the tasks it depends on have been executed */
void create_cleanup_task(struct cleanup_arg *cleanup_arg)
{
	struct starpu_task *task = starpu_task_create();

	uint64_t j_tag = current_tag++;

	task->cl = &cleanup_codelet;

	task->callback_func = cleanup_callback;
	task->callback_arg = cleanup_arg;

	task->use_tag = 1;
	task->tag_id = j_tag;

	starpu_tag_declare_deps_array(j_tag, cleanup_arg->ndeps, cleanup_arg->tags);

	starpu_submit_task(task);
}
void strassen_mult(struct strassen_iter *iter)
{
	if (iter->reclevel == 0)
	{
		struct starpu_task *task_mult =
			compute_add_sub_op(iter->C, MULT, iter->A, iter->B);
		starpu_tag_t tag_mult = task_mult->tag_id;

		starpu_tag_t deps_array[10];

		unsigned indexA, indexB;
		for (indexA = 0; indexA < iter->A_deps.ndeps; indexA++)
		{
			deps_array[indexA] = iter->A_deps.deps[indexA];
		}

		for (indexB = 0; indexB < iter->B_deps.ndeps; indexB++)
		{
			deps_array[indexB+indexA] = iter->B_deps.deps[indexB];
		}

		starpu_tag_declare_deps_array(tag_mult, indexA+indexB, deps_array);

		iter->C_deps.ndeps = 1;
		iter->C_deps.deps[0] = tag_mult;

		starpu_submit_task(task_mult);

		return;
	}

	starpu_data_handle A11 = starpu_get_sub_data(iter->A, 2, 0, 0);
	starpu_data_handle A12 = starpu_get_sub_data(iter->A, 2, 1, 0);
	starpu_data_handle A21 = starpu_get_sub_data(iter->A, 2, 0, 1);
	starpu_data_handle A22 = starpu_get_sub_data(iter->A, 2, 1, 1);

	starpu_data_handle B11 = starpu_get_sub_data(iter->B, 2, 0, 0);
	starpu_data_handle B12 = starpu_get_sub_data(iter->B, 2, 1, 0);
	starpu_data_handle B21 = starpu_get_sub_data(iter->B, 2, 0, 1);
	starpu_data_handle B22 = starpu_get_sub_data(iter->B, 2, 1, 1);

	starpu_data_handle C11 = starpu_get_sub_data(iter->C, 2, 0, 0);
	starpu_data_handle C12 = starpu_get_sub_data(iter->C, 2, 1, 0);
	starpu_data_handle C21 = starpu_get_sub_data(iter->C, 2, 0, 1);
	starpu_data_handle C22 = starpu_get_sub_data(iter->C, 2, 1, 1);

	unsigned size = starpu_get_matrix_nx(A11);

	/* M1a = (A11 + A22) */
	iter->Mia_data[0] = allocate_tmp_matrix(size, iter->reclevel);
	struct starpu_task *task_1a = compute_add_sub_op(iter->Mia_data[0], ADD, A11, A22);
	starpu_tag_t tag_1a = task_1a->tag_id;
	starpu_tag_declare_deps_array(tag_1a, iter->A_deps.ndeps, iter->A_deps.deps);
	starpu_submit_task(task_1a);

	/* M1b = (B11 + B22) */
	iter->Mib_data[0] = allocate_tmp_matrix(size, iter->reclevel);
	struct starpu_task *task_1b = compute_add_sub_op(iter->Mib_data[0], ADD, B11, B22);
	starpu_tag_t tag_1b = task_1b->tag_id;
	starpu_tag_declare_deps_array(tag_1b, iter->B_deps.ndeps, iter->B_deps.deps);
	starpu_submit_task(task_1b);

	/* M2a = (A21 + A22) */
	iter->Mia_data[1] = allocate_tmp_matrix(size, iter->reclevel);
	struct starpu_task *task_2a = compute_add_sub_op(iter->Mia_data[1], ADD, A21, A22);
	starpu_tag_t tag_2a = task_2a->tag_id;
	starpu_tag_declare_deps_array(tag_2a, iter->A_deps.ndeps, iter->A_deps.deps);
	starpu_submit_task(task_2a);

	/* M3b = (B12 - B22) */
	iter->Mib_data[2] = allocate_tmp_matrix(size, iter->reclevel);
	struct starpu_task *task_3b = compute_add_sub_op(iter->Mib_data[2], SUB, B12, B22);
	starpu_tag_t tag_3b = task_3b->tag_id;
	starpu_tag_declare_deps_array(tag_3b, iter->B_deps.ndeps, iter->B_deps.deps);
	starpu_submit_task(task_3b);

	/* M4b = (B21 - B11) */
	iter->Mib_data[3] = allocate_tmp_matrix(size, iter->reclevel);
	struct starpu_task *task_4b = compute_add_sub_op(iter->Mib_data[3], SUB, B21, B11);
	starpu_tag_t tag_4b = task_4b->tag_id;
	starpu_tag_declare_deps_array(tag_4b, iter->B_deps.ndeps, iter->B_deps.deps);
	starpu_submit_task(task_4b);

	/* M5a = (A11 + A12) */
	iter->Mia_data[4] = allocate_tmp_matrix(size, iter->reclevel);
	struct starpu_task *task_5a = compute_add_sub_op(iter->Mia_data[4], ADD, A11, A12);
	starpu_tag_t tag_5a = task_5a->tag_id;
	starpu_tag_declare_deps_array(tag_5a, iter->A_deps.ndeps, iter->A_deps.deps);
	starpu_submit_task(task_5a);

	/* M6a = (A21 - A11) */
	iter->Mia_data[5] = allocate_tmp_matrix(size, iter->reclevel);
	struct starpu_task *task_6a = compute_add_sub_op(iter->Mia_data[5], SUB, A21, A11);
	starpu_tag_t tag_6a = task_6a->tag_id;
	starpu_tag_declare_deps_array(tag_6a, iter->A_deps.ndeps, iter->A_deps.deps);
	starpu_submit_task(task_6a);

	/* M6b = (B11 + B12) */
	iter->Mib_data[5] = allocate_tmp_matrix(size, iter->reclevel);
	struct starpu_task *task_6b = compute_add_sub_op(iter->Mib_data[5], ADD, B11, B12);
	starpu_tag_t tag_6b = task_6b->tag_id;
	starpu_tag_declare_deps_array(tag_6b, iter->B_deps.ndeps, iter->B_deps.deps);
	starpu_submit_task(task_6b);

	/* M7a = (A12 - A22) */
	iter->Mia_data[6] = allocate_tmp_matrix(size, iter->reclevel);
	struct starpu_task *task_7a = compute_add_sub_op(iter->Mia_data[6], SUB, A12, A22);
	starpu_tag_t tag_7a = task_7a->tag_id;
	starpu_tag_declare_deps_array(tag_7a, iter->A_deps.ndeps, iter->A_deps.deps);
	starpu_submit_task(task_7a);

	/* M7b = (B21 + B22) */
	iter->Mib_data[6] = allocate_tmp_matrix(size, iter->reclevel);
	struct starpu_task *task_7b = compute_add_sub_op(iter->Mib_data[6], ADD, B21, B22);
	starpu_tag_t tag_7b = task_7b->tag_id;
	starpu_tag_declare_deps_array(tag_7b, iter->B_deps.ndeps, iter->B_deps.deps);
	starpu_submit_task(task_7b);
	iter->Mi_data[0] = allocate_tmp_matrix(size, iter->reclevel);
	iter->Mi_data[1] = allocate_tmp_matrix(size, iter->reclevel);
	iter->Mi_data[2] = allocate_tmp_matrix(size, iter->reclevel);
	iter->Mi_data[3] = allocate_tmp_matrix(size, iter->reclevel);
	iter->Mi_data[4] = allocate_tmp_matrix(size, iter->reclevel);
	iter->Mi_data[5] = allocate_tmp_matrix(size, iter->reclevel);
	iter->Mi_data[6] = allocate_tmp_matrix(size, iter->reclevel);

	/* M1 = M1a * M1b */
	iter->children[0] = malloc(sizeof(struct strassen_iter));
	iter->children[0]->reclevel = iter->reclevel - 1;
	iter->children[0]->A_deps.ndeps = 1;
	iter->children[0]->A_deps.deps[0] = tag_1a;
	iter->children[0]->B_deps.ndeps = 1;
	iter->children[0]->B_deps.deps[0] = tag_1b;
	iter->children[0]->A = iter->Mia_data[0];
	iter->children[0]->B = iter->Mib_data[0];
	iter->children[0]->C = iter->Mi_data[0];
	strassen_mult(iter->children[0]);

	/* M2 = M2a * B11 */
	iter->children[1] = malloc(sizeof(struct strassen_iter));
	iter->children[1]->reclevel = iter->reclevel - 1;
	iter->children[1]->A_deps.ndeps = 1;
	iter->children[1]->A_deps.deps[0] = tag_2a;
	iter->children[1]->B_deps.ndeps = iter->B_deps.ndeps;
	memcpy(iter->children[1]->B_deps.deps, iter->B_deps.deps, iter->B_deps.ndeps*sizeof(starpu_tag_t));
	iter->children[1]->A = iter->Mia_data[1];
	iter->children[1]->B = B11;
	iter->children[1]->C = iter->Mi_data[1];
	strassen_mult(iter->children[1]);

	/* M3 = A11 * M3b */
	iter->children[2] = malloc(sizeof(struct strassen_iter));
	iter->children[2]->reclevel = iter->reclevel - 1;
	iter->children[2]->A_deps.ndeps = iter->A_deps.ndeps;
	memcpy(iter->children[2]->A_deps.deps, iter->A_deps.deps, iter->A_deps.ndeps*sizeof(starpu_tag_t));
	iter->children[2]->B_deps.ndeps = 1;
	iter->children[2]->B_deps.deps[0] = tag_3b;
	iter->children[2]->A = A11;
	iter->children[2]->B = iter->Mib_data[2];
	iter->children[2]->C = iter->Mi_data[2];
	strassen_mult(iter->children[2]);

	/* M4 = A22 * M4b */
	iter->children[3] = malloc(sizeof(struct strassen_iter));
	iter->children[3]->reclevel = iter->reclevel - 1;
	iter->children[3]->A_deps.ndeps = iter->A_deps.ndeps;
	memcpy(iter->children[3]->A_deps.deps, iter->A_deps.deps, iter->A_deps.ndeps*sizeof(starpu_tag_t));
	iter->children[3]->B_deps.ndeps = 1;
	iter->children[3]->B_deps.deps[0] = tag_4b;
	iter->children[3]->A = A22;
	iter->children[3]->B = iter->Mib_data[3];
	iter->children[3]->C = iter->Mi_data[3];
	strassen_mult(iter->children[3]);

	/* M5 = M5a * B22 */
	iter->children[4] = malloc(sizeof(struct strassen_iter));
	iter->children[4]->reclevel = iter->reclevel - 1;
	iter->children[4]->A_deps.ndeps = 1;
	iter->children[4]->A_deps.deps[0] = tag_5a;
	iter->children[4]->B_deps.ndeps = iter->B_deps.ndeps;
	memcpy(iter->children[4]->B_deps.deps, iter->B_deps.deps, iter->B_deps.ndeps*sizeof(starpu_tag_t));
	iter->children[4]->A = iter->Mia_data[4];
	iter->children[4]->B = B22;
	iter->children[4]->C = iter->Mi_data[4];
	strassen_mult(iter->children[4]);

	/* M6 = M6a * M6b */
	iter->children[5] = malloc(sizeof(struct strassen_iter));
	iter->children[5]->reclevel = iter->reclevel - 1;
	iter->children[5]->A_deps.ndeps = 1;
	iter->children[5]->A_deps.deps[0] = tag_6a;
	iter->children[5]->B_deps.ndeps = 1;
	iter->children[5]->B_deps.deps[0] = tag_6b;
	iter->children[5]->A = iter->Mia_data[5];
	iter->children[5]->B = iter->Mib_data[5];
	iter->children[5]->C = iter->Mi_data[5];
	strassen_mult(iter->children[5]);

	/* M7 = M7a * M7b */
	iter->children[6] = malloc(sizeof(struct strassen_iter));
	iter->children[6]->reclevel = iter->reclevel - 1;
	iter->children[6]->A_deps.ndeps = 1;
	iter->children[6]->A_deps.deps[0] = tag_7a;
	iter->children[6]->B_deps.ndeps = 1;
	iter->children[6]->B_deps.deps[0] = tag_7b;
	iter->children[6]->A = iter->Mia_data[6];
	iter->children[6]->B = iter->Mib_data[6];
	iter->children[6]->C = iter->Mi_data[6];
	strassen_mult(iter->children[6]);
	starpu_tag_t *tag_m1 = iter->children[0]->C_deps.deps;
	starpu_tag_t *tag_m2 = iter->children[1]->C_deps.deps;
	starpu_tag_t *tag_m3 = iter->children[2]->C_deps.deps;
	starpu_tag_t *tag_m4 = iter->children[3]->C_deps.deps;
	starpu_tag_t *tag_m5 = iter->children[4]->C_deps.deps;
	starpu_tag_t *tag_m6 = iter->children[5]->C_deps.deps;
	starpu_tag_t *tag_m7 = iter->children[6]->C_deps.deps;

	/* C11 = M1 + M4 - M5 + M7 */
	struct starpu_task *task_c11_a = compute_self_add_sub_op(C11, ADD, iter->Mi_data[0]);
	struct starpu_task *task_c11_b = compute_self_add_sub_op(C11, ADD, iter->Mi_data[3]);
	struct starpu_task *task_c11_c = compute_self_add_sub_op(C11, SUB, iter->Mi_data[4]);
	struct starpu_task *task_c11_d = compute_self_add_sub_op(C11, ADD, iter->Mi_data[6]);
	starpu_tag_t tag_c11_a = task_c11_a->tag_id;
	starpu_tag_t tag_c11_b = task_c11_b->tag_id;
	starpu_tag_t tag_c11_c = task_c11_c->tag_id;
	starpu_tag_t tag_c11_d = task_c11_d->tag_id;

	/* C12 = M3 + M5 */
	struct starpu_task *task_c12_a = compute_self_add_sub_op(C12, ADD, iter->Mi_data[2]);
	struct starpu_task *task_c12_b = compute_self_add_sub_op(C12, ADD, iter->Mi_data[4]);
	starpu_tag_t tag_c12_a = task_c12_a->tag_id;
	starpu_tag_t tag_c12_b = task_c12_b->tag_id;

	/* C21 = M2 + M4 */
	struct starpu_task *task_c21_a = compute_self_add_sub_op(C21, ADD, iter->Mi_data[1]);
	struct starpu_task *task_c21_b = compute_self_add_sub_op(C21, ADD, iter->Mi_data[3]);
	starpu_tag_t tag_c21_a = task_c21_a->tag_id;
	starpu_tag_t tag_c21_b = task_c21_b->tag_id;

	/* C22 = M1 - M2 + M3 + M6 */
	struct starpu_task *task_c22_a = compute_self_add_sub_op(C22, ADD, iter->Mi_data[0]);
	struct starpu_task *task_c22_b = compute_self_add_sub_op(C22, SUB, iter->Mi_data[1]);
	struct starpu_task *task_c22_c = compute_self_add_sub_op(C22, ADD, iter->Mi_data[2]);
	struct starpu_task *task_c22_d = compute_self_add_sub_op(C22, ADD, iter->Mi_data[5]);
	starpu_tag_t tag_c22_a = task_c22_a->tag_id;
	starpu_tag_t tag_c22_b = task_c22_b->tag_id;
	starpu_tag_t tag_c22_c = task_c22_c->tag_id;
	starpu_tag_t tag_c22_d = task_c22_d->tag_id;
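	/* At reclevel 1 each child is a leaf and exposes a single multiplication
	 * tag in its C_deps; deeper children expose the four final accumulation
	 * tags (one per quarter of their C), hence the two dependency patterns
	 * below. */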
	if (iter->reclevel == 1)
	{
		starpu_tag_declare_deps(tag_c11_a, 1, tag_m1[0]);
		starpu_tag_declare_deps(tag_c11_b, 2, tag_m4[0], tag_c11_a);
		starpu_tag_declare_deps(tag_c11_c, 2, tag_m5[0], tag_c11_b);
		starpu_tag_declare_deps(tag_c11_d, 2, tag_m7[0], tag_c11_c);

		starpu_tag_declare_deps(tag_c12_a, 1, tag_m3[0]);
		starpu_tag_declare_deps(tag_c12_b, 2, tag_m5[0], tag_c12_a);

		starpu_tag_declare_deps(tag_c21_a, 1, tag_m2[0]);
		starpu_tag_declare_deps(tag_c21_b, 2, tag_m4[0], tag_c21_a);

		starpu_tag_declare_deps(tag_c22_a, 1, tag_m1[0]);
		starpu_tag_declare_deps(tag_c22_b, 2, tag_m2[0], tag_c22_a);
		starpu_tag_declare_deps(tag_c22_c, 2, tag_m3[0], tag_c22_b);
		starpu_tag_declare_deps(tag_c22_d, 2, tag_m6[0], tag_c22_c);
	}
	else
	{
		starpu_tag_declare_deps(tag_c11_a, 4, tag_m1[0], tag_m1[1], tag_m1[2], tag_m1[3]);
		starpu_tag_declare_deps(tag_c11_b, 5, tag_m4[0], tag_m4[1], tag_m4[2], tag_m4[3], tag_c11_a);
		starpu_tag_declare_deps(tag_c11_c, 5, tag_m5[0], tag_m5[1], tag_m5[2], tag_m5[3], tag_c11_b);
		starpu_tag_declare_deps(tag_c11_d, 5, tag_m7[0], tag_m7[1], tag_m7[2], tag_m7[3], tag_c11_c);

		starpu_tag_declare_deps(tag_c12_a, 4, tag_m3[0], tag_m3[1], tag_m3[2], tag_m3[3]);
		starpu_tag_declare_deps(tag_c12_b, 5, tag_m5[0], tag_m5[1], tag_m5[2], tag_m5[3], tag_c12_a);

		starpu_tag_declare_deps(tag_c21_a, 4, tag_m2[0], tag_m2[1], tag_m2[2], tag_m2[3]);
		starpu_tag_declare_deps(tag_c21_b, 5, tag_m4[0], tag_m4[1], tag_m4[2], tag_m4[3], tag_c21_a);

		starpu_tag_declare_deps(tag_c22_a, 4, tag_m1[0], tag_m1[1], tag_m1[2], tag_m1[3]);
		starpu_tag_declare_deps(tag_c22_b, 5, tag_m2[0], tag_m2[1], tag_m2[2], tag_m2[3], tag_c22_a);
		starpu_tag_declare_deps(tag_c22_c, 5, tag_m3[0], tag_m3[1], tag_m3[2], tag_m3[3], tag_c22_b);
		starpu_tag_declare_deps(tag_c22_d, 5, tag_m6[0], tag_m6[1], tag_m6[2], tag_m6[3], tag_c22_c);
	}

	starpu_submit_task(task_c11_a);
	starpu_submit_task(task_c11_b);
	starpu_submit_task(task_c11_c);
	starpu_submit_task(task_c11_d);

	starpu_submit_task(task_c12_a);
	starpu_submit_task(task_c12_b);

	starpu_submit_task(task_c21_a);
	starpu_submit_task(task_c21_b);

	starpu_submit_task(task_c22_a);
	starpu_submit_task(task_c22_b);
	starpu_submit_task(task_c22_c);
	starpu_submit_task(task_c22_d);

	iter->C_deps.ndeps = 4;
	iter->C_deps.deps[0] = tag_c11_d;
	iter->C_deps.deps[1] = tag_c12_b;
	iter->C_deps.deps[2] = tag_c21_b;
	iter->C_deps.deps[3] = tag_c22_d;

	struct cleanup_arg *clean_struct = malloc(sizeof(struct cleanup_arg));

	clean_struct->ndeps = 4;
	clean_struct->tags[0] = tag_c11_d;
	clean_struct->tags[1] = tag_c12_b;
	clean_struct->tags[2] = tag_c21_b;
	clean_struct->tags[3] = tag_c22_d;

	clean_struct->ndata = 17;
	clean_struct->data[0] = iter->Mia_data[0];
	clean_struct->data[1] = iter->Mib_data[0];
	clean_struct->data[2] = iter->Mia_data[1];
	clean_struct->data[3] = iter->Mib_data[2];
	clean_struct->data[4] = iter->Mib_data[3];
	clean_struct->data[5] = iter->Mia_data[4];
	clean_struct->data[6] = iter->Mia_data[5];
	clean_struct->data[7] = iter->Mib_data[5];
	clean_struct->data[8] = iter->Mia_data[6];
	clean_struct->data[9] = iter->Mib_data[6];
	clean_struct->data[10] = iter->Mi_data[0];
	clean_struct->data[11] = iter->Mi_data[1];
	clean_struct->data[12] = iter->Mi_data[2];
	clean_struct->data[13] = iter->Mi_data[3];
	clean_struct->data[14] = iter->Mi_data[4];
	clean_struct->data[15] = iter->Mi_data[5];
	clean_struct->data[16] = iter->Mi_data[6];

	create_cleanup_task(clean_struct);
}

static void dummy_codelet_func(__attribute__((unused)) void *descr[],
				__attribute__((unused)) void *arg)
{
}

static starpu_codelet dummy_codelet = {
	.where = STARPU_CPU|STARPU_CUDA,
	.model = NULL,
	.cpu_func = dummy_codelet_func,
#ifdef STARPU_USE_CUDA
	.cuda_func = dummy_codelet_func,
#endif
	.nbuffers = 0
};

static struct starpu_task *dummy_task(starpu_tag_t tag)
{
	struct starpu_task *task = starpu_task_create();
	task->callback_func = NULL;

	task->cl = &dummy_codelet;
	task->cl_arg = NULL;

	task->use_tag = 1;
	task->tag_id = tag;

	return task;
}
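/* main() brackets the whole task graph between two dummy tasks: the task
 * tagged 42 is the initial dependency carried by the root A_deps/B_deps, and
 * the synchronous task tagged 10 depends on the root C_deps, so waiting for
 * it means the whole product has been computed. */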
void parse_args(int argc, char **argv)
{
	int i;
	for (i = 1; i < argc; i++) {
		if (strcmp(argv[i], "-size") == 0) {
			char *argptr;
			size = strtol(argv[++i], &argptr, 10);
		}

		if (strcmp(argv[i], "-rec") == 0) {
			char *argptr;
			reclevel = strtol(argv[++i], &argptr, 10);
		}

		if (strcmp(argv[i], "-no-random") == 0) {
			norandom = 1;
		}

		if (strcmp(argv[i], "-pin") == 0) {
			pin = 1;
		}
	}
}
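/* Typical invocation (the binary name depends on how the example is built),
 * for instance:
 *
 *	./strassen2 -size 4096 -rec 3 -pin
 *
 * multiplies two 4096x4096 single-precision matrices with 3 levels of
 * Strassen recursion, using pinned host memory for the big buffer. */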
int main(int argc, char **argv)
{
	starpu_data_handle data_A, data_B, data_C;
	float *A, *B, *C;

	struct timeval start;
	struct timeval end;

	parse_args(argc, argv);

	assert(reclevel <= MAXREC);

	/* this is an upper bound ! */
	used_mem_predicted = size*size*(predicted_mem[reclevel] + 1);

	fprintf(stderr, "(Predicted) Memory consumption: %"PRIu64" MB\n", used_mem_predicted/(1024*1024));

	starpu_init(NULL);

	starpu_helper_init_cublas();

#ifdef STARPU_USE_CUDA
	if (pin) {
		starpu_malloc_pinned_if_possible((void **)&bigbuffer, used_mem_predicted);
	} else
#endif
	{
#ifdef STARPU_HAVE_POSIX_MEMALIGN
		posix_memalign((void **)&bigbuffer, 4096, used_mem_predicted);
#else
		bigbuffer = malloc(used_mem_predicted);
#endif
	}

	A = allocate_tmp_matrix_wrapper(size*size*sizeof(float));
	B = allocate_tmp_matrix_wrapper(size*size*sizeof(float));
	C = allocate_tmp_matrix_wrapper(size*size*sizeof(float));

	starpu_register_matrix_data(&data_A, 0, (uintptr_t)A, size, size, size, sizeof(float));
	starpu_register_matrix_data(&data_B, 0, (uintptr_t)B, size, size, size, sizeof(float));
	starpu_register_matrix_data(&data_C, 0, (uintptr_t)C, size, size, size, sizeof(float));

	unsigned rec;
	for (rec = 0; rec < reclevel; rec++)
	{
		starpu_map_filters(data_A, 2, &f, &f2);
		starpu_map_filters(data_B, 2, &f, &f2);
		starpu_map_filters(data_C, 2, &f, &f2);
	}

	struct strassen_iter iter;
	iter.reclevel = reclevel;
	iter.A = data_A;
	iter.B = data_B;
	iter.C = data_C;
	iter.A_deps.ndeps = 1;
	iter.A_deps.deps[0] = 42;
	iter.B_deps.ndeps = 1;
	iter.B_deps.deps[0] = 42;

	strassen_mult(&iter);

	starpu_tag_declare_deps_array(10, iter.C_deps.ndeps, iter.C_deps.deps);

	fprintf(stderr, "Using %"PRIu64" MB of memory\n", used_mem/(1024*1024));

	struct starpu_task *task_start = dummy_task(42);
	gettimeofday(&start, NULL);
	starpu_submit_task(task_start);

	struct starpu_task *task_end = dummy_task(10);
	task_end->synchronous = 1;
	starpu_submit_task(task_end);
	gettimeofday(&end, NULL);

	starpu_helper_shutdown_cublas();
	starpu_shutdown();

	double timing = (double)((end.tv_sec - start.tv_sec)*1000000 + (end.tv_usec - start.tv_usec));
	display_perf(timing, size);

	return 0;
}