coherency.c
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2009-2012 Université de Bordeaux 1
 * Copyright (C) 2010, 2011, 2012 Centre National de la Recherche Scientifique
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <common/config.h>
#include <datawizard/coherency.h>
#include <datawizard/copy_driver.h>
#include <datawizard/write_back.h>
#include <core/dependencies/data_concurrency.h>
#include <profiling/profiling.h>
#include <math.h>

static int link_supports_direct_transfers(starpu_data_handle_t handle, unsigned src_node, unsigned dst_node, unsigned *handling_node);
uint32_t _starpu_select_src_node(starpu_data_handle_t handle, unsigned destination)
{
	int src_node = -1;
	unsigned i;

	unsigned nnodes = starpu_memory_nodes_get_count();

	/* first find a valid copy, either a STARPU_OWNER or a STARPU_SHARED */
	uint32_t node;

	uint32_t src_node_mask = 0;
	size_t size = _starpu_data_get_size(handle);
	double cost = INFINITY;

	for (node = 0; node < nnodes; node++)
	{
		if (handle->per_node[node].state != STARPU_INVALID)
		{
			/* we found a copy! */
			src_node_mask |= (1<<node);
		}
	}

	/* we should have found at least one copy! */
	STARPU_ASSERT(src_node_mask != 0);

	/* Without knowing the size, we won't know the cost */
	if (!size)
		cost = 0;

	/* Check whether we have a transfer cost for all nodes; if so, take the minimum */
	if (cost)
		for (i = 0; i < nnodes; i++)
		{
			if (src_node_mask & (1<<i))
			{
				double time = _starpu_predict_transfer_time(i, destination, size);
				unsigned handling_node;

				/* Avoid indirect transfers */
				if (!link_supports_direct_transfers(handle, i, destination, &handling_node))
					continue;

				if (_STARPU_IS_ZERO(time))
				{
					/* No estimation, will have to revert to the dumb strategy */
					cost = 0.0;
					break;
				}
				else if (time < cost)
				{
					cost = time;
					src_node = i;
				}
			}
		}

	if (cost && src_node != -1)
		/* We could estimate the cost, return the cheapest source */
		return src_node;

	/* Revert to the dumb strategy: take RAM unless only a GPU has it */
	for (i = 0; i < nnodes; i++)
	{
		if (src_node_mask & (1<<i))
		{
			/* this is a potential candidate */
			src_node = i;

			/* however, GPUs are expensive sources, really!
			 * Unless peer transfer is supported.
			 * Other sources should be fine */
			if (
#ifndef HAVE_CUDA_MEMCPY_PEER
			    starpu_node_get_kind(i) != STARPU_CUDA_RAM &&
#endif
			    starpu_node_get_kind(i) != STARPU_OPENCL_RAM)
				break;
		}
	}

	STARPU_ASSERT(src_node != -1);

	return src_node;
}
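/* Illustration: with a copy valid on node 0 (RAM) and node 2, src_node_mask
 * above is (1<<0)|(1<<2) == 0x5. If _starpu_predict_transfer_time() returns a
 * non-zero estimate for each candidate link, the cheapest one wins; as soon
 * as one estimate is missing, we fall back to preferring RAM over GPU
 * sources. */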
/* This may be called once the data is fetched, with the header and the
 * STARPU_RW lock held */
void _starpu_update_data_state(starpu_data_handle_t handle,
			       struct _starpu_data_replicate *requesting_replicate,
			       enum starpu_access_mode mode)
{
	/* There is nothing to do for relaxed coherency modes (scratch or
	 * reductions) */
	if (!(mode & STARPU_RW))
		return;

	unsigned nnodes = starpu_memory_nodes_get_count();

	/* the data is present now */
	unsigned requesting_node = requesting_replicate->memory_node;
	requesting_replicate->requested[requesting_node] = 0;

	if (mode & STARPU_W)
	{
		/* the requesting node now has the only valid copy */
		uint32_t node;
		for (node = 0; node < nnodes; node++)
			handle->per_node[node].state = STARPU_INVALID;

		requesting_replicate->state = STARPU_OWNER;
	}
	else
	{ /* read only */
		if (requesting_replicate->state != STARPU_OWNER)
		{
			/* there was at least another copy of the data */
			uint32_t node;
			for (node = 0; node < nnodes; node++)
			{
				struct _starpu_data_replicate *replicate = &handle->per_node[node];
				if (replicate->state != STARPU_INVALID)
					replicate->state = STARPU_SHARED;
			}
			requesting_replicate->state = STARPU_SHARED;
		}
	}
}
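/* Example of the resulting MSI transitions: with the data STARPU_OWNER on
 * node 1, a read fetch on node 0 turns both replicates STARPU_SHARED, while
 * a write fetch on node 0 invalidates node 1 and makes node 0 the new
 * STARPU_OWNER. */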
static int worker_supports_direct_access(unsigned node, unsigned handling_node)
{
	if (node == handling_node)
		return 1;

	if (!_starpu_memory_node_workers(handling_node))
		/* No worker to process the request from that node */
		return 0;

	int type = starpu_node_get_kind(node);
	switch (type)
	{
		case STARPU_CUDA_RAM:
#ifdef HAVE_CUDA_MEMCPY_PEER
			/* GPUs do not always allow direct remote access: if CUDA4
			 * is enabled, we allow two CUDA devices to communicate. */
			return (starpu_node_get_kind(handling_node) != STARPU_OPENCL_RAM);
#else
			/* Direct GPU-GPU transfers are not allowed in general */
			return 0;
#endif
		case STARPU_OPENCL_RAM:
			return 0;
		default:
			return 1;
	}
}
static int link_supports_direct_transfers(starpu_data_handle_t handle, unsigned src_node, unsigned dst_node, unsigned *handling_node)
{
	(void) handle; // unused

	/* XXX That's a hack until we get cudaMemcpy3DPeerAsync to work!
	 * Perhaps not all data interfaces provide a direct GPU-GPU transfer
	 * method! */
#ifdef STARPU_USE_CUDA
	if (src_node != dst_node && starpu_node_get_kind(src_node) == STARPU_CUDA_RAM && starpu_node_get_kind(dst_node) == STARPU_CUDA_RAM)
	{
		const struct starpu_data_copy_methods *copy_methods = handle->ops->copy_methods;
		if (!copy_methods->cuda_to_cuda_async)
			return 0;
	}
#endif

	if (worker_supports_direct_access(src_node, dst_node))
	{
		*handling_node = dst_node;
		return 1;
	}

	if (worker_supports_direct_access(dst_node, src_node))
	{
		*handling_node = src_node;
		return 1;
	}

	return 0;
}
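/* In practice: transfers with RAM on one end are always direct (the non-RAM
 * end handles them), CUDA-CUDA is direct only with HAVE_CUDA_MEMCPY_PEER and
 * a cuda_to_cuda_async copy method, and transfers between two OpenCL devices
 * (or OpenCL and CUDA) always need staging through main memory. */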
/* Determines the path of a request: each hop is defined by (src,dst) and the
 * node that handles the hop. The returned value indicates the number of hops,
 * and max_len is the maximum number of hops (i.e. the size of the
 * src_nodes, dst_nodes and handling_nodes arrays). */
static int determine_request_path(starpu_data_handle_t handle,
				  unsigned src_node, unsigned dst_node,
				  enum starpu_access_mode mode, int max_len,
				  unsigned *src_nodes, unsigned *dst_nodes,
				  unsigned *handling_nodes)
{
	if (!(mode & STARPU_R))
	{
		/* The destination node should only allocate the data, no transfer is required */
		STARPU_ASSERT(max_len >= 1);

		src_nodes[0] = 0; // ignored
		dst_nodes[0] = dst_node;
		handling_nodes[0] = dst_node;

		return 1;
	}

	unsigned handling_node;
	int link_is_valid = link_supports_direct_transfers(handle, src_node, dst_node, &handling_node);

	if (!link_is_valid)
	{
		/* We need an intermediate hop to implement data staging
		 * through main memory. */
		STARPU_ASSERT(max_len >= 2);

		/* XXX we hardcode 0 as the RAM node ... */

		/* GPU -> RAM */
		src_nodes[0] = src_node;
		dst_nodes[0] = 0;
		handling_nodes[0] = src_node;

		/* RAM -> GPU */
		src_nodes[1] = 0;
		dst_nodes[1] = dst_node;
		handling_nodes[1] = dst_node;

		return 2;
	}
	else
	{
		STARPU_ASSERT(max_len >= 1);

		src_nodes[0] = src_node;
		dst_nodes[0] = dst_node;
		handling_nodes[0] = handling_node;
#ifndef HAVE_CUDA_MEMCPY_PEER
		STARPU_ASSERT(!(mode & STARPU_R) || starpu_node_get_kind(src_node) != STARPU_CUDA_RAM || starpu_node_get_kind(dst_node) != STARPU_CUDA_RAM);
#endif
		return 1;
	}
}
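/* For example, fetching data from one OpenCL device to another yields two
 * hops: (src -> RAM) handled by the source node, then (RAM -> dst) handled
 * by the destination node. */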
/* handle->lock should be taken. r is returned locked. The node parameter
 * indicates either the source of the request, or the destination for a
 * write-only request. */
static struct _starpu_data_request *_starpu_search_existing_data_request(struct _starpu_data_replicate *replicate, unsigned node, enum starpu_access_mode mode, unsigned is_prefetch)
{
	struct _starpu_data_request *r;

	r = replicate->request[node];

	if (r)
	{
		_starpu_spin_lock(&r->lock);

		/* perhaps we need to "upgrade" the request */
		if (is_prefetch < r->prefetch)
			_starpu_update_prefetch_status(r);

		if (mode & STARPU_R)
		{
			/* in case the existing request did not imply a memory
			 * transfer yet, we have to take a second refcnt now
			 * for the source, in addition to the refcnt for the
			 * destination
			 * (so that the source remains valid) */
			if (!(r->mode & STARPU_R))
			{
				replicate->refcnt++;
				replicate->handle->busy_count++;
			}

			r->mode = (enum starpu_access_mode) ((int) r->mode | (int) STARPU_R);
		}

		if (mode & STARPU_W)
			r->mode = (enum starpu_access_mode) ((int) r->mode | (int) STARPU_W);
	}

	return r;
}
/*
 * This function is called when the data is needed on the local node; it
 * returns a pointer to the local copy
 *
 *			R	STARPU_W	STARPU_RW
 *	Owner		OK	OK		OK
 *	Shared		OK	1		1
 *	Invalid		2	3		4
 *
 * case 1 : shared + (read)write :
 *	no data copy but shared->Invalid/Owner
 * case 2 : invalid + read :
 *	data copy + invalid->shared + owner->shared (STARPU_ASSERT(there is a valid))
 * case 3 : invalid + write :
 *	no data copy + invalid->owner + (owner,shared)->invalid
 * case 4 : invalid + R/STARPU_W :
 *	data copy + if (STARPU_W) (invalid->owner + owner->invalid)
 *	            else (invalid,owner->shared)
 */

/* This function is called with the handle's header lock taken */
struct _starpu_data_request *_starpu_create_request_to_fetch_data(starpu_data_handle_t handle,
								  struct _starpu_data_replicate *dst_replicate,
								  enum starpu_access_mode mode, unsigned is_prefetch,
								  unsigned async,
								  void (*callback_func)(void *), void *callback_arg)
{
	unsigned requesting_node = dst_replicate->memory_node;

	if (dst_replicate->state != STARPU_INVALID)
	{
#ifdef STARPU_MEMORY_STATUS
		enum _starpu_cache_state old_state = dst_replicate->state;
#endif
		/* the data is already available so we can stop */
		_starpu_update_data_state(handle, dst_replicate, mode);
		_starpu_msi_cache_hit(requesting_node);

#ifdef STARPU_MEMORY_STATUS
		_starpu_handle_stats_cache_hit(handle, requesting_node);

		/* XXX Broken ? */
		if (old_state == STARPU_SHARED
		    && dst_replicate->state == STARPU_OWNER)
			_starpu_handle_stats_shared_to_owner(handle, requesting_node);
#endif

		_starpu_memchunk_recently_used(dst_replicate->mc, requesting_node);

		_starpu_spin_unlock(&handle->header_lock);

		if (callback_func)
			callback_func(callback_arg);

		_STARPU_LOG_OUT_TAG("data available");
		return NULL;
	}

	_starpu_msi_cache_miss(requesting_node);

	/* the only remaining situation is that the local copy was invalid */
	STARPU_ASSERT(dst_replicate->state == STARPU_INVALID);

	/* find someone who already has the data */
	uint32_t src_node = 0;

	/* if the data is in write-only mode, there is no need for a source */
	if (mode & STARPU_R)
	{
		src_node = _starpu_select_src_node(handle, requesting_node);
		STARPU_ASSERT(src_node != requesting_node);
	}

	/* We can safely assume that there won't be more than 2 hops in the
	 * current implementation */
	unsigned src_nodes[4], dst_nodes[4], handling_nodes[4];
	int nhops = determine_request_path(handle, src_node, requesting_node, mode, 4,
					   src_nodes, dst_nodes, handling_nodes);

	STARPU_ASSERT(nhops >= 1 && nhops <= 4);

	struct _starpu_data_request *requests[nhops];

	/* Did we reuse a request for that hop? */
	int reused_requests[nhops];

	/* Construct an array with a list of requests, possibly reusing existing requests */
	int hop;
	for (hop = 0; hop < nhops; hop++)
	{
		struct _starpu_data_request *r;

		unsigned hop_src_node = src_nodes[hop];
		unsigned hop_dst_node = dst_nodes[hop];
		unsigned hop_handling_node = handling_nodes[hop];

		struct _starpu_data_replicate *hop_src_replicate;
		struct _starpu_data_replicate *hop_dst_replicate;

		/* Only the first request is independent */
		unsigned ndeps = (hop == 0)?0:1;

		hop_src_replicate = &handle->per_node[hop_src_node];
		hop_dst_replicate = (hop != nhops - 1)?&handle->per_node[hop_dst_node]:dst_replicate;

		/* Try to reuse a request if possible */
		r = _starpu_search_existing_data_request(hop_dst_replicate,
							 (mode & STARPU_R)?hop_src_node:hop_dst_node,
							 mode, is_prefetch);

		reused_requests[hop] = !!r;

		if (!r)
		{
			/* Create a new request if there was no request to reuse */
			r = _starpu_create_data_request(handle, hop_src_replicate,
							hop_dst_replicate, hop_handling_node,
							mode, ndeps, is_prefetch);
		}

		requests[hop] = r;
	}

	/* Chain these requests */
	for (hop = 0; hop < nhops; hop++)
	{
		struct _starpu_data_request *r;
		r = requests[hop];

		if (hop != nhops - 1)
		{
			if (!reused_requests[hop + 1])
			{
				r->next_req[r->next_req_count++] = requests[hop + 1];
				STARPU_ASSERT(r->next_req_count <= STARPU_MAXNODES);
			}
		}
		else
			_starpu_data_request_append_callback(r, callback_func, callback_arg);

		if (reused_requests[hop])
			_starpu_spin_unlock(&r->lock);
	}

	if (!async)
		requests[nhops - 1]->refcnt++;

	/* we only submit the first request, the remaining ones will be
	 * automatically submitted afterwards */
	if (!reused_requests[0])
		_starpu_post_data_request(requests[0], handling_nodes[0]);

	return requests[nhops - 1];
}
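/* On a two-hop path, only requests[0] is posted here; requests[1] sits in
 * requests[0]->next_req and is submitted automatically once the first
 * transfer completes, so staging through RAM remains fully asynchronous. */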
int _starpu_fetch_data_on_node(starpu_data_handle_t handle, struct _starpu_data_replicate *dst_replicate,
			       enum starpu_access_mode mode, unsigned detached, unsigned async,
			       void (*callback_func)(void *), void *callback_arg)
{
	uint32_t local_node = _starpu_get_local_memory_node();
	_STARPU_LOG_IN();

	while (_starpu_spin_trylock(&handle->header_lock))
		_starpu_datawizard_progress(local_node, 1);

	if (!detached)
	{
		/* Take a reference which will be released by _starpu_release_data_on_node */
		dst_replicate->refcnt++;
		dst_replicate->handle->busy_count++;
	}

	struct _starpu_data_request *r;
	r = _starpu_create_request_to_fetch_data(handle, dst_replicate, mode,
						 detached, async, callback_func, callback_arg);

	/* If no request was created, the handle was already up-to-date on the
	 * node. In this case, _starpu_create_request_to_fetch_data has already
	 * unlocked the header. */
	if (!r)
		return 0;

	_starpu_spin_unlock(&handle->header_lock);

	int ret = async?0:_starpu_wait_data_request_completion(r, 1);
	_STARPU_LOG_OUT();
	return ret;
}

static int prefetch_data_on_node(starpu_data_handle_t handle, struct _starpu_data_replicate *replicate, enum starpu_access_mode mode)
{
	return _starpu_fetch_data_on_node(handle, replicate, mode, 1, 1, NULL, NULL);
}

static int fetch_data(starpu_data_handle_t handle, struct _starpu_data_replicate *replicate, enum starpu_access_mode mode)
{
	return _starpu_fetch_data_on_node(handle, replicate, mode, 0, 0, NULL, NULL);
}
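/* The two wrappers above only differ in their (detached, async) flags: a
 * prefetch is detached (it takes no reference on the replicate) and
 * asynchronous, whereas a regular fetch holds a reference and blocks until
 * the transfer has completed. */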
uint32_t _starpu_get_data_refcnt(starpu_data_handle_t handle, uint32_t node)
{
	return handle->per_node[node].refcnt;
}

size_t _starpu_data_get_size(starpu_data_handle_t handle)
{
	return handle->data_size;
}

uint32_t _starpu_data_get_footprint(starpu_data_handle_t handle)
{
	return handle->footprint;
}
/* in case the data was accessed in write mode, do not forget to
 * make it accessible again once it is possible! */
void _starpu_release_data_on_node(starpu_data_handle_t handle, uint32_t default_wt_mask, struct _starpu_data_replicate *replicate)
{
	uint32_t wt_mask;
	wt_mask = default_wt_mask | handle->wt_mask;
	wt_mask &= (1<<starpu_memory_nodes_get_count())-1;

	/* Note that it is possible that there is no valid copy of the data (if
	 * starpu_data_invalidate was called for instance). In that case, we do
	 * not enforce any write-through mechanism. */

	unsigned memory_node = replicate->memory_node;

	if (replicate->state != STARPU_INVALID && handle->current_mode & STARPU_W)
		if ((wt_mask & ~(1<<memory_node)))
			_starpu_write_through_data(handle, memory_node, wt_mask);

	uint32_t local_node = _starpu_get_local_memory_node();
	while (_starpu_spin_trylock(&handle->header_lock))
		_starpu_datawizard_progress(local_node, 1);

	/* Release the refcnt taken by fetch_data_on_node */
	replicate->refcnt--;
	STARPU_ASSERT(replicate->refcnt >= 0);

	STARPU_ASSERT(handle->busy_count > 0);
	handle->busy_count--;
	_starpu_data_check_not_busy(handle);

	/* In case there was a temporary handle (eg. used for reduction), this
	 * handle may have requested to be destroyed when the data is released
	 */
	unsigned handle_was_destroyed = handle->lazy_unregister;

	_starpu_notify_data_dependencies(handle);

	if (!handle_was_destroyed)
		_starpu_spin_unlock(&handle->header_lock);
}
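/* Write-through example: if bit 0 is set in wt_mask and a task has just
 * written the data on a GPU node, _starpu_write_through_data() pushes the
 * fresh value back to RAM (node 0) as soon as the data is released, instead
 * of waiting for a later fetch to trigger the transfer. */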
static void _starpu_set_data_requested_flag_if_needed(struct _starpu_data_replicate *replicate)
{
	// XXX : this is just a hint, so we don't take the lock ...
	// _STARPU_PTHREAD_SPIN_LOCK(&handle->header_lock);

	if (replicate->state == STARPU_INVALID)
	{
		unsigned dst_node = replicate->memory_node;
		replicate->requested[dst_node] = 1;
	}

	// _STARPU_PTHREAD_SPIN_UNLOCK(&handle->header_lock);
}
int starpu_prefetch_task_input_on_node(struct starpu_task *task, uint32_t node)
{
	unsigned nbuffers = task->cl->nbuffers;
	unsigned index;

	for (index = 0; index < nbuffers; index++)
	{
		starpu_data_handle_t handle = task->handles[index];
		enum starpu_access_mode mode = task->cl->modes[index];

		if (mode & (STARPU_SCRATCH|STARPU_REDUX))
			continue;

		struct _starpu_data_replicate *replicate = &handle->per_node[node];
		prefetch_data_on_node(handle, replicate, mode);

		_starpu_set_data_requested_flag_if_needed(replicate);
	}

	return 0;
}
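/* Minimal usage sketch (not part of this file): a scheduling policy that has
 * already chosen a worker can warm that worker's memory node before the task
 * is actually picked up. starpu_worker_get_memory_node() is assumed to be
 * available from the public worker API.
 *
 *	static void warm_up(struct starpu_task *task, int workerid)
 *	{
 *		// hypothetical helper: map the worker to its memory node
 *		unsigned node = starpu_worker_get_memory_node(workerid);
 *		// start detached, asynchronous fetches of all task inputs
 *		starpu_prefetch_task_input_on_node(task, node);
 *	}
 */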
static struct _starpu_data_replicate *get_replicate(starpu_data_handle_t handle, enum starpu_access_mode mode, int workerid, unsigned local_memory_node)
{
	if (mode & (STARPU_SCRATCH|STARPU_REDUX))
		return &handle->per_worker[workerid];
	else
		/* That's a "normal" buffer (R/W) */
		return &handle->per_node[local_memory_node];
}
int _starpu_fetch_task_input(struct _starpu_job *j, uint32_t mask)
{
	_STARPU_TRACE_START_FETCH_INPUT(NULL);

	int profiling = starpu_profiling_status_get();
	struct starpu_task *task = j->task;
	if (profiling && task->profiling_info)
		_starpu_clock_gettime(&task->profiling_info->acquire_data_start_time);

	struct starpu_buffer_descr *descrs = j->ordered_buffers;
	unsigned nbuffers = task->cl->nbuffers;

	unsigned local_memory_node = _starpu_get_local_memory_node();

	int workerid = starpu_worker_get_id();

	unsigned index;
	for (index = 0; index < nbuffers; index++)
	{
		int ret;
		starpu_data_handle_t handle = descrs[index].handle;
		enum starpu_access_mode mode = descrs[index].mode;

		struct _starpu_data_replicate *local_replicate;

		if (index && descrs[index-1].handle == descrs[index].handle)
			/* We have already taken this data, skip it. This
			 * depends on ordering putting writes before reads, see
			 * _starpu_compar_handles */
			continue;

		local_replicate = get_replicate(handle, mode, workerid, local_memory_node);

		ret = fetch_data(handle, local_replicate, mode);
		if (STARPU_UNLIKELY(ret))
			goto enomem;
	}

	/* Now that we have taken the data locks in locking order, fill the codelet interfaces in function order. */
	for (index = 0; index < nbuffers; index++)
	{
		starpu_data_handle_t handle = task->handles[index];
		enum starpu_access_mode mode = task->cl->modes[index];

		struct _starpu_data_replicate *local_replicate;

		local_replicate = get_replicate(handle, mode, workerid, local_memory_node);

		task->interfaces[index] = local_replicate->data_interface;

		if (mode & STARPU_REDUX)
		{
			/* If the replicate was not initialized yet, we have to do it now */
			if (!local_replicate->initialized)
				_starpu_redux_init_data_replicate(handle, local_replicate, workerid);
		}
	}

	if (profiling && task->profiling_info)
		_starpu_clock_gettime(&task->profiling_info->acquire_data_end_time);

	_STARPU_TRACE_END_FETCH_INPUT(NULL);

	return 0;

enomem:
	/* try to unreference all the inputs that were successfully taken */
	/* XXX broken ... */
	_STARPU_DISP("something went wrong with buffer %u\n", index);

	//push_codelet_output(task, index, mask);
	_starpu_push_task_output(j, mask);
	return -1;
}
void _starpu_push_task_output(struct _starpu_job *j, uint32_t mask)
{
	_STARPU_TRACE_START_PUSH_OUTPUT(NULL);

	int profiling = starpu_profiling_status_get();
	struct starpu_task *task = j->task;
	if (profiling && task->profiling_info)
		_starpu_clock_gettime(&task->profiling_info->release_data_start_time);

	struct starpu_buffer_descr *descrs = j->ordered_buffers;
	unsigned nbuffers = task->cl->nbuffers;

	int workerid = starpu_worker_get_id();
	unsigned local_memory_node = _starpu_get_local_memory_node();

	unsigned index;
	for (index = 0; index < nbuffers; index++)
	{
		starpu_data_handle_t handle = descrs[index].handle;
		enum starpu_access_mode mode = descrs[index].mode;

		struct _starpu_data_replicate *local_replicate;

		if (index && descrs[index-1].handle == descrs[index].handle)
			/* We have already released this data, skip it. This
			 * depends on ordering putting writes before reads, see
			 * _starpu_compar_handles */
			continue;

		local_replicate = get_replicate(handle, mode, workerid, local_memory_node);

		/* In case there was a temporary handle (eg. used for
		 * reduction), this handle may have requested to be destroyed
		 * when the data is released
		 */
		unsigned handle_was_destroyed = handle->lazy_unregister;

		_starpu_release_data_on_node(handle, mask, local_replicate);

		if (!handle_was_destroyed)
			_starpu_release_data_enforce_sequential_consistency(task, handle);
	}

	if (profiling && task->profiling_info)
		_starpu_clock_gettime(&task->profiling_info->release_data_end_time);

	_STARPU_TRACE_END_PUSH_OUTPUT(NULL);
}
/* NB: this value can only be an indication of the status of the data
 * at some point, but there is no strong guarantee! */
unsigned _starpu_is_data_present_or_requested(starpu_data_handle_t handle, uint32_t node)
{
	unsigned ret = 0;

	// XXX : this is just a hint, so we don't take the lock ...
	// _STARPU_PTHREAD_SPIN_LOCK(&handle->header_lock);

	if (handle->per_node[node].state != STARPU_INVALID)
	{
		ret = 1;
	}
	else
	{
		unsigned i;
		unsigned nnodes = starpu_memory_nodes_get_count();

		for (i = 0; i < nnodes; i++)
		{
			if (handle->per_node[node].requested[i] || handle->per_node[node].request[i])
				ret = 1;
		}
	}

	// _STARPU_PTHREAD_SPIN_UNLOCK(&handle->header_lock);

	return ret;
}