/* data_interface.c */
  1. /* StarPU --- Runtime system for heterogeneous multicore architectures.
  2. *
  3. * Copyright (C) 2009-2015 Université de Bordeaux
  4. * Copyright (C) 2010, 2011, 2012, 2013, 2014, 2015 CNRS
  5. * Copyright (C) 2014 INRIA
  6. *
  7. * StarPU is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU Lesser General Public License as published by
  9. * the Free Software Foundation; either version 2.1 of the License, or (at
  10. * your option) any later version.
  11. *
  12. * StarPU is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  15. *
  16. * See the GNU Lesser General Public License in COPYING.LGPL for more details.
  17. */
  18. #include <stdint.h>
  19. #include <datawizard/datawizard.h>
  20. #include <datawizard/memory_nodes.h>
  21. #include <core/dependencies/data_concurrency.h>
  22. #include <common/uthash.h>
  23. #include <common/starpu_spinlock.h>
  24. #include <core/task.h>
  25. #include <core/workers.h>
  26. #include <datawizard/memstats.h>
  27. #ifdef STARPU_OPENMP
  28. #include <util/openmp_runtime_support.h>
  29. #endif
/* Entry in the `registered_handles' hash table. */
struct handle_entry
{
	UT_hash_handle hh;		/* makes this struct hashable by uthash */
	void *pointer;			/* key: host pointer */
	starpu_data_handle_t handle;	/* value: handle registered for that pointer */
};

/* Hash table mapping host pointers to data handles. */
static struct handle_entry *registered_handles;
/* Protects `registered_handles' */
static struct _starpu_spinlock registered_handles_lock;

/* Next id handed out by starpu_data_interface_get_next_id(); user-defined
 * interfaces get ids after the built-in ones. */
static int _data_interface_number = STARPU_MAX_INTERFACE_ID;

/* Optional global arbiter, enabled through STARPU_GLOBAL_ARBITER (testing). */
starpu_arbiter_t _starpu_global_arbiter;

static void _starpu_data_unregister(starpu_data_handle_t handle, unsigned coherent, unsigned nowait);
  43. void _starpu_data_interface_init(void)
  44. {
  45. _starpu_spin_init(&registered_handles_lock);
  46. /* Just for testing purpose */
  47. if (starpu_get_env_number_default("STARPU_GLOBAL_ARBITER", 0) > 0)
  48. _starpu_global_arbiter = starpu_arbiter_create();
  49. }
  50. void _starpu_data_interface_shutdown()
  51. {
  52. struct handle_entry *entry, *tmp;
  53. if (registered_handles)
  54. {
  55. _STARPU_DISP("[warning] The application has not unregistered all data handles.\n");
  56. }
  57. _starpu_spin_destroy(&registered_handles_lock);
  58. HASH_ITER(hh, registered_handles, entry, tmp)
  59. {
  60. HASH_DEL(registered_handles, entry);
  61. free(entry);
  62. }
  63. registered_handles = NULL;
  64. }
  65. #ifdef STARPU_OPENMP
/* Unregister every data handle recorded in REGION's pointer->handle table
 * and empty that table. */
void _starpu_omp_unregister_region_handles(struct starpu_omp_region *region)
{
	_starpu_spin_lock(&region->registered_handles_lock);
	struct handle_entry *entry, *tmp;
	HASH_ITER(hh, (region->registered_handles), entry, tmp)
	{
		/* Flag so _starpu_data_unregister_ram_pointer skips the hash
		 * lookup: the entry is removed right here. */
		entry->handle->removed_from_context_hash = 1;
		HASH_DEL(region->registered_handles, entry);
		/* NOTE(review): starpu_data_unregister is called while the
		 * region lock is held — confirm it can never re-take this
		 * lock (the removed_from_context_hash early-return seems to
		 * be what prevents that). */
		starpu_data_unregister(entry->handle);
		free(entry);
	}
	_starpu_spin_unlock(&region->registered_handles_lock);
}
/* Unregister every data handle recorded in TASK's private pointer->handle
 * table and empty that table. The table is task-private, so no locking. */
void _starpu_omp_unregister_task_handles(struct starpu_omp_task *task)
{
	struct handle_entry *entry, *tmp;
	HASH_ITER(hh, task->registered_handles, entry, tmp)
	{
		/* Flag so _starpu_data_unregister_ram_pointer skips the hash
		 * lookup: the entry is removed right here. */
		entry->handle->removed_from_context_hash = 1;
		HASH_DEL(task->registered_handles, entry);
		starpu_data_unregister(entry->handle);
		free(entry);
	}
}
  90. #endif
  91. struct starpu_data_interface_ops *_starpu_data_interface_get_ops(unsigned interface_id)
  92. {
  93. switch (interface_id)
  94. {
  95. case STARPU_MATRIX_INTERFACE_ID:
  96. return &starpu_interface_matrix_ops;
  97. case STARPU_BLOCK_INTERFACE_ID:
  98. return &starpu_interface_block_ops;
  99. case STARPU_VECTOR_INTERFACE_ID:
  100. return &starpu_interface_vector_ops;
  101. case STARPU_CSR_INTERFACE_ID:
  102. return &starpu_interface_csr_ops;
  103. case STARPU_BCSR_INTERFACE_ID:
  104. return &starpu_interface_bcsr_ops;
  105. case STARPU_VARIABLE_INTERFACE_ID:
  106. return &starpu_interface_variable_ops;
  107. case STARPU_VOID_INTERFACE_ID:
  108. return &starpu_interface_void_ops;
  109. case STARPU_MULTIFORMAT_INTERFACE_ID:
  110. return &starpu_interface_multiformat_ops;
  111. default:
  112. STARPU_ABORT();
  113. return NULL;
  114. }
  115. }
/* Register the mapping from PTR to HANDLE. If PTR is already mapped to
 * some handle, the new mapping shadows the previous one. */
void _starpu_data_register_ram_pointer(starpu_data_handle_t handle, void *ptr)
{
	struct handle_entry *entry;

	entry = (struct handle_entry *) malloc(sizeof(*entry));
	STARPU_ASSERT(entry != NULL);

	entry->pointer = ptr;
	entry->handle = handle;

#ifdef STARPU_OPENMP
	/* Inside an OpenMP task, register in the table of the task's context
	 * instead of the global one, so lookups are scoped to the context. */
	struct starpu_omp_task *task = _starpu_omp_get_task();
	if (task)
	{
		if (task->is_implicit)
		{
			/* Implicit tasks share the parallel region's table,
			 * which therefore needs locking. */
			struct starpu_omp_region *parallel_region = task->owner_region;
			_starpu_spin_lock(&parallel_region->registered_handles_lock);
			HASH_ADD_PTR(parallel_region->registered_handles, pointer, entry);
			_starpu_spin_unlock(&parallel_region->registered_handles_lock);
		}
		else
		{
			/* Explicit tasks have a private table; no locking. */
			HASH_ADD_PTR(task->registered_handles, pointer, entry);
		}
	}
	else
#endif
	{
		_starpu_spin_lock(&registered_handles_lock);
		HASH_ADD_PTR(registered_handles, pointer, entry);
		_starpu_spin_unlock(&registered_handles_lock);
	}
}
/* Return the handle registered for host pointer PTR, or NULL if none.
 * The lookup is scoped: inside an OpenMP task it searches the table of the
 * task's context (region table for implicit tasks, task-private table for
 * explicit ones); otherwise the global table. */
starpu_data_handle_t starpu_data_lookup(const void *ptr)
{
	starpu_data_handle_t result;

#ifdef STARPU_OPENMP
	struct starpu_omp_task *task = _starpu_omp_get_task();
	if (task)
	{
		if (task->is_implicit)
		{
			/* Region table is shared among implicit tasks: lock it. */
			struct starpu_omp_region *parallel_region = task->owner_region;
			_starpu_spin_lock(&parallel_region->registered_handles_lock);
			{
				struct handle_entry *entry;
				HASH_FIND_PTR(parallel_region->registered_handles, &ptr, entry);
				if(STARPU_UNLIKELY(entry == NULL))
					result = NULL;
				else
					result = entry->handle;
			}
			_starpu_spin_unlock(&parallel_region->registered_handles_lock);
		}
		else
		{
			/* Task-private table: no locking needed. */
			struct handle_entry *entry;
			HASH_FIND_PTR(task->registered_handles, &ptr, entry);
			if(STARPU_UNLIKELY(entry == NULL))
				result = NULL;
			else
				result = entry->handle;
		}
	}
	else
#endif
	{
		_starpu_spin_lock(&registered_handles_lock);
		{
			struct handle_entry *entry;
			HASH_FIND_PTR(registered_handles, &ptr, entry);
			if(STARPU_UNLIKELY(entry == NULL))
				result = NULL;
			else
				result = entry->handle;
		}
		_starpu_spin_unlock(&registered_handles_lock);
	}

	return result;
}
/*
 * Start monitoring a piece of data: initialise all bookkeeping fields of a
 * freshly allocated handle (locks, dependency lists, replicate states) and
 * publish its main-RAM pointer in the lookup table. The per-node/per-worker
 * interface buffers must already have been allocated by
 * _starpu_data_handle_init.
 */
static void _starpu_register_new_data(starpu_data_handle_t handle,
					unsigned home_node, uint32_t wt_mask)
{
	void *ptr;

	STARPU_ASSERT(handle);

	/* initialize the new lock */
	_starpu_data_requester_list_init(&handle->req_list);
	handle->refcnt = 0;
	handle->busy_count = 0;
	handle->busy_waiting = 0;
	STARPU_PTHREAD_MUTEX_INIT(&handle->busy_mutex, NULL);
	STARPU_PTHREAD_COND_INIT(&handle->busy_cond, NULL);
	_starpu_spin_init(&handle->header_lock);

	/* first take care to properly lock the data */
	_starpu_spin_lock(&handle->header_lock);

	/* there is no hierarchy yet */
	handle->nchildren = 0;
	handle->root_handle = handle;
	handle->father_handle = NULL;
	handle->sibling_index = 0; /* could be anything for the root */
	handle->depth = 1; /* the tree is just a node yet */
	handle->mpi_data = NULL; /* invalid until set */

	handle->is_not_important = 0;

	handle->sequential_consistency =
		starpu_data_get_default_sequential_consistency_flag();

	STARPU_PTHREAD_MUTEX_INIT(&handle->sequential_consistency_mutex, NULL);
	handle->last_submitted_mode = STARPU_R;
	handle->last_sync_task = NULL;
	/* empty circular list of implicit-dependency accessors */
	handle->last_submitted_accessors.task = NULL;
	handle->last_submitted_accessors.next = &handle->last_submitted_accessors;
	handle->last_submitted_accessors.prev = &handle->last_submitted_accessors;
	handle->post_sync_tasks = NULL;

	/* Tell helgrind that the race in _starpu_unlock_post_sync_tasks is fine */
	STARPU_HG_DISABLE_CHECKING(handle->post_sync_tasks_cnt);
	handle->post_sync_tasks_cnt = 0;

	/* By default, there are no methods available to perform a reduction */
	handle->redux_cl = NULL;
	handle->init_cl = NULL;

	handle->reduction_refcnt = 0;
	_starpu_data_requester_list_init(&handle->reduction_req_list);
	handle->reduction_tmp_handles = NULL;

#ifdef STARPU_USE_FXT
	handle->last_submitted_ghost_sync_id_is_valid = 0;
	handle->last_submitted_ghost_sync_id = 0;
	handle->last_submitted_ghost_accessors_id = NULL;
#endif

	handle->wt_mask = wt_mask;

	/* Store some values directly in the handle not to recompute them all
	 * the time. */
	handle->footprint = _starpu_compute_data_footprint(handle);

	handle->home_node = home_node;

	if (_starpu_global_arbiter)
		/* Just for testing purpose */
		starpu_data_assign_arbiter(handle, _starpu_global_arbiter);
	else
		handle->arbiter = NULL;
	_starpu_data_requester_list_init(&handle->arbitered_req_list);

	/* that new data is invalid from all nodes perpective except for the
	 * home node */
	unsigned node;
	for (node = 0; node < STARPU_MAXNODES; node++)
	{
		struct _starpu_data_replicate *replicate;
		replicate = &handle->per_node[node];

		replicate->memory_node = node;
		replicate->relaxed_coherency = 0;
		replicate->refcnt = 0;

		if (node == home_node)
		{
			/* this is the home node with the only valid copy */
			replicate->state = STARPU_OWNER;
			replicate->allocated = 1;
			replicate->automatically_allocated = 0;
			replicate->initialized = 1;
		}
		else
		{
			/* the value is not available here yet */
			replicate->state = STARPU_INVALID;
			replicate->allocated = 0;
			replicate->initialized = 0;
		}
	}

	unsigned worker;
	unsigned nworkers = starpu_worker_get_count();
	for (worker = 0; worker < nworkers; worker++)
	{
		struct _starpu_data_replicate *replicate;
		replicate = &handle->per_worker[worker];
		replicate->allocated = 0;
		replicate->automatically_allocated = 0;
		replicate->state = STARPU_INVALID;
		replicate->refcnt = 0;
		replicate->handle = handle;
		replicate->requested = 0;

		for (node = 0; node < STARPU_MAXNODES; node++)
		{
			replicate->request[node] = NULL;
		}

		/* Assuming being used for SCRATCH for now, patched when entering REDUX mode */
		replicate->relaxed_coherency = 1;
		replicate->initialized = 0;
		replicate->memory_node = starpu_worker_get_memory_node(worker);

		/* duplicate the content of the interface on node 0 */
		memcpy(replicate->data_interface, handle->per_node[0].data_interface, handle->ops->interface_size);
	}

	/* now the data is available ! */
	_starpu_spin_unlock(&handle->header_lock);

	/* If the data lives in main memory, publish the pointer so
	 * starpu_data_lookup() can map it back to this handle. */
	ptr = starpu_data_handle_to_pointer(handle, STARPU_MAIN_RAM);
	if (ptr != NULL)
	{
		_starpu_data_register_ram_pointer(handle, ptr);
	}
}
  313. void starpu_data_ptr_register(starpu_data_handle_t handle, unsigned node)
  314. {
  315. struct _starpu_data_replicate *replicate = &handle->per_node[node];
  316. _starpu_spin_lock(&handle->header_lock);
  317. STARPU_ASSERT_MSG(replicate->allocated == 0, "starpu_data_ptr_register must be called right after starpu_data_register");
  318. replicate->allocated = 1;
  319. replicate->automatically_allocated = 0;
  320. _starpu_spin_unlock(&handle->header_lock);
  321. }
  322. int _starpu_data_handle_init(starpu_data_handle_t handle, struct starpu_data_interface_ops *interface_ops, unsigned int mf_node)
  323. {
  324. unsigned node;
  325. unsigned worker;
  326. /* Tell helgrind that our access to busy_count in
  327. * starpu_data_unregister is actually safe */
  328. STARPU_HG_DISABLE_CHECKING(handle->busy_count);
  329. handle->ops = interface_ops;
  330. handle->mf_node = mf_node;
  331. handle->mpi_data = NULL;
  332. size_t interfacesize = interface_ops->interface_size;
  333. _starpu_memory_stats_init(handle);
  334. for (node = 0; node < STARPU_MAXNODES; node++)
  335. {
  336. _starpu_memory_stats_init_per_node(handle, node);
  337. struct _starpu_data_replicate *replicate;
  338. replicate = &handle->per_node[node];
  339. /* relaxed_coherency = 0 */
  340. replicate->handle = handle;
  341. replicate->data_interface = calloc(1, interfacesize);
  342. STARPU_ASSERT(replicate->data_interface);
  343. }
  344. unsigned nworkers = starpu_worker_get_count();
  345. for (worker = 0; worker < nworkers; worker++)
  346. {
  347. struct _starpu_data_replicate *replicate;
  348. replicate = &handle->per_worker[worker];
  349. replicate->handle = handle;
  350. replicate->data_interface = calloc(1, interfacesize);
  351. STARPU_ASSERT(replicate->data_interface);
  352. }
  353. return 0;
  354. }
  355. static
  356. starpu_data_handle_t _starpu_data_handle_allocate(struct starpu_data_interface_ops *interface_ops, unsigned int mf_node)
  357. {
  358. starpu_data_handle_t handle = (starpu_data_handle_t) calloc(1, sizeof(struct _starpu_data_state));
  359. STARPU_ASSERT(handle);
  360. _starpu_data_handle_init(handle, interface_ops, mf_node);
  361. return handle;
  362. }
  363. void starpu_data_register(starpu_data_handle_t *handleptr, unsigned home_node,
  364. void *data_interface,
  365. struct starpu_data_interface_ops *ops)
  366. {
  367. starpu_data_handle_t handle = _starpu_data_handle_allocate(ops, home_node);
  368. STARPU_ASSERT(handleptr);
  369. *handleptr = handle;
  370. /* fill the interface fields with the appropriate method */
  371. STARPU_ASSERT(ops->register_data_handle);
  372. ops->register_data_handle(handle, home_node, data_interface);
  373. _starpu_register_new_data(handle, home_node, 0);
  374. }
  375. void starpu_data_register_same(starpu_data_handle_t *handledst, starpu_data_handle_t handlesrc)
  376. {
  377. void *local_interface = starpu_data_get_interface_on_node(handlesrc, STARPU_MAIN_RAM);
  378. starpu_data_register(handledst, -1, local_interface, handlesrc->ops);
  379. }
  380. void *starpu_data_handle_to_pointer(starpu_data_handle_t handle, unsigned node)
  381. {
  382. /* Check whether the operation is supported and the node has actually
  383. * been allocated. */
  384. if (handle->ops->handle_to_pointer
  385. && starpu_data_test_if_allocated_on_node(handle, node))
  386. {
  387. return handle->ops->handle_to_pointer(handle, node);
  388. }
  389. return NULL;
  390. }
  391. void *starpu_data_get_local_ptr(starpu_data_handle_t handle)
  392. {
  393. return starpu_data_handle_to_pointer(handle,
  394. _starpu_memory_node_get_local_key());
  395. }
  396. struct starpu_data_interface_ops* starpu_data_get_interface_ops(starpu_data_handle_t handle)
  397. {
  398. return handle->ops;
  399. }
  400. /*
  401. * Stop monitoring a piece of data
  402. */
  403. void _starpu_data_unregister_ram_pointer(starpu_data_handle_t handle)
  404. {
  405. const void *ram_ptr = starpu_data_handle_to_pointer(handle, STARPU_MAIN_RAM);
  406. #ifdef STARPU_OPENMP
  407. if (handle->removed_from_context_hash)
  408. return;
  409. #endif
  410. if (ram_ptr != NULL)
  411. {
  412. /* Remove the PTR -> HANDLE mapping. If a mapping from PTR
  413. * to another handle existed before (e.g., when using
  414. * filters), it becomes visible again. */
  415. struct handle_entry *entry;
  416. #ifdef STARPU_OPENMP
  417. struct starpu_omp_task *task = _starpu_omp_get_task();
  418. if (task)
  419. {
  420. if (task->is_implicit)
  421. {
  422. struct starpu_omp_region *parallel_region = task->owner_region;
  423. _starpu_spin_lock(&parallel_region->registered_handles_lock);
  424. HASH_FIND_PTR(parallel_region->registered_handles, &ram_ptr, entry);
  425. STARPU_ASSERT(entry != NULL);
  426. HASH_DEL(registered_handles, entry);
  427. _starpu_spin_unlock(&parallel_region->registered_handles_lock);
  428. }
  429. else
  430. {
  431. HASH_FIND_PTR(task->registered_handles, &ram_ptr, entry);
  432. STARPU_ASSERT(entry != NULL);
  433. HASH_DEL(task->registered_handles, entry);
  434. }
  435. }
  436. else
  437. #endif
  438. {
  439. _starpu_spin_lock(&registered_handles_lock);
  440. HASH_FIND_PTR(registered_handles, &ram_ptr, entry);
  441. STARPU_ASSERT(entry != NULL);
  442. HASH_DEL(registered_handles, entry);
  443. _starpu_spin_unlock(&registered_handles_lock);
  444. }
  445. free(entry);
  446. }
  447. }
  448. void _starpu_data_free_interfaces(starpu_data_handle_t handle)
  449. {
  450. unsigned node;
  451. unsigned worker;
  452. unsigned nworkers = starpu_worker_get_count();
  453. for (node = 0; node < STARPU_MAXNODES; node++)
  454. free(handle->per_node[node].data_interface);
  455. for (worker = 0; worker < nworkers; worker++)
  456. free(handle->per_worker[worker].data_interface);
  457. }
/* Argument handed to _starpu_data_unregister_fetch_data_callback; lets the
 * unregistering thread sleep until the final fetch has completed. */
struct _starpu_unregister_callback_arg
{
	unsigned memory_node;		/* node where the final copy is fetched */
	starpu_data_handle_t handle;	/* handle being unregistered */
	unsigned terminated;		/* set to 1 by the callback when done */
	starpu_pthread_mutex_t mutex;	/* protects `terminated' */
	starpu_pthread_cond_t cond;	/* signalled once `terminated' is set */
};
/* Check whether we should tell starpu_data_unregister that the data handle is
 * not busy any more.
 * The header is supposed to be locked.
 * This may free the handle, if it was lazily unregistered (1 is returned in
 * that case). The handle pointer thus becomes invalid for the caller.
 */
int _starpu_data_check_not_busy(starpu_data_handle_t handle)
{
	/* Wake up anyone blocked in _starpu_data_unregister waiting for
	 * busy_count to reach zero. */
	if (!handle->busy_count && handle->busy_waiting)
	{
		STARPU_PTHREAD_MUTEX_LOCK(&handle->busy_mutex);
		STARPU_PTHREAD_COND_BROADCAST(&handle->busy_cond);
		STARPU_PTHREAD_MUTEX_UNLOCK(&handle->busy_mutex);
	}

	/* The handle has been destroyed in between (eg. this was a temporary
	 * handle created for a reduction.) */
	if (handle->lazy_unregister && handle->busy_count == 0)
	{
		_starpu_spin_unlock(&handle->header_lock);
		/* coherent=0 (no final fetch), nowait=1 (busy_count is 0) */
		_starpu_data_unregister(handle, 0, 1);
		/* Warning: in case we unregister the handle, we must be sure
		 * that the caller will not try to unlock the header after
		 * !*/
		return 1;
	}
	return 0;
}
/* If any memory node still holds a valid copy of HANDLE, fetch it into
 * REPLICATE (read mode); otherwise just pass the data on to the next
 * dependency. Used on the unregister path to flush the last copy home. */
static
void _starpu_check_if_valid_and_fetch_data_on_node(starpu_data_handle_t handle, struct _starpu_data_replicate *replicate)
{
	unsigned node;
	unsigned nnodes = starpu_memory_nodes_get_count();
	int valid = 0;

	for (node = 0; node < nnodes; node++)
	{
		if (handle->per_node[node].state != STARPU_INVALID)
		{
			/* we found a copy ! */
			valid = 1;
		}
	}
	if (valid)
	{
		int ret = _starpu_fetch_data_on_node(handle, replicate, STARPU_R, 0, 0, 0, NULL, NULL);
		STARPU_ASSERT(!ret);
		/* NOTE(review): this releases the home-node replicate rather
		 * than REPLICATE; callers always pass the home replicate, so
		 * both are the same today — confirm before passing any other
		 * replicate here. */
		_starpu_release_data_on_node(handle, 0, &handle->per_node[handle->home_node]);
	}
	else
	{
		/* No valid copy anywhere: nothing to fetch, just let the next
		 * requester (if any) proceed. */
		_starpu_spin_lock(&handle->header_lock);
		if (!_starpu_notify_data_dependencies(handle))
			_starpu_spin_unlock(&handle->header_lock);
	}
}
  520. static void _starpu_data_unregister_fetch_data_callback(void *_arg)
  521. {
  522. struct _starpu_unregister_callback_arg *arg = (struct _starpu_unregister_callback_arg *) _arg;
  523. starpu_data_handle_t handle = arg->handle;
  524. STARPU_ASSERT(handle);
  525. struct _starpu_data_replicate *replicate = &handle->per_node[arg->memory_node];
  526. _starpu_check_if_valid_and_fetch_data_on_node(handle, replicate);
  527. /* unlock the caller */
  528. STARPU_PTHREAD_MUTEX_LOCK(&arg->mutex);
  529. arg->terminated = 1;
  530. STARPU_PTHREAD_COND_SIGNAL(&arg->cond);
  531. STARPU_PTHREAD_MUTEX_UNLOCK(&arg->mutex);
  532. }
/* Unregister the data handle, perhaps we don't need to update the home_node
 * (in that case coherent is set to 0)
 * nowait is for internal use when we already know for sure that we won't have to wait.
 * Protocol: (1) optionally wait for implicit dependencies and flush the last
 * valid copy back to the home node, (2) wait for busy_count to reach zero,
 * (3) free every automatically allocated replicate and the handle itself.
 */
static void _starpu_data_unregister(starpu_data_handle_t handle, unsigned coherent, unsigned nowait)
{
	STARPU_ASSERT(handle);
	STARPU_ASSERT_MSG(handle->nchildren == 0, "data %p needs to be unpartitioned before unregistration", handle);
	STARPU_ASSERT(!(nowait && handle->busy_count != 0));

	int sequential_consistency = handle->sequential_consistency;
	if (sequential_consistency && !nowait)
	{
		STARPU_ASSERT_MSG(_starpu_worker_may_perform_blocking_calls(), "starpu_data_unregister must not be called from a task or callback, perhaps you can use starpu_data_unregister_submit instead");

		/* If sequential consistency is enabled, wait until data is available */
		_starpu_data_wait_until_available(handle, STARPU_RW, "starpu_data_unregister");
	}

	if (coherent && !nowait)
	{
		STARPU_ASSERT_MSG(_starpu_worker_may_perform_blocking_calls(), "starpu_data_unregister must not be called from a task or callback, perhaps you can use starpu_data_unregister_submit instead");

		/* Fetch data in the home of the data to ensure we have a valid copy
		 * where we registered it */
		int home_node = handle->home_node;
		if (home_node >= 0)
		{
			struct _starpu_unregister_callback_arg arg;
			arg.handle = handle;
			arg.memory_node = (unsigned)home_node;
			arg.terminated = 0;
			STARPU_PTHREAD_MUTEX_INIT(&arg.mutex, NULL);
			STARPU_PTHREAD_COND_INIT(&arg.cond, NULL);

			if (!_starpu_attempt_to_submit_data_request_from_apps(handle, STARPU_R,
					_starpu_data_unregister_fetch_data_callback, &arg))
			{
				/* no one has locked this data yet, so we proceed immediately */
				struct _starpu_data_replicate *home_replicate = &handle->per_node[home_node];
				_starpu_check_if_valid_and_fetch_data_on_node(handle, home_replicate);
			}
			else
			{
				/* The request was queued; sleep until the
				 * callback flags termination. */
				STARPU_PTHREAD_MUTEX_LOCK(&arg.mutex);
				while (!arg.terminated)
					STARPU_PTHREAD_COND_WAIT(&arg.cond, &arg.mutex);
				STARPU_PTHREAD_MUTEX_UNLOCK(&arg.mutex);
			}
			STARPU_PTHREAD_MUTEX_DESTROY(&arg.mutex);
			STARPU_PTHREAD_COND_DESTROY(&arg.cond);
		}

		/* If this handle uses a multiformat interface, we may have to convert
		 * this piece of data back into the CPU format.
		 * XXX : This is quite hacky, could we submit a task instead ?
		 */
		if (_starpu_data_is_multiformat_handle(handle) &&
			( starpu_node_get_kind(handle->mf_node) != STARPU_CPU_RAM
			  && starpu_node_get_kind(handle->mf_node) != STARPU_SCC_RAM
			  && starpu_node_get_kind(handle->mf_node) != STARPU_SCC_SHM
			))
		{
			_STARPU_DEBUG("Conversion needed\n");
			void *buffers[1];
			struct starpu_multiformat_interface *format_interface;
			format_interface = (struct starpu_multiformat_interface *) starpu_data_get_interface_on_node(handle, STARPU_MAIN_RAM);
			struct starpu_codelet *cl = NULL;
			enum starpu_node_kind node_kind = starpu_node_get_kind(handle->mf_node);

			/* Pick the device->CPU conversion codelet matching the
			 * node kind the data currently lives on. */
			switch (node_kind)
			{
#ifdef STARPU_USE_CUDA
				case STARPU_CUDA_RAM:
				{
					struct starpu_multiformat_data_interface_ops *mf_ops;
					mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
					cl = mf_ops->cuda_to_cpu_cl;
					break;
				}
#endif
#ifdef STARPU_USE_OPENCL
				case STARPU_OPENCL_RAM:
				{
					struct starpu_multiformat_data_interface_ops *mf_ops;
					mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
					cl = mf_ops->opencl_to_cpu_cl;
					break;
				}
#endif
#ifdef STARPU_USE_MIC
				case STARPU_MIC_RAM:
				{
					struct starpu_multiformat_data_interface_ops *mf_ops;
					mf_ops = (struct starpu_multiformat_data_interface_ops *) handle->ops->get_mf_ops(format_interface);
					cl = mf_ops->mic_to_cpu_cl;
					break;
				}
#endif
				case STARPU_CPU_RAM:	/* Impossible ! */
				case STARPU_SCC_RAM:	/* Impossible ! */
				case STARPU_SCC_SHM:	/* Impossible ! */
				default:
					STARPU_ABORT();
			}
			buffers[0] = format_interface;

			/* Run the conversion synchronously on the CPU. */
			_starpu_cl_func_t func = _starpu_task_get_cpu_nth_implementation(cl, 0);
			STARPU_ASSERT(func);
			func(buffers, NULL);
		}
	}

	_starpu_spin_lock(&handle->header_lock);
	if (!coherent)
	{
		/* Should we postpone the unregister operation ? */
		if ((handle->busy_count > 0) && handle->lazy_unregister)
		{
			_starpu_spin_unlock(&handle->header_lock);
			return;
		}
	}

	/* Tell holders of references that we're starting waiting */
	handle->busy_waiting = 1;
	_starpu_spin_unlock(&handle->header_lock);

	/* Wait for all requests to finish (notably WT requests) */
	STARPU_PTHREAD_MUTEX_LOCK(&handle->busy_mutex);
	while (1)
	{
		/* Here helgrind would shout that this an unprotected access,
		 * but this is actually fine: all threads who do busy_count--
		 * are supposed to call _starpu_data_check_not_busy, which will
		 * wake us up through the busy_mutex/busy_cond. */
		if (!handle->busy_count)
			break;
		/* This is woken by _starpu_data_check_not_busy, always called
		 * after decrementing busy_count */
		STARPU_PTHREAD_COND_WAIT(&handle->busy_cond, &handle->busy_mutex);
	}
	STARPU_PTHREAD_MUTEX_UNLOCK(&handle->busy_mutex);

	/* Wait for finished requests to release the handle */
	_starpu_spin_lock(&handle->header_lock);

	size_t size = _starpu_data_get_size(handle);

	/* Drop the pointer->handle mapping before freeing anything. */
	_starpu_data_unregister_ram_pointer(handle);

	/* Destroy the data now */
	unsigned node;
	for (node = 0; node < STARPU_MAXNODES; node++)
	{
		struct _starpu_data_replicate *local = &handle->per_node[node];
		/* free the data copy in a lazy fashion */
		if (local->allocated && local->automatically_allocated)
			_starpu_request_mem_chunk_removal(handle, local, node, size);
	}
	unsigned worker;
	unsigned nworkers = starpu_worker_get_count();
	for (worker = 0; worker < nworkers; worker++)
	{
		struct _starpu_data_replicate *local = &handle->per_worker[worker];
		/* free the data copy in a lazy fashion */
		if (local->allocated && local->automatically_allocated)
			_starpu_request_mem_chunk_removal(handle, local, starpu_worker_get_memory_node(worker), size);
	}
	_starpu_data_free_interfaces(handle);
	_starpu_memory_stats_free(handle);

	_starpu_spin_unlock(&handle->header_lock);
	_starpu_spin_destroy(&handle->header_lock);

	STARPU_PTHREAD_MUTEX_DESTROY(&handle->busy_mutex);
	STARPU_PTHREAD_COND_DESTROY(&handle->busy_cond);
	STARPU_PTHREAD_MUTEX_DESTROY(&handle->sequential_consistency_mutex);

	free(handle);
}
  696. void starpu_data_unregister(starpu_data_handle_t handle)
  697. {
  698. STARPU_ASSERT_MSG(!handle->lazy_unregister, "data %p can not be unregistered twice", handle);
  699. if (handle->unregister_hook)
  700. {
  701. handle->unregister_hook(handle);
  702. }
  703. _starpu_data_unregister(handle, 1, 0);
  704. }
  705. void starpu_data_unregister_no_coherency(starpu_data_handle_t handle)
  706. {
  707. if (handle->unregister_hook)
  708. {
  709. handle->unregister_hook(handle);
  710. }
  711. _starpu_data_unregister(handle, 0, 0);
  712. }
/* Callback run once every previously submitted task on the handle has
 * completed: flag the handle for lazy unregistration and release it; the
 * actual destruction happens in _starpu_data_check_not_busy when busy_count
 * finally drops to zero. */
static void _starpu_data_unregister_submit_cb(void *arg)
{
	starpu_data_handle_t handle = arg;
	_starpu_spin_lock(&handle->header_lock);
	handle->lazy_unregister = 1;
	/* The handle should be busy since we are working on it.
	 * When we release the handle below, it will be destroyed by
	 * _starpu_data_check_not_busy */
	STARPU_ASSERT(handle->busy_count);
	_starpu_spin_unlock(&handle->header_lock);

	starpu_data_release_on_node(handle, -1);
}
  725. void starpu_data_unregister_submit(starpu_data_handle_t handle)
  726. {
  727. STARPU_ASSERT_MSG(!handle->lazy_unregister, "data %p can not be unregistered twice", handle);
  728. if (handle->unregister_hook)
  729. {
  730. handle->unregister_hook(handle);
  731. }
  732. /* Wait for all task dependencies on this handle before putting it for free */
  733. starpu_data_acquire_on_node_cb(handle, -1, STARPU_RW, _starpu_data_unregister_submit_cb, handle);
  734. }
/* Mark every replicate of the (already acquired) handle STARPU_INVALID and
 * schedule the removal of the automatically allocated copies, then release
 * the handle. */
static void _starpu_data_invalidate(void *data)
{
	starpu_data_handle_t handle = data;
	size_t size = _starpu_data_get_size(handle);
	_starpu_spin_lock(&handle->header_lock);

	unsigned node;
	for (node = 0; node < STARPU_MAXNODES; node++)
	{
		struct _starpu_data_replicate *local = &handle->per_node[node];

		if (local->mc && local->allocated && local->automatically_allocated)
		{
			/* NOTE(review): the RAM pointer is only dropped when the
			 * main-RAM copy is automatically allocated — confirm
			 * application-provided buffers are meant to stay in the
			 * lookup table. */
			if (node == STARPU_MAIN_RAM)
				_starpu_data_unregister_ram_pointer(handle);

			/* free the data copy in a lazy fashion */
			_starpu_request_mem_chunk_removal(handle, local, node, size);
		}

		local->state = STARPU_INVALID;
	}

	unsigned worker;
	unsigned nworkers = starpu_worker_get_count();
	for (worker = 0; worker < nworkers; worker++)
	{
		struct _starpu_data_replicate *local = &handle->per_worker[worker];

		if (local->mc && local->allocated && local->automatically_allocated)
			/* free the data copy in a lazy fashion */
			_starpu_request_mem_chunk_removal(handle, local, starpu_worker_get_memory_node(worker), size);

		local->state = STARPU_INVALID;
	}

	_starpu_spin_unlock(&handle->header_lock);

	starpu_data_release_on_node(handle, -1);
}
  766. void starpu_data_invalidate(starpu_data_handle_t handle)
  767. {
  768. STARPU_ASSERT(handle);
  769. starpu_data_acquire_on_node(handle, -1, STARPU_W);
  770. _starpu_data_invalidate(handle);
  771. }
  772. void starpu_data_invalidate_submit(starpu_data_handle_t handle)
  773. {
  774. STARPU_ASSERT(handle);
  775. starpu_data_acquire_on_node_cb(handle, -1, STARPU_W, _starpu_data_invalidate, handle);
  776. }
  777. enum starpu_data_interface_id starpu_data_get_interface_id(starpu_data_handle_t handle)
  778. {
  779. return handle->ops->interfaceid;
  780. }
  781. void *starpu_data_get_interface_on_node(starpu_data_handle_t handle, unsigned memory_node)
  782. {
  783. return handle->per_node[memory_node].data_interface;
  784. }
  785. int starpu_data_interface_get_next_id(void)
  786. {
  787. _data_interface_number += 1;
  788. return _data_interface_number-1;
  789. }
  790. int starpu_data_pack(starpu_data_handle_t handle, void **ptr, starpu_ssize_t *count)
  791. {
  792. STARPU_ASSERT(handle->ops->pack_data);
  793. return handle->ops->pack_data(handle, _starpu_memory_node_get_local_key(), ptr, count);
  794. }
  795. int starpu_data_unpack(starpu_data_handle_t handle, void *ptr, size_t count)
  796. {
  797. STARPU_ASSERT(handle->ops->unpack_data);
  798. int ret;
  799. ret = handle->ops->unpack_data(handle, _starpu_memory_node_get_local_key(), ptr, count);
  800. starpu_free_flags(ptr, count, 0);
  801. return ret;
  802. }
  803. size_t starpu_data_get_size(starpu_data_handle_t handle)
  804. {
  805. return handle->ops->get_size(handle);
  806. }