
/* StarPU --- Runtime system for heterogeneous multicore architectures.
 *
 * Copyright (C) 2013,2015-2017  Inria
 * Copyright (C) 2009-2015,2017,2018-2019  Université de Bordeaux
 * Copyright (C) 2010-2013,2015,2017,2018,2019  CNRS
 *
 * StarPU is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or (at
 * your option) any later version.
 *
 * StarPU is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU Lesser General Public License in COPYING.LGPL for more details.
 */

#include <core/dependencies/data_concurrency.h>
#include <datawizard/coherency.h>
#include <core/sched_policy.h>
#include <common/starpu_spinlock.h>
#include <datawizard/sort_data_handles.h>
#include <datawizard/memory_nodes.h>
/*
 * We have a kind of dining philosophers problem: various tasks are accessing
 * various data concurrently in different modes: STARPU_R, STARPU_RW, STARPU_W,
 * STARPU_SCRATCH and STARPU_REDUX. STARPU_RW is managed as a STARPU_W access.
 * We have the following constraints:
 *
 * - A single STARPU_W access is allowed at a time.
 * - Concurrent STARPU_R accesses are allowed.
 * - Concurrent STARPU_SCRATCH accesses are allowed.
 * - Concurrent STARPU_REDUX accesses are allowed.
 *
 * What we do here is implement Dijkstra's solution: handles are sorted by
 * pointer value order, and tasks call
 * _starpu_attempt_to_submit_data_request for each requested data in that order
 * (see the _starpu_sort_task_handles call in _starpu_submit_job_enforce_data_deps).
 *
 * _starpu_attempt_to_submit_data_request will either:
 * - obtain access to the data, in which case the task can proceed with
 *   acquiring its other data (see _submit_job_enforce_data_deps), or
 * - queue a request on the data handle.
 *
 * When a task finishes, it calls _starpu_notify_data_dependencies for each
 * data, to release its acquisitions. This checks whether the first queued
 * request can be fulfilled, and in that case makes the task try to acquire
 * its next data.
 *
 * The same mechanism is used for application data acquisition
 * (starpu_data_acquire).
 *
 * For data with an arbiter, we have a second step, performed after this first
 * step, implemented in data_arbiter_concurrency.c.
 */
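
/*
 * Illustrative sketch (not part of the implementation): since all tasks
 * request their handles in the same total (pointer value) order, two tasks
 * T1 and T2 that both access handles A and B, with A < B, both try A first:
 *
 *   T1: submit(A) -> granted; submit(B) -> granted, T1 can be pushed
 *   T2: submit(A) -> queued on A; T2 never holds B while waiting for A
 *
 * Without the total order, T1 could hold A while waiting for B and T2 hold
 * B while waiting for A, i.e. the classical deadlock that Dijkstra's
 * resource-ordering solution avoids.
 */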
/*
 * Check whether the first queued request can proceed, and return it in
 * such a case.
 */
/* The handle header lock must be taken by the caller */
static struct _starpu_data_requester *may_unlock_data_req_list_head(starpu_data_handle_t handle)
{
	struct _starpu_data_requester_prio_list *req_list;

	if (handle->reduction_refcnt > 0)
	{
		req_list = &handle->reduction_req_list;
	}
	else
	{
		if (_starpu_data_requester_prio_list_empty(&handle->reduction_req_list))
			req_list = &handle->req_list;
		else
			req_list = &handle->reduction_req_list;
	}

	/* if there is nobody to unlock ... */
	if (_starpu_data_requester_prio_list_empty(req_list))
		return NULL;

	/* if there is no reference to the data anymore, we can use it */
	if (handle->refcnt == 0)
		return _starpu_data_requester_prio_list_pop_front_highest(req_list);

	/* Already writing to it, do not let another write access through */
	if (handle->current_mode == STARPU_W)
		return NULL;

	/* handle->current_mode == STARPU_R, so we can process more readers */
	struct _starpu_data_requester *r = _starpu_data_requester_prio_list_front_highest(req_list);

	enum starpu_data_access_mode r_mode = r->mode;
	if (r_mode == STARPU_RW)
		r_mode = STARPU_W;

	/* If this is a STARPU_R, STARPU_SCRATCH or STARPU_REDUX type of
	 * access, we only proceed if the current mode is the same as the
	 * requested mode. */
	if (r_mode == handle->current_mode)
		return _starpu_data_requester_prio_list_pop_front_highest(req_list);
	else
		return NULL;
}
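
/*
 * Worked example (illustrative): if the handle is currently in STARPU_R
 * mode with refcnt == 2 and the queue head is another STARPU_R request, the
 * head is popped and granted (concurrent readers are allowed); if the head
 * is STARPU_W (or STARPU_RW, folded into STARPU_W above), NULL is returned
 * until refcnt drops back to 0.
 */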
/* Try to submit a data request. If the request can be processed
 * immediately, return 0. If there is still a dependency that is not
 * compatible with the current mode, the request is put in the per-handle
 * list of "requesters", and this function returns 1. */
/* No lock is held, this acquires and releases the handle header lock */
static unsigned _starpu_attempt_to_submit_data_request(unsigned request_from_codelet,
						       starpu_data_handle_t handle, enum starpu_data_access_mode mode,
						       void (*callback)(void *), void *argcb,
						       struct _starpu_job *j, unsigned buffer_index)
{
	if (handle->arbiter)
		return _starpu_attempt_to_submit_arbitered_data_request(request_from_codelet, handle, mode, callback, argcb, j, buffer_index);

	/* Do not care about some flags */
	mode &= ~STARPU_COMMUTE;
	mode &= ~STARPU_SSEND;
	mode &= ~STARPU_LOCALITY;
	if (mode == STARPU_RW)
		mode = STARPU_W;

	/* Take the lock protecting the header. We try to make some progress
	 * in case this is called from a worker, otherwise we just wait for the
	 * lock to be available. */
	if (request_from_codelet)
	{
		int cpt = 0;
		while (cpt < STARPU_SPIN_MAXTRY && _starpu_spin_trylock(&handle->header_lock))
		{
			cpt++;
			_starpu_datawizard_progress(0);
		}
		if (cpt == STARPU_SPIN_MAXTRY)
			_starpu_spin_lock(&handle->header_lock);
	}
	else
	{
		_starpu_spin_lock(&handle->header_lock);
	}

	/* If we have a request that is not used for the reduction, and a
	 * reduction is pending, we put it at the end of the normal list, and
	 * we use the reduction_req_list instead */
	unsigned pending_reduction = (handle->reduction_refcnt > 0);
	unsigned frozen = 0;

	/* If we are currently performing a reduction, we freeze any request
	 * that is not explicitly a reduction task. */
	unsigned is_a_reduction_task = (request_from_codelet && j && j->reduction_task);

	if (pending_reduction && !is_a_reduction_task)
		frozen = 1;

	/* If nobody is currently accessing the piece of data, or if this is
	 * not another writer and the access is of the same type as the
	 * current one, we can proceed. */
	unsigned put_in_list = 1;

	enum starpu_data_access_mode previous_mode = handle->current_mode;

	if (!frozen && ((handle->refcnt == 0) || (!(mode == STARPU_W) && (handle->current_mode == mode))))
	{
		/* Detect whether this is the end of a reduction phase */
		/* We don't want to start multiple reductions of the
		 * same handle at the same time! */
		if ((handle->reduction_refcnt == 0) && (previous_mode == STARPU_REDUX) && (mode != STARPU_REDUX))
		{
			_starpu_data_end_reduction_mode(handle);

			/* Since we need to perform a mode change, we freeze
			 * the request if needed. */
			put_in_list = (handle->reduction_refcnt > 0);
		}
		else
		{
			put_in_list = 0;
		}
	}

	if (put_in_list)
	{
		/* there cannot be multiple writers or a new writer
		 * while the data is in read mode */
		handle->busy_count++;

		/* enqueue the request */
		struct _starpu_data_requester *r = _starpu_data_requester_new();
		r->mode = mode;
		r->is_requested_by_codelet = request_from_codelet;
		r->j = j;
		r->buffer_index = buffer_index;
		r->prio = j ? j->task->priority : 0;
		r->ready_data_callback = callback;
		r->argcb = argcb;

		/* We put the requester in a specific list if this is a reduction task */
		struct _starpu_data_requester_prio_list *req_list =
			is_a_reduction_task ? &handle->reduction_req_list : &handle->req_list;

		_starpu_data_requester_prio_list_push_back(req_list, r);

		/* failed */
		put_in_list = 1;
	}
	else
	{
		handle->refcnt++;
		handle->busy_count++;

		/* Do not write to handle->current_mode if it is already
		 * R. This avoids a spurious warning from helgrind when
		 * the following happens:
		 * acquire(R) in thread A
		 * acquire(R) in thread B
		 * release_data_on_node() in thread A
		 * helgrind would shout that the latter reads current_mode
		 * unsafely.
		 *
		 * This basically tells helgrind that this is a shared R
		 * acquisition.
		 */
		if (mode != STARPU_R || handle->current_mode != mode)
			handle->current_mode = mode;

		if ((mode == STARPU_REDUX) && (previous_mode != STARPU_REDUX))
			_starpu_data_start_reduction_mode(handle);

		/* success */
		put_in_list = 0;
	}

	_starpu_spin_unlock(&handle->header_lock);
	return put_in_list;
}
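
/*
 * Sketch of the expected caller pattern (illustrative; cb and arg are
 * hypothetical names): a return value of 1 means the request was queued, so
 * the caller must not proceed itself; the continuation will be run by
 * _starpu_notify_data_dependencies when the request is granted:
 *
 *   if (!_starpu_attempt_to_submit_data_request(0, handle, STARPU_R, cb, arg, NULL, 0))
 *       cb(arg); // granted immediately, run the continuation ourselves
 *   // else: cb(arg) will be called later, when the data is released
 */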
/* Take a data, without waiting for it to be available (it is assumed to be).
 * This is typically used for nodeps tasks, for which a previous task has already
 * waited for the proper conditions, and we just need to take another reference
 * for overall reference coherency.
 * No lock is held, this acquires and releases the handle header lock */
static void _starpu_take_data(unsigned request_from_codelet,
			      starpu_data_handle_t handle, enum starpu_data_access_mode mode,
			      struct _starpu_job *j)
{
	STARPU_ASSERT_MSG(!handle->arbiter, "TODO");

	/* Do not care about some flags */
	mode &= ~STARPU_COMMUTE;
	mode &= ~STARPU_SSEND;
	mode &= ~STARPU_LOCALITY;
	if (mode == STARPU_RW)
		mode = STARPU_W;

	/* Take the lock protecting the header. We try to make some progress
	 * in case this is called from a worker, otherwise we just wait for the
	 * lock to be available. */
	if (request_from_codelet)
	{
		int cpt = 0;
		while (cpt < STARPU_SPIN_MAXTRY && _starpu_spin_trylock(&handle->header_lock))
		{
			cpt++;
			_starpu_datawizard_progress(0);
		}
		if (cpt == STARPU_SPIN_MAXTRY)
			_starpu_spin_lock(&handle->header_lock);
	}
	else
	{
		_starpu_spin_lock(&handle->header_lock);
	}

	/* If we are currently performing a reduction, we freeze any request
	 * that is not explicitly a reduction task. */
	unsigned is_a_reduction_task = (request_from_codelet && j && j->reduction_task);
	STARPU_ASSERT_MSG(!is_a_reduction_task, "TODO");

	enum starpu_data_access_mode previous_mode = handle->current_mode;
	STARPU_ASSERT_MSG(mode == previous_mode, "mode was %d, but requested %d", previous_mode, mode);

	handle->refcnt++;
	handle->busy_count++;

	_starpu_spin_unlock(&handle->header_lock);
}
/* No lock is held */
unsigned _starpu_attempt_to_submit_data_request_from_apps(starpu_data_handle_t handle, enum starpu_data_access_mode mode,
							  void (*callback)(void *), void *argcb)
{
	return _starpu_attempt_to_submit_data_request(0, handle, mode, callback, argcb, NULL, 0);
}
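
/*
 * This is what backs application-level acquisition. A minimal usage sketch
 * of the corresponding public API (starpu_data_acquire_cb), assuming the
 * handle itself is passed as the callback argument:
 *
 *   static void fetched(void *arg)
 *   {
 *       // ... safe to access the data behind the handle here ...
 *       starpu_data_release((starpu_data_handle_t) arg);
 *   }
 *   // ...
 *   starpu_data_acquire_cb(handle, STARPU_R, fetched, handle);
 */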
/* No lock is held */
static unsigned attempt_to_submit_data_request_from_job(struct _starpu_job *j, unsigned buffer_index)
{
	/* Note that we do not access j->task->handles, but j->ordered_buffers,
	 * which is a sorted copy of it. */
	struct _starpu_data_descr *buffer = &(_STARPU_JOB_GET_ORDERED_BUFFERS(j)[buffer_index]);
	starpu_data_handle_t handle = buffer->handle;
	enum starpu_data_access_mode mode = buffer->mode & ~STARPU_COMMUTE;

	return _starpu_attempt_to_submit_data_request(1, handle, mode, NULL, NULL, j, buffer_index);
}
/* Try to acquire all data of the given job, one by one, in handle pointer
 * value order. */
/* No lock is held */
static unsigned _submit_job_enforce_data_deps(struct _starpu_job *j, unsigned start_buffer_index)
{
	unsigned buf;

	unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(j->task);
	for (buf = start_buffer_index; buf < nbuffers; buf++)
	{
		starpu_data_handle_t handle = _STARPU_JOB_GET_ORDERED_BUFFER_HANDLE(j, buf);
		if (buf)
		{
			starpu_data_handle_t handle_m1 = _STARPU_JOB_GET_ORDERED_BUFFER_HANDLE(j, buf-1);
			if (handle_m1 == handle)
				/* We have already requested this data, skip it. This
				 * depends on the ordering putting writes before reads, see
				 * _starpu_compar_handles. */
				continue;
		}

		STARPU_ASSERT(j->task->status == STARPU_TASK_BLOCKED || j->task->status == STARPU_TASK_BLOCKED_ON_TAG || j->task->status == STARPU_TASK_BLOCKED_ON_TASK || j->task->status == STARPU_TASK_BLOCKED_ON_DATA);
		j->task->status = STARPU_TASK_BLOCKED_ON_DATA;

		if (handle->arbiter)
		{
			/* We arrived at an arbitered data, so we stop and proceed
			 * with the arbiter's second step. */
			_starpu_submit_job_enforce_arbitered_deps(j, buf, nbuffers);
			return 1;
		}

		if (attempt_to_submit_data_request_from_job(j, buf))
		{
			return 1;
		}
	}

	return 0;
}
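
/*
 * Control-flow note (sketch): when this returns 1, the task is blocked on
 * buffer 'buf'. Once that request is granted,
 * _starpu_notify_data_dependencies calls unlock_one_requester, which resumes
 * this loop at buf+1; when the last buffer is finally acquired,
 * _starpu_push_task hands the job to the scheduler.
 */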
static void take_data_from_job(struct _starpu_job *j, unsigned buffer_index)
{
	/* Note that we do not access j->task->handles, but j->ordered_buffers,
	 * which is a sorted copy of it. */
	struct _starpu_data_descr *buffer = &(_STARPU_JOB_GET_ORDERED_BUFFERS(j)[buffer_index]);
	starpu_data_handle_t handle = buffer->handle;
	enum starpu_data_access_mode mode = buffer->mode & ~STARPU_COMMUTE;

	_starpu_take_data(1, handle, mode, j);
}

/* Immediately acquire all data of the given job, one by one, in handle
 * pointer value order. */
/* No lock is held */
static void _submit_job_take_data_deps(struct _starpu_job *j, unsigned start_buffer_index)
{
	unsigned buf;

	unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(j->task);
	for (buf = start_buffer_index; buf < nbuffers; buf++)
	{
		starpu_data_handle_t handle = _STARPU_JOB_GET_ORDERED_BUFFER_HANDLE(j, buf);
		if (buf)
		{
			starpu_data_handle_t handle_m1 = _STARPU_JOB_GET_ORDERED_BUFFER_HANDLE(j, buf-1);
			if (handle_m1 == handle)
				/* We have already requested this data, skip it. This
				 * depends on the ordering putting writes before reads, see
				 * _starpu_compar_handles. */
				continue;
		}

		if (handle->arbiter)
		{
			/* We arrived at an arbitered data, so we stop and proceed
			 * with the arbiter's second step. */
			STARPU_ASSERT_MSG(0, "TODO");
			//_starpu_submit_job_take_arbitered_deps(j, buf, nbuffers);
		}

		take_data_from_job(j, buf);
	}
}
/* This is called when the tag+task dependencies are to be finished releasing. */
void _starpu_enforce_data_deps_notify_job_ready_soon(struct _starpu_job *j, _starpu_notify_job_start_data *data)
{
	unsigned buf;

	if (j->task->cl)
	{
		unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(j->task);
		for (buf = 0; buf < nbuffers; buf++)
		{
			starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(j->task, buf);
			if (handle->arbiter)
				/* Oops, it's the arbiter's decision */
				return;
		}

		/* We need to check data availability only if sequential consistency
		 * dependencies have not been used */
		if (!j->sequential_consistency)
		{
			for (buf = 0; buf < nbuffers; buf++)
			{
				starpu_data_handle_t handle = STARPU_TASK_GET_HANDLE(j->task, buf);
				enum starpu_data_access_mode mode = STARPU_TASK_GET_MODE(j->task, buf) & ~STARPU_COMMUTE;
				if (handle->reduction_refcnt)
					/* Reduction pending, don't bother trying */
					return;
				if (handle->refcnt != 0 && (mode == STARPU_W || handle->current_mode != mode))
					/* Incompatible modes, not ready immediately */
					return;
			}
		}
	}

	/* Ok, it really looks like this job will be ready soon */
	_starpu_job_notify_ready_soon(j, data);
}
void _starpu_job_set_ordered_buffers(struct _starpu_job *j)
{
	/* Compute an ordered list of the different pieces of data so that we
	 * grab them according to a total order, thus avoiding a deadlock
	 * condition */
	unsigned i;
	unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(j->task);
	struct starpu_task *task = j->task;
	struct _starpu_data_descr *buffers = _STARPU_JOB_GET_ORDERED_BUFFERS(j);

	for (i = 0; i < nbuffers; i++)
	{
		buffers[i].index = i;
		buffers[i].handle = STARPU_TASK_GET_HANDLE(task, i);
		buffers[i].mode = STARPU_TASK_GET_MODE(task, i);
		buffers[i].node = -1;
	}

	_starpu_sort_task_handles(buffers, nbuffers);

	for (i = 0; i < nbuffers; i++)
	{
		buffers[buffers[i].index].orderedindex = i;
	}
}
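
/*
 * Small worked example (illustrative): for a task registered with handles
 * [C, A, B] (task buffer indices 0, 1, 2) where A < B < C in the sort
 * order, the sorted array is [A, B, C] with .index = {1, 2, 0}. The second
 * loop then stores the inverse permutation, .orderedindex = {2, 0, 1}, so
 * that slot k tells at which ordered position task buffer k ended up.
 */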
/* Sort the data used by the given job by handle pointer value order, and
 * try to acquire them in that order */
/* No lock is held */
unsigned _starpu_submit_job_enforce_data_deps(struct _starpu_job *j)
{
	struct starpu_codelet *cl = j->task->cl;

	if ((cl == NULL) || (STARPU_TASK_GET_NBUFFERS(j->task) == 0))
		return 0;

	return _submit_job_enforce_data_deps(j, 0);
}

/* This request got fulfilled, continue with the other requests of the
 * corresponding job */
/* No lock is held */
static unsigned unlock_one_requester(struct _starpu_data_requester *r)
{
	struct _starpu_job *j = r->j;
	unsigned nbuffers = STARPU_TASK_GET_NBUFFERS(j->task);
	unsigned buffer_index = r->buffer_index;

	if (buffer_index + 1 < nbuffers)
		/* not all buffers are protected yet */
		return _submit_job_enforce_data_deps(j, buffer_index + 1);
	else
		return 0;
}

/* Sort the data used by the given job by handle pointer value order, and
 * immediately acquire them in that order */
/* No lock is held */
void _starpu_submit_job_take_data_deps(struct _starpu_job *j)
{
	struct starpu_codelet *cl = j->task->cl;

	if ((cl == NULL) || (STARPU_TASK_GET_NBUFFERS(j->task) == 0))
		return;

	_submit_job_take_data_deps(j, 0);
}
/* This is called when a task is finished with a piece of data
 * (or on starpu_data_release)
 *
 * The header lock must already be taken by the caller.
 * This may free the handle if it was lazily unregistered (1 is returned in
 * that case). The handle pointer thus becomes invalid for the caller.
 */
int _starpu_notify_data_dependencies(starpu_data_handle_t handle)
{
	_starpu_spin_checklocked(&handle->header_lock);

	if (handle->arbiter)
	{
		/* Keep our reference for now, _starpu_notify_arbitered_dependencies
		 * will drop it when it needs to */
		STARPU_ASSERT(_starpu_data_requester_prio_list_empty(&handle->req_list));
		STARPU_ASSERT(_starpu_data_requester_prio_list_empty(&handle->reduction_req_list));
		_starpu_spin_unlock(&handle->header_lock);
		/* _starpu_notify_arbitered_dependencies will handle its own locking */
		_starpu_notify_arbitered_dependencies(handle);
		/* We have already unlocked */
		return 1;
	}

	/* A data access has finished so we remove a reference. */
	STARPU_ASSERT(handle->refcnt > 0);
	handle->refcnt--;
	STARPU_ASSERT(handle->busy_count > 0);
	handle->busy_count--;
	if (_starpu_data_check_not_busy(handle))
		/* Handle was destroyed, nothing left to do. */
		return 1;

	STARPU_ASSERT(_starpu_data_requester_prio_list_empty(&handle->arbitered_req_list));

	/* In case there is a pending reduction, and this is the last
	 * requester, we may go back to a "normal" coherency model. */
	if (handle->reduction_refcnt > 0)
	{
		//fprintf(stderr, "NOTIFY REDUCTION TASK RED REFCNT %d\n", handle->reduction_refcnt);
		handle->reduction_refcnt--;
		if (handle->reduction_refcnt == 0)
			_starpu_data_end_reduction_mode_terminate(handle);
	}

	if (handle->unlocking_reqs)
		/*
		 * Our caller is already running the unlock loop below (we were
		 * most probably called from the ready_data_callback call
		 * below). Avoid looping again (which would potentially mean
		 * unbounded recursion), our caller will continue doing the
		 * unlock work for us.
		 */
		return 0;

	handle->unlocking_reqs = 1;
	struct _starpu_data_requester *r;
	while ((r = may_unlock_data_req_list_head(handle)))
	{
		/* STARPU_RW accesses are treated as STARPU_W */
		enum starpu_data_access_mode r_mode = r->mode;
		if (r_mode == STARPU_RW)
			r_mode = STARPU_W;

		int put_in_list = 1;
		if ((handle->reduction_refcnt == 0) && (handle->current_mode == STARPU_REDUX) && (r_mode != STARPU_REDUX))
		{
			_starpu_data_end_reduction_mode(handle);

			/* Since we need to perform a mode change, we freeze
			 * the request if needed. */
			put_in_list = (handle->reduction_refcnt > 0);
		}
		else
		{
			put_in_list = 0;
		}

		if (put_in_list)
		{
			/* We need to put the request back because we must
			 * perform a reduction before. */
			_starpu_data_requester_prio_list_push_front(&handle->req_list, r);
		}
		else
		{
			/* The data is now attributed to that request so we put a
			 * reference on it. */
			handle->refcnt++;
			handle->busy_count++;

			enum starpu_data_access_mode previous_mode = handle->current_mode;
			handle->current_mode = r_mode;

			/* In case we enter reduction mode, we invalidate all
			 * per-worker replicates. Note that the "per-node" replicates
			 * are kept intact because we'll reduce a valid copy of the
			 * per-node replicate with the per-worker replicates. */
			if ((r_mode == STARPU_REDUX) && (previous_mode != STARPU_REDUX))
				_starpu_data_start_reduction_mode(handle);

			_starpu_spin_unlock(&handle->header_lock);

			if (r->is_requested_by_codelet)
			{
				if (!unlock_one_requester(r))
					_starpu_push_task(r->j);
			}
			else
			{
				STARPU_ASSERT(r->ready_data_callback);

				/* execute the callback associated with the data requester */
				r->ready_data_callback(r->argcb);
			}

			_starpu_data_requester_delete(r);

			_starpu_spin_lock(&handle->header_lock);
			STARPU_ASSERT(handle->busy_count > 0);
			handle->busy_count--;
			if (_starpu_data_check_not_busy(handle))
				return 1;
		}
	}
	handle->unlocking_reqs = 0;

	return 0;
}
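
/*
 * Re-entrancy sketch (illustrative): ready_data_callback above may call
 * starpu_data_release on the same handle, which re-enters
 * _starpu_notify_data_dependencies. The unlocking_reqs flag makes that inner
 * call return 0 early, and the outer while loop drains the requester list
 * instead, which bounds the recursion depth.
 */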