@@ -585,7 +585,7 @@ static size_t free_potentially_in_use_mc(unsigned node, unsigned force, size_t r
 {
 	size_t freed = 0;
 
-	struct _starpu_mem_chunk *mc, *next_mc = NULL;
+	struct _starpu_mem_chunk *mc, *next_mc = (void*) -1;
 
 	/*
	 * We have to unlock mc_rwlock before locking header_lock, so we have
@@ -598,23 +598,31 @@ static size_t free_potentially_in_use_mc(unsigned node, unsigned force, size_t r
 	while (1)
 	{
 		STARPU_PTHREAD_RWLOCK_WRLOCK(&mc_rwlock[node]);
-
-		mc = _starpu_mem_chunk_list_begin(mc_list[node]);
-		if (next_mc)
-
+
+		if (_starpu_mem_chunk_list_empty(mc_list[node]) || !next_mc)
+		{
+			STARPU_PTHREAD_RWLOCK_UNLOCK(&mc_rwlock[node]);
+
+			break;
+		}
+
+		if (next_mc == (void*) -1) {
+
+			mc = _starpu_mem_chunk_list_begin(mc_list[node]);
+		} else {
+
 			for (mc = _starpu_mem_chunk_list_begin(mc_list[node]);
 			     mc != _starpu_mem_chunk_list_end(mc_list[node]);
 			     mc = _starpu_mem_chunk_list_next(mc))
 				if (mc == next_mc)
-
+
 					break;
 
-		if (mc == _starpu_mem_chunk_list_end(mc_list[node]))
-		{
-
-			STARPU_PTHREAD_RWLOCK_UNLOCK(&mc_rwlock[node]);
-			break;
+			if (mc == _starpu_mem_chunk_list_end(mc_list[node]))
+
+				mc = _starpu_mem_chunk_list_begin(mc_list[node]);
 		}
 
+
 		next_mc = _starpu_mem_chunk_list_next(mc);
 		STARPU_PTHREAD_RWLOCK_UNLOCK(&mc_rwlock[node]);
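
The change replaces the old "stop once the remembered chunk is gone" traversal
with a resumable cursor: next_mc starts as the (void*) -1 sentinel, meaning
"first pass, begin at the head"; it becomes NULL once the walk has gone past
the last chunk; in between it names the next chunk to try. If that chunk was
freed by another thread while mc_rwlock was released, the new code restarts
from the head of the list instead of breaking out early. Below is a minimal,
self-contained sketch of the same pattern, using a plain singly-linked list
and a pthread mutex in place of StarPU's generated list API and mc_rwlock;
every name in it (node_t, list_t, CURSOR_START, walk_list) is hypothetical,
not StarPU API.

/* Sketch of the resumable-cursor traversal; hypothetical types, not StarPU. */
#include <pthread.h>
#include <stdio.h>

typedef struct node { struct node *next; int data; } node_t;
typedef struct { node_t *head; pthread_mutex_t lock; } list_t;

/* Sentinel: "first pass, start from the head".  NULL, in contrast,
 * means "the previous pass fell off the end of the list: stop". */
#define CURSOR_START ((node_t *) -1)

static void walk_list(list_t *l)
{
	node_t *cur, *next = CURSOR_START;

	while (1)
	{
		pthread_mutex_lock(&l->lock);

		if (!l->head || !next)
		{
			/* Empty list, or the previous pass reached the end. */
			pthread_mutex_unlock(&l->lock);
			break;
		}

		if (next == CURSOR_START)
			cur = l->head;
		else
		{
			/* The remembered node may have been freed by another
			 * thread while the lock was dropped: look it up. */
			for (cur = l->head; cur; cur = cur->next)
				if (cur == next)
					break;
			if (!cur)
				/* It vanished: restart from the head. */
				cur = l->head;
		}

		/* Remember where to resume before releasing the lock;
		 * cur->next is NULL when cur is the last element, which
		 * makes the next iteration break out of the while loop. */
		next = cur->next;
		pthread_mutex_unlock(&l->lock);

		/* Process cur without holding the list lock (the patched
		 * code takes the chunk's own header_lock at this point). */
		printf("visiting %d\n", cur->data);
	}
}

int main(void)
{
	node_t c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
	list_t l = { &a, PTHREAD_MUTEX_INITIALIZER };
	walk_list(&l);
	return 0;
}

The sentinel is what lets the loop tell "never started" apart from "already
reached the end", a distinction the old NULL initialisation of next_mc could
not express.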