Browse Source

add function for the scheduling ctxs in the doc and sort function in the header

Andra Hugo 11 years ago
parent
commit
4e3d51abc6

+ 7 - 0
doc/doxygen/chapters/api/codelet_and_tasks.doxy

@@ -649,6 +649,13 @@ starpu_task_submit() can be called from anywhere, including codelet
 functions and callbacks, provided that the field
 starpu_task::synchronous is set to 0.
 
+\fn int starpu_task_submit_to_ctx(struct starpu_task *task, unsigned sched_ctx_id)
+\ingroup API_Codelet_And_Tasks
+This function submits a task to StarPU in the context <c>sched_ctx_id</c>.
+By default, starpu_task_submit() submits the task to a global context that is
+created automatically by StarPU.
+
+
 \fn int starpu_task_wait_for_all(void)
 \ingroup API_Codelet_And_Tasks
 This function blocks until all the tasks that were submitted

+ 1 - 1
doc/doxygen/chapters/api/scheduling_context_hypervisor.doxy

@@ -2,7 +2,7 @@
  * This file is part of the StarPU Handbook.
  * Copyright (C) 2009--2011  Universit@'e de Bordeaux 1
  * Copyright (C) 2010, 2011, 2012, 2013  Centre National de la Recherche Scientifique
- * Copyright (C) 2011, 2012 Institut National de Recherche en Informatique et Automatique
+ * Copyright (C) 2011, 2012, 2013 Institut National de Recherche en Informatique et Automatique
  * See the file version.doxy for copying conditions.
  */
 

+ 34 - 56
doc/doxygen/chapters/api/scheduling_contexts.doxy

@@ -16,47 +16,6 @@ starpu tasks to them and we schedule them with the policy assigned to
 the context. Scheduling contexts can be created, deleted and modified
 dynamically.
 
-\enum starpu_worker_collection_type
-\ingroup API_Scheduling_Contexts
-types of structures the worker collection can implement
-\var starpu_worker_collection_type::STARPU_WORKER_LIST
-\ingroup API_Scheduling_Contexts
-List of workers
-
-\struct starpu_sched_ctx_iterator
-\ingroup API_Scheduling_Contexts
-todo
-\var starpu_sched_ctx_iterator::cursor
-todo
-
-\struct starpu_worker_collection
-\ingroup API_Scheduling_Contexts
-A scheduling context manages a collection of workers that can
-be memorized using different data structures. Thus, a generic
-structure is available in order to simplify the choice of its type.
-Only the list data structure is available but further data
-structures(like tree) implementations are foreseen.
-\var starpu_worker_collection::workerids
-        The workerids managed by the collection
-\var starpu_worker_collection::nworkers
-        The number of workers in the collection
-\var starpu_worker_collection::type
-        The type of structure (currently ::STARPU_WORKER_LIST is the only one available)
-\var starpu_worker_collection::has_next
-        Checks if there is another element in collection
-\var starpu_worker_collection::get_next
-        return the next element in the collection
-\var starpu_worker_collection::add
-        add a new element in the collection
-\var starpu_worker_collection::remove
-        remove an element from the collection
-\var starpu_worker_collection::init
-        Initialize the collection
-\var starpu_worker_collection::deinit
-        Deinitialize the colection
-\var starpu_worker_collection::init_iterator
-        Initialize the cursor if there is one
-
 \struct starpu_sched_ctx_performance_counters
Performance counters used by StarPU to indicate to the
hypervisor how the application and the resources are executing.
@@ -66,11 +25,16 @@ hypervisor how the application and the resources are executing.
 \var starpu_sched_ctx_performance_counters::notify_idle_end
        Informs the hypervisor that after a period of idle, the worker has just executed a task in the specified context. The idle counter is thus reset.
 \var starpu_sched_ctx_performance_counters::notify_pushed_task
-        Notifies the hypervisor a task has been scheduled on the queue of the worker corresponding to the specified context
+        Notifies the hypervisor that a task has been scheduled on the queue of the worker corresponding to the specified context
 \var starpu_sched_ctx_performance_counters::notify_poped_task
-        Informs the hypervisor a task executing a specified number of instructions has been poped from the worker
+        Informs the hypervisor that a task executing a specified number of instructions has been popped from the worker
 \var starpu_sched_ctx_performance_counters::notify_post_exec_hook
-        Notifies the hypervisor a task has just been executed
+        Notifies the hypervisor that a task has just been executed
+\var starpu_sched_ctx_performance_counters::notify_submitted_job
+        Notifies the hypervisor that a task has just been submitted
+\var starpu_sched_ctx_performance_counters::notify_delete_context
+        Notifies the hypervisor that the context was deleted
+
 
 @name Scheduling Contexts Basic API
 \ingroup API_Scheduling_Contexts
@@ -99,11 +63,6 @@ tasks will be submitted to. The return value should be at most
 \ingroup API_Scheduling_Contexts
 Create a context indicating an approximate interval of resources
 
-\fn void starpu_sched_ctx_delete(unsigned sched_ctx_id)
-\ingroup API_Scheduling_Contexts
-Delete scheduling context \p sched_ctx_id and transfer remaining
-workers to the inheritor scheduling context.
-
 \fn void starpu_sched_ctx_add_workers(int *workerids_ctx, int nworkers_ctx, unsigned sched_ctx_id)
 \ingroup API_Scheduling_Contexts
 This function adds dynamically the workers in \p workerids_ctx to the
@@ -116,6 +75,11 @@ This function removes the workers in \p workerids_ctx from the context
 \p sched_ctx_id. The last argument cannot be greater than
 STARPU_NMAX_SCHED_CTXS.
 
+\fn void starpu_sched_ctx_delete(unsigned sched_ctx_id)
+\ingroup API_Scheduling_Contexts
+Delete scheduling context \p sched_ctx_id and transfer remaining
+workers to the inheritor scheduling context.
+
 \fn void starpu_sched_ctx_set_inheritor(unsigned sched_ctx_id, unsigned inheritor)
 \ingroup API_Scheduling_Contexts
Indicate which context will inherit the resources of this context
@@ -134,12 +98,18 @@ Return the scheduling context the tasks are currently submitted to
 Stop submitting tasks from the empty context list until the next time
 the context has time to check the empty context list
 
-\fn void starpu_sched_ctx_finished_submit(unsigned sched_ctx_id);
+\fn void starpu_sched_ctx_finished_submit(unsigned sched_ctx_id)
 \ingroup API_Scheduling_Contexts
Indicate to StarPU that the application finished submitting to this
 context in order to move the workers to the inheritor as soon as
 possible.
 
+\fn unsigned starpu_sched_ctx_get_workers_list(unsigned sched_ctx_id, int **workerids)
+\ingroup API_Scheduling_Contexts
+Returns the list of workers in the array \p workerids; the return value is the
+number of workers. The user should free the \p workerids table after finishing
+using it (it is allocated inside the function with the proper size).
+
 \fn unsigned starpu_sched_ctx_get_nworkers(unsigned sched_ctx_id)
 \ingroup API_Scheduling_Contexts
 Return the number of workers managed by the specified contexts
@@ -230,12 +200,6 @@ Delete the worker collection of the specified scheduling context
 \ingroup API_Scheduling_Contexts
 Return the worker collection managed by the indicated context
 
-\fn unsigned starpu_sched_ctx_get_workers_list(unsigned sched_ctx_id, int **workerids)
-\ingroup API_Scheduling_Contexts
-Returns the list of workers in the array \p workerids, the returned value is the 
-number of workers. The user should free the \p workerids table after finishing
-using it (it is allocated inside the function with the proper size)
-
 @name Scheduling Context Link with Hypervisor
 \ingroup API_Scheduling_Contexts
 
Allow the hypervisor to let StarPU know it has been initialised
 \ingroup API_Scheduling_Contexts
Ask StarPU whether it has been informed that the hypervisor is initialised
 
+\fn void starpu_sched_ctx_set_policy_data(unsigned sched_ctx_id, void *policy_data)
+\ingroup API_Scheduling_Contexts
+Assign the scheduling policy data (private information of the scheduler such as queues, variables,
+additional condition variables) to the context
+
+\fn void *starpu_sched_ctx_get_policy_data(unsigned sched_ctx_id)
+\ingroup API_Scheduling_Contexts
+Return the scheduling policy data (private information of the scheduler) previously
+assigned to the context.
+
+\fn void *starpu_sched_ctx_exec_parallel_code(void* (*func)(void*), void *param, unsigned sched_ctx_id)
+\ingroup API_Scheduling_Contexts
+Execute any parallel code on the workers of the scheduling context \p sched_ctx_id (workers are blocked)
+
 */

+ 0 - 9
doc/doxygen/chapters/api/scheduling_policy.doxy

@@ -73,15 +73,6 @@ condition variable. For instance, in the case of a scheduling strategy
 with a single task queue, the same condition variable would be used to
 block and wake up all workers.
 
-\fn void starpu_sched_ctx_set_policy_data(unsigned sched_ctx_id, void *policy_data)
-\ingroup API_Scheduling_Policy
-Each scheduling policy uses some specific data (queues, variables,
-additional condition variables). It is memorize through a local
-structure. This function assigns it to a scheduling context.
-
-\fn void *starpu_sched_ctx_get_policy_data(unsigned sched_ctx_id)
-\ingroup API_Scheduling_Policy
-Returns the policy data previously assigned to a context
 
 \fn int starpu_sched_set_min_priority(int min_prio)
 \ingroup API_Scheduling_Policy

+ 43 - 0
doc/doxygen/chapters/api/workers.doxy

@@ -62,6 +62,49 @@ Intel MIC device
 Intel SCC device
 
 
+\struct starpu_worker_collection
+\ingroup API_Workers_Properties
+A scheduling context manages a collection of workers that can
+be memorized using different data structures. Thus, a generic
+structure is available in order to simplify the choice of its type.
+Only the list data structure is available but further data
+structure implementations (like trees) are foreseen.
+\var starpu_worker_collection::workerids
+        The workerids managed by the collection
+\var starpu_worker_collection::nworkers
+        The number of workers in the collection
+\var starpu_worker_collection::type
+        The type of structure (currently ::STARPU_WORKER_LIST is the only one available)
+\var starpu_worker_collection::has_next
+        Checks if there is another element in collection
+\var starpu_worker_collection::get_next
+        return the next element in the collection
+\var starpu_worker_collection::add
+        add a new element in the collection
+\var starpu_worker_collection::remove
+        remove an element from the collection
+\var starpu_worker_collection::init
+        Initialize the collection
+\var starpu_worker_collection::deinit
+        Deinitialize the collection
+\var starpu_worker_collection::init_iterator
+        Initialize the cursor if there is one
+
+\enum starpu_worker_collection_type
+\ingroup API_Workers_Properties
+Types of structures the worker collection can implement
+\var starpu_worker_collection_type::STARPU_WORKER_LIST
+\ingroup API_Workers_Properties
+The collection is an array
+
+\struct starpu_sched_ctx_iterator
+\ingroup API_Workers_Properties
+Structure needed to iterate on the collection
+\var starpu_sched_ctx_iterator::cursor
+The index of the current worker in the collection, needed when iterating on
+the collection.
+
+
 \fn unsigned starpu_worker_get_count(void)
 \ingroup API_Workers_Properties
 This function returns the number of workers (i.e. processing

+ 30 - 32
include/starpu_sched_ctx.h

@@ -50,37 +50,6 @@ void starpu_sched_ctx_finished_submit(unsigned sched_ctx_id);
 
 unsigned starpu_sched_ctx_get_workers_list(unsigned sched_ctx_id, int **workerids);
 
-struct starpu_sched_ctx_performance_counters
-{
-	void (*notify_idle_cycle)(unsigned sched_ctx_id, int worker, double idle_time);
-	void (*notify_idle_end)(unsigned sched_ctx_id, int worker);
-	void (*notify_pushed_task)(unsigned sched_ctx_id, int worker);
-	void (*notify_poped_task)(unsigned sched_ctx_id, int worker, struct starpu_task *task, size_t data_size, uint32_t footprint);
-	void (*notify_post_exec_hook)(unsigned sched_ctx_id, int taskid);
-	void (*notify_submitted_job)(struct starpu_task *task, uint32_t footprint, size_t data_size);
-	void (*notify_delete_context)(unsigned sched_ctx);
-};
-
-#ifdef STARPU_USE_SC_HYPERVISOR
-void starpu_sched_ctx_set_perf_counters(unsigned sched_ctx_id, struct starpu_sched_ctx_performance_counters *perf_counters);
-void starpu_sched_ctx_call_pushed_task_cb(int workerid, unsigned sched_ctx_id);
-#endif //STARPU_USE_SC_HYPERVISOR
-
-void starpu_sched_ctx_notify_hypervisor_exists(void);
-
-unsigned starpu_sched_ctx_check_if_hypervisor_exists(void);
-
-void starpu_sched_ctx_set_policy_data(unsigned sched_ctx_id, void *policy_data);
-
-void *starpu_sched_ctx_get_policy_data(unsigned sched_ctx_id);
-
-
-struct starpu_worker_collection *starpu_sched_ctx_create_worker_collection(unsigned sched_ctx_id, enum starpu_worker_collection_type type);
-
-void starpu_sched_ctx_delete_worker_collection(unsigned sched_ctx_id);
-
-struct starpu_worker_collection *starpu_sched_ctx_get_worker_collection(unsigned sched_ctx_id);
-
 unsigned starpu_sched_ctx_get_nworkers(unsigned sched_ctx_id);
 
 unsigned starpu_sched_ctx_get_nshared_workers(unsigned sched_ctx_id, unsigned sched_ctx_id2);
@@ -116,7 +85,36 @@ int starpu_sched_ctx_set_max_priority(unsigned sched_ctx_id, int max_prio);
 
 #define STARPU_DEFAULT_PRIO	0
 
-/* execute any parallel code on the workers of the sched_ctx (workers are blocked) */
+struct starpu_worker_collection *starpu_sched_ctx_create_worker_collection(unsigned sched_ctx_id, enum starpu_worker_collection_type type);
+
+void starpu_sched_ctx_delete_worker_collection(unsigned sched_ctx_id);
+
+struct starpu_worker_collection *starpu_sched_ctx_get_worker_collection(unsigned sched_ctx_id);
+
+struct starpu_sched_ctx_performance_counters
+{
+	void (*notify_idle_cycle)(unsigned sched_ctx_id, int worker, double idle_time);
+	void (*notify_idle_end)(unsigned sched_ctx_id, int worker);
+	void (*notify_pushed_task)(unsigned sched_ctx_id, int worker);
+	void (*notify_poped_task)(unsigned sched_ctx_id, int worker, struct starpu_task *task, size_t data_size, uint32_t footprint);
+	void (*notify_post_exec_hook)(unsigned sched_ctx_id, int taskid);
+	void (*notify_submitted_job)(struct starpu_task *task, uint32_t footprint, size_t data_size);
+	void (*notify_delete_context)(unsigned sched_ctx);
+};
+
+#ifdef STARPU_USE_SC_HYPERVISOR
+void starpu_sched_ctx_set_perf_counters(unsigned sched_ctx_id, struct starpu_sched_ctx_performance_counters *perf_counters);
+void starpu_sched_ctx_call_pushed_task_cb(int workerid, unsigned sched_ctx_id);
+#endif //STARPU_USE_SC_HYPERVISOR
+
+void starpu_sched_ctx_notify_hypervisor_exists(void);
+
+unsigned starpu_sched_ctx_check_if_hypervisor_exists(void);
+
+void starpu_sched_ctx_set_policy_data(unsigned sched_ctx_id, void *policy_data);
+
+void *starpu_sched_ctx_get_policy_data(unsigned sched_ctx_id);
+
 void *starpu_sched_ctx_exec_parallel_code(void* (*func)(void*), void *param, unsigned sched_ctx_id);
 
 #ifdef __cplusplus