starpu_trace_state_stats.py

#!/usr/bin/python2.7
##
# StarPU --- Runtime system for heterogeneous multicore architectures.
#
# Copyright (C) 2016 INRIA
#
# StarPU is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or (at
# your option) any later version.
#
# StarPU is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU Lesser General Public License in COPYING.LGPL for more details.
##
##
# This script parses the generated trace.rec file and reports statistics about
# the number of different events/tasks and their durations. The report is
# similar to the one produced by the starpu_paje_state_stats.in script, except
# that this one needs neither R nor pj_dump (from the pajeng repository), and
# it is also much faster.
##
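##
# For reference, trace.rec is a recutils-style file: each record is a block
# of "X: value" lines separated by blank lines. Only the fields below are
# read by this script; the values here are illustrative:
#
#   E: SetState        # EventType
#   C: Task            # Category
#   W: 0               # WorkerId
#   N: Executing       # Name
#   S: 1234.567890     # StartTime
##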
import getopt
import os
import sys

class Event():
    def __init__(self, type, name, category, start_time):
        self._type = type
        self._name = name
        self._category = category
        self._start_time = start_time

class EventStats():
    def __init__(self, name, duration_time, category, count = 1):
        self._name = name
        self._duration_time = duration_time
        self._category = category
        self._count = count

    def aggregate(self, duration_time):
        self._duration_time += duration_time
        self._count += 1

    def show(self):
        if self._name is not None and self._category is not None:
            print "\"" + self._name + "\"," + str(self._count) + ",\"" + self._category + "\"," + str(round(self._duration_time, 6))
class Worker():
    def __init__(self, id):
        self._id = id
        self._events = []
        self._stats = []
        self._stack = []

    def get_event_stats(self, name):
        for stat in self._stats:
            if stat._name == name:
                return stat
        return None

    def add_event(self, type, name, category, start_time):
        self._events.append(Event(type, name, category, start_time))

    def calc_stats(self, start_profiling_time):
        num_events = len(self._events) - 1
        for i in xrange(0, num_events):
            curr_event = self._events[i]
            next_event = self._events[i+1]

            if curr_event._start_time <= start_profiling_time:
                # Ignore events before the start_profiling program event.
                continue

            if next_event._type == "PushState":
                self._stack.append(next_event)
                for j in xrange(i+1, num_events):
                    next_event = self._events[j]
                    if next_event._type == "SetState":
                        break
            elif next_event._type == "PopState":
                curr_event = self._stack.pop()

            # Compute the duration of the current state using the next
            # event's start time.
            a = curr_event._start_time
            b = next_event._start_time

            found = False
            for j in xrange(len(self._stats)):
                if self._stats[j]._name == curr_event._name:
                    self._stats[j].aggregate(b - a)
                    found = True
                    break
            if not found:
                self._stats.append(EventStats(curr_event._name, b - a, curr_event._category))
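
# Worked example for calc_stats() (illustrative values): given the worker
# timeline
#   SetState "Idle"      at t = 10.0
#   SetState "Executing" at t = 12.5
# the "Idle" state is credited 12.5 - 10.0 = 2.5 time units, since a state
# lasts until the next event on the same worker. PushState/PopState pairs
# briefly interrupt the current state, which is why calc_stats() tracks
# them on a per-worker stack.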
def read_blocks(input_file):
    empty_lines = 0
    first_line = 1
    blocks = []
    for line in open(input_file):
        # Start the first block on the first line.
        if first_line:
            blocks.append([])
            first_line = 0

        # Check for empty lines.
        if not line or line[0] == '\n':
            # The first empty line in a row starts a new block.
            if empty_lines == 0:
                blocks.append([])
            empty_lines += 1
        else:
            # Non-empty line: add it to the current (last) block.
            empty_lines = 0
            blocks[-1].append(line)
    return blocks
def read_field(field, index):
    # Return the value of a "X: value" line, skipping the leading tag
    # (index+1 characters, e.g. "E: ") and the trailing newline.
    return field[index+1:-1]
def insert_worker_event(workers, prog_events, block):
    worker_id = -1
    name = None
    start_time = 0.0
    category = None
    event_type = None
    for line in block:
        if line[:2] == "E:":   # EventType
            event_type = read_field(line, 2)
        elif line[:2] == "C:": # Category
            category = read_field(line, 2)
        elif line[:2] == "W:": # WorkerId
            worker_id = int(read_field(line, 2))
        elif line[:2] == "N:": # Name
            name = read_field(line, 2)
        elif line[:2] == "S:": # StartTime
            start_time = float(read_field(line, 2))

    # Program events don't belong to workers; they are global.
    if category == "Program":
        prog_events.append(Event(event_type, name, category, start_time))
        return

    # Add the event to its worker, creating the worker on first sight.
    for worker in workers:
        if worker._id == worker_id:
            worker.add_event(event_type, name, category, start_time)
            return
    worker = Worker(worker_id)
    worker.add_event(event_type, name, category, start_time)
    workers.append(worker)
def calc_times(stats):
    tr = 0.0 # Runtime
    tt = 0.0 # Task
    ti = 0.0 # Idle
    ts = 0.0 # Scheduling
    for stat in stats:
        if stat._category is None:
            continue
        if stat._category == "Runtime":
            if stat._name == "Scheduling":
                # Scheduling time is part of the runtime time, but we want to
                # report it separately.
                ts += stat._duration_time
            else:
                tr += stat._duration_time
        elif stat._category == "Task":
            tt += stat._duration_time
        elif stat._category == "Other":
            ti += stat._duration_time
        else:
            sys.exit("Unknown category '" + stat._category + "'!")
    return (ti, tr, tt, ts)
def save_times(ti, tr, tt, ts):
    f = open("times.csv", "w+")
    f.write("\"Time\",\"Duration\"\n")
    f.write("\"Runtime\"," + str(tr) + "\n")
    f.write("\"Task\"," + str(tt) + "\n")
    f.write("\"Idle\"," + str(ti) + "\n")
    f.write("\"Scheduling\"," + str(ts) + "\n")
    f.close()
def calc_et(tt_1, tt_p):
    """ Compute the task efficiency (et). This measures the exploitation of
    data locality. """
    return tt_1 / tt_p

def calc_er(tt_p, tr_p):
    """ Compute the runtime efficiency (er). This measures how the runtime
    overhead affects performance. """
    return tt_p / (tt_p + tr_p)

def calc_ep(tt_p, tr_p, ti_p):
    """ Compute the pipeline efficiency (ep). This measures how much
    concurrency is available and how well it is exploited. """
    return (tt_p + tr_p) / (tt_p + tr_p + ti_p)

def calc_e(et, er, ep):
    """ Compute the parallel efficiency. """
    return et * er * ep
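
# Combining the three factors above, the parallel efficiency collapses to a
# single ratio (tt_1 is the sequential task time; tt_p, tr_p and ti_p are
# the parallel task, runtime and idle times):
#
#   e = et * er * ep
#     = (tt_1/tt_p) * (tt_p/(tt_p + tr_p)) * ((tt_p + tr_p)/(tt_p + tr_p + ti_p))
#     = tt_1 / (tt_p + tr_p + ti_p)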
def save_efficiencies(e, ep, er, et):
    f = open("efficiencies.csv", "w+")
    f.write("\"Efficiency\",\"Value\"\n")
    f.write("\"Parallel\"," + str(e) + "\n")
    f.write("\"Task\"," + str(et) + "\n")
    f.write("\"Runtime\"," + str(er) + "\n")
    f.write("\"Pipeline\"," + str(ep) + "\n")
    f.close()
def usage():
    print "USAGE:"
    print "starpu_trace_state_stats.py [ -te -s=<time> ] <trace.rec>"
    print
    print "OPTIONS:"
    print "  -t or --time            Compute and dump times to times.csv"
    print
    print "  -e or --efficiency      Compute and dump efficiencies to efficiencies.csv"
    print
    print "  -s or --seq_task_time   Sequential task time, used to compute the task efficiency"
    print "                          (if not set, the task efficiency will be 1.0)"
    print
    print "EXAMPLES:"
    print "# Compute event statistics and report them to stdout:"
    print "python starpu_trace_state_stats.py trace.rec"
    print
    print "# Compute event stats, times and efficiencies:"
    print "python starpu_trace_state_stats.py -te trace.rec"
    print
    print "# Compute the correct task efficiency using the sequential task time:"
    print "python starpu_trace_state_stats.py -s=60093.950614 trace.rec"
def main():
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hets:",
                                   ["help", "time", "efficiency", "seq_task_time="])
    except getopt.GetoptError as err:
        usage()
        sys.exit(1)

    dump_time = False
    dump_efficiency = False
    tt_1 = 0.0

    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("-t", "--time"):
            dump_time = True
        elif o in ("-e", "--efficiency"):
            dump_efficiency = True
        elif o in ("-s", "--seq_task_time"):
            tt_1 = float(a)

    if len(args) < 1:
        usage()
        sys.exit()
    recfile = args[0]

    if not os.path.isfile(recfile):
        sys.exit("File does not exist!")

    # Declare a list for all workers.
    workers = []

    # Declare a list for program events.
    prog_events = []

    # Read the recutils-formatted file block by block.
    blocks = read_blocks(recfile)
    for block in blocks:
        if len(block) != 0:
            first_line = block[0]
            if first_line[:2] == "E:":
                insert_worker_event(workers, prog_events, block)

    # Find the start_profiling program event.
    start_profiling_time = 0.0
    for prog_event in prog_events:
        if prog_event._name == "start_profiling":
            start_profiling_time = prog_event._start_time
            break

    # Compute worker statistics.
    stats = []
    for worker in workers:
        worker.calc_stats(start_profiling_time)
        for stat in worker._stats:
            found = False
            for s in stats:
                if stat._name == s._name:
                    found = True
                    break
            if not found:
                stats.append(EventStats(stat._name, 0.0, stat._category, 0))

    # Compute global statistics over all workers.
    for i in xrange(0, len(workers)):
        for stat in stats:
            s = workers[i].get_event_stats(stat._name)
            if s is not None:
                # A task might not be executed on all workers.
                stat._duration_time += s._duration_time
                stat._count += s._count

    # Output statistics.
    print "\"Name\",\"Count\",\"Type\",\"Duration\""
    for stat in stats:
        stat.show()

    # Compute the runtime, task, idle and scheduling times and dump them to
    # times.csv.
    ti_p = tr_p = tt_p = ts_p = 0.0
    if dump_time == True:
        ti_p, tr_p, tt_p, ts_p = calc_times(stats)
        save_times(ti_p, tr_p, tt_p, ts_p)

    # Compute the task, runtime and pipeline efficiencies and dump them to
    # efficiencies.csv.
    if dump_efficiency == True or tt_1 != 0.0:
        if dump_time == False:
            ti_p, tr_p, tt_p, ts_p = calc_times(stats)
        if tt_1 == 0.0:
            sys.stderr.write("WARNING: Task efficiency will be 1.0 because -s is not set!\n")
            tt_1 = tt_p

        # TODO: The efficiency formulas must be updated to account for the
        # scheduling time separately. For now, fold the scheduling time back
        # into the runtime time.
        tr_p += ts_p

        # Compute efficiencies.
        ep = round(calc_ep(tt_p, tr_p, ti_p), 6)
        er = round(calc_er(tt_p, tr_p), 6)
        et = round(calc_et(tt_1, tt_p), 6)
        e = round(calc_e(et, er, ep), 6)
        save_efficiencies(e, ep, er, et)

if __name__ == "__main__":
    main()
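
# Example stdout (illustrative values; actual state names depend on the
# trace):
#
#   "Name","Count","Type","Duration"
#   "Idle",4,"Other",123.456789
#   "Scheduling",42,"Runtime",17.5
#   "chol_model_22",128,"Task",60093.950614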