jack2 codebase
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

612 lines
16KB

  1. #!/usr/bin/env python
  2. # encoding: utf-8
  3. # Thomas Nagy, 2005-2018 (ita)
  4. """
  5. Runner.py: Task scheduling and execution
  6. """
  7. import heapq, traceback
# Python 3 exposes both classes in the "queue" module.
try:
	from queue import Queue, PriorityQueue
except ImportError:
	# Python 2: the module is named "Queue"
	from Queue import Queue
	try:
		from Queue import PriorityQueue
	except ImportError:
		# Very old Python 2 releases lack PriorityQueue entirely; emulate it
		# by overriding the _init/_put/_get hooks that Queue subclasses are
		# expected to customize, backed by a heapq-managed list.
		class PriorityQueue(Queue):
			def _init(self, maxsize):
				self.maxsize = maxsize
				self.queue = []
			def _put(self, item):
				heapq.heappush(self.queue, item)
			def _get(self):
				return heapq.heappop(self.queue)
  23. from waflib import Utils, Task, Errors, Logs
# Scheduling look-ahead factor: the producer drains finished tasks once more
# than GAP * numjobs tasks are in flight (see Parallel.refill_task_list).
GAP = 5
"""
Wait for at least ``GAP * njobs`` before trying to enqueue more tasks to run
"""
  28. class PriorityTasks(object):
  29. def __init__(self):
  30. self.lst = []
  31. def __len__(self):
  32. return len(self.lst)
  33. def __iter__(self):
  34. return iter(self.lst)
  35. def clear(self):
  36. self.lst = []
  37. def append(self, task):
  38. heapq.heappush(self.lst, task)
  39. def appendleft(self, task):
  40. "Deprecated, do not use"
  41. heapq.heappush(self.lst, task)
  42. def pop(self):
  43. return heapq.heappop(self.lst)
  44. def extend(self, lst):
  45. if self.lst:
  46. for x in lst:
  47. self.append(x)
  48. else:
  49. if isinstance(lst, list):
  50. self.lst = lst
  51. heapq.heapify(lst)
  52. else:
  53. self.lst = lst.lst
  54. class Consumer(Utils.threading.Thread):
  55. """
  56. Daemon thread object that executes a task. It shares a semaphore with
  57. the coordinator :py:class:`waflib.Runner.Spawner`. There is one
  58. instance per task to consume.
  59. """
  60. def __init__(self, spawner, task):
  61. Utils.threading.Thread.__init__(self)
  62. self.task = task
  63. """Task to execute"""
  64. self.spawner = spawner
  65. """Coordinator object"""
  66. self.setDaemon(1)
  67. self.start()
  68. def run(self):
  69. """
  70. Processes a single task
  71. """
  72. try:
  73. if not self.spawner.master.stop:
  74. self.spawner.master.process_task(self.task)
  75. finally:
  76. self.spawner.sem.release()
  77. self.spawner.master.out.put(self.task)
  78. self.task = None
  79. self.spawner = None
  80. class Spawner(Utils.threading.Thread):
  81. """
  82. Daemon thread that consumes tasks from :py:class:`waflib.Runner.Parallel` producer and
  83. spawns a consuming thread :py:class:`waflib.Runner.Consumer` for each
  84. :py:class:`waflib.Task.Task` instance.
  85. """
  86. def __init__(self, master):
  87. Utils.threading.Thread.__init__(self)
  88. self.master = master
  89. """:py:class:`waflib.Runner.Parallel` producer instance"""
  90. self.sem = Utils.threading.Semaphore(master.numjobs)
  91. """Bounded semaphore that prevents spawning more than *n* concurrent consumers"""
  92. self.setDaemon(1)
  93. self.start()
  94. def run(self):
  95. """
  96. Spawns new consumers to execute tasks by delegating to :py:meth:`waflib.Runner.Spawner.loop`
  97. """
  98. try:
  99. self.loop()
  100. except Exception:
  101. # Python 2 prints unnecessary messages when shutting down
  102. # we also want to stop the thread properly
  103. pass
  104. def loop(self):
  105. """
  106. Consumes task objects from the producer; ends when the producer has no more
  107. task to provide.
  108. """
  109. master = self.master
  110. while 1:
  111. task = master.ready.get()
  112. self.sem.acquire()
  113. if not master.stop:
  114. task.log_display(task.generator.bld)
  115. Consumer(self, task)
class Parallel(object):
	"""
	Schedule the tasks obtained from the build context for execution.
	"""
	def __init__(self, bld, j=2):
		"""
		The initialization requires a build context reference
		for computing the total number of jobs.
		"""
		self.numjobs = j
		"""
		Amount of parallel consumers to use
		"""
		self.bld = bld
		"""
		Instance of :py:class:`waflib.Build.BuildContext`
		"""
		self.outstanding = PriorityTasks()
		"""Heap of :py:class:`waflib.Task.Task` that may be ready to be executed"""
		self.postponed = PriorityTasks()
		"""Heap of :py:class:`waflib.Task.Task` which are not ready to run for non-DAG reasons"""
		self.incomplete = set()
		"""List of :py:class:`waflib.Task.Task` waiting for dependent tasks to complete (DAG)"""
		self.ready = PriorityQueue(0)
		"""List of :py:class:`waflib.Task.Task` ready to be executed by consumers"""
		self.out = Queue(0)
		"""List of :py:class:`waflib.Task.Task` returned by the task consumers"""
		self.count = 0
		"""Amount of tasks that may be processed by :py:class:`waflib.Runner.TaskConsumer`"""
		self.processed = 0
		"""Amount of tasks processed"""
		self.stop = False
		"""Error flag to stop the build"""
		self.error = []
		"""Tasks that could not be executed"""
		self.biter = None
		"""Task iterator which must give groups of parallelizable tasks when calling ``next()``"""
		self.dirty = False
		"""
		Flag that indicates that the build cache must be saved when a task was executed
		(calls :py:meth:`waflib.Build.BuildContext.store`)"""
		self.revdeps = Utils.defaultdict(set)
		"""
		The reverse dependency graph of dependencies obtained from Task.run_after
		"""
		# created last: the spawner thread starts consuming self.ready immediately
		self.spawner = Spawner(self)
		"""
		Coordinating daemon thread that spawns thread consumers
		"""
  165. def get_next_task(self):
  166. """
  167. Obtains the next Task instance to run
  168. :rtype: :py:class:`waflib.Task.Task`
  169. """
  170. if not self.outstanding:
  171. return None
  172. return self.outstanding.pop()
  173. def postpone(self, tsk):
  174. """
  175. Adds the task to the list :py:attr:`waflib.Runner.Parallel.postponed`.
  176. The order is scrambled so as to consume as many tasks in parallel as possible.
  177. :param tsk: task instance
  178. :type tsk: :py:class:`waflib.Task.Task`
  179. """
  180. self.postponed.append(tsk)
	def refill_task_list(self):
		"""
		Pulls a next group of tasks to execute in :py:attr:`waflib.Runner.Parallel.outstanding`.
		Ensures that all tasks in the current build group are complete before processing the next one.
		"""
		# throttle: drain finished tasks while too many are in flight
		while self.count > self.numjobs * GAP:
			self.get_out()
		while not self.outstanding:
			if self.count:
				# tasks are still running; wait for one to finish,
				# it may unlock new outstanding tasks
				self.get_out()
				if self.outstanding:
					break
			elif self.postponed:
				try:
					# EAFP: self.deadlock does not exist on the first pass
					cond = self.deadlock == self.processed
				except AttributeError:
					pass
				else:
					if cond:
						# The most common reason is conflicting build order declaration
						# for example: "X run_after Y" and "Y run_after X"
						# Another can be changing "run_after" dependencies while the build is running
						# for example: updating "tsk.run_after" in the "runnable_status" method
						lst = []
						for tsk in self.postponed:
							deps = [id(x) for x in tsk.run_after if not x.hasrun]
							lst.append('%s\t-> %r' % (repr(tsk), deps))
							if not deps:
								lst.append('\n task %r dependencies are done, check its *runnable_status*?' % id(tsk))
						raise Errors.WafError('Deadlock detected: check the task build order%s' % ''.join(lst))
				# remember the progress counter; if it has not moved by the
				# next visit, the postponed tasks can never become runnable
				self.deadlock = self.processed
				if self.postponed:
					self.outstanding.extend(self.postponed)
					self.postponed.clear()
			elif not self.count:
				if self.incomplete:
					# nothing is running, yet tasks remain frozen: try to salvage one
					for x in self.incomplete:
						for k in x.run_after:
							if not k.hasrun:
								break
						else:
							# dependency added after the build started without updating revdeps
							self.incomplete.remove(x)
							self.outstanding.append(x)
							break
					else:
						raise Errors.WafError('Broken revdeps detected on %r' % self.incomplete)
				else:
					# fetch the next build group; StopIteration propagates when done
					tasks = next(self.biter)
					ready, waiting = self.prio_and_split(tasks)
					self.outstanding.extend(ready)
					self.incomplete.update(waiting)
					self.total = self.bld.total()
					break
	def add_more_tasks(self, tsk):
		"""
		If a task provides :py:attr:`waflib.Task.Task.more_tasks`, then the tasks contained
		in that list are added to the current build and will be processed before the next build group.

		The priorities for dependent tasks are not re-calculated globally

		:param tsk: task instance
		:type tsk: :py:attr:`waflib.Task.Task`
		"""
		if getattr(tsk, 'more_tasks', None):
			more = set(tsk.more_tasks)
			groups_done = set()
			def iteri(a, b):
				# chain two iterables without materializing them
				for x in a:
					yield x
				for x in b:
					yield x
			# Update the dependency tree
			# this assumes that task.run_after values were updated
			for x in iteri(self.outstanding, self.incomplete):
				for k in x.run_after:
					if isinstance(k, Task.TaskGroup):
						if k not in groups_done:
							groups_done.add(k)
							for j in k.prev & more:
								self.revdeps[j].add(k)
					elif k in more:
						self.revdeps[k].add(x)
			ready, waiting = self.prio_and_split(tsk.more_tasks)
			self.outstanding.extend(ready)
			self.incomplete.update(waiting)
			self.total += len(tsk.more_tasks)
	def mark_finished(self, tsk):
		"""
		Updates the reverse dependency graph after *tsk* completed: tasks that
		were waiting only on *tsk* (directly or through a TaskGroup) become
		outstanding, and task-semaphore slots are released.

		:param tsk: finished task instance
		:type tsk: :py:attr:`waflib.Task.Task`
		"""
		def try_unfreeze(x):
			# DAG ancestors are likely to be in the incomplete set
			# This assumes that the run_after contents have not changed
			# after the build starts, else a deadlock may occur
			if x in self.incomplete:
				# TODO remove dependencies to free some memory?
				# x.run_after.remove(tsk)
				for k in x.run_after:
					if not k.hasrun:
						break
				else:
					# all dependencies have run: schedule the task
					self.incomplete.remove(x)
					self.outstanding.append(x)
		if tsk in self.revdeps:
			for x in self.revdeps[tsk]:
				if isinstance(x, Task.TaskGroup):
					x.prev.remove(tsk)
					if not x.prev:
						# the whole group is complete; unfreeze its successors
						for k in x.next:
							# TODO necessary optimization?
							k.run_after.remove(x)
							try_unfreeze(k)
						# TODO necessary optimization?
						x.next = []
				else:
					try_unfreeze(x)
			del self.revdeps[tsk]
		if hasattr(tsk, 'semaphore'):
			sem = tsk.semaphore
			sem.release(tsk)
			while sem.waiting and not sem.is_locked():
				# take a frozen task, make it ready to run
				x = sem.waiting.pop()
				self._add_task(x)
  301. def get_out(self):
  302. """
  303. Waits for a Task that task consumers add to :py:attr:`waflib.Runner.Parallel.out` after execution.
  304. Adds more Tasks if necessary through :py:attr:`waflib.Runner.Parallel.add_more_tasks`.
  305. :rtype: :py:attr:`waflib.Task.Task`
  306. """
  307. tsk = self.out.get()
  308. if not self.stop:
  309. self.add_more_tasks(tsk)
  310. self.mark_finished(tsk)
  311. self.count -= 1
  312. self.dirty = True
  313. return tsk
  314. def add_task(self, tsk):
  315. """
  316. Enqueue a Task to :py:attr:`waflib.Runner.Parallel.ready` so that consumers can run them.
  317. :param tsk: task instance
  318. :type tsk: :py:attr:`waflib.Task.Task`
  319. """
  320. # TODO change in waf 2.1
  321. self.ready.put(tsk)
	def _add_task(self, tsk):
		# Private variant of add_task that also honors task semaphores and
		# the single-job mode (numjobs == 1, no consumer threads).
		if hasattr(tsk, 'semaphore'):
			sem = tsk.semaphore
			try:
				sem.acquire(tsk)
			except IndexError:
				# semaphore full: freeze the task; mark_finished will retry it
				sem.waiting.add(tsk)
				return
		self.count += 1
		self.processed += 1
		if self.numjobs == 1:
			# sequential mode: execute inline instead of queueing
			tsk.log_display(tsk.generator.bld)
			try:
				self.process_task(tsk)
			finally:
				# always report back, even if process_task raised
				self.out.put(tsk)
		else:
			self.add_task(tsk)
  340. def process_task(self, tsk):
  341. """
  342. Processes a task and attempts to stop the build in case of errors
  343. """
  344. tsk.process()
  345. if tsk.hasrun != Task.SUCCESS:
  346. self.error_handler(tsk)
  347. def skip(self, tsk):
  348. """
  349. Mark a task as skipped/up-to-date
  350. """
  351. tsk.hasrun = Task.SKIPPED
  352. self.mark_finished(tsk)
  353. def cancel(self, tsk):
  354. """
  355. Mark a task as failed because of unsatisfiable dependencies
  356. """
  357. tsk.hasrun = Task.CANCELED
  358. self.mark_finished(tsk)
  359. def error_handler(self, tsk):
  360. """
  361. Called when a task cannot be executed. The flag :py:attr:`waflib.Runner.Parallel.stop` is set,
  362. unless the build is executed with::
  363. $ waf build -k
  364. :param tsk: task instance
  365. :type tsk: :py:attr:`waflib.Task.Task`
  366. """
  367. if not self.bld.keep:
  368. self.stop = True
  369. self.error.append(tsk)
	def task_status(self, tsk):
		"""
		Obtains the task status to decide whether to run it immediately or not.

		:return: the exit status, for example :py:attr:`waflib.Task.ASK_LATER`
		:rtype: integer
		"""
		try:
			return tsk.runnable_status()
		except Exception:
			# an exception in runnable_status counts as a processed task
			self.processed += 1
			tsk.err_msg = traceback.format_exc()
			if not self.stop and self.bld.keep:
				# keep-going mode: mark the task skipped and continue the build
				self.skip(tsk)
				if self.bld.keep == 1:
					# if -k stop on the first exception, if -kk try to go as far as possible
					if Logs.verbose > 1 or not self.error:
						self.error.append(tsk)
					self.stop = True
				else:
					if Logs.verbose > 1:
						self.error.append(tsk)
				return Task.EXCEPTION
			tsk.hasrun = Task.EXCEPTION
			self.error_handler(tsk)
			return Task.EXCEPTION
	def start(self):
		"""
		Obtains Task instances from the BuildContext instance and adds the ones that need to be executed to
		:py:class:`waflib.Runner.Parallel.ready` so that the :py:class:`waflib.Runner.Spawner` consumer thread
		has them executed. Obtains the executed Tasks back from :py:class:`waflib.Runner.Parallel.out`
		and marks the build as failed by setting the ``stop`` flag.
		If only one job is used, then executes the tasks one by one, without consumers.
		"""
		self.total = self.bld.total()
		while not self.stop:
			self.refill_task_list()
			# consider the next task
			tsk = self.get_next_task()
			if not tsk:
				if self.count:
					# tasks may add new ones after they are run
					continue
				else:
					# no tasks to run, no tasks running, time to exit
					break
			if tsk.hasrun:
				# if the task is marked as "run", just skip it
				self.processed += 1
				continue
			if self.stop: # stop immediately after a failure is detected
				break
			st = self.task_status(tsk)
			if st == Task.RUN_ME:
				self._add_task(tsk)
			elif st == Task.ASK_LATER:
				self.postpone(tsk)
			elif st == Task.SKIP_ME:
				self.processed += 1
				self.skip(tsk)
				self.add_more_tasks(tsk)
			elif st == Task.CANCEL_ME:
				# A dependency problem has occurred, and the
				# build is most likely run with `waf -k`
				if Logs.verbose > 1:
					self.error.append(tsk)
				self.processed += 1
				self.cancel(tsk)
		# self.count represents the tasks that have been made available to the consumer threads
		# collect all the tasks after an error else the message may be incomplete
		while self.error and self.count:
			self.get_out()
		# a None sentinel terminates the spawner thread's loop
		self.ready.put(None)
		if not self.stop:
			assert not self.count
			assert not self.postponed
			assert not self.incomplete
	def prio_and_split(self, tasks):
		"""
		Label input tasks with priority values, and return a pair containing
		the tasks that are ready to run and the tasks that are necessarily
		waiting for other tasks to complete.

		The priority system is really meant as an optional layer for optimization:
		dependency cycles are found quickly, and builds should be more efficient.
		A high priority number means that a task is processed first.

		This method can be overridden to disable the priority system::

			def prio_and_split(self, tasks):
				return tasks, []

		:return: A pair of task lists
		:rtype: tuple
		"""
		# to disable:
		#return tasks, []
		for x in tasks:
			x.visited = 0
		reverse = self.revdeps
		groups_done = set()
		# build the reverse dependency graph, flattening TaskGroups once each
		for x in tasks:
			for k in x.run_after:
				if isinstance(k, Task.TaskGroup):
					if k not in groups_done:
						groups_done.add(k)
						for j in k.prev:
							reverse[j].add(k)
				else:
					reverse[k].add(x)
		# the priority number is not the tree depth
		def visit(n):
			# depth-first traversal; visited acts as a tri-state marker
			# (0 = unseen, 1 = on the current path, 2 = done)
			if isinstance(n, Task.TaskGroup):
				return sum(visit(k) for k in n.next)
			if n.visited == 0:
				n.visited = 1
				if n in reverse:
					rev = reverse[n]
					n.prio_order = n.tree_weight + len(rev) + sum(visit(k) for k in rev)
				else:
					n.prio_order = n.tree_weight
				n.visited = 2
			elif n.visited == 1:
				# revisiting a node on the current path means a cycle
				raise Errors.WafError('Dependency cycle found!')
			return n.prio_order
		for x in tasks:
			if x.visited != 0:
				# must visit all to detect cycles
				continue
			try:
				visit(x)
			except Errors.WafError:
				# produce a detailed cycle report (raises with the minimum cycle)
				self.debug_cycles(tasks, reverse)
		ready = []
		waiting = []
		for x in tasks:
			for k in x.run_after:
				if not k.hasrun:
					waiting.append(x)
					break
			else:
				ready.append(x)
		return (ready, waiting)
	def debug_cycles(self, tasks, reverse):
		"""
		Re-walks the reverse dependency graph to report a minimal
		``run_after`` cycle; raises :py:class:`waflib.Errors.WafError`
		with the offending tasks when one is found.

		:param tasks: tasks of the current build group
		:param reverse: reverse dependency graph (task -> dependants)
		"""
		# tri-state markers: 0 = unseen, 1 = on the current path, 2 = done
		tmp = {}
		for x in tasks:
			tmp[x] = 0
		def visit(n, acc):
			if isinstance(n, Task.TaskGroup):
				for k in n.next:
					visit(k, acc)
				return
			if tmp[n] == 0:
				tmp[n] = 1
				for k in reverse.get(n, []):
					# accumulate the path taken so the cycle can be printed
					visit(k, [n] + acc)
				tmp[n] = 2
			elif tmp[n] == 1:
				lst = []
				for tsk in acc:
					lst.append(repr(tsk))
					if tsk is n:
						# exclude prior nodes, we want the minimum cycle
						break
				raise Errors.WafError('Task dependency cycle in "run_after" constraints: %s' % ''.join(lst))
		for x in tasks:
			visit(x, [])