// Audio plugin host - https://kx.studio/carla
// Vendored copy of Asio's detail/impl/scheduler.ipp (477 lines, 11KB).
//
// detail/impl/scheduler.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2015 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
  10. #ifndef ASIO_DETAIL_IMPL_SCHEDULER_IPP
  11. #define ASIO_DETAIL_IMPL_SCHEDULER_IPP
  12. #if defined(_MSC_VER) && (_MSC_VER >= 1200)
  13. # pragma once
  14. #endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
  15. #include "asio/detail/config.hpp"
  16. #include "asio/detail/event.hpp"
  17. #include "asio/detail/limits.hpp"
  18. #include "asio/detail/reactor.hpp"
  19. #include "asio/detail/scheduler.hpp"
  20. #include "asio/detail/scheduler_thread_info.hpp"
  21. #include "asio/detail/push_options.hpp"
  22. namespace asio {
  23. namespace detail {
  24. struct scheduler::task_cleanup
  25. {
  26. ~task_cleanup()
  27. {
  28. if (this_thread_->private_outstanding_work > 0)
  29. {
  30. asio::detail::increment(
  31. scheduler_->outstanding_work_,
  32. this_thread_->private_outstanding_work);
  33. }
  34. this_thread_->private_outstanding_work = 0;
  35. // Enqueue the completed operations and reinsert the task at the end of
  36. // the operation queue.
  37. lock_->lock();
  38. scheduler_->task_interrupted_ = true;
  39. scheduler_->op_queue_.push(this_thread_->private_op_queue);
  40. scheduler_->op_queue_.push(&scheduler_->task_operation_);
  41. }
  42. scheduler* scheduler_;
  43. mutex::scoped_lock* lock_;
  44. thread_info* this_thread_;
  45. };
  46. struct scheduler::work_cleanup
  47. {
  48. ~work_cleanup()
  49. {
  50. if (this_thread_->private_outstanding_work > 1)
  51. {
  52. asio::detail::increment(
  53. scheduler_->outstanding_work_,
  54. this_thread_->private_outstanding_work - 1);
  55. }
  56. else if (this_thread_->private_outstanding_work < 1)
  57. {
  58. scheduler_->work_finished();
  59. }
  60. this_thread_->private_outstanding_work = 0;
  61. #if defined(ASIO_HAS_THREADS)
  62. if (!this_thread_->private_op_queue.empty())
  63. {
  64. lock_->lock();
  65. scheduler_->op_queue_.push(this_thread_->private_op_queue);
  66. }
  67. #endif // defined(ASIO_HAS_THREADS)
  68. }
  69. scheduler* scheduler_;
  70. mutex::scoped_lock* lock_;
  71. thread_info* this_thread_;
  72. };
  73. scheduler::scheduler(
  74. asio::execution_context& ctx, int concurrency_hint)
  75. : asio::detail::execution_context_service_base<scheduler>(ctx),
  76. one_thread_(concurrency_hint == 1),
  77. mutex_(),
  78. task_(0),
  79. task_interrupted_(true),
  80. outstanding_work_(0),
  81. stopped_(false),
  82. shutdown_(false)
  83. {
  84. ASIO_HANDLER_TRACKING_INIT;
  85. }
  86. void scheduler::shutdown()
  87. {
  88. mutex::scoped_lock lock(mutex_);
  89. shutdown_ = true;
  90. lock.unlock();
  91. // Destroy handler objects.
  92. while (!op_queue_.empty())
  93. {
  94. operation* o = op_queue_.front();
  95. op_queue_.pop();
  96. if (o != &task_operation_)
  97. o->destroy();
  98. }
  99. // Reset to initial state.
  100. task_ = 0;
  101. }
  102. void scheduler::init_task()
  103. {
  104. mutex::scoped_lock lock(mutex_);
  105. if (!shutdown_ && !task_)
  106. {
  107. task_ = &use_service<reactor>(this->context());
  108. op_queue_.push(&task_operation_);
  109. wake_one_thread_and_unlock(lock);
  110. }
  111. }
  112. std::size_t scheduler::run(asio::error_code& ec)
  113. {
  114. ec = asio::error_code();
  115. if (outstanding_work_ == 0)
  116. {
  117. stop();
  118. return 0;
  119. }
  120. thread_info this_thread;
  121. this_thread.private_outstanding_work = 0;
  122. thread_call_stack::context ctx(this, this_thread);
  123. mutex::scoped_lock lock(mutex_);
  124. std::size_t n = 0;
  125. for (; do_run_one(lock, this_thread, ec); lock.lock())
  126. if (n != (std::numeric_limits<std::size_t>::max)())
  127. ++n;
  128. return n;
  129. }
  130. std::size_t scheduler::run_one(asio::error_code& ec)
  131. {
  132. ec = asio::error_code();
  133. if (outstanding_work_ == 0)
  134. {
  135. stop();
  136. return 0;
  137. }
  138. thread_info this_thread;
  139. this_thread.private_outstanding_work = 0;
  140. thread_call_stack::context ctx(this, this_thread);
  141. mutex::scoped_lock lock(mutex_);
  142. return do_run_one(lock, this_thread, ec);
  143. }
  144. std::size_t scheduler::poll(asio::error_code& ec)
  145. {
  146. ec = asio::error_code();
  147. if (outstanding_work_ == 0)
  148. {
  149. stop();
  150. return 0;
  151. }
  152. thread_info this_thread;
  153. this_thread.private_outstanding_work = 0;
  154. thread_call_stack::context ctx(this, this_thread);
  155. mutex::scoped_lock lock(mutex_);
  156. #if defined(ASIO_HAS_THREADS)
  157. // We want to support nested calls to poll() and poll_one(), so any handlers
  158. // that are already on a thread-private queue need to be put on to the main
  159. // queue now.
  160. if (one_thread_)
  161. if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
  162. op_queue_.push(outer_info->private_op_queue);
  163. #endif // defined(ASIO_HAS_THREADS)
  164. std::size_t n = 0;
  165. for (; do_poll_one(lock, this_thread, ec); lock.lock())
  166. if (n != (std::numeric_limits<std::size_t>::max)())
  167. ++n;
  168. return n;
  169. }
  170. std::size_t scheduler::poll_one(asio::error_code& ec)
  171. {
  172. ec = asio::error_code();
  173. if (outstanding_work_ == 0)
  174. {
  175. stop();
  176. return 0;
  177. }
  178. thread_info this_thread;
  179. this_thread.private_outstanding_work = 0;
  180. thread_call_stack::context ctx(this, this_thread);
  181. mutex::scoped_lock lock(mutex_);
  182. #if defined(ASIO_HAS_THREADS)
  183. // We want to support nested calls to poll() and poll_one(), so any handlers
  184. // that are already on a thread-private queue need to be put on to the main
  185. // queue now.
  186. if (one_thread_)
  187. if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
  188. op_queue_.push(outer_info->private_op_queue);
  189. #endif // defined(ASIO_HAS_THREADS)
  190. return do_poll_one(lock, this_thread, ec);
  191. }
  192. void scheduler::stop()
  193. {
  194. mutex::scoped_lock lock(mutex_);
  195. stop_all_threads(lock);
  196. }
  197. bool scheduler::stopped() const
  198. {
  199. mutex::scoped_lock lock(mutex_);
  200. return stopped_;
  201. }
  202. void scheduler::restart()
  203. {
  204. mutex::scoped_lock lock(mutex_);
  205. stopped_ = false;
  206. }
  207. void scheduler::compensating_work_started()
  208. {
  209. thread_info_base* this_thread = thread_call_stack::contains(this);
  210. ++static_cast<thread_info*>(this_thread)->private_outstanding_work;
  211. }
  212. void scheduler::post_immediate_completion(
  213. scheduler::operation* op, bool is_continuation)
  214. {
  215. #if defined(ASIO_HAS_THREADS)
  216. if (one_thread_ || is_continuation)
  217. {
  218. if (thread_info_base* this_thread = thread_call_stack::contains(this))
  219. {
  220. ++static_cast<thread_info*>(this_thread)->private_outstanding_work;
  221. static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
  222. return;
  223. }
  224. }
  225. #else // defined(ASIO_HAS_THREADS)
  226. (void)is_continuation;
  227. #endif // defined(ASIO_HAS_THREADS)
  228. work_started();
  229. mutex::scoped_lock lock(mutex_);
  230. op_queue_.push(op);
  231. wake_one_thread_and_unlock(lock);
  232. }
  233. void scheduler::post_deferred_completion(scheduler::operation* op)
  234. {
  235. #if defined(ASIO_HAS_THREADS)
  236. if (one_thread_)
  237. {
  238. if (thread_info_base* this_thread = thread_call_stack::contains(this))
  239. {
  240. static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
  241. return;
  242. }
  243. }
  244. #endif // defined(ASIO_HAS_THREADS)
  245. mutex::scoped_lock lock(mutex_);
  246. op_queue_.push(op);
  247. wake_one_thread_and_unlock(lock);
  248. }
  249. void scheduler::post_deferred_completions(
  250. op_queue<scheduler::operation>& ops)
  251. {
  252. if (!ops.empty())
  253. {
  254. #if defined(ASIO_HAS_THREADS)
  255. if (one_thread_)
  256. {
  257. if (thread_info_base* this_thread = thread_call_stack::contains(this))
  258. {
  259. static_cast<thread_info*>(this_thread)->private_op_queue.push(ops);
  260. return;
  261. }
  262. }
  263. #endif // defined(ASIO_HAS_THREADS)
  264. mutex::scoped_lock lock(mutex_);
  265. op_queue_.push(ops);
  266. wake_one_thread_and_unlock(lock);
  267. }
  268. }
  269. void scheduler::do_dispatch(
  270. scheduler::operation* op)
  271. {
  272. work_started();
  273. mutex::scoped_lock lock(mutex_);
  274. op_queue_.push(op);
  275. wake_one_thread_and_unlock(lock);
  276. }
  277. void scheduler::abandon_operations(
  278. op_queue<scheduler::operation>& ops)
  279. {
  280. op_queue<scheduler::operation> ops2;
  281. ops2.push(ops);
  282. }
  283. std::size_t scheduler::do_run_one(mutex::scoped_lock& lock,
  284. scheduler::thread_info& this_thread,
  285. const asio::error_code& ec)
  286. {
  287. while (!stopped_)
  288. {
  289. if (!op_queue_.empty())
  290. {
  291. // Prepare to execute first handler from queue.
  292. operation* o = op_queue_.front();
  293. op_queue_.pop();
  294. bool more_handlers = (!op_queue_.empty());
  295. if (o == &task_operation_)
  296. {
  297. task_interrupted_ = more_handlers;
  298. if (more_handlers && !one_thread_)
  299. wakeup_event_.unlock_and_signal_one(lock);
  300. else
  301. lock.unlock();
  302. task_cleanup on_exit = { this, &lock, &this_thread };
  303. (void)on_exit;
  304. // Run the task. May throw an exception. Only block if the operation
  305. // queue is empty and we're not polling, otherwise we want to return
  306. // as soon as possible.
  307. task_->run(!more_handlers, this_thread.private_op_queue);
  308. }
  309. else
  310. {
  311. std::size_t task_result = o->task_result_;
  312. if (more_handlers && !one_thread_)
  313. wake_one_thread_and_unlock(lock);
  314. else
  315. lock.unlock();
  316. // Ensure the count of outstanding work is decremented on block exit.
  317. work_cleanup on_exit = { this, &lock, &this_thread };
  318. (void)on_exit;
  319. // Complete the operation. May throw an exception. Deletes the object.
  320. o->complete(this, ec, task_result);
  321. return 1;
  322. }
  323. }
  324. else
  325. {
  326. wakeup_event_.clear(lock);
  327. wakeup_event_.wait(lock);
  328. }
  329. }
  330. return 0;
  331. }
  332. std::size_t scheduler::do_poll_one(mutex::scoped_lock& lock,
  333. scheduler::thread_info& this_thread,
  334. const asio::error_code& ec)
  335. {
  336. if (stopped_)
  337. return 0;
  338. operation* o = op_queue_.front();
  339. if (o == &task_operation_)
  340. {
  341. op_queue_.pop();
  342. lock.unlock();
  343. {
  344. task_cleanup c = { this, &lock, &this_thread };
  345. (void)c;
  346. // Run the task. May throw an exception. Only block if the operation
  347. // queue is empty and we're not polling, otherwise we want to return
  348. // as soon as possible.
  349. task_->run(false, this_thread.private_op_queue);
  350. }
  351. o = op_queue_.front();
  352. if (o == &task_operation_)
  353. {
  354. wakeup_event_.maybe_unlock_and_signal_one(lock);
  355. return 0;
  356. }
  357. }
  358. if (o == 0)
  359. return 0;
  360. op_queue_.pop();
  361. bool more_handlers = (!op_queue_.empty());
  362. std::size_t task_result = o->task_result_;
  363. if (more_handlers && !one_thread_)
  364. wake_one_thread_and_unlock(lock);
  365. else
  366. lock.unlock();
  367. // Ensure the count of outstanding work is decremented on block exit.
  368. work_cleanup on_exit = { this, &lock, &this_thread };
  369. (void)on_exit;
  370. // Complete the operation. May throw an exception. Deletes the object.
  371. o->complete(this, ec, task_result);
  372. return 1;
  373. }
  374. void scheduler::stop_all_threads(
  375. mutex::scoped_lock& lock)
  376. {
  377. stopped_ = true;
  378. wakeup_event_.signal_all(lock);
  379. if (!task_interrupted_ && task_)
  380. {
  381. task_interrupted_ = true;
  382. task_->interrupt();
  383. }
  384. }
  385. void scheduler::wake_one_thread_and_unlock(
  386. mutex::scoped_lock& lock)
  387. {
  388. if (!wakeup_event_.maybe_unlock_and_signal_one(lock))
  389. {
  390. if (!task_interrupted_ && task_)
  391. {
  392. task_interrupted_ = true;
  393. task_->interrupt();
  394. }
  395. lock.unlock();
  396. }
  397. }
  398. } // namespace detail
  399. } // namespace asio
  400. #include "asio/detail/pop_options.hpp"
  401. #endif // ASIO_DETAIL_IMPL_SCHEDULER_IPP