Audio plugin host https://kx.studio/carla

scheduler.ipp (14 KB)

//
// detail/impl/scheduler.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2019 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

#ifndef ASIO_DETAIL_IMPL_SCHEDULER_IPP
#define ASIO_DETAIL_IMPL_SCHEDULER_IPP

#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)

#include "asio/detail/config.hpp"
#include "asio/detail/concurrency_hint.hpp"
#include "asio/detail/event.hpp"
#include "asio/detail/limits.hpp"
#include "asio/detail/reactor.hpp"
#include "asio/detail/scheduler.hpp"
#include "asio/detail/scheduler_thread_info.hpp"
#include "asio/detail/signal_blocker.hpp"

#include "asio/detail/push_options.hpp"

namespace asio {
namespace detail {
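
// Function object run by the thread that the scheduler owns when constructed
// with own_thread == true; it simply calls run() until the scheduler stops.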
class scheduler::thread_function
{
public:
  explicit thread_function(scheduler* s)
    : this_(s)
  {
  }

  void operator()()
  {
    asio::error_code ec;
    this_->run(ec);
  }

private:
  scheduler* this_;
};
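
// Helper destroyed on exit from task_->run(): even if the task throws, it
// folds the thread-private work count into the shared counter, publishes any
// privately queued operations, and reinserts the task marker into the queue.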
struct scheduler::task_cleanup
{
  ~task_cleanup()
  {
    if (this_thread_->private_outstanding_work > 0)
    {
      asio::detail::increment(
          scheduler_->outstanding_work_,
          this_thread_->private_outstanding_work);
    }
    this_thread_->private_outstanding_work = 0;

    // Enqueue the completed operations and reinsert the task at the end of
    // the operation queue.
    lock_->lock();
    scheduler_->task_interrupted_ = true;
    scheduler_->op_queue_.push(this_thread_->private_op_queue);
    scheduler_->op_queue_.push(&scheduler_->task_operation_);
  }

  scheduler* scheduler_;
  mutex::scoped_lock* lock_;
  thread_info* this_thread_;
};
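
// Helper destroyed after a handler completes: the handler itself consumed one
// unit of work, so any surplus in the thread-private count is added to the
// shared counter, a deficit calls work_finished(), and operations the handler
// queued privately are published to the main queue.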
struct scheduler::work_cleanup
{
  ~work_cleanup()
  {
    if (this_thread_->private_outstanding_work > 1)
    {
      asio::detail::increment(
          scheduler_->outstanding_work_,
          this_thread_->private_outstanding_work - 1);
    }
    else if (this_thread_->private_outstanding_work < 1)
    {
      scheduler_->work_finished();
    }
    this_thread_->private_outstanding_work = 0;

#if defined(ASIO_HAS_THREADS)
    if (!this_thread_->private_op_queue.empty())
    {
      lock_->lock();
      scheduler_->op_queue_.push(this_thread_->private_op_queue);
    }
#endif // defined(ASIO_HAS_THREADS)
  }

  scheduler* scheduler_;
  mutex::scoped_lock* lock_;
  thread_info* this_thread_;
};
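
// Note: one_thread_ is true whenever the concurrency hint promises a single
// thread or disables locking, allowing cross-thread wakeups to be skipped.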
scheduler::scheduler(asio::execution_context& ctx,
    int concurrency_hint, bool own_thread)
  : asio::detail::execution_context_service_base<scheduler>(ctx),
    one_thread_(concurrency_hint == 1
        || !ASIO_CONCURRENCY_HINT_IS_LOCKING(
          SCHEDULER, concurrency_hint)
        || !ASIO_CONCURRENCY_HINT_IS_LOCKING(
          REACTOR_IO, concurrency_hint)),
    mutex_(ASIO_CONCURRENCY_HINT_IS_LOCKING(
          SCHEDULER, concurrency_hint)),
    task_(0),
    task_interrupted_(true),
    outstanding_work_(0),
    stopped_(false),
    shutdown_(false),
    concurrency_hint_(concurrency_hint),
    thread_(0)
{
  ASIO_HANDLER_TRACKING_INIT;

  if (own_thread)
  {
    ++outstanding_work_;
    asio::detail::signal_blocker sb;
    thread_ = new asio::detail::thread(thread_function(this));
  }
}

scheduler::~scheduler()
{
  if (thread_)
  {
    thread_->join();
    delete thread_;
  }
}

void scheduler::shutdown()
{
  mutex::scoped_lock lock(mutex_);
  shutdown_ = true;
  if (thread_)
    stop_all_threads(lock);
  lock.unlock();

  // Join thread to ensure task operation is returned to queue.
  if (thread_)
  {
    thread_->join();
    delete thread_;
    thread_ = 0;
  }

  // Destroy handler objects.
  while (!op_queue_.empty())
  {
    operation* o = op_queue_.front();
    op_queue_.pop();
    if (o != &task_operation_)
      o->destroy();
  }

  // Reset to initial state.
  task_ = 0;
}
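
// Lazily install the reactor as the scheduler's task, and queue the marker
// operation that tells the run loop when the reactor should be run.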
void scheduler::init_task()
{
  mutex::scoped_lock lock(mutex_);
  if (!shutdown_ && !task_)
  {
    task_ = &use_service<reactor>(this->context());
    op_queue_.push(&task_operation_);
    wake_one_thread_and_unlock(lock);
  }
}
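
// Run the event loop on the calling thread until the scheduler is stopped or
// there is no outstanding work. Returns a (saturating) count of handlers run.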
std::size_t scheduler::run(asio::error_code& ec)
{
  ec = asio::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

  std::size_t n = 0;
  for (; do_run_one(lock, this_thread, ec); lock.lock())
    if (n != (std::numeric_limits<std::size_t>::max)())
      ++n;
  return n;
}

std::size_t scheduler::run_one(asio::error_code& ec)
{
  ec = asio::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

  return do_run_one(lock, this_thread, ec);
}

std::size_t scheduler::wait_one(long usec, asio::error_code& ec)
{
  ec = asio::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

  return do_wait_one(lock, this_thread, usec, ec);
}
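
// poll() differs from run() in that it only executes handlers that are
// already ready, passing a zero timeout to the reactor instead of blocking.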
std::size_t scheduler::poll(asio::error_code& ec)
{
  ec = asio::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

#if defined(ASIO_HAS_THREADS)
  // We want to support nested calls to poll() and poll_one(), so any handlers
  // that are already on a thread-private queue need to be put on to the main
  // queue now.
  if (one_thread_)
    if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
      op_queue_.push(outer_info->private_op_queue);
#endif // defined(ASIO_HAS_THREADS)

  std::size_t n = 0;
  for (; do_poll_one(lock, this_thread, ec); lock.lock())
    if (n != (std::numeric_limits<std::size_t>::max)())
      ++n;
  return n;
}

std::size_t scheduler::poll_one(asio::error_code& ec)
{
  ec = asio::error_code();
  if (outstanding_work_ == 0)
  {
    stop();
    return 0;
  }

  thread_info this_thread;
  this_thread.private_outstanding_work = 0;
  thread_call_stack::context ctx(this, this_thread);

  mutex::scoped_lock lock(mutex_);

#if defined(ASIO_HAS_THREADS)
  // We want to support nested calls to poll() and poll_one(), so any handlers
  // that are already on a thread-private queue need to be put on to the main
  // queue now.
  if (one_thread_)
    if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
      op_queue_.push(outer_info->private_op_queue);
#endif // defined(ASIO_HAS_THREADS)

  return do_poll_one(lock, this_thread, ec);
}

void scheduler::stop()
{
  mutex::scoped_lock lock(mutex_);
  stop_all_threads(lock);
}

bool scheduler::stopped() const
{
  mutex::scoped_lock lock(mutex_);
  return stopped_;
}

void scheduler::restart()
{
  mutex::scoped_lock lock(mutex_);
  stopped_ = false;
}

void scheduler::compensating_work_started()
{
  thread_info_base* this_thread = thread_call_stack::contains(this);
  ++static_cast<thread_info*>(this_thread)->private_outstanding_work;
}
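
// Fast path for posting work: if only one thread can be running the
// scheduler, or the operation is a continuation of the current handler, it
// is pushed onto the calling thread's private queue without locking.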
void scheduler::post_immediate_completion(
    scheduler::operation* op, bool is_continuation)
{
#if defined(ASIO_HAS_THREADS)
  if (one_thread_ || is_continuation)
  {
    if (thread_info_base* this_thread = thread_call_stack::contains(this))
    {
      ++static_cast<thread_info*>(this_thread)->private_outstanding_work;
      static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
      return;
    }
  }
#else // defined(ASIO_HAS_THREADS)
  (void)is_continuation;
#endif // defined(ASIO_HAS_THREADS)

  work_started();
  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}

void scheduler::post_deferred_completion(scheduler::operation* op)
{
#if defined(ASIO_HAS_THREADS)
  if (one_thread_)
  {
    if (thread_info_base* this_thread = thread_call_stack::contains(this))
    {
      static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
      return;
    }
  }
#endif // defined(ASIO_HAS_THREADS)

  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}

void scheduler::post_deferred_completions(
    op_queue<scheduler::operation>& ops)
{
  if (!ops.empty())
  {
#if defined(ASIO_HAS_THREADS)
    if (one_thread_)
    {
      if (thread_info_base* this_thread = thread_call_stack::contains(this))
      {
        static_cast<thread_info*>(this_thread)->private_op_queue.push(ops);
        return;
      }
    }
#endif // defined(ASIO_HAS_THREADS)

    mutex::scoped_lock lock(mutex_);
    op_queue_.push(ops);
    wake_one_thread_and_unlock(lock);
  }
}

void scheduler::do_dispatch(
    scheduler::operation* op)
{
  work_started();
  mutex::scoped_lock lock(mutex_);
  op_queue_.push(op);
  wake_one_thread_and_unlock(lock);
}
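
// Transfer abandoned operations into a local queue; its destructor destroys
// them without invoking their handlers.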
void scheduler::abandon_operations(
    op_queue<scheduler::operation>& ops)
{
  op_queue<scheduler::operation> ops2;
  ops2.push(ops);
}
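
// Execute at most one operation. When the task marker is at the front of the
// queue the reactor is run (blocking only if no other handlers are pending);
// otherwise a single handler is completed and 1 is returned.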
std::size_t scheduler::do_run_one(mutex::scoped_lock& lock,
    scheduler::thread_info& this_thread,
    const asio::error_code& ec)
{
  while (!stopped_)
  {
    if (!op_queue_.empty())
    {
      // Prepare to execute first handler from queue.
      operation* o = op_queue_.front();
      op_queue_.pop();
      bool more_handlers = (!op_queue_.empty());

      if (o == &task_operation_)
      {
        task_interrupted_ = more_handlers;

        if (more_handlers && !one_thread_)
          wakeup_event_.unlock_and_signal_one(lock);
        else
          lock.unlock();

        task_cleanup on_exit = { this, &lock, &this_thread };
        (void)on_exit;

        // Run the task. May throw an exception. Only block if the operation
        // queue is empty and we're not polling, otherwise we want to return
        // as soon as possible.
        task_->run(more_handlers ? 0 : -1, this_thread.private_op_queue);
      }
      else
      {
        std::size_t task_result = o->task_result_;

        if (more_handlers && !one_thread_)
          wake_one_thread_and_unlock(lock);
        else
          lock.unlock();

        // Ensure the count of outstanding work is decremented on block exit.
        work_cleanup on_exit = { this, &lock, &this_thread };
        (void)on_exit;

        // Complete the operation. May throw an exception. Deletes the object.
        o->complete(this, ec, task_result);

        return 1;
      }
    }
    else
    {
      wakeup_event_.clear(lock);
      wakeup_event_.wait(lock);
    }
  }

  return 0;
}
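
// Variant of do_run_one() that waits at most usec microseconds for an
// operation to become ready before returning 0.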
std::size_t scheduler::do_wait_one(mutex::scoped_lock& lock,
    scheduler::thread_info& this_thread, long usec,
    const asio::error_code& ec)
{
  if (stopped_)
    return 0;

  operation* o = op_queue_.front();
  if (o == 0)
  {
    wakeup_event_.clear(lock);
    wakeup_event_.wait_for_usec(lock, usec);
    usec = 0; // Wait at most once.
    o = op_queue_.front();
  }

  if (o == &task_operation_)
  {
    op_queue_.pop();
    bool more_handlers = (!op_queue_.empty());

    task_interrupted_ = more_handlers;

    if (more_handlers && !one_thread_)
      wakeup_event_.unlock_and_signal_one(lock);
    else
      lock.unlock();

    {
      task_cleanup on_exit = { this, &lock, &this_thread };
      (void)on_exit;

      // Run the task. May throw an exception. Only block if the operation
      // queue is empty and we're not polling, otherwise we want to return
      // as soon as possible.
      task_->run(more_handlers ? 0 : usec, this_thread.private_op_queue);
    }

    o = op_queue_.front();
    if (o == &task_operation_)
    {
      if (!one_thread_)
        wakeup_event_.maybe_unlock_and_signal_one(lock);
      return 0;
    }
  }

  if (o == 0)
    return 0;

  op_queue_.pop();
  bool more_handlers = (!op_queue_.empty());

  std::size_t task_result = o->task_result_;

  if (more_handlers && !one_thread_)
    wake_one_thread_and_unlock(lock);
  else
    lock.unlock();

  // Ensure the count of outstanding work is decremented on block exit.
  work_cleanup on_exit = { this, &lock, &this_thread };
  (void)on_exit;

  // Complete the operation. May throw an exception. Deletes the object.
  o->complete(this, ec, task_result);

  return 1;
}
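
// Variant of do_run_one() that never blocks: the reactor is polled with a
// zero timeout, and 0 is returned immediately if no handler is ready.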
std::size_t scheduler::do_poll_one(mutex::scoped_lock& lock,
    scheduler::thread_info& this_thread,
    const asio::error_code& ec)
{
  if (stopped_)
    return 0;

  operation* o = op_queue_.front();
  if (o == &task_operation_)
  {
    op_queue_.pop();
    lock.unlock();

    {
      task_cleanup c = { this, &lock, &this_thread };
      (void)c;

      // Run the task. May throw an exception. Only block if the operation
      // queue is empty and we're not polling, otherwise we want to return
      // as soon as possible.
      task_->run(0, this_thread.private_op_queue);
    }

    o = op_queue_.front();
    if (o == &task_operation_)
    {
      wakeup_event_.maybe_unlock_and_signal_one(lock);
      return 0;
    }
  }

  if (o == 0)
    return 0;

  op_queue_.pop();
  bool more_handlers = (!op_queue_.empty());

  std::size_t task_result = o->task_result_;

  if (more_handlers && !one_thread_)
    wake_one_thread_and_unlock(lock);
  else
    lock.unlock();

  // Ensure the count of outstanding work is decremented on block exit.
  work_cleanup on_exit = { this, &lock, &this_thread };
  (void)on_exit;

  // Complete the operation. May throw an exception. Deletes the object.
  o->complete(this, ec, task_result);

  return 1;
}
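
// Called with the mutex held: marks the scheduler as stopped, wakes all
// threads blocked on the wakeup event, and interrupts the reactor.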
void scheduler::stop_all_threads(
    mutex::scoped_lock& lock)
{
  stopped_ = true;
  wakeup_event_.signal_all(lock);

  if (!task_interrupted_ && task_)
  {
    task_interrupted_ = true;
    task_->interrupt();
  }
}
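
// Wake a single thread to handle newly queued work: prefer a thread blocked
// on the wakeup event, otherwise interrupt the reactor. The mutex is always
// left unlocked on return.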
void scheduler::wake_one_thread_and_unlock(
    mutex::scoped_lock& lock)
{
  if (!wakeup_event_.maybe_unlock_and_signal_one(lock))
  {
    if (!task_interrupted_ && task_)
    {
      task_interrupted_ = true;
      task_->interrupt();
    }
    lock.unlock();
  }
}

} // namespace detail
} // namespace asio

#include "asio/detail/pop_options.hpp"

#endif // ASIO_DETAIL_IMPL_SCHEDULER_IPP