You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

898 lines
28KB

  1. /*
  2. * This file is part of Libav.
  3. *
  4. * Libav is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU Lesser General Public
  6. * License as published by the Free Software Foundation; either
  7. * version 2.1 of the License, or (at your option) any later version.
  8. *
  9. * Libav is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  12. * Lesser General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU Lesser General Public
  15. * License along with Libav; if not, write to the Free Software
  16. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. /**
  19. * @file
  20. * Frame multithreading support functions
  21. * @see doc/multithreading.txt
  22. */
  23. #include "config.h"
  24. #include <stdatomic.h>
  25. #include <stdint.h>
  26. #if HAVE_PTHREADS
  27. #include <pthread.h>
  28. #elif HAVE_W32THREADS
  29. #include "compat/w32pthreads.h"
  30. #endif
  31. #include "avcodec.h"
  32. #include "hwaccel.h"
  33. #include "internal.h"
  34. #include "pthread_internal.h"
  35. #include "thread.h"
  36. #include "version.h"
  37. #include "libavutil/avassert.h"
  38. #include "libavutil/buffer.h"
  39. #include "libavutil/common.h"
  40. #include "libavutil/cpu.h"
  41. #include "libavutil/frame.h"
  42. #include "libavutil/internal.h"
  43. #include "libavutil/log.h"
  44. #include "libavutil/mem.h"
/*
 * Per-thread decode state machine, stored in PerThreadContext.state.
 * NOTE: STATE_INPUT_READY must remain the first (zero-valued) entry:
 * the thread contexts are allocated with av_mallocz(), so every worker
 * implicitly starts in the idle/awaiting-input state.
 */
enum {
    ///< Set when the thread is awaiting a packet.
    STATE_INPUT_READY,
    ///< Set before the codec has called ff_thread_finish_setup().
    STATE_SETTING_UP,
    /**
     * Set when the codec calls get_buffer().
     * State is returned to STATE_SETTING_UP afterwards.
     */
    STATE_GET_BUFFER,
    ///< Set after the codec has called ff_thread_finish_setup().
    STATE_SETUP_FINISHED,
};
/**
 * Context used by codec threads and stored in their AVCodecInternal thread_ctx.
 */
typedef struct PerThreadContext {
    struct FrameThreadContext *parent; ///< Back-pointer to the shared frame-threading state.

    pthread_t thread;
    int thread_init;               ///< Nonzero once pthread_create() succeeded; only then is the thread joined on teardown.
    pthread_cond_t input_cond;     ///< Used to wait for a new packet from the main thread.
    pthread_cond_t progress_cond;  ///< Used by child threads to wait for progress to change.
    pthread_cond_t output_cond;    ///< Used by the main thread to wait for frames to finish.

    pthread_mutex_t mutex;         ///< Mutex used to protect the contents of the PerThreadContext.
    pthread_mutex_t progress_mutex; ///< Mutex used to protect frame progress values and progress_cond.

    AVCodecContext *avctx;         ///< Context used to decode packets passed to this thread.

    AVPacket avpkt;                ///< Input packet (for decoding) or output (for encoding).

    AVFrame *frame;                ///< Output frame (for decoding) or input (for encoding).
    int got_frame;                 ///< The output of got_picture_ptr from the last avcodec_decode_video() call.
    int result;                    ///< The result of the last codec decode/encode() call.

    atomic_int state;              ///< One of the STATE_* values above; drives the submit/decode handshake.

    /**
     * Array of frames passed to ff_thread_release_buffer().
     * Frames are released after all threads referencing them are finished.
     */
    AVFrame *released_buffers;
    int num_released_buffers;
    int released_buffers_allocated; ///< Allocated byte size of released_buffers (managed by av_fast_realloc()).

    AVFrame *requested_frame;      ///< AVFrame the codec passed to get_buffer()
    int requested_flags;           ///< flags passed to get_buffer() for requested_frame

    int die;                       ///< Set when the thread should exit.

    int hwaccel_serializing;       ///< Set while this thread holds parent->hwaccel_mutex.
    int async_serializing;         ///< Set while this thread holds parent->async_mutex.
} PerThreadContext;
/**
 * Context stored in the client AVCodecInternal thread_ctx.
 */
typedef struct FrameThreadContext {
    PerThreadContext *threads;     ///< The contexts for each thread.
    PerThreadContext *prev_thread; ///< The last thread submit_packet() was called on.

    pthread_mutex_t buffer_mutex;  ///< Mutex used to protect get/release_buffer().
    /**
     * This lock is used for ensuring threads run in serial when hwaccel
     * is used.
     */
    pthread_mutex_t hwaccel_mutex;
    pthread_mutex_t async_mutex;   ///< Held by the caller between API calls; dropped while inside ff_thread_decode_frame().

    int next_decoding;             ///< The next context to submit a packet to.
    int next_finished;             ///< The next context to return output from.

    int delaying;                  /**<
                                    * Set for the first N packets, where N is the number of threads.
                                    * While it is set, ff_thread_en/decode_frame won't return any results.
                                    */
} FrameThreadContext;
/**
 * Codec worker thread.
 *
 * Automatically calls ff_thread_finish_setup() if the codec does
 * not provide an update_thread_context method, or if the codec returns
 * before calling it.
 */
static attribute_align_arg void *frame_worker_thread(void *arg)
{
    PerThreadContext *p = arg;
    AVCodecContext *avctx = p->avctx;
    const AVCodec *codec = avctx->codec;

    while (1) {
        /* Idle until submit_packet() moves us out of STATE_INPUT_READY,
         * or the main thread asks us to exit via p->die. */
        if (atomic_load(&p->state) == STATE_INPUT_READY) {
            pthread_mutex_lock(&p->mutex);
            while (atomic_load(&p->state) == STATE_INPUT_READY) {
                if (p->die) {
                    pthread_mutex_unlock(&p->mutex);
                    goto die;
                }
                pthread_cond_wait(&p->input_cond, &p->mutex);
            }
            pthread_mutex_unlock(&p->mutex);
        }

        /* No per-frame context updates and thread-safe user callbacks:
         * setup is trivially finished before decoding even starts. */
        if (!codec->update_thread_context && avctx->thread_safe_callbacks)
            ff_thread_finish_setup(avctx);

        pthread_mutex_lock(&p->mutex);

        /* If a decoder supports hwaccel, then it must call ff_get_format().
         * Since that call must happen before ff_thread_finish_setup(), the
         * decoder is required to implement update_thread_context() and call
         * ff_thread_finish_setup() manually. Therefore the above
         * ff_thread_finish_setup() call did not happen and hwaccel_serializing
         * cannot be true here. */
        av_assert0(!p->hwaccel_serializing);

        /* if the previous thread uses hwaccel then we take the lock to ensure
         * the threads don't run concurrently */
        if (avctx->hwaccel) {
            pthread_mutex_lock(&p->parent->hwaccel_mutex);
            p->hwaccel_serializing = 1;
        }

        av_frame_unref(p->frame);
        p->got_frame = 0;
        p->result = codec->decode(avctx, p->frame, &p->got_frame, &p->avpkt);

        /* A failed or frameless decode must not leave a dangling buffer
         * reference in p->frame; unref it and complain (decoder bug). */
        if ((p->result < 0 || !p->got_frame) && p->frame->buf[0]) {
            if (avctx->internal->allocate_progress)
                av_log(avctx, AV_LOG_ERROR, "A frame threaded decoder did not "
                       "free the frame on failure. This is a bug, please report it.\n");
            av_frame_unref(p->frame);
        }

        /* The codec returned without ever calling ff_thread_finish_setup();
         * do it on its behalf so submit_packet() does not wait forever. */
        if (atomic_load(&p->state) == STATE_SETTING_UP)
            ff_thread_finish_setup(avctx);

        /* Release the serialization locks acquired above or in
         * ff_thread_finish_setup(). */
        if (p->hwaccel_serializing) {
            p->hwaccel_serializing = 0;
            pthread_mutex_unlock(&p->parent->hwaccel_mutex);
        }

        if (p->async_serializing) {
            p->async_serializing = 0;
            pthread_mutex_unlock(&p->parent->async_mutex);
        }

        atomic_store(&p->state, STATE_INPUT_READY);

        /* Wake the main thread waiting on our output in
         * ff_thread_decode_frame() or park_frame_worker_threads(). */
        pthread_mutex_lock(&p->progress_mutex);
        pthread_cond_signal(&p->output_cond);
        pthread_mutex_unlock(&p->progress_mutex);

        pthread_mutex_unlock(&p->mutex);
    }

die:
    return NULL;
}
/**
 * Update the next thread's AVCodecContext with values from the reference thread's context.
 *
 * @param dst The destination context.
 * @param src The source context.
 * @param for_user 0 if the destination is a codec thread, 1 if the destination is the user's thread
 * @return 0 on success, negative error code (e.g. AVERROR(ENOMEM)) on failure
 */
static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src, int for_user)
{
    int err = 0;

    if (dst != src) {
        /* Stream parameters that the decoding thread may have changed. */
        dst->time_base = src->time_base;
        dst->framerate = src->framerate;
        dst->width     = src->width;
        dst->height    = src->height;
        dst->pix_fmt   = src->pix_fmt;
        dst->sw_pix_fmt = src->sw_pix_fmt;

        dst->coded_width  = src->coded_width;
        dst->coded_height = src->coded_height;

        dst->has_b_frames = src->has_b_frames;
        dst->idct_algo    = src->idct_algo;

        dst->bits_per_coded_sample = src->bits_per_coded_sample;
        dst->sample_aspect_ratio   = src->sample_aspect_ratio;

        dst->profile = src->profile;
        dst->level   = src->level;

        dst->bits_per_raw_sample = src->bits_per_raw_sample;
        dst->ticks_per_frame     = src->ticks_per_frame;
        dst->color_primaries     = src->color_primaries;

        dst->color_trc   = src->color_trc;
        dst->colorspace  = src->colorspace;
        dst->color_range = src->color_range;
        dst->chroma_sample_location = src->chroma_sample_location;

        /* hwaccel state is shared (not refcounted); copy the pointers. */
        dst->hwaccel = src->hwaccel;
        dst->hwaccel_context = src->hwaccel_context;
        dst->internal->hwaccel_priv_data = src->internal->hwaccel_priv_data;

        /* Re-reference hw_frames_ctx only when it actually changed
         * (present/absent mismatch or a different underlying context). */
        if (!!dst->hw_frames_ctx != !!src->hw_frames_ctx ||
            (dst->hw_frames_ctx && dst->hw_frames_ctx->data != src->hw_frames_ctx->data)) {
            av_buffer_unref(&dst->hw_frames_ctx);

            if (src->hw_frames_ctx) {
                dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
                if (!dst->hw_frames_ctx)
                    return AVERROR(ENOMEM);
            }
        }

        dst->hwaccel_flags = src->hwaccel_flags;
    }

    if (for_user) {
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
        dst->coded_frame = src->coded_frame;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    } else {
        /* Codec-private per-frame state transfer. */
        if (dst->codec->update_thread_context)
            err = dst->codec->update_thread_context(dst, src);
    }

    return err;
}
  235. /**
  236. * Update the next thread's AVCodecContext with values set by the user.
  237. *
  238. * @param dst The destination context.
  239. * @param src The source context.
  240. * @return 0 on success, negative error code on failure
  241. */
  242. static int update_context_from_user(AVCodecContext *dst, AVCodecContext *src)
  243. {
  244. #define copy_fields(s, e) memcpy(&dst->s, &src->s, (char*)&dst->e - (char*)&dst->s);
  245. dst->flags = src->flags;
  246. dst->draw_horiz_band= src->draw_horiz_band;
  247. dst->get_buffer2 = src->get_buffer2;
  248. dst->opaque = src->opaque;
  249. dst->debug = src->debug;
  250. dst->slice_flags = src->slice_flags;
  251. dst->flags2 = src->flags2;
  252. copy_fields(skip_loop_filter, subtitle_header);
  253. dst->frame_number = src->frame_number;
  254. dst->reordered_opaque = src->reordered_opaque;
  255. if (src->slice_count && src->slice_offset) {
  256. if (dst->slice_count < src->slice_count) {
  257. int *tmp = av_realloc(dst->slice_offset, src->slice_count *
  258. sizeof(*dst->slice_offset));
  259. if (!tmp) {
  260. av_free(dst->slice_offset);
  261. return AVERROR(ENOMEM);
  262. }
  263. dst->slice_offset = tmp;
  264. }
  265. memcpy(dst->slice_offset, src->slice_offset,
  266. src->slice_count * sizeof(*dst->slice_offset));
  267. }
  268. dst->slice_count = src->slice_count;
  269. return 0;
  270. #undef copy_fields
  271. }
  272. /// Releases the buffers that this decoding thread was the last user of.
  273. static void release_delayed_buffers(PerThreadContext *p)
  274. {
  275. FrameThreadContext *fctx = p->parent;
  276. while (p->num_released_buffers > 0) {
  277. AVFrame *f;
  278. pthread_mutex_lock(&fctx->buffer_mutex);
  279. // fix extended data in case the caller screwed it up
  280. av_assert0(p->avctx->codec_type == AVMEDIA_TYPE_VIDEO);
  281. f = &p->released_buffers[--p->num_released_buffers];
  282. f->extended_data = f->data;
  283. av_frame_unref(f);
  284. pthread_mutex_unlock(&fctx->buffer_mutex);
  285. }
  286. }
  287. static int submit_packet(PerThreadContext *p, AVPacket *avpkt)
  288. {
  289. FrameThreadContext *fctx = p->parent;
  290. PerThreadContext *prev_thread = fctx->prev_thread;
  291. const AVCodec *codec = p->avctx->codec;
  292. if (!avpkt->size && !(codec->capabilities & AV_CODEC_CAP_DELAY))
  293. return 0;
  294. pthread_mutex_lock(&p->mutex);
  295. release_delayed_buffers(p);
  296. if (prev_thread) {
  297. int err;
  298. if (atomic_load(&prev_thread->state) == STATE_SETTING_UP) {
  299. pthread_mutex_lock(&prev_thread->progress_mutex);
  300. while (atomic_load(&prev_thread->state) == STATE_SETTING_UP)
  301. pthread_cond_wait(&prev_thread->progress_cond, &prev_thread->progress_mutex);
  302. pthread_mutex_unlock(&prev_thread->progress_mutex);
  303. }
  304. err = update_context_from_thread(p->avctx, prev_thread->avctx, 0);
  305. if (err) {
  306. pthread_mutex_unlock(&p->mutex);
  307. return err;
  308. }
  309. }
  310. av_packet_unref(&p->avpkt);
  311. av_packet_ref(&p->avpkt, avpkt);
  312. atomic_store(&p->state, STATE_SETTING_UP);
  313. pthread_cond_signal(&p->input_cond);
  314. pthread_mutex_unlock(&p->mutex);
  315. /*
  316. * If the client doesn't have a thread-safe get_buffer(),
  317. * then decoding threads call back to the main thread,
  318. * and it calls back to the client here.
  319. */
  320. if (!p->avctx->thread_safe_callbacks &&
  321. p->avctx->get_buffer2 != avcodec_default_get_buffer2) {
  322. while (atomic_load(&p->state) != STATE_SETUP_FINISHED &&
  323. atomic_load(&p->state) != STATE_INPUT_READY) {
  324. pthread_mutex_lock(&p->progress_mutex);
  325. while (atomic_load(&p->state) == STATE_SETTING_UP)
  326. pthread_cond_wait(&p->progress_cond, &p->progress_mutex);
  327. if (atomic_load_explicit(&p->state, memory_order_acquire) == STATE_GET_BUFFER) {
  328. p->result = ff_get_buffer(p->avctx, p->requested_frame, p->requested_flags);
  329. atomic_store(&p->state, STATE_SETTING_UP);
  330. pthread_cond_signal(&p->progress_cond);
  331. }
  332. pthread_mutex_unlock(&p->progress_mutex);
  333. }
  334. }
  335. fctx->prev_thread = p;
  336. fctx->next_decoding++;
  337. return 0;
  338. }
/**
 * Submit avpkt to the next worker in round-robin order, then return the
 * oldest finished frame (if any) in *picture.
 *
 * On success returns the consumed packet size (avpkt->size), otherwise a
 * negative error code from context update, submission, or the worker's
 * decode call.
 */
int ff_thread_decode_frame(AVCodecContext *avctx,
                           AVFrame *picture, int *got_picture_ptr,
                           AVPacket *avpkt)
{
    FrameThreadContext *fctx = avctx->internal->thread_ctx;
    int finished = fctx->next_finished;
    PerThreadContext *p;
    int err, ret;

    /* release the async lock, permitting blocked hwaccel threads to
     * go forward while we are in this function */
    pthread_mutex_unlock(&fctx->async_mutex);

    /*
     * Submit a packet to the next decoding thread.
     */

    p = &fctx->threads[fctx->next_decoding];
    err = update_context_from_user(p->avctx, avctx);
    if (err)
        goto finish;
    err = submit_packet(p, avpkt);
    if (err)
        goto finish;

    /*
     * If we're still receiving the initial packets, don't return a frame.
     */

    if (fctx->delaying) {
        if (fctx->next_decoding >= (avctx->thread_count-1)) fctx->delaying = 0;

        *got_picture_ptr=0;
        if (avpkt->size) {
            ret = avpkt->size;
            goto finish;
        }
        /* avpkt->size == 0 (flush): fall through and start draining below. */
    }

    /*
     * Return the next available frame from the oldest thread.
     * If we're at the end of the stream, then we have to skip threads that
     * didn't output a frame, because we don't want to accidentally signal
     * EOF (avpkt->size == 0 && *got_picture_ptr == 0).
     */

    do {
        p = &fctx->threads[finished++];

        /* Block until this worker has finished its current packet. */
        if (atomic_load(&p->state) != STATE_INPUT_READY) {
            pthread_mutex_lock(&p->progress_mutex);
            while (atomic_load_explicit(&p->state, memory_order_relaxed) != STATE_INPUT_READY)
                pthread_cond_wait(&p->output_cond, &p->progress_mutex);
            pthread_mutex_unlock(&p->progress_mutex);
        }

        av_frame_move_ref(picture, p->frame);
        *got_picture_ptr = p->got_frame;
        picture->pkt_dts = p->avpkt.dts;

        /*
         * A later call with avkpt->size == 0 may loop over all threads,
         * including this one, searching for a frame to return before being
         * stopped by the "finished != fctx->next_finished" condition.
         * Make sure we don't mistakenly return the same frame again.
         */
        p->got_frame = 0;

        if (finished >= avctx->thread_count) finished = 0;
    } while (!avpkt->size && !*got_picture_ptr && finished != fctx->next_finished);

    update_context_from_thread(avctx, p->avctx, 1);

    if (fctx->next_decoding >= avctx->thread_count) fctx->next_decoding = 0;

    fctx->next_finished = finished;

    /* return the size of the consumed packet if no error occurred */
    ret = (p->result >= 0) ? avpkt->size : p->result;
finish:
    /* re-take the async lock dropped at entry; the caller holds it
     * between API calls (taken in ff_frame_thread_init()) */
    pthread_mutex_lock(&fctx->async_mutex);
    if (err < 0)
        return err;
    return ret;
}
/**
 * Report that decoding of frame f has progressed to row/value n for the
 * given field, waking any threads blocked in ff_thread_await_progress().
 * No-op if the frame has no progress array or progress is already >= n.
 */
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
{
    PerThreadContext *p;
    atomic_int *progress = f->progress ? (atomic_int*)f->progress->data : NULL;

    if (!progress ||
        atomic_load_explicit(&progress[field], memory_order_relaxed) >= n)
        return;

    p = f->owner->internal->thread_ctx;

    if (f->owner->debug&FF_DEBUG_THREADS)
        av_log(f->owner, AV_LOG_DEBUG, "%p finished %d field %d\n", progress, n, field);

    pthread_mutex_lock(&p->progress_mutex);
    /* release pairs with the acquire load in ff_thread_await_progress(),
     * making the decoded data visible to the waiting thread */
    atomic_store_explicit(&progress[field], n, memory_order_release);

    pthread_cond_broadcast(&p->progress_cond);
    pthread_mutex_unlock(&p->progress_mutex);
}
/**
 * Block until decoding of frame f has progressed to at least n for the
 * given field. Returns immediately if the frame has no progress array or
 * the requested progress was already reported.
 */
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
{
    PerThreadContext *p;
    atomic_int *progress = f->progress ? (atomic_int*)f->progress->data : NULL;

    /* acquire pairs with the release store in ff_thread_report_progress() */
    if (!progress ||
        atomic_load_explicit(&progress[field], memory_order_acquire) >= n)
        return;

    p = f->owner->internal->thread_ctx;

    if (f->owner->debug&FF_DEBUG_THREADS)
        av_log(f->owner, AV_LOG_DEBUG, "thread awaiting %d field %d from %p\n", n, field, progress);

    pthread_mutex_lock(&p->progress_mutex);
    /* relaxed is sufficient here: progress_mutex orders the data access */
    while (atomic_load_explicit(&progress[field], memory_order_relaxed) < n)
        pthread_cond_wait(&p->progress_cond, &p->progress_mutex);
    pthread_mutex_unlock(&p->progress_mutex);
}
/**
 * Called by the decoder once header parsing / context setup is done,
 * moving this worker to STATE_SETUP_FINISHED so that submit_packet() may
 * start the next thread. For hwaccel decoding this is also where the
 * serialization locks are acquired (released in frame_worker_thread()).
 * No-op unless frame threading is active.
 */
void ff_thread_finish_setup(AVCodecContext *avctx) {
    PerThreadContext *p = avctx->internal->thread_ctx;

    if (!(avctx->active_thread_type&FF_THREAD_FRAME)) return;

    /* may already hold hwaccel_mutex if frame_worker_thread() took it */
    if (avctx->hwaccel && !p->hwaccel_serializing) {
        pthread_mutex_lock(&p->parent->hwaccel_mutex);
        p->hwaccel_serializing = 1;
    }

    /* this assumes that no hwaccel calls happen before ff_thread_finish_setup() */
    if (avctx->hwaccel &&
        !(avctx->hwaccel->caps_internal & HWACCEL_CAP_ASYNC_SAFE)) {
        p->async_serializing = 1;

        pthread_mutex_lock(&p->parent->async_mutex);
    }

    pthread_mutex_lock(&p->progress_mutex);
    atomic_store(&p->state, STATE_SETUP_FINISHED);
    pthread_cond_broadcast(&p->progress_cond);
    pthread_mutex_unlock(&p->progress_mutex);
}
  456. /// Waits for all threads to finish.
  457. static void park_frame_worker_threads(FrameThreadContext *fctx, int thread_count)
  458. {
  459. int i;
  460. pthread_mutex_unlock(&fctx->async_mutex);
  461. for (i = 0; i < thread_count; i++) {
  462. PerThreadContext *p = &fctx->threads[i];
  463. if (atomic_load(&p->state) != STATE_INPUT_READY) {
  464. pthread_mutex_lock(&p->progress_mutex);
  465. while (atomic_load(&p->state) != STATE_INPUT_READY)
  466. pthread_cond_wait(&p->output_cond, &p->progress_mutex);
  467. pthread_mutex_unlock(&p->progress_mutex);
  468. }
  469. }
  470. pthread_mutex_lock(&fctx->async_mutex);
  471. }
/**
 * Tear down frame threading: park all workers, ask them to exit, join
 * them, close the per-thread codec contexts and free all resources.
 *
 * @param thread_count number of PerThreadContexts to free (may be fewer
 *                     than avctx->thread_count when called from a failed
 *                     ff_frame_thread_init())
 */
void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
{
    FrameThreadContext *fctx = avctx->internal->thread_ctx;
    const AVCodec *codec = avctx->codec;
    int i;

    park_frame_worker_threads(fctx, thread_count);

    /* sync the first worker's context with the last one used, so that
     * codec->close() below sees consistent state */
    if (fctx->prev_thread && fctx->prev_thread != fctx->threads)
        update_context_from_thread(fctx->threads->avctx, fctx->prev_thread->avctx, 0);

    for (i = 0; i < thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];

        /* tell the worker to exit and wake it from its input wait */
        pthread_mutex_lock(&p->mutex);
        p->die = 1;
        pthread_cond_signal(&p->input_cond);
        pthread_mutex_unlock(&p->mutex);

        /* thread_init is only set after a successful pthread_create() */
        if (p->thread_init)
            pthread_join(p->thread, NULL);

        if (codec->close)
            codec->close(p->avctx);

        avctx->codec = NULL;

        release_delayed_buffers(p);
        av_frame_free(&p->frame);
    }

    for (i = 0; i < thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];

        pthread_mutex_destroy(&p->mutex);
        pthread_mutex_destroy(&p->progress_mutex);
        pthread_cond_destroy(&p->input_cond);
        pthread_cond_destroy(&p->progress_cond);
        pthread_cond_destroy(&p->output_cond);
        av_packet_unref(&p->avpkt);
        av_freep(&p->released_buffers);

        /* thread 0 shares priv_data/slice_offset with the user context;
         * only the copies own theirs */
        if (i) {
            av_freep(&p->avctx->priv_data);
            av_freep(&p->avctx->slice_offset);
        }

        av_buffer_unref(&p->avctx->hw_frames_ctx);

        av_freep(&p->avctx->internal);
        av_freep(&p->avctx);
    }

    av_freep(&fctx->threads);
    pthread_mutex_destroy(&fctx->buffer_mutex);
    pthread_mutex_destroy(&fctx->hwaccel_mutex);

    /* async_mutex was left locked (taken in ff_frame_thread_init()) */
    pthread_mutex_unlock(&fctx->async_mutex);
    pthread_mutex_destroy(&fctx->async_mutex);

    av_freep(&avctx->internal->thread_ctx);
}
  518. int ff_frame_thread_init(AVCodecContext *avctx)
  519. {
  520. int thread_count = avctx->thread_count;
  521. const AVCodec *codec = avctx->codec;
  522. AVCodecContext *src = avctx;
  523. FrameThreadContext *fctx;
  524. int i, err = 0;
  525. #if HAVE_W32THREADS
  526. w32thread_init();
  527. #endif
  528. if (!thread_count) {
  529. int nb_cpus = av_cpu_count();
  530. av_log(avctx, AV_LOG_DEBUG, "detected %d logical cores\n", nb_cpus);
  531. // use number of cores + 1 as thread count if there is more than one
  532. if (nb_cpus > 1)
  533. thread_count = avctx->thread_count = FFMIN(nb_cpus + 1, MAX_AUTO_THREADS);
  534. else
  535. thread_count = avctx->thread_count = 1;
  536. }
  537. if (thread_count <= 1) {
  538. avctx->active_thread_type = 0;
  539. return 0;
  540. }
  541. avctx->internal->thread_ctx = fctx = av_mallocz(sizeof(FrameThreadContext));
  542. if (!fctx)
  543. return AVERROR(ENOMEM);
  544. fctx->threads = av_mallocz(sizeof(PerThreadContext) * thread_count);
  545. if (!fctx->threads) {
  546. av_freep(&avctx->internal->thread_ctx);
  547. return AVERROR(ENOMEM);
  548. }
  549. pthread_mutex_init(&fctx->buffer_mutex, NULL);
  550. pthread_mutex_init(&fctx->hwaccel_mutex, NULL);
  551. pthread_mutex_init(&fctx->async_mutex, NULL);
  552. pthread_mutex_lock(&fctx->async_mutex);
  553. fctx->delaying = 1;
  554. for (i = 0; i < thread_count; i++) {
  555. AVCodecContext *copy = av_malloc(sizeof(AVCodecContext));
  556. PerThreadContext *p = &fctx->threads[i];
  557. pthread_mutex_init(&p->mutex, NULL);
  558. pthread_mutex_init(&p->progress_mutex, NULL);
  559. pthread_cond_init(&p->input_cond, NULL);
  560. pthread_cond_init(&p->progress_cond, NULL);
  561. pthread_cond_init(&p->output_cond, NULL);
  562. p->frame = av_frame_alloc();
  563. if (!p->frame) {
  564. av_freep(&copy);
  565. err = AVERROR(ENOMEM);
  566. goto error;
  567. }
  568. p->parent = fctx;
  569. p->avctx = copy;
  570. if (!copy) {
  571. err = AVERROR(ENOMEM);
  572. goto error;
  573. }
  574. *copy = *src;
  575. copy->internal = av_malloc(sizeof(AVCodecInternal));
  576. if (!copy->internal) {
  577. err = AVERROR(ENOMEM);
  578. goto error;
  579. }
  580. *copy->internal = *src->internal;
  581. copy->internal->thread_ctx = p;
  582. copy->internal->last_pkt_props = &p->avpkt;
  583. if (!i) {
  584. src = copy;
  585. if (codec->init)
  586. err = codec->init(copy);
  587. update_context_from_thread(avctx, copy, 1);
  588. } else {
  589. copy->priv_data = av_malloc(codec->priv_data_size);
  590. if (!copy->priv_data) {
  591. err = AVERROR(ENOMEM);
  592. goto error;
  593. }
  594. memcpy(copy->priv_data, src->priv_data, codec->priv_data_size);
  595. copy->internal->is_copy = 1;
  596. if (codec->init_thread_copy)
  597. err = codec->init_thread_copy(copy);
  598. }
  599. if (err) goto error;
  600. if (!pthread_create(&p->thread, NULL, frame_worker_thread, p))
  601. p->thread_init = 1;
  602. }
  603. return 0;
  604. error:
  605. ff_frame_thread_free(avctx, i+1);
  606. return err;
  607. }
/**
 * Flush all frame-threading state: park the workers, reset the
 * round-robin indices and per-thread output, and forward the flush to
 * each per-thread codec context. No-op if frame threading is not set up.
 */
void ff_thread_flush(AVCodecContext *avctx)
{
    int i;
    FrameThreadContext *fctx = avctx->internal->thread_ctx;

    if (!fctx) return;

    park_frame_worker_threads(fctx, avctx->thread_count);

    /* keep thread 0's context consistent with the last one used before
     * resetting prev_thread */
    if (fctx->prev_thread) {
        if (fctx->prev_thread != &fctx->threads[0])
            update_context_from_thread(fctx->threads[0].avctx, fctx->prev_thread->avctx, 0);
    }

    fctx->next_decoding = fctx->next_finished = 0;
    fctx->delaying = 1;
    fctx->prev_thread = NULL;
    for (i = 0; i < avctx->thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];
        // Make sure decode flush calls with size=0 won't return old frames
        p->got_frame = 0;
        av_frame_unref(p->frame);

        release_delayed_buffers(p);

        if (avctx->codec->flush)
            avctx->codec->flush(p->avctx);
    }
}
/**
 * Frame-threading-aware replacement for ff_get_buffer(), called by
 * decoders from their worker thread.
 *
 * If the user's get_buffer2 is not thread-safe, the request is handed to
 * the main thread via the STATE_GET_BUFFER handshake serviced in
 * submit_packet(); otherwise the buffer is allocated directly here under
 * buffer_mutex. Also allocates the 2-field progress array when the codec
 * requested it (allocate_progress).
 *
 * @return 0 on success, negative error code on failure
 */
int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
{
    PerThreadContext *p = avctx->internal->thread_ctx;
    int err;

    f->owner = avctx;

    if (!(avctx->active_thread_type & FF_THREAD_FRAME))
        return ff_get_buffer(avctx, f->f, flags);

    /* buffers may only be requested during the setup phase unless all
     * callbacks are thread-safe and no context updates are needed */
    if (atomic_load(&p->state) != STATE_SETTING_UP &&
        (avctx->codec->update_thread_context || !avctx->thread_safe_callbacks)) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() cannot be called after ff_thread_finish_setup()\n");
        return -1;
    }

    if (avctx->internal->allocate_progress) {
        atomic_int *progress;
        f->progress = av_buffer_alloc(2 * sizeof(*progress));
        if (!f->progress) {
            return AVERROR(ENOMEM);
        }
        progress = (atomic_int*)f->progress->data;

        /* -1 == no progress reported yet, for both fields */
        atomic_init(&progress[0], -1);
        atomic_init(&progress[1], -1);
    }

    pthread_mutex_lock(&p->parent->buffer_mutex);
    if (avctx->thread_safe_callbacks ||
        avctx->get_buffer2 == avcodec_default_get_buffer2) {
        err = ff_get_buffer(avctx, f->f, flags);
    } else {
        /* post the request and wait for the main thread (in
         * submit_packet()) to execute it and restore STATE_SETTING_UP */
        p->requested_frame = f->f;
        p->requested_flags = flags;
        /* release pairs with the acquire load in submit_packet() */
        atomic_store_explicit(&p->state, STATE_GET_BUFFER, memory_order_release);
        pthread_mutex_lock(&p->progress_mutex);
        pthread_cond_signal(&p->progress_cond);

        while (atomic_load(&p->state) != STATE_SETTING_UP)
            pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

        err = p->result;

        pthread_mutex_unlock(&p->progress_mutex);

    }
    if (!avctx->thread_safe_callbacks && !avctx->codec->update_thread_context)
        ff_thread_finish_setup(avctx);

    if (err)
        av_buffer_unref(&f->progress);

    pthread_mutex_unlock(&p->parent->buffer_mutex);

    return err;
}
/**
 * Frame-threading-aware buffer release.
 *
 * When the release can be performed from any thread (no frame threading,
 * thread-safe callbacks, or the default get_buffer2), the frame is
 * unreferenced immediately. Otherwise the frame is moved onto this
 * thread's released_buffers list and freed later on the main thread by
 * release_delayed_buffers(). Also drops the progress array reference.
 */
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
{
    PerThreadContext *p = avctx->internal->thread_ctx;
    FrameThreadContext *fctx;
    AVFrame *dst, *tmp;
    int can_direct_free = !(avctx->active_thread_type & FF_THREAD_FRAME) ||
                          avctx->thread_safe_callbacks ||
                          avctx->get_buffer2 == avcodec_default_get_buffer2;

    if (!f->f || !f->f->buf[0])
        return;

    if (avctx->debug & FF_DEBUG_BUFFERS)
        av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f);

    av_buffer_unref(&f->progress);
    f->owner = NULL;

    if (can_direct_free) {
        av_frame_unref(f->f);
        return;
    }

    fctx = p->parent;
    pthread_mutex_lock(&fctx->buffer_mutex);

    /* overflow guard for the (count+1) * elem_size computation below */
    if (p->num_released_buffers + 1 >= INT_MAX / sizeof(*p->released_buffers))
        goto fail;
    tmp = av_fast_realloc(p->released_buffers, &p->released_buffers_allocated,
                          (p->num_released_buffers + 1) *
                          sizeof(*p->released_buffers));
    if (!tmp)
        goto fail;
    p->released_buffers = tmp;

    dst = &p->released_buffers[p->num_released_buffers];
    av_frame_move_ref(dst, f->f);

    p->num_released_buffers++;

fail:
    /* NOTE(review): on realloc failure the frame reference is silently
     * dropped here (f->f keeps its buffers, which then leak until the
     * caller unrefs); preserved as-is since callers may rely on it. */
    pthread_mutex_unlock(&fctx->buffer_mutex);
}