/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Frame multithreading support functions
 * @see doc/multithreading.txt
 */
#include "config.h"

#include <stdatomic.h>
#include <stdint.h>

#include "avcodec.h"
#include "hwaccel.h"
#include "internal.h"
#include "pthread_internal.h"
#include "thread.h"
#include "version.h"

#include "libavutil/avassert.h"
#include "libavutil/buffer.h"
#include "libavutil/common.h"
#include "libavutil/cpu.h"
#include "libavutil/frame.h"
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/thread.h"
enum {
    STATE_INPUT_READY,          ///< Set when the thread is awaiting a packet.
    STATE_SETTING_UP,           ///< Set before the codec has called ff_thread_finish_setup().
    /**
     * Set when the codec calls get_buffer().
     * State is returned to STATE_SETTING_UP afterwards.
     */
    STATE_GET_BUFFER,
    /**
     * Set when the codec calls get_format().
     * State is returned to STATE_SETTING_UP afterwards.
     */
    STATE_GET_FORMAT,
    STATE_SETUP_FINISHED,       ///< Set after the codec has called ff_thread_finish_setup().
};
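
/*
 * A worker's state normally cycles INPUT_READY -> SETTING_UP -> SETUP_FINISHED
 * -> INPUT_READY for each packet; the GET_BUFFER/GET_FORMAT states are only
 * entered when the user callbacks are not thread-safe and the request has to
 * be carried out on the user's thread.
 */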

/**
 * Context used by codec threads and stored in their AVCodecInternal thread_ctx.
 */
typedef struct PerThreadContext {
    struct FrameThreadContext *parent;

    pthread_t      thread;
    int            thread_init;
    pthread_cond_t input_cond;      ///< Used to wait for a new packet from the main thread.
    pthread_cond_t progress_cond;   ///< Used by child threads to wait for progress to change.
    pthread_cond_t output_cond;     ///< Used by the main thread to wait for frames to finish.

    pthread_mutex_t mutex;          ///< Mutex used to protect the contents of the PerThreadContext.
    pthread_mutex_t progress_mutex; ///< Mutex used to protect frame progress values and progress_cond.

    AVCodecContext *avctx;          ///< Context used to decode packets passed to this thread.

    AVPacket avpkt;                 ///< Input packet (for decoding) or output (for encoding).

    AVFrame *frame;                 ///< Output frame (for decoding) or input (for encoding).
    int      got_frame;             ///< The output of got_picture_ptr from the last avcodec_decode_video() call.
    int      result;                ///< The result of the last codec decode/encode() call.

    atomic_int state;

    /**
     * Array of frames passed to ff_thread_release_buffer().
     * Frames are released after all threads referencing them are finished.
     */
    AVFrame *released_buffers;
    int      num_released_buffers;
    int      released_buffers_allocated;

    AVFrame *requested_frame;       ///< AVFrame the codec passed to get_buffer()
    int      requested_flags;       ///< flags passed to get_buffer() for requested_frame

    const enum AVPixelFormat *available_formats; ///< Format array for get_format()
    enum AVPixelFormat result_format;            ///< get_format() result

    int die;                        ///< Set when the thread should exit.

    int hwaccel_serializing;
    int async_serializing;

    atomic_int debug_threads;       ///< Set if the FF_DEBUG_THREADS option is set.
} PerThreadContext;

/**
 * Context stored in the client AVCodecInternal thread_ctx.
 */
typedef struct FrameThreadContext {
    PerThreadContext *threads;     ///< The contexts for each thread.
    PerThreadContext *prev_thread; ///< The last thread submit_packet() was called on.

    pthread_mutex_t buffer_mutex;  ///< Mutex used to protect get/release_buffer().
    /**
     * This lock is used for ensuring threads run in serial when hwaccel
     * is used.
     */
    pthread_mutex_t hwaccel_mutex;
    pthread_mutex_t async_mutex;
    pthread_cond_t async_cond;
    int async_lock;

    int next_decoding;             ///< The next context to submit a packet to.
    int next_finished;             ///< The next context to return output from.

    int delaying;                  /**<
                                    * Set for the first N packets, where N is the number of threads.
                                    * While it is set, ff_thread_en/decode_frame won't return any results.
                                    */
} FrameThreadContext;
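
/*
 * The user's get_buffer2() may be called directly from a worker thread only if
 * it is thread-safe: either the user declared their callbacks thread-safe or
 * the default implementation is still in use.
 */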
#define THREAD_SAFE_CALLBACKS(avctx) \
    ((avctx)->thread_safe_callbacks || (avctx)->get_buffer2 == avcodec_default_get_buffer2)
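
/*
 * async_lock()/async_unlock() form a simple lock from a flag, a mutex and a
 * condition variable. It is held on the user's behalf whenever control is
 * outside ff_thread_decode_frame(); a worker running a non-async-safe hwaccel
 * acquires it in ff_thread_finish_setup() and drops it once its decode call
 * returns, so such hwaccel code never runs concurrently with the user's code.
 */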
static void async_lock(FrameThreadContext *fctx)
{
    pthread_mutex_lock(&fctx->async_mutex);
    while (fctx->async_lock)
        pthread_cond_wait(&fctx->async_cond, &fctx->async_mutex);
    fctx->async_lock = 1;
    pthread_mutex_unlock(&fctx->async_mutex);
}

static void async_unlock(FrameThreadContext *fctx)
{
    pthread_mutex_lock(&fctx->async_mutex);
    av_assert0(fctx->async_lock);
    fctx->async_lock = 0;
    pthread_cond_broadcast(&fctx->async_cond);
    pthread_mutex_unlock(&fctx->async_mutex);
}

/**
 * Codec worker thread.
 *
 * Automatically calls ff_thread_finish_setup() if the codec does
 * not provide an update_thread_context method, or if the codec returns
 * before calling it.
 */
static attribute_align_arg void *frame_worker_thread(void *arg)
{
    PerThreadContext *p = arg;
    AVCodecContext *avctx = p->avctx;
    const AVCodec *codec = avctx->codec;

    pthread_mutex_lock(&p->mutex);
    while (1) {
        while (atomic_load(&p->state) == STATE_INPUT_READY && !p->die)
            pthread_cond_wait(&p->input_cond, &p->mutex);

        if (p->die) break;

        if (!codec->update_thread_context && THREAD_SAFE_CALLBACKS(avctx))
            ff_thread_finish_setup(avctx);

        /* If a decoder supports hwaccel, then it must call ff_get_format().
         * Since that call must happen before ff_thread_finish_setup(), the
         * decoder is required to implement update_thread_context() and call
         * ff_thread_finish_setup() manually. Therefore the above
         * ff_thread_finish_setup() call did not happen and hwaccel_serializing
         * cannot be true here. */
        av_assert0(!p->hwaccel_serializing);

        /* if the previous thread uses hwaccel then we take the lock to ensure
         * the threads don't run concurrently */
        if (avctx->hwaccel) {
            pthread_mutex_lock(&p->parent->hwaccel_mutex);
            p->hwaccel_serializing = 1;
        }

        av_frame_unref(p->frame);
        p->got_frame = 0;
        p->result = codec->decode(avctx, p->frame, &p->got_frame, &p->avpkt);

        if ((p->result < 0 || !p->got_frame) && p->frame->buf[0]) {
            if (avctx->internal->allocate_progress)
                av_log(avctx, AV_LOG_ERROR, "A frame threaded decoder did not "
                       "free the frame on failure. This is a bug, please report it.\n");
            av_frame_unref(p->frame);
        }

        if (atomic_load(&p->state) == STATE_SETTING_UP)
            ff_thread_finish_setup(avctx);

        if (p->hwaccel_serializing) {
            p->hwaccel_serializing = 0;
            pthread_mutex_unlock(&p->parent->hwaccel_mutex);
        }

        if (p->async_serializing) {
            p->async_serializing = 0;

            async_unlock(p->parent);
        }
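
        /* Mark this worker idle again: progress_cond wakes threads blocked in
         * ff_thread_await_progress() or waiting for the setup state to change,
         * output_cond wakes the thread waiting for this frame in
         * ff_thread_decode_frame() or park_frame_worker_threads(). */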
        pthread_mutex_lock(&p->progress_mutex);
        atomic_store(&p->state, STATE_INPUT_READY);
        pthread_cond_broadcast(&p->progress_cond);
        pthread_cond_signal(&p->output_cond);
        pthread_mutex_unlock(&p->progress_mutex);
    }
    pthread_mutex_unlock(&p->mutex);

    return NULL;
}

/**
 * Update the next thread's AVCodecContext with values from the reference thread's context.
 *
 * @param dst The destination context.
 * @param src The source context.
 * @param for_user 0 if the destination is a codec thread, 1 if the destination is the user's thread
 * @return 0 on success, negative error code on failure
 */
static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src, int for_user)
{
    int err = 0;

    if (dst != src && (for_user || !(src->codec_descriptor->props & AV_CODEC_PROP_INTRA_ONLY))) {
        dst->time_base = src->time_base;
        dst->framerate = src->framerate;
        dst->width     = src->width;
        dst->height    = src->height;
        dst->pix_fmt   = src->pix_fmt;
        dst->sw_pix_fmt = src->sw_pix_fmt;

        dst->coded_width  = src->coded_width;
        dst->coded_height = src->coded_height;

        dst->has_b_frames = src->has_b_frames;
        dst->idct_algo    = src->idct_algo;

        dst->bits_per_coded_sample = src->bits_per_coded_sample;
        dst->sample_aspect_ratio   = src->sample_aspect_ratio;

        dst->profile = src->profile;
        dst->level   = src->level;

        dst->bits_per_raw_sample = src->bits_per_raw_sample;
        dst->ticks_per_frame     = src->ticks_per_frame;
        dst->color_primaries     = src->color_primaries;

        dst->color_trc   = src->color_trc;
        dst->colorspace  = src->colorspace;
        dst->color_range = src->color_range;
        dst->chroma_sample_location = src->chroma_sample_location;

        dst->hwaccel = src->hwaccel;
        dst->hwaccel_context = src->hwaccel_context;

        dst->channels       = src->channels;
        dst->sample_rate    = src->sample_rate;
        dst->sample_fmt     = src->sample_fmt;
        dst->channel_layout = src->channel_layout;
        dst->internal->hwaccel_priv_data = src->internal->hwaccel_priv_data;

        if (!!dst->hw_frames_ctx != !!src->hw_frames_ctx ||
            (dst->hw_frames_ctx && dst->hw_frames_ctx->data != src->hw_frames_ctx->data)) {
            av_buffer_unref(&dst->hw_frames_ctx);

            if (src->hw_frames_ctx) {
                dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
                if (!dst->hw_frames_ctx)
                    return AVERROR(ENOMEM);
            }
        }

        dst->hwaccel_flags = src->hwaccel_flags;
    }

    if (for_user) {
        dst->delay = src->thread_count - 1;
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
        dst->coded_frame = src->coded_frame;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    } else {
        if (dst->codec->update_thread_context)
            err = dst->codec->update_thread_context(dst, src);
    }

    return err;
}

/**
 * Update the next thread's AVCodecContext with values set by the user.
 *
 * @param dst The destination context.
 * @param src The source context.
 * @return 0 on success, negative error code on failure
 */
static int update_context_from_user(AVCodecContext *dst, AVCodecContext *src)
{
#define copy_fields(s, e) memcpy(&dst->s, &src->s, (char*)&dst->e - (char*)&dst->s);
    dst->flags          = src->flags;

    dst->draw_horiz_band= src->draw_horiz_band;
    dst->get_buffer2    = src->get_buffer2;

    dst->opaque   = src->opaque;
    dst->debug    = src->debug;
    dst->debug_mv = src->debug_mv;

    dst->slice_flags = src->slice_flags;
    dst->flags2      = src->flags2;
    dst->export_side_data = src->export_side_data;

    copy_fields(skip_loop_filter, subtitle_header);

    dst->frame_number     = src->frame_number;
    dst->reordered_opaque = src->reordered_opaque;
    dst->thread_safe_callbacks = src->thread_safe_callbacks;

    if (src->slice_count && src->slice_offset) {
        if (dst->slice_count < src->slice_count) {
            int err = av_reallocp_array(&dst->slice_offset, src->slice_count,
                                        sizeof(*dst->slice_offset));
            if (err < 0)
                return err;
        }
        memcpy(dst->slice_offset, src->slice_offset,
               src->slice_count * sizeof(*dst->slice_offset));
    }
    dst->slice_count = src->slice_count;
    return 0;
#undef copy_fields
}

/// Releases the buffers that this decoding thread was the last user of.
static void release_delayed_buffers(PerThreadContext *p)
{
    FrameThreadContext *fctx = p->parent;

    while (p->num_released_buffers > 0) {
        AVFrame *f;

        pthread_mutex_lock(&fctx->buffer_mutex);

        // fix extended data in case the caller screwed it up
        av_assert0(p->avctx->codec_type == AVMEDIA_TYPE_VIDEO ||
                   p->avctx->codec_type == AVMEDIA_TYPE_AUDIO);
        f = &p->released_buffers[--p->num_released_buffers];
        f->extended_data = f->data;
        av_frame_unref(f);

        pthread_mutex_unlock(&fctx->buffer_mutex);
    }
}
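
/*
 * Hand a new packet to worker p: wait for the previous worker to finish its
 * setup phase, copy the updated codec state from it, then wake p. If the user
 * callbacks are not thread-safe, this also services get_buffer()/get_format()
 * requests coming from p on the caller's thread until p finishes its setup.
 */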
static int submit_packet(PerThreadContext *p, AVCodecContext *user_avctx,
                         AVPacket *avpkt)
{
    FrameThreadContext *fctx = p->parent;
    PerThreadContext *prev_thread = fctx->prev_thread;
    const AVCodec *codec = p->avctx->codec;
    int ret;

    if (!avpkt->size && !(codec->capabilities & AV_CODEC_CAP_DELAY))
        return 0;

    pthread_mutex_lock(&p->mutex);

    ret = update_context_from_user(p->avctx, user_avctx);
    if (ret) {
        pthread_mutex_unlock(&p->mutex);
        return ret;
    }
    atomic_store_explicit(&p->debug_threads,
                          (p->avctx->debug & FF_DEBUG_THREADS) != 0,
                          memory_order_relaxed);

    release_delayed_buffers(p);

    if (prev_thread) {
        int err;
        if (atomic_load(&prev_thread->state) == STATE_SETTING_UP) {
            pthread_mutex_lock(&prev_thread->progress_mutex);
            while (atomic_load(&prev_thread->state) == STATE_SETTING_UP)
                pthread_cond_wait(&prev_thread->progress_cond, &prev_thread->progress_mutex);
            pthread_mutex_unlock(&prev_thread->progress_mutex);
        }

        err = update_context_from_thread(p->avctx, prev_thread->avctx, 0);
        if (err) {
            pthread_mutex_unlock(&p->mutex);
            return err;
        }
    }

    av_packet_unref(&p->avpkt);
    ret = av_packet_ref(&p->avpkt, avpkt);
    if (ret < 0) {
        pthread_mutex_unlock(&p->mutex);
        av_log(p->avctx, AV_LOG_ERROR, "av_packet_ref() failed in submit_packet()\n");
        return ret;
    }

    atomic_store(&p->state, STATE_SETTING_UP);
    pthread_cond_signal(&p->input_cond);
    pthread_mutex_unlock(&p->mutex);

    /*
     * If the client doesn't have a thread-safe get_buffer(),
     * then decoding threads call back to the main thread,
     * and it calls back to the client here.
     */

    if (!p->avctx->thread_safe_callbacks && (
         p->avctx->get_format != avcodec_default_get_format ||
         p->avctx->get_buffer2 != avcodec_default_get_buffer2)) {
        while (atomic_load(&p->state) != STATE_SETUP_FINISHED && atomic_load(&p->state) != STATE_INPUT_READY) {
            int call_done = 1;
            pthread_mutex_lock(&p->progress_mutex);
            while (atomic_load(&p->state) == STATE_SETTING_UP)
                pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

            switch (atomic_load_explicit(&p->state, memory_order_acquire)) {
            case STATE_GET_BUFFER:
                p->result = ff_get_buffer(p->avctx, p->requested_frame, p->requested_flags);
                break;
            case STATE_GET_FORMAT:
                p->result_format = ff_get_format(p->avctx, p->available_formats);
                break;
            default:
                call_done = 0;
                break;
            }
            if (call_done) {
                atomic_store(&p->state, STATE_SETTING_UP);
                pthread_cond_signal(&p->progress_cond);
            }
            pthread_mutex_unlock(&p->progress_mutex);
        }
    }

    fctx->prev_thread = p;
    fctx->next_decoding++;

    return 0;
}
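
/*
 * Entry point used by the generic decode code: submit avpkt to the next worker
 * in round-robin order, then return the oldest finished frame, if any. Output
 * therefore lags input by up to thread_count - 1 frames, which is what
 * update_context_from_thread() reports back to the user as avctx->delay.
 */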
int ff_thread_decode_frame(AVCodecContext *avctx,
                           AVFrame *picture, int *got_picture_ptr,
                           AVPacket *avpkt)
{
    FrameThreadContext *fctx = avctx->internal->thread_ctx;
    int finished = fctx->next_finished;
    PerThreadContext *p;
    int err;

    /* release the async lock, permitting blocked hwaccel threads to
     * go forward while we are in this function */
    async_unlock(fctx);

    /*
     * Submit a packet to the next decoding thread.
     */

    p = &fctx->threads[fctx->next_decoding];
    err = submit_packet(p, avctx, avpkt);
    if (err)
        goto finish;

    /*
     * If we're still receiving the initial packets, don't return a frame.
     */

    if (fctx->next_decoding > (avctx->thread_count-1-(avctx->codec_id == AV_CODEC_ID_FFV1)))
        fctx->delaying = 0;

    if (fctx->delaying) {
        *got_picture_ptr=0;
        if (avpkt->size) {
            err = avpkt->size;
            goto finish;
        }
    }

    /*
     * Return the next available frame from the oldest thread.
     * If we're at the end of the stream, then we have to skip threads that
     * didn't output a frame/error, because we don't want to accidentally signal
     * EOF (avpkt->size == 0 && *got_picture_ptr == 0 && err >= 0).
     */

    do {
        p = &fctx->threads[finished++];

        if (atomic_load(&p->state) != STATE_INPUT_READY) {
            pthread_mutex_lock(&p->progress_mutex);
            while (atomic_load_explicit(&p->state, memory_order_relaxed) != STATE_INPUT_READY)
                pthread_cond_wait(&p->output_cond, &p->progress_mutex);
            pthread_mutex_unlock(&p->progress_mutex);
        }

        av_frame_move_ref(picture, p->frame);
        *got_picture_ptr = p->got_frame;
        picture->pkt_dts = p->avpkt.dts;
        err = p->result;

        /*
         * A later call with avpkt->size == 0 may loop over all threads,
         * including this one, searching for a frame/error to return before being
         * stopped by the "finished != fctx->next_finished" condition.
         * Make sure we don't mistakenly return the same frame/error again.
         */
        p->got_frame = 0;
        p->result = 0;

        if (finished >= avctx->thread_count) finished = 0;
    } while (!avpkt->size && !*got_picture_ptr && err >= 0 && finished != fctx->next_finished);

    update_context_from_thread(avctx, p->avctx, 1);

    if (fctx->next_decoding >= avctx->thread_count) fctx->next_decoding = 0;

    fctx->next_finished = finished;

    /* return the size of the consumed packet if no error occurred */
    if (err >= 0)
        err = avpkt->size;
finish:
    async_lock(fctx);
    return err;
}
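
/*
 * Frame progress is a pair of per-field counters stored in ThreadFrame.progress.
 * As described in doc/multithreading.txt, a decoder reports how much of a frame
 * it has reconstructed, e.g. after each macroblock row:
 *     ff_thread_report_progress(&ref, mb_row, 0);
 * and a reader blocks until the rows it needs from a reference are done:
 *     ff_thread_await_progress(&ref, mb_row, 0);
 */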
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
{
    PerThreadContext *p;
    atomic_int *progress = f->progress ? (atomic_int*)f->progress->data : NULL;

    if (!progress ||
        atomic_load_explicit(&progress[field], memory_order_relaxed) >= n)
        return;

    p = f->owner[field]->internal->thread_ctx;

    if (atomic_load_explicit(&p->debug_threads, memory_order_relaxed))
        av_log(f->owner[field], AV_LOG_DEBUG,
               "%p finished %d field %d\n", progress, n, field);

    pthread_mutex_lock(&p->progress_mutex);

    atomic_store_explicit(&progress[field], n, memory_order_release);

    pthread_cond_broadcast(&p->progress_cond);
    pthread_mutex_unlock(&p->progress_mutex);
}

void ff_thread_await_progress(ThreadFrame *f, int n, int field)
{
    PerThreadContext *p;
    atomic_int *progress = f->progress ? (atomic_int*)f->progress->data : NULL;

    if (!progress ||
        atomic_load_explicit(&progress[field], memory_order_acquire) >= n)
        return;

    p = f->owner[field]->internal->thread_ctx;

    if (atomic_load_explicit(&p->debug_threads, memory_order_relaxed))
        av_log(f->owner[field], AV_LOG_DEBUG,
               "thread awaiting %d field %d from %p\n", n, field, progress);

    pthread_mutex_lock(&p->progress_mutex);
    while (atomic_load_explicit(&progress[field], memory_order_relaxed) < n)
        pthread_cond_wait(&p->progress_cond, &p->progress_mutex);
    pthread_mutex_unlock(&p->progress_mutex);
}
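
/*
 * Called by the decoder once per-packet header parsing and context setup are
 * done; from this point on the next packet may be handed to another worker,
 * so the codec must no longer modify state shared with other threads.
 */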
void ff_thread_finish_setup(AVCodecContext *avctx) {
    PerThreadContext *p = avctx->internal->thread_ctx;

    if (!(avctx->active_thread_type&FF_THREAD_FRAME)) return;

    if (avctx->hwaccel && !p->hwaccel_serializing) {
        pthread_mutex_lock(&p->parent->hwaccel_mutex);
        p->hwaccel_serializing = 1;
    }

    /* this assumes that no hwaccel calls happen before ff_thread_finish_setup() */
    if (avctx->hwaccel &&
        !(avctx->hwaccel->caps_internal & HWACCEL_CAP_ASYNC_SAFE)) {
        p->async_serializing = 1;

        async_lock(p->parent);
    }

    pthread_mutex_lock(&p->progress_mutex);
    if(atomic_load(&p->state) == STATE_SETUP_FINISHED){
        av_log(avctx, AV_LOG_WARNING, "Multiple ff_thread_finish_setup() calls\n");
    }

    atomic_store(&p->state, STATE_SETUP_FINISHED);

    pthread_cond_broadcast(&p->progress_cond);
    pthread_mutex_unlock(&p->progress_mutex);
}

/// Waits for all threads to finish.
static void park_frame_worker_threads(FrameThreadContext *fctx, int thread_count)
{
    int i;

    async_unlock(fctx);

    for (i = 0; i < thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];

        if (atomic_load(&p->state) != STATE_INPUT_READY) {
            pthread_mutex_lock(&p->progress_mutex);
            while (atomic_load(&p->state) != STATE_INPUT_READY)
                pthread_cond_wait(&p->output_cond, &p->progress_mutex);
            pthread_mutex_unlock(&p->progress_mutex);
        }
        p->got_frame = 0;
    }

    async_lock(fctx);
}

void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
{
    FrameThreadContext *fctx = avctx->internal->thread_ctx;
    const AVCodec *codec = avctx->codec;
    int i;

    park_frame_worker_threads(fctx, thread_count);

    if (fctx->prev_thread && avctx->internal->hwaccel_priv_data !=
                             fctx->prev_thread->avctx->internal->hwaccel_priv_data) {
        if (update_context_from_thread(avctx, fctx->prev_thread->avctx, 1) < 0) {
            av_log(avctx, AV_LOG_ERROR, "Failed to update user thread.\n");
        }
    }

    if (fctx->prev_thread && fctx->prev_thread != fctx->threads)
        if (update_context_from_thread(fctx->threads->avctx, fctx->prev_thread->avctx, 0) < 0) {
            av_log(avctx, AV_LOG_ERROR, "Final thread update failed\n");
            fctx->prev_thread->avctx->internal->is_copy = fctx->threads->avctx->internal->is_copy;
            fctx->threads->avctx->internal->is_copy = 1;
        }

    for (i = 0; i < thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];

        pthread_mutex_lock(&p->mutex);
        p->die = 1;
        pthread_cond_signal(&p->input_cond);
        pthread_mutex_unlock(&p->mutex);

        if (p->thread_init)
            pthread_join(p->thread, NULL);
        p->thread_init=0;

        if (codec->close && p->avctx)
            codec->close(p->avctx);

        release_delayed_buffers(p);
        av_frame_free(&p->frame);
    }

    for (i = 0; i < thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];

        pthread_mutex_destroy(&p->mutex);
        pthread_mutex_destroy(&p->progress_mutex);
        pthread_cond_destroy(&p->input_cond);
        pthread_cond_destroy(&p->progress_cond);
        pthread_cond_destroy(&p->output_cond);
        av_packet_unref(&p->avpkt);
        av_freep(&p->released_buffers);

        if (i && p->avctx) {
            av_freep(&p->avctx->priv_data);
            av_freep(&p->avctx->slice_offset);
        }

        if (p->avctx) {
            av_freep(&p->avctx->internal);
            av_buffer_unref(&p->avctx->hw_frames_ctx);
        }

        av_freep(&p->avctx);
    }

    av_freep(&fctx->threads);
    pthread_mutex_destroy(&fctx->buffer_mutex);
    pthread_mutex_destroy(&fctx->hwaccel_mutex);

    pthread_mutex_destroy(&fctx->async_mutex);
    pthread_cond_destroy(&fctx->async_cond);

    av_freep(&avctx->internal->thread_ctx);

    if (avctx->priv_data && avctx->codec && avctx->codec->priv_class)
        av_opt_free(avctx->priv_data);

    avctx->codec = NULL;
}
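
/*
 * Set up frame threading: every worker gets its own copy of the AVCodecContext
 * (and, for workers past the first, of the codec private data), so each context
 * is only touched by one thread at a time and state is passed along through
 * update_context_from_thread().
 */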
int ff_frame_thread_init(AVCodecContext *avctx)
{
    int thread_count = avctx->thread_count;
    const AVCodec *codec = avctx->codec;
    AVCodecContext *src = avctx;
    FrameThreadContext *fctx;
    int i, err = 0;

    if (!thread_count) {
        int nb_cpus = av_cpu_count();
#if FF_API_DEBUG_MV
        if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) || avctx->debug_mv)
            nb_cpus = 1;
#endif
        // use number of cores + 1 as thread count if there is more than one
        if (nb_cpus > 1)
            thread_count = avctx->thread_count = FFMIN(nb_cpus + 1, MAX_AUTO_THREADS);
        else
            thread_count = avctx->thread_count = 1;
    }

    if (thread_count <= 1) {
        avctx->active_thread_type = 0;
        return 0;
    }

    avctx->internal->thread_ctx = fctx = av_mallocz(sizeof(FrameThreadContext));
    if (!fctx)
        return AVERROR(ENOMEM);

    fctx->threads = av_mallocz_array(thread_count, sizeof(PerThreadContext));
    if (!fctx->threads) {
        av_freep(&avctx->internal->thread_ctx);
        return AVERROR(ENOMEM);
    }

    pthread_mutex_init(&fctx->buffer_mutex, NULL);
    pthread_mutex_init(&fctx->hwaccel_mutex, NULL);
    pthread_mutex_init(&fctx->async_mutex, NULL);
    pthread_cond_init(&fctx->async_cond, NULL);

    fctx->async_lock = 1;
    fctx->delaying = 1;

    for (i = 0; i < thread_count; i++) {
        AVCodecContext *copy = av_malloc(sizeof(AVCodecContext));
        PerThreadContext *p  = &fctx->threads[i];

        pthread_mutex_init(&p->mutex, NULL);
        pthread_mutex_init(&p->progress_mutex, NULL);
        pthread_cond_init(&p->input_cond, NULL);
        pthread_cond_init(&p->progress_cond, NULL);
        pthread_cond_init(&p->output_cond, NULL);

        p->frame = av_frame_alloc();
        if (!p->frame) {
            av_freep(&copy);
            err = AVERROR(ENOMEM);
            goto error;
        }

        p->parent = fctx;
        p->avctx  = copy;

        if (!copy) {
            err = AVERROR(ENOMEM);
            goto error;
        }

        *copy = *src;

        copy->internal = av_malloc(sizeof(AVCodecInternal));
        if (!copy->internal) {
            copy->priv_data = NULL;
            err = AVERROR(ENOMEM);
            goto error;
        }
        *copy->internal = *src->internal;
        copy->internal->thread_ctx = p;
        copy->internal->last_pkt_props = &p->avpkt;

        if (!i) {
            src = copy;

            if (codec->init)
                err = codec->init(copy);

            update_context_from_thread(avctx, copy, 1);
        } else {
            copy->priv_data = av_malloc(codec->priv_data_size);
            if (!copy->priv_data) {
                err = AVERROR(ENOMEM);
                goto error;
            }
            memcpy(copy->priv_data, src->priv_data, codec->priv_data_size);
            copy->internal->is_copy = 1;

            if (codec->init_thread_copy)
                err = codec->init_thread_copy(copy);
        }

        if (err) goto error;

        atomic_init(&p->debug_threads, (copy->debug & FF_DEBUG_THREADS) != 0);

        err = AVERROR(pthread_create(&p->thread, NULL, frame_worker_thread, p));
        p->thread_init= !err;
        if(!p->thread_init)
            goto error;
    }

    return 0;

error:
    ff_frame_thread_free(avctx, i+1);

    return err;
}

void ff_thread_flush(AVCodecContext *avctx)
{
    int i;
    FrameThreadContext *fctx = avctx->internal->thread_ctx;

    if (!fctx) return;

    park_frame_worker_threads(fctx, avctx->thread_count);
    if (fctx->prev_thread) {
        if (fctx->prev_thread != &fctx->threads[0])
            update_context_from_thread(fctx->threads[0].avctx, fctx->prev_thread->avctx, 0);
    }

    fctx->next_decoding = fctx->next_finished = 0;
    fctx->delaying = 1;
    fctx->prev_thread = NULL;
    for (i = 0; i < avctx->thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];
        // Make sure decode flush calls with size=0 won't return old frames
        p->got_frame = 0;
        av_frame_unref(p->frame);
        p->result = 0;

        release_delayed_buffers(p);

        if (avctx->codec->flush)
            avctx->codec->flush(p->avctx);
    }
}

int ff_thread_can_start_frame(AVCodecContext *avctx)
{
    PerThreadContext *p = avctx->internal->thread_ctx;
    if ((avctx->active_thread_type&FF_THREAD_FRAME) && atomic_load(&p->state) != STATE_SETTING_UP &&
        (avctx->codec->update_thread_context || !THREAD_SAFE_CALLBACKS(avctx))) {
        return 0;
    }
    return 1;
}
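
/*
 * Allocate a buffer on behalf of a worker thread. With thread-safe callbacks
 * the allocation happens right here on the worker; otherwise the request is
 * published via STATE_GET_BUFFER and the actual ff_get_buffer() call is made
 * on the user's thread inside submit_packet().
 */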
static int thread_get_buffer_internal(AVCodecContext *avctx, ThreadFrame *f, int flags)
{
    PerThreadContext *p = avctx->internal->thread_ctx;
    int err;

    f->owner[0] = f->owner[1] = avctx;

    if (!(avctx->active_thread_type & FF_THREAD_FRAME))
        return ff_get_buffer(avctx, f->f, flags);

    if (atomic_load(&p->state) != STATE_SETTING_UP &&
        (avctx->codec->update_thread_context || !THREAD_SAFE_CALLBACKS(avctx))) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() cannot be called after ff_thread_finish_setup()\n");
        return -1;
    }

    if (avctx->internal->allocate_progress) {
        atomic_int *progress;
        f->progress = av_buffer_alloc(2 * sizeof(*progress));
        if (!f->progress) {
            return AVERROR(ENOMEM);
        }
        progress = (atomic_int*)f->progress->data;

        atomic_init(&progress[0], -1);
        atomic_init(&progress[1], -1);
    }

    pthread_mutex_lock(&p->parent->buffer_mutex);
    if (THREAD_SAFE_CALLBACKS(avctx)) {
        err = ff_get_buffer(avctx, f->f, flags);
    } else {
        pthread_mutex_lock(&p->progress_mutex);
        p->requested_frame = f->f;
        p->requested_flags = flags;
        atomic_store_explicit(&p->state, STATE_GET_BUFFER, memory_order_release);
        pthread_cond_broadcast(&p->progress_cond);

        while (atomic_load(&p->state) != STATE_SETTING_UP)
            pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

        err = p->result;

        pthread_mutex_unlock(&p->progress_mutex);
    }
    if (!THREAD_SAFE_CALLBACKS(avctx) && !avctx->codec->update_thread_context)
        ff_thread_finish_setup(avctx);
    if (err)
        av_buffer_unref(&f->progress);

    pthread_mutex_unlock(&p->parent->buffer_mutex);

    return err;
}

enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
{
    enum AVPixelFormat res;
    PerThreadContext *p = avctx->internal->thread_ctx;
    if (!(avctx->active_thread_type & FF_THREAD_FRAME) || avctx->thread_safe_callbacks ||
        avctx->get_format == avcodec_default_get_format)
        return ff_get_format(avctx, fmt);
    if (atomic_load(&p->state) != STATE_SETTING_UP) {
        av_log(avctx, AV_LOG_ERROR, "get_format() cannot be called after ff_thread_finish_setup()\n");
        return -1;
    }
    pthread_mutex_lock(&p->progress_mutex);
    p->available_formats = fmt;
    atomic_store(&p->state, STATE_GET_FORMAT);
    pthread_cond_broadcast(&p->progress_cond);

    while (atomic_load(&p->state) != STATE_SETTING_UP)
        pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

    res = p->result_format;

    pthread_mutex_unlock(&p->progress_mutex);

    return res;
}

int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
{
    int ret = thread_get_buffer_internal(avctx, f, flags);
    if (ret < 0)
        av_log(avctx, AV_LOG_ERROR, "thread_get_buffer() failed\n");
    return ret;
}
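
/*
 * Release a frame from a worker thread. When the callbacks are not thread-safe
 * the buffers must not be returned to the user from this thread, so the frame
 * is queued on released_buffers and unreferenced later on the user's thread by
 * release_delayed_buffers().
 */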
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
{
    PerThreadContext *p = avctx->internal->thread_ctx;
    FrameThreadContext *fctx;
    AVFrame *dst, *tmp;
    int can_direct_free = !(avctx->active_thread_type & FF_THREAD_FRAME) ||
                          THREAD_SAFE_CALLBACKS(avctx);

    if (!f->f || !f->f->buf[0])
        return;

    if (avctx->debug & FF_DEBUG_BUFFERS)
        av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f);

    av_buffer_unref(&f->progress);
    f->owner[0] = f->owner[1] = NULL;

    if (can_direct_free) {
        av_frame_unref(f->f);
        return;
    }

    fctx = p->parent;
    pthread_mutex_lock(&fctx->buffer_mutex);

    if (p->num_released_buffers + 1 >= INT_MAX / sizeof(*p->released_buffers))
        goto fail;
    tmp = av_fast_realloc(p->released_buffers, &p->released_buffers_allocated,
                          (p->num_released_buffers + 1) *
                          sizeof(*p->released_buffers));
    if (!tmp)
        goto fail;
    p->released_buffers = tmp;

    dst = &p->released_buffers[p->num_released_buffers];
    av_frame_move_ref(dst, f->f);

    p->num_released_buffers++;

fail:
    pthread_mutex_unlock(&fctx->buffer_mutex);
}