/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Frame multithreading support functions
 * @see doc/multithreading.txt
 */

#include "config.h"

#include <stdatomic.h>
#include <stdint.h>

#include "avcodec.h"
#include "hwconfig.h"
#include "internal.h"
#include "pthread_internal.h"
#include "thread.h"
#include "version.h"

#include "libavutil/avassert.h"
#include "libavutil/buffer.h"
#include "libavutil/common.h"
#include "libavutil/cpu.h"
#include "libavutil/frame.h"
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/thread.h"

enum {
    STATE_INPUT_READY,          ///< Set when the thread is awaiting a packet.
    STATE_SETTING_UP,           ///< Set before the codec has called ff_thread_finish_setup().
    /**
     * Set when the codec calls get_buffer().
     * State is returned to STATE_SETTING_UP afterwards.
     */
    STATE_GET_BUFFER,
    /**
     * Set when the codec calls get_format().
     * State is returned to STATE_SETTING_UP afterwards.
     */
    STATE_GET_FORMAT,
    STATE_SETUP_FINISHED,       ///< Set after the codec has called ff_thread_finish_setup().
};
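
/*
 * A worker cycles through these states as follows: submit_packet() moves it
 * from STATE_INPUT_READY to STATE_SETTING_UP when a packet is handed over;
 * while setting up, it may briefly switch to STATE_GET_BUFFER or
 * STATE_GET_FORMAT so that the user's thread can run a non-thread-safe
 * callback on its behalf, returning to STATE_SETTING_UP each time;
 * ff_thread_finish_setup() moves it to STATE_SETUP_FINISHED, and it goes
 * back to STATE_INPUT_READY once its decode call has returned.
 */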

/**
 * Context used by codec threads and stored in their AVCodecInternal thread_ctx.
 */
typedef struct PerThreadContext {
    struct FrameThreadContext *parent;

    pthread_t      thread;
    int            thread_init;
    pthread_cond_t input_cond;      ///< Used to wait for a new packet from the main thread.
    pthread_cond_t progress_cond;   ///< Used by child threads to wait for progress to change.
    pthread_cond_t output_cond;     ///< Used by the main thread to wait for frames to finish.

    pthread_mutex_t mutex;          ///< Mutex used to protect the contents of the PerThreadContext.
    pthread_mutex_t progress_mutex; ///< Mutex used to protect frame progress values and progress_cond.

    AVCodecContext *avctx;          ///< Context used to decode packets passed to this thread.

    AVPacket avpkt;                 ///< Input packet (for decoding) or output (for encoding).

    AVFrame *frame;                 ///< Output frame (for decoding) or input (for encoding).
    int      got_frame;             ///< The output of got_picture_ptr from the last avcodec_decode_video() call.
    int      result;                ///< The result of the last codec decode/encode() call.

    atomic_int state;

    /**
     * Array of frames passed to ff_thread_release_buffer().
     * Frames are released after all threads referencing them are finished.
     */
    AVFrame **released_buffers;
    int       num_released_buffers;
    int       released_buffers_allocated;

    AVFrame *requested_frame;       ///< AVFrame the codec passed to get_buffer()
    int      requested_flags;       ///< flags passed to get_buffer() for requested_frame

    const enum AVPixelFormat *available_formats; ///< Format array for get_format()
    enum AVPixelFormat result_format;            ///< get_format() result

    int die;                        ///< Set when the thread should exit.

    int hwaccel_serializing;
    int async_serializing;

    atomic_int debug_threads;       ///< Set if the FF_DEBUG_THREADS option is set.
} PerThreadContext;

/**
 * Context stored in the client AVCodecInternal thread_ctx.
 */
typedef struct FrameThreadContext {
    PerThreadContext *threads;     ///< The contexts for each thread.
    PerThreadContext *prev_thread; ///< The last thread submit_packet() was called on.

    pthread_mutex_t buffer_mutex;  ///< Mutex used to protect get/release_buffer().
    /**
     * This lock is used for ensuring threads run in serial when hwaccel
     * is used.
     */
    pthread_mutex_t hwaccel_mutex;
    pthread_mutex_t async_mutex;
    pthread_cond_t  async_cond;
    int             async_lock;

    int next_decoding;             ///< The next context to submit a packet to.
    int next_finished;             ///< The next context to return output from.

    int delaying;                  /**<
                                    * Set for the first N packets, where N is the number of threads.
                                    * While it is set, ff_thread_en/decode_frame won't return any results.
                                    */
} FrameThreadContext;

#define THREAD_SAFE_CALLBACKS(avctx) \
((avctx)->thread_safe_callbacks || (avctx)->get_buffer2 == avcodec_default_get_buffer2)
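
/*
 * When THREAD_SAFE_CALLBACKS() evaluates to true, ff_thread_get_buffer() may
 * invoke the user's get_buffer2() directly from a worker thread.  Otherwise
 * the worker parks itself in STATE_GET_BUFFER / STATE_GET_FORMAT and the loop
 * at the end of submit_packet() runs the callback on the user's thread on its
 * behalf.
 */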

static void async_lock(FrameThreadContext *fctx)
{
    pthread_mutex_lock(&fctx->async_mutex);
    while (fctx->async_lock)
        pthread_cond_wait(&fctx->async_cond, &fctx->async_mutex);
    fctx->async_lock = 1;
    pthread_mutex_unlock(&fctx->async_mutex);
}

static void async_unlock(FrameThreadContext *fctx)
{
    pthread_mutex_lock(&fctx->async_mutex);
    av_assert0(fctx->async_lock);
    fctx->async_lock = 0;
    pthread_cond_broadcast(&fctx->async_cond);
    pthread_mutex_unlock(&fctx->async_mutex);
}
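
/*
 * The async lock guards the user-facing side of the API: it is taken in
 * ff_frame_thread_init() and is only dropped for the duration of
 * ff_thread_decode_frame() and park_frame_worker_threads().  Workers decoding
 * for a hwaccel that is not marked HWACCEL_CAP_ASYNC_SAFE take it in
 * ff_thread_finish_setup() and release it once their frame is done, so that
 * such hwaccel work never overlaps with the caller's own use of the context.
 */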

/**
 * Codec worker thread.
 *
 * Automatically calls ff_thread_finish_setup() if the codec does
 * not provide an update_thread_context method, or if the codec returns
 * before calling it.
 */
static attribute_align_arg void *frame_worker_thread(void *arg)
{
    PerThreadContext *p = arg;
    AVCodecContext *avctx = p->avctx;
    const AVCodec *codec = avctx->codec;

    pthread_mutex_lock(&p->mutex);
    while (1) {
        while (atomic_load(&p->state) == STATE_INPUT_READY && !p->die)
            pthread_cond_wait(&p->input_cond, &p->mutex);

        if (p->die) break;

        if (!codec->update_thread_context && THREAD_SAFE_CALLBACKS(avctx))
            ff_thread_finish_setup(avctx);

        /* If a decoder supports hwaccel, then it must call ff_get_format().
         * Since that call must happen before ff_thread_finish_setup(), the
         * decoder is required to implement update_thread_context() and call
         * ff_thread_finish_setup() manually. Therefore the above
         * ff_thread_finish_setup() call did not happen and hwaccel_serializing
         * cannot be true here. */
        av_assert0(!p->hwaccel_serializing);

        /* if the previous thread uses hwaccel then we take the lock to ensure
         * the threads don't run concurrently */
        if (avctx->hwaccel) {
            pthread_mutex_lock(&p->parent->hwaccel_mutex);
            p->hwaccel_serializing = 1;
        }

        av_frame_unref(p->frame);
        p->got_frame = 0;
        p->result = codec->decode(avctx, p->frame, &p->got_frame, &p->avpkt);

        if ((p->result < 0 || !p->got_frame) && p->frame->buf[0]) {
            if (avctx->codec->caps_internal & FF_CODEC_CAP_ALLOCATE_PROGRESS)
                av_log(avctx, AV_LOG_ERROR, "A frame threaded decoder did not "
                       "free the frame on failure. This is a bug, please report it.\n");
            av_frame_unref(p->frame);
        }

        if (atomic_load(&p->state) == STATE_SETTING_UP)
            ff_thread_finish_setup(avctx);

        if (p->hwaccel_serializing) {
            p->hwaccel_serializing = 0;
            pthread_mutex_unlock(&p->parent->hwaccel_mutex);
        }

        if (p->async_serializing) {
            p->async_serializing = 0;
            async_unlock(p->parent);
        }

        pthread_mutex_lock(&p->progress_mutex);

        atomic_store(&p->state, STATE_INPUT_READY);

        pthread_cond_broadcast(&p->progress_cond);
        pthread_cond_signal(&p->output_cond);
        pthread_mutex_unlock(&p->progress_mutex);
    }
    pthread_mutex_unlock(&p->mutex);

    return NULL;
}

/**
 * Update the next thread's AVCodecContext with values from the reference thread's context.
 *
 * @param dst The destination context.
 * @param src The source context.
 * @param for_user 0 if the destination is a codec thread, 1 if the destination is the user's thread
 * @return 0 on success, negative error code on failure
 */
static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src, int for_user)
{
    int err = 0;

    if (dst != src && (for_user || src->codec->update_thread_context)) {
        dst->time_base  = src->time_base;
        dst->framerate  = src->framerate;
        dst->width      = src->width;
        dst->height     = src->height;
        dst->pix_fmt    = src->pix_fmt;
        dst->sw_pix_fmt = src->sw_pix_fmt;

        dst->coded_width  = src->coded_width;
        dst->coded_height = src->coded_height;

        dst->has_b_frames = src->has_b_frames;
        dst->idct_algo    = src->idct_algo;

        dst->bits_per_coded_sample = src->bits_per_coded_sample;
        dst->sample_aspect_ratio   = src->sample_aspect_ratio;

        dst->profile = src->profile;
        dst->level   = src->level;

        dst->bits_per_raw_sample = src->bits_per_raw_sample;
        dst->ticks_per_frame     = src->ticks_per_frame;
        dst->color_primaries     = src->color_primaries;

        dst->color_trc   = src->color_trc;
        dst->colorspace  = src->colorspace;
        dst->color_range = src->color_range;
        dst->chroma_sample_location = src->chroma_sample_location;

        dst->hwaccel         = src->hwaccel;
        dst->hwaccel_context = src->hwaccel_context;

        dst->channels       = src->channels;
        dst->sample_rate    = src->sample_rate;
        dst->sample_fmt     = src->sample_fmt;
        dst->channel_layout = src->channel_layout;
        dst->internal->hwaccel_priv_data = src->internal->hwaccel_priv_data;

        if (!!dst->hw_frames_ctx != !!src->hw_frames_ctx ||
            (dst->hw_frames_ctx && dst->hw_frames_ctx->data != src->hw_frames_ctx->data)) {
            av_buffer_unref(&dst->hw_frames_ctx);

            if (src->hw_frames_ctx) {
                dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
                if (!dst->hw_frames_ctx)
                    return AVERROR(ENOMEM);
            }
        }

        dst->hwaccel_flags = src->hwaccel_flags;

        if (!!dst->internal->pool != !!src->internal->pool ||
            (dst->internal->pool && dst->internal->pool->data != src->internal->pool->data)) {
            av_buffer_unref(&dst->internal->pool);

            if (src->internal->pool) {
                dst->internal->pool = av_buffer_ref(src->internal->pool);
                if (!dst->internal->pool)
                    return AVERROR(ENOMEM);
            }
        }
    }

    if (for_user) {
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
        dst->coded_frame = src->coded_frame;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    } else {
        if (dst->codec->update_thread_context)
            err = dst->codec->update_thread_context(dst, src);
    }

    return err;
}

/**
 * Update the next thread's AVCodecContext with values set by the user.
 *
 * @param dst The destination context.
 * @param src The source context.
 * @return 0 on success, negative error code on failure
 */
static int update_context_from_user(AVCodecContext *dst, AVCodecContext *src)
{
    dst->flags           = src->flags;

    dst->draw_horiz_band = src->draw_horiz_band;
    dst->get_buffer2     = src->get_buffer2;

    dst->opaque   = src->opaque;
    dst->debug    = src->debug;
    dst->debug_mv = src->debug_mv;

    dst->slice_flags = src->slice_flags;
    dst->flags2      = src->flags2;
    dst->export_side_data = src->export_side_data;

    dst->skip_loop_filter = src->skip_loop_filter;
    dst->skip_idct        = src->skip_idct;
    dst->skip_frame       = src->skip_frame;

    dst->frame_number     = src->frame_number;
    dst->reordered_opaque = src->reordered_opaque;
    dst->thread_safe_callbacks = src->thread_safe_callbacks;

    if (src->slice_count && src->slice_offset) {
        if (dst->slice_count < src->slice_count) {
            int err = av_reallocp_array(&dst->slice_offset, src->slice_count,
                                        sizeof(*dst->slice_offset));
            if (err < 0)
                return err;
        }
        memcpy(dst->slice_offset, src->slice_offset,
               src->slice_count * sizeof(*dst->slice_offset));
    }
    dst->slice_count = src->slice_count;
    return 0;
}

/// Releases the buffers that this decoding thread was the last user of.
static void release_delayed_buffers(PerThreadContext *p)
{
    FrameThreadContext *fctx = p->parent;

    while (p->num_released_buffers > 0) {
        AVFrame *f;

        pthread_mutex_lock(&fctx->buffer_mutex);

        // fix extended data in case the caller screwed it up
        av_assert0(p->avctx->codec_type == AVMEDIA_TYPE_VIDEO ||
                   p->avctx->codec_type == AVMEDIA_TYPE_AUDIO);
        f = p->released_buffers[--p->num_released_buffers];
        f->extended_data = f->data;
        av_frame_unref(f);

        pthread_mutex_unlock(&fctx->buffer_mutex);
    }
}

static int submit_packet(PerThreadContext *p, AVCodecContext *user_avctx,
                         AVPacket *avpkt)
{
    FrameThreadContext *fctx = p->parent;
    PerThreadContext *prev_thread = fctx->prev_thread;
    const AVCodec *codec = p->avctx->codec;
    int ret;

    if (!avpkt->size && !(codec->capabilities & AV_CODEC_CAP_DELAY))
        return 0;

    pthread_mutex_lock(&p->mutex);

    ret = update_context_from_user(p->avctx, user_avctx);
    if (ret) {
        pthread_mutex_unlock(&p->mutex);
        return ret;
    }
    atomic_store_explicit(&p->debug_threads,
                          (p->avctx->debug & FF_DEBUG_THREADS) != 0,
                          memory_order_relaxed);

    release_delayed_buffers(p);

    if (prev_thread) {
        int err;
        if (atomic_load(&prev_thread->state) == STATE_SETTING_UP) {
            pthread_mutex_lock(&prev_thread->progress_mutex);
            while (atomic_load(&prev_thread->state) == STATE_SETTING_UP)
                pthread_cond_wait(&prev_thread->progress_cond, &prev_thread->progress_mutex);
            pthread_mutex_unlock(&prev_thread->progress_mutex);
        }

        err = update_context_from_thread(p->avctx, prev_thread->avctx, 0);
        if (err) {
            pthread_mutex_unlock(&p->mutex);
            return err;
        }
    }

    av_packet_unref(&p->avpkt);
    ret = av_packet_ref(&p->avpkt, avpkt);
    if (ret < 0) {
        pthread_mutex_unlock(&p->mutex);
        av_log(p->avctx, AV_LOG_ERROR, "av_packet_ref() failed in submit_packet()\n");
        return ret;
    }

    atomic_store(&p->state, STATE_SETTING_UP);
    pthread_cond_signal(&p->input_cond);
    pthread_mutex_unlock(&p->mutex);

    /*
     * If the client doesn't have a thread-safe get_buffer(),
     * then decoding threads call back to the main thread,
     * and it calls back to the client here.
     */

    if (!p->avctx->thread_safe_callbacks && (
         p->avctx->get_format != avcodec_default_get_format ||
         p->avctx->get_buffer2 != avcodec_default_get_buffer2)) {
        while (atomic_load(&p->state) != STATE_SETUP_FINISHED && atomic_load(&p->state) != STATE_INPUT_READY) {
            int call_done = 1;
            pthread_mutex_lock(&p->progress_mutex);
            while (atomic_load(&p->state) == STATE_SETTING_UP)
                pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

            switch (atomic_load_explicit(&p->state, memory_order_acquire)) {
            case STATE_GET_BUFFER:
                p->result = ff_get_buffer(p->avctx, p->requested_frame, p->requested_flags);
                break;
            case STATE_GET_FORMAT:
                p->result_format = ff_get_format(p->avctx, p->available_formats);
                break;
            default:
                call_done = 0;
                break;
            }
            if (call_done) {
                atomic_store(&p->state, STATE_SETTING_UP);
                pthread_cond_signal(&p->progress_cond);
            }
            pthread_mutex_unlock(&p->progress_mutex);
        }
    }

    fctx->prev_thread = p;
    fctx->next_decoding++;

    return 0;
}

int ff_thread_decode_frame(AVCodecContext *avctx,
                           AVFrame *picture, int *got_picture_ptr,
                           AVPacket *avpkt)
{
    FrameThreadContext *fctx = avctx->internal->thread_ctx;
    int finished = fctx->next_finished;
    PerThreadContext *p;
    int err;

    /* release the async lock, permitting blocked hwaccel threads to
     * go forward while we are in this function */
    async_unlock(fctx);

    /*
     * Submit a packet to the next decoding thread.
     */

    p = &fctx->threads[fctx->next_decoding];
    err = submit_packet(p, avctx, avpkt);
    if (err)
        goto finish;

    /*
     * If we're still receiving the initial packets, don't return a frame.
     */

    if (fctx->next_decoding > (avctx->thread_count-1-(avctx->codec_id == AV_CODEC_ID_FFV1)))
        fctx->delaying = 0;

    if (fctx->delaying) {
        *got_picture_ptr=0;
        if (avpkt->size) {
            err = avpkt->size;
            goto finish;
        }
    }

    /*
     * Return the next available frame from the oldest thread.
     * If we're at the end of the stream, then we have to skip threads that
     * didn't output a frame/error, because we don't want to accidentally signal
     * EOF (avpkt->size == 0 && *got_picture_ptr == 0 && err >= 0).
     */

    do {
        p = &fctx->threads[finished++];

        if (atomic_load(&p->state) != STATE_INPUT_READY) {
            pthread_mutex_lock(&p->progress_mutex);
            while (atomic_load_explicit(&p->state, memory_order_relaxed) != STATE_INPUT_READY)
                pthread_cond_wait(&p->output_cond, &p->progress_mutex);
            pthread_mutex_unlock(&p->progress_mutex);
        }

        av_frame_move_ref(picture, p->frame);
        *got_picture_ptr = p->got_frame;
        picture->pkt_dts = p->avpkt.dts;
        err = p->result;

        /*
         * A later call with avpkt->size == 0 may loop over all threads,
         * including this one, searching for a frame/error to return before being
         * stopped by the "finished != fctx->next_finished" condition.
         * Make sure we don't mistakenly return the same frame/error again.
         */
        p->got_frame = 0;
        p->result = 0;

        if (finished >= avctx->thread_count) finished = 0;
    } while (!avpkt->size && !*got_picture_ptr && err >= 0 && finished != fctx->next_finished);

    update_context_from_thread(avctx, p->avctx, 1);

    if (fctx->next_decoding >= avctx->thread_count) fctx->next_decoding = 0;

    fctx->next_finished = finished;

    /* return the size of the consumed packet if no error occurred */
    if (err >= 0)
        err = avpkt->size;
finish:
    async_lock(fctx);
    return err;
}

void ff_thread_report_progress(ThreadFrame *f, int n, int field)
{
    PerThreadContext *p;
    atomic_int *progress = f->progress ? (atomic_int*)f->progress->data : NULL;

    if (!progress ||
        atomic_load_explicit(&progress[field], memory_order_relaxed) >= n)
        return;

    p = f->owner[field]->internal->thread_ctx;

    if (atomic_load_explicit(&p->debug_threads, memory_order_relaxed))
        av_log(f->owner[field], AV_LOG_DEBUG,
               "%p finished %d field %d\n", progress, n, field);

    pthread_mutex_lock(&p->progress_mutex);

    atomic_store_explicit(&progress[field], n, memory_order_release);

    pthread_cond_broadcast(&p->progress_cond);
    pthread_mutex_unlock(&p->progress_mutex);
}

void ff_thread_await_progress(ThreadFrame *f, int n, int field)
{
    PerThreadContext *p;
    atomic_int *progress = f->progress ? (atomic_int*)f->progress->data : NULL;

    if (!progress ||
        atomic_load_explicit(&progress[field], memory_order_acquire) >= n)
        return;

    p = f->owner[field]->internal->thread_ctx;

    if (atomic_load_explicit(&p->debug_threads, memory_order_relaxed))
        av_log(f->owner[field], AV_LOG_DEBUG,
               "thread awaiting %d field %d from %p\n", n, field, progress);

    pthread_mutex_lock(&p->progress_mutex);
    while (atomic_load_explicit(&progress[field], memory_order_relaxed) < n)
        pthread_cond_wait(&p->progress_cond, &p->progress_mutex);
    pthread_mutex_unlock(&p->progress_mutex);
}
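
/*
 * Typical decoder-side pairing of the two calls above (a minimal sketch;
 * "cur" and "ref" are illustrative names for the decoder's ThreadFrames,
 * see doc/multithreading.txt for the full rules):
 *
 *     // producer: after finishing macroblock row mb_y of the current frame
 *     ff_thread_report_progress(&cur->tf, mb_y, 0);
 *
 *     // consumer: before reading row mb_y of a reference frame
 *     ff_thread_await_progress(&ref->tf, mb_y, 0);
 *
 * The "field" argument is 0 here; field-based decoders track the two fields
 * separately, which is why two progress values are allocated per frame in
 * thread_get_buffer_internal().
 */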

void ff_thread_finish_setup(AVCodecContext *avctx) {
    PerThreadContext *p = avctx->internal->thread_ctx;

    if (!(avctx->active_thread_type&FF_THREAD_FRAME)) return;

    if (avctx->hwaccel && !p->hwaccel_serializing) {
        pthread_mutex_lock(&p->parent->hwaccel_mutex);
        p->hwaccel_serializing = 1;
    }

    /* this assumes that no hwaccel calls happen before ff_thread_finish_setup() */
    if (avctx->hwaccel &&
        !(avctx->hwaccel->caps_internal & HWACCEL_CAP_ASYNC_SAFE)) {
        p->async_serializing = 1;

        async_lock(p->parent);
    }

    pthread_mutex_lock(&p->progress_mutex);
    if(atomic_load(&p->state) == STATE_SETUP_FINISHED){
        av_log(avctx, AV_LOG_WARNING, "Multiple ff_thread_finish_setup() calls\n");
    }

    atomic_store(&p->state, STATE_SETUP_FINISHED);

    pthread_cond_broadcast(&p->progress_cond);
    pthread_mutex_unlock(&p->progress_mutex);
}
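
/*
 * Decoders that provide update_thread_context() are expected to call
 * ff_thread_finish_setup() themselves as soon as all context state the next
 * thread will copy is in place; for other codecs, frame_worker_thread() and
 * thread_get_buffer_internal() call it automatically (with an additional
 * fallback after codec->decode() for codecs that return without calling it).
 */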

/// Waits for all threads to finish.
static void park_frame_worker_threads(FrameThreadContext *fctx, int thread_count)
{
    int i;

    async_unlock(fctx);

    for (i = 0; i < thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];

        if (atomic_load(&p->state) != STATE_INPUT_READY) {
            pthread_mutex_lock(&p->progress_mutex);
            while (atomic_load(&p->state) != STATE_INPUT_READY)
                pthread_cond_wait(&p->output_cond, &p->progress_mutex);
            pthread_mutex_unlock(&p->progress_mutex);
        }
        p->got_frame = 0;
    }

    async_lock(fctx);
}

void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
{
    FrameThreadContext *fctx = avctx->internal->thread_ctx;
    const AVCodec *codec = avctx->codec;
    int i, j;

    park_frame_worker_threads(fctx, thread_count);

    if (fctx->prev_thread && avctx->internal->hwaccel_priv_data !=
                             fctx->prev_thread->avctx->internal->hwaccel_priv_data) {
        if (update_context_from_thread(avctx, fctx->prev_thread->avctx, 1) < 0) {
            av_log(avctx, AV_LOG_ERROR, "Failed to update user thread.\n");
        }
    }

    if (fctx->prev_thread && fctx->prev_thread != fctx->threads)
        if (update_context_from_thread(fctx->threads->avctx, fctx->prev_thread->avctx, 0) < 0) {
            av_log(avctx, AV_LOG_ERROR, "Final thread update failed\n");
            fctx->prev_thread->avctx->internal->is_copy = fctx->threads->avctx->internal->is_copy;
            fctx->threads->avctx->internal->is_copy = 1;
        }

    for (i = 0; i < thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];

        pthread_mutex_lock(&p->mutex);
        p->die = 1;
        pthread_cond_signal(&p->input_cond);
        pthread_mutex_unlock(&p->mutex);

        if (p->thread_init)
            pthread_join(p->thread, NULL);
        p->thread_init=0;

        if (codec->close && p->avctx)
            codec->close(p->avctx);

        release_delayed_buffers(p);
        av_frame_free(&p->frame);
    }

    for (i = 0; i < thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];

        pthread_mutex_destroy(&p->mutex);
        pthread_mutex_destroy(&p->progress_mutex);
        pthread_cond_destroy(&p->input_cond);
        pthread_cond_destroy(&p->progress_cond);
        pthread_cond_destroy(&p->output_cond);
        av_packet_unref(&p->avpkt);

        for (j = 0; j < p->released_buffers_allocated; j++)
            av_frame_free(&p->released_buffers[j]);
        av_freep(&p->released_buffers);

        if (p->avctx) {
            if (codec->priv_class)
                av_opt_free(p->avctx->priv_data);
            av_freep(&p->avctx->priv_data);

            av_freep(&p->avctx->slice_offset);
        }

        if (p->avctx) {
            av_buffer_unref(&p->avctx->internal->pool);
            av_freep(&p->avctx->internal);
            av_buffer_unref(&p->avctx->hw_frames_ctx);
        }

        av_freep(&p->avctx);
    }

    av_freep(&fctx->threads);
    pthread_mutex_destroy(&fctx->buffer_mutex);
    pthread_mutex_destroy(&fctx->hwaccel_mutex);

    pthread_mutex_destroy(&fctx->async_mutex);
    pthread_cond_destroy(&fctx->async_cond);

    av_freep(&avctx->internal->thread_ctx);

    if (avctx->priv_data && avctx->codec && avctx->codec->priv_class)
        av_opt_free(avctx->priv_data);

    avctx->codec = NULL;
}

int ff_frame_thread_init(AVCodecContext *avctx)
{
    int thread_count = avctx->thread_count;
    const AVCodec *codec = avctx->codec;
    AVCodecContext *src = avctx;
    FrameThreadContext *fctx;
    int i, err = 0;

    if (!thread_count) {
        int nb_cpus = av_cpu_count();
#if FF_API_DEBUG_MV
        if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) || avctx->debug_mv)
            nb_cpus = 1;
#endif
        // use number of cores + 1 as thread count if there is more than one
        if (nb_cpus > 1)
            thread_count = avctx->thread_count = FFMIN(nb_cpus + 1, MAX_AUTO_THREADS);
        else
            thread_count = avctx->thread_count = 1;
    }

    if (thread_count <= 1) {
        avctx->active_thread_type = 0;
        return 0;
    }

    avctx->internal->thread_ctx = fctx = av_mallocz(sizeof(FrameThreadContext));
    if (!fctx)
        return AVERROR(ENOMEM);

    fctx->threads = av_mallocz_array(thread_count, sizeof(PerThreadContext));
    if (!fctx->threads) {
        av_freep(&avctx->internal->thread_ctx);
        return AVERROR(ENOMEM);
    }

    pthread_mutex_init(&fctx->buffer_mutex, NULL);
    pthread_mutex_init(&fctx->hwaccel_mutex, NULL);
    pthread_mutex_init(&fctx->async_mutex, NULL);
    pthread_cond_init(&fctx->async_cond, NULL);

    fctx->async_lock = 1;
    fctx->delaying = 1;

    if (codec->type == AVMEDIA_TYPE_VIDEO)
        avctx->delay = src->thread_count - 1;

    for (i = 0; i < thread_count; i++) {
        AVCodecContext *copy = av_malloc(sizeof(AVCodecContext));
        PerThreadContext *p  = &fctx->threads[i];

        pthread_mutex_init(&p->mutex, NULL);
        pthread_mutex_init(&p->progress_mutex, NULL);
        pthread_cond_init(&p->input_cond, NULL);
        pthread_cond_init(&p->progress_cond, NULL);
        pthread_cond_init(&p->output_cond, NULL);

        p->frame = av_frame_alloc();
        if (!p->frame) {
            av_freep(&copy);
            err = AVERROR(ENOMEM);
            goto error;
        }

        p->parent = fctx;
        p->avctx  = copy;

        if (!copy) {
            err = AVERROR(ENOMEM);
            goto error;
        }

        *copy = *src;

        copy->internal = av_malloc(sizeof(AVCodecInternal));
        if (!copy->internal) {
            copy->priv_data = NULL;
            err = AVERROR(ENOMEM);
            goto error;
        }
        *copy->internal = *src->internal;
        copy->internal->thread_ctx = p;
        copy->internal->last_pkt_props = &p->avpkt;

        copy->delay = avctx->delay;

        if (codec->priv_data_size) {
            copy->priv_data = av_mallocz(codec->priv_data_size);
            if (!copy->priv_data) {
                err = AVERROR(ENOMEM);
                goto error;
            }

            if (codec->priv_class) {
                *(const AVClass **)copy->priv_data = codec->priv_class;
                err = av_opt_copy(copy->priv_data, src->priv_data);
                if (err < 0)
                    goto error;
            }
        }

        if (i)
            copy->internal->is_copy = 1;

        if (codec->init)
            err = codec->init(copy);

        if (err) goto error;

        if (!i)
            update_context_from_thread(avctx, copy, 1);

        atomic_init(&p->debug_threads, (copy->debug & FF_DEBUG_THREADS) != 0);

        err = AVERROR(pthread_create(&p->thread, NULL, frame_worker_thread, p));
        p->thread_init= !err;
        if(!p->thread_init)
            goto error;
    }

    return 0;

error:
    ff_frame_thread_free(avctx, i+1);

    return err;
}

void ff_thread_flush(AVCodecContext *avctx)
{
    int i;
    FrameThreadContext *fctx = avctx->internal->thread_ctx;

    if (!fctx) return;

    park_frame_worker_threads(fctx, avctx->thread_count);
    if (fctx->prev_thread) {
        if (fctx->prev_thread != &fctx->threads[0])
            update_context_from_thread(fctx->threads[0].avctx, fctx->prev_thread->avctx, 0);
    }

    fctx->next_decoding = fctx->next_finished = 0;
    fctx->delaying = 1;
    fctx->prev_thread = NULL;
    for (i = 0; i < avctx->thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];
        // Make sure decode flush calls with size=0 won't return old frames
        p->got_frame = 0;
        av_frame_unref(p->frame);
        p->result = 0;

        release_delayed_buffers(p);

        if (avctx->codec->flush)
            avctx->codec->flush(p->avctx);
    }
}

int ff_thread_can_start_frame(AVCodecContext *avctx)
{
    PerThreadContext *p = avctx->internal->thread_ctx;
    if ((avctx->active_thread_type&FF_THREAD_FRAME) && atomic_load(&p->state) != STATE_SETTING_UP &&
        (avctx->codec->update_thread_context || !THREAD_SAFE_CALLBACKS(avctx))) {
        return 0;
    }
    return 1;
}

static int thread_get_buffer_internal(AVCodecContext *avctx, ThreadFrame *f, int flags)
{
    PerThreadContext *p = avctx->internal->thread_ctx;
    int err;

    f->owner[0] = f->owner[1] = avctx;

    if (!(avctx->active_thread_type & FF_THREAD_FRAME))
        return ff_get_buffer(avctx, f->f, flags);

    if (atomic_load(&p->state) != STATE_SETTING_UP &&
        (avctx->codec->update_thread_context || !THREAD_SAFE_CALLBACKS(avctx))) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() cannot be called after ff_thread_finish_setup()\n");
        return -1;
    }

    if (avctx->codec->caps_internal & FF_CODEC_CAP_ALLOCATE_PROGRESS) {
        atomic_int *progress;
        f->progress = av_buffer_alloc(2 * sizeof(*progress));
        if (!f->progress) {
            return AVERROR(ENOMEM);
        }
        progress = (atomic_int*)f->progress->data;

        atomic_init(&progress[0], -1);
        atomic_init(&progress[1], -1);
    }

    pthread_mutex_lock(&p->parent->buffer_mutex);
    if (THREAD_SAFE_CALLBACKS(avctx)) {
        err = ff_get_buffer(avctx, f->f, flags);
    } else {
        pthread_mutex_lock(&p->progress_mutex);
        p->requested_frame = f->f;
        p->requested_flags = flags;
        atomic_store_explicit(&p->state, STATE_GET_BUFFER, memory_order_release);
        pthread_cond_broadcast(&p->progress_cond);

        while (atomic_load(&p->state) != STATE_SETTING_UP)
            pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

        err = p->result;

        pthread_mutex_unlock(&p->progress_mutex);
    }
    if (!THREAD_SAFE_CALLBACKS(avctx) && !avctx->codec->update_thread_context)
        ff_thread_finish_setup(avctx);
    if (err)
        av_buffer_unref(&f->progress);

    pthread_mutex_unlock(&p->parent->buffer_mutex);

    return err;
}

enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
{
    enum AVPixelFormat res;
    PerThreadContext *p = avctx->internal->thread_ctx;
    if (!(avctx->active_thread_type & FF_THREAD_FRAME) || avctx->thread_safe_callbacks ||
        avctx->get_format == avcodec_default_get_format)
        return ff_get_format(avctx, fmt);
    if (atomic_load(&p->state) != STATE_SETTING_UP) {
        av_log(avctx, AV_LOG_ERROR, "get_format() cannot be called after ff_thread_finish_setup()\n");
        return -1;
    }
    pthread_mutex_lock(&p->progress_mutex);
    p->available_formats = fmt;
    atomic_store(&p->state, STATE_GET_FORMAT);
    pthread_cond_broadcast(&p->progress_cond);

    while (atomic_load(&p->state) != STATE_SETTING_UP)
        pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

    res = p->result_format;

    pthread_mutex_unlock(&p->progress_mutex);

    return res;
}

int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
{
    int ret = thread_get_buffer_internal(avctx, f, flags);
    if (ret < 0)
        av_log(avctx, AV_LOG_ERROR, "thread_get_buffer() failed\n");
    return ret;
}

void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
{
    PerThreadContext *p = avctx->internal->thread_ctx;
    FrameThreadContext *fctx;
    AVFrame *dst;
    int ret = 0;
    int can_direct_free = !(avctx->active_thread_type & FF_THREAD_FRAME) ||
                          THREAD_SAFE_CALLBACKS(avctx);

    if (!f->f)
        return;

    if (avctx->debug & FF_DEBUG_BUFFERS)
        av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f);

    av_buffer_unref(&f->progress);
    f->owner[0] = f->owner[1] = NULL;

    // when the frame buffers are not allocated, just reset it to clean state
    if (can_direct_free || !f->f->buf[0]) {
        av_frame_unref(f->f);
        return;
    }

    fctx = p->parent;
    pthread_mutex_lock(&fctx->buffer_mutex);

    if (p->num_released_buffers == p->released_buffers_allocated) {
        AVFrame **tmp = av_realloc_array(p->released_buffers, p->released_buffers_allocated + 1,
                                         sizeof(*p->released_buffers));
        if (tmp) {
            tmp[p->released_buffers_allocated] = av_frame_alloc();
            p->released_buffers = tmp;
        }

        if (!tmp || !tmp[p->released_buffers_allocated]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        p->released_buffers_allocated++;
    }

    dst = p->released_buffers[p->num_released_buffers];
    av_frame_move_ref(dst, f->f);

    p->num_released_buffers++;

fail:
    pthread_mutex_unlock(&fctx->buffer_mutex);

    // make sure the frame is clean even if we fail to free it
    // this leaks, but it is better than crashing
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Could not queue a frame for freeing, this will leak\n");
        memset(f->f->buf, 0, sizeof(f->f->buf));
        if (f->f->extended_buf)
            memset(f->f->extended_buf, 0, f->f->nb_extended_buf * sizeof(*f->f->extended_buf));
        av_frame_unref(f->f);
    }
}
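
/*
 * A minimal sketch of how a frame-threaded decoder typically drives the
 * buffer API above (names such as "cur" and the use of AV_GET_BUFFER_FLAG_REF
 * are illustrative; individual decoders differ):
 *
 *     ThreadFrame *cur = ...;  // the decoder's ThreadFrame for the new frame
 *     int ret = ff_thread_get_buffer(avctx, cur, AV_GET_BUFFER_FLAG_REF);
 *     if (ret < 0)
 *         return ret;
 *     // ... decode into cur->f, calling ff_thread_report_progress() as
 *     // parts of the frame complete ...
 *     ff_thread_release_buffer(avctx, cur);  // queued for deferred release
 *                                            // when callbacks are not
 *                                            // thread-safe
 */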