/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Frame multithreading support functions
 * @see doc/multithreading.txt
 */

#include "config.h"

#include <stdatomic.h>
#include <stdint.h>

#include "avcodec.h"
#include "hwaccel.h"
#include "internal.h"
#include "pthread_internal.h"
#include "thread.h"
#include "version.h"

#include "libavutil/avassert.h"
#include "libavutil/buffer.h"
#include "libavutil/common.h"
#include "libavutil/cpu.h"
#include "libavutil/frame.h"
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/thread.h"

enum {
    ///< Set when the thread is awaiting a packet.
    STATE_INPUT_READY,
    ///< Set before the codec has called ff_thread_finish_setup().
    STATE_SETTING_UP,
    /**
     * Set when the codec calls get_buffer().
     * State is returned to STATE_SETTING_UP afterwards.
     */
    STATE_GET_BUFFER,
    /**
     * Set when the codec calls get_format().
     * State is returned to STATE_SETTING_UP afterwards.
     */
    STATE_GET_FORMAT,
    ///< Set after the codec has called ff_thread_finish_setup().
    STATE_SETUP_FINISHED,
};

/**
 * Context used by codec threads and stored in their AVCodecInternal thread_ctx.
 */
typedef struct PerThreadContext {
    struct FrameThreadContext *parent;

    pthread_t      thread;
    int            thread_init;
    pthread_cond_t input_cond;      ///< Used to wait for a new packet from the main thread.
    pthread_cond_t progress_cond;   ///< Used by child threads to wait for progress to change.
    pthread_cond_t output_cond;     ///< Used by the main thread to wait for frames to finish.

    pthread_mutex_t mutex;          ///< Mutex used to protect the contents of the PerThreadContext.
    pthread_mutex_t progress_mutex; ///< Mutex used to protect frame progress values and progress_cond.

    AVCodecContext *avctx;          ///< Context used to decode packets passed to this thread.

    AVPacket avpkt;                 ///< Input packet (for decoding) or output (for encoding).

    AVFrame *frame;                 ///< Output frame (for decoding) or input (for encoding).
    int      got_frame;             ///< The output of got_picture_ptr from the last avcodec_decode_video() call.
    int      result;                ///< The result of the last codec decode/encode() call.

    atomic_int state;

    /**
     * Array of frames passed to ff_thread_release_buffer().
     * Frames are released after all threads referencing them are finished.
     */
    AVFrame **released_buffers;
    int       num_released_buffers;
    int       released_buffers_allocated;

    AVFrame *requested_frame;       ///< AVFrame the codec passed to get_buffer()
    int      requested_flags;       ///< flags passed to get_buffer() for requested_frame

    const enum AVPixelFormat *available_formats; ///< Format array for get_format()
    enum AVPixelFormat result_format;            ///< get_format() result

    int die;                        ///< Set when the thread should exit.

    int hwaccel_serializing;
    int async_serializing;

    atomic_int debug_threads;       ///< Set if the FF_DEBUG_THREADS option is set.
} PerThreadContext;

/**
 * Context stored in the client AVCodecInternal thread_ctx.
 */
typedef struct FrameThreadContext {
    PerThreadContext *threads;     ///< The contexts for each thread.
    PerThreadContext *prev_thread; ///< The last thread submit_packet() was called on.

    pthread_mutex_t buffer_mutex;  ///< Mutex used to protect get/release_buffer().
    /**
     * This lock is used for ensuring threads run in serial when hwaccel
     * is used.
     */
    pthread_mutex_t hwaccel_mutex;
    pthread_mutex_t async_mutex;
    pthread_cond_t async_cond;
    int async_lock;

    int next_decoding;             ///< The next context to submit a packet to.
    int next_finished;             ///< The next context to return output from.

    int delaying;                  /**<
                                    * Set for the first N packets, where N is the number of threads.
                                    * While it is set, ff_thread_en/decode_frame won't return any results.
                                    */
} FrameThreadContext;
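
/*
 * Callbacks are treated as thread-safe when the user has explicitly declared
 * them as such, or when the default get_buffer2() (which is thread-safe) is
 * still in place.
 */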
#define THREAD_SAFE_CALLBACKS(avctx) \
((avctx)->thread_safe_callbacks || (avctx)->get_buffer2 == avcodec_default_get_buffer2)
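
/*
 * The async lock serializes worker threads against the caller's thread: it is
 * held on the user side except while ff_thread_decode_frame() is running, and
 * workers acquire it (see async_serializing) for hwaccels that are not marked
 * HWACCEL_CAP_ASYNC_SAFE.
 */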
static void async_lock(FrameThreadContext *fctx)
{
    pthread_mutex_lock(&fctx->async_mutex);
    while (fctx->async_lock)
        pthread_cond_wait(&fctx->async_cond, &fctx->async_mutex);
    fctx->async_lock = 1;
    pthread_mutex_unlock(&fctx->async_mutex);
}

static void async_unlock(FrameThreadContext *fctx)
{
    pthread_mutex_lock(&fctx->async_mutex);
    av_assert0(fctx->async_lock);
    fctx->async_lock = 0;
    pthread_cond_broadcast(&fctx->async_cond);
    pthread_mutex_unlock(&fctx->async_mutex);
}

/**
 * Codec worker thread.
 *
 * Automatically calls ff_thread_finish_setup() if the codec does
 * not provide an update_thread_context method, or if the codec returns
 * before calling it.
 */
static attribute_align_arg void *frame_worker_thread(void *arg)
{
    PerThreadContext *p = arg;
    AVCodecContext *avctx = p->avctx;
    const AVCodec *codec = avctx->codec;

    pthread_mutex_lock(&p->mutex);
    while (1) {
        while (atomic_load(&p->state) == STATE_INPUT_READY && !p->die)
            pthread_cond_wait(&p->input_cond, &p->mutex);

        if (p->die) break;

        if (!codec->update_thread_context && THREAD_SAFE_CALLBACKS(avctx))
            ff_thread_finish_setup(avctx);

        /* If a decoder supports hwaccel, then it must call ff_get_format().
         * Since that call must happen before ff_thread_finish_setup(), the
         * decoder is required to implement update_thread_context() and call
         * ff_thread_finish_setup() manually. Therefore the above
         * ff_thread_finish_setup() call did not happen and hwaccel_serializing
         * cannot be true here. */
        av_assert0(!p->hwaccel_serializing);

        /* if the previous thread uses hwaccel then we take the lock to ensure
         * the threads don't run concurrently */
        if (avctx->hwaccel) {
            pthread_mutex_lock(&p->parent->hwaccel_mutex);
            p->hwaccel_serializing = 1;
        }

        av_frame_unref(p->frame);
        p->got_frame = 0;
        p->result = codec->decode(avctx, p->frame, &p->got_frame, &p->avpkt);

        if ((p->result < 0 || !p->got_frame) && p->frame->buf[0]) {
            if (avctx->codec->caps_internal & FF_CODEC_CAP_ALLOCATE_PROGRESS)
                av_log(avctx, AV_LOG_ERROR, "A frame threaded decoder did not "
                       "free the frame on failure. This is a bug, please report it.\n");
            av_frame_unref(p->frame);
        }

        if (atomic_load(&p->state) == STATE_SETTING_UP)
            ff_thread_finish_setup(avctx);

        if (p->hwaccel_serializing) {
            p->hwaccel_serializing = 0;
            pthread_mutex_unlock(&p->parent->hwaccel_mutex);
        }

        if (p->async_serializing) {
            p->async_serializing = 0;
            async_unlock(p->parent);
        }
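
        /* Mark this thread as ready for new input again and wake both the
         * main thread (output_cond) and any threads waiting on progress. */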
        pthread_mutex_lock(&p->progress_mutex);
        atomic_store(&p->state, STATE_INPUT_READY);
        pthread_cond_broadcast(&p->progress_cond);
        pthread_cond_signal(&p->output_cond);
        pthread_mutex_unlock(&p->progress_mutex);
    }
    pthread_mutex_unlock(&p->mutex);

    return NULL;
}

/**
 * Update the next thread's AVCodecContext with values from the reference thread's context.
 *
 * @param dst The destination context.
 * @param src The source context.
 * @param for_user 0 if the destination is a codec thread, 1 if the destination is the user's thread
 * @return 0 on success, negative error code on failure
 */
static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src, int for_user)
{
    int err = 0;

    if (dst != src && (for_user || !(src->codec_descriptor->props & AV_CODEC_PROP_INTRA_ONLY))) {
        dst->time_base = src->time_base;
        dst->framerate = src->framerate;
        dst->width     = src->width;
        dst->height    = src->height;
        dst->pix_fmt   = src->pix_fmt;
        dst->sw_pix_fmt = src->sw_pix_fmt;

        dst->coded_width  = src->coded_width;
        dst->coded_height = src->coded_height;

        dst->has_b_frames = src->has_b_frames;
        dst->idct_algo    = src->idct_algo;

        dst->bits_per_coded_sample = src->bits_per_coded_sample;
        dst->sample_aspect_ratio   = src->sample_aspect_ratio;

        dst->profile = src->profile;
        dst->level   = src->level;

        dst->bits_per_raw_sample = src->bits_per_raw_sample;
        dst->ticks_per_frame     = src->ticks_per_frame;
        dst->color_primaries     = src->color_primaries;

        dst->color_trc   = src->color_trc;
        dst->colorspace  = src->colorspace;
        dst->color_range = src->color_range;
        dst->chroma_sample_location = src->chroma_sample_location;

        dst->hwaccel = src->hwaccel;
        dst->hwaccel_context = src->hwaccel_context;

        dst->channels       = src->channels;
        dst->sample_rate    = src->sample_rate;
        dst->sample_fmt     = src->sample_fmt;
        dst->channel_layout = src->channel_layout;
        dst->internal->hwaccel_priv_data = src->internal->hwaccel_priv_data;

        if (!!dst->hw_frames_ctx != !!src->hw_frames_ctx ||
            (dst->hw_frames_ctx && dst->hw_frames_ctx->data != src->hw_frames_ctx->data)) {
            av_buffer_unref(&dst->hw_frames_ctx);

            if (src->hw_frames_ctx) {
                dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
                if (!dst->hw_frames_ctx)
                    return AVERROR(ENOMEM);
            }
        }

        dst->hwaccel_flags = src->hwaccel_flags;
    }

    if (for_user) {
        dst->delay = src->thread_count - 1;
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
        dst->coded_frame = src->coded_frame;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    } else {
        if (dst->codec->update_thread_context)
            err = dst->codec->update_thread_context(dst, src);
    }

    return err;
}

/**
 * Update the next thread's AVCodecContext with values set by the user.
 *
 * @param dst The destination context.
 * @param src The source context.
 * @return 0 on success, negative error code on failure
 */
static int update_context_from_user(AVCodecContext *dst, AVCodecContext *src)
{
    dst->flags = src->flags;

    dst->draw_horiz_band = src->draw_horiz_band;
    dst->get_buffer2     = src->get_buffer2;

    dst->opaque   = src->opaque;
    dst->debug    = src->debug;
    dst->debug_mv = src->debug_mv;

    dst->slice_flags = src->slice_flags;
    dst->flags2      = src->flags2;
    dst->export_side_data = src->export_side_data;

    dst->skip_loop_filter = src->skip_loop_filter;
    dst->skip_idct        = src->skip_idct;
    dst->skip_frame       = src->skip_frame;

    dst->frame_number     = src->frame_number;
    dst->reordered_opaque = src->reordered_opaque;
    dst->thread_safe_callbacks = src->thread_safe_callbacks;

    if (src->slice_count && src->slice_offset) {
        if (dst->slice_count < src->slice_count) {
            int err = av_reallocp_array(&dst->slice_offset, src->slice_count,
                                        sizeof(*dst->slice_offset));
            if (err < 0)
                return err;
        }
        memcpy(dst->slice_offset, src->slice_offset,
               src->slice_count * sizeof(*dst->slice_offset));
    }
    dst->slice_count = src->slice_count;

    return 0;
}

/// Releases the buffers that this decoding thread was the last user of.
static void release_delayed_buffers(PerThreadContext *p)
{
    FrameThreadContext *fctx = p->parent;

    while (p->num_released_buffers > 0) {
        AVFrame *f;

        pthread_mutex_lock(&fctx->buffer_mutex);

        // fix extended data in case the caller screwed it up
        av_assert0(p->avctx->codec_type == AVMEDIA_TYPE_VIDEO ||
                   p->avctx->codec_type == AVMEDIA_TYPE_AUDIO);
        f = p->released_buffers[--p->num_released_buffers];
        f->extended_data = f->data;
        av_frame_unref(f);

        pthread_mutex_unlock(&fctx->buffer_mutex);
    }
}

static int submit_packet(PerThreadContext *p, AVCodecContext *user_avctx,
                         AVPacket *avpkt)
{
    FrameThreadContext *fctx = p->parent;
    PerThreadContext *prev_thread = fctx->prev_thread;
    const AVCodec *codec = p->avctx->codec;
    int ret;

    if (!avpkt->size && !(codec->capabilities & AV_CODEC_CAP_DELAY))
        return 0;

    pthread_mutex_lock(&p->mutex);

    ret = update_context_from_user(p->avctx, user_avctx);
    if (ret) {
        pthread_mutex_unlock(&p->mutex);
        return ret;
    }
    atomic_store_explicit(&p->debug_threads,
                          (p->avctx->debug & FF_DEBUG_THREADS) != 0,
                          memory_order_relaxed);

    release_delayed_buffers(p);
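
    /* Wait for the previously submitted thread to finish its setup phase,
     * then copy the state it produced (dimensions, reference frames, etc.)
     * into this thread's context before handing it the new packet. */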
    if (prev_thread) {
        int err;
        if (atomic_load(&prev_thread->state) == STATE_SETTING_UP) {
            pthread_mutex_lock(&prev_thread->progress_mutex);
            while (atomic_load(&prev_thread->state) == STATE_SETTING_UP)
                pthread_cond_wait(&prev_thread->progress_cond, &prev_thread->progress_mutex);
            pthread_mutex_unlock(&prev_thread->progress_mutex);
        }

        err = update_context_from_thread(p->avctx, prev_thread->avctx, 0);
        if (err) {
            pthread_mutex_unlock(&p->mutex);
            return err;
        }
    }

    av_packet_unref(&p->avpkt);
    ret = av_packet_ref(&p->avpkt, avpkt);
    if (ret < 0) {
        pthread_mutex_unlock(&p->mutex);
        av_log(p->avctx, AV_LOG_ERROR, "av_packet_ref() failed in submit_packet()\n");
        return ret;
    }

    atomic_store(&p->state, STATE_SETTING_UP);
    pthread_cond_signal(&p->input_cond);
    pthread_mutex_unlock(&p->mutex);

    /*
     * If the client doesn't have a thread-safe get_buffer(),
     * then decoding threads call back to the main thread,
     * and it calls back to the client here.
     */

    if (!p->avctx->thread_safe_callbacks && (
         p->avctx->get_format != avcodec_default_get_format ||
         p->avctx->get_buffer2 != avcodec_default_get_buffer2)) {
        while (atomic_load(&p->state) != STATE_SETUP_FINISHED && atomic_load(&p->state) != STATE_INPUT_READY) {
            int call_done = 1;
            pthread_mutex_lock(&p->progress_mutex);
            while (atomic_load(&p->state) == STATE_SETTING_UP)
                pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

            switch (atomic_load_explicit(&p->state, memory_order_acquire)) {
            case STATE_GET_BUFFER:
                p->result = ff_get_buffer(p->avctx, p->requested_frame, p->requested_flags);
                break;
            case STATE_GET_FORMAT:
                p->result_format = ff_get_format(p->avctx, p->available_formats);
                break;
            default:
                call_done = 0;
                break;
            }
            if (call_done) {
                atomic_store(&p->state, STATE_SETTING_UP);
                pthread_cond_signal(&p->progress_cond);
            }
            pthread_mutex_unlock(&p->progress_mutex);
        }
    }

    fctx->prev_thread = p;
    fctx->next_decoding++;

    return 0;
}

int ff_thread_decode_frame(AVCodecContext *avctx,
                           AVFrame *picture, int *got_picture_ptr,
                           AVPacket *avpkt)
{
    FrameThreadContext *fctx = avctx->internal->thread_ctx;
    int finished = fctx->next_finished;
    PerThreadContext *p;
    int err;

    /* release the async lock, permitting blocked hwaccel threads to
     * go forward while we are in this function */
    async_unlock(fctx);

    /*
     * Submit a packet to the next decoding thread.
     */

    p = &fctx->threads[fctx->next_decoding];
    err = submit_packet(p, avctx, avpkt);
    if (err)
        goto finish;

    /*
     * If we're still receiving the initial packets, don't return a frame.
     */

    if (fctx->next_decoding > (avctx->thread_count-1-(avctx->codec_id == AV_CODEC_ID_FFV1)))
        fctx->delaying = 0;

    if (fctx->delaying) {
        *got_picture_ptr = 0;
        if (avpkt->size) {
            err = avpkt->size;
            goto finish;
        }
    }

    /*
     * Return the next available frame from the oldest thread.
     * If we're at the end of the stream, then we have to skip threads that
     * didn't output a frame/error, because we don't want to accidentally signal
     * EOF (avpkt->size == 0 && *got_picture_ptr == 0 && err >= 0).
     */

    do {
        p = &fctx->threads[finished++];

        if (atomic_load(&p->state) != STATE_INPUT_READY) {
            pthread_mutex_lock(&p->progress_mutex);
            while (atomic_load_explicit(&p->state, memory_order_relaxed) != STATE_INPUT_READY)
                pthread_cond_wait(&p->output_cond, &p->progress_mutex);
            pthread_mutex_unlock(&p->progress_mutex);
        }

        av_frame_move_ref(picture, p->frame);
        *got_picture_ptr = p->got_frame;
        picture->pkt_dts = p->avpkt.dts;
        err = p->result;

        /*
         * A later call with avpkt->size == 0 may loop over all threads,
         * including this one, searching for a frame/error to return before being
         * stopped by the "finished != fctx->next_finished" condition.
         * Make sure we don't mistakenly return the same frame/error again.
         */
        p->got_frame = 0;
        p->result = 0;

        if (finished >= avctx->thread_count) finished = 0;
    } while (!avpkt->size && !*got_picture_ptr && err >= 0 && finished != fctx->next_finished);

    update_context_from_thread(avctx, p->avctx, 1);

    if (fctx->next_decoding >= avctx->thread_count) fctx->next_decoding = 0;

    fctx->next_finished = finished;

    /* return the size of the consumed packet if no error occurred */
    if (err >= 0)
        err = avpkt->size;
finish:
    async_lock(fctx);
    return err;
}
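
/*
 * Frame progress is kept in a two-entry atomic array (one value per field)
 * attached to the ThreadFrame: decoders report how much of a reference frame
 * has been decoded, and consumers block in ff_thread_await_progress() until
 * the value they need has been reached.
 */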
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
{
    PerThreadContext *p;
    atomic_int *progress = f->progress ? (atomic_int*)f->progress->data : NULL;

    if (!progress ||
        atomic_load_explicit(&progress[field], memory_order_relaxed) >= n)
        return;

    p = f->owner[field]->internal->thread_ctx;

    if (atomic_load_explicit(&p->debug_threads, memory_order_relaxed))
        av_log(f->owner[field], AV_LOG_DEBUG,
               "%p finished %d field %d\n", progress, n, field);

    pthread_mutex_lock(&p->progress_mutex);

    atomic_store_explicit(&progress[field], n, memory_order_release);

    pthread_cond_broadcast(&p->progress_cond);
    pthread_mutex_unlock(&p->progress_mutex);
}

void ff_thread_await_progress(ThreadFrame *f, int n, int field)
{
    PerThreadContext *p;
    atomic_int *progress = f->progress ? (atomic_int*)f->progress->data : NULL;

    if (!progress ||
        atomic_load_explicit(&progress[field], memory_order_acquire) >= n)
        return;

    p = f->owner[field]->internal->thread_ctx;

    if (atomic_load_explicit(&p->debug_threads, memory_order_relaxed))
        av_log(f->owner[field], AV_LOG_DEBUG,
               "thread awaiting %d field %d from %p\n", n, field, progress);

    pthread_mutex_lock(&p->progress_mutex);
    while (atomic_load_explicit(&progress[field], memory_order_relaxed) < n)
        pthread_cond_wait(&p->progress_cond, &p->progress_mutex);
    pthread_mutex_unlock(&p->progress_mutex);
}

void ff_thread_finish_setup(AVCodecContext *avctx) {
    PerThreadContext *p = avctx->internal->thread_ctx;

    if (!(avctx->active_thread_type & FF_THREAD_FRAME)) return;

    if (avctx->hwaccel && !p->hwaccel_serializing) {
        pthread_mutex_lock(&p->parent->hwaccel_mutex);
        p->hwaccel_serializing = 1;
    }

    /* this assumes that no hwaccel calls happen before ff_thread_finish_setup() */
    if (avctx->hwaccel &&
        !(avctx->hwaccel->caps_internal & HWACCEL_CAP_ASYNC_SAFE)) {
        p->async_serializing = 1;

        async_lock(p->parent);
    }

    pthread_mutex_lock(&p->progress_mutex);
    if (atomic_load(&p->state) == STATE_SETUP_FINISHED) {
        av_log(avctx, AV_LOG_WARNING, "Multiple ff_thread_finish_setup() calls\n");
    }

    atomic_store(&p->state, STATE_SETUP_FINISHED);

    pthread_cond_broadcast(&p->progress_cond);
    pthread_mutex_unlock(&p->progress_mutex);
}

/// Waits for all threads to finish.
static void park_frame_worker_threads(FrameThreadContext *fctx, int thread_count)
{
    int i;

    async_unlock(fctx);

    for (i = 0; i < thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];

        if (atomic_load(&p->state) != STATE_INPUT_READY) {
            pthread_mutex_lock(&p->progress_mutex);
            while (atomic_load(&p->state) != STATE_INPUT_READY)
                pthread_cond_wait(&p->output_cond, &p->progress_mutex);
            pthread_mutex_unlock(&p->progress_mutex);
        }
        p->got_frame = 0;
    }

    async_lock(fctx);
}

void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
{
    FrameThreadContext *fctx = avctx->internal->thread_ctx;
    const AVCodec *codec = avctx->codec;
    int i, j;

    park_frame_worker_threads(fctx, thread_count);

    if (fctx->prev_thread && avctx->internal->hwaccel_priv_data !=
                             fctx->prev_thread->avctx->internal->hwaccel_priv_data) {
        if (update_context_from_thread(avctx, fctx->prev_thread->avctx, 1) < 0) {
            av_log(avctx, AV_LOG_ERROR, "Failed to update user thread.\n");
        }
    }

    if (fctx->prev_thread && fctx->prev_thread != fctx->threads)
        if (update_context_from_thread(fctx->threads->avctx, fctx->prev_thread->avctx, 0) < 0) {
            av_log(avctx, AV_LOG_ERROR, "Final thread update failed\n");
            fctx->prev_thread->avctx->internal->is_copy = fctx->threads->avctx->internal->is_copy;
            fctx->threads->avctx->internal->is_copy = 1;
        }

    for (i = 0; i < thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];

        pthread_mutex_lock(&p->mutex);
        p->die = 1;
        pthread_cond_signal(&p->input_cond);
        pthread_mutex_unlock(&p->mutex);

        if (p->thread_init)
            pthread_join(p->thread, NULL);
        p->thread_init = 0;

        if (codec->close && p->avctx)
            codec->close(p->avctx);

        release_delayed_buffers(p);
        av_frame_free(&p->frame);
    }

    for (i = 0; i < thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];

        pthread_mutex_destroy(&p->mutex);
        pthread_mutex_destroy(&p->progress_mutex);
        pthread_cond_destroy(&p->input_cond);
        pthread_cond_destroy(&p->progress_cond);
        pthread_cond_destroy(&p->output_cond);
        av_packet_unref(&p->avpkt);

        for (j = 0; j < p->released_buffers_allocated; j++)
            av_frame_free(&p->released_buffers[j]);
        av_freep(&p->released_buffers);

        if (p->avctx) {
            if (codec->priv_class)
                av_opt_free(p->avctx->priv_data);
            av_freep(&p->avctx->priv_data);

            av_freep(&p->avctx->slice_offset);
        }

        if (p->avctx) {
            av_freep(&p->avctx->internal);
            av_buffer_unref(&p->avctx->hw_frames_ctx);
        }

        av_freep(&p->avctx);
    }

    av_freep(&fctx->threads);
    pthread_mutex_destroy(&fctx->buffer_mutex);
    pthread_mutex_destroy(&fctx->hwaccel_mutex);

    pthread_mutex_destroy(&fctx->async_mutex);
    pthread_cond_destroy(&fctx->async_cond);

    av_freep(&avctx->internal->thread_ctx);

    if (avctx->priv_data && avctx->codec && avctx->codec->priv_class)
        av_opt_free(avctx->priv_data);

    avctx->codec = NULL;
}

int ff_frame_thread_init(AVCodecContext *avctx)
{
    int thread_count = avctx->thread_count;
    const AVCodec *codec = avctx->codec;
    AVCodecContext *src = avctx;
    FrameThreadContext *fctx;
    int i, err = 0;

    if (!thread_count) {
        int nb_cpus = av_cpu_count();
#if FF_API_DEBUG_MV
        if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) || avctx->debug_mv)
            nb_cpus = 1;
#endif
        // use number of cores + 1 as thread count if there is more than one
        if (nb_cpus > 1)
            thread_count = avctx->thread_count = FFMIN(nb_cpus + 1, MAX_AUTO_THREADS);
        else
            thread_count = avctx->thread_count = 1;
    }

    if (thread_count <= 1) {
        avctx->active_thread_type = 0;
        return 0;
    }

    avctx->internal->thread_ctx = fctx = av_mallocz(sizeof(FrameThreadContext));
    if (!fctx)
        return AVERROR(ENOMEM);

    fctx->threads = av_mallocz_array(thread_count, sizeof(PerThreadContext));
    if (!fctx->threads) {
        av_freep(&avctx->internal->thread_ctx);
        return AVERROR(ENOMEM);
    }

    pthread_mutex_init(&fctx->buffer_mutex, NULL);
    pthread_mutex_init(&fctx->hwaccel_mutex, NULL);
    pthread_mutex_init(&fctx->async_mutex, NULL);
    pthread_cond_init(&fctx->async_cond, NULL);

    fctx->async_lock = 1;
    fctx->delaying = 1;
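
    /* The async lock starts out held by the caller's side, and the context
     * stays in the delaying state until every worker has been handed a
     * packet, so no frames are returned before the pipeline is full. */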
    for (i = 0; i < thread_count; i++) {
        AVCodecContext *copy = av_malloc(sizeof(AVCodecContext));
        PerThreadContext *p  = &fctx->threads[i];

        pthread_mutex_init(&p->mutex, NULL);
        pthread_mutex_init(&p->progress_mutex, NULL);
        pthread_cond_init(&p->input_cond, NULL);
        pthread_cond_init(&p->progress_cond, NULL);
        pthread_cond_init(&p->output_cond, NULL);

        p->frame = av_frame_alloc();
        if (!p->frame) {
            av_freep(&copy);
            err = AVERROR(ENOMEM);
            goto error;
        }

        p->parent = fctx;
        p->avctx  = copy;

        if (!copy) {
            err = AVERROR(ENOMEM);
            goto error;
        }

        *copy = *src;

        copy->internal = av_malloc(sizeof(AVCodecInternal));
        if (!copy->internal) {
            copy->priv_data = NULL;
            err = AVERROR(ENOMEM);
            goto error;
        }
        *copy->internal = *src->internal;
        copy->internal->thread_ctx = p;
        copy->internal->last_pkt_props = &p->avpkt;

        if (codec->priv_data_size) {
            copy->priv_data = av_mallocz(codec->priv_data_size);
            if (!copy->priv_data) {
                err = AVERROR(ENOMEM);
                goto error;
            }

            if (codec->priv_class) {
                *(const AVClass **)copy->priv_data = codec->priv_class;
                err = av_opt_copy(copy->priv_data, src->priv_data);
                if (err < 0)
                    goto error;
            }
        }

        if (i)
            copy->internal->is_copy = 1;

        if (codec->init)
            err = codec->init(copy);

        if (err) goto error;

        if (!i)
            update_context_from_thread(avctx, copy, 1);

        atomic_init(&p->debug_threads, (copy->debug & FF_DEBUG_THREADS) != 0);

        err = AVERROR(pthread_create(&p->thread, NULL, frame_worker_thread, p));
        p->thread_init = !err;
        if (!p->thread_init)
            goto error;
    }

    return 0;

error:
    ff_frame_thread_free(avctx, i + 1);

    return err;
}

void ff_thread_flush(AVCodecContext *avctx)
{
    int i;
    FrameThreadContext *fctx = avctx->internal->thread_ctx;

    if (!fctx) return;

    park_frame_worker_threads(fctx, avctx->thread_count);
    if (fctx->prev_thread) {
        if (fctx->prev_thread != &fctx->threads[0])
            update_context_from_thread(fctx->threads[0].avctx, fctx->prev_thread->avctx, 0);
    }

    fctx->next_decoding = fctx->next_finished = 0;
    fctx->delaying = 1;
    fctx->prev_thread = NULL;
    for (i = 0; i < avctx->thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];
        // Make sure decode flush calls with size=0 won't return old frames
        p->got_frame = 0;
        av_frame_unref(p->frame);
        p->result = 0;

        release_delayed_buffers(p);

        if (avctx->codec->flush)
            avctx->codec->flush(p->avctx);
    }
}

int ff_thread_can_start_frame(AVCodecContext *avctx)
{
    PerThreadContext *p = avctx->internal->thread_ctx;
    if ((avctx->active_thread_type & FF_THREAD_FRAME) && atomic_load(&p->state) != STATE_SETTING_UP &&
        (avctx->codec->update_thread_context || !THREAD_SAFE_CALLBACKS(avctx))) {
        return 0;
    }
    return 1;
}

static int thread_get_buffer_internal(AVCodecContext *avctx, ThreadFrame *f, int flags)
{
    PerThreadContext *p = avctx->internal->thread_ctx;
    int err;

    f->owner[0] = f->owner[1] = avctx;

    if (!(avctx->active_thread_type & FF_THREAD_FRAME))
        return ff_get_buffer(avctx, f->f, flags);

    if (atomic_load(&p->state) != STATE_SETTING_UP &&
        (avctx->codec->update_thread_context || !THREAD_SAFE_CALLBACKS(avctx))) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() cannot be called after ff_thread_finish_setup()\n");
        return -1;
    }

    if (avctx->codec->caps_internal & FF_CODEC_CAP_ALLOCATE_PROGRESS) {
        atomic_int *progress;
        f->progress = av_buffer_alloc(2 * sizeof(*progress));
        if (!f->progress) {
            return AVERROR(ENOMEM);
        }
        progress = (atomic_int*)f->progress->data;

        atomic_init(&progress[0], -1);
        atomic_init(&progress[1], -1);
    }

    pthread_mutex_lock(&p->parent->buffer_mutex);
    if (THREAD_SAFE_CALLBACKS(avctx)) {
        err = ff_get_buffer(avctx, f->f, flags);
    } else {
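        /* Callbacks are not thread-safe: hand the request over to the main
         * thread (blocked in submit_packet()), which performs the actual
         * ff_get_buffer() call and stores the result in p->result. */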
        pthread_mutex_lock(&p->progress_mutex);
        p->requested_frame = f->f;
        p->requested_flags = flags;
        atomic_store_explicit(&p->state, STATE_GET_BUFFER, memory_order_release);
        pthread_cond_broadcast(&p->progress_cond);

        while (atomic_load(&p->state) != STATE_SETTING_UP)
            pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

        err = p->result;

        pthread_mutex_unlock(&p->progress_mutex);
    }
    if (!THREAD_SAFE_CALLBACKS(avctx) && !avctx->codec->update_thread_context)
        ff_thread_finish_setup(avctx);
    if (err)
        av_buffer_unref(&f->progress);

    pthread_mutex_unlock(&p->parent->buffer_mutex);

    return err;
}

enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
{
    enum AVPixelFormat res;
    PerThreadContext *p = avctx->internal->thread_ctx;
    if (!(avctx->active_thread_type & FF_THREAD_FRAME) || avctx->thread_safe_callbacks ||
        avctx->get_format == avcodec_default_get_format)
        return ff_get_format(avctx, fmt);
    if (atomic_load(&p->state) != STATE_SETTING_UP) {
        av_log(avctx, AV_LOG_ERROR, "get_format() cannot be called after ff_thread_finish_setup()\n");
        return -1;
    }
    pthread_mutex_lock(&p->progress_mutex);
    p->available_formats = fmt;
    atomic_store(&p->state, STATE_GET_FORMAT);
    pthread_cond_broadcast(&p->progress_cond);

    while (atomic_load(&p->state) != STATE_SETTING_UP)
        pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

    res = p->result_format;

    pthread_mutex_unlock(&p->progress_mutex);

    return res;
}

int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
{
    int ret = thread_get_buffer_internal(avctx, f, flags);
    if (ret < 0)
        av_log(avctx, AV_LOG_ERROR, "thread_get_buffer() failed\n");
    return ret;
}

void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
{
    PerThreadContext *p = avctx->internal->thread_ctx;
    FrameThreadContext *fctx;
    AVFrame *dst;
    int ret = 0;
    int can_direct_free = !(avctx->active_thread_type & FF_THREAD_FRAME) ||
                          THREAD_SAFE_CALLBACKS(avctx);

    if (!f->f)
        return;

    if (avctx->debug & FF_DEBUG_BUFFERS)
        av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f);

    av_buffer_unref(&f->progress);
    f->owner[0] = f->owner[1] = NULL;

    // when the frame buffers are not allocated, just reset it to clean state
    if (can_direct_free || !f->f->buf[0]) {
        av_frame_unref(f->f);
        return;
    }
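
    /* Callbacks are not thread-safe, so the frame cannot be unreferenced here.
     * Queue it on this thread's released_buffers list instead; it will be
     * freed by release_delayed_buffers() before the next packet is decoded. */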
    fctx = p->parent;
    pthread_mutex_lock(&fctx->buffer_mutex);

    if (p->num_released_buffers == p->released_buffers_allocated) {
        AVFrame **tmp = av_realloc_array(p->released_buffers, p->released_buffers_allocated + 1,
                                         sizeof(*p->released_buffers));
        if (tmp) {
            tmp[p->released_buffers_allocated] = av_frame_alloc();
            p->released_buffers = tmp;
        }

        if (!tmp || !tmp[p->released_buffers_allocated]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        p->released_buffers_allocated++;
    }

    dst = p->released_buffers[p->num_released_buffers];
    av_frame_move_ref(dst, f->f);

    p->num_released_buffers++;

fail:
    pthread_mutex_unlock(&fctx->buffer_mutex);

    // make sure the frame is clean even if we fail to free it
    // this leaks, but it is better than crashing
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Could not queue a frame for freeing, this will leak\n");
        memset(f->f->buf, 0, sizeof(f->f->buf));
        if (f->f->extended_buf)
            memset(f->f->extended_buf, 0, f->f->nb_extended_buf * sizeof(*f->f->extended_buf));
        av_frame_unref(f->f);
    }
}