/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Frame multithreading support functions
 * @see doc/multithreading.txt
 */

#include "config.h"

#include <stdatomic.h>
#include <stdint.h>

#include "avcodec.h"
#include "hwaccel.h"
#include "internal.h"
#include "pthread_internal.h"
#include "thread.h"
#include "version.h"

#include "libavutil/avassert.h"
#include "libavutil/buffer.h"
#include "libavutil/common.h"
#include "libavutil/cpu.h"
#include "libavutil/frame.h"
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/thread.h"

enum {
    ///< Set when the thread is awaiting a packet.
    STATE_INPUT_READY,
    ///< Set before the codec has called ff_thread_finish_setup().
    STATE_SETTING_UP,
    /**
     * Set when the codec calls get_buffer().
     * State is returned to STATE_SETTING_UP afterwards.
     */
    STATE_GET_BUFFER,
    /**
     * Set when the codec calls get_format().
     * State is returned to STATE_SETTING_UP afterwards.
     */
    STATE_GET_FORMAT,
    ///< Set after the codec has called ff_thread_finish_setup().
    STATE_SETUP_FINISHED,
};

/**
 * Context used by codec threads and stored in their AVCodecInternal thread_ctx.
 */
typedef struct PerThreadContext {
    struct FrameThreadContext *parent;

    pthread_t      thread;
    int            thread_init;
    pthread_cond_t input_cond;      ///< Used to wait for a new packet from the main thread.
    pthread_cond_t progress_cond;   ///< Used by child threads to wait for progress to change.
    pthread_cond_t output_cond;     ///< Used by the main thread to wait for frames to finish.

    pthread_mutex_t mutex;          ///< Mutex used to protect the contents of the PerThreadContext.
    pthread_mutex_t progress_mutex; ///< Mutex used to protect frame progress values and progress_cond.

    AVCodecContext *avctx;          ///< Context used to decode packets passed to this thread.

    AVPacket avpkt;                 ///< Input packet (for decoding) or output (for encoding).

    AVFrame *frame;                 ///< Output frame (for decoding) or input (for encoding).
    int      got_frame;             ///< The output of got_picture_ptr from the last avcodec_decode_video() call.
    int      result;                ///< The result of the last codec decode/encode() call.

    atomic_int state;

    /**
     * Array of frames passed to ff_thread_release_buffer().
     * Frames are released after all threads referencing them are finished.
     */
    AVFrame *released_buffers;
    int      num_released_buffers;
    int      released_buffers_allocated;

    AVFrame *requested_frame;       ///< AVFrame the codec passed to get_buffer()
    int      requested_flags;       ///< flags passed to get_buffer() for requested_frame

    const enum AVPixelFormat *available_formats; ///< Format array for get_format()
    enum AVPixelFormat result_format;            ///< get_format() result

    int die;                        ///< Set when the thread should exit.

    int hwaccel_serializing;
    int async_serializing;

    atomic_int debug_threads;       ///< Set if the FF_DEBUG_THREADS option is set.
} PerThreadContext;

/**
 * Context stored in the client AVCodecInternal thread_ctx.
 */
typedef struct FrameThreadContext {
    PerThreadContext *threads;     ///< The contexts for each thread.
    PerThreadContext *prev_thread; ///< The last thread submit_packet() was called on.

    pthread_mutex_t buffer_mutex;  ///< Mutex used to protect get/release_buffer().
    /**
     * This lock is used for ensuring threads run in serial when hwaccel
     * is used.
     */
    pthread_mutex_t hwaccel_mutex;
    pthread_mutex_t async_mutex;
    pthread_cond_t  async_cond;
    int             async_lock;

    int next_decoding;             ///< The next context to submit a packet to.
    int next_finished;             ///< The next context to return output from.

    int delaying;                  /**<
                                    * Set for the first N packets, where N is the number of threads.
                                    * While it is set, ff_thread_en/decode_frame won't return any results.
                                    */
} FrameThreadContext;

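/*
 * Callbacks are treated as thread-safe either when the user has declared them
 * as such via thread_safe_callbacks, or when the default get_buffer2() is in
 * use; in that case worker threads may invoke them directly instead of
 * routing the call through the main thread.
 */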
#define THREAD_SAFE_CALLBACKS(avctx) \
((avctx)->thread_safe_callbacks || (avctx)->get_buffer2 == avcodec_default_get_buffer2)

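/*
 * The async lock is held on behalf of the caller whenever control is outside
 * ff_thread_decode_frame(). Workers whose hwaccel is not async-safe acquire it
 * in ff_thread_finish_setup() and drop it once they finish decoding their
 * packet (see frame_worker_thread()), so such hwaccel decoding only proceeds
 * while the caller is inside a libavcodec call.
 */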
static void async_lock(FrameThreadContext *fctx)
{
    pthread_mutex_lock(&fctx->async_mutex);
    while (fctx->async_lock)
        pthread_cond_wait(&fctx->async_cond, &fctx->async_mutex);
    fctx->async_lock = 1;
    pthread_mutex_unlock(&fctx->async_mutex);
}

static void async_unlock(FrameThreadContext *fctx)
{
    pthread_mutex_lock(&fctx->async_mutex);
    av_assert0(fctx->async_lock);
    fctx->async_lock = 0;
    pthread_cond_broadcast(&fctx->async_cond);
    pthread_mutex_unlock(&fctx->async_mutex);
}

/**
 * Codec worker thread.
 *
 * Automatically calls ff_thread_finish_setup() if the codec does
 * not provide an update_thread_context method, or if the codec returns
 * before calling it.
 */
static attribute_align_arg void *frame_worker_thread(void *arg)
{
    PerThreadContext *p = arg;
    AVCodecContext *avctx = p->avctx;
    const AVCodec *codec = avctx->codec;

    pthread_mutex_lock(&p->mutex);
    while (1) {
        while (atomic_load(&p->state) == STATE_INPUT_READY && !p->die)
            pthread_cond_wait(&p->input_cond, &p->mutex);

        if (p->die) break;

        if (!codec->update_thread_context && THREAD_SAFE_CALLBACKS(avctx))
            ff_thread_finish_setup(avctx);

        /* If a decoder supports hwaccel, then it must call ff_get_format().
         * Since that call must happen before ff_thread_finish_setup(), the
         * decoder is required to implement update_thread_context() and call
         * ff_thread_finish_setup() manually. Therefore the above
         * ff_thread_finish_setup() call did not happen and hwaccel_serializing
         * cannot be true here. */
        av_assert0(!p->hwaccel_serializing);

        /* if the previous thread uses hwaccel then we take the lock to ensure
         * the threads don't run concurrently */
        if (avctx->hwaccel) {
            pthread_mutex_lock(&p->parent->hwaccel_mutex);
            p->hwaccel_serializing = 1;
        }

        av_frame_unref(p->frame);
        p->got_frame = 0;
        p->result = codec->decode(avctx, p->frame, &p->got_frame, &p->avpkt);

        if ((p->result < 0 || !p->got_frame) && p->frame->buf[0]) {
            if (avctx->codec->caps_internal & FF_CODEC_CAP_ALLOCATE_PROGRESS)
                av_log(avctx, AV_LOG_ERROR, "A frame threaded decoder did not "
                       "free the frame on failure. This is a bug, please report it.\n");
            av_frame_unref(p->frame);
        }

        if (atomic_load(&p->state) == STATE_SETTING_UP)
            ff_thread_finish_setup(avctx);

        if (p->hwaccel_serializing) {
            p->hwaccel_serializing = 0;
            pthread_mutex_unlock(&p->parent->hwaccel_mutex);
        }

        if (p->async_serializing) {
            p->async_serializing = 0;
            async_unlock(p->parent);
        }

        pthread_mutex_lock(&p->progress_mutex);

        atomic_store(&p->state, STATE_INPUT_READY);

        pthread_cond_broadcast(&p->progress_cond);
        pthread_cond_signal(&p->output_cond);
        pthread_mutex_unlock(&p->progress_mutex);
    }
    pthread_mutex_unlock(&p->mutex);

    return NULL;
}

/**
 * Update the next thread's AVCodecContext with values from the reference thread's context.
 *
 * @param dst The destination context.
 * @param src The source context.
 * @param for_user 0 if the destination is a codec thread, 1 if the destination is the user's thread
 * @return 0 on success, negative error code on failure
 */
static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src, int for_user)
{
    int err = 0;

    if (dst != src && (for_user || !(src->codec_descriptor->props & AV_CODEC_PROP_INTRA_ONLY))) {
        dst->time_base = src->time_base;
        dst->framerate = src->framerate;
        dst->width     = src->width;
        dst->height    = src->height;
        dst->pix_fmt   = src->pix_fmt;
        dst->sw_pix_fmt = src->sw_pix_fmt;

        dst->coded_width  = src->coded_width;
        dst->coded_height = src->coded_height;

        dst->has_b_frames = src->has_b_frames;
        dst->idct_algo    = src->idct_algo;

        dst->bits_per_coded_sample = src->bits_per_coded_sample;
        dst->sample_aspect_ratio   = src->sample_aspect_ratio;

        dst->profile = src->profile;
        dst->level   = src->level;

        dst->bits_per_raw_sample = src->bits_per_raw_sample;
        dst->ticks_per_frame     = src->ticks_per_frame;
        dst->color_primaries     = src->color_primaries;

        dst->color_trc   = src->color_trc;
        dst->colorspace  = src->colorspace;
        dst->color_range = src->color_range;
        dst->chroma_sample_location = src->chroma_sample_location;

        dst->hwaccel         = src->hwaccel;
        dst->hwaccel_context = src->hwaccel_context;

        dst->channels       = src->channels;
        dst->sample_rate    = src->sample_rate;
        dst->sample_fmt     = src->sample_fmt;
        dst->channel_layout = src->channel_layout;
        dst->internal->hwaccel_priv_data = src->internal->hwaccel_priv_data;

        if (!!dst->hw_frames_ctx != !!src->hw_frames_ctx ||
            (dst->hw_frames_ctx && dst->hw_frames_ctx->data != src->hw_frames_ctx->data)) {
            av_buffer_unref(&dst->hw_frames_ctx);

            if (src->hw_frames_ctx) {
                dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
                if (!dst->hw_frames_ctx)
                    return AVERROR(ENOMEM);
            }
        }

        dst->hwaccel_flags = src->hwaccel_flags;
    }

    if (for_user) {
        dst->delay = src->thread_count - 1;
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
        dst->coded_frame = src->coded_frame;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    } else {
        if (dst->codec->update_thread_context)
            err = dst->codec->update_thread_context(dst, src);
    }

    return err;
}

/**
 * Update the next thread's AVCodecContext with values set by the user.
 *
 * @param dst The destination context.
 * @param src The source context.
 * @return 0 on success, negative error code on failure
 */
static int update_context_from_user(AVCodecContext *dst, AVCodecContext *src)
{
#define copy_fields(s, e) memcpy(&dst->s, &src->s, (char*)&dst->e - (char*)&dst->s);
    dst->flags           = src->flags;

    dst->draw_horiz_band = src->draw_horiz_band;
    dst->get_buffer2     = src->get_buffer2;

    dst->opaque   = src->opaque;
    dst->debug    = src->debug;
    dst->debug_mv = src->debug_mv;

    dst->slice_flags = src->slice_flags;
    dst->flags2      = src->flags2;
    dst->export_side_data = src->export_side_data;

    copy_fields(skip_loop_filter, subtitle_header);

    dst->frame_number     = src->frame_number;
    dst->reordered_opaque = src->reordered_opaque;
    dst->thread_safe_callbacks = src->thread_safe_callbacks;

    if (src->slice_count && src->slice_offset) {
        if (dst->slice_count < src->slice_count) {
            int err = av_reallocp_array(&dst->slice_offset, src->slice_count,
                                        sizeof(*dst->slice_offset));
            if (err < 0)
                return err;
        }
        memcpy(dst->slice_offset, src->slice_offset,
               src->slice_count * sizeof(*dst->slice_offset));
    }
    dst->slice_count = src->slice_count;
    return 0;
#undef copy_fields
}

/// Releases the buffers that this decoding thread was the last user of.
static void release_delayed_buffers(PerThreadContext *p)
{
    FrameThreadContext *fctx = p->parent;

    while (p->num_released_buffers > 0) {
        AVFrame *f;

        pthread_mutex_lock(&fctx->buffer_mutex);

        // fix extended data in case the caller screwed it up
        av_assert0(p->avctx->codec_type == AVMEDIA_TYPE_VIDEO ||
                   p->avctx->codec_type == AVMEDIA_TYPE_AUDIO);
        f = &p->released_buffers[--p->num_released_buffers];
        f->extended_data = f->data;
        av_frame_unref(f);

        pthread_mutex_unlock(&fctx->buffer_mutex);
    }
}

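/**
 * Pass the next packet to a worker thread and wake it up.
 *
 * Waits for the previous thread to finish its setup phase, copies the updated
 * context into this thread's context, and, if the client callbacks are not
 * thread-safe, services get_format()/get_buffer() requests coming from the
 * worker until it has finished setting up.
 */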
static int submit_packet(PerThreadContext *p, AVCodecContext *user_avctx,
                         AVPacket *avpkt)
{
    FrameThreadContext *fctx = p->parent;
    PerThreadContext *prev_thread = fctx->prev_thread;
    const AVCodec *codec = p->avctx->codec;
    int ret;

    if (!avpkt->size && !(codec->capabilities & AV_CODEC_CAP_DELAY))
        return 0;

    pthread_mutex_lock(&p->mutex);

    ret = update_context_from_user(p->avctx, user_avctx);
    if (ret) {
        pthread_mutex_unlock(&p->mutex);
        return ret;
    }
    atomic_store_explicit(&p->debug_threads,
                          (p->avctx->debug & FF_DEBUG_THREADS) != 0,
                          memory_order_relaxed);

    release_delayed_buffers(p);

    if (prev_thread) {
        int err;
        if (atomic_load(&prev_thread->state) == STATE_SETTING_UP) {
            pthread_mutex_lock(&prev_thread->progress_mutex);
            while (atomic_load(&prev_thread->state) == STATE_SETTING_UP)
                pthread_cond_wait(&prev_thread->progress_cond, &prev_thread->progress_mutex);
            pthread_mutex_unlock(&prev_thread->progress_mutex);
        }

        err = update_context_from_thread(p->avctx, prev_thread->avctx, 0);
        if (err) {
            pthread_mutex_unlock(&p->mutex);
            return err;
        }
    }

    av_packet_unref(&p->avpkt);
    ret = av_packet_ref(&p->avpkt, avpkt);
    if (ret < 0) {
        pthread_mutex_unlock(&p->mutex);
        av_log(p->avctx, AV_LOG_ERROR, "av_packet_ref() failed in submit_packet()\n");
        return ret;
    }

    atomic_store(&p->state, STATE_SETTING_UP);
    pthread_cond_signal(&p->input_cond);
    pthread_mutex_unlock(&p->mutex);

    /*
     * If the client doesn't have a thread-safe get_buffer(),
     * then decoding threads call back to the main thread,
     * and it calls back to the client here.
     */

    if (!p->avctx->thread_safe_callbacks && (
         p->avctx->get_format != avcodec_default_get_format ||
         p->avctx->get_buffer2 != avcodec_default_get_buffer2)) {
        while (atomic_load(&p->state) != STATE_SETUP_FINISHED && atomic_load(&p->state) != STATE_INPUT_READY) {
            int call_done = 1;
            pthread_mutex_lock(&p->progress_mutex);
            while (atomic_load(&p->state) == STATE_SETTING_UP)
                pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

            switch (atomic_load_explicit(&p->state, memory_order_acquire)) {
            case STATE_GET_BUFFER:
                p->result = ff_get_buffer(p->avctx, p->requested_frame, p->requested_flags);
                break;
            case STATE_GET_FORMAT:
                p->result_format = ff_get_format(p->avctx, p->available_formats);
                break;
            default:
                call_done = 0;
                break;
            }
            if (call_done) {
                atomic_store(&p->state, STATE_SETTING_UP);
                pthread_cond_signal(&p->progress_cond);
            }
            pthread_mutex_unlock(&p->progress_mutex);
        }
    }

    fctx->prev_thread = p;
    fctx->next_decoding++;

    return 0;
}

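/**
 * Submit a packet for decoding and return the next completed frame, if any.
 *
 * Packets are handed to the worker threads round-robin; output is taken from
 * the oldest busy thread, so frames are delayed by thread_count - 1 calls.
 */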
int ff_thread_decode_frame(AVCodecContext *avctx,
                           AVFrame *picture, int *got_picture_ptr,
                           AVPacket *avpkt)
{
    FrameThreadContext *fctx = avctx->internal->thread_ctx;
    int finished = fctx->next_finished;
    PerThreadContext *p;
    int err;

    /* release the async lock, permitting blocked hwaccel threads to
     * go forward while we are in this function */
    async_unlock(fctx);

    /*
     * Submit a packet to the next decoding thread.
     */

    p = &fctx->threads[fctx->next_decoding];
    err = submit_packet(p, avctx, avpkt);
    if (err)
        goto finish;

    /*
     * If we're still receiving the initial packets, don't return a frame.
     */

    if (fctx->next_decoding > (avctx->thread_count-1-(avctx->codec_id == AV_CODEC_ID_FFV1)))
        fctx->delaying = 0;

    if (fctx->delaying) {
        *got_picture_ptr=0;
        if (avpkt->size) {
            err = avpkt->size;
            goto finish;
        }
    }

    /*
     * Return the next available frame from the oldest thread.
     * If we're at the end of the stream, then we have to skip threads that
     * didn't output a frame/error, because we don't want to accidentally signal
     * EOF (avpkt->size == 0 && *got_picture_ptr == 0 && err >= 0).
     */

    do {
        p = &fctx->threads[finished++];

        if (atomic_load(&p->state) != STATE_INPUT_READY) {
            pthread_mutex_lock(&p->progress_mutex);
            while (atomic_load_explicit(&p->state, memory_order_relaxed) != STATE_INPUT_READY)
                pthread_cond_wait(&p->output_cond, &p->progress_mutex);
            pthread_mutex_unlock(&p->progress_mutex);
        }

        av_frame_move_ref(picture, p->frame);
        *got_picture_ptr = p->got_frame;
        picture->pkt_dts = p->avpkt.dts;
        err = p->result;

        /*
         * A later call with avpkt->size == 0 may loop over all threads,
         * including this one, searching for a frame/error to return before being
         * stopped by the "finished != fctx->next_finished" condition.
         * Make sure we don't mistakenly return the same frame/error again.
         */
        p->got_frame = 0;
        p->result = 0;

        if (finished >= avctx->thread_count) finished = 0;
    } while (!avpkt->size && !*got_picture_ptr && err >= 0 && finished != fctx->next_finished);

    update_context_from_thread(avctx, p->avctx, 1);

    if (fctx->next_decoding >= avctx->thread_count) fctx->next_decoding = 0;

    fctx->next_finished = finished;

    /* return the size of the consumed packet if no error occurred */
    if (err >= 0)
        err = avpkt->size;
finish:
    async_lock(fctx);
    return err;
}

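/**
 * Notify waiting threads that decoding of frame f has progressed to at least
 * n for the given field, waking any ff_thread_await_progress() callers.
 */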
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
{
    PerThreadContext *p;
    atomic_int *progress = f->progress ? (atomic_int*)f->progress->data : NULL;

    if (!progress ||
        atomic_load_explicit(&progress[field], memory_order_relaxed) >= n)
        return;

    p = f->owner[field]->internal->thread_ctx;

    if (atomic_load_explicit(&p->debug_threads, memory_order_relaxed))
        av_log(f->owner[field], AV_LOG_DEBUG,
               "%p finished %d field %d\n", progress, n, field);

    pthread_mutex_lock(&p->progress_mutex);

    atomic_store_explicit(&progress[field], n, memory_order_release);

    pthread_cond_broadcast(&p->progress_cond);
    pthread_mutex_unlock(&p->progress_mutex);
}

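/**
 * Block until the owner of frame f has reported progress of at least n for
 * the given field. Returns immediately if no progress array was allocated.
 */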
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
{
    PerThreadContext *p;
    atomic_int *progress = f->progress ? (atomic_int*)f->progress->data : NULL;

    if (!progress ||
        atomic_load_explicit(&progress[field], memory_order_acquire) >= n)
        return;

    p = f->owner[field]->internal->thread_ctx;

    if (atomic_load_explicit(&p->debug_threads, memory_order_relaxed))
        av_log(f->owner[field], AV_LOG_DEBUG,
               "thread awaiting %d field %d from %p\n", n, field, progress);

    pthread_mutex_lock(&p->progress_mutex);
    while (atomic_load_explicit(&progress[field], memory_order_relaxed) < n)
        pthread_cond_wait(&p->progress_cond, &p->progress_mutex);
    pthread_mutex_unlock(&p->progress_mutex);
}

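/**
 * Called by decoders once all context updates that later frames depend on
 * are done; marks this thread as set up so the next packet can be submitted.
 */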
void ff_thread_finish_setup(AVCodecContext *avctx) {
    PerThreadContext *p = avctx->internal->thread_ctx;

    if (!(avctx->active_thread_type&FF_THREAD_FRAME)) return;

    if (avctx->hwaccel && !p->hwaccel_serializing) {
        pthread_mutex_lock(&p->parent->hwaccel_mutex);
        p->hwaccel_serializing = 1;
    }

    /* this assumes that no hwaccel calls happen before ff_thread_finish_setup() */
    if (avctx->hwaccel &&
        !(avctx->hwaccel->caps_internal & HWACCEL_CAP_ASYNC_SAFE)) {
        p->async_serializing = 1;

        async_lock(p->parent);
    }

    pthread_mutex_lock(&p->progress_mutex);
    if(atomic_load(&p->state) == STATE_SETUP_FINISHED){
        av_log(avctx, AV_LOG_WARNING, "Multiple ff_thread_finish_setup() calls\n");
    }

    atomic_store(&p->state, STATE_SETUP_FINISHED);

    pthread_cond_broadcast(&p->progress_cond);
    pthread_mutex_unlock(&p->progress_mutex);
}

/// Waits for all threads to finish.
static void park_frame_worker_threads(FrameThreadContext *fctx, int thread_count)
{
    int i;

    async_unlock(fctx);

    for (i = 0; i < thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];

        if (atomic_load(&p->state) != STATE_INPUT_READY) {
            pthread_mutex_lock(&p->progress_mutex);
            while (atomic_load(&p->state) != STATE_INPUT_READY)
                pthread_cond_wait(&p->output_cond, &p->progress_mutex);
            pthread_mutex_unlock(&p->progress_mutex);
        }
        p->got_frame = 0;
    }

    async_lock(fctx);
}

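/**
 * Park all worker threads, propagate the final context back to the user,
 * then shut down and free every per-thread context.
 */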
void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
{
    FrameThreadContext *fctx = avctx->internal->thread_ctx;
    const AVCodec *codec = avctx->codec;
    int i;

    park_frame_worker_threads(fctx, thread_count);

    if (fctx->prev_thread && avctx->internal->hwaccel_priv_data !=
                             fctx->prev_thread->avctx->internal->hwaccel_priv_data) {
        if (update_context_from_thread(avctx, fctx->prev_thread->avctx, 1) < 0) {
            av_log(avctx, AV_LOG_ERROR, "Failed to update user thread.\n");
        }
    }

    if (fctx->prev_thread && fctx->prev_thread != fctx->threads)
        if (update_context_from_thread(fctx->threads->avctx, fctx->prev_thread->avctx, 0) < 0) {
            av_log(avctx, AV_LOG_ERROR, "Final thread update failed\n");
            fctx->prev_thread->avctx->internal->is_copy = fctx->threads->avctx->internal->is_copy;
            fctx->threads->avctx->internal->is_copy = 1;
        }

    for (i = 0; i < thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];

        pthread_mutex_lock(&p->mutex);
        p->die = 1;
        pthread_cond_signal(&p->input_cond);
        pthread_mutex_unlock(&p->mutex);

        if (p->thread_init)
            pthread_join(p->thread, NULL);
        p->thread_init=0;

        if (codec->close && p->avctx)
            codec->close(p->avctx);

        release_delayed_buffers(p);
        av_frame_free(&p->frame);
    }

    for (i = 0; i < thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];

        pthread_mutex_destroy(&p->mutex);
        pthread_mutex_destroy(&p->progress_mutex);
        pthread_cond_destroy(&p->input_cond);
        pthread_cond_destroy(&p->progress_cond);
        pthread_cond_destroy(&p->output_cond);
        av_packet_unref(&p->avpkt);
        av_freep(&p->released_buffers);

        if (p->avctx) {
            if (codec->priv_class)
                av_opt_free(p->avctx->priv_data);
            av_freep(&p->avctx->priv_data);

            av_freep(&p->avctx->slice_offset);
        }

        if (p->avctx) {
            av_freep(&p->avctx->internal);
            av_buffer_unref(&p->avctx->hw_frames_ctx);
        }

        av_freep(&p->avctx);
    }

    av_freep(&fctx->threads);
    pthread_mutex_destroy(&fctx->buffer_mutex);
    pthread_mutex_destroy(&fctx->hwaccel_mutex);

    pthread_mutex_destroy(&fctx->async_mutex);
    pthread_cond_destroy(&fctx->async_cond);

    av_freep(&avctx->internal->thread_ctx);

    if (avctx->priv_data && avctx->codec && avctx->codec->priv_class)
        av_opt_free(avctx->priv_data);

    avctx->codec = NULL;
}

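/**
 * Allocate the frame-threading contexts, create one AVCodecContext copy per
 * thread (each with its own priv_data and codec init), and start the workers.
 */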
int ff_frame_thread_init(AVCodecContext *avctx)
{
    int thread_count = avctx->thread_count;
    const AVCodec *codec = avctx->codec;
    AVCodecContext *src = avctx;
    FrameThreadContext *fctx;
    int i, err = 0;

    if (!thread_count) {
        int nb_cpus = av_cpu_count();
#if FF_API_DEBUG_MV
        if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) || avctx->debug_mv)
            nb_cpus = 1;
#endif
        // use number of cores + 1 as thread count if there is more than one
        if (nb_cpus > 1)
            thread_count = avctx->thread_count = FFMIN(nb_cpus + 1, MAX_AUTO_THREADS);
        else
            thread_count = avctx->thread_count = 1;
    }

    if (thread_count <= 1) {
        avctx->active_thread_type = 0;
        return 0;
    }

    avctx->internal->thread_ctx = fctx = av_mallocz(sizeof(FrameThreadContext));
    if (!fctx)
        return AVERROR(ENOMEM);

    fctx->threads = av_mallocz_array(thread_count, sizeof(PerThreadContext));
    if (!fctx->threads) {
        av_freep(&avctx->internal->thread_ctx);
        return AVERROR(ENOMEM);
    }

    pthread_mutex_init(&fctx->buffer_mutex, NULL);
    pthread_mutex_init(&fctx->hwaccel_mutex, NULL);
    pthread_mutex_init(&fctx->async_mutex, NULL);
    pthread_cond_init(&fctx->async_cond, NULL);

    fctx->async_lock = 1;
    fctx->delaying = 1;

    for (i = 0; i < thread_count; i++) {
        AVCodecContext *copy = av_malloc(sizeof(AVCodecContext));
        PerThreadContext *p  = &fctx->threads[i];

        pthread_mutex_init(&p->mutex, NULL);
        pthread_mutex_init(&p->progress_mutex, NULL);
        pthread_cond_init(&p->input_cond, NULL);
        pthread_cond_init(&p->progress_cond, NULL);
        pthread_cond_init(&p->output_cond, NULL);

        p->frame = av_frame_alloc();
        if (!p->frame) {
            av_freep(&copy);
            err = AVERROR(ENOMEM);
            goto error;
        }

        p->parent = fctx;
        p->avctx  = copy;

        if (!copy) {
            err = AVERROR(ENOMEM);
            goto error;
        }

        *copy = *src;

        copy->internal = av_malloc(sizeof(AVCodecInternal));
        if (!copy->internal) {
            copy->priv_data = NULL;
            err = AVERROR(ENOMEM);
            goto error;
        }
        *copy->internal = *src->internal;
        copy->internal->thread_ctx = p;
        copy->internal->last_pkt_props = &p->avpkt;

        if (codec->priv_data_size) {
            copy->priv_data = av_mallocz(codec->priv_data_size);
            if (!copy->priv_data) {
                err = AVERROR(ENOMEM);
                goto error;
            }

            if (codec->priv_class) {
                *(const AVClass **)copy->priv_data = codec->priv_class;
                err = av_opt_copy(copy->priv_data, src->priv_data);
                if (err < 0)
                    goto error;
            }
        }

        if (i)
            copy->internal->is_copy = 1;

        if (codec->init)
            err = codec->init(copy);

        if (err) goto error;

        if (!i)
            update_context_from_thread(avctx, copy, 1);

        atomic_init(&p->debug_threads, (copy->debug & FF_DEBUG_THREADS) != 0);

        err = AVERROR(pthread_create(&p->thread, NULL, frame_worker_thread, p));
        p->thread_init= !err;
        if(!p->thread_init)
            goto error;
    }

    return 0;

error:
    ff_frame_thread_free(avctx, i+1);

    return err;
}

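/**
 * Flush all worker threads: park them, reset the submission/return indices
 * and per-thread output state, and call the codec's flush() on each copy.
 */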
void ff_thread_flush(AVCodecContext *avctx)
{
    int i;
    FrameThreadContext *fctx = avctx->internal->thread_ctx;

    if (!fctx) return;

    park_frame_worker_threads(fctx, avctx->thread_count);
    if (fctx->prev_thread) {
        if (fctx->prev_thread != &fctx->threads[0])
            update_context_from_thread(fctx->threads[0].avctx, fctx->prev_thread->avctx, 0);
    }

    fctx->next_decoding = fctx->next_finished = 0;
    fctx->delaying = 1;
    fctx->prev_thread = NULL;
    for (i = 0; i < avctx->thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];
        // Make sure decode flush calls with size=0 won't return old frames
        p->got_frame = 0;
        av_frame_unref(p->frame);
        p->result = 0;

        release_delayed_buffers(p);

        if (avctx->codec->flush)
            avctx->codec->flush(p->avctx);
    }
}

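/**
 * Return whether the codec may still start decoding a new frame: with frame
 * threading active this is only allowed during the setup phase, unless the
 * codec has no update_thread_context() and the callbacks are thread-safe.
 */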
int ff_thread_can_start_frame(AVCodecContext *avctx)
{
    PerThreadContext *p = avctx->internal->thread_ctx;
    if ((avctx->active_thread_type&FF_THREAD_FRAME) && atomic_load(&p->state) != STATE_SETTING_UP &&
        (avctx->codec->update_thread_context || !THREAD_SAFE_CALLBACKS(avctx))) {
        return 0;
    }
    return 1;
}

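/*
 * Allocate a buffer for the requested frame, either directly (thread-safe
 * callbacks) or by handing the request over to the main thread; also attaches
 * a progress array when the codec declares FF_CODEC_CAP_ALLOCATE_PROGRESS.
 */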
static int thread_get_buffer_internal(AVCodecContext *avctx, ThreadFrame *f, int flags)
{
    PerThreadContext *p = avctx->internal->thread_ctx;
    int err;

    f->owner[0] = f->owner[1] = avctx;

    if (!(avctx->active_thread_type & FF_THREAD_FRAME))
        return ff_get_buffer(avctx, f->f, flags);

    if (atomic_load(&p->state) != STATE_SETTING_UP &&
        (avctx->codec->update_thread_context || !THREAD_SAFE_CALLBACKS(avctx))) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() cannot be called after ff_thread_finish_setup()\n");
        return -1;
    }

    if (avctx->codec->caps_internal & FF_CODEC_CAP_ALLOCATE_PROGRESS) {
        atomic_int *progress;
        f->progress = av_buffer_alloc(2 * sizeof(*progress));
        if (!f->progress) {
            return AVERROR(ENOMEM);
        }
        progress = (atomic_int*)f->progress->data;

        atomic_init(&progress[0], -1);
        atomic_init(&progress[1], -1);
    }

    pthread_mutex_lock(&p->parent->buffer_mutex);
    if (THREAD_SAFE_CALLBACKS(avctx)) {
        err = ff_get_buffer(avctx, f->f, flags);
    } else {
        pthread_mutex_lock(&p->progress_mutex);
        p->requested_frame = f->f;
        p->requested_flags = flags;
        atomic_store_explicit(&p->state, STATE_GET_BUFFER, memory_order_release);
        pthread_cond_broadcast(&p->progress_cond);

        while (atomic_load(&p->state) != STATE_SETTING_UP)
            pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

        err = p->result;

        pthread_mutex_unlock(&p->progress_mutex);
    }
    if (!THREAD_SAFE_CALLBACKS(avctx) && !avctx->codec->update_thread_context)
        ff_thread_finish_setup(avctx);
    if (err)
        av_buffer_unref(&f->progress);

    pthread_mutex_unlock(&p->parent->buffer_mutex);

    return err;
}

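/*
 * Wrapper around ff_get_format() that routes the call through the main thread
 * when the user's get_format() callback is not thread-safe.
 */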
enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
{
    enum AVPixelFormat res;
    PerThreadContext *p = avctx->internal->thread_ctx;
    if (!(avctx->active_thread_type & FF_THREAD_FRAME) || avctx->thread_safe_callbacks ||
        avctx->get_format == avcodec_default_get_format)
        return ff_get_format(avctx, fmt);
    if (atomic_load(&p->state) != STATE_SETTING_UP) {
        av_log(avctx, AV_LOG_ERROR, "get_format() cannot be called after ff_thread_finish_setup()\n");
        return -1;
    }
    pthread_mutex_lock(&p->progress_mutex);
    p->available_formats = fmt;
    atomic_store(&p->state, STATE_GET_FORMAT);
    pthread_cond_broadcast(&p->progress_cond);

    while (atomic_load(&p->state) != STATE_SETTING_UP)
        pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

    res = p->result_format;

    pthread_mutex_unlock(&p->progress_mutex);

    return res;
}

int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
{
    int ret = thread_get_buffer_internal(avctx, f, flags);
    if (ret < 0)
        av_log(avctx, AV_LOG_ERROR, "thread_get_buffer() failed\n");
    return ret;
}

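/*
 * Release a frame buffer. If the buffers may be freed directly they are
 * unreferenced here; otherwise the frame is queued on the owning thread's
 * released_buffers list and freed later by that thread.
 */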
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
{
    PerThreadContext *p = avctx->internal->thread_ctx;
    FrameThreadContext *fctx;
    AVFrame *dst, *tmp;
    int ret = 0;
    int can_direct_free = !(avctx->active_thread_type & FF_THREAD_FRAME) ||
                          THREAD_SAFE_CALLBACKS(avctx);

    if (!f->f)
        return;

    if (avctx->debug & FF_DEBUG_BUFFERS)
        av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f);

    av_buffer_unref(&f->progress);
    f->owner[0] = f->owner[1] = NULL;

    // when the frame buffers are not allocated, just reset it to clean state
    if (can_direct_free || !f->f->buf[0]) {
        av_frame_unref(f->f);
        return;
    }

    fctx = p->parent;
    pthread_mutex_lock(&fctx->buffer_mutex);

    if (p->num_released_buffers + 1 >= INT_MAX / sizeof(*p->released_buffers)) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    tmp = av_fast_realloc(p->released_buffers, &p->released_buffers_allocated,
                          (p->num_released_buffers + 1) *
                          sizeof(*p->released_buffers));
    if (!tmp) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    p->released_buffers = tmp;

    dst = &p->released_buffers[p->num_released_buffers];
    av_frame_move_ref(dst, f->f);

    p->num_released_buffers++;

fail:
    pthread_mutex_unlock(&fctx->buffer_mutex);

    // make sure the frame is clean even if we fail to free it
    // this leaks, but it is better than crashing
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Could not queue a frame for freeing, this will leak\n");
        memset(f->f->buf, 0, sizeof(f->f->buf));
        if (f->f->extended_buf)
            memset(f->f->extended_buf, 0, f->f->nb_extended_buf * sizeof(*f->f->extended_buf));
        av_frame_unref(f->f);
    }
}