/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Frame multithreading support functions
 * @see doc/multithreading.txt
 */

#include "config.h"

#include <stdatomic.h>
#include <stdint.h>

#include "avcodec.h"
#include "hwaccel.h"
#include "internal.h"
#include "pthread_internal.h"
#include "thread.h"
#include "version.h"

#include "libavutil/avassert.h"
#include "libavutil/buffer.h"
#include "libavutil/common.h"
#include "libavutil/cpu.h"
#include "libavutil/frame.h"
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/thread.h"

enum {
    STATE_INPUT_READY,          ///< Set when the thread is awaiting a packet.
    STATE_SETTING_UP,           ///< Set before the codec has called ff_thread_finish_setup().
    /**
     * Set when the codec calls get_buffer().
     * State is returned to STATE_SETTING_UP afterwards.
     */
    STATE_GET_BUFFER,
    /**
     * Set when the codec calls get_format().
     * State is returned to STATE_SETTING_UP afterwards.
     */
    STATE_GET_FORMAT,
    STATE_SETUP_FINISHED,       ///< Set after the codec has called ff_thread_finish_setup().
};

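/*
 * Summary of the state machine above: a worker starts in STATE_INPUT_READY,
 * moves to STATE_SETTING_UP when a packet is submitted, may bounce between
 * STATE_GET_BUFFER/STATE_GET_FORMAT and STATE_SETTING_UP while the main
 * thread services non-thread-safe callbacks, reaches STATE_SETUP_FINISHED
 * once ff_thread_finish_setup() has been called, and returns to
 * STATE_INPUT_READY when the decode call completes.
 */
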
/**
 * Context used by codec threads and stored in their AVCodecInternal thread_ctx.
 */
typedef struct PerThreadContext {
    struct FrameThreadContext *parent;

    pthread_t      thread;
    int            thread_init;
    pthread_cond_t input_cond;      ///< Used to wait for a new packet from the main thread.
    pthread_cond_t progress_cond;   ///< Used by child threads to wait for progress to change.
    pthread_cond_t output_cond;     ///< Used by the main thread to wait for frames to finish.

    pthread_mutex_t mutex;          ///< Mutex used to protect the contents of the PerThreadContext.
    pthread_mutex_t progress_mutex; ///< Mutex used to protect frame progress values and progress_cond.

    AVCodecContext *avctx;          ///< Context used to decode packets passed to this thread.

    AVPacket avpkt;                 ///< Input packet (for decoding) or output (for encoding).

    AVFrame *frame;                 ///< Output frame (for decoding) or input (for encoding).
    int      got_frame;             ///< The output of got_picture_ptr from the last avcodec_decode_video() call.
    int      result;                ///< The result of the last codec decode/encode() call.

    atomic_int state;

    /**
     * Array of frames passed to ff_thread_release_buffer().
     * Frames are released after all threads referencing them are finished.
     */
    AVFrame *released_buffers;
    int      num_released_buffers;
    int      released_buffers_allocated;

    AVFrame *requested_frame;       ///< AVFrame the codec passed to get_buffer()
    int      requested_flags;       ///< flags passed to get_buffer() for requested_frame

    const enum AVPixelFormat *available_formats; ///< Format array for get_format()
    enum AVPixelFormat result_format;            ///< get_format() result

    int die;                        ///< Set when the thread should exit.

    int hwaccel_serializing;
    int async_serializing;

    atomic_int debug_threads;       ///< Set if the FF_DEBUG_THREADS option is set.
} PerThreadContext;

/**
 * Context stored in the client AVCodecInternal thread_ctx.
 */
typedef struct FrameThreadContext {
    PerThreadContext *threads;     ///< The contexts for each thread.
    PerThreadContext *prev_thread; ///< The last thread submit_packet() was called on.

    pthread_mutex_t buffer_mutex;  ///< Mutex used to protect get/release_buffer().
    /**
     * This lock is used for ensuring threads run in serial when hwaccel
     * is used.
     */
    pthread_mutex_t hwaccel_mutex;
    pthread_mutex_t async_mutex;
    pthread_cond_t  async_cond;
    int             async_lock;

    int next_decoding;             ///< The next context to submit a packet to.
    int next_finished;             ///< The next context to return output from.

    int delaying;                  /**<
                                    * Set for the first N packets, where N is the number of threads.
                                    * While it is set, ff_thread_en/decode_frame won't return any results.
                                    */
} FrameThreadContext;

#define THREAD_SAFE_CALLBACKS(avctx) \
((avctx)->thread_safe_callbacks || (avctx)->get_buffer2 == avcodec_default_get_buffer2)

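/*
 * Note: callbacks count as thread-safe either when the client has set
 * thread_safe_callbacks explicitly or when it still uses the default
 * get_buffer2(); in both cases workers may invoke them directly instead of
 * handing the call back to the main thread in submit_packet().
 */

/*
 * async_lock()/async_unlock() implement a simple binary lock on top of
 * async_mutex/async_cond. Roughly, the lock is held while control is outside
 * ff_thread_decode_frame(); hwaccels that are not async-safe take it in
 * ff_thread_finish_setup() and release it after decoding, so their work never
 * overlaps with the caller's use of the context.
 */
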
static void async_lock(FrameThreadContext *fctx)
{
    pthread_mutex_lock(&fctx->async_mutex);
    while (fctx->async_lock)
        pthread_cond_wait(&fctx->async_cond, &fctx->async_mutex);
    fctx->async_lock = 1;
    pthread_mutex_unlock(&fctx->async_mutex);
}

static void async_unlock(FrameThreadContext *fctx)
{
    pthread_mutex_lock(&fctx->async_mutex);
    av_assert0(fctx->async_lock);
    fctx->async_lock = 0;
    pthread_cond_broadcast(&fctx->async_cond);
    pthread_mutex_unlock(&fctx->async_mutex);
}

/**
 * Codec worker thread.
 *
 * Automatically calls ff_thread_finish_setup() if the codec does
 * not provide an update_thread_context method, or if the codec returns
 * before calling it.
 */
static attribute_align_arg void *frame_worker_thread(void *arg)
{
    PerThreadContext *p = arg;
    AVCodecContext *avctx = p->avctx;
    const AVCodec *codec = avctx->codec;

    pthread_mutex_lock(&p->mutex);
    while (1) {
        while (atomic_load(&p->state) == STATE_INPUT_READY && !p->die)
            pthread_cond_wait(&p->input_cond, &p->mutex);

        if (p->die) break;

        if (!codec->update_thread_context && THREAD_SAFE_CALLBACKS(avctx))
            ff_thread_finish_setup(avctx);

        /* If a decoder supports hwaccel, then it must call ff_get_format().
         * Since that call must happen before ff_thread_finish_setup(), the
         * decoder is required to implement update_thread_context() and call
         * ff_thread_finish_setup() manually. Therefore the above
         * ff_thread_finish_setup() call did not happen and hwaccel_serializing
         * cannot be true here. */
        av_assert0(!p->hwaccel_serializing);

        /* if the previous thread uses hwaccel then we take the lock to ensure
         * the threads don't run concurrently */
        if (avctx->hwaccel) {
            pthread_mutex_lock(&p->parent->hwaccel_mutex);
            p->hwaccel_serializing = 1;
        }

        av_frame_unref(p->frame);
        p->got_frame = 0;
        p->result = codec->decode(avctx, p->frame, &p->got_frame, &p->avpkt);

        if ((p->result < 0 || !p->got_frame) && p->frame->buf[0]) {
            if (avctx->internal->allocate_progress)
                av_log(avctx, AV_LOG_ERROR, "A frame threaded decoder did not "
                       "free the frame on failure. This is a bug, please report it.\n");
            av_frame_unref(p->frame);
        }

        if (atomic_load(&p->state) == STATE_SETTING_UP)
            ff_thread_finish_setup(avctx);

        if (p->hwaccel_serializing) {
            p->hwaccel_serializing = 0;
            pthread_mutex_unlock(&p->parent->hwaccel_mutex);
        }

        if (p->async_serializing) {
            p->async_serializing = 0;
            async_unlock(p->parent);
        }

        pthread_mutex_lock(&p->progress_mutex);

        atomic_store(&p->state, STATE_INPUT_READY);

        pthread_cond_broadcast(&p->progress_cond);
        pthread_cond_signal(&p->output_cond);
        pthread_mutex_unlock(&p->progress_mutex);
    }
    pthread_mutex_unlock(&p->mutex);

    return NULL;
}

/**
 * Update the next thread's AVCodecContext with values from the reference thread's context.
 *
 * @param dst The destination context.
 * @param src The source context.
 * @param for_user 0 if the destination is a codec thread, 1 if the destination is the user's thread
 * @return 0 on success, negative error code on failure
 */
static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src, int for_user)
{
    int err = 0;

    if (dst != src && (for_user || !(av_codec_get_codec_descriptor(src)->props & AV_CODEC_PROP_INTRA_ONLY))) {
        dst->time_base  = src->time_base;
        dst->framerate  = src->framerate;
        dst->width      = src->width;
        dst->height     = src->height;
        dst->pix_fmt    = src->pix_fmt;
        dst->sw_pix_fmt = src->sw_pix_fmt;

        dst->coded_width  = src->coded_width;
        dst->coded_height = src->coded_height;

        dst->has_b_frames = src->has_b_frames;
        dst->idct_algo    = src->idct_algo;

        dst->bits_per_coded_sample = src->bits_per_coded_sample;
        dst->sample_aspect_ratio   = src->sample_aspect_ratio;
#if FF_API_AFD
FF_DISABLE_DEPRECATION_WARNINGS
        dst->dtg_active_format     = src->dtg_active_format;
FF_ENABLE_DEPRECATION_WARNINGS
#endif /* FF_API_AFD */

        dst->profile = src->profile;
        dst->level   = src->level;

        dst->bits_per_raw_sample = src->bits_per_raw_sample;
        dst->ticks_per_frame     = src->ticks_per_frame;
        dst->color_primaries     = src->color_primaries;

        dst->color_trc   = src->color_trc;
        dst->colorspace  = src->colorspace;
        dst->color_range = src->color_range;
        dst->chroma_sample_location = src->chroma_sample_location;

        dst->hwaccel         = src->hwaccel;
        dst->hwaccel_context = src->hwaccel_context;

        dst->channels       = src->channels;
        dst->sample_rate    = src->sample_rate;
        dst->sample_fmt     = src->sample_fmt;
        dst->channel_layout = src->channel_layout;
        dst->internal->hwaccel_priv_data = src->internal->hwaccel_priv_data;

        if (!!dst->hw_frames_ctx != !!src->hw_frames_ctx ||
            (dst->hw_frames_ctx && dst->hw_frames_ctx->data != src->hw_frames_ctx->data)) {
            av_buffer_unref(&dst->hw_frames_ctx);

            if (src->hw_frames_ctx) {
                dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
                if (!dst->hw_frames_ctx)
                    return AVERROR(ENOMEM);
            }
        }

        dst->hwaccel_flags = src->hwaccel_flags;
    }

    if (for_user) {
        dst->delay       = src->thread_count - 1;
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
        dst->coded_frame = src->coded_frame;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    } else {
        if (dst->codec->update_thread_context)
            err = dst->codec->update_thread_context(dst, src);
    }

    return err;
}

/**
 * Update the next thread's AVCodecContext with values set by the user.
 *
 * @param dst The destination context.
 * @param src The source context.
 * @return 0 on success, negative error code on failure
 */
static int update_context_from_user(AVCodecContext *dst, AVCodecContext *src)
{
#define copy_fields(s, e) memcpy(&dst->s, &src->s, (char*)&dst->e - (char*)&dst->s);
    dst->flags = src->flags;

    dst->draw_horiz_band = src->draw_horiz_band;
    dst->get_buffer2     = src->get_buffer2;

    dst->opaque   = src->opaque;
    dst->debug    = src->debug;
    dst->debug_mv = src->debug_mv;

    dst->slice_flags = src->slice_flags;
    dst->flags2      = src->flags2;

    copy_fields(skip_loop_filter, subtitle_header);

    dst->frame_number     = src->frame_number;
    dst->reordered_opaque = src->reordered_opaque;
    dst->thread_safe_callbacks = src->thread_safe_callbacks;

    if (src->slice_count && src->slice_offset) {
        if (dst->slice_count < src->slice_count) {
            int err = av_reallocp_array(&dst->slice_offset, src->slice_count,
                                        sizeof(*dst->slice_offset));
            if (err < 0)
                return err;
        }
        memcpy(dst->slice_offset, src->slice_offset,
               src->slice_count * sizeof(*dst->slice_offset));
    }
    dst->slice_count = src->slice_count;
    return 0;
#undef copy_fields
}

/// Releases the buffers that this decoding thread was the last user of.
static void release_delayed_buffers(PerThreadContext *p)
{
    FrameThreadContext *fctx = p->parent;

    while (p->num_released_buffers > 0) {
        AVFrame *f;

        pthread_mutex_lock(&fctx->buffer_mutex);

        // fix extended data in case the caller screwed it up
        av_assert0(p->avctx->codec_type == AVMEDIA_TYPE_VIDEO ||
                   p->avctx->codec_type == AVMEDIA_TYPE_AUDIO);
        f = &p->released_buffers[--p->num_released_buffers];
        f->extended_data = f->data;
        av_frame_unref(f);

        pthread_mutex_unlock(&fctx->buffer_mutex);
    }
}

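/*
 * submit_packet(): hands one packet to a worker thread. It copies the
 * user-visible settings into the worker's context, pulls state forward from
 * the previous worker once that worker has finished setting up, wakes the
 * worker, and, when the client's callbacks are not thread-safe, services
 * get_buffer()/get_format() requests on the calling thread until setup is
 * finished.
 */
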
static int submit_packet(PerThreadContext *p, AVCodecContext *user_avctx,
                         AVPacket *avpkt)
{
    FrameThreadContext *fctx = p->parent;
    PerThreadContext *prev_thread = fctx->prev_thread;
    const AVCodec *codec = p->avctx->codec;
    int ret;

    if (!avpkt->size && !(codec->capabilities & AV_CODEC_CAP_DELAY))
        return 0;

    pthread_mutex_lock(&p->mutex);

    ret = update_context_from_user(p->avctx, user_avctx);
    if (ret) {
        pthread_mutex_unlock(&p->mutex);
        return ret;
    }
    atomic_store_explicit(&p->debug_threads,
                          (p->avctx->debug & FF_DEBUG_THREADS) != 0,
                          memory_order_relaxed);

    release_delayed_buffers(p);

    if (prev_thread) {
        int err;
        if (atomic_load(&prev_thread->state) == STATE_SETTING_UP) {
            pthread_mutex_lock(&prev_thread->progress_mutex);
            while (atomic_load(&prev_thread->state) == STATE_SETTING_UP)
                pthread_cond_wait(&prev_thread->progress_cond, &prev_thread->progress_mutex);
            pthread_mutex_unlock(&prev_thread->progress_mutex);
        }

        err = update_context_from_thread(p->avctx, prev_thread->avctx, 0);
        if (err) {
            pthread_mutex_unlock(&p->mutex);
            return err;
        }
    }

    av_packet_unref(&p->avpkt);
    ret = av_packet_ref(&p->avpkt, avpkt);
    if (ret < 0) {
        pthread_mutex_unlock(&p->mutex);
        av_log(p->avctx, AV_LOG_ERROR, "av_packet_ref() failed in submit_packet()\n");
        return ret;
    }

    atomic_store(&p->state, STATE_SETTING_UP);
    pthread_cond_signal(&p->input_cond);
    pthread_mutex_unlock(&p->mutex);

    /*
     * If the client doesn't have a thread-safe get_buffer(),
     * then decoding threads call back to the main thread,
     * and it calls back to the client here.
     */

    if (!p->avctx->thread_safe_callbacks && (
         p->avctx->get_format != avcodec_default_get_format ||
         p->avctx->get_buffer2 != avcodec_default_get_buffer2)) {
        while (atomic_load(&p->state) != STATE_SETUP_FINISHED && atomic_load(&p->state) != STATE_INPUT_READY) {
            int call_done = 1;
            pthread_mutex_lock(&p->progress_mutex);
            while (atomic_load(&p->state) == STATE_SETTING_UP)
                pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

            switch (atomic_load_explicit(&p->state, memory_order_acquire)) {
            case STATE_GET_BUFFER:
                p->result = ff_get_buffer(p->avctx, p->requested_frame, p->requested_flags);
                break;
            case STATE_GET_FORMAT:
                p->result_format = ff_get_format(p->avctx, p->available_formats);
                break;
            default:
                call_done = 0;
                break;
            }
            if (call_done) {
                atomic_store(&p->state, STATE_SETTING_UP);
                pthread_cond_signal(&p->progress_cond);
            }
            pthread_mutex_unlock(&p->progress_mutex);
        }
    }

    fctx->prev_thread = p;
    fctx->next_decoding++;

    return 0;
}

int ff_thread_decode_frame(AVCodecContext *avctx,
                           AVFrame *picture, int *got_picture_ptr,
                           AVPacket *avpkt)
{
    FrameThreadContext *fctx = avctx->internal->thread_ctx;
    int finished = fctx->next_finished;
    PerThreadContext *p;
    int err;

    /* release the async lock, permitting blocked hwaccel threads to
     * go forward while we are in this function */
    async_unlock(fctx);

    /*
     * Submit a packet to the next decoding thread.
     */

    p = &fctx->threads[fctx->next_decoding];
    err = submit_packet(p, avctx, avpkt);
    if (err)
        goto finish;

    /*
     * If we're still receiving the initial packets, don't return a frame.
     */

    if (fctx->next_decoding > (avctx->thread_count-1-(avctx->codec_id == AV_CODEC_ID_FFV1)))
        fctx->delaying = 0;

    if (fctx->delaying) {
        *got_picture_ptr = 0;
        if (avpkt->size) {
            err = avpkt->size;
            goto finish;
        }
    }

    /*
     * Return the next available frame from the oldest thread.
     * If we're at the end of the stream, then we have to skip threads that
     * didn't output a frame/error, because we don't want to accidentally signal
     * EOF (avpkt->size == 0 && *got_picture_ptr == 0 && err >= 0).
     */

    do {
        p = &fctx->threads[finished++];

        if (atomic_load(&p->state) != STATE_INPUT_READY) {
            pthread_mutex_lock(&p->progress_mutex);
            while (atomic_load_explicit(&p->state, memory_order_relaxed) != STATE_INPUT_READY)
                pthread_cond_wait(&p->output_cond, &p->progress_mutex);
            pthread_mutex_unlock(&p->progress_mutex);
        }

        av_frame_move_ref(picture, p->frame);
        *got_picture_ptr = p->got_frame;
        picture->pkt_dts = p->avpkt.dts;
        err = p->result;

        /*
         * A later call with avpkt->size == 0 may loop over all threads,
         * including this one, searching for a frame/error to return before being
         * stopped by the "finished != fctx->next_finished" condition.
         * Make sure we don't mistakenly return the same frame/error again.
         */
        p->got_frame = 0;
        p->result    = 0;

        if (finished >= avctx->thread_count) finished = 0;
    } while (!avpkt->size && !*got_picture_ptr && err >= 0 && finished != fctx->next_finished);

    update_context_from_thread(avctx, p->avctx, 1);

    if (fctx->next_decoding >= avctx->thread_count) fctx->next_decoding = 0;

    fctx->next_finished = finished;

    /* return the size of the consumed packet if no error occurred */
    if (err >= 0)
        err = avpkt->size;
finish:
    async_lock(fctx);
    return err;
}

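/*
 * Frame progress: when allocate_progress is set, ff_thread_get_buffer()
 * attaches a buffer holding two atomic counters (one per field) to the
 * ThreadFrame, initialized to -1. The decoding thread raises a counter via
 * ff_thread_report_progress(), and other threads block in
 * ff_thread_await_progress() until the value they need has been reached.
 */
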
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
{
    PerThreadContext *p;
    atomic_int *progress = f->progress ? (atomic_int*)f->progress->data : NULL;

    if (!progress ||
        atomic_load_explicit(&progress[field], memory_order_relaxed) >= n)
        return;

    p = f->owner[field]->internal->thread_ctx;

    if (atomic_load_explicit(&p->debug_threads, memory_order_relaxed))
        av_log(f->owner[field], AV_LOG_DEBUG,
               "%p finished %d field %d\n", progress, n, field);

    pthread_mutex_lock(&p->progress_mutex);

    atomic_store_explicit(&progress[field], n, memory_order_release);

    pthread_cond_broadcast(&p->progress_cond);
    pthread_mutex_unlock(&p->progress_mutex);
}

void ff_thread_await_progress(ThreadFrame *f, int n, int field)
{
    PerThreadContext *p;
    atomic_int *progress = f->progress ? (atomic_int*)f->progress->data : NULL;

    if (!progress ||
        atomic_load_explicit(&progress[field], memory_order_acquire) >= n)
        return;

    p = f->owner[field]->internal->thread_ctx;

    if (atomic_load_explicit(&p->debug_threads, memory_order_relaxed))
        av_log(f->owner[field], AV_LOG_DEBUG,
               "thread awaiting %d field %d from %p\n", n, field, progress);

    pthread_mutex_lock(&p->progress_mutex);
    while (atomic_load_explicit(&progress[field], memory_order_relaxed) < n)
        pthread_cond_wait(&p->progress_cond, &p->progress_mutex);
    pthread_mutex_unlock(&p->progress_mutex);
}

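/*
 * Illustrative sketch (not part of the original file): a frame-threaded
 * decoder typically pairs the calls above roughly as follows, where "pic" is
 * the ThreadFrame being decoded and "ref" is a reference frame owned by
 * another thread. Names are hypothetical; see doc/multithreading.txt for the
 * actual requirements.
 *
 *     ff_thread_finish_setup(avctx);                // all context setup done
 *     for (row = 0; row < rows; row++) {
 *         ff_thread_await_progress(&ref, row, 0);   // wait before reading row of ref
 *         // ... decode one row of pic using ref ...
 *         ff_thread_report_progress(&pic, row, 0);  // publish row of pic
 *     }
 *     ff_thread_report_progress(&pic, INT_MAX, 0);  // frame fully decoded
 */
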
void ff_thread_finish_setup(AVCodecContext *avctx) {
    PerThreadContext *p = avctx->internal->thread_ctx;

    if (!(avctx->active_thread_type&FF_THREAD_FRAME)) return;

    if (avctx->hwaccel && !p->hwaccel_serializing) {
        pthread_mutex_lock(&p->parent->hwaccel_mutex);
        p->hwaccel_serializing = 1;
    }

    /* this assumes that no hwaccel calls happen before ff_thread_finish_setup() */
    if (avctx->hwaccel &&
        !(avctx->hwaccel->caps_internal & HWACCEL_CAP_ASYNC_SAFE)) {
        p->async_serializing = 1;

        async_lock(p->parent);
    }

    pthread_mutex_lock(&p->progress_mutex);
    if (atomic_load(&p->state) == STATE_SETUP_FINISHED) {
        av_log(avctx, AV_LOG_WARNING, "Multiple ff_thread_finish_setup() calls\n");
    }

    atomic_store(&p->state, STATE_SETUP_FINISHED);

    pthread_cond_broadcast(&p->progress_cond);
    pthread_mutex_unlock(&p->progress_mutex);
}

/// Waits for all threads to finish.
static void park_frame_worker_threads(FrameThreadContext *fctx, int thread_count)
{
    int i;

    async_unlock(fctx);

    for (i = 0; i < thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];

        if (atomic_load(&p->state) != STATE_INPUT_READY) {
            pthread_mutex_lock(&p->progress_mutex);
            while (atomic_load(&p->state) != STATE_INPUT_READY)
                pthread_cond_wait(&p->output_cond, &p->progress_mutex);
            pthread_mutex_unlock(&p->progress_mutex);
        }
        p->got_frame = 0;
    }

    async_lock(fctx);
}

void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
{
    FrameThreadContext *fctx = avctx->internal->thread_ctx;
    const AVCodec *codec = avctx->codec;
    int i;

    park_frame_worker_threads(fctx, thread_count);

    if (fctx->prev_thread && fctx->prev_thread != fctx->threads)
        if (update_context_from_thread(fctx->threads->avctx, fctx->prev_thread->avctx, 0) < 0) {
            av_log(avctx, AV_LOG_ERROR, "Final thread update failed\n");
            fctx->prev_thread->avctx->internal->is_copy = fctx->threads->avctx->internal->is_copy;
            fctx->threads->avctx->internal->is_copy = 1;
        }

    for (i = 0; i < thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];

        pthread_mutex_lock(&p->mutex);
        p->die = 1;
        pthread_cond_signal(&p->input_cond);
        pthread_mutex_unlock(&p->mutex);

        if (p->thread_init)
            pthread_join(p->thread, NULL);
        p->thread_init = 0;

        if (codec->close && p->avctx)
            codec->close(p->avctx);

        release_delayed_buffers(p);
        av_frame_free(&p->frame);
    }

    for (i = 0; i < thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];

        pthread_mutex_destroy(&p->mutex);
        pthread_mutex_destroy(&p->progress_mutex);
        pthread_cond_destroy(&p->input_cond);
        pthread_cond_destroy(&p->progress_cond);
        pthread_cond_destroy(&p->output_cond);
        av_packet_unref(&p->avpkt);
        av_freep(&p->released_buffers);

        if (i && p->avctx) {
            av_freep(&p->avctx->priv_data);
            av_freep(&p->avctx->slice_offset);
        }

        if (p->avctx) {
            av_freep(&p->avctx->internal);
            av_buffer_unref(&p->avctx->hw_frames_ctx);
        }

        av_freep(&p->avctx);
    }

    av_freep(&fctx->threads);
    pthread_mutex_destroy(&fctx->buffer_mutex);
    pthread_mutex_destroy(&fctx->hwaccel_mutex);

    pthread_mutex_destroy(&fctx->async_mutex);
    pthread_cond_destroy(&fctx->async_cond);

    av_freep(&avctx->internal->thread_ctx);

    if (avctx->priv_data && avctx->codec && avctx->codec->priv_class)
        av_opt_free(avctx->priv_data);

    avctx->codec = NULL;
}

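/*
 * Note on the setup below: one AVCodecContext copy is created per worker.
 * The first copy runs the codec's real init() and becomes the source the
 * remaining copies are cloned from; the others get their own priv_data, are
 * marked is_copy and, when the codec provides it, initialized through
 * init_thread_copy().
 */
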
int ff_frame_thread_init(AVCodecContext *avctx)
{
    int thread_count = avctx->thread_count;
    const AVCodec *codec = avctx->codec;
    AVCodecContext *src = avctx;
    FrameThreadContext *fctx;
    int i, err = 0;

#if HAVE_W32THREADS
    w32thread_init();
#endif

    if (!thread_count) {
        int nb_cpus = av_cpu_count();
        if ((avctx->debug & (FF_DEBUG_VIS_QP | FF_DEBUG_VIS_MB_TYPE)) || avctx->debug_mv)
            nb_cpus = 1;
        // use number of cores + 1 as thread count if there is more than one
        if (nb_cpus > 1)
            thread_count = avctx->thread_count = FFMIN(nb_cpus + 1, MAX_AUTO_THREADS);
        else
            thread_count = avctx->thread_count = 1;
    }

    if (thread_count <= 1) {
        avctx->active_thread_type = 0;
        return 0;
    }

    avctx->internal->thread_ctx = fctx = av_mallocz(sizeof(FrameThreadContext));
    if (!fctx)
        return AVERROR(ENOMEM);

    fctx->threads = av_mallocz_array(thread_count, sizeof(PerThreadContext));
    if (!fctx->threads) {
        av_freep(&avctx->internal->thread_ctx);
        return AVERROR(ENOMEM);
    }

    pthread_mutex_init(&fctx->buffer_mutex, NULL);
    pthread_mutex_init(&fctx->hwaccel_mutex, NULL);
    pthread_mutex_init(&fctx->async_mutex, NULL);
    pthread_cond_init(&fctx->async_cond, NULL);

    fctx->async_lock = 1;
    fctx->delaying = 1;

    for (i = 0; i < thread_count; i++) {
        AVCodecContext *copy = av_malloc(sizeof(AVCodecContext));
        PerThreadContext *p  = &fctx->threads[i];

        pthread_mutex_init(&p->mutex, NULL);
        pthread_mutex_init(&p->progress_mutex, NULL);
        pthread_cond_init(&p->input_cond, NULL);
        pthread_cond_init(&p->progress_cond, NULL);
        pthread_cond_init(&p->output_cond, NULL);

        p->frame = av_frame_alloc();
        if (!p->frame) {
            av_freep(&copy);
            err = AVERROR(ENOMEM);
            goto error;
        }

        p->parent = fctx;
        p->avctx  = copy;

        if (!copy) {
            err = AVERROR(ENOMEM);
            goto error;
        }

        *copy = *src;

        copy->internal = av_malloc(sizeof(AVCodecInternal));
        if (!copy->internal) {
            copy->priv_data = NULL;
            err = AVERROR(ENOMEM);
            goto error;
        }
        *copy->internal = *src->internal;
        copy->internal->thread_ctx = p;
        copy->internal->last_pkt_props = &p->avpkt;

        if (!i) {
            src = copy;

            if (codec->init)
                err = codec->init(copy);

            update_context_from_thread(avctx, copy, 1);
        } else {
            copy->priv_data = av_malloc(codec->priv_data_size);
            if (!copy->priv_data) {
                err = AVERROR(ENOMEM);
                goto error;
            }
            memcpy(copy->priv_data, src->priv_data, codec->priv_data_size);
            copy->internal->is_copy = 1;

            if (codec->init_thread_copy)
                err = codec->init_thread_copy(copy);
        }

        if (err) goto error;

        atomic_init(&p->debug_threads, (copy->debug & FF_DEBUG_THREADS) != 0);

        err = AVERROR(pthread_create(&p->thread, NULL, frame_worker_thread, p));
        p->thread_init = !err;
        if (!p->thread_init)
            goto error;
    }

    return 0;

error:
    ff_frame_thread_free(avctx, i+1);

    return err;
}

void ff_thread_flush(AVCodecContext *avctx)
{
    int i;
    FrameThreadContext *fctx = avctx->internal->thread_ctx;

    if (!fctx) return;

    park_frame_worker_threads(fctx, avctx->thread_count);
    if (fctx->prev_thread) {
        if (fctx->prev_thread != &fctx->threads[0])
            update_context_from_thread(fctx->threads[0].avctx, fctx->prev_thread->avctx, 0);
    }

    fctx->next_decoding = fctx->next_finished = 0;
    fctx->delaying = 1;
    fctx->prev_thread = NULL;
    for (i = 0; i < avctx->thread_count; i++) {
        PerThreadContext *p = &fctx->threads[i];
        // Make sure decode flush calls with size=0 won't return old frames
        p->got_frame = 0;
        av_frame_unref(p->frame);
        p->result = 0;

        release_delayed_buffers(p);

        if (avctx->codec->flush)
            avctx->codec->flush(p->avctx);
    }
}

int ff_thread_can_start_frame(AVCodecContext *avctx)
{
    PerThreadContext *p = avctx->internal->thread_ctx;
    if ((avctx->active_thread_type&FF_THREAD_FRAME) && atomic_load(&p->state) != STATE_SETTING_UP &&
        (avctx->codec->update_thread_context || !THREAD_SAFE_CALLBACKS(avctx))) {
        return 0;
    }
    return 1;
}

static int thread_get_buffer_internal(AVCodecContext *avctx, ThreadFrame *f, int flags)
{
    PerThreadContext *p = avctx->internal->thread_ctx;
    int err;

    f->owner[0] = f->owner[1] = avctx;

    ff_init_buffer_info(avctx, f->f);

    if (!(avctx->active_thread_type & FF_THREAD_FRAME))
        return ff_get_buffer(avctx, f->f, flags);

    if (atomic_load(&p->state) != STATE_SETTING_UP &&
        (avctx->codec->update_thread_context || !THREAD_SAFE_CALLBACKS(avctx))) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() cannot be called after ff_thread_finish_setup()\n");
        return -1;
    }

    if (avctx->internal->allocate_progress) {
        atomic_int *progress;
        f->progress = av_buffer_alloc(2 * sizeof(*progress));
        if (!f->progress) {
            return AVERROR(ENOMEM);
        }
        progress = (atomic_int*)f->progress->data;

        atomic_init(&progress[0], -1);
        atomic_init(&progress[1], -1);
    }

    pthread_mutex_lock(&p->parent->buffer_mutex);
    if (avctx->thread_safe_callbacks ||
        avctx->get_buffer2 == avcodec_default_get_buffer2) {
        err = ff_get_buffer(avctx, f->f, flags);
    } else {
        pthread_mutex_lock(&p->progress_mutex);
        p->requested_frame = f->f;
        p->requested_flags = flags;
        atomic_store_explicit(&p->state, STATE_GET_BUFFER, memory_order_release);
        pthread_cond_broadcast(&p->progress_cond);

        while (atomic_load(&p->state) != STATE_SETTING_UP)
            pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

        err = p->result;

        pthread_mutex_unlock(&p->progress_mutex);
    }
    if (!THREAD_SAFE_CALLBACKS(avctx) && !avctx->codec->update_thread_context)
        ff_thread_finish_setup(avctx);
    if (err)
        av_buffer_unref(&f->progress);

    pthread_mutex_unlock(&p->parent->buffer_mutex);

    return err;
}

enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
{
    enum AVPixelFormat res;
    PerThreadContext *p = avctx->internal->thread_ctx;
    if (!(avctx->active_thread_type & FF_THREAD_FRAME) || avctx->thread_safe_callbacks ||
        avctx->get_format == avcodec_default_get_format)
        return ff_get_format(avctx, fmt);
    if (atomic_load(&p->state) != STATE_SETTING_UP) {
        av_log(avctx, AV_LOG_ERROR, "get_format() cannot be called after ff_thread_finish_setup()\n");
        return -1;
    }
    pthread_mutex_lock(&p->progress_mutex);
    p->available_formats = fmt;
    atomic_store(&p->state, STATE_GET_FORMAT);
    pthread_cond_broadcast(&p->progress_cond);

    while (atomic_load(&p->state) != STATE_SETTING_UP)
        pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

    res = p->result_format;

    pthread_mutex_unlock(&p->progress_mutex);

    return res;
}

int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
{
    int ret = thread_get_buffer_internal(avctx, f, flags);
    if (ret < 0)
        av_log(avctx, AV_LOG_ERROR, "thread_get_buffer() failed\n");
    return ret;
}

void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
{
    PerThreadContext *p = avctx->internal->thread_ctx;
    FrameThreadContext *fctx;
    AVFrame *dst, *tmp;
    int can_direct_free = !(avctx->active_thread_type & FF_THREAD_FRAME) ||
                          avctx->thread_safe_callbacks ||
                          avctx->get_buffer2 == avcodec_default_get_buffer2;

    if (!f->f || !f->f->buf[0])
        return;

    if (avctx->debug & FF_DEBUG_BUFFERS)
        av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f);

    av_buffer_unref(&f->progress);
    f->owner[0] = f->owner[1] = NULL;

    if (can_direct_free) {
        av_frame_unref(f->f);
        return;
    }

    fctx = p->parent;
    pthread_mutex_lock(&fctx->buffer_mutex);

    if (p->num_released_buffers + 1 >= INT_MAX / sizeof(*p->released_buffers))
        goto fail;
    tmp = av_fast_realloc(p->released_buffers, &p->released_buffers_allocated,
                          (p->num_released_buffers + 1) *
                          sizeof(*p->released_buffers));
    if (!tmp)
        goto fail;
    p->released_buffers = tmp;

    dst = &p->released_buffers[p->num_released_buffers];
    av_frame_move_ref(dst, f->f);

    p->num_released_buffers++;

fail:
    pthread_mutex_unlock(&fctx->buffer_mutex);
}