/*
 * generic decoding-related code
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>
#include <string.h>

#include "config.h"

#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/frame.h"
#include "libavutil/hwcontext.h"
#include "libavutil/imgutils.h"
#include "libavutil/intmath.h"

#include "avcodec.h"
#include "bytestream.h"
#include "decode.h"
#include "hwaccel.h"
#include "internal.h"
#include "thread.h"

static int apply_param_change(AVCodecContext *avctx, AVPacket *avpkt)
{
    int size = 0, ret;
    const uint8_t *data;
    uint32_t flags;

    data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
    if (!data)
        return 0;

    if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
        av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
               "changes, but PARAM_CHANGE side data was sent to it.\n");
        ret = AVERROR(EINVAL);
        goto fail2;
    }

    if (size < 4)
        goto fail;

    flags = bytestream_get_le32(&data);
    size -= 4;

    if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
        if (size < 4)
            goto fail;
        avctx->channels = bytestream_get_le32(&data);
        size -= 4;
    }
    if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
        if (size < 8)
            goto fail;
        avctx->channel_layout = bytestream_get_le64(&data);
        size -= 8;
    }
    if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
        if (size < 4)
            goto fail;
        avctx->sample_rate = bytestream_get_le32(&data);
        size -= 4;
    }
    if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
        if (size < 8)
            goto fail;
        avctx->width  = bytestream_get_le32(&data);
        avctx->height = bytestream_get_le32(&data);
        size -= 8;
        ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
        if (ret < 0)
            goto fail2;
    }

    return 0;
fail:
    av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
    ret = AVERROR_INVALIDDATA;
fail2:
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
        if (avctx->err_recognition & AV_EF_EXPLODE)
            return ret;
    }
    return 0;
}
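
/*
 * For reference, the AV_PKT_DATA_PARAM_CHANGE payload consumed above is a
 * little-endian byte stream laid out as follows; each field after the flags
 * word is present only when the corresponding flag bit is set (this mirrors
 * exactly what the parsing code reads, and is meant as a reader's aid):
 *
 *     u32le flags
 *     if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)   u32le channel_count
 *     if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)  u64le channel_layout
 *     if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)     u32le sample_rate
 *     if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)      u32le width, u32le height
 */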

static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
{
    av_packet_unref(avci->last_pkt_props);
    if (pkt)
        return av_packet_copy_props(avci->last_pkt_props, pkt);

    return 0;
}

static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
{
    int ret;

    /* move the original frame to our backup */
    av_frame_unref(avci->to_free);
    av_frame_move_ref(avci->to_free, frame);

    /* now copy everything except the AVBufferRefs back
     * note that we make a COPY of the side data, so calling av_frame_free() on
     * the caller's frame will work properly */
    ret = av_frame_copy_props(frame, avci->to_free);
    if (ret < 0)
        return ret;

    memcpy(frame->data,     avci->to_free->data,     sizeof(frame->data));
    memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize));
    if (avci->to_free->extended_data != avci->to_free->data) {
        int planes = av_get_channel_layout_nb_channels(avci->to_free->channel_layout);
        int size   = planes * sizeof(*frame->extended_data);

        if (!size) {
            av_frame_unref(frame);
            return AVERROR_BUG;
        }

        frame->extended_data = av_malloc(size);
        if (!frame->extended_data) {
            av_frame_unref(frame);
            return AVERROR(ENOMEM);
        }
        memcpy(frame->extended_data, avci->to_free->extended_data,
               size);
    } else
        frame->extended_data = frame->data;

    frame->format         = avci->to_free->format;
    frame->width          = avci->to_free->width;
    frame->height         = avci->to_free->height;
    frame->channel_layout = avci->to_free->channel_layout;
    frame->nb_samples     = avci->to_free->nb_samples;

    return 0;
}

static int bsfs_init(AVCodecContext *avctx)
{
    AVCodecInternal *avci = avctx->internal;
    DecodeFilterContext *s = &avci->filter;
    const char *bsfs_str;
    int ret;

    if (s->nb_bsfs)
        return 0;

    bsfs_str = avctx->codec->bsfs ? avctx->codec->bsfs : "null";
    while (bsfs_str && *bsfs_str) {
        AVBSFContext **tmp;
        const AVBitStreamFilter *filter;
        char *bsf;

        bsf = av_get_token(&bsfs_str, ",");
        if (!bsf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        filter = av_bsf_get_by_name(bsf);
        if (!filter) {
            av_log(avctx, AV_LOG_ERROR, "A non-existing bitstream filter %s "
                   "requested by a decoder. This is a bug, please report it.\n",
                   bsf);
            ret = AVERROR_BUG;
            av_freep(&bsf);
            goto fail;
        }
        av_freep(&bsf);

        tmp = av_realloc_array(s->bsfs, s->nb_bsfs + 1, sizeof(*s->bsfs));
        if (!tmp) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        s->bsfs = tmp;
        s->nb_bsfs++;

        ret = av_bsf_alloc(filter, &s->bsfs[s->nb_bsfs - 1]);
        if (ret < 0)
            goto fail;

        if (s->nb_bsfs == 1) {
            /* We do not currently have an API for passing the input timebase into decoders,
             * but no filters used here should actually need it.
             * So we make up some plausible-looking number (the MPEG 90kHz timebase) */
            s->bsfs[s->nb_bsfs - 1]->time_base_in = (AVRational){ 1, 90000 };
            ret = avcodec_parameters_from_context(s->bsfs[s->nb_bsfs - 1]->par_in,
                                                  avctx);
        } else {
            s->bsfs[s->nb_bsfs - 1]->time_base_in = s->bsfs[s->nb_bsfs - 2]->time_base_out;
            ret = avcodec_parameters_copy(s->bsfs[s->nb_bsfs - 1]->par_in,
                                          s->bsfs[s->nb_bsfs - 2]->par_out);
        }
        if (ret < 0)
            goto fail;

        ret = av_bsf_init(s->bsfs[s->nb_bsfs - 1]);
        if (ret < 0)
            goto fail;
    }

    return 0;
fail:
    ff_decode_bsfs_uninit(avctx);
    return ret;
}

/* try to get one output packet from the filter chain */
static int bsfs_poll(AVCodecContext *avctx, AVPacket *pkt)
{
    DecodeFilterContext *s = &avctx->internal->filter;
    int idx, ret;

    /* start with the last filter in the chain */
    idx = s->nb_bsfs - 1;
    while (idx >= 0) {
        /* request a packet from the currently selected filter */
        ret = av_bsf_receive_packet(s->bsfs[idx], pkt);
        if (ret == AVERROR(EAGAIN)) {
            /* no packets available, try the next filter up the chain */
            ret = 0;
            idx--;
            continue;
        } else if (ret < 0 && ret != AVERROR_EOF) {
            return ret;
        }

        /* got a packet or EOF -- pass it to the caller or to the next filter
         * down the chain */
        if (idx == s->nb_bsfs - 1) {
            return ret;
        } else {
            idx++;
            ret = av_bsf_send_packet(s->bsfs[idx], ret < 0 ? NULL : pkt);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR,
                       "Error pre-processing a packet before decoding\n");
                av_packet_unref(pkt);
                return ret;
            }
        }
    }

    return AVERROR(EAGAIN);
}

int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    AVCodecInternal *avci = avctx->internal;
    int ret;

    if (avci->draining)
        return AVERROR_EOF;

    ret = bsfs_poll(avctx, pkt);
    if (ret == AVERROR_EOF)
        avci->draining = 1;

    if (ret < 0)
        return ret;

    ret = extract_packet_props(avctx->internal, pkt);
    if (ret < 0)
        goto finish;

    ret = apply_param_change(avctx, pkt);
    if (ret < 0)
        goto finish;

    if (avctx->codec->receive_frame)
        avci->compat_decode_consumed += pkt->size;

    return 0;
finish:
    av_packet_unref(pkt);
    return ret;
}

/*
 * The core of the receive_frame_wrapper for the decoders implementing
 * the simple API. Certain decoders might consume partial packets without
 * returning any output, so this function needs to be called in a loop until it
 * returns EAGAIN.
 **/
static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
{
    AVCodecInternal *avci = avctx->internal;
    DecodeSimpleContext *ds = &avci->ds;
    AVPacket *pkt = ds->in_pkt;
    int got_frame;
    int ret;

    if (!pkt->data && !avci->draining) {
        av_packet_unref(pkt);
        ret = ff_decode_get_packet(avctx, pkt);
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;
    }

    // Some codecs (at least wma lossless) will crash when feeding drain packets
    // after EOF was signaled.
    if (avci->draining_done)
        return AVERROR_EOF;

    if (!pkt->data &&
        !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
          avctx->active_thread_type & FF_THREAD_FRAME))
        return AVERROR_EOF;

    got_frame = 0;

    if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
        ret = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
    } else {
        ret = avctx->codec->decode(avctx, frame, &got_frame, pkt);

        if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
            frame->pkt_dts = pkt->dts;
        /* get_buffer is supposed to set frame parameters */
        if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
            frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
            frame->width               = avctx->width;
            frame->height              = avctx->height;
            frame->format              = avctx->codec->type == AVMEDIA_TYPE_VIDEO ?
                                         avctx->pix_fmt : avctx->sample_fmt;
        }
    }

    emms_c();

    if (!got_frame)
        av_frame_unref(frame);

    if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO)
        ret = pkt->size;

    if (avctx->internal->draining && !got_frame)
        avci->draining_done = 1;

    avci->compat_decode_consumed += ret;

    if (ret >= pkt->size || ret < 0) {
        av_packet_unref(pkt);
    } else {
        int consumed = ret;

        pkt->data                += consumed;
        pkt->size                -= consumed;
        pkt->pts                  = AV_NOPTS_VALUE;
        pkt->dts                  = AV_NOPTS_VALUE;
        avci->last_pkt_props->pts = AV_NOPTS_VALUE;
        avci->last_pkt_props->dts = AV_NOPTS_VALUE;
    }

    if (got_frame)
        av_assert0(frame->buf[0]);

    return ret < 0 ? ret : 0;
}

static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
{
    int ret;

    while (!frame->buf[0]) {
        ret = decode_simple_internal(avctx, frame);
        if (ret < 0)
            return ret;
    }

    return 0;
}

static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
{
    AVCodecInternal *avci = avctx->internal;
    int ret;

    av_assert0(!frame->buf[0]);

    if (avctx->codec->receive_frame)
        ret = avctx->codec->receive_frame(avctx, frame);
    else
        ret = decode_simple_receive_frame(avctx, frame);

    if (ret == AVERROR_EOF)
        avci->draining_done = 1;

    /* unwrap the per-frame decode data and restore the original opaque_ref */
    if (!ret) {
        /* the only case where decode data is not set should be decoders
         * that do not call ff_get_buffer() */
        av_assert0((frame->opaque_ref && frame->opaque_ref->size == sizeof(FrameDecodeData)) ||
                   !(avctx->codec->capabilities & AV_CODEC_CAP_DR1));

        if (frame->opaque_ref) {
            FrameDecodeData *fdd;
            AVBufferRef *user_opaque_ref;

            fdd = (FrameDecodeData*)frame->opaque_ref->data;

            if (fdd->post_process) {
                ret = fdd->post_process(avctx, frame);
                if (ret < 0) {
                    av_frame_unref(frame);
                    return ret;
                }
            }

            user_opaque_ref      = fdd->user_opaque_ref;
            fdd->user_opaque_ref = NULL;
            av_buffer_unref(&frame->opaque_ref);
            frame->opaque_ref = user_opaque_ref;
        }
    }

    return ret;
}

int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
{
    AVCodecInternal *avci = avctx->internal;
    int ret = 0;

    if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
        return AVERROR(EINVAL);

    if (avctx->internal->draining)
        return AVERROR_EOF;

    ret = bsfs_init(avctx);
    if (ret < 0)
        return ret;

    av_packet_unref(avci->buffer_pkt);
    if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
        ret = av_packet_ref(avci->buffer_pkt, avpkt);
        if (ret < 0)
            return ret;
    }

    ret = av_bsf_send_packet(avci->filter.bsfs[0], avci->buffer_pkt);
    if (ret < 0) {
        av_packet_unref(avci->buffer_pkt);
        return ret;
    }

    if (!avci->buffer_frame->buf[0]) {
        ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
        if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            return ret;
    }

    return 0;
}

static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
{
    /* make sure we are noisy about decoders returning invalid cropping data */
    if (frame->crop_left >= INT_MAX - frame->crop_right        ||
        frame->crop_top  >= INT_MAX - frame->crop_bottom       ||
        (frame->crop_left + frame->crop_right) >= frame->width ||
        (frame->crop_top + frame->crop_bottom) >= frame->height) {
        av_log(avctx, AV_LOG_WARNING,
               "Invalid cropping information set by a decoder: %zu/%zu/%zu/%zu "
               "(frame size %dx%d). This is a bug, please report it\n",
               frame->crop_left, frame->crop_right, frame->crop_top, frame->crop_bottom,
               frame->width, frame->height);
        frame->crop_left   = 0;
        frame->crop_right  = 0;
        frame->crop_top    = 0;
        frame->crop_bottom = 0;
        return 0;
    }

    if (!avctx->apply_cropping)
        return 0;

    return av_frame_apply_cropping(frame, avctx->flags & AV_CODEC_FLAG_UNALIGNED ?
                                          AV_FRAME_CROP_UNALIGNED : 0);
}

int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
{
    AVCodecInternal *avci = avctx->internal;
    int ret;

    av_frame_unref(frame);

    if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
        return AVERROR(EINVAL);

    ret = bsfs_init(avctx);
    if (ret < 0)
        return ret;

    if (avci->buffer_frame->buf[0]) {
        av_frame_move_ref(frame, avci->buffer_frame);
    } else {
        ret = decode_receive_frame_internal(avctx, frame);
        if (ret < 0)
            return ret;
    }

    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        ret = apply_cropping(avctx, frame);
        if (ret < 0) {
            av_frame_unref(frame);
            return ret;
        }
    }

    avctx->frame_number++;

    return 0;
}
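
/*
 * For illustration only (not part of the library): a minimal sketch of how a
 * caller is expected to drive the send/receive API implemented above. Error
 * handling is reduced to the bare minimum; "pkt", "frame" and "process()" are
 * hypothetical names on the application side.
 *
 *     // feed one packet, then drain every frame it produced
 *     ret = avcodec_send_packet(avctx, pkt);
 *     while (ret >= 0) {
 *         ret = avcodec_receive_frame(avctx, frame);
 *         if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 *             break;              // needs more input, or decoder fully drained
 *         if (ret < 0)
 *             return ret;         // a real decoding error
 *         process(frame);         // hypothetical consumer
 *         av_frame_unref(frame);
 *     }
 *     // passing NULL instead of pkt enters draining mode at end of stream
 */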

static int compat_decode(AVCodecContext *avctx, AVFrame *frame,
                         int *got_frame, AVPacket *pkt)
{
    AVCodecInternal *avci = avctx->internal;
    int ret = 0;

    av_assert0(avci->compat_decode_consumed == 0);

    *got_frame = 0;
    avci->compat_decode = 1;

    if (avci->compat_decode_partial_size > 0 &&
        avci->compat_decode_partial_size != pkt->size) {
        av_log(avctx, AV_LOG_ERROR,
               "Got unexpected packet size after a partial decode\n");
        ret = AVERROR(EINVAL);
        goto finish;
    }

    if (!avci->compat_decode_partial_size) {
        ret = avcodec_send_packet(avctx, pkt);
        if (ret == AVERROR_EOF)
            ret = 0;
        else if (ret == AVERROR(EAGAIN)) {
            /* we fully drain all the output in each decode call, so this should not
             * ever happen */
            ret = AVERROR_BUG;
            goto finish;
        } else if (ret < 0)
            goto finish;
    }

    while (ret >= 0) {
        ret = avcodec_receive_frame(avctx, frame);
        if (ret < 0) {
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
            goto finish;
        }

        if (frame != avci->compat_decode_frame) {
            if (!avctx->refcounted_frames) {
                ret = unrefcount_frame(avci, frame);
                if (ret < 0)
                    goto finish;
            }

            *got_frame = 1;
            frame = avci->compat_decode_frame;
        } else {
            if (!avci->compat_decode_warned) {
                av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_decode_* "
                       "API cannot return all the frames for this decoder. "
                       "Some frames will be dropped. Update your code to the "
                       "new decoding API to fix this.\n");
                avci->compat_decode_warned = 1;
            }
        }

        if (avci->draining || (!avctx->codec->bsfs && avci->compat_decode_consumed < pkt->size))
            break;
    }

finish:
    if (ret == 0) {
        /* if there are any bsfs then assume full packet is always consumed */
        if (avctx->codec->bsfs)
            ret = pkt->size;
        else
            ret = FFMIN(avci->compat_decode_consumed, pkt->size);
    }
    avci->compat_decode_consumed = 0;
    avci->compat_decode_partial_size = (ret >= 0) ? pkt->size - ret : 0;

    return ret;
}

int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
                                              int *got_picture_ptr,
                                              AVPacket *avpkt)
{
    return compat_decode(avctx, picture, got_picture_ptr, avpkt);
}

int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
                                              AVFrame *frame,
                                              int *got_frame_ptr,
                                              AVPacket *avpkt)
{
    return compat_decode(avctx, frame, got_frame_ptr, avpkt);
}

int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
                             int *got_sub_ptr,
                             AVPacket *avpkt)
{
    int ret;

    ret = extract_packet_props(avctx->internal, avpkt);
    if (ret < 0)
        return ret;

    *got_sub_ptr = 0;
    ret = avctx->codec->decode(avctx, sub, got_sub_ptr, avpkt);
    if (*got_sub_ptr)
        avctx->frame_number++;

    return ret;
}

enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx,
                                              const enum AVPixelFormat *fmt)
{
    const AVPixFmtDescriptor *desc;
    const AVCodecHWConfig *config;
    int i, n;

    // If a device was supplied when the codec was opened, assume that the
    // user wants to use it.
    if (avctx->hw_device_ctx && avctx->codec->hw_configs) {
        AVHWDeviceContext *device_ctx =
            (AVHWDeviceContext*)avctx->hw_device_ctx->data;
        for (i = 0;; i++) {
            config = &avctx->codec->hw_configs[i]->public;
            if (!config)
                break;
            if (!(config->methods &
                  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
                continue;
            if (device_ctx->type != config->device_type)
                continue;
            for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
                if (config->pix_fmt == fmt[n])
                    return fmt[n];
            }
        }
    }

    // No device or other setup, so we have to choose from things which
    // don't need any other external information.

    // If the last element of the list is a software format, choose it
    // (this should be the best software format if any exist).
    for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
    desc = av_pix_fmt_desc_get(fmt[n - 1]);
    if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
        return fmt[n - 1];

    // Finally, traverse the list in order and choose the first entry
    // with no external dependencies (if there is no hardware configuration
    // information available then this just picks the first entry).
    for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
        for (i = 0;; i++) {
            config = avcodec_get_hw_config(avctx->codec, i);
            if (!config)
                break;
            if (config->pix_fmt == fmt[n])
                break;
        }
        if (!config) {
            // No specific config available, so the decoder must be able
            // to handle this format without any additional setup.
            return fmt[n];
        }
        if (config->methods & AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
            // Usable with only internal setup.
            return fmt[n];
        }
    }

    // Nothing is usable, give up.
    return AV_PIX_FMT_NONE;
}
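
/*
 * For illustration only: a minimal custom get_format callback that prefers a
 * specific hardware pixel format and otherwise falls back to the default
 * selection implemented above. "hw_pix_fmt" and "pick_format" are hypothetical
 * names; the format would typically be chosen by the application from
 * avcodec_get_hw_config().
 *
 *     static enum AVPixelFormat pick_format(AVCodecContext *avctx,
 *                                           const enum AVPixelFormat *fmt)
 *     {
 *         for (int n = 0; fmt[n] != AV_PIX_FMT_NONE; n++)
 *             if (fmt[n] == hw_pix_fmt)
 *                 return fmt[n];
 *         return avcodec_default_get_format(avctx, fmt);
 *     }
 */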

int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx,
                                enum AVHWDeviceType dev_type)
{
    AVHWDeviceContext *device_ctx;
    AVHWFramesContext *frames_ctx;
    int ret;

    if (!avctx->hwaccel)
        return AVERROR(ENOSYS);

    if (avctx->hw_frames_ctx)
        return 0;
    if (!avctx->hw_device_ctx) {
        av_log(avctx, AV_LOG_ERROR, "A hardware frames or device context is "
               "required for hardware accelerated decoding.\n");
        return AVERROR(EINVAL);
    }

    device_ctx = (AVHWDeviceContext *)avctx->hw_device_ctx->data;
    if (device_ctx->type != dev_type) {
        av_log(avctx, AV_LOG_ERROR, "Device type %s expected for hardware "
               "decoding, but got %s.\n", av_hwdevice_get_type_name(dev_type),
               av_hwdevice_get_type_name(device_ctx->type));
        return AVERROR(EINVAL);
    }

    ret = avcodec_get_hw_frames_parameters(avctx,
                                           avctx->hw_device_ctx,
                                           avctx->hwaccel->pix_fmt,
                                           &avctx->hw_frames_ctx);
    if (ret < 0)
        return ret;

    frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;

    if (frames_ctx->initial_pool_size) {
        // We guarantee 4 base work surfaces. The function above guarantees 1
        // (the absolute minimum), so add the missing count.
        frames_ctx->initial_pool_size += 3;
    }

    ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
    if (ret < 0) {
        av_buffer_unref(&avctx->hw_frames_ctx);
        return ret;
    }

    return 0;
}

int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
                                     AVBufferRef *device_ref,
                                     enum AVPixelFormat hw_pix_fmt,
                                     AVBufferRef **out_frames_ref)
{
    AVBufferRef *frames_ref = NULL;
    const AVCodecHWConfigInternal *hw_config;
    const AVHWAccel *hwa;
    int i, ret;

    for (i = 0;; i++) {
        hw_config = avctx->codec->hw_configs[i];
        if (!hw_config)
            return AVERROR(ENOENT);
        if (hw_config->public.pix_fmt == hw_pix_fmt)
            break;
    }

    hwa = hw_config->hwaccel;
    if (!hwa || !hwa->frame_params)
        return AVERROR(ENOENT);

    frames_ref = av_hwframe_ctx_alloc(device_ref);
    if (!frames_ref)
        return AVERROR(ENOMEM);

    ret = hwa->frame_params(avctx, frames_ref);
    if (ret >= 0) {
        AVHWFramesContext *frames_ctx = (AVHWFramesContext*)frames_ref->data;

        if (frames_ctx->initial_pool_size) {
            // If the user has requested that extra output surfaces be
            // available then add them here.
            if (avctx->extra_hw_frames > 0)
                frames_ctx->initial_pool_size += avctx->extra_hw_frames;

            // If frame threading is enabled then an extra surface per thread
            // is also required.
            if (avctx->active_thread_type & FF_THREAD_FRAME)
                frames_ctx->initial_pool_size += avctx->thread_count;
        }

        *out_frames_ref = frames_ref;
    } else {
        av_buffer_unref(&frames_ref);
    }
    return ret;
}

static int hwaccel_init(AVCodecContext *avctx,
                        const AVCodecHWConfigInternal *hw_config)
{
    const AVHWAccel *hwaccel;
    int err;

    hwaccel = hw_config->hwaccel;
    if (hwaccel->priv_data_size) {
        avctx->internal->hwaccel_priv_data =
            av_mallocz(hwaccel->priv_data_size);
        if (!avctx->internal->hwaccel_priv_data)
            return AVERROR(ENOMEM);
    }
    avctx->hwaccel = hwaccel;

    err = hwaccel->init(avctx);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed setup for format %s: "
               "hwaccel initialisation returned error.\n",
               av_get_pix_fmt_name(hw_config->public.pix_fmt));
        av_freep(&avctx->internal->hwaccel_priv_data);
        avctx->hwaccel = NULL;
        return err;
    }

    return 0;
}

static void hwaccel_uninit(AVCodecContext *avctx)
{
    if (avctx->hwaccel && avctx->hwaccel->uninit)
        avctx->hwaccel->uninit(avctx);

    av_freep(&avctx->internal->hwaccel_priv_data);

    avctx->hwaccel = NULL;

    av_buffer_unref(&avctx->hw_frames_ctx);
}

int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
{
    const AVPixFmtDescriptor *desc;
    enum AVPixelFormat *choices;
    enum AVPixelFormat ret, user_choice;
    const AVCodecHWConfigInternal *hw_config;
    const AVCodecHWConfig *config;
    int i, n, err;

    // Find end of list.
    for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
    // Must contain at least one entry.
    av_assert0(n >= 1);
    // If a software format is available, it must be the last entry.
    desc = av_pix_fmt_desc_get(fmt[n - 1]);
    if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
        // No software format is available.
    } else {
        avctx->sw_pix_fmt = fmt[n - 1];
    }

    choices = av_malloc_array(n + 1, sizeof(*choices));
    if (!choices)
        return AV_PIX_FMT_NONE;

    memcpy(choices, fmt, (n + 1) * sizeof(*choices));

    for (;;) {
        // Remove the previous hwaccel, if there was one.
        hwaccel_uninit(avctx);

        user_choice = avctx->get_format(avctx, choices);
        if (user_choice == AV_PIX_FMT_NONE) {
            // Explicitly chose nothing, give up.
            ret = AV_PIX_FMT_NONE;
            break;
        }

        desc = av_pix_fmt_desc_get(user_choice);
        if (!desc) {
            av_log(avctx, AV_LOG_ERROR, "Invalid format returned by "
                   "get_format() callback.\n");
            ret = AV_PIX_FMT_NONE;
            break;
        }
        av_log(avctx, AV_LOG_DEBUG, "Format %s chosen by get_format().\n",
               desc->name);

        for (i = 0; i < n; i++) {
            if (choices[i] == user_choice)
                break;
        }
        if (i == n) {
            av_log(avctx, AV_LOG_ERROR, "Invalid return from get_format(): "
                   "%s not in possible list.\n", desc->name);
            break;
        }

        if (avctx->codec->hw_configs) {
            for (i = 0;; i++) {
                hw_config = avctx->codec->hw_configs[i];
                if (!hw_config)
                    break;
                if (hw_config->public.pix_fmt == user_choice)
                    break;
            }
        } else {
            hw_config = NULL;
        }

        if (!hw_config) {
            // No config available, so no extra setup required.
            ret = user_choice;
            break;
        }
        config = &hw_config->public;

        if (config->methods &
            AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX &&
            avctx->hw_frames_ctx) {
            const AVHWFramesContext *frames_ctx =
                (AVHWFramesContext*)avctx->hw_frames_ctx->data;
            if (frames_ctx->format != user_choice) {
                av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
                       "does not match the format of the provided frames "
                       "context.\n", desc->name);
                goto try_again;
            }
        } else if (config->methods &
                   AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
                   avctx->hw_device_ctx) {
            const AVHWDeviceContext *device_ctx =
                (AVHWDeviceContext*)avctx->hw_device_ctx->data;
            if (device_ctx->type != config->device_type) {
                av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
                       "does not match the type of the provided device "
                       "context.\n", desc->name);
                goto try_again;
            }
        } else if (config->methods &
                   AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
            // Internal-only setup, no additional configuration.
        } else if (config->methods &
                   AV_CODEC_HW_CONFIG_METHOD_AD_HOC) {
            // Some ad-hoc configuration we can't see and can't check.
        } else {
            av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
                   "missing configuration.\n", desc->name);
            goto try_again;
        }
        if (hw_config->hwaccel) {
            av_log(avctx, AV_LOG_DEBUG, "Format %s requires hwaccel "
                   "initialisation.\n", desc->name);
            err = hwaccel_init(avctx, hw_config);
            if (err < 0)
                goto try_again;
        }
        ret = user_choice;
        break;

    try_again:
        av_log(avctx, AV_LOG_DEBUG, "Format %s not usable, retrying "
               "get_format() without it.\n", desc->name);
        for (i = 0; i < n; i++) {
            if (choices[i] == user_choice)
                break;
        }
        for (; i + 1 < n; i++)
            choices[i] = choices[i + 1];
        --n;
    }

    av_freep(&choices);
    return ret;
}

static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
{
    FramePool *pool = avctx->internal->pool;
    int i, ret;

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO: {
        uint8_t *data[4];
        int linesize[4];
        int size[4] = { 0 };
        int w = frame->width;
        int h = frame->height;
        int tmpsize, unaligned;

        if (pool->format == frame->format &&
            pool->width == frame->width && pool->height == frame->height)
            return 0;

        avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);

        do {
            // NOTE: do not align linesizes individually, this breaks e.g. assumptions
            // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
            av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
            // increase alignment of w for next try (rhs gives the lowest bit set in w)
            w += w & ~(w - 1);

            unaligned = 0;
            for (i = 0; i < 4; i++)
                unaligned |= linesize[i] % pool->stride_align[i];
        } while (unaligned);

        tmpsize = av_image_fill_pointers(data, avctx->pix_fmt, h,
                                         NULL, linesize);
        if (tmpsize < 0)
            return -1;

        for (i = 0; i < 3 && data[i + 1]; i++)
            size[i] = data[i + 1] - data[i];
        size[i] = tmpsize - (data[i] - data[0]);

        for (i = 0; i < 4; i++) {
            av_buffer_pool_uninit(&pool->pools[i]);
            pool->linesize[i] = linesize[i];
            if (size[i]) {
                pool->pools[i] = av_buffer_pool_init(size[i] + 16, NULL);
                if (!pool->pools[i]) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
            }
        }
        pool->format = frame->format;
        pool->width  = frame->width;
        pool->height = frame->height;

        break;
    }
    case AVMEDIA_TYPE_AUDIO: {
        int ch     = av_get_channel_layout_nb_channels(frame->channel_layout);
        int planar = av_sample_fmt_is_planar(frame->format);
        int planes = planar ? ch : 1;

        if (pool->format == frame->format && pool->planes == planes &&
            pool->channels == ch && frame->nb_samples == pool->samples)
            return 0;

        av_buffer_pool_uninit(&pool->pools[0]);
        ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
                                         frame->nb_samples, frame->format, 0);
        if (ret < 0)
            goto fail;

        pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
        if (!pool->pools[0]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        pool->format   = frame->format;
        pool->planes   = planes;
        pool->channels = ch;
        pool->samples  = frame->nb_samples;
        break;
    }
    default: av_assert0(0);
    }
    return 0;
fail:
    for (i = 0; i < 4; i++)
        av_buffer_pool_uninit(&pool->pools[i]);
    pool->format = -1;
    pool->planes = pool->channels = pool->samples = 0;
    pool->width  = pool->height = 0;
    return ret;
}

static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
{
    FramePool *pool = avctx->internal->pool;
    int planes = pool->planes;
    int i;

    frame->linesize[0] = pool->linesize[0];

    if (planes > AV_NUM_DATA_POINTERS) {
        frame->extended_data = av_mallocz(planes * sizeof(*frame->extended_data));
        frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
        frame->extended_buf  = av_mallocz(frame->nb_extended_buf *
                                          sizeof(*frame->extended_buf));
        if (!frame->extended_data || !frame->extended_buf) {
            av_freep(&frame->extended_data);
            av_freep(&frame->extended_buf);
            return AVERROR(ENOMEM);
        }
    } else
        frame->extended_data = frame->data;

    for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
        frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
        if (!frame->buf[i])
            goto fail;
        frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
    }
    for (i = 0; i < frame->nb_extended_buf; i++) {
        frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
        if (!frame->extended_buf[i])
            goto fail;
        frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
    }

    if (avctx->debug & FF_DEBUG_BUFFERS)
        av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p", frame);

    return 0;
fail:
    av_frame_unref(frame);
    return AVERROR(ENOMEM);
}

static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
{
    FramePool *pool = s->internal->pool;
    int i;

    if (pic->data[0]) {
        av_log(s, AV_LOG_ERROR, "pic->data[0]!=NULL in avcodec_default_get_buffer\n");
        return -1;
    }

    memset(pic->data, 0, sizeof(pic->data));
    pic->extended_data = pic->data;

    for (i = 0; i < 4 && pool->pools[i]; i++) {
        pic->linesize[i] = pool->linesize[i];

        pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
        if (!pic->buf[i])
            goto fail;

        pic->data[i] = pic->buf[i]->data;
    }
    for (; i < AV_NUM_DATA_POINTERS; i++) {
        pic->data[i] = NULL;
        pic->linesize[i] = 0;
    }
    if (pic->data[1] && !pic->data[2])
        avpriv_set_systematic_pal2((uint32_t *)pic->data[1], s->pix_fmt);

    if (s->debug & FF_DEBUG_BUFFERS)
        av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);

    return 0;
fail:
    av_frame_unref(pic);
    return AVERROR(ENOMEM);
}

int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    int ret;

    if (avctx->hw_frames_ctx) {
        ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);
        frame->width  = avctx->coded_width;
        frame->height = avctx->coded_height;
        return ret;
    }

    if ((ret = update_frame_pool(avctx, frame)) < 0)
        return ret;

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        return video_get_buffer(avctx, frame);
    case AVMEDIA_TYPE_AUDIO:
        return audio_get_buffer(avctx, frame);
    default:
        return -1;
    }
}
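
/*
 * For illustration only: an application-supplied get_buffer2 callback is
 * expected to behave like the default above, i.e. fill data/linesize/buf (and
 * extended_data for audio) with refcounted buffers. A minimal sketch that just
 * adds logging around the default allocator might look like this
 * ("my_get_buffer" is a hypothetical name):
 *
 *     static int my_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
 *     {
 *         int ret = avcodec_default_get_buffer2(avctx, frame, flags);
 *         if (ret >= 0)
 *             av_log(avctx, AV_LOG_DEBUG, "allocated %dx%d buffer\n",
 *                    frame->width, frame->height);
 *         return ret;
 *     }
 */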

int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
{
    AVPacket *pkt = avctx->internal->last_pkt_props;
    int i;
    struct {
        enum AVPacketSideDataType packet;
        enum AVFrameSideDataType frame;
    } sd[] = {
        { AV_PKT_DATA_REPLAYGAIN,         AV_FRAME_DATA_REPLAYGAIN },
        { AV_PKT_DATA_DISPLAYMATRIX,      AV_FRAME_DATA_DISPLAYMATRIX },
        { AV_PKT_DATA_SPHERICAL,          AV_FRAME_DATA_SPHERICAL },
        { AV_PKT_DATA_STEREO3D,           AV_FRAME_DATA_STEREO3D },
        { AV_PKT_DATA_AUDIO_SERVICE_TYPE, AV_FRAME_DATA_AUDIO_SERVICE_TYPE },
    };

    frame->color_primaries = avctx->color_primaries;
    frame->color_trc       = avctx->color_trc;
    frame->colorspace      = avctx->colorspace;
    frame->color_range     = avctx->color_range;
    frame->chroma_location = avctx->chroma_sample_location;

    frame->reordered_opaque = avctx->reordered_opaque;

#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
    frame->pkt_pts = pkt->pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    frame->pts = pkt->pts;

    for (i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
        int size;
        uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);

        if (packet_sd) {
            AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
                                                               sd[i].frame,
                                                               size);
            if (!frame_sd)
                return AVERROR(ENOMEM);

            memcpy(frame_sd->data, packet_sd, size);
        }
    }

    return 0;
}

static void decode_data_free(void *opaque, uint8_t *data)
{
    FrameDecodeData *fdd = (FrameDecodeData*)data;

    av_buffer_unref(&fdd->user_opaque_ref);

    if (fdd->post_process_opaque_free)
        fdd->post_process_opaque_free(fdd->post_process_opaque);

    if (fdd->hwaccel_priv_free)
        fdd->hwaccel_priv_free(fdd->hwaccel_priv);

    av_freep(&fdd);
}

static int attach_decode_data(AVFrame *frame)
{
    AVBufferRef *fdd_buf;
    FrameDecodeData *fdd;

    fdd = av_mallocz(sizeof(*fdd));
    if (!fdd)
        return AVERROR(ENOMEM);

    fdd_buf = av_buffer_create((uint8_t*)fdd, sizeof(*fdd), decode_data_free,
                               NULL, AV_BUFFER_FLAG_READONLY);
    if (!fdd_buf) {
        av_freep(&fdd);
        return AVERROR(ENOMEM);
    }

    fdd->user_opaque_ref = frame->opaque_ref;
    frame->opaque_ref    = fdd_buf;

    return 0;
}

int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    const AVHWAccel *hwaccel = avctx->hwaccel;
    int override_dimensions = 1;
    int ret;

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        if (frame->width <= 0 || frame->height <= 0) {
            frame->width  = FFMAX(avctx->width, avctx->coded_width);
            frame->height = FFMAX(avctx->height, avctx->coded_height);
            override_dimensions = 0;
        }
        if (frame->format < 0)
            frame->format = avctx->pix_fmt;
        if (!frame->sample_aspect_ratio.num)
            frame->sample_aspect_ratio = avctx->sample_aspect_ratio;

        if (av_image_check_sar(frame->width, frame->height,
                               frame->sample_aspect_ratio) < 0) {
            av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
                   frame->sample_aspect_ratio.num,
                   frame->sample_aspect_ratio.den);
            frame->sample_aspect_ratio = (AVRational){ 0, 1 };
        }

        if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
            return ret;
        break;
    case AVMEDIA_TYPE_AUDIO:
        if (!frame->sample_rate)
            frame->sample_rate = avctx->sample_rate;
        if (frame->format < 0)
            frame->format = avctx->sample_fmt;
        if (!frame->channel_layout) {
            if (avctx->channel_layout) {
                if (av_get_channel_layout_nb_channels(avctx->channel_layout) !=
                    avctx->channels) {
                    av_log(avctx, AV_LOG_ERROR, "Inconsistent channel "
                           "configuration.\n");
                    return AVERROR(EINVAL);
                }

                frame->channel_layout = avctx->channel_layout;
            } else {
                if (avctx->channels > FF_SANE_NB_CHANNELS) {
                    av_log(avctx, AV_LOG_ERROR, "Too many channels: %d.\n",
                           avctx->channels);
                    return AVERROR(ENOSYS);
                }

                frame->channel_layout = av_get_default_channel_layout(avctx->channels);
                if (!frame->channel_layout)
                    frame->channel_layout = (1ULL << avctx->channels) - 1;
            }
        }
        break;
    default: return AVERROR(EINVAL);
    }

    ret = ff_decode_frame_props(avctx, frame);
    if (ret < 0)
        return ret;

    if (hwaccel) {
        if (hwaccel->alloc_frame) {
            ret = hwaccel->alloc_frame(avctx, frame);
            goto end;
        }
    } else
        avctx->sw_pix_fmt = avctx->pix_fmt;

    ret = avctx->get_buffer2(avctx, frame, flags);
    if (ret < 0)
        goto end;

    ret = attach_decode_data(frame);
    if (ret < 0)
        goto end;

end:
    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions &&
        !(avctx->codec->caps_internal & FF_CODEC_CAP_EXPORTS_CROPPING)) {
        frame->width  = avctx->width;
        frame->height = avctx->height;
    }

    if (ret < 0)
        av_frame_unref(frame);

    return ret;
}

int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
{
    AVFrame *tmp;
    int ret;

    av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);

    if (!frame->data[0])
        return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);

    if (av_frame_is_writable(frame))
        return ff_decode_frame_props(avctx, frame);

    tmp = av_frame_alloc();
    if (!tmp)
        return AVERROR(ENOMEM);

    av_frame_move_ref(tmp, frame);

    ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0) {
        av_frame_free(&tmp);
        return ret;
    }

    av_frame_copy(frame, tmp);
    av_frame_free(&tmp);

    return 0;
}

void avcodec_flush_buffers(AVCodecContext *avctx)
{
    avctx->internal->draining      = 0;
    avctx->internal->draining_done = 0;
    av_frame_unref(avctx->internal->buffer_frame);
    av_frame_unref(avctx->internal->compat_decode_frame);
    av_packet_unref(avctx->internal->buffer_pkt);
    avctx->internal->buffer_pkt_valid = 0;

    av_packet_unref(avctx->internal->ds.in_pkt);

    if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
        ff_thread_flush(avctx);
    else if (avctx->codec->flush)
        avctx->codec->flush(avctx);

    ff_decode_bsfs_uninit(avctx);

    if (!avctx->refcounted_frames)
        av_frame_unref(avctx->internal->to_free);
}

void ff_decode_bsfs_uninit(AVCodecContext *avctx)
{
    DecodeFilterContext *s = &avctx->internal->filter;
    int i;

    for (i = 0; i < s->nb_bsfs; i++)
        av_bsf_free(&s->bsfs[i]);
    av_freep(&s->bsfs);
    s->nb_bsfs = 0;
}