You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

925 lines
28KB

  1. /*
  2. * generic decoding-related code
  3. *
  4. * This file is part of Libav.
  5. *
  6. * Libav is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * Libav is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with Libav; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include <stdint.h>
  21. #include <string.h>
  22. #include "config.h"
  23. #include "libavutil/avassert.h"
  24. #include "libavutil/common.h"
  25. #include "libavutil/frame.h"
  26. #include "libavutil/hwcontext.h"
  27. #include "libavutil/imgutils.h"
  28. #include "avcodec.h"
  29. #include "bytestream.h"
  30. #include "internal.h"
  31. #include "thread.h"
  32. static int apply_param_change(AVCodecContext *avctx, AVPacket *avpkt)
  33. {
  34. int size = 0, ret;
  35. const uint8_t *data;
  36. uint32_t flags;
  37. data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
  38. if (!data)
  39. return 0;
  40. if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
  41. av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
  42. "changes, but PARAM_CHANGE side data was sent to it.\n");
  43. ret = AVERROR(EINVAL);
  44. goto fail2;
  45. }
  46. if (size < 4)
  47. goto fail;
  48. flags = bytestream_get_le32(&data);
  49. size -= 4;
  50. if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
  51. if (size < 4)
  52. goto fail;
  53. avctx->channels = bytestream_get_le32(&data);
  54. size -= 4;
  55. }
  56. if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
  57. if (size < 8)
  58. goto fail;
  59. avctx->channel_layout = bytestream_get_le64(&data);
  60. size -= 8;
  61. }
  62. if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
  63. if (size < 4)
  64. goto fail;
  65. avctx->sample_rate = bytestream_get_le32(&data);
  66. size -= 4;
  67. }
  68. if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
  69. if (size < 8)
  70. goto fail;
  71. avctx->width = bytestream_get_le32(&data);
  72. avctx->height = bytestream_get_le32(&data);
  73. size -= 8;
  74. ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
  75. if (ret < 0)
  76. goto fail2;
  77. }
  78. return 0;
  79. fail:
  80. av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
  81. ret = AVERROR_INVALIDDATA;
  82. fail2:
  83. if (ret < 0) {
  84. av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
  85. if (avctx->err_recognition & AV_EF_EXPLODE)
  86. return ret;
  87. }
  88. return 0;
  89. }
  90. static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
  91. {
  92. int ret;
  93. /* move the original frame to our backup */
  94. av_frame_unref(avci->to_free);
  95. av_frame_move_ref(avci->to_free, frame);
  96. /* now copy everything except the AVBufferRefs back
  97. * note that we make a COPY of the side data, so calling av_frame_free() on
  98. * the caller's frame will work properly */
  99. ret = av_frame_copy_props(frame, avci->to_free);
  100. if (ret < 0)
  101. return ret;
  102. memcpy(frame->data, avci->to_free->data, sizeof(frame->data));
  103. memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize));
  104. if (avci->to_free->extended_data != avci->to_free->data) {
  105. int planes = av_get_channel_layout_nb_channels(avci->to_free->channel_layout);
  106. int size = planes * sizeof(*frame->extended_data);
  107. if (!size) {
  108. av_frame_unref(frame);
  109. return AVERROR_BUG;
  110. }
  111. frame->extended_data = av_malloc(size);
  112. if (!frame->extended_data) {
  113. av_frame_unref(frame);
  114. return AVERROR(ENOMEM);
  115. }
  116. memcpy(frame->extended_data, avci->to_free->extended_data,
  117. size);
  118. } else
  119. frame->extended_data = frame->data;
  120. frame->format = avci->to_free->format;
  121. frame->width = avci->to_free->width;
  122. frame->height = avci->to_free->height;
  123. frame->channel_layout = avci->to_free->channel_layout;
  124. frame->nb_samples = avci->to_free->nb_samples;
  125. return 0;
  126. }
/*
 * Decode one packet through the legacy avcodec_decode_video2/audio4 API,
 * leaving any produced frame in avctx->internal->buffer_frame and any
 * unconsumed packet remainder in avctx->internal->buffer_pkt.
 *
 * pkt may be NULL, meaning "continue decoding buffer_pkt".
 * Returns 0 on success or a negative AVERROR code.
 */
static int do_decode(AVCodecContext *avctx, AVPacket *pkt)
{
    int got_frame;
    int ret;

    /* the emulation buffer frame must be empty on entry */
    av_assert0(!avctx->internal->buffer_frame->buf[0]);

    if (!pkt)
        pkt = avctx->internal->buffer_pkt;

    // This is the lesser evil. The field is for compatibility with legacy users
    // of the legacy API, and users using the new API should not be forced to
    // even know about this field.
    avctx->refcounted_frames = 1;

    // Some codecs (at least wma lossless) will crash when feeding drain packets
    // after EOF was signaled.
    if (avctx->internal->draining_done)
        return AVERROR_EOF;

    /* NOTE: from here on, ret holds the number of consumed bytes (the video
     * path consumes the whole packet on success, the audio path may not). */
    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        ret = avcodec_decode_video2(avctx, avctx->internal->buffer_frame,
                                    &got_frame, pkt);
        if (ret >= 0)
            ret = pkt->size;
    } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        ret = avcodec_decode_audio4(avctx, avctx->internal->buffer_frame,
                                    &got_frame, pkt);
    } else {
        ret = AVERROR(EINVAL);
    }

    if (ret < 0)
        return ret;

    /* draining with no output means the decoder is fully flushed */
    if (avctx->internal->draining && !got_frame)
        avctx->internal->draining_done = 1;

    if (ret >= pkt->size) {
        /* everything consumed: nothing left to buffer */
        av_packet_unref(avctx->internal->buffer_pkt);
    } else {
        int consumed = ret;

        /* stash the unconsumed remainder in buffer_pkt for the next call */
        if (pkt != avctx->internal->buffer_pkt) {
            av_packet_unref(avctx->internal->buffer_pkt);
            if ((ret = av_packet_ref(avctx->internal->buffer_pkt, pkt)) < 0)
                return ret;
        }
        avctx->internal->buffer_pkt->data += consumed;
        avctx->internal->buffer_pkt->size -= consumed;
        /* timestamps belong to the first frame of the packet only */
        avctx->internal->buffer_pkt->pts   = AV_NOPTS_VALUE;
        avctx->internal->buffer_pkt->dts   = AV_NOPTS_VALUE;
    }

    if (got_frame)
        av_assert0(avctx->internal->buffer_frame->buf[0]);

    return 0;
}
  175. int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
  176. {
  177. int ret;
  178. if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
  179. return AVERROR(EINVAL);
  180. if (avctx->internal->draining)
  181. return AVERROR_EOF;
  182. if (!avpkt || !avpkt->size) {
  183. avctx->internal->draining = 1;
  184. avpkt = NULL;
  185. if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
  186. return 0;
  187. }
  188. if (avctx->codec->send_packet) {
  189. if (avpkt) {
  190. ret = apply_param_change(avctx, (AVPacket *)avpkt);
  191. if (ret < 0)
  192. return ret;
  193. }
  194. return avctx->codec->send_packet(avctx, avpkt);
  195. }
  196. // Emulation via old API. Assume avpkt is likely not refcounted, while
  197. // decoder output is always refcounted, and avoid copying.
  198. if (avctx->internal->buffer_pkt->size || avctx->internal->buffer_frame->buf[0])
  199. return AVERROR(EAGAIN);
  200. // The goal is decoding the first frame of the packet without using memcpy,
  201. // because the common case is having only 1 frame per packet (especially
  202. // with video, but audio too). In other cases, it can't be avoided, unless
  203. // the user is feeding refcounted packets.
  204. return do_decode(avctx, (AVPacket *)avpkt);
  205. }
  206. int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
  207. {
  208. int ret;
  209. av_frame_unref(frame);
  210. if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
  211. return AVERROR(EINVAL);
  212. if (avctx->codec->receive_frame) {
  213. if (avctx->internal->draining && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
  214. return AVERROR_EOF;
  215. return avctx->codec->receive_frame(avctx, frame);
  216. }
  217. // Emulation via old API.
  218. if (!avctx->internal->buffer_frame->buf[0]) {
  219. if (!avctx->internal->buffer_pkt->size && !avctx->internal->draining)
  220. return AVERROR(EAGAIN);
  221. while (1) {
  222. if ((ret = do_decode(avctx, avctx->internal->buffer_pkt)) < 0) {
  223. av_packet_unref(avctx->internal->buffer_pkt);
  224. return ret;
  225. }
  226. // Some audio decoders may consume partial data without returning
  227. // a frame (fate-wmapro-2ch). There is no way to make the caller
  228. // call avcodec_receive_frame() again without returning a frame,
  229. // so try to decode more in these cases.
  230. if (avctx->internal->buffer_frame->buf[0] ||
  231. !avctx->internal->buffer_pkt->size)
  232. break;
  233. }
  234. }
  235. if (!avctx->internal->buffer_frame->buf[0])
  236. return avctx->internal->draining ? AVERROR_EOF : AVERROR(EAGAIN);
  237. av_frame_move_ref(frame, avctx->internal->buffer_frame);
  238. return 0;
  239. }
/*
 * Legacy video decoding entry point.
 *
 * Decodes avpkt into picture; *got_picture_ptr is set non-zero when a frame
 * was produced. Returns a negative error code on failure, otherwise the
 * decoder's return value (bytes consumed for most decoders).
 */
int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
                                              int *got_picture_ptr,
                                              AVPacket *avpkt)
{
    AVCodecInternal *avci = avctx->internal;
    int ret;

    *got_picture_ptr = 0;
    /* reject dimensions that cannot be safely processed */
    if ((avctx->coded_width || avctx->coded_height) && av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx))
        return -1;

    /* new-API-only decoder: cannot be driven through this function */
    if (!avctx->codec->decode) {
        av_log(avctx, AV_LOG_ERROR, "This decoder requires using the avcodec_send_packet() API.\n");
        return AVERROR(ENOSYS);
    }

    avctx->internal->pkt = avpkt;
    ret = apply_param_change(avctx, avpkt);
    if (ret < 0)
        return ret;

    av_frame_unref(picture);

    /* decode only when there is data, or when the decoder may have delayed
     * frames to flush (CAP_DELAY / frame threading) */
    if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size ||
        (avctx->active_thread_type & FF_THREAD_FRAME)) {
        if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
            ret = ff_thread_decode_frame(avctx, picture, got_picture_ptr,
                                         avpkt);
        else {
            ret = avctx->codec->decode(avctx, picture, got_picture_ptr,
                                       avpkt);
            if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
                picture->pkt_dts = avpkt->dts;
            /* get_buffer is supposed to set frame parameters */
            if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
                picture->sample_aspect_ratio = avctx->sample_aspect_ratio;
                picture->width               = avctx->width;
                picture->height              = avctx->height;
                picture->format              = avctx->pix_fmt;
            }
        }

        emms_c(); //needed to avoid an emms_c() call before every return;

        if (*got_picture_ptr) {
            if (!avctx->refcounted_frames) {
                /* legacy callers expect non-refcounted output */
                int err = unrefcount_frame(avci, picture);
                if (err < 0)
                    return err;
            }
            avctx->frame_number++;
        } else
            av_frame_unref(picture);
    } else
        ret = 0;

#if FF_API_AVCTX_TIMEBASE
    if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
        avctx->time_base = av_inv_q(avctx->framerate);
#endif

    return ret;
}
/*
 * Legacy audio decoding entry point.
 *
 * Decodes avpkt into frame; *got_frame_ptr is set non-zero when a frame was
 * produced. Returns a negative error code on failure, otherwise the number
 * of bytes consumed (audio decoders may consume only part of the packet).
 */
int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
                                              AVFrame *frame,
                                              int *got_frame_ptr,
                                              AVPacket *avpkt)
{
    AVCodecInternal *avci = avctx->internal;
    int ret = 0;

    *got_frame_ptr = 0;

    /* new-API-only decoder: cannot be driven through this function */
    if (!avctx->codec->decode) {
        av_log(avctx, AV_LOG_ERROR, "This decoder requires using the avcodec_send_packet() API.\n");
        return AVERROR(ENOSYS);
    }

    avctx->internal->pkt = avpkt;

    if (!avpkt->data && avpkt->size) {
        av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
        return AVERROR(EINVAL);
    }

    ret = apply_param_change(avctx, avpkt);
    if (ret < 0)
        return ret;

    av_frame_unref(frame);

    /* decode only when there is data or delayed frames to flush */
    if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
        ret = avctx->codec->decode(avctx, frame, got_frame_ptr, avpkt);
        if (ret >= 0 && *got_frame_ptr) {
            avctx->frame_number++;
            frame->pkt_dts = avpkt->dts;
            /* fall back to the context's sample format if the decoder
             * did not set one on the frame */
            if (frame->format == AV_SAMPLE_FMT_NONE)
                frame->format = avctx->sample_fmt;

            if (!avctx->refcounted_frames) {
                /* legacy callers expect non-refcounted output */
                int err = unrefcount_frame(avci, frame);
                if (err < 0)
                    return err;
            }
        } else
            av_frame_unref(frame);
    }

    return ret;
}
  332. int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
  333. int *got_sub_ptr,
  334. AVPacket *avpkt)
  335. {
  336. int ret;
  337. avctx->internal->pkt = avpkt;
  338. *got_sub_ptr = 0;
  339. ret = avctx->codec->decode(avctx, sub, got_sub_ptr, avpkt);
  340. if (*got_sub_ptr)
  341. avctx->frame_number++;
  342. return ret;
  343. }
  344. static int is_hwaccel_pix_fmt(enum AVPixelFormat pix_fmt)
  345. {
  346. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
  347. return desc->flags & AV_PIX_FMT_FLAG_HWACCEL;
  348. }
  349. enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
  350. {
  351. while (*fmt != AV_PIX_FMT_NONE && is_hwaccel_pix_fmt(*fmt))
  352. ++fmt;
  353. return fmt[0];
  354. }
  355. static AVHWAccel *find_hwaccel(enum AVCodecID codec_id,
  356. enum AVPixelFormat pix_fmt)
  357. {
  358. AVHWAccel *hwaccel = NULL;
  359. while ((hwaccel = av_hwaccel_next(hwaccel)))
  360. if (hwaccel->id == codec_id
  361. && hwaccel->pix_fmt == pix_fmt)
  362. return hwaccel;
  363. return NULL;
  364. }
  365. static int setup_hwaccel(AVCodecContext *avctx,
  366. const enum AVPixelFormat fmt,
  367. const char *name)
  368. {
  369. AVHWAccel *hwa = find_hwaccel(avctx->codec_id, fmt);
  370. int ret = 0;
  371. if (!hwa) {
  372. av_log(avctx, AV_LOG_ERROR,
  373. "Could not find an AVHWAccel for the pixel format: %s",
  374. name);
  375. return AVERROR(ENOENT);
  376. }
  377. if (hwa->priv_data_size) {
  378. avctx->internal->hwaccel_priv_data = av_mallocz(hwa->priv_data_size);
  379. if (!avctx->internal->hwaccel_priv_data)
  380. return AVERROR(ENOMEM);
  381. }
  382. if (hwa->init) {
  383. ret = hwa->init(avctx);
  384. if (ret < 0) {
  385. av_freep(&avctx->internal->hwaccel_priv_data);
  386. return ret;
  387. }
  388. }
  389. avctx->hwaccel = hwa;
  390. return 0;
  391. }
/*
 * Negotiate the output pixel format with the caller via avctx->get_format()
 * and set up a matching hwaccel when a hardware format is selected.
 *
 * fmt is an AV_PIX_FMT_NONE-terminated list of candidates; the last entry
 * before the terminator is assumed to be the software fallback. Formats
 * whose hwaccel setup fails are removed from the list and negotiation is
 * retried. Returns the chosen format or AV_PIX_FMT_NONE on failure.
 */
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
{
    const AVPixFmtDescriptor *desc;
    enum AVPixelFormat *choices;
    enum AVPixelFormat ret;
    unsigned n = 0;

    while (fmt[n] != AV_PIX_FMT_NONE)
        ++n;

    av_assert0(n >= 1);
    /* last entry in the list is the software format */
    avctx->sw_pix_fmt = fmt[n - 1];
    av_assert2(!is_hwaccel_pix_fmt(avctx->sw_pix_fmt));

    /* mutable copy of the list, terminator included, so that failed
     * hwaccel formats can be removed between retries */
    choices = av_malloc_array(n + 1, sizeof(*choices));
    if (!choices)
        return AV_PIX_FMT_NONE;

    memcpy(choices, fmt, (n + 1) * sizeof(*choices));

    for (;;) {
        /* tear down any hwaccel state left from a previous iteration */
        if (avctx->hwaccel && avctx->hwaccel->uninit)
            avctx->hwaccel->uninit(avctx);
        av_freep(&avctx->internal->hwaccel_priv_data);
        avctx->hwaccel = NULL;

        av_buffer_unref(&avctx->hw_frames_ctx);

        ret = avctx->get_format(avctx, choices);

        desc = av_pix_fmt_desc_get(ret);
        if (!desc) {
            ret = AV_PIX_FMT_NONE;
            break;
        }

        /* a software format needs no further setup */
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
            break;

        /* if the caller provided a frames context, it must match the
         * format it returned */
        if (avctx->hw_frames_ctx) {
            AVHWFramesContext *hw_frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
            if (hw_frames_ctx->format != ret) {
                av_log(avctx, AV_LOG_ERROR, "Format returned from get_buffer() "
                       "does not match the format of provided AVHWFramesContext\n");
                ret = AV_PIX_FMT_NONE;
                break;
            }
        }

        if (!setup_hwaccel(avctx, ret, desc->name))
            break;

        /* Remove failed hwaccel from choices */
        for (n = 0; choices[n] != ret; n++)
            av_assert0(choices[n] != AV_PIX_FMT_NONE);

        do
            choices[n] = choices[n + 1];
        while (choices[n++] != AV_PIX_FMT_NONE);
    }

    av_freep(&choices);
    return ret;
}
/*
 * (Re)initialize the internal buffer pools so they match the geometry /
 * format of the given frame. A no-op when the pool already matches.
 *
 * Returns 0 on success; on failure all pools are torn down and a negative
 * AVERROR code is returned.
 */
static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
{
    FramePool *pool = avctx->internal->pool;
    int i, ret;

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO: {
        uint8_t *data[4];
        int linesize[4];
        int size[4] = { 0 };
        int w = frame->width;
        int h = frame->height;
        int tmpsize, unaligned;

        /* pool still matches: keep it */
        if (pool->format == frame->format &&
            pool->width == frame->width && pool->height == frame->height)
            return 0;

        avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);

        do {
            // NOTE: do not align linesizes individually, this breaks e.g. assumptions
            // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
            av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
            // increase alignment of w for next try (rhs gives the lowest bit set in w)
            w += w & ~(w - 1);

            unaligned = 0;
            for (i = 0; i < 4; i++)
                unaligned |= linesize[i] % pool->stride_align[i];
        } while (unaligned);

        /* with a NULL base pointer this only computes the total size and
         * the relative plane offsets in data[] */
        tmpsize = av_image_fill_pointers(data, avctx->pix_fmt, h,
                                         NULL, linesize);
        if (tmpsize < 0)
            return -1;

        /* per-plane sizes: difference of consecutive plane offsets; the
         * last plane gets the remainder */
        for (i = 0; i < 3 && data[i + 1]; i++)
            size[i] = data[i + 1] - data[i];
        size[i] = tmpsize - (data[i] - data[0]);

        for (i = 0; i < 4; i++) {
            av_buffer_pool_uninit(&pool->pools[i]);
            pool->linesize[i] = linesize[i];
            if (size[i]) {
                /* +16 padding bytes per plane */
                pool->pools[i] = av_buffer_pool_init(size[i] + 16, NULL);
                if (!pool->pools[i]) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
            }
        }

        pool->format = frame->format;
        pool->width = frame->width;
        pool->height = frame->height;

        break;
        }
    case AVMEDIA_TYPE_AUDIO: {
        int ch     = av_get_channel_layout_nb_channels(frame->channel_layout);
        int planar = av_sample_fmt_is_planar(frame->format);
        int planes = planar ? ch : 1;

        /* pool still matches: keep it */
        if (pool->format == frame->format && pool->planes == planes &&
            pool->channels == ch && frame->nb_samples == pool->samples)
            return 0;

        av_buffer_pool_uninit(&pool->pools[0]);
        ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
                                         frame->nb_samples, frame->format, 0);
        if (ret < 0)
            goto fail;

        pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
        if (!pool->pools[0]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        pool->format   = frame->format;
        pool->planes   = planes;
        pool->channels = ch;
        pool->samples = frame->nb_samples;
        break;
        }
    default: av_assert0(0);
    }
    return 0;
fail:
    /* tear everything down and mark the pool as unconfigured */
    for (i = 0; i < 4; i++)
        av_buffer_pool_uninit(&pool->pools[i]);
    pool->format = -1;
    pool->planes = pool->channels = pool->samples = 0;
    pool->width  = pool->height = 0;
    return ret;
}
  525. static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
  526. {
  527. FramePool *pool = avctx->internal->pool;
  528. int planes = pool->planes;
  529. int i;
  530. frame->linesize[0] = pool->linesize[0];
  531. if (planes > AV_NUM_DATA_POINTERS) {
  532. frame->extended_data = av_mallocz(planes * sizeof(*frame->extended_data));
  533. frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
  534. frame->extended_buf = av_mallocz(frame->nb_extended_buf *
  535. sizeof(*frame->extended_buf));
  536. if (!frame->extended_data || !frame->extended_buf) {
  537. av_freep(&frame->extended_data);
  538. av_freep(&frame->extended_buf);
  539. return AVERROR(ENOMEM);
  540. }
  541. } else
  542. frame->extended_data = frame->data;
  543. for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
  544. frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
  545. if (!frame->buf[i])
  546. goto fail;
  547. frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
  548. }
  549. for (i = 0; i < frame->nb_extended_buf; i++) {
  550. frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
  551. if (!frame->extended_buf[i])
  552. goto fail;
  553. frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
  554. }
  555. if (avctx->debug & FF_DEBUG_BUFFERS)
  556. av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p", frame);
  557. return 0;
  558. fail:
  559. av_frame_unref(frame);
  560. return AVERROR(ENOMEM);
  561. }
  562. static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
  563. {
  564. FramePool *pool = s->internal->pool;
  565. int i;
  566. if (pic->data[0]) {
  567. av_log(s, AV_LOG_ERROR, "pic->data[0]!=NULL in avcodec_default_get_buffer\n");
  568. return -1;
  569. }
  570. memset(pic->data, 0, sizeof(pic->data));
  571. pic->extended_data = pic->data;
  572. for (i = 0; i < 4 && pool->pools[i]; i++) {
  573. pic->linesize[i] = pool->linesize[i];
  574. pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
  575. if (!pic->buf[i])
  576. goto fail;
  577. pic->data[i] = pic->buf[i]->data;
  578. }
  579. for (; i < AV_NUM_DATA_POINTERS; i++) {
  580. pic->data[i] = NULL;
  581. pic->linesize[i] = 0;
  582. }
  583. if (pic->data[1] && !pic->data[2])
  584. avpriv_set_systematic_pal2((uint32_t *)pic->data[1], s->pix_fmt);
  585. if (s->debug & FF_DEBUG_BUFFERS)
  586. av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);
  587. return 0;
  588. fail:
  589. av_frame_unref(pic);
  590. return AVERROR(ENOMEM);
  591. }
  592. int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
  593. {
  594. int ret;
  595. if (avctx->hw_frames_ctx)
  596. return av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);
  597. if ((ret = update_frame_pool(avctx, frame)) < 0)
  598. return ret;
  599. switch (avctx->codec_type) {
  600. case AVMEDIA_TYPE_VIDEO:
  601. return video_get_buffer(avctx, frame);
  602. case AVMEDIA_TYPE_AUDIO:
  603. return audio_get_buffer(avctx, frame);
  604. default:
  605. return -1;
  606. }
  607. }
/*
 * Fill a frame's properties (color metadata, timestamps, side data) from
 * the codec context and the packet currently being decoded
 * (avctx->internal->pkt; may be NULL when decoding without a packet).
 *
 * Returns 0 on success or AVERROR(ENOMEM).
 */
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
{
    AVPacket *pkt = avctx->internal->pkt;
    int i;
    /* packet side-data types that are forwarded to the frame 1:1 */
    struct {
        enum AVPacketSideDataType packet;
        enum AVFrameSideDataType frame;
    } sd[] = {
        { AV_PKT_DATA_REPLAYGAIN ,        AV_FRAME_DATA_REPLAYGAIN },
        { AV_PKT_DATA_DISPLAYMATRIX,      AV_FRAME_DATA_DISPLAYMATRIX },
        { AV_PKT_DATA_STEREO3D,           AV_FRAME_DATA_STEREO3D },
        { AV_PKT_DATA_AUDIO_SERVICE_TYPE, AV_FRAME_DATA_AUDIO_SERVICE_TYPE },
    };

    frame->color_primaries = avctx->color_primaries;
    frame->color_trc       = avctx->color_trc;
    frame->colorspace      = avctx->colorspace;
    frame->color_range     = avctx->color_range;
    frame->chroma_location = avctx->chroma_sample_location;

    frame->reordered_opaque = avctx->reordered_opaque;

    /* no packet: mark timestamps as unknown */
    if (!pkt) {
#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
        frame->pkt_pts = AV_NOPTS_VALUE;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        frame->pts = AV_NOPTS_VALUE;
        return 0;
    }

#if FF_API_PKT_PTS
FF_DISABLE_DEPRECATION_WARNINGS
    frame->pkt_pts = pkt->pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    frame->pts = pkt->pts;

    /* copy mapped side-data entries from the packet onto the frame */
    for (i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
        int size;
        uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
        if (packet_sd) {
            AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
                                                               sd[i].frame,
                                                               size);
            if (!frame_sd)
                return AVERROR(ENOMEM);

            memcpy(frame_sd->data, packet_sd, size);
        }
    }

    return 0;
}
/*
 * Get a buffer for a frame: validate/default the frame parameters from the
 * codec context, fill frame properties, then allocate through the hwaccel
 * (if it provides alloc_frame) or the user's get_buffer2 callback.
 *
 * Returns 0 on success or a negative AVERROR code.
 */
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    const AVHWAccel *hwaccel = avctx->hwaccel;
    int override_dimensions = 1;
    int ret;

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        /* default missing dimensions from the context; remember that we
         * did so, to restore the display size after allocation */
        if (frame->width <= 0 || frame->height <= 0) {
            frame->width  = FFMAX(avctx->width, avctx->coded_width);
            frame->height = FFMAX(avctx->height, avctx->coded_height);
            override_dimensions = 0;
        }
        if (frame->format < 0)
            frame->format = avctx->pix_fmt;
        if (!frame->sample_aspect_ratio.num)
            frame->sample_aspect_ratio = avctx->sample_aspect_ratio;

        if (av_image_check_sar(frame->width, frame->height,
                               frame->sample_aspect_ratio) < 0) {
            av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
                   frame->sample_aspect_ratio.num,
                   frame->sample_aspect_ratio.den);
            frame->sample_aspect_ratio = (AVRational){ 0, 1 };
        }

        if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
            return ret;
        break;
    case AVMEDIA_TYPE_AUDIO:
        if (!frame->sample_rate)
            frame->sample_rate = avctx->sample_rate;
        if (frame->format < 0)
            frame->format = avctx->sample_fmt;
        if (!frame->channel_layout) {
            if (avctx->channel_layout) {
                /* the layout must agree with the channel count */
                if (av_get_channel_layout_nb_channels(avctx->channel_layout) !=
                    avctx->channels) {
                    av_log(avctx, AV_LOG_ERROR, "Inconsistent channel "
                           "configuration.\n");
                    return AVERROR(EINVAL);
                }

                frame->channel_layout = avctx->channel_layout;
            } else {
                if (avctx->channels > FF_SANE_NB_CHANNELS) {
                    av_log(avctx, AV_LOG_ERROR, "Too many channels: %d.\n",
                           avctx->channels);
                    return AVERROR(ENOSYS);
                }

                /* derive a layout from the channel count, with an
                 * all-channels bitmask as last resort */
                frame->channel_layout = av_get_default_channel_layout(avctx->channels);
                if (!frame->channel_layout)
                    frame->channel_layout = (1ULL << avctx->channels) - 1;
            }
        }
        break;
    default: return AVERROR(EINVAL);
    }

    ret = ff_decode_frame_props(avctx, frame);
    if (ret < 0)
        return ret;

    if (hwaccel) {
        if (hwaccel->alloc_frame) {
            ret = hwaccel->alloc_frame(avctx, frame);
            goto end;
        }
    } else
        avctx->sw_pix_fmt = avctx->pix_fmt;

    ret = avctx->get_buffer2(avctx, frame, flags);

end:
    /* restore the display dimensions when we substituted coded ones */
    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions) {
        frame->width  = avctx->width;
        frame->height = avctx->height;
    }

    return ret;
}
  728. int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
  729. {
  730. AVFrame *tmp;
  731. int ret;
  732. av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);
  733. if (!frame->data[0])
  734. return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
  735. if (av_frame_is_writable(frame))
  736. return ff_decode_frame_props(avctx, frame);
  737. tmp = av_frame_alloc();
  738. if (!tmp)
  739. return AVERROR(ENOMEM);
  740. av_frame_move_ref(tmp, frame);
  741. ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
  742. if (ret < 0) {
  743. av_frame_free(&tmp);
  744. return ret;
  745. }
  746. av_frame_copy(frame, tmp);
  747. av_frame_free(&tmp);
  748. return 0;
  749. }
  750. void avcodec_flush_buffers(AVCodecContext *avctx)
  751. {
  752. avctx->internal->draining = 0;
  753. avctx->internal->draining_done = 0;
  754. av_frame_unref(avctx->internal->buffer_frame);
  755. av_packet_unref(avctx->internal->buffer_pkt);
  756. avctx->internal->buffer_pkt_valid = 0;
  757. if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
  758. ff_thread_flush(avctx);
  759. else if (avctx->codec->flush)
  760. avctx->codec->flush(avctx);
  761. if (!avctx->refcounted_frames)
  762. av_frame_unref(avctx->internal->to_free);
  763. }