/*
 * Apple ProRes compatible decoder
 *
 * Copyright (c) 2010-2011 Maxim Poliakovski
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/**
 * @file
 * This is a decoder for Apple ProRes 422 SD/HQ/LT/Proxy and ProRes 4444.
 * The format is used for storing and editing high-definition video data in
 * Apple's Final Cut Pro.
 *
 * @see http://wiki.multimedia.cx/index.php?title=Apple_ProRes
 */
#define LONG_BITSTREAM_READER // some ProRes vlc codes require up to 28 bits to be read at once

#include <stdint.h>
#include <string.h>

#include "libavutil/intmath.h"
#include "avcodec.h"
#include "internal.h"
#include "proresdata.h"
#include "proresdsp.h"
#include "get_bits.h"
typedef struct {
    const uint8_t *index;            ///< pointer to the data of this slice
    int slice_num;
    int x_pos, y_pos;
    int slice_width;
    int prev_slice_sf;               ///< scalefactor of the previous decoded slice
    DECLARE_ALIGNED(16, DCTELEM, blocks)[8 * 4 * 64];
    DECLARE_ALIGNED(16, int16_t, qmat_luma_scaled)[64];
    DECLARE_ALIGNED(16, int16_t, qmat_chroma_scaled)[64];
} ProresThreadData;
typedef struct {
    ProresDSPContext dsp;
    AVFrame    picture;
    ScanTable  scantable;
    int        scantable_type;        ///< -1 = uninitialized, 0 = progressive, 1/2 = interlaced

    int        frame_type;            ///< 0 = progressive, 1 = top-field first, 2 = bottom-field first
    int        pic_format;            ///< 2 = 422, 3 = 444
    uint8_t    qmat_luma[64];         ///< dequantization matrix for luma
    uint8_t    qmat_chroma[64];       ///< dequantization matrix for chroma
    int        qmat_changed;          ///< 1 - global quantization matrices changed
    int        total_slices;          ///< total number of slices in a picture
    ProresThreadData *slice_data;
    int        pic_num;
    int        chroma_factor;
    int        mb_chroma_factor;
    int        num_chroma_blocks;     ///< number of chrominance blocks in a macroblock
    int        num_x_slices;
    int        num_y_slices;
    int        slice_width_factor;
    int        slice_height_factor;
    int        num_x_mbs;
    int        num_y_mbs;
    int        alpha_info;
} ProresContext;
static av_cold int decode_init(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;

    ctx->total_slices = 0;
    ctx->slice_data   = NULL;

    avctx->bits_per_raw_sample = PRORES_BITS_PER_SAMPLE;
    ff_proresdsp_init(&ctx->dsp);

    avctx->coded_frame = &ctx->picture;
    avcodec_get_frame_defaults(&ctx->picture);
    ctx->picture.type      = AV_PICTURE_TYPE_I;
    ctx->picture.key_frame = 1;

    ctx->scantable_type = -1;   // set scantable type to uninitialized
    memset(ctx->qmat_luma, 4, 64);
    memset(ctx->qmat_chroma, 4, 64);

    return 0;
}
static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
                               const int data_size, AVCodecContext *avctx)
{
    int hdr_size, version, width, height, flags;
    const uint8_t *ptr;

    hdr_size = AV_RB16(buf);
    if (hdr_size > data_size) {
        av_log(avctx, AV_LOG_ERROR, "frame data too small\n");
        return AVERROR_INVALIDDATA;
    }

    version = AV_RB16(buf + 2);
    if (version >= 2) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported header version: %d\n", version);
        return AVERROR_INVALIDDATA;
    }

    width  = AV_RB16(buf + 8);
    height = AV_RB16(buf + 10);
    if (width != avctx->width || height != avctx->height) {
        av_log(avctx, AV_LOG_ERROR,
               "picture dimension changed: old: %d x %d, new: %d x %d\n",
               avctx->width, avctx->height, width, height);
        return AVERROR_INVALIDDATA;
    }

    ctx->frame_type = (buf[12] >> 2) & 3;
    if (ctx->frame_type > 2) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported frame type: %d\n", ctx->frame_type);
        return AVERROR_INVALIDDATA;
    }

    ctx->chroma_factor     = (buf[12] >> 6) & 3;
    ctx->mb_chroma_factor  = ctx->chroma_factor + 2;
    ctx->num_chroma_blocks = (1 << ctx->chroma_factor) >> 1;
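    /* chroma_factor 2 selects 4:2:2 (two chroma blocks per macroblock and
       plane), chroma_factor 3 selects 4:4:4 (four); any other value is
       rejected below. */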
    switch (ctx->chroma_factor) {
    case 2:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
        break;
    case 3:
        avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "unsupported picture format: %d\n", ctx->pic_format);
        return AVERROR_INVALIDDATA;
    }

    if (ctx->scantable_type != ctx->frame_type) {
        if (!ctx->frame_type)
            ff_init_scantable(ctx->dsp.idct_permutation, &ctx->scantable,
                              ff_prores_progressive_scan);
        else
            ff_init_scantable(ctx->dsp.idct_permutation, &ctx->scantable,
                              ff_prores_interlaced_scan);
        ctx->scantable_type = ctx->frame_type;
    }

    if (ctx->frame_type) {      /* if interlaced */
        ctx->picture.interlaced_frame = 1;
        ctx->picture.top_field_first  = ctx->frame_type & 1;
    }

    avctx->color_primaries = buf[14];
    avctx->color_trc       = buf[15];
    avctx->colorspace      = buf[16];

    ctx->alpha_info = buf[17] & 0xf;
    if (ctx->alpha_info)
        av_log_missing_feature(avctx, "Alpha channel", 0);

    ctx->qmat_changed = 0;
    ptr   = buf + 20;
    flags = buf[19];
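    /* Bit 1 of the flags byte signals a custom luma quantization matrix in
       the header, bit 0 a custom chroma matrix; when a matrix is absent the
       decoder falls back to a flat matrix filled with 4. */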
    if (flags & 2) {
        if (ptr - buf > hdr_size - 64) {
            av_log(avctx, AV_LOG_ERROR, "header data too small\n");
            return AVERROR_INVALIDDATA;
        }
        if (memcmp(ctx->qmat_luma, ptr, 64)) {
            memcpy(ctx->qmat_luma, ptr, 64);
            ctx->qmat_changed = 1;
        }
        ptr += 64;
    } else {
        memset(ctx->qmat_luma, 4, 64);
        ctx->qmat_changed = 1;
    }

    if (flags & 1) {
        if (ptr - buf > hdr_size - 64) {
            av_log(avctx, AV_LOG_ERROR, "header data too small\n");
            return -1;
        }
        if (memcmp(ctx->qmat_chroma, ptr, 64)) {
            memcpy(ctx->qmat_chroma, ptr, 64);
            ctx->qmat_changed = 1;
        }
    } else {
        memset(ctx->qmat_chroma, 4, 64);
        ctx->qmat_changed = 1;
    }

    return hdr_size;
}
static int decode_picture_header(ProresContext *ctx, const uint8_t *buf,
                                 const int data_size, AVCodecContext *avctx)
{
    int   i, hdr_size, pic_data_size, num_slices;
    int   slice_width_factor, slice_height_factor;
    int   remainder, num_x_slices;
    const uint8_t *data_ptr, *index_ptr;

    hdr_size = data_size > 0 ? buf[0] >> 3 : 0;
    if (hdr_size < 8 || hdr_size > data_size) {
        av_log(avctx, AV_LOG_ERROR, "picture header too small\n");
        return AVERROR_INVALIDDATA;
    }

    pic_data_size = AV_RB32(buf + 1);
    if (pic_data_size > data_size) {
        av_log(avctx, AV_LOG_ERROR, "picture data too small\n");
        return AVERROR_INVALIDDATA;
    }

    slice_width_factor  = buf[7] >> 4;
    slice_height_factor = buf[7] & 0xF;
    if (slice_width_factor > 3 || slice_height_factor) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported slice dimension: %d x %d\n",
               1 << slice_width_factor, 1 << slice_height_factor);
        return AVERROR_INVALIDDATA;
    }

    ctx->slice_width_factor  = slice_width_factor;
    ctx->slice_height_factor = slice_height_factor;

    ctx->num_x_mbs = (avctx->width + 15) >> 4;
    ctx->num_y_mbs = (avctx->height +
                      (1 << (4 + ctx->picture.interlaced_frame)) - 1) >>
                     (4 + ctx->picture.interlaced_frame);
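    /* A slice row normally holds slices of (1 << slice_width_factor)
       macroblocks each; the leftover macroblocks at the right edge are
       covered by progressively halved slices, one per set bit of the
       remainder, so the extra terms below amount to a popcount. */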
    remainder    = ctx->num_x_mbs & ((1 << slice_width_factor) - 1);
    num_x_slices = (ctx->num_x_mbs >> slice_width_factor) + (remainder & 1) +
                   ((remainder >> 1) & 1) + ((remainder >> 2) & 1);

    num_slices = num_x_slices * ctx->num_y_mbs;
    if (num_slices != AV_RB16(buf + 5)) {
        av_log(avctx, AV_LOG_ERROR, "invalid number of slices\n");
        return AVERROR_INVALIDDATA;
    }

    if (ctx->total_slices != num_slices) {
        av_freep(&ctx->slice_data);
        ctx->slice_data = av_malloc((num_slices + 1) * sizeof(ctx->slice_data[0]));
        if (!ctx->slice_data)
            return AVERROR(ENOMEM);
        ctx->total_slices = num_slices;
    }

    if (hdr_size + num_slices * 2 > data_size) {
        av_log(avctx, AV_LOG_ERROR, "slice table too small\n");
        return AVERROR_INVALIDDATA;
    }

    /* parse the slice table allowing quick access to the slice data */
    index_ptr = buf + hdr_size;
    data_ptr  = index_ptr + num_slices * 2;

    for (i = 0; i < num_slices; i++) {
        ctx->slice_data[i].index         = data_ptr;
        ctx->slice_data[i].prev_slice_sf = 0;
        data_ptr += AV_RB16(index_ptr + i * 2);
    }
    /* the extra entry acts as a sentinel so slice sizes can be computed */
    ctx->slice_data[i].index         = data_ptr;
    ctx->slice_data[i].prev_slice_sf = 0;

    if (data_ptr > buf + data_size) {
        av_log(avctx, AV_LOG_ERROR, "out of slice data\n");
        return -1;
    }

    return pic_data_size;
}
/**
 * Read an unsigned rice/exp golomb codeword.
 *
 * The codebook descriptor packs three fields: bits 0-1 hold switch_bits - 1
 * (the prefix length that selects between the Rice and the Exp-Golomb
 * branch), bits 2-4 hold the Exp-Golomb order and the upper bits hold the
 * Rice order.
 */
static inline int decode_vlc_codeword(GetBitContext *gb, unsigned codebook)
{
    unsigned int rice_order, exp_order, switch_bits;
    unsigned int buf, code;
    int log, prefix_len, len;

    OPEN_READER(re, gb);
    UPDATE_CACHE(re, gb);
    buf = GET_CACHE(re, gb);

    /* number of prefix bits to switch between Rice and expGolomb */
    switch_bits = (codebook & 3) + 1;
    rice_order  = codebook >> 5;       /* rice code order */
    exp_order   = (codebook >> 2) & 7; /* exp golomb code order */

    log = 31 - av_log2(buf); /* count prefix bits (zeroes) */

    if (log < switch_bits) { /* ok, we got a rice code */
        if (!rice_order) {
            /* shortcut for faster decoding of rice codes without remainder */
            code = log;
            LAST_SKIP_BITS(re, gb, log + 1);
        } else {
            prefix_len = log + 1;
            code = (log << rice_order) + NEG_USR32(buf << prefix_len, rice_order);
            LAST_SKIP_BITS(re, gb, prefix_len + rice_order);
        }
    } else { /* otherwise we got an exp golomb code */
        len  = (log << 1) - switch_bits + exp_order + 1;
        code = NEG_USR32(buf, len) - (1 << exp_order) + (switch_bits << rice_order);
        LAST_SKIP_BITS(re, gb, len);
    }

    CLOSE_READER(re, gb);

    return code;
}
#define LSB2SIGN(x) (-((x) & 1))
#define TOSIGNED(x) (((x) >> 1) ^ LSB2SIGN(x))
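/* TOSIGNED() maps the unsigned codeword back to a signed value:
   0, 1, 2, 3, 4, ... -> 0, -1, 1, -2, 2, ... */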
/**
 * Decode DC coefficients for all blocks in a slice.
 */
static inline void decode_dc_coeffs(GetBitContext *gb, DCTELEM *out,
                                    int nblocks)
{
    DCTELEM prev_dc;
    int     i, sign;
    int16_t delta;
    unsigned int code;

    code   = decode_vlc_codeword(gb, FIRST_DC_CB);
    out[0] = prev_dc = TOSIGNED(code);

    out += 64; /* move to the DC coeff of the next block */
    delta = 3;
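    /* Each remaining DC is coded as a difference from the previous block's
       DC: the codebook adapts to the magnitude of the previous difference,
       the magnitude of the new difference is (code + 1) >> 1, and its sign
       is derived from the codeword's LSB combined with the sign of the
       previous difference. */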
    for (i = 1; i < nblocks; i++, out += 64) {
        code = decode_vlc_codeword(gb, ff_prores_dc_codebook[FFMIN(FFABS(delta), 3)]);

        sign     = -(((delta >> 15) & 1) ^ (code & 1));
        delta    = (((code + 1) >> 1) ^ sign) - sign;
        prev_dc += delta;
        out[0]   = prev_dc;
    }
}
/**
 * Decode AC coefficients for all blocks in a slice.
 */
static inline void decode_ac_coeffs(GetBitContext *gb, DCTELEM *out,
                                    int blocks_per_slice,
                                    int plane_size_factor,
                                    const uint8_t *scan)
{
    int pos, block_mask, run, level, sign, run_cb_index, lev_cb_index;
    int max_coeffs, bits_left;

    /* set initial prediction values */
    run   = 4;
    level = 2;

    max_coeffs = blocks_per_slice << 6;
    block_mask = blocks_per_slice - 1;
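    /* AC coefficients of the whole slice are interleaved by scan position:
       pos >> plane_size_factor is the scan index and pos & block_mask is the
       block within the slice. The scan starts at blocks_per_slice - 1
       because positions 0 .. blocks_per_slice - 1 hold the already decoded
       DC coefficients. */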
    for (pos = blocks_per_slice - 1; pos < max_coeffs;) {
        run_cb_index = ff_prores_run_to_cb_index[FFMIN(run, 15)];
        lev_cb_index = ff_prores_lev_to_cb_index[FFMIN(level, 9)];

        bits_left = get_bits_left(gb);
        if (bits_left <= 0 || (bits_left <= 8 && !show_bits(gb, bits_left)))
            return;

        run = decode_vlc_codeword(gb, ff_prores_ac_codebook[run_cb_index]);

        bits_left = get_bits_left(gb);
        if (bits_left <= 0 || (bits_left <= 8 && !show_bits(gb, bits_left)))
            return;

        level = decode_vlc_codeword(gb, ff_prores_ac_codebook[lev_cb_index]) + 1;

        pos += run + 1;
        if (pos >= max_coeffs)
            break;

        sign = get_sbits(gb, 1);
        out[((pos & block_mask) << 6) + scan[pos >> plane_size_factor]] =
            (level ^ sign) - sign;
    }
}
/**
 * Decode a slice plane (luma or chroma).
 */
static void decode_slice_plane(ProresContext *ctx, ProresThreadData *td,
                               const uint8_t *buf,
                               int data_size, uint16_t *out_ptr,
                               int linesize, int mbs_per_slice,
                               int blocks_per_mb, int plane_size_factor,
                               const int16_t *qmat, int is_chroma)
{
    GetBitContext gb;
    DCTELEM      *block_ptr;
    int           mb_num, blocks_per_slice;

    blocks_per_slice = mbs_per_slice * blocks_per_mb;

    memset(td->blocks, 0, 8 * 4 * 64 * sizeof(*td->blocks));

    init_get_bits(&gb, buf, data_size << 3);

    decode_dc_coeffs(&gb, td->blocks, blocks_per_slice);

    decode_ac_coeffs(&gb, td->blocks, blocks_per_slice,
                     plane_size_factor, ctx->scantable.permutated);

    /* inverse quantization, inverse transform and output */
    block_ptr = td->blocks;
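    /* The four blocks of a luma macroblock are output in raster order
       (top-left, top-right, bottom-left, bottom-right), while chroma blocks
       are output column first (top-left, bottom-left, top-right,
       bottom-right), which is why luma and chroma need separate loops. */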
    if (!is_chroma) {
        for (mb_num = 0; mb_num < mbs_per_slice; mb_num++, out_ptr += blocks_per_mb * 4) {
            ctx->dsp.idct_put(out_ptr, linesize, block_ptr, qmat);
            block_ptr += 64;
            if (blocks_per_mb > 2) {
                ctx->dsp.idct_put(out_ptr + 8, linesize, block_ptr, qmat);
                block_ptr += 64;
            }
            ctx->dsp.idct_put(out_ptr + linesize * 4, linesize, block_ptr, qmat);
            block_ptr += 64;
            if (blocks_per_mb > 2) {
                ctx->dsp.idct_put(out_ptr + linesize * 4 + 8, linesize, block_ptr, qmat);
                block_ptr += 64;
            }
        }
    } else {
        for (mb_num = 0; mb_num < mbs_per_slice; mb_num++, out_ptr += blocks_per_mb * 4) {
            ctx->dsp.idct_put(out_ptr, linesize, block_ptr, qmat);
            block_ptr += 64;
            ctx->dsp.idct_put(out_ptr + linesize * 4, linesize, block_ptr, qmat);
            block_ptr += 64;
            if (blocks_per_mb > 2) {
                ctx->dsp.idct_put(out_ptr + 8, linesize, block_ptr, qmat);
                block_ptr += 64;
                ctx->dsp.idct_put(out_ptr + linesize * 4 + 8, linesize, block_ptr, qmat);
                block_ptr += 64;
            }
        }
    }
}
static int decode_slice(AVCodecContext *avctx, void *tdata)
{
    ProresThreadData *td = tdata;
    ProresContext *ctx = avctx->priv_data;
    int mb_x_pos  = td->x_pos;
    int mb_y_pos  = td->y_pos;
    int pic_num   = ctx->pic_num;
    int slice_num = td->slice_num;
    int mbs_per_slice = td->slice_width;
    const uint8_t *buf;
    uint8_t *y_data, *u_data, *v_data;
    AVFrame *pic = avctx->coded_frame;
    int i, sf, slice_width_factor;
    int slice_data_size, hdr_size, y_data_size, u_data_size, v_data_size;
    int y_linesize, u_linesize, v_linesize;

    buf             = ctx->slice_data[slice_num].index;
    slice_data_size = ctx->slice_data[slice_num + 1].index - buf;

    slice_width_factor = av_log2(mbs_per_slice);

    y_data     = pic->data[0];
    u_data     = pic->data[1];
    v_data     = pic->data[2];
    y_linesize = pic->linesize[0];
    u_linesize = pic->linesize[1];
    v_linesize = pic->linesize[2];
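    /* For interlaced content each coded picture carries one field: pic_num
       and top_field_first select whether this field starts on the first or
       the second line of the frame, and the line strides are doubled so the
       slice only touches every other row. */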
    if (pic->interlaced_frame) {
        if (!(pic_num ^ pic->top_field_first)) {
            y_data += y_linesize;
            u_data += u_linesize;
            v_data += v_linesize;
        }
        y_linesize <<= 1;
        u_linesize <<= 1;
        v_linesize <<= 1;
    }

    if (slice_data_size < 6) {
        av_log(avctx, AV_LOG_ERROR, "slice data too small\n");
        return AVERROR_INVALIDDATA;
    }

    /* parse slice header */
    hdr_size    = buf[0] >> 3;
    y_data_size = AV_RB16(buf + 2);
    u_data_size = AV_RB16(buf + 4);
    v_data_size = hdr_size > 7 ? AV_RB16(buf + 6) :
                  slice_data_size - y_data_size - u_data_size - hdr_size;

    if (hdr_size + y_data_size + u_data_size + v_data_size > slice_data_size ||
        v_data_size < 0 || hdr_size < 6) {
        av_log(avctx, AV_LOG_ERROR, "invalid data size\n");
        return AVERROR_INVALIDDATA;
    }
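    /* Quantiser scale: values 1-128 are used directly, values above 128 are
       expanded to (sf - 96) * 4, yielding scale factors from 132 up to 512
       in steps of four. */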
    sf = av_clip(buf[1], 1, 224);
    sf = sf > 128 ? (sf - 96) << 2 : sf;

    /* scale the quantization matrices according to the slice's scale factor */
    /* TODO: this can be SIMD-optimized a lot */
    if (ctx->qmat_changed || sf != td->prev_slice_sf) {
        td->prev_slice_sf = sf;
        for (i = 0; i < 64; i++) {
            td->qmat_luma_scaled[ctx->dsp.idct_permutation[i]]   = ctx->qmat_luma[i]   * sf;
            td->qmat_chroma_scaled[ctx->dsp.idct_permutation[i]] = ctx->qmat_chroma[i] * sf;
        }
    }

    /* decode luma plane */
    decode_slice_plane(ctx, td, buf + hdr_size, y_data_size,
                       (uint16_t*) (y_data + (mb_y_pos << 4) * y_linesize +
                                    (mb_x_pos << 5)), y_linesize,
                       mbs_per_slice, 4, slice_width_factor + 2,
                       td->qmat_luma_scaled, 0);

    /* decode U chroma plane */
    decode_slice_plane(ctx, td, buf + hdr_size + y_data_size, u_data_size,
                       (uint16_t*) (u_data + (mb_y_pos << 4) * u_linesize +
                                    (mb_x_pos << ctx->mb_chroma_factor)),
                       u_linesize, mbs_per_slice, ctx->num_chroma_blocks,
                       slice_width_factor + ctx->chroma_factor - 1,
                       td->qmat_chroma_scaled, 1);

    /* decode V chroma plane */
    decode_slice_plane(ctx, td, buf + hdr_size + y_data_size + u_data_size,
                       v_data_size,
                       (uint16_t*) (v_data + (mb_y_pos << 4) * v_linesize +
                                    (mb_x_pos << ctx->mb_chroma_factor)),
                       v_linesize, mbs_per_slice, ctx->num_chroma_blocks,
                       slice_width_factor + ctx->chroma_factor - 1,
                       td->qmat_chroma_scaled, 1);

    return 0;
}
static int decode_picture(ProresContext *ctx, int pic_num,
                          AVCodecContext *avctx)
{
    int slice_num, slice_width, x_pos, y_pos;

    slice_num = 0;

    ctx->pic_num = pic_num;
    for (y_pos = 0; y_pos < ctx->num_y_mbs; y_pos++) {
        slice_width = 1 << ctx->slice_width_factor;
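        /* Slices are slice_width macroblocks wide except at the right edge,
           where the width is halved until the remaining macroblocks fit. */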
        for (x_pos = 0; x_pos < ctx->num_x_mbs && slice_width;
             x_pos += slice_width) {
            while (ctx->num_x_mbs - x_pos < slice_width)
                slice_width >>= 1;

            ctx->slice_data[slice_num].slice_num   = slice_num;
            ctx->slice_data[slice_num].x_pos       = x_pos;
            ctx->slice_data[slice_num].y_pos       = y_pos;
            ctx->slice_data[slice_num].slice_width = slice_width;

            slice_num++;
        }
    }

    return avctx->execute(avctx, decode_slice,
                          ctx->slice_data, NULL, slice_num,
                          sizeof(ctx->slice_data[0]));
}
#define MOVE_DATA_PTR(nbytes) buf += (nbytes); buf_size -= (nbytes)

static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    ProresContext *ctx = avctx->priv_data;
    AVFrame *picture   = avctx->coded_frame;
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    int frame_hdr_size, pic_num, pic_data_size;

    /* check frame atom container */
    if (buf_size < 28 || buf_size < AV_RB32(buf) ||
        AV_RB32(buf + 4) != FRAME_ID) {
        av_log(avctx, AV_LOG_ERROR, "invalid frame\n");
        return AVERROR_INVALIDDATA;
    }
    MOVE_DATA_PTR(8);

    frame_hdr_size = decode_frame_header(ctx, buf, buf_size, avctx);
    if (frame_hdr_size < 0)
        return AVERROR_INVALIDDATA;

    MOVE_DATA_PTR(frame_hdr_size);

    if (picture->data[0])
        avctx->release_buffer(avctx, picture);

    picture->reference = 0;
    if (ff_get_buffer(avctx, picture) < 0)
        return -1;
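    /* A progressive frame contains one coded picture, an interlaced frame
       contains two (one per field), so the loop below runs once or twice. */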
    for (pic_num = 0; ctx->picture.interlaced_frame - pic_num + 1; pic_num++) {
        pic_data_size = decode_picture_header(ctx, buf, buf_size, avctx);
        if (pic_data_size < 0)
            return AVERROR_INVALIDDATA;

        if (decode_picture(ctx, pic_num, avctx))
            return -1;

        MOVE_DATA_PTR(pic_data_size);
    }

    *got_frame       = 1;
    *(AVFrame*) data = *avctx->coded_frame;

    return avpkt->size;
}
static av_cold int decode_close(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;

    if (ctx->picture.data[0])
        avctx->release_buffer(avctx, &ctx->picture);

    av_freep(&ctx->slice_data);

    return 0;
}
AVCodec ff_prores_decoder = {
    .name           = "prores",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_PRORES,
    .priv_data_size = sizeof(ProresContext),
    .init           = decode_init,
    .close          = decode_close,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS,
    .long_name      = NULL_IF_CONFIG_SMALL("Apple ProRes (iCodec Pro)"),
};