You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

734 lines
23KB

  1. /*
  2. * Apple ProRes compatible decoder
  3. *
  4. * Copyright (c) 2010-2011 Maxim Poliakovski
  5. *
  6. * This file is part of Libav.
  7. *
  8. * Libav is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * Libav is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with Libav; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file
  24. * This is a decoder for Apple ProRes 422 SD/HQ/LT/Proxy and ProRes 4444.
  25. * It is used for storing and editing high definition video data in Apple's Final Cut Pro.
  26. *
  27. * @see http://wiki.multimedia.cx/index.php?title=Apple_ProRes
  28. */
  29. #define A32_BITSTREAM_READER // some ProRes vlc codes require up to 28 bits to be read at once
  30. #include <stdint.h>
  31. #include "libavutil/intmath.h"
  32. #include "avcodec.h"
  33. #include "dsputil.h"
  34. #include "get_bits.h"
  35. #define BITS_PER_SAMPLE 10 ///< output precision of that decoder
  36. #define BIAS (1 << (BITS_PER_SAMPLE - 1)) ///< bias value for converting signed pixels into unsigned ones
  37. #define CLIP_MIN (1 << (BITS_PER_SAMPLE - 8)) ///< minimum value for clipping resulting pixels
  38. #define CLIP_MAX (1 << BITS_PER_SAMPLE) - CLIP_MIN - 1 ///< maximum value for clipping resulting pixels
/**
 * Per-stream decoder context for Apple ProRes.
 * Holds frame/picture level parameters parsed from the headers plus
 * scratch buffers reused while decoding each slice.
 */
typedef struct {
    DSPContext dsp;                   ///< DSP helpers (IDCT, scan permutation)
    AVFrame picture;                  ///< the frame being decoded and returned
    ScanTable scantable;              ///< permutated coefficient scan order
    int scantable_type;               ///< -1 = uninitialized, 0 = progressive, 1/2 = interlaced
    int frame_type;                   ///< 0 = progressive, 1 = top-field first, 2 = bottom-field first
    int pic_format;                   ///< 2 = 422, 3 = 444
    uint8_t qmat_luma[64];            ///< dequantization matrix for luma
    uint8_t qmat_chroma[64];          ///< dequantization matrix for chroma
    int qmat_changed;                 ///< 1 - global quantization matrices changed
    int prev_slice_sf;                ///< scalefactor of the previous decoded slice
    DECLARE_ALIGNED(16, int16_t, qmat_luma_scaled[64]);   ///< qmat_luma scaled by the slice scale factor
    DECLARE_ALIGNED(16, int16_t, qmat_chroma_scaled[64]); ///< qmat_chroma scaled by the slice scale factor
    DECLARE_ALIGNED(16, DCTELEM, blocks[8 * 4 * 64]);     ///< coefficient buffer for one slice plane
    int total_slices;                 ///< total number of slices in a picture
    const uint8_t **slice_data_index; ///< array of pointers to the data of each slice
    int chroma_factor;                ///< 2 = 422, 3 = 444 (from the frame header)
    int mb_chroma_factor;             ///< chroma_factor + 2
    int num_chroma_blocks;            ///< number of chrominance blocks in a macroblock
    int num_x_slices;
    int num_y_slices;
    int slice_width_factor;           ///< log2 of the default slice width in macroblocks
    int slice_height_factor;          ///< log2 of the slice height (only 0 is supported)
    int num_x_mbs;                    ///< picture width in macroblocks
    int num_y_mbs;                    ///< picture (or field, if interlaced) height in macroblocks
} ProresContext;
/** coefficient scan order used for progressive frames */
static const uint8_t progressive_scan[64] = {
     0,  1,  8,  9,  2,  3, 10, 11,
    16, 17, 24, 25, 18, 19, 26, 27,
     4,  5, 12, 20, 13,  6,  7, 14,
    21, 28, 29, 22, 15, 23, 30, 31,
    32, 33, 40, 48, 41, 34, 35, 42,
    49, 56, 57, 50, 43, 36, 37, 44,
    51, 58, 59, 52, 45, 38, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63
};
/** coefficient scan order used for interlaced frames (both field orders) */
static const uint8_t interlaced_scan[64] = {
     0,  8,  1,  9, 16, 24, 17, 25,
     2, 10,  3, 11, 18, 26, 19, 27,
    32, 40, 33, 34, 41, 48, 56, 49,
    42, 35, 43, 50, 57, 58, 51, 59,
     4, 12,  5,  6, 13, 20, 28, 21,
    14,  7, 15, 22, 29, 36, 44, 37,
    30, 23, 31, 38, 45, 52, 60, 53,
    46, 39, 47, 54, 61, 62, 55, 63
};
  85. static av_cold int decode_init(AVCodecContext *avctx)
  86. {
  87. ProresContext *ctx = avctx->priv_data;
  88. ctx->total_slices = 0;
  89. ctx->slice_data_index = 0;
  90. avctx->pix_fmt = PIX_FMT_YUV422P10; // set default pixel format
  91. avctx->bits_per_raw_sample = BITS_PER_SAMPLE;
  92. dsputil_init(&ctx->dsp, avctx);
  93. avctx->coded_frame = &ctx->picture;
  94. avcodec_get_frame_defaults(&ctx->picture);
  95. ctx->picture.type = AV_PICTURE_TYPE_I;
  96. ctx->picture.key_frame = 1;
  97. ctx->scantable_type = -1; // set scantable type to uninitialized
  98. memset(ctx->qmat_luma, 4, 64);
  99. memset(ctx->qmat_chroma, 4, 64);
  100. ctx->prev_slice_sf = 0;
  101. return 0;
  102. }
  103. static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
  104. const int data_size, AVCodecContext *avctx)
  105. {
  106. int hdr_size, version, width, height, flags;
  107. const uint8_t *ptr;
  108. hdr_size = AV_RB16(buf);
  109. if (hdr_size > data_size) {
  110. av_log(avctx, AV_LOG_ERROR, "frame data too short!\n");
  111. return -1;
  112. }
  113. version = AV_RB16(buf + 2);
  114. if (version >= 2) {
  115. av_log(avctx, AV_LOG_ERROR,
  116. "unsupported header version: %d\n", version);
  117. return -1;
  118. }
  119. width = AV_RB16(buf + 8);
  120. height = AV_RB16(buf + 10);
  121. if (width != avctx->width || height != avctx->height) {
  122. av_log(avctx, AV_LOG_ERROR,
  123. "picture dimension changed! Old: %d x %d, new: %d x %d\n",
  124. avctx->width, avctx->height, width, height);
  125. return -1;
  126. }
  127. ctx->frame_type = (buf[12] >> 2) & 3;
  128. if (ctx->frame_type > 2) {
  129. av_log(avctx, AV_LOG_ERROR,
  130. "unsupported frame type: %d!\n", ctx->frame_type);
  131. return -1;
  132. }
  133. ctx->chroma_factor = (buf[12] >> 6) & 3;
  134. ctx->mb_chroma_factor = ctx->chroma_factor + 2;
  135. ctx->num_chroma_blocks = (1 << ctx->chroma_factor) >> 1;
  136. switch (ctx->chroma_factor) {
  137. case 2:
  138. avctx->pix_fmt = PIX_FMT_YUV422P10;
  139. break;
  140. case 3:
  141. avctx->pix_fmt = PIX_FMT_YUV444P10;
  142. break;
  143. default:
  144. av_log(avctx, AV_LOG_ERROR,
  145. "unsupported picture format: %d!\n", ctx->pic_format);
  146. return -1;
  147. }
  148. if (ctx->scantable_type != ctx->frame_type) {
  149. if (!ctx->frame_type)
  150. ff_init_scantable(ctx->dsp.idct_permutation, &ctx->scantable,
  151. progressive_scan);
  152. else
  153. ff_init_scantable(ctx->dsp.idct_permutation, &ctx->scantable,
  154. interlaced_scan);
  155. ctx->scantable_type = ctx->frame_type;
  156. }
  157. if (ctx->frame_type) { /* if interlaced */
  158. ctx->picture.interlaced_frame = 1;
  159. ctx->picture.top_field_first = ctx->frame_type & 1;
  160. }
  161. ctx->qmat_changed = 0;
  162. ptr = buf + 20;
  163. flags = buf[19];
  164. if (flags & 2) {
  165. if (ptr - buf > hdr_size - 64) {
  166. av_log(avctx, AV_LOG_ERROR, "Too short header data\n");
  167. return -1;
  168. }
  169. if (memcmp(ctx->qmat_luma, ptr, 64)) {
  170. memcpy(ctx->qmat_luma, ptr, 64);
  171. ctx->qmat_changed = 1;
  172. }
  173. ptr += 64;
  174. } else {
  175. memset(ctx->qmat_luma, 4, 64);
  176. ctx->qmat_changed = 1;
  177. }
  178. if (flags & 1) {
  179. if (ptr - buf > hdr_size - 64) {
  180. av_log(avctx, AV_LOG_ERROR, "Too short header data\n");
  181. return -1;
  182. }
  183. if (memcmp(ctx->qmat_chroma, ptr, 64)) {
  184. memcpy(ctx->qmat_chroma, ptr, 64);
  185. ctx->qmat_changed = 1;
  186. }
  187. } else {
  188. memset(ctx->qmat_chroma, 4, 64);
  189. ctx->qmat_changed = 1;
  190. }
  191. return hdr_size;
  192. }
  193. static int decode_picture_header(ProresContext *ctx, const uint8_t *buf,
  194. const int data_size, AVCodecContext *avctx)
  195. {
  196. int i, hdr_size, pic_data_size, num_slices;
  197. int slice_width_factor, slice_height_factor;
  198. int remainder, num_x_slices;
  199. const uint8_t *data_ptr, *index_ptr;
  200. hdr_size = data_size > 0 ? buf[0] >> 3 : 0;
  201. if (hdr_size < 8 || hdr_size > data_size) {
  202. av_log(avctx, AV_LOG_ERROR, "picture header too short!\n");
  203. return -1;
  204. }
  205. pic_data_size = AV_RB32(buf + 1);
  206. if (pic_data_size > data_size) {
  207. av_log(avctx, AV_LOG_ERROR, "picture data too short!\n");
  208. return -1;
  209. }
  210. slice_width_factor = buf[7] >> 4;
  211. slice_height_factor = buf[7] & 0xF;
  212. if (slice_width_factor > 3 || slice_height_factor) {
  213. av_log(avctx, AV_LOG_ERROR,
  214. "unsupported slice dimension: %d x %d!\n",
  215. 1 << slice_width_factor, 1 << slice_height_factor);
  216. return -1;
  217. }
  218. ctx->slice_width_factor = slice_width_factor;
  219. ctx->slice_height_factor = slice_height_factor;
  220. ctx->num_x_mbs = (avctx->width + 15) >> 4;
  221. ctx->num_y_mbs =
  222. (avctx->height + (1 << (4 + ctx->picture.interlaced_frame)) - 1) >>
  223. (4 + ctx->picture.interlaced_frame);
  224. remainder = ctx->num_x_mbs & ((1 << slice_width_factor) - 1);
  225. num_x_slices = (ctx->num_x_mbs >> slice_width_factor) + (remainder & 1) +
  226. ((remainder >> 1) & 1) + ((remainder >> 2) & 1);
  227. num_slices = num_x_slices * ctx->num_y_mbs;
  228. if (num_slices != AV_RB16(buf + 5)) {
  229. av_log(avctx, AV_LOG_ERROR, "invalid number of slices!\n");
  230. return -1;
  231. }
  232. if (ctx->total_slices != num_slices) {
  233. av_freep(&ctx->slice_data_index);
  234. ctx->slice_data_index =
  235. av_malloc((num_slices + 1) * sizeof(uint8_t*));
  236. if (!ctx->slice_data_index)
  237. return AVERROR(ENOMEM);
  238. ctx->total_slices = num_slices;
  239. }
  240. if (hdr_size + num_slices * 2 > data_size) {
  241. av_log(avctx, AV_LOG_ERROR, "slice table too short!\n");
  242. return -1;
  243. }
  244. /* parse slice table allowing quick access to the slice data */
  245. index_ptr = buf + hdr_size;
  246. data_ptr = index_ptr + num_slices * 2;
  247. for (i = 0; i < num_slices; i++) {
  248. ctx->slice_data_index[i] = data_ptr;
  249. data_ptr += AV_RB16(index_ptr + i * 2);
  250. }
  251. ctx->slice_data_index[i] = data_ptr;
  252. if (data_ptr > buf + data_size) {
  253. av_log(avctx, AV_LOG_ERROR, "out of slice data!\n");
  254. return -1;
  255. }
  256. return pic_data_size;
  257. }
/**
 * Read an unsigned rice/exp golomb codeword.
 *
 * The codebook descriptor packs three parameters into one byte:
 *   bits 0-1: switch_bits - 1 (prefix length at which Rice turns into expGolomb)
 *   bits 2-4: exp golomb code order
 *   bits 5-7: rice code order
 */
static inline int decode_vlc_codeword(GetBitContext *gb, uint8_t codebook)
{
    unsigned int rice_order, exp_order, switch_bits;
    unsigned int buf, code;
    int log, prefix_len, len;

    OPEN_READER(re, gb);
    UPDATE_CACHE(re, gb);
    buf = GET_CACHE(re, gb);

    /* number of prefix bits to switch between Rice and expGolomb */
    switch_bits = (codebook & 3) + 1;
    rice_order  = codebook >> 5;       /* rice code order */
    exp_order   = (codebook >> 2) & 7; /* exp golomb code order */

    log = 31 - av_log2(buf); /* count prefix bits (zeroes) */

    if (log < switch_bits) { /* ok, we got a rice code */
        if (!rice_order) {
            /* shortcut for faster decoding of rice codes without remainder */
            code = log;
            LAST_SKIP_BITS(re, gb, log + 1);
        } else {
            prefix_len = log + 1;
            code = (log << rice_order) + NEG_USR32((buf << prefix_len), rice_order);
            LAST_SKIP_BITS(re, gb, prefix_len + rice_order);
        }
    } else { /* otherwise we got a exp golomb code */
        len  = (log << 1) - switch_bits + exp_order + 1;
        code = NEG_USR32(buf, len) - (1 << exp_order) + (switch_bits << rice_order);
        LAST_SKIP_BITS(re, gb, len);
    }

    CLOSE_READER(re, gb);

    return code;
}
  292. #define LSB2SIGN(x) (-((x) & 1))
  293. #define TOSIGNED(x) (((x) >> 1) ^ LSB2SIGN(x))
  294. #define FIRST_DC_CB 0xB8 // rice_order = 5, exp_golomb_order = 6, switch_bits = 0
  295. static uint8_t dc_codebook[4] = {
  296. 0x04, // rice_order = 0, exp_golomb_order = 1, switch_bits = 0
  297. 0x28, // rice_order = 1, exp_golomb_order = 2, switch_bits = 0
  298. 0x4D, // rice_order = 2, exp_golomb_order = 3, switch_bits = 1
  299. 0x70 // rice_order = 3, exp_golomb_order = 4, switch_bits = 0
  300. };
/**
 * Decode DC coefficients for all blocks in a slice.
 *
 * The first DC is coded directly; subsequent DCs are coded as deltas whose
 * codebook adapts to the magnitude of the previously decoded delta.
 */
static inline void decode_dc_coeffs(GetBitContext *gb, DCTELEM *out,
                                    int nblocks)
{
    DCTELEM prev_dc;
    int i, sign;
    int16_t delta;
    unsigned int code;

    code   = decode_vlc_codeword(gb, FIRST_DC_CB);
    out[0] = prev_dc = TOSIGNED(code);

    out += 64; /* move to the DC coeff of the next block */
    delta = 3;

    for (i = 1; i < nblocks; i++, out += 64) {
        code = decode_vlc_codeword(gb, dc_codebook[FFMIN(FFABS(delta), 3)]);
        /* new delta's sign = previous delta's sign bit XOR the codeword's LSB
         * (delta is int16_t, so delta >> 15 isolates its sign bit) */
        sign  = -(((delta >> 15) & 1) ^ (code & 1));
        delta = (((code + 1) >> 1) ^ sign) - sign;
        prev_dc += delta;
        out[0] = prev_dc;
    }
}
/* codebooks used for coding AC run and level values; read-only.
 * NOTE(review): these three tables are never written — they could be
 * declared const so they land in .rodata */
static uint8_t ac_codebook[7] = {
    0x04, // rice_order = 0, exp_golomb_order = 1, switch_bits = 0
    0x28, // rice_order = 1, exp_golomb_order = 2, switch_bits = 0
    0x4C, // rice_order = 2, exp_golomb_order = 3, switch_bits = 0
    0x05, // rice_order = 0, exp_golomb_order = 1, switch_bits = 1
    0x29, // rice_order = 1, exp_golomb_order = 2, switch_bits = 1
    0x06, // rice_order = 0, exp_golomb_order = 1, switch_bits = 2
    0x0A, // rice_order = 0, exp_golomb_order = 2, switch_bits = 2
};

/**
 * Lookup tables for adaptive switching between codebooks
 * according with previous run/level value.
 */
static uint8_t run_to_cb_index[16] =
    { 5, 5, 3, 3, 0, 4, 4, 4, 4, 1, 1, 1, 1, 1, 1, 2 };

static uint8_t lev_to_cb_index[10] = { 0, 6, 3, 5, 0, 1, 1, 1, 1, 2 };
/**
 * Decode AC coefficients for all blocks in a slice.
 *
 * Coefficients of all blocks in the slice are interleaved: the block index
 * comes from the low bits of pos and the scan position from the high bits,
 * so one (run, level) stream covers the whole slice plane.
 */
static inline void decode_ac_coeffs(GetBitContext *gb, DCTELEM *out,
                                    int blocks_per_slice,
                                    int plane_size_factor,
                                    const uint8_t *scan)
{
    int pos, block_mask, run, level, sign, run_cb_index, lev_cb_index;
    int max_coeffs, bits_left;

    /* set initial prediction values */
    run   = 4;
    level = 2;

    max_coeffs = blocks_per_slice << 6;
    block_mask = blocks_per_slice - 1;

    /* start right after the DC coefficients of all blocks */
    for (pos = blocks_per_slice - 1; pos < max_coeffs;) {
        /* select codebooks from the previous run and level values */
        run_cb_index = run_to_cb_index[FFMIN(run, 15)];
        lev_cb_index = lev_to_cb_index[FFMIN(level, 9)];

        /* stop when at most a byte of zero padding remains */
        bits_left = get_bits_left(gb);
        if (bits_left <= 8 && !show_bits(gb, bits_left))
            return;
        run = decode_vlc_codeword(gb, ac_codebook[run_cb_index]);

        bits_left = get_bits_left(gb);
        if (bits_left <= 8 && !show_bits(gb, bits_left))
            return;
        level = decode_vlc_codeword(gb, ac_codebook[lev_cb_index]) + 1;

        pos += run + 1;
        if (pos >= max_coeffs)
            break;

        sign = get_sbits(gb, 1);
        out[((pos & block_mask) << 6) + scan[pos >> plane_size_factor]] =
            (level ^ sign) - sign;
    }
}
  373. #define CLIP_AND_BIAS(x) (av_clip((x) + BIAS, CLIP_MIN, CLIP_MAX))
  374. /**
  375. * Add bias value, clamp and output pixels of a slice
  376. */
  377. static void put_pixels(const DCTELEM *in, uint16_t *out, int stride,
  378. int mbs_per_slice, int blocks_per_mb)
  379. {
  380. int mb, x, y, src_offset, dst_offset;
  381. const DCTELEM *src1, *src2;
  382. uint16_t *dst1, *dst2;
  383. src1 = in;
  384. src2 = in + (blocks_per_mb << 5);
  385. dst1 = out;
  386. dst2 = out + (stride << 3);
  387. for (mb = 0; mb < mbs_per_slice; mb++) {
  388. for (y = 0, dst_offset = 0; y < 8; y++, dst_offset += stride) {
  389. for (x = 0; x < 8; x++) {
  390. src_offset = (y << 3) + x;
  391. dst1[dst_offset + x] = CLIP_AND_BIAS(src1[src_offset]);
  392. dst2[dst_offset + x] = CLIP_AND_BIAS(src2[src_offset]);
  393. if (blocks_per_mb > 2) {
  394. dst1[dst_offset + x + 8] =
  395. CLIP_AND_BIAS(src1[src_offset + 64]);
  396. dst2[dst_offset + x + 8] =
  397. CLIP_AND_BIAS(src2[src_offset + 64]);
  398. }
  399. }
  400. }
  401. src1 += blocks_per_mb << 6;
  402. src2 += blocks_per_mb << 6;
  403. dst1 += blocks_per_mb << 2;
  404. dst2 += blocks_per_mb << 2;
  405. }
  406. }
/**
 * Decode a slice plane (luma or chroma).
 *
 * @param ctx               decoder context (provides scratch blocks and qmat)
 * @param buf               bitstream data of this plane
 * @param data_size         size of the plane data in bytes
 * @param out_ptr           destination for the decoded 16-bit pixels
 * @param linesize          destination stride in bytes (halved for uint16_t)
 * @param mbs_per_slice     number of macroblocks in this slice
 * @param blocks_per_mb     8x8 blocks per macroblock (4 luma, 2/4 chroma)
 * @param plane_size_factor log2 of the coefficient interleave stride
 * @param qmat              pre-scaled dequantization matrix
 */
static void decode_slice_plane(ProresContext *ctx, const uint8_t *buf,
                               int data_size, uint16_t *out_ptr,
                               int linesize, int mbs_per_slice,
                               int blocks_per_mb, int plane_size_factor,
                               const int16_t *qmat)
{
    GetBitContext gb;
    DCTELEM *block_ptr;
    int i, blk_num, blocks_per_slice;

    blocks_per_slice = mbs_per_slice * blocks_per_mb;

    /* clear the whole scratch buffer: unset coefficients must stay zero */
    memset(ctx->blocks, 0, 8 * 4 * 64 * sizeof(*ctx->blocks));

    init_get_bits(&gb, buf, data_size << 3);

    decode_dc_coeffs(&gb, ctx->blocks, blocks_per_slice);

    decode_ac_coeffs(&gb, ctx->blocks, blocks_per_slice,
                     plane_size_factor, ctx->scantable.permutated);

    /* inverse quantization, inverse transform and output */
    block_ptr = ctx->blocks;

    for (blk_num = 0; blk_num < blocks_per_slice;
         blk_num++, block_ptr += 64) {
        /* TODO: the correct solution should be (block_ptr[i] * qmat[i]) >> 1
         * and the input of the inverse transform should be scaled by 2
         * in order to avoid rounding errors.
         * Due to the fact the existing Libav transforms are incompatible with
         * that input I temporarily introduced the coarse solution below... */
        for (i = 0; i < 64; i++)
            block_ptr[i] = (block_ptr[i] * qmat[i]) >> 2;

        ctx->dsp.idct(block_ptr);
    }

    put_pixels(ctx->blocks, out_ptr, linesize >> 1, mbs_per_slice,
               blocks_per_mb);
}
/**
 * Decode one slice: parse its header, rescale the quantization matrices
 * for the slice's scale factor and decode the Y, U and V planes.
 *
 * @return 0 on success, -1 on error
 */
static int decode_slice(ProresContext *ctx, int pic_num, int slice_num,
                        int mb_x_pos, int mb_y_pos, int mbs_per_slice,
                        AVCodecContext *avctx)
{
    const uint8_t *buf;
    uint8_t *y_data, *u_data, *v_data;
    AVFrame *pic = avctx->coded_frame;
    int i, sf, slice_width_factor;
    int slice_data_size, hdr_size, y_data_size, u_data_size, v_data_size;
    int y_linesize, u_linesize, v_linesize;

    /* slice extent comes from the index built in decode_picture_header() */
    buf             = ctx->slice_data_index[slice_num];
    slice_data_size = ctx->slice_data_index[slice_num + 1] - buf;

    slice_width_factor = av_log2(mbs_per_slice);

    y_data     = pic->data[0];
    u_data     = pic->data[1];
    v_data     = pic->data[2];
    y_linesize = pic->linesize[0];
    u_linesize = pic->linesize[1];
    v_linesize = pic->linesize[2];

    if (pic->interlaced_frame) {
        /* select this field's lines and double the strides so the
         * plane decoders step over the other field */
        if (!(pic_num ^ pic->top_field_first)) {
            y_data += y_linesize;
            u_data += u_linesize;
            v_data += v_linesize;
        }
        y_linesize <<= 1;
        u_linesize <<= 1;
        v_linesize <<= 1;
    }

    if (slice_data_size < 6) {
        av_log(avctx, AV_LOG_ERROR, "slice data too short!\n");
        return -1;
    }

    /* parse slice header */
    hdr_size    = buf[0] >> 3;
    y_data_size = AV_RB16(buf + 2);
    u_data_size = AV_RB16(buf + 4);
    /* the V plane gets whatever remains after the header and Y/U planes */
    v_data_size = slice_data_size - y_data_size - u_data_size - hdr_size;

    if (v_data_size < 0 || hdr_size < 6) {
        av_log(avctx, AV_LOG_ERROR, "invalid data sizes!\n");
        return -1;
    }

    sf = av_clip(buf[1], 1, 224);
    /* scale factors above 128 are remapped to (sf - 96) * 4 */
    sf = sf > 128 ? (sf - 96) << 2 : sf;

    /* scale quantization matrices according to the slice's scale factor */
    /* TODO: this can be SIMD-optimized a lot */
    if (ctx->qmat_changed || sf != ctx->prev_slice_sf) {
        ctx->prev_slice_sf = sf;
        for (i = 0; i < 64; i++) {
            ctx->qmat_luma_scaled[i]   = ctx->qmat_luma[i]   * sf;
            ctx->qmat_chroma_scaled[i] = ctx->qmat_chroma[i] * sf;
        }
    }

    /* decode luma plane */
    decode_slice_plane(ctx, buf + hdr_size, y_data_size,
                       (uint16_t*) (y_data + (mb_y_pos << 4) * y_linesize +
                                    (mb_x_pos << 5)), y_linesize,
                       mbs_per_slice, 4, slice_width_factor + 2,
                       ctx->qmat_luma_scaled);

    /* decode U chroma plane */
    decode_slice_plane(ctx, buf + hdr_size + y_data_size, u_data_size,
                       (uint16_t*) (u_data + (mb_y_pos << 4) * u_linesize +
                                    (mb_x_pos << ctx->mb_chroma_factor)),
                       u_linesize, mbs_per_slice, ctx->num_chroma_blocks,
                       slice_width_factor + ctx->chroma_factor - 1,
                       ctx->qmat_chroma_scaled);

    /* decode V chroma plane */
    decode_slice_plane(ctx, buf + hdr_size + y_data_size + u_data_size,
                       v_data_size,
                       (uint16_t*) (v_data + (mb_y_pos << 4) * v_linesize +
                                    (mb_x_pos << ctx->mb_chroma_factor)),
                       v_linesize, mbs_per_slice, ctx->num_chroma_blocks,
                       slice_width_factor + ctx->chroma_factor - 1,
                       ctx->qmat_chroma_scaled);

    return 0;
}
  517. static int decode_picture(ProresContext *ctx, int pic_num,
  518. AVCodecContext *avctx)
  519. {
  520. int slice_num, slice_width, x_pos, y_pos;
  521. slice_num = 0;
  522. for (y_pos = 0; y_pos < ctx->num_y_mbs; y_pos++) {
  523. slice_width = 1 << ctx->slice_width_factor;
  524. for (x_pos = 0; x_pos < ctx->num_x_mbs && slice_width;
  525. x_pos += slice_width) {
  526. while (ctx->num_x_mbs - x_pos < slice_width)
  527. slice_width >>= 1;
  528. if (decode_slice(ctx, pic_num, slice_num, x_pos, y_pos,
  529. slice_width, avctx) < 0)
  530. return -1;
  531. slice_num++;
  532. }
  533. }
  534. return 0;
  535. }
  536. #define FRAME_ID MKBETAG('i', 'c', 'p', 'f')
  537. #define MOVE_DATA_PTR(nbytes) buf += (nbytes); buf_size -= (nbytes)
/**
 * Decode one ProRes frame packet: validate the 'icpf' atom, parse the frame
 * header and decode one picture (progressive) or two pictures (interlaced,
 * one per field) into the output frame.
 *
 * @return number of bytes consumed (the whole packet) or -1 on error
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        AVPacket *avpkt)
{
    ProresContext *ctx = avctx->priv_data;
    AVFrame *picture   = avctx->coded_frame;
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    int frame_hdr_size, pic_num, pic_data_size;

    /* check frame atom container */
    if (buf_size < 28 || buf_size < AV_RB32(buf) ||
        AV_RB32(buf + 4) != FRAME_ID) {
        av_log(avctx, AV_LOG_ERROR, "invalid frame\n");
        return -1;
    }

    MOVE_DATA_PTR(8); /* skip the atom size + 'icpf' tag */

    frame_hdr_size = decode_frame_header(ctx, buf, buf_size, avctx);
    if (frame_hdr_size < 0)
        return -1;

    MOVE_DATA_PTR(frame_hdr_size);

    /* re-get the output buffer for every frame (pre-refcounting API) */
    if (picture->data[0])
        avctx->release_buffer(avctx, picture);

    picture->reference = 0;
    if (avctx->get_buffer(avctx, picture) < 0)
        return -1;

    /* one iteration for progressive frames, two for interlaced ones */
    for (pic_num = 0; ctx->picture.interlaced_frame - pic_num + 1; pic_num++) {
        pic_data_size = decode_picture_header(ctx, buf, buf_size, avctx);
        if (pic_data_size < 0)
            return -1;

        if (decode_picture(ctx, pic_num, avctx))
            return -1;

        MOVE_DATA_PTR(pic_data_size);
    }

    *data_size       = sizeof(AVPicture);
    *(AVFrame*) data = *avctx->coded_frame;

    return avpkt->size;
}
  574. static av_cold int decode_close(AVCodecContext *avctx)
  575. {
  576. ProresContext *ctx = avctx->priv_data;
  577. if (ctx->picture.data[0])
  578. avctx->release_buffer(avctx, &ctx->picture);
  579. av_freep(&ctx->slice_data_index);
  580. return 0;
  581. }
/** decoder registration entry */
AVCodec ff_prores_decoder = {
    .name           = "ProRes",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_PRORES,
    .priv_data_size = sizeof(ProresContext),
    .init           = decode_init,
    .close          = decode_close,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1, ///< supports direct rendering
    .long_name      = NULL_IF_CONFIG_SMALL("Apple ProRes (iCodec Pro)")
};