/*
 * Apple ProRes compatible decoder
 *
 * Copyright (c) 2010-2011 Maxim Poliakovski
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * This is a decoder for Apple ProRes 422 SD/HQ/LT/Proxy and ProRes 4444.
 * It is used for storing and editing high definition video data in Apple's Final Cut Pro.
 *
 * @see http://wiki.multimedia.cx/index.php?title=Apple_ProRes
 */
#define LONG_BITSTREAM_READER // some ProRes vlc codes require up to 28 bits to be read at once

#include <stdint.h>
#include <string.h>

#include "libavutil/intmath.h"
#include "avcodec.h"
#include "idctdsp.h"
#include "internal.h"
#include "proresdata.h"
#include "proresdsp.h"
#include "get_bits.h"
typedef struct ProresThreadData {
    const uint8_t *index;            ///< pointers to the data of this slice
    int slice_num;
    int x_pos, y_pos;
    int slice_width;
    int prev_slice_sf;               ///< scalefactor of the previous decoded slice
    DECLARE_ALIGNED(16, int16_t, blocks)[8 * 4 * 64];
    DECLARE_ALIGNED(16, int16_t, qmat_luma_scaled)[64];
    DECLARE_ALIGNED(16, int16_t, qmat_chroma_scaled)[64];
} ProresThreadData;
typedef struct ProresContext {
    ProresDSPContext dsp;
    AVFrame *frame;
    ScanTable scantable;
    int scantable_type;              ///< -1 = uninitialized, 0 = progressive, 1/2 = interlaced

    int frame_type;                  ///< 0 = progressive, 1 = top-field first, 2 = bottom-field first
    int pic_format;                  ///< 2 = 422, 3 = 444
    uint8_t qmat_luma[64];           ///< dequantization matrix for luma
    uint8_t qmat_chroma[64];         ///< dequantization matrix for chroma
    int qmat_changed;                ///< 1 - global quantization matrices changed
    int total_slices;                ///< total number of slices in a picture
    ProresThreadData *slice_data;
    int pic_num;
    int chroma_factor;
    int mb_chroma_factor;
    int num_chroma_blocks;           ///< number of chrominance blocks in a macroblock
    int num_x_slices;
    int num_y_slices;
    int slice_width_factor;
    int slice_height_factor;
    int num_x_mbs;
    int num_y_mbs;
    int alpha_info;
} ProresContext;
static av_cold int decode_init(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;

    ctx->total_slices = 0;
    ctx->slice_data   = NULL;

    avctx->bits_per_raw_sample = PRORES_BITS_PER_SAMPLE;
    ff_proresdsp_init(&ctx->dsp);

    ctx->scantable_type = -1;   // set scantable type to uninitialized
    memset(ctx->qmat_luma, 4, 64);
    memset(ctx->qmat_chroma, 4, 64);

    return 0;
}
static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
                               const int data_size, AVCodecContext *avctx)
{
    int hdr_size, version, width, height, flags;
    const uint8_t *ptr;

    hdr_size = AV_RB16(buf);
    if (hdr_size > data_size) {
        av_log(avctx, AV_LOG_ERROR, "frame data too small\n");
        return AVERROR_INVALIDDATA;
    }

    version = AV_RB16(buf + 2);
    if (version >= 2) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported header version: %d\n", version);
        return AVERROR_INVALIDDATA;
    }

    width  = AV_RB16(buf + 8);
    height = AV_RB16(buf + 10);
    if (width != avctx->width || height != avctx->height) {
        av_log(avctx, AV_LOG_ERROR,
               "picture dimension changed: old: %d x %d, new: %d x %d\n",
               avctx->width, avctx->height, width, height);
        return AVERROR_INVALIDDATA;
    }

    ctx->frame_type = (buf[12] >> 2) & 3;
    if (ctx->frame_type > 2) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported frame type: %d\n", ctx->frame_type);
        return AVERROR_INVALIDDATA;
    }

    ctx->chroma_factor     = (buf[12] >> 6) & 3;
    ctx->mb_chroma_factor  = ctx->chroma_factor + 2;
    ctx->num_chroma_blocks = (1 << ctx->chroma_factor) >> 1;
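    /* Illustrative arithmetic (hypothetical values, not read from any
     * particular stream): chroma_factor = 2 signals 4:2:2, so a 16x16
     * macroblock carries (1 << 2) >> 1 = 2 chroma blocks per component,
     * while chroma_factor = 3 signals 4:4:4 and yields (1 << 3) >> 1 = 4. */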
    ctx->alpha_info = buf[17] & 0xf;
    if (ctx->alpha_info > 2) {
        av_log(avctx, AV_LOG_ERROR, "Invalid alpha mode %d\n", ctx->alpha_info);
        return AVERROR_INVALIDDATA;
    }

    switch (ctx->chroma_factor) {
    case 2:
        avctx->pix_fmt = ctx->alpha_info ? AV_PIX_FMT_YUVA422P10
                                         : AV_PIX_FMT_YUV422P10;
        break;
    case 3:
        avctx->pix_fmt = ctx->alpha_info ? AV_PIX_FMT_YUVA444P10
                                         : AV_PIX_FMT_YUV444P10;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "unsupported picture format: %d\n", ctx->pic_format);
        return AVERROR_INVALIDDATA;
    }

    if (ctx->scantable_type != ctx->frame_type) {
        if (!ctx->frame_type)
            ff_init_scantable(ctx->dsp.idct_permutation, &ctx->scantable,
                              ff_prores_progressive_scan);
        else
            ff_init_scantable(ctx->dsp.idct_permutation, &ctx->scantable,
                              ff_prores_interlaced_scan);
        ctx->scantable_type = ctx->frame_type;
    }

    if (ctx->frame_type) {      /* if interlaced */
        ctx->frame->interlaced_frame = 1;
        ctx->frame->top_field_first  = ctx->frame_type & 1;
    } else {
        ctx->frame->interlaced_frame = 0;
    }

    avctx->color_primaries = buf[14];
    avctx->color_trc       = buf[15];
    avctx->colorspace      = buf[16];
    avctx->color_range     = AVCOL_RANGE_MPEG;

    ctx->qmat_changed = 0;
    ptr   = buf + 20;
    flags = buf[19];
    if (flags & 2) {
        if (ptr - buf > hdr_size - 64) {
            av_log(avctx, AV_LOG_ERROR, "header data too small\n");
            return AVERROR_INVALIDDATA;
        }
        if (memcmp(ctx->qmat_luma, ptr, 64)) {
            memcpy(ctx->qmat_luma, ptr, 64);
            ctx->qmat_changed = 1;
        }
        ptr += 64;
    } else {
        memset(ctx->qmat_luma, 4, 64);
        ctx->qmat_changed = 1;
    }

    if (flags & 1) {
        if (ptr - buf > hdr_size - 64) {
            av_log(avctx, AV_LOG_ERROR, "header data too small\n");
            return AVERROR_INVALIDDATA;
        }
        if (memcmp(ctx->qmat_chroma, ptr, 64)) {
            memcpy(ctx->qmat_chroma, ptr, 64);
            ctx->qmat_changed = 1;
        }
    } else {
        memset(ctx->qmat_chroma, 4, 64);
        ctx->qmat_changed = 1;
    }

    return hdr_size;
}
static int decode_picture_header(ProresContext *ctx, const uint8_t *buf,
                                 const int data_size, AVCodecContext *avctx)
{
    int i, hdr_size, pic_data_size, num_slices;
    int slice_width_factor, slice_height_factor;
    int remainder, num_x_slices;
    const uint8_t *data_ptr, *index_ptr;

    hdr_size = data_size > 0 ? buf[0] >> 3 : 0;
    if (hdr_size < 8 || hdr_size > data_size) {
        av_log(avctx, AV_LOG_ERROR, "picture header too small\n");
        return AVERROR_INVALIDDATA;
    }

    pic_data_size = AV_RB32(buf + 1);
    if (pic_data_size > data_size) {
        av_log(avctx, AV_LOG_ERROR, "picture data too small\n");
        return AVERROR_INVALIDDATA;
    }

    slice_width_factor  = buf[7] >> 4;
    slice_height_factor = buf[7] & 0xF;
    if (slice_width_factor > 3 || slice_height_factor) {
        av_log(avctx, AV_LOG_ERROR,
               "unsupported slice dimension: %d x %d\n",
               1 << slice_width_factor, 1 << slice_height_factor);
        return AVERROR_INVALIDDATA;
    }

    ctx->slice_width_factor  = slice_width_factor;
    ctx->slice_height_factor = slice_height_factor;

    ctx->num_x_mbs = (avctx->width + 15) >> 4;
    ctx->num_y_mbs = (avctx->height +
                      (1 << (4 + ctx->frame->interlaced_frame)) - 1) >>
                     (4 + ctx->frame->interlaced_frame);

    remainder    = ctx->num_x_mbs & ((1 << slice_width_factor) - 1);
    num_x_slices = (ctx->num_x_mbs >> slice_width_factor) + (remainder & 1) +
                   ((remainder >> 1) & 1) + ((remainder >> 2) & 1);

    num_slices = num_x_slices * ctx->num_y_mbs;
    if (num_slices != AV_RB16(buf + 5)) {
        av_log(avctx, AV_LOG_ERROR, "invalid number of slices\n");
        return AVERROR_INVALIDDATA;
    }

    if (ctx->total_slices != num_slices) {
        av_freep(&ctx->slice_data);
        ctx->slice_data = av_malloc((num_slices + 1) * sizeof(ctx->slice_data[0]));
        if (!ctx->slice_data)
            return AVERROR(ENOMEM);
        ctx->total_slices = num_slices;
    }

    if (hdr_size + num_slices * 2 > data_size) {
        av_log(avctx, AV_LOG_ERROR, "slice table too small\n");
        return AVERROR_INVALIDDATA;
    }

    /* parse slice table allowing quick access to the slice data */
    index_ptr = buf + hdr_size;
    data_ptr  = index_ptr + num_slices * 2;

    for (i = 0; i < num_slices; i++) {
        ctx->slice_data[i].index         = data_ptr;
        ctx->slice_data[i].prev_slice_sf = 0;
        data_ptr += AV_RB16(index_ptr + i * 2);
    }
    ctx->slice_data[i].index         = data_ptr;
    ctx->slice_data[i].prev_slice_sf = 0;

    if (data_ptr > buf + data_size) {
        av_log(avctx, AV_LOG_ERROR, "out of slice data\n");
        return AVERROR_INVALIDDATA;
    }

    return pic_data_size;
}
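/*
 * Worked example of the slice layout math above (illustrative numbers only,
 * not taken from a real stream): with slice_width_factor = 3 a full slice is
 * 8 macroblocks wide. For num_x_mbs = 45, remainder = 45 & 7 = 5 (binary 101),
 * so each macroblock row splits into 45 >> 3 = 5 full slices plus one 4-MB and
 * one 1-MB slice, i.e. num_x_slices = 5 + 1 + 0 + 1 = 7. decode_picture()
 * below reproduces the same widths by halving slice_width until it fits.
 */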
/**
 * Read an unsigned rice/exp golomb codeword.
 */
static inline int decode_vlc_codeword(GetBitContext *gb, unsigned codebook)
{
    unsigned int rice_order, exp_order, switch_bits;
    unsigned int buf, code;
    int log, prefix_len, len;

    OPEN_READER(re, gb);
    UPDATE_CACHE(re, gb);
    buf = GET_CACHE(re, gb);

    /* number of prefix bits to switch between Rice and expGolomb */
    switch_bits = (codebook & 3) + 1;
    rice_order  = codebook >> 5;       /* rice code order */
    exp_order   = (codebook >> 2) & 7; /* exp golomb code order */

    log = 31 - av_log2(buf); /* count prefix bits (zeroes) */

    if (log < switch_bits) { /* ok, we got a rice code */
        if (!rice_order) {
            /* shortcut for faster decoding of rice codes without remainder */
            code = log;
            LAST_SKIP_BITS(re, gb, log + 1);
        } else {
            prefix_len = log + 1;
            code = (log << rice_order) + NEG_USR32(buf << prefix_len, rice_order);
            LAST_SKIP_BITS(re, gb, prefix_len + rice_order);
        }
    } else { /* otherwise we got a exp golomb code */
        len  = (log << 1) - switch_bits + exp_order + 1;
        code = NEG_USR32(buf, len) - (1 << exp_order) + (switch_bits << rice_order);
        LAST_SKIP_BITS(re, gb, len);
    }

    CLOSE_READER(re, gb);

    return code;
}
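/*
 * Worked example for the codebook descriptor above (hypothetical value,
 * chosen for illustration): codebook = 0x2D = 0b00101101 unpacks to
 * switch_bits = 0b01 + 1 = 2, exp_order = 0b011 = 3, rice_order = 0b001 = 1.
 * A bitstream starting with "011..." has one leading zero (log = 1 < 2), so
 * the Rice branch applies: prefix_len = 2, the single remainder bit is 1 and
 * code = (1 << 1) + 1 = 3. A prefix of two or more zeroes would instead be
 * decoded by the exp-Golomb branch.
 */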
#define LSB2SIGN(x) (-((x) & 1))
#define TOSIGNED(x) (((x) >> 1) ^ LSB2SIGN(x))
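/*
 * TOSIGNED() undoes the even/odd mapping of signed values onto unsigned
 * codewords: 0, 1, 2, 3, 4, 5 decode to 0, -1, 1, -2, 2, -3. For example,
 * TOSIGNED(5) = (5 >> 1) ^ -(5 & 1) = 2 ^ -1 = -3 (illustrative arithmetic).
 */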
/**
 * Decode DC coefficients for all blocks in a slice.
 */
static inline void decode_dc_coeffs(GetBitContext *gb, int16_t *out,
                                    int nblocks)
{
    int16_t prev_dc;
    int     i, sign;
    int16_t delta;
    unsigned int code;

    code   = decode_vlc_codeword(gb, FIRST_DC_CB);
    out[0] = prev_dc = TOSIGNED(code);

    out += 64; /* move to the DC coeff of the next block */
    delta = 3;

    for (i = 1; i < nblocks; i++, out += 64) {
        code = decode_vlc_codeword(gb, ff_prores_dc_codebook[FFMIN(FFABS(delta), 3)]);

        sign     = -(((delta >> 15) & 1) ^ (code & 1));
        delta    = (((code + 1) >> 1) ^ sign) - sign;
        prev_dc += delta;
        out[0]   = prev_dc;
    }
}
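/*
 * The DC loop above codes each delta relative to the previous one: the
 * codebook is picked by min(|previous delta|, 3) and the sign is predicted
 * from the previous delta's sign. Illustrative decode, assuming the previous
 * delta was non-negative: codewords 0, 1, 2, 3, 4 yield deltas 0, -1, +1,
 * -2, +2; with a negative previous delta the signs flip to 0, +1, -1, +2, -2.
 */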
#define MAX_PADDING 16

/**
 * Decode AC coefficients for all blocks in a slice.
 */
static inline int decode_ac_coeffs(GetBitContext *gb, int16_t *out,
                                   int blocks_per_slice,
                                   int plane_size_factor,
                                   const uint8_t *scan)
{
    int pos, block_mask, run, level, sign, run_cb_index, lev_cb_index;
    int max_coeffs, bits_left;

    /* set initial prediction values */
    run   = 4;
    level = 2;

    max_coeffs = blocks_per_slice << 6;
    block_mask = blocks_per_slice - 1;

    for (pos = blocks_per_slice - 1; pos < max_coeffs;) {
        run_cb_index = ff_prores_run_to_cb_index[FFMIN(run, 15)];
        lev_cb_index = ff_prores_lev_to_cb_index[FFMIN(level, 9)];

        bits_left = get_bits_left(gb);
        if (bits_left <= 0 || (bits_left <= MAX_PADDING && !show_bits(gb, bits_left)))
            return 0;

        run = decode_vlc_codeword(gb, ff_prores_ac_codebook[run_cb_index]);
        if (run < 0)
            return AVERROR_INVALIDDATA;

        bits_left = get_bits_left(gb);
        if (bits_left <= 0 || (bits_left <= MAX_PADDING && !show_bits(gb, bits_left)))
            return AVERROR_INVALIDDATA;

        level = decode_vlc_codeword(gb, ff_prores_ac_codebook[lev_cb_index]) + 1;
        if (level < 0)
            return AVERROR_INVALIDDATA;

        pos += run + 1;
        if (pos >= max_coeffs)
            break;

        sign = get_sbits(gb, 1);
        out[((pos & block_mask) << 6) + scan[pos >> plane_size_factor]] =
            (level ^ sign) - sign;
    }

    return 0;
}
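/*
 * AC coefficients are interleaved across the blocks of a slice: the linear
 * position advances through every block at one scan index before moving on
 * to the next index, so the target block is (pos & block_mask) and the scan
 * index is (pos >> plane_size_factor). Illustrative arithmetic for a slice
 * of 8 blocks (plane_size_factor = 3): pos = 21 writes to block 21 & 7 = 5
 * at zigzag position scan[21 >> 3] = scan[2].
 */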
/**
 * Decode a slice plane (luma or chroma).
 */
static int decode_slice_plane(ProresContext *ctx, ProresThreadData *td,
                              const uint8_t *buf,
                              int data_size, uint16_t *out_ptr,
                              int linesize, int mbs_per_slice,
                              int blocks_per_mb, int plane_size_factor,
                              const int16_t *qmat, int is_chroma)
{
    GetBitContext gb;
    int16_t *block_ptr;
    int mb_num, blocks_per_slice, ret;

    blocks_per_slice = mbs_per_slice * blocks_per_mb;

    memset(td->blocks, 0, 8 * 4 * 64 * sizeof(*td->blocks));

    init_get_bits(&gb, buf, data_size << 3);

    decode_dc_coeffs(&gb, td->blocks, blocks_per_slice);

    ret = decode_ac_coeffs(&gb, td->blocks, blocks_per_slice,
                           plane_size_factor, ctx->scantable.permutated);
    if (ret < 0)
        return ret;

    /* inverse quantization, inverse transform and output */
    block_ptr = td->blocks;

    if (!is_chroma) {
        for (mb_num = 0; mb_num < mbs_per_slice; mb_num++, out_ptr += blocks_per_mb * 4) {
            ctx->dsp.idct_put(out_ptr, linesize, block_ptr, qmat);
            block_ptr += 64;
            if (blocks_per_mb > 2) {
                ctx->dsp.idct_put(out_ptr + 8, linesize, block_ptr, qmat);
                block_ptr += 64;
            }
            ctx->dsp.idct_put(out_ptr + linesize * 4, linesize, block_ptr, qmat);
            block_ptr += 64;
            if (blocks_per_mb > 2) {
                ctx->dsp.idct_put(out_ptr + linesize * 4 + 8, linesize, block_ptr, qmat);
                block_ptr += 64;
            }
        }
    } else {
        for (mb_num = 0; mb_num < mbs_per_slice; mb_num++, out_ptr += blocks_per_mb * 4) {
            ctx->dsp.idct_put(out_ptr, linesize, block_ptr, qmat);
            block_ptr += 64;
            ctx->dsp.idct_put(out_ptr + linesize * 4, linesize, block_ptr, qmat);
            block_ptr += 64;
            if (blocks_per_mb > 2) {
                ctx->dsp.idct_put(out_ptr + 8, linesize, block_ptr, qmat);
                block_ptr += 64;
                ctx->dsp.idct_put(out_ptr + linesize * 4 + 8, linesize, block_ptr, qmat);
                block_ptr += 64;
            }
        }
    }

    return 0;
}
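/*
 * Note on the output pointer arithmetic above: out_ptr is a uint16_t pointer
 * while linesize is in bytes, so "out_ptr + linesize * 4" advances by
 * linesize * 8 bytes, i.e. eight picture lines, and "out_ptr + 8" advances by
 * eight samples. A luma macroblock is therefore written as four 8x8 blocks at
 * offsets (0,0), (8,0), (0,8) and (8,8), while 4:2:2 chroma only uses the
 * left column of blocks.
 */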
static void unpack_alpha(GetBitContext *gb, uint16_t *dst, int num_coeffs,
                         const int num_bits)
{
    const int mask = (1 << num_bits) - 1;
    int i, idx, val, alpha_val;

    idx       = 0;
    alpha_val = mask;

    do {
        do {
            if (get_bits1(gb)) {
                val = get_bits(gb, num_bits);
            } else {
                int sign;
                val  = get_bits(gb, num_bits == 16 ? 7 : 4);
                sign = val & 1;
                val  = (val + 2) >> 1;
                if (sign)
                    val = -val;
            }
            alpha_val = (alpha_val + val) & mask;
            if (num_bits == 16)
                dst[idx++] = alpha_val >> 6;
            else
                dst[idx++] = (alpha_val << 2) | (alpha_val >> 6);
            if (idx >= num_coeffs - 1)
                break;
        } while (get_bits1(gb));
        val = get_bits(gb, 4);
        if (!val)
            val = get_bits(gb, 11);
        if (idx + val > num_coeffs)
            val = num_coeffs - idx;
        if (num_bits == 16)
            for (i = 0; i < val; i++)
                dst[idx++] = alpha_val >> 6;
        else
            for (i = 0; i < val; i++)
                dst[idx++] = (alpha_val << 2) | (alpha_val >> 6);
    } while (idx < num_coeffs);
}
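/*
 * The alpha bitstream above alternates literal/delta coded samples with a
 * 4-bit run length (escaped to 11 bits when zero) that repeats the last
 * value. Decoded values are widened to the 10-bit output range; in the
 * 8-bit case, for example, alpha_val = 0xFF becomes
 * (0xFF << 2) | (0xFF >> 6) = 0x3FF (illustrative arithmetic).
 */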
/**
 * Decode alpha slice plane.
 */
static void decode_alpha_plane(ProresContext *ctx, ProresThreadData *td,
                               const uint8_t *buf, int data_size,
                               uint16_t *out_ptr, int linesize,
                               int mbs_per_slice)
{
    GetBitContext gb;
    int i;
    uint16_t *block_ptr;

    memset(td->blocks, 0, 8 * 4 * 64 * sizeof(*td->blocks));

    init_get_bits(&gb, buf, data_size << 3);

    if (ctx->alpha_info == 2)
        unpack_alpha(&gb, td->blocks, mbs_per_slice * 4 * 64, 16);
    else
        unpack_alpha(&gb, td->blocks, mbs_per_slice * 4 * 64, 8);

    block_ptr = td->blocks;
    for (i = 0; i < 16; i++) {
        memcpy(out_ptr, block_ptr, 16 * mbs_per_slice * sizeof(*out_ptr));
        out_ptr   += linesize >> 1;
        block_ptr += 16 * mbs_per_slice;
    }
}
static int decode_slice(AVCodecContext *avctx, void *tdata)
{
    ProresThreadData *td = tdata;
    ProresContext *ctx = avctx->priv_data;
    int mb_x_pos  = td->x_pos;
    int mb_y_pos  = td->y_pos;
    int pic_num   = ctx->pic_num;
    int slice_num = td->slice_num;
    int mbs_per_slice = td->slice_width;
    const uint8_t *buf;
    uint8_t *y_data, *u_data, *v_data, *a_data;
    AVFrame *pic = ctx->frame;
    int i, sf, slice_width_factor;
    int slice_data_size, hdr_size;
    int y_data_size, u_data_size, v_data_size, a_data_size;
    int y_linesize, u_linesize, v_linesize, a_linesize;
    int coff[4];
    int ret;

    buf             = ctx->slice_data[slice_num].index;
    slice_data_size = ctx->slice_data[slice_num + 1].index - buf;

    slice_width_factor = av_log2(mbs_per_slice);

    y_data     = pic->data[0];
    u_data     = pic->data[1];
    v_data     = pic->data[2];
    a_data     = pic->data[3];
    y_linesize = pic->linesize[0];
    u_linesize = pic->linesize[1];
    v_linesize = pic->linesize[2];
    a_linesize = pic->linesize[3];

    if (pic->interlaced_frame) {
        if (!(pic_num ^ pic->top_field_first)) {
            y_data += y_linesize;
            u_data += u_linesize;
            v_data += v_linesize;
            if (a_data)
                a_data += a_linesize;
        }
        y_linesize <<= 1;
        u_linesize <<= 1;
        v_linesize <<= 1;
        a_linesize <<= 1;
    }

    y_data += (mb_y_pos << 4) * y_linesize + (mb_x_pos << 5);
    u_data += (mb_y_pos << 4) * u_linesize + (mb_x_pos << ctx->mb_chroma_factor);
    v_data += (mb_y_pos << 4) * v_linesize + (mb_x_pos << ctx->mb_chroma_factor);
    if (a_data)
        a_data += (mb_y_pos << 4) * a_linesize + (mb_x_pos << 5);

    if (slice_data_size < 6) {
        av_log(avctx, AV_LOG_ERROR, "slice data too small\n");
        return AVERROR_INVALIDDATA;
    }

    /* parse slice header */
    hdr_size    = buf[0] >> 3;
    coff[0]     = hdr_size;
    y_data_size = AV_RB16(buf + 2);
    coff[1]     = coff[0] + y_data_size;
    u_data_size = AV_RB16(buf + 4);
    coff[2]     = coff[1] + u_data_size;
    v_data_size = hdr_size > 7 ? AV_RB16(buf + 6) : slice_data_size - coff[2];
    coff[3]     = coff[2] + v_data_size;
    a_data_size = slice_data_size - coff[3];

    /* a negative V or alpha component size means that the previous
       component sizes are too large */
    if (v_data_size < 0 || a_data_size < 0 || hdr_size < 6) {
        av_log(avctx, AV_LOG_ERROR, "invalid data size\n");
        return AVERROR_INVALIDDATA;
    }

    sf = av_clip(buf[1], 1, 224);
    sf = sf > 128 ? (sf - 96) << 2 : sf;

    /* scale quantization matrices according to the slice's scale factor */
    /* TODO: this can be SIMD-optimized a lot */
    if (ctx->qmat_changed || sf != td->prev_slice_sf) {
        td->prev_slice_sf = sf;
        for (i = 0; i < 64; i++) {
            td->qmat_luma_scaled[ctx->dsp.idct_permutation[i]]   = ctx->qmat_luma[i]   * sf;
            td->qmat_chroma_scaled[ctx->dsp.idct_permutation[i]] = ctx->qmat_chroma[i] * sf;
        }
    }
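    /*
     * Illustrative arithmetic for the scale-factor mapping above (values
     * chosen for illustration, not from a real stream): a header byte of 40
     * is used as-is, while a header byte of 200 maps to (200 - 96) << 2 = 416;
     * each quantization matrix entry is then multiplied by that factor in
     * IDCT permutation order.
     */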
    /* decode luma plane */
    ret = decode_slice_plane(ctx, td, buf + coff[0], y_data_size,
                             (uint16_t *) y_data, y_linesize,
                             mbs_per_slice, 4, slice_width_factor + 2,
                             td->qmat_luma_scaled, 0);
    if (ret < 0)
        return ret;

    /* decode U chroma plane */
    ret = decode_slice_plane(ctx, td, buf + coff[1], u_data_size,
                             (uint16_t *) u_data, u_linesize,
                             mbs_per_slice, ctx->num_chroma_blocks,
                             slice_width_factor + ctx->chroma_factor - 1,
                             td->qmat_chroma_scaled, 1);
    if (ret < 0)
        return ret;

    /* decode V chroma plane */
    ret = decode_slice_plane(ctx, td, buf + coff[2], v_data_size,
                             (uint16_t *) v_data, v_linesize,
                             mbs_per_slice, ctx->num_chroma_blocks,
                             slice_width_factor + ctx->chroma_factor - 1,
                             td->qmat_chroma_scaled, 1);
    if (ret < 0)
        return ret;

    /* decode alpha plane if available */
    if (a_data && a_data_size)
        decode_alpha_plane(ctx, td, buf + coff[3], a_data_size,
                           (uint16_t *) a_data, a_linesize,
                           mbs_per_slice);

    return 0;
}
static int decode_picture(ProresContext *ctx, int pic_num,
                          AVCodecContext *avctx)
{
    int slice_num, slice_width, x_pos, y_pos;

    slice_num = 0;

    ctx->pic_num = pic_num;
    for (y_pos = 0; y_pos < ctx->num_y_mbs; y_pos++) {
        slice_width = 1 << ctx->slice_width_factor;

        for (x_pos = 0; x_pos < ctx->num_x_mbs && slice_width;
             x_pos += slice_width) {
            while (ctx->num_x_mbs - x_pos < slice_width)
                slice_width >>= 1;

            ctx->slice_data[slice_num].slice_num   = slice_num;
            ctx->slice_data[slice_num].x_pos       = x_pos;
            ctx->slice_data[slice_num].y_pos       = y_pos;
            ctx->slice_data[slice_num].slice_width = slice_width;

            slice_num++;
        }
    }

    return avctx->execute(avctx, decode_slice,
                          ctx->slice_data, NULL, slice_num,
                          sizeof(ctx->slice_data[0]));
}
#define MOVE_DATA_PTR(nbytes) buf += (nbytes); buf_size -= (nbytes)

static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    ProresContext *ctx = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    int frame_hdr_size, pic_num, pic_data_size;
    int ret;

    ctx->frame            = data;
    ctx->frame->pict_type = AV_PICTURE_TYPE_I;
    ctx->frame->key_frame = 1;

    /* check frame atom container */
    if (buf_size < 28 || buf_size < AV_RB32(buf) ||
        AV_RB32(buf + 4) != FRAME_ID) {
        av_log(avctx, AV_LOG_ERROR, "invalid frame\n");
        return AVERROR_INVALIDDATA;
    }

    MOVE_DATA_PTR(8);

    frame_hdr_size = decode_frame_header(ctx, buf, buf_size, avctx);
    if (frame_hdr_size < 0)
        return AVERROR_INVALIDDATA;

    MOVE_DATA_PTR(frame_hdr_size);

    ret = ff_get_buffer(avctx, ctx->frame, 0);
    if (ret < 0)
        return ret;

    for (pic_num = 0; ctx->frame->interlaced_frame - pic_num + 1; pic_num++) {
        pic_data_size = decode_picture_header(ctx, buf, buf_size, avctx);
        if (pic_data_size < 0)
            return AVERROR_INVALIDDATA;

        ret = decode_picture(ctx, pic_num, avctx);
        if (ret < 0)
            return ret;

        MOVE_DATA_PTR(pic_data_size);
    }

    ctx->frame = NULL;
    *got_frame = 1;

    return avpkt->size;
}
static av_cold int decode_close(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;

    av_freep(&ctx->slice_data);

    return 0;
}

AVCodec ff_prores_decoder = {
    .name           = "prores",
    .long_name      = NULL_IF_CONFIG_SMALL("Apple ProRes (iCodec Pro)"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_PRORES,
    .priv_data_size = sizeof(ProresContext),
    .init           = decode_init,
    .close          = decode_close,
    .decode         = decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS,
};