/*
 * Copyright (c) 2010-2011 Maxim Poliakovski
 * Copyright (c) 2010-2011 Elvis Presley
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Known FOURCCs: 'apch' (HQ), 'apcn' (SD), 'apcs' (LT), 'apco' (Proxy), 'ap4h' (4444)
 */

//#define DEBUG

#define LONG_BITSTREAM_READER

#include "avcodec.h"
#include "get_bits.h"
#include "internal.h"
#include "simple_idct.h"
#include "proresdec.h"

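/* Remap a 64-entry table through an IDCT permutation: dst[i] = permutation[src[i]]. */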
static void permute(uint8_t *dst, const uint8_t *src, const uint8_t permutation[64])
{
    int i;
    for (i = 0; i < 64; i++)
        dst[i] = permutation[src[i]];
}

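/* Default 8x8 scan orders; decode_init() remaps them through the IDCT permutation. */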
static const uint8_t progressive_scan[64] = {
     0,  1,  8,  9,  2,  3, 10, 11,
    16, 17, 24, 25, 18, 19, 26, 27,
     4,  5, 12, 20, 13,  6,  7, 14,
    21, 28, 29, 22, 15, 23, 30, 31,
    32, 33, 40, 48, 41, 34, 35, 42,
    49, 56, 57, 50, 43, 36, 37, 44,
    51, 58, 59, 52, 45, 38, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63
};

static const uint8_t interlaced_scan[64] = {
     0,  8,  1,  9, 16, 24, 17, 25,
     2, 10,  3, 11, 18, 26, 19, 27,
    32, 40, 33, 34, 41, 48, 56, 49,
    42, 35, 43, 50, 57, 58, 51, 59,
     4, 12,  5,  6, 13, 20, 28, 21,
    14,  7, 15, 22, 29, 36, 44, 37,
    30, 23, 31, 38, 45, 52, 60, 53,
    46, 39, 47, 54, 61, 62, 55, 63,
};

static av_cold int decode_init(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;
    uint8_t idct_permutation[64];

    avctx->bits_per_raw_sample = 10;

    ff_dsputil_init(&ctx->dsp, avctx);
    ff_proresdsp_init(&ctx->prodsp, avctx);

    ff_init_scantable_permutation(idct_permutation,
                                  ctx->prodsp.idct_permutation_type);

    permute(ctx->progressive_scan, progressive_scan, idct_permutation);
    permute(ctx->interlaced_scan,  interlaced_scan,  idct_permutation);

    return 0;
}

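/*
 * Frame header layout, as parsed below: 16-bit header size, 16-bit version,
 * 4-byte creator ID, 16-bit width and height, frame-type and chroma-format
 * bits in byte 12, alpha info in byte 17, flags in byte 19, then optional
 * 64-byte luma/chroma quantisation matrices when signalled by the flags.
 */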
static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
                               const int data_size, AVCodecContext *avctx)
{
    int hdr_size, width, height, flags;
    int version;
    const uint8_t *ptr;

    hdr_size = AV_RB16(buf);
    av_dlog(avctx, "header size %d\n", hdr_size);
    if (hdr_size > data_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong header size\n");
        return -1;
    }

    version = AV_RB16(buf + 2);
    av_dlog(avctx, "%.4s version %d\n", buf+4, version);
    if (version > 1) {
        av_log(avctx, AV_LOG_ERROR, "unsupported version: %d\n", version);
        return -1;
    }

    width  = AV_RB16(buf + 8);
    height = AV_RB16(buf + 10);
    if (width != avctx->width || height != avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "picture resolution change: %dx%d -> %dx%d\n",
               avctx->width, avctx->height, width, height);
        return -1;
    }

    ctx->frame_type = (buf[12] >> 2) & 3;
    ctx->alpha_info = buf[17] & 0xf;

    if (ctx->alpha_info > 2) {
        av_log(avctx, AV_LOG_ERROR, "Invalid alpha mode %d\n", ctx->alpha_info);
        return AVERROR_INVALIDDATA;
    }

    av_dlog(avctx, "frame type %d\n", ctx->frame_type);

    if (ctx->frame_type == 0) {
        ctx->scan = ctx->progressive_scan; // permuted
    } else {
        ctx->scan = ctx->interlaced_scan; // permuted
        ctx->frame->interlaced_frame = 1;
        ctx->frame->top_field_first = ctx->frame_type == 1;
    }

    if (ctx->alpha_info) {
        avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUVA444P10 : AV_PIX_FMT_YUVA422P10;
    } else {
        avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_YUV422P10;
    }

    ptr   = buf + 20;
    flags = buf[19];
    av_dlog(avctx, "flags %x\n", flags);

    if (flags & 2) {
        if (buf + data_size - ptr < 64) {
            av_log(avctx, AV_LOG_ERROR, "Header truncated\n");
            return -1;
        }
        permute(ctx->qmat_luma, ctx->prodsp.idct_permutation, ptr);
        ptr += 64;
    } else {
        memset(ctx->qmat_luma, 4, 64);
    }

    if (flags & 1) {
        if (buf + data_size - ptr < 64) {
            av_log(avctx, AV_LOG_ERROR, "Header truncated\n");
            return -1;
        }
        permute(ctx->qmat_chroma, ctx->prodsp.idct_permutation, ptr);
    } else {
        memset(ctx->qmat_chroma, 4, 64);
    }

    return hdr_size;
}

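/*
 * Picture header layout, as parsed below: header size in the top five bits of
 * byte 0, 32-bit picture data size, 16-bit slice count, log2 slice dimensions
 * (in macroblocks) in byte 7, followed by an index of 16-bit coded slice sizes.
 */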
static int decode_picture_header(AVCodecContext *avctx, const uint8_t *buf, const int buf_size)
{
    ProresContext *ctx = avctx->priv_data;
    int i, hdr_size, slice_count;
    unsigned pic_data_size;
    int log2_slice_mb_width, log2_slice_mb_height;
    int slice_mb_count, mb_x, mb_y;
    const uint8_t *data_ptr, *index_ptr;

    hdr_size = buf[0] >> 3;
    if (hdr_size < 8 || hdr_size > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong picture header size\n");
        return -1;
    }

    pic_data_size = AV_RB32(buf + 1);
    if (pic_data_size > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong picture data size\n");
        return -1;
    }

    log2_slice_mb_width  = buf[7] >> 4;
    log2_slice_mb_height = buf[7] & 0xF;
    if (log2_slice_mb_width > 3 || log2_slice_mb_height) {
        av_log(avctx, AV_LOG_ERROR, "unsupported slice resolution: %dx%d\n",
               1 << log2_slice_mb_width, 1 << log2_slice_mb_height);
        return -1;
    }

    ctx->mb_width = (avctx->width + 15) >> 4;
    if (ctx->frame_type)
        ctx->mb_height = (avctx->height + 31) >> 5;
    else
        ctx->mb_height = (avctx->height + 15) >> 4;

    slice_count = AV_RB16(buf + 5);

    if (ctx->slice_count != slice_count || !ctx->slices) {
        av_freep(&ctx->slices);
        ctx->slices = av_mallocz(slice_count * sizeof(*ctx->slices));
        if (!ctx->slices)
            return AVERROR(ENOMEM);
        ctx->slice_count = slice_count;
    }

    if (!slice_count)
        return AVERROR(EINVAL);

    if (hdr_size + slice_count*2 > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong slice count\n");
        return -1;
    }

    // parse slice information
    index_ptr = buf + hdr_size;
    data_ptr  = index_ptr + slice_count*2;

    slice_mb_count = 1 << log2_slice_mb_width;
    mb_x = 0;
    mb_y = 0;

    for (i = 0; i < slice_count; i++) {
        SliceContext *slice = &ctx->slices[i];

        slice->data = data_ptr;
        data_ptr += AV_RB16(index_ptr + i*2);

        while (ctx->mb_width - mb_x < slice_mb_count)
            slice_mb_count >>= 1;

        slice->mb_x = mb_x;
        slice->mb_y = mb_y;
        slice->mb_count = slice_mb_count;
        slice->data_size = data_ptr - slice->data;

        if (slice->data_size < 6) {
            av_log(avctx, AV_LOG_ERROR, "error, wrong slice data size\n");
            return -1;
        }

        mb_x += slice_mb_count;
        if (mb_x == ctx->mb_width) {
            slice_mb_count = 1 << log2_slice_mb_width;
            mb_x = 0;
            mb_y++;
        }
        if (data_ptr > buf + buf_size) {
            av_log(avctx, AV_LOG_ERROR, "error, slice out of bounds\n");
            return -1;
        }
    }

    if (mb_x || mb_y != ctx->mb_height) {
        av_log(avctx, AV_LOG_ERROR, "error wrong mb count y %d h %d\n",
               mb_y, ctx->mb_height);
        return -1;
    }

    return pic_data_size;
}

#define DECODE_CODEWORD(val, codebook)                                  \
    do {                                                                \
        unsigned int rice_order, exp_order, switch_bits;                \
        unsigned int q, buf, bits;                                      \
                                                                        \
        UPDATE_CACHE(re, gb);                                           \
        buf = GET_CACHE(re, gb);                                        \
                                                                        \
        /* number of bits to switch between rice and exp golomb */      \
        switch_bits =  codebook & 3;                                    \
        rice_order  =  codebook >> 5;                                   \
        exp_order   = (codebook >> 2) & 7;                              \
                                                                        \
        q = 31 - av_log2(buf);                                          \
                                                                        \
        if (q > switch_bits) { /* exp golomb */                         \
            bits = exp_order - switch_bits + (q<<1);                    \
            val = SHOW_UBITS(re, gb, bits) - (1 << exp_order) +         \
                ((switch_bits + 1) << rice_order);                      \
            SKIP_BITS(re, gb, bits);                                    \
        } else if (rice_order) {                                        \
            SKIP_BITS(re, gb, q+1);                                     \
            val = (q << rice_order) + SHOW_UBITS(re, gb, rice_order);   \
            SKIP_BITS(re, gb, rice_order);                              \
        } else {                                                        \
            val = q;                                                    \
            SKIP_BITS(re, gb, q+1);                                     \
        }                                                               \
    } while (0)

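/*
 * Codebook descriptors pack rice_order into bits 5-7, exp_order into bits 2-4
 * and switch_bits into bits 0-1 (see DECODE_CODEWORD above). TOSIGNED() maps
 * the unsigned codewords 0, 1, 2, 3, ... to the signed values 0, -1, 1, -2, ...
 */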
#define TOSIGNED(x) (((x) >> 1) ^ (-((x) & 1)))

#define FIRST_DC_CB 0xB8

static const uint8_t dc_codebook[7] = { 0x04, 0x28, 0x28, 0x4D, 0x4D, 0x70, 0x70 };

static av_always_inline void decode_dc_coeffs(GetBitContext *gb, int16_t *out,
                                              int blocks_per_slice)
{
    int16_t prev_dc;
    int code, i, sign;

    OPEN_READER(re, gb);

    DECODE_CODEWORD(code, FIRST_DC_CB);
    prev_dc = TOSIGNED(code);
    out[0] = prev_dc;

    out += 64; // dc coeff for the next block

    code = 5;
    sign = 0;
    for (i = 1; i < blocks_per_slice; i++, out += 64) {
        DECODE_CODEWORD(code, dc_codebook[FFMIN(code, 6U)]);
        if (code) sign ^= -(code & 1);
        else      sign  = 0;
        prev_dc += (((code + 1) >> 1) ^ sign) - sign;
        out[0] = prev_dc;
    }
    CLOSE_READER(re, gb);
}

// adaptive codebook switching lut according to previous run/level values
static const uint8_t run_to_cb[16] = { 0x06, 0x06, 0x05, 0x05, 0x04, 0x29, 0x29, 0x29, 0x29, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x4C };
static const uint8_t lev_to_cb[10] = { 0x04, 0x0A, 0x05, 0x06, 0x04, 0x28, 0x28, 0x28, 0x28, 0x4C };

static av_always_inline void decode_ac_coeffs(AVCodecContext *avctx, GetBitContext *gb,
                                              int16_t *out, int blocks_per_slice)
{
    ProresContext *ctx = avctx->priv_data;
    int block_mask, sign;
    unsigned pos, run, level;
    int max_coeffs, i, bits_left;
    int log2_block_count = av_log2(blocks_per_slice);

    OPEN_READER(re, gb);
    UPDATE_CACHE(re, gb);

    run   = 4;
    level = 2;

    max_coeffs = 64 << log2_block_count;
    block_mask = blocks_per_slice - 1;

    for (pos = block_mask;;) {
        bits_left = gb->size_in_bits - re_index;
        if (!bits_left || (bits_left < 32 && !SHOW_UBITS(re, gb, bits_left)))
            break;

        DECODE_CODEWORD(run, run_to_cb[FFMIN(run, 15)]);
        pos += run + 1;
        if (pos >= max_coeffs) {
            av_log(avctx, AV_LOG_ERROR, "ac tex damaged %d, %d\n", pos, max_coeffs);
            return;
        }

        DECODE_CODEWORD(level, lev_to_cb[FFMIN(level, 9)]);
        level += 1;

        i = pos >> log2_block_count;

        sign = SHOW_SBITS(re, gb, 1);
        SKIP_BITS(re, gb, 1);
        out[((pos & block_mask) << 6) + ctx->scan[i]] = ((level ^ sign) - sign);
    }

    CLOSE_READER(re, gb);
}

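/*
 * Each luma macroblock holds four 8x8 blocks; the DC and AC coefficients of
 * the whole slice are decoded before the IDCT writes out the samples.
 */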
static void decode_slice_luma(AVCodecContext *avctx, SliceContext *slice,
                              uint16_t *dst, int dst_stride,
                              const uint8_t *buf, unsigned buf_size,
                              const int16_t *qmat)
{
    ProresContext *ctx = avctx->priv_data;
    LOCAL_ALIGNED_16(int16_t, blocks, [8*4*64]);
    int16_t *block;
    GetBitContext gb;
    int i, blocks_per_slice = slice->mb_count << 2;

    for (i = 0; i < blocks_per_slice; i++)
        ctx->dsp.clear_block(blocks + (i << 6));

    init_get_bits(&gb, buf, buf_size << 3);

    decode_dc_coeffs(&gb, blocks, blocks_per_slice);
    decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice);

    block = blocks;
    for (i = 0; i < slice->mb_count; i++) {
        ctx->prodsp.idct_put(dst,                    dst_stride, block + (0 << 6), qmat);
        ctx->prodsp.idct_put(dst + 8,                dst_stride, block + (1 << 6), qmat);
        ctx->prodsp.idct_put(dst + 4*dst_stride,     dst_stride, block + (2 << 6), qmat);
        ctx->prodsp.idct_put(dst + 4*dst_stride + 8, dst_stride, block + (3 << 6), qmat);
        block += 4*64;
        dst += 16;
    }
}

static void decode_slice_chroma(AVCodecContext *avctx, SliceContext *slice,
                                uint16_t *dst, int dst_stride,
                                const uint8_t *buf, unsigned buf_size,
                                const int16_t *qmat, int log2_blocks_per_mb)
{
    ProresContext *ctx = avctx->priv_data;
    LOCAL_ALIGNED_16(int16_t, blocks, [8*4*64]);
    int16_t *block;
    GetBitContext gb;
    int i, j, blocks_per_slice = slice->mb_count << log2_blocks_per_mb;

    for (i = 0; i < blocks_per_slice; i++)
        ctx->dsp.clear_block(blocks + (i << 6));

    init_get_bits(&gb, buf, buf_size << 3);

    decode_dc_coeffs(&gb, blocks, blocks_per_slice);
    decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice);

    block = blocks;
    for (i = 0; i < slice->mb_count; i++) {
        for (j = 0; j < log2_blocks_per_mb; j++) {
            ctx->prodsp.idct_put(dst,                dst_stride, block + (0 << 6), qmat);
            ctx->prodsp.idct_put(dst + 4*dst_stride, dst_stride, block + (1 << 6), qmat);
            block += 2*64;
            dst += 8;
        }
    }
}

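/*
 * Alpha samples are coded as deltas from the previous value: each entry is
 * either a full num_bits delta or a short signed delta, and the inner loop is
 * followed by a run length (4 bits, or 11 bits when the 4-bit field is zero)
 * that repeats the current value.
 */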
static void unpack_alpha(GetBitContext *gb, uint16_t *dst, int num_coeffs,
                         const int num_bits)
{
    const int mask = (1 << num_bits) - 1;
    int i, idx, val, alpha_val;

    idx       = 0;
    alpha_val = mask;
    do {
        do {
            if (get_bits1(gb)) {
                val = get_bits(gb, num_bits);
            } else {
                int sign;
                val  = get_bits(gb, num_bits == 16 ? 7 : 4);
                sign = val & 1;
                val  = (val + 2) >> 1;
                if (sign)
                    val = -val;
            }
            alpha_val = (alpha_val + val) & mask;
            if (num_bits == 16) {
                dst[idx++] = alpha_val >> 6;
            } else {
                dst[idx++] = (alpha_val << 2) | (alpha_val >> 6);
            }
            if (idx == num_coeffs - 1)
                break;
        } while (get_bits_left(gb) > 0 && get_bits1(gb));
        val = get_bits(gb, 4);
        if (!val)
            val = get_bits(gb, 11);
        if (idx + val > num_coeffs)
            val = num_coeffs - idx;
        if (num_bits == 16) {
            for (i = 0; i < val; i++)
                dst[idx++] = alpha_val >> 6;
        } else {
            for (i = 0; i < val; i++)
                dst[idx++] = (alpha_val << 2) | (alpha_val >> 6);
        }
    } while (idx < num_coeffs);
}

/**
 * Decode alpha slice plane.
 */
static void decode_slice_alpha(ProresContext *ctx,
                               uint16_t *dst, int dst_stride,
                               const uint8_t *buf, int buf_size,
                               int blocks_per_slice)
{
    GetBitContext gb;
    int i;
    LOCAL_ALIGNED_16(int16_t, blocks, [8*4*64]);
    int16_t *block;

    for (i = 0; i < blocks_per_slice << 2; i++)
        ctx->dsp.clear_block(blocks + (i << 6));

    init_get_bits(&gb, buf, buf_size << 3);

    if (ctx->alpha_info == 2) {
        unpack_alpha(&gb, blocks, blocks_per_slice * 4 * 64, 16);
    } else {
        unpack_alpha(&gb, blocks, blocks_per_slice * 4 * 64, 8);
    }

    block = blocks;
    for (i = 0; i < 16; i++) {
        memcpy(dst, block, 16 * blocks_per_slice * sizeof(*dst));
        dst   += dst_stride >> 1;
        block += 16 * blocks_per_slice;
    }
}

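/*
 * Slice header layout, as parsed below: header size in the top five bits of
 * byte 0, quantiser in byte 1, 16-bit coded sizes for the luma and first
 * chroma planes (and the second chroma plane when the header is long enough);
 * whatever remains of the slice is the alpha plane.
 */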
static int decode_slice_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
    ProresContext *ctx = avctx->priv_data;
    SliceContext *slice = &ctx->slices[jobnr];
    const uint8_t *buf = slice->data;
    AVFrame *pic = ctx->frame;
    int i, hdr_size, qscale, log2_chroma_blocks_per_mb;
    int luma_stride, chroma_stride;
    int y_data_size, u_data_size, v_data_size, a_data_size;
    uint8_t *dest_y, *dest_u, *dest_v, *dest_a;
    int16_t qmat_luma_scaled[64];
    int16_t qmat_chroma_scaled[64];
    int mb_x_shift;

    slice->ret = -1;
    //av_log(avctx, AV_LOG_INFO, "slice %d mb width %d mb x %d y %d\n",
    //       jobnr, slice->mb_count, slice->mb_x, slice->mb_y);

    // slice header
    hdr_size = buf[0] >> 3;
    qscale   = av_clip(buf[1], 1, 224);
    qscale   = qscale > 128 ? qscale - 96 << 2: qscale;
    y_data_size = AV_RB16(buf + 2);
    u_data_size = AV_RB16(buf + 4);
    v_data_size = slice->data_size - y_data_size - u_data_size - hdr_size;
    if (hdr_size > 7) v_data_size = AV_RB16(buf + 6);
    a_data_size = slice->data_size - y_data_size - u_data_size -
                  v_data_size - hdr_size;

    if (y_data_size < 0 || u_data_size < 0 || v_data_size < 0
        || hdr_size+y_data_size+u_data_size+v_data_size > slice->data_size){
        av_log(avctx, AV_LOG_ERROR, "invalid plane data size\n");
        return -1;
    }

    buf += hdr_size;

    for (i = 0; i < 64; i++) {
        qmat_luma_scaled  [i] = ctx->qmat_luma  [i] * qscale;
        qmat_chroma_scaled[i] = ctx->qmat_chroma[i] * qscale;
    }

    if (ctx->frame_type == 0) {
        luma_stride   = pic->linesize[0];
        chroma_stride = pic->linesize[1];
    } else {
        luma_stride   = pic->linesize[0] << 1;
        chroma_stride = pic->linesize[1] << 1;
    }

    if (avctx->pix_fmt == AV_PIX_FMT_YUV444P10 || avctx->pix_fmt == AV_PIX_FMT_YUVA444P10) {
        mb_x_shift = 5;
        log2_chroma_blocks_per_mb = 2;
    } else {
        mb_x_shift = 4;
        log2_chroma_blocks_per_mb = 1;
    }

    dest_y = pic->data[0] + (slice->mb_y << 4) * luma_stride   + (slice->mb_x << 5);
    dest_u = pic->data[1] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);
    dest_v = pic->data[2] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);
    dest_a = pic->data[3] + (slice->mb_y << 4) * luma_stride   + (slice->mb_x << 5);

    if (ctx->frame_type && ctx->first_field ^ ctx->frame->top_field_first) {
        dest_y += pic->linesize[0];
        dest_u += pic->linesize[1];
        dest_v += pic->linesize[2];
        dest_a += pic->linesize[3];
    }

    decode_slice_luma(avctx, slice, (uint16_t*)dest_y, luma_stride,
                      buf, y_data_size, qmat_luma_scaled);

    if (!(avctx->flags & CODEC_FLAG_GRAY)) {
        decode_slice_chroma(avctx, slice, (uint16_t*)dest_u, chroma_stride,
                            buf + y_data_size, u_data_size,
                            qmat_chroma_scaled, log2_chroma_blocks_per_mb);
        decode_slice_chroma(avctx, slice, (uint16_t*)dest_v, chroma_stride,
                            buf + y_data_size + u_data_size, v_data_size,
                            qmat_chroma_scaled, log2_chroma_blocks_per_mb);
    }

    /* decode alpha plane if available */
    if (ctx->alpha_info && pic->data[3] && a_data_size)
        decode_slice_alpha(ctx, (uint16_t*)dest_a, luma_stride,
                           buf + y_data_size + u_data_size + v_data_size,
                           a_data_size, slice->mb_count);

    slice->ret = 0;
    return 0;
}

static int decode_picture(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;
    int i;

    avctx->execute2(avctx, decode_slice_thread, NULL, NULL, ctx->slice_count);

    for (i = 0; i < ctx->slice_count; i++)
        if (ctx->slices[i].ret < 0)
            return ctx->slices[i].ret;

    return 0;
}

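/*
 * A coded frame starts with an 8-byte atom header whose type must be 'icpf',
 * followed by the frame header and either one picture (progressive) or two
 * field pictures (interlaced).
 */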
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    ProresContext *ctx = avctx->priv_data;
    AVFrame *frame = data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int frame_hdr_size, pic_size;

    if (buf_size < 28 || AV_RL32(buf + 4) != AV_RL32("icpf")) {
        av_log(avctx, AV_LOG_ERROR, "invalid frame header\n");
        return -1;
    }

    ctx->frame = frame;
    ctx->frame->pict_type = AV_PICTURE_TYPE_I;
    ctx->frame->key_frame = 1;
    ctx->first_field = 1;

    buf += 8;
    buf_size -= 8;

    frame_hdr_size = decode_frame_header(ctx, buf, buf_size, avctx);
    if (frame_hdr_size < 0)
        return -1;

    buf += frame_hdr_size;
    buf_size -= frame_hdr_size;

    if (ff_get_buffer(avctx, frame, 0) < 0)
        return -1;

 decode_picture:
    pic_size = decode_picture_header(avctx, buf, buf_size);
    if (pic_size < 0) {
        av_log(avctx, AV_LOG_ERROR, "error decoding picture header\n");
        return -1;
    }

    if (decode_picture(avctx)) {
        av_log(avctx, AV_LOG_ERROR, "error decoding picture\n");
        return -1;
    }

    buf += pic_size;
    buf_size -= pic_size;

    if (ctx->frame_type && buf_size > 0 && ctx->first_field) {
        ctx->first_field = 0;
        goto decode_picture;
    }

    *got_frame = 1;

    return avpkt->size;
}

static av_cold int decode_close(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;

    av_freep(&ctx->slices);

    return 0;
}

AVCodec ff_prores_decoder = {
    .name           = "prores",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_PRORES,
    .priv_data_size = sizeof(ProresContext),
    .init           = decode_init,
    .close          = decode_close,
    .decode         = decode_frame,
    .long_name      = NULL_IF_CONFIG_SMALL("ProRes"),
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_SLICE_THREADS,
};