You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

697 lines
23KB

  1. /*
  2. * Copyright (c) 2010-2011 Maxim Poliakovski
  3. * Copyright (c) 2010-2011 Elvis Presley
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * Known FOURCCs: 'apch' (HQ), 'apcn' (SD), 'apcs' (LT), 'apco' (Proxy), 'ap4h' (4444)
  24. */
  25. //#define DEBUG
  26. #define LONG_BITSTREAM_READER
  27. #include "libavutil/internal.h"
  28. #include "avcodec.h"
  29. #include "get_bits.h"
  30. #include "idctdsp.h"
  31. #include "internal.h"
  32. #include "simple_idct.h"
  33. #include "proresdec.h"
  34. #include "proresdata.h"
  35. static void permute(uint8_t *dst, const uint8_t *src, const uint8_t permutation[64])
  36. {
  37. int i;
  38. for (i = 0; i < 64; i++)
  39. dst[i] = permutation[src[i]];
  40. }
  41. static av_cold int decode_init(AVCodecContext *avctx)
  42. {
  43. ProresContext *ctx = avctx->priv_data;
  44. uint8_t idct_permutation[64];
  45. avctx->bits_per_raw_sample = 10;
  46. ff_blockdsp_init(&ctx->bdsp, avctx);
  47. ff_proresdsp_init(&ctx->prodsp, avctx);
  48. ff_init_scantable_permutation(idct_permutation,
  49. ctx->prodsp.idct_permutation_type);
  50. permute(ctx->progressive_scan, ff_prores_progressive_scan, idct_permutation);
  51. permute(ctx->interlaced_scan, ff_prores_interlaced_scan, idct_permutation);
  52. return 0;
  53. }
/**
 * Parse the ProRes frame header.
 *
 * Fills in the frame type (progressive/interlaced), alpha mode, pixel
 * format, scan order and the luma/chroma quantisation matrices.
 *
 * @param ctx       decoder private context
 * @param buf       frame header bytes, starting at the header-size field
 * @param data_size number of bytes available at buf
 * @param avctx     codec context (logging and pix_fmt)
 * @return frame header size in bytes on success, negative AVERROR otherwise
 */
static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
                               const int data_size, AVCodecContext *avctx)
{
    int hdr_size, width, height, flags;
    int version;
    const uint8_t *ptr;

    hdr_size = AV_RB16(buf);
    ff_dlog(avctx, "header size %d\n", hdr_size);
    if (hdr_size > data_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong header size\n");
        return AVERROR_INVALIDDATA;
    }

    version = AV_RB16(buf + 2);
    ff_dlog(avctx, "%.4s version %d\n", buf+4, version);
    if (version > 1) {
        av_log(avctx, AV_LOG_ERROR, "unsupported version: %d\n", version);
        return AVERROR_PATCHWELCOME;
    }

    width  = AV_RB16(buf + 8);
    height = AV_RB16(buf + 10);
    /* mid-stream resolution changes are not supported */
    if (width != avctx->width || height != avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "picture resolution change: %dx%d -> %dx%d\n",
               avctx->width, avctx->height, width, height);
        return AVERROR_PATCHWELCOME;
    }

    /* bits 2-3 of byte 12: 0 = progressive, 1/2 = interlaced field order */
    ctx->frame_type = (buf[12] >> 2) & 3;
    ctx->alpha_info = buf[17] & 0xf;

    if (ctx->alpha_info > 2) {
        av_log(avctx, AV_LOG_ERROR, "Invalid alpha mode %d\n", ctx->alpha_info);
        return AVERROR_INVALIDDATA;
    }
    if (avctx->skip_alpha) ctx->alpha_info = 0;

    ff_dlog(avctx, "frame type %d\n", ctx->frame_type);

    if (ctx->frame_type == 0) {
        ctx->scan = ctx->progressive_scan; // permuted
    } else {
        ctx->scan = ctx->interlaced_scan; // permuted
        ctx->frame->interlaced_frame = 1;
        ctx->frame->top_field_first = ctx->frame_type == 1;
    }

    /* top two bits of byte 12 both set => 4:4:4 chroma sampling */
    if (ctx->alpha_info) {
        avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUVA444P10 : AV_PIX_FMT_YUVA422P10;
    } else {
        avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_YUV422P10;
    }

    ptr   = buf + 20;
    flags = buf[19];
    ff_dlog(avctx, "flags %x\n", flags);

    /* flag bit 1: a custom 64-byte luma quant matrix follows */
    if (flags & 2) {
        if(buf + data_size - ptr < 64) {
            av_log(avctx, AV_LOG_ERROR, "Header truncated\n");
            return AVERROR_INVALIDDATA;
        }
        /* load the bitstream matrix remapped into IDCT permutation order */
        permute(ctx->qmat_luma, ctx->prodsp.idct_permutation, ptr);
        ptr += 64;
    } else {
        memset(ctx->qmat_luma, 4, 64);  /* default flat matrix */
    }

    /* flag bit 0: a custom 64-byte chroma quant matrix follows */
    if (flags & 1) {
        if(buf + data_size - ptr < 64) {
            av_log(avctx, AV_LOG_ERROR, "Header truncated\n");
            return AVERROR_INVALIDDATA;
        }
        permute(ctx->qmat_chroma, ctx->prodsp.idct_permutation, ptr);
    } else {
        memset(ctx->qmat_chroma, 4, 64);
    }

    return hdr_size;
}
/**
 * Parse the picture header and build the table of slice descriptors.
 *
 * Reads the slice geometry, (re)allocates ctx->slices as needed and fills
 * in each slice's macroblock position/count and its data pointer/size from
 * the 16-bit-per-slice index that follows the header.
 *
 * @return picture data size in bytes on success, negative AVERROR otherwise
 */
static int decode_picture_header(AVCodecContext *avctx, const uint8_t *buf, const int buf_size)
{
    ProresContext *ctx = avctx->priv_data;
    int i, hdr_size, slice_count;
    unsigned pic_data_size;
    int log2_slice_mb_width, log2_slice_mb_height;
    int slice_mb_count, mb_x, mb_y;
    const uint8_t *data_ptr, *index_ptr;

    hdr_size = buf[0] >> 3;   /* header size lives in the top 5 bits */
    if (hdr_size < 8 || hdr_size > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong picture header size\n");
        return AVERROR_INVALIDDATA;
    }

    pic_data_size = AV_RB32(buf + 1);
    if (pic_data_size > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong picture data size\n");
        return AVERROR_INVALIDDATA;
    }

    log2_slice_mb_width  = buf[7] >> 4;
    log2_slice_mb_height = buf[7] & 0xF;
    /* only slices up to 8 MBs wide and exactly 1 MB tall are supported */
    if (log2_slice_mb_width > 3 || log2_slice_mb_height) {
        av_log(avctx, AV_LOG_ERROR, "unsupported slice resolution: %dx%d\n",
               1 << log2_slice_mb_width, 1 << log2_slice_mb_height);
        return AVERROR_INVALIDDATA;
    }

    ctx->mb_width = (avctx->width + 15) >> 4;
    if (ctx->frame_type)
        /* interlaced: a MB row covers 32 frame lines (16 per field) */
        ctx->mb_height = (avctx->height + 31) >> 5;
    else
        ctx->mb_height = (avctx->height + 15) >> 4;

    slice_count = AV_RB16(buf + 5);

    /* reallocate the slice table only when the count changes */
    if (ctx->slice_count != slice_count || !ctx->slices) {
        av_freep(&ctx->slices);
        ctx->slice_count = 0;
        ctx->slices = av_mallocz_array(slice_count, sizeof(*ctx->slices));
        if (!ctx->slices)
            return AVERROR(ENOMEM);
        ctx->slice_count = slice_count;
    }

    if (!slice_count)
        return AVERROR(EINVAL);

    /* the header is followed by one 16-bit size entry per slice */
    if (hdr_size + slice_count*2 > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong slice count\n");
        return AVERROR_INVALIDDATA;
    }

    // parse slice information
    index_ptr = buf + hdr_size;
    data_ptr  = index_ptr + slice_count*2;

    slice_mb_count = 1 << log2_slice_mb_width;
    mb_x = 0;
    mb_y = 0;

    for (i = 0; i < slice_count; i++) {
        SliceContext *slice = &ctx->slices[i];

        slice->data = data_ptr;
        data_ptr += AV_RB16(index_ptr + i*2);

        /* halve the slice width near the right edge so it fits the picture */
        while (ctx->mb_width - mb_x < slice_mb_count)
            slice_mb_count >>= 1;

        slice->mb_x = mb_x;
        slice->mb_y = mb_y;
        slice->mb_count = slice_mb_count;
        slice->data_size = data_ptr - slice->data;

        /* smallest valid slice: header + plane sizes */
        if (slice->data_size < 6) {
            av_log(avctx, AV_LOG_ERROR, "error, wrong slice data size\n");
            return AVERROR_INVALIDDATA;
        }

        mb_x += slice_mb_count;
        if (mb_x == ctx->mb_width) {
            /* row complete: reset to full slice width for the next row */
            slice_mb_count = 1 << log2_slice_mb_width;
            mb_x = 0;
            mb_y++;
        }
        if (data_ptr > buf + buf_size) {
            av_log(avctx, AV_LOG_ERROR, "error, slice out of bounds\n");
            return AVERROR_INVALIDDATA;
        }
    }

    /* the slices must tile the picture exactly */
    if (mb_x || mb_y != ctx->mb_height) {
        av_log(avctx, AV_LOG_ERROR, "error wrong mb count y %d h %d\n",
               mb_y, ctx->mb_height);
        return AVERROR_INVALIDDATA;
    }

    return pic_data_size;
}
/**
 * Decode one unsigned codeword from the bitstream into @p val.
 *
 * The codebook descriptor byte packs three fields:
 *   bits 0-1: switch_bits - quotient threshold for switching from the
 *                           Rice code to the exp-Golomb code
 *   bits 2-4: exp_order   - order of the exp-Golomb code
 *   bits 5-7: rice_order  - order of the Rice code
 *
 * Must be used between OPEN_READER/CLOSE_READER with reader name `re`.
 * Returns AVERROR_INVALIDDATA from the enclosing function on an
 * implausibly long exp-Golomb codeword.
 */
#define DECODE_CODEWORD(val, codebook) \
do { \
unsigned int rice_order, exp_order, switch_bits; \
unsigned int q, buf, bits; \
\
UPDATE_CACHE(re, gb); \
buf = GET_CACHE(re, gb); \
\
/* number of bits to switch between rice and exp golomb */ \
switch_bits = codebook & 3; \
rice_order = codebook >> 5; \
exp_order = (codebook >> 2) & 7; \
\
q = 31 - av_log2(buf); \
\
if (q > switch_bits) { /* exp golomb */ \
bits = exp_order - switch_bits + (q<<1); \
if (bits > FFMIN(MIN_CACHE_BITS, 31)) \
return AVERROR_INVALIDDATA; \
val = SHOW_UBITS(re, gb, bits) - (1 << exp_order) + \
((switch_bits + 1) << rice_order); \
SKIP_BITS(re, gb, bits); \
} else if (rice_order) { \
SKIP_BITS(re, gb, q+1); \
val = (q << rice_order) + SHOW_UBITS(re, gb, rice_order); \
SKIP_BITS(re, gb, rice_order); \
} else { \
val = q; \
SKIP_BITS(re, gb, q+1); \
} \
} while (0)

/* map an unsigned zigzag-coded value back to signed */
#define TOSIGNED(x) (((x) >> 1) ^ (-((x) & 1)))

/* codebook descriptor for the first DC coefficient of a slice */
#define FIRST_DC_CB 0xB8

/* DC codebooks selected by the previous DC codeword (clamped to 6) */
static const uint8_t dc_codebook[7] = { 0x04, 0x28, 0x28, 0x4D, 0x4D, 0x70, 0x70};
/**
 * Decode the DC coefficients of all blocks in a slice.
 *
 * DC values are DPCM coded: the first block's DC is decoded with a fixed
 * codebook, subsequent deltas use an adaptive codebook selected by the
 * previous codeword. The DC of block k is written to out[k * 64].
 *
 * @return 0 on success; DECODE_CODEWORD may return AVERROR_INVALIDDATA
 *         directly from within this function on damaged input
 */
static av_always_inline int decode_dc_coeffs(GetBitContext *gb, int16_t *out,
                                             int blocks_per_slice)
{
    int16_t prev_dc;
    int code, i, sign;

    OPEN_READER(re, gb);

    DECODE_CODEWORD(code, FIRST_DC_CB);
    prev_dc = TOSIGNED(code);
    out[0] = prev_dc;

    out += 64; // dc coeff for the next block

    code = 5;
    sign = 0;
    for (i = 1; i < blocks_per_slice; i++, out += 64) {
        DECODE_CODEWORD(code, dc_codebook[FFMIN(code, 6U)]);
        /* a zero codeword resets the running sign; an odd codeword
           toggles it, an even one keeps it */
        if(code) sign ^= -(code & 1);
        else sign = 0;
        /* apply the signed delta to the running DC predictor */
        prev_dc += (((code + 1) >> 1) ^ sign) - sign;
        out[0] = prev_dc;
    }
    CLOSE_READER(re, gb);
    return 0;
}
// adaptive codebook switching lut according to previous run/level values
/* codebook descriptors indexed by the previous run value (clamped to 15) */
static const uint8_t run_to_cb[16] = { 0x06, 0x06, 0x05, 0x05, 0x04, 0x29, 0x29, 0x29, 0x29, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x4C };
/* codebook descriptors indexed by the previous level value (clamped to 9) */
static const uint8_t lev_to_cb[10] = { 0x04, 0x0A, 0x05, 0x06, 0x04, 0x28, 0x28, 0x28, 0x28, 0x4C };
  265. static av_always_inline int decode_ac_coeffs(AVCodecContext *avctx, GetBitContext *gb,
  266. int16_t *out, int blocks_per_slice)
  267. {
  268. ProresContext *ctx = avctx->priv_data;
  269. int block_mask, sign;
  270. unsigned pos, run, level;
  271. int max_coeffs, i, bits_left;
  272. int log2_block_count = av_log2(blocks_per_slice);
  273. OPEN_READER(re, gb);
  274. UPDATE_CACHE(re, gb); \
  275. run = 4;
  276. level = 2;
  277. max_coeffs = 64 << log2_block_count;
  278. block_mask = blocks_per_slice - 1;
  279. for (pos = block_mask;;) {
  280. bits_left = gb->size_in_bits - re_index;
  281. if (!bits_left || (bits_left < 32 && !SHOW_UBITS(re, gb, bits_left)))
  282. break;
  283. DECODE_CODEWORD(run, run_to_cb[FFMIN(run, 15)]);
  284. pos += run + 1;
  285. if (pos >= max_coeffs) {
  286. av_log(avctx, AV_LOG_ERROR, "ac tex damaged %d, %d\n", pos, max_coeffs);
  287. return AVERROR_INVALIDDATA;
  288. }
  289. DECODE_CODEWORD(level, lev_to_cb[FFMIN(level, 9)]);
  290. level += 1;
  291. i = pos >> log2_block_count;
  292. sign = SHOW_SBITS(re, gb, 1);
  293. SKIP_BITS(re, gb, 1);
  294. out[((pos & block_mask) << 6) + ctx->scan[i]] = ((level ^ sign) - sign);
  295. }
  296. CLOSE_READER(re, gb);
  297. return 0;
  298. }
  299. static int decode_slice_luma(AVCodecContext *avctx, SliceContext *slice,
  300. uint16_t *dst, int dst_stride,
  301. const uint8_t *buf, unsigned buf_size,
  302. const int16_t *qmat)
  303. {
  304. ProresContext *ctx = avctx->priv_data;
  305. LOCAL_ALIGNED_16(int16_t, blocks, [8*4*64]);
  306. int16_t *block;
  307. GetBitContext gb;
  308. int i, blocks_per_slice = slice->mb_count<<2;
  309. int ret;
  310. for (i = 0; i < blocks_per_slice; i++)
  311. ctx->bdsp.clear_block(blocks+(i<<6));
  312. init_get_bits(&gb, buf, buf_size << 3);
  313. if ((ret = decode_dc_coeffs(&gb, blocks, blocks_per_slice)) < 0)
  314. return ret;
  315. if ((ret = decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice)) < 0)
  316. return ret;
  317. block = blocks;
  318. for (i = 0; i < slice->mb_count; i++) {
  319. ctx->prodsp.idct_put(dst, dst_stride, block+(0<<6), qmat);
  320. ctx->prodsp.idct_put(dst +8, dst_stride, block+(1<<6), qmat);
  321. ctx->prodsp.idct_put(dst+4*dst_stride , dst_stride, block+(2<<6), qmat);
  322. ctx->prodsp.idct_put(dst+4*dst_stride+8, dst_stride, block+(3<<6), qmat);
  323. block += 4*64;
  324. dst += 16;
  325. }
  326. return 0;
  327. }
  328. static int decode_slice_chroma(AVCodecContext *avctx, SliceContext *slice,
  329. uint16_t *dst, int dst_stride,
  330. const uint8_t *buf, unsigned buf_size,
  331. const int16_t *qmat, int log2_blocks_per_mb)
  332. {
  333. ProresContext *ctx = avctx->priv_data;
  334. LOCAL_ALIGNED_16(int16_t, blocks, [8*4*64]);
  335. int16_t *block;
  336. GetBitContext gb;
  337. int i, j, blocks_per_slice = slice->mb_count << log2_blocks_per_mb;
  338. int ret;
  339. for (i = 0; i < blocks_per_slice; i++)
  340. ctx->bdsp.clear_block(blocks+(i<<6));
  341. init_get_bits(&gb, buf, buf_size << 3);
  342. if ((ret = decode_dc_coeffs(&gb, blocks, blocks_per_slice)) < 0)
  343. return ret;
  344. if ((ret = decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice)) < 0)
  345. return ret;
  346. block = blocks;
  347. for (i = 0; i < slice->mb_count; i++) {
  348. for (j = 0; j < log2_blocks_per_mb; j++) {
  349. ctx->prodsp.idct_put(dst, dst_stride, block+(0<<6), qmat);
  350. ctx->prodsp.idct_put(dst+4*dst_stride, dst_stride, block+(1<<6), qmat);
  351. block += 2*64;
  352. dst += 8;
  353. }
  354. }
  355. return 0;
  356. }
/**
 * Unpack run/delta coded alpha samples into 10-bit output values.
 *
 * The stream alternates literal runs (each sample either a raw value or a
 * short signed delta from the previous one) with repeat runs of the last
 * value. Output is always 10 bits wide: 16-bit alpha is shifted down by 6,
 * 8-bit alpha is expanded by bit replication.
 *
 * @param gb         bitstream reader positioned at the alpha data
 * @param dst        output buffer for num_coeffs samples
 * @param num_coeffs total number of alpha samples to produce
 * @param num_bits   coded alpha bit depth (8 or 16)
 */
static void unpack_alpha(GetBitContext *gb, uint16_t *dst, int num_coeffs,
                         const int num_bits)
{
    const int mask = (1 << num_bits) - 1;
    int i, idx, val, alpha_val;

    idx = 0;
    alpha_val = mask;  /* start from fully opaque */

    do {
        do {
            /* flag bit: raw value follows, else a short signed delta */
            if (get_bits1(gb)) {
                val = get_bits(gb, num_bits);
            } else {
                int sign;
                val = get_bits(gb, num_bits == 16 ? 7 : 4);
                sign = val & 1;
                val = (val + 2) >> 1;
                if (sign)
                    val = -val;
            }
            alpha_val = (alpha_val + val) & mask;
            if (num_bits == 16) {
                dst[idx++] = alpha_val >> 6;
            } else {
                dst[idx++] = (alpha_val << 2) | (alpha_val >> 6);
            }
            if (idx >= num_coeffs)
                break;
        } while (get_bits_left(gb)>0 && get_bits1(gb));
        /* repeat count: 4 bits, a zero escapes to an 11-bit count */
        val = get_bits(gb, 4);
        if (!val)
            val = get_bits(gb, 11);
        if (idx + val > num_coeffs)
            val = num_coeffs - idx;   /* clamp the run to the buffer */
        if (num_bits == 16) {
            for (i = 0; i < val; i++)
                dst[idx++] = alpha_val >> 6;
        } else {
            for (i = 0; i < val; i++)
                dst[idx++] = (alpha_val << 2) | (alpha_val >> 6);
        }
    } while (idx < num_coeffs);
}
  399. /**
  400. * Decode alpha slice plane.
  401. */
  402. static void decode_slice_alpha(ProresContext *ctx,
  403. uint16_t *dst, int dst_stride,
  404. const uint8_t *buf, int buf_size,
  405. int blocks_per_slice)
  406. {
  407. GetBitContext gb;
  408. int i;
  409. LOCAL_ALIGNED_16(int16_t, blocks, [8*4*64]);
  410. int16_t *block;
  411. for (i = 0; i < blocks_per_slice<<2; i++)
  412. ctx->bdsp.clear_block(blocks+(i<<6));
  413. init_get_bits(&gb, buf, buf_size << 3);
  414. if (ctx->alpha_info == 2) {
  415. unpack_alpha(&gb, blocks, blocks_per_slice * 4 * 64, 16);
  416. } else {
  417. unpack_alpha(&gb, blocks, blocks_per_slice * 4 * 64, 8);
  418. }
  419. block = blocks;
  420. for (i = 0; i < 16; i++) {
  421. memcpy(dst, block, 16 * blocks_per_slice * sizeof(*dst));
  422. dst += dst_stride >> 1;
  423. block += 16 * blocks_per_slice;
  424. }
  425. }
/**
 * Decode one slice: parse the slice header, then decode the luma, chroma
 * and optional alpha planes into the output frame.
 *
 * Runs as an avctx->execute2() job; jobnr selects the slice. The per-slice
 * status is stored in slice->ret (checked later by decode_picture) in
 * addition to the return value.
 */
static int decode_slice_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
    ProresContext *ctx = avctx->priv_data;
    SliceContext *slice = &ctx->slices[jobnr];
    const uint8_t *buf = slice->data;
    AVFrame *pic = ctx->frame;
    int i, hdr_size, qscale, log2_chroma_blocks_per_mb;
    int luma_stride, chroma_stride;
    int y_data_size, u_data_size, v_data_size, a_data_size;
    uint8_t *dest_y, *dest_u, *dest_v, *dest_a;
    int16_t qmat_luma_scaled[64];
    int16_t qmat_chroma_scaled[64];
    int mb_x_shift;
    int ret;

    slice->ret = -1;  /* assume failure until the slice fully decodes */
    //av_log(avctx, AV_LOG_INFO, "slice %d mb width %d mb x %d y %d\n",
    //       jobnr, slice->mb_count, slice->mb_x, slice->mb_y);

    // slice header
    hdr_size = buf[0] >> 3;
    qscale = av_clip(buf[1], 1, 224);
    /* values above 128 are scaled: (qscale - 96) << 2 */
    qscale = qscale > 128 ? qscale - 96 << 2: qscale;
    y_data_size = AV_RB16(buf + 2);
    u_data_size = AV_RB16(buf + 4);
    /* the v plane size field is present only in longer slice headers;
       otherwise it is inferred as the remainder (no alpha in that case) */
    v_data_size = slice->data_size - y_data_size - u_data_size - hdr_size;
    if (hdr_size > 7) v_data_size = AV_RB16(buf + 6);
    a_data_size = slice->data_size - y_data_size - u_data_size -
                  v_data_size - hdr_size;

    if (y_data_size < 0 || u_data_size < 0 || v_data_size < 0
        || hdr_size+y_data_size+u_data_size+v_data_size > slice->data_size){
        av_log(avctx, AV_LOG_ERROR, "invalid plane data size\n");
        return AVERROR_INVALIDDATA;
    }

    buf += hdr_size;

    /* scale the frame-level quant matrices by the slice qscale */
    for (i = 0; i < 64; i++) {
        qmat_luma_scaled [i] = ctx->qmat_luma [i] * qscale;
        qmat_chroma_scaled[i] = ctx->qmat_chroma[i] * qscale;
    }

    /* interlaced frames decode one field at a time: double the strides */
    if (ctx->frame_type == 0) {
        luma_stride = pic->linesize[0];
        chroma_stride = pic->linesize[1];
    } else {
        luma_stride = pic->linesize[0] << 1;
        chroma_stride = pic->linesize[1] << 1;
    }

    if (avctx->pix_fmt == AV_PIX_FMT_YUV444P10 || avctx->pix_fmt == AV_PIX_FMT_YUVA444P10) {
        mb_x_shift = 5;                /* 4:4:4 - chroma MB is 32 bytes wide */
        log2_chroma_blocks_per_mb = 2;
    } else {
        mb_x_shift = 4;                /* 4:2:2 - chroma MB is 16 bytes wide */
        log2_chroma_blocks_per_mb = 1;
    }

    dest_y = pic->data[0] + (slice->mb_y << 4) * luma_stride + (slice->mb_x << 5);
    dest_u = pic->data[1] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);
    dest_v = pic->data[2] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);
    dest_a = pic->data[3] + (slice->mb_y << 4) * luma_stride + (slice->mb_x << 5);

    /* for the second field (relative to field order), start one line down */
    if (ctx->frame_type && ctx->first_field ^ ctx->frame->top_field_first) {
        dest_y += pic->linesize[0];
        dest_u += pic->linesize[1];
        dest_v += pic->linesize[2];
        dest_a += pic->linesize[3];
    }

    ret = decode_slice_luma(avctx, slice, (uint16_t*)dest_y, luma_stride,
                            buf, y_data_size, qmat_luma_scaled);
    if (ret < 0)
        return ret;

    /* skip chroma entirely in grayscale-only mode */
    if (!(avctx->flags & AV_CODEC_FLAG_GRAY)) {
        ret = decode_slice_chroma(avctx, slice, (uint16_t*)dest_u, chroma_stride,
                                  buf + y_data_size, u_data_size,
                                  qmat_chroma_scaled, log2_chroma_blocks_per_mb);
        if (ret < 0)
            return ret;

        ret = decode_slice_chroma(avctx, slice, (uint16_t*)dest_v, chroma_stride,
                                  buf + y_data_size + u_data_size, v_data_size,
                                  qmat_chroma_scaled, log2_chroma_blocks_per_mb);
        if (ret < 0)
            return ret;
    }

    /* decode alpha plane if available */
    if (ctx->alpha_info && pic->data[3] && a_data_size)
        decode_slice_alpha(ctx, (uint16_t*)dest_a, luma_stride,
                           buf + y_data_size + u_data_size + v_data_size,
                           a_data_size, slice->mb_count);

    slice->ret = 0;
    return 0;
}
  511. static int decode_picture(AVCodecContext *avctx)
  512. {
  513. ProresContext *ctx = avctx->priv_data;
  514. int i;
  515. avctx->execute2(avctx, decode_slice_thread, NULL, NULL, ctx->slice_count);
  516. for (i = 0; i < ctx->slice_count; i++)
  517. if (ctx->slices[i].ret < 0)
  518. return ctx->slices[i].ret;
  519. return 0;
  520. }
/**
 * Decode one ProRes access unit into an AVFrame.
 *
 * Packet layout: an 8-byte atom header (32-bit size + 'icpf' tag), the
 * frame header, then one picture for progressive content or two pictures
 * (one per field) for interlaced content.
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    ProresContext *ctx = avctx->priv_data;
    AVFrame *frame = data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int frame_hdr_size, pic_size, ret;

    if (buf_size < 28 || AV_RL32(buf + 4) != AV_RL32("icpf")) {
        av_log(avctx, AV_LOG_ERROR, "invalid frame header\n");
        return AVERROR_INVALIDDATA;
    }

    ctx->frame = frame;
    ctx->frame->pict_type = AV_PICTURE_TYPE_I;  /* ProRes is intra-only */
    ctx->frame->key_frame = 1;
    ctx->first_field = 1;

    buf += 8;        /* skip the atom size and fourcc */
    buf_size -= 8;

    frame_hdr_size = decode_frame_header(ctx, buf, buf_size, avctx);
    if (frame_hdr_size < 0)
        return frame_hdr_size;

    buf += frame_hdr_size;
    buf_size -= frame_hdr_size;

    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;

 decode_picture:
    pic_size = decode_picture_header(avctx, buf, buf_size);
    if (pic_size < 0) {
        av_log(avctx, AV_LOG_ERROR, "error decoding picture header\n");
        return pic_size;
    }

    if ((ret = decode_picture(avctx)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "error decoding picture\n");
        return ret;
    }

    buf += pic_size;
    buf_size -= pic_size;

    /* interlaced streams carry the second field as another picture */
    if (ctx->frame_type && buf_size > 0 && ctx->first_field) {
        ctx->first_field = 0;
        goto decode_picture;
    }

    *got_frame = 1;

    return avpkt->size;
}
/**
 * Free decoder state: releases the slice descriptor array.
 */
static av_cold int decode_close(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;

    av_freep(&ctx->slices);

    return 0;
}
/* Decoder registration: supports direct rendering and slice threading. */
AVCodec ff_prores_decoder = {
    .name           = "prores",
    .long_name      = NULL_IF_CONFIG_SMALL("ProRes"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_PRORES,
    .priv_data_size = sizeof(ProresContext),
    .init           = decode_init,
    .close          = decode_close,
    .decode         = decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS,
};