/*
 * Copyright (c) 2010-2011 Maxim Poliakovski
 * Copyright (c) 2010-2011 Elvis Presley
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/**
 * @file
 * Known FOURCCs: 'apch' (HQ), 'apcn' (SD), 'apcs' (LT), 'apco' (Proxy), 'ap4h' (4444)
 */

//#define DEBUG

#define LONG_BITSTREAM_READER
#include "avcodec.h"
#include "get_bits.h"
#include "idctdsp.h"
#include "internal.h"
#include "simple_idct.h"
#include "proresdec.h"
#include "proresdata.h"
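/* Remap a 64-entry table: dst[i] = permutation[src[i]]. Used below to adapt
 * the scan tables and the bitstream quantisation matrices to the IDCT
 * permutation in use. */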
static void permute(uint8_t *dst, const uint8_t *src, const uint8_t permutation[64])
{
    int i;
    for (i = 0; i < 64; i++)
        dst[i] = permutation[src[i]];
}
static av_cold int decode_init(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;
    uint8_t idct_permutation[64];

    avctx->bits_per_raw_sample = 10;

    ff_blockdsp_init(&ctx->bdsp, avctx);
    ff_proresdsp_init(&ctx->prodsp, avctx);

    ff_init_scantable_permutation(idct_permutation,
                                  ctx->prodsp.idct_permutation_type);

    permute(ctx->progressive_scan, ff_prores_progressive_scan, idct_permutation);
    permute(ctx->interlaced_scan,  ff_prores_interlaced_scan,  idct_permutation);

    return 0;
}
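/* Parse the frame header: version, dimensions, frame/interlacing type, alpha
 * mode, pixel format and the optional custom quantisation matrices.
 * Returns the header size on success or a negative error code. */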
static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
                               const int data_size, AVCodecContext *avctx)
{
    int hdr_size, width, height, flags;
    int version;
    const uint8_t *ptr;

    hdr_size = AV_RB16(buf);
    av_dlog(avctx, "header size %d\n", hdr_size);
    if (hdr_size > data_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong header size\n");
        return AVERROR_INVALIDDATA;
    }

    version = AV_RB16(buf + 2);
    av_dlog(avctx, "%.4s version %d\n", buf + 4, version);
    if (version > 1) {
        av_log(avctx, AV_LOG_ERROR, "unsupported version: %d\n", version);
        return AVERROR_PATCHWELCOME;
    }

    width  = AV_RB16(buf + 8);
    height = AV_RB16(buf + 10);
    if (width != avctx->width || height != avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "picture resolution change: %dx%d -> %dx%d\n",
               avctx->width, avctx->height, width, height);
        return AVERROR_PATCHWELCOME;
    }

    ctx->frame_type = (buf[12] >> 2) & 3;
    ctx->alpha_info = buf[17] & 0xf;

    if (ctx->alpha_info > 2) {
        av_log(avctx, AV_LOG_ERROR, "Invalid alpha mode %d\n", ctx->alpha_info);
        return AVERROR_INVALIDDATA;
    }
    if (avctx->skip_alpha) ctx->alpha_info = 0;

    av_dlog(avctx, "frame type %d\n", ctx->frame_type);

    if (ctx->frame_type == 0) {
        ctx->scan = ctx->progressive_scan; // permuted
    } else {
        ctx->scan = ctx->interlaced_scan; // permuted
        ctx->frame->interlaced_frame = 1;
        ctx->frame->top_field_first = ctx->frame_type == 1;
    }

    if (ctx->alpha_info) {
        avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUVA444P10 : AV_PIX_FMT_YUVA422P10;
    } else {
        avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_YUV422P10;
    }

    ptr   = buf + 20;
    flags = buf[19];
    av_dlog(avctx, "flags %x\n", flags);

    if (flags & 2) {
        if (buf + data_size - ptr < 64) {
            av_log(avctx, AV_LOG_ERROR, "Header truncated\n");
            return AVERROR_INVALIDDATA;
        }
        permute(ctx->qmat_luma, ctx->prodsp.idct_permutation, ptr);
        ptr += 64;
    } else {
        memset(ctx->qmat_luma, 4, 64);
    }

    if (flags & 1) {
        if (buf + data_size - ptr < 64) {
            av_log(avctx, AV_LOG_ERROR, "Header truncated\n");
            return AVERROR_INVALIDDATA;
        }
        permute(ctx->qmat_chroma, ctx->prodsp.idct_permutation, ptr);
    } else {
        memset(ctx->qmat_chroma, 4, 64);
    }

    return hdr_size;
}
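/* Parse a picture header and build the slice index: data position, macroblock
 * coordinates and data size of every slice.
 * Returns the picture data size on success or a negative error code. */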
static int decode_picture_header(AVCodecContext *avctx, const uint8_t *buf, const int buf_size)
{
    ProresContext *ctx = avctx->priv_data;
    int i, hdr_size, slice_count;
    unsigned pic_data_size;
    int log2_slice_mb_width, log2_slice_mb_height;
    int slice_mb_count, mb_x, mb_y;
    const uint8_t *data_ptr, *index_ptr;

    hdr_size = buf[0] >> 3;
    if (hdr_size < 8 || hdr_size > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong picture header size\n");
        return AVERROR_INVALIDDATA;
    }

    pic_data_size = AV_RB32(buf + 1);
    if (pic_data_size > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong picture data size\n");
        return AVERROR_INVALIDDATA;
    }

    log2_slice_mb_width  = buf[7] >> 4;
    log2_slice_mb_height = buf[7] & 0xF;
    if (log2_slice_mb_width > 3 || log2_slice_mb_height) {
        av_log(avctx, AV_LOG_ERROR, "unsupported slice resolution: %dx%d\n",
               1 << log2_slice_mb_width, 1 << log2_slice_mb_height);
        return AVERROR_INVALIDDATA;
    }

    ctx->mb_width = (avctx->width + 15) >> 4;
    if (ctx->frame_type)
        ctx->mb_height = (avctx->height + 31) >> 5;
    else
        ctx->mb_height = (avctx->height + 15) >> 4;

    slice_count = AV_RB16(buf + 5);

    if (ctx->slice_count != slice_count || !ctx->slices) {
        av_freep(&ctx->slices);
        ctx->slice_count = 0;
        ctx->slices = av_mallocz_array(slice_count, sizeof(*ctx->slices));
        if (!ctx->slices)
            return AVERROR(ENOMEM);
        ctx->slice_count = slice_count;
    }

    if (!slice_count)
        return AVERROR(EINVAL);

    if (hdr_size + slice_count * 2 > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong slice count\n");
        return AVERROR_INVALIDDATA;
    }

    // parse slice information
    index_ptr = buf + hdr_size;
    data_ptr  = index_ptr + slice_count * 2;

    slice_mb_count = 1 << log2_slice_mb_width;
    mb_x = 0;
    mb_y = 0;

    for (i = 0; i < slice_count; i++) {
        SliceContext *slice = &ctx->slices[i];

        slice->data = data_ptr;
        data_ptr += AV_RB16(index_ptr + i * 2);

        while (ctx->mb_width - mb_x < slice_mb_count)
            slice_mb_count >>= 1;

        slice->mb_x = mb_x;
        slice->mb_y = mb_y;
        slice->mb_count = slice_mb_count;
        slice->data_size = data_ptr - slice->data;

        if (slice->data_size < 6) {
            av_log(avctx, AV_LOG_ERROR, "error, wrong slice data size\n");
            return AVERROR_INVALIDDATA;
        }

        mb_x += slice_mb_count;
        if (mb_x == ctx->mb_width) {
            slice_mb_count = 1 << log2_slice_mb_width;
            mb_x = 0;
            mb_y++;
        }
        if (data_ptr > buf + buf_size) {
            av_log(avctx, AV_LOG_ERROR, "error, slice out of bounds\n");
            return AVERROR_INVALIDDATA;
        }
    }

    if (mb_x || mb_y != ctx->mb_height) {
        av_log(avctx, AV_LOG_ERROR, "error wrong mb count y %d h %d\n",
               mb_y, ctx->mb_height);
        return AVERROR_INVALIDDATA;
    }

    return pic_data_size;
}
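/* Decode one codeword with the hybrid Rice / Exp-Golomb scheme.
 * The codebook byte packs the Rice/Exp-Golomb switch point (bits 0-1),
 * the Exp-Golomb order (bits 2-4) and the Rice order (bits 5-7). */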
#define DECODE_CODEWORD(val, codebook)                                  \
    do {                                                                \
        unsigned int rice_order, exp_order, switch_bits;                \
        unsigned int q, buf, bits;                                      \
                                                                        \
        UPDATE_CACHE(re, gb);                                           \
        buf = GET_CACHE(re, gb);                                        \
                                                                        \
        /* number of bits to switch between rice and exp golomb */      \
        switch_bits =  codebook & 3;                                    \
        rice_order  =  codebook >> 5;                                   \
        exp_order   = (codebook >> 2) & 7;                              \
                                                                        \
        q = 31 - av_log2(buf);                                          \
                                                                        \
        if (q > switch_bits) { /* exp golomb */                         \
            bits = exp_order - switch_bits + (q << 1);                  \
            val = SHOW_UBITS(re, gb, bits) - (1 << exp_order) +         \
                  ((switch_bits + 1) << rice_order);                    \
            SKIP_BITS(re, gb, bits);                                    \
        } else if (rice_order) {                                        \
            SKIP_BITS(re, gb, q + 1);                                   \
            val = (q << rice_order) + SHOW_UBITS(re, gb, rice_order);   \
            SKIP_BITS(re, gb, rice_order);                              \
        } else {                                                        \
            val = q;                                                    \
            SKIP_BITS(re, gb, q + 1);                                   \
        }                                                               \
    } while (0)
#define TOSIGNED(x) (((x) >> 1) ^ (-((x) & 1)))

#define FIRST_DC_CB 0xB8

static const uint8_t dc_codebook[7] = { 0x04, 0x28, 0x28, 0x4D, 0x4D, 0x70, 0x70 };
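/* Decode the DC coefficients of all blocks in a slice; each DC value is coded
 * as a difference against the previous block's DC. */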
static av_always_inline void decode_dc_coeffs(GetBitContext *gb, int16_t *out,
                                              int blocks_per_slice)
{
    int16_t prev_dc;
    int code, i, sign;

    OPEN_READER(re, gb);

    DECODE_CODEWORD(code, FIRST_DC_CB);
    prev_dc = TOSIGNED(code);
    out[0] = prev_dc;

    out += 64; // dc coeff for the next block

    code = 5;
    sign = 0;
    for (i = 1; i < blocks_per_slice; i++, out += 64) {
        DECODE_CODEWORD(code, dc_codebook[FFMIN(code, 6U)]);
        if (code) sign ^= -(code & 1);
        else      sign  = 0;
        prev_dc += (((code + 1) >> 1) ^ sign) - sign;
        out[0] = prev_dc;
    }
    CLOSE_READER(re, gb);
}
// adaptive codebook switching lut according to previous run/level values
static const uint8_t run_to_cb[16] = { 0x06, 0x06, 0x05, 0x05, 0x04, 0x29, 0x29, 0x29, 0x29, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x4C };
static const uint8_t lev_to_cb[10] = { 0x04, 0x0A, 0x05, 0x06, 0x04, 0x28, 0x28, 0x28, 0x28, 0x4C };
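/* Decode the AC coefficients of a slice as (run, level) pairs; the codebook
 * for each codeword is chosen from the previous run/level value via the
 * tables above. */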
static av_always_inline int decode_ac_coeffs(AVCodecContext *avctx, GetBitContext *gb,
                                             int16_t *out, int blocks_per_slice)
{
    ProresContext *ctx = avctx->priv_data;
    int block_mask, sign;
    unsigned pos, run, level;
    int max_coeffs, i, bits_left;
    int log2_block_count = av_log2(blocks_per_slice);

    OPEN_READER(re, gb);
    UPDATE_CACHE(re, gb);
    run   = 4;
    level = 2;

    max_coeffs = 64 << log2_block_count;
    block_mask = blocks_per_slice - 1;

    for (pos = block_mask;;) {
        bits_left = gb->size_in_bits - re_index;
        if (!bits_left || (bits_left < 32 && !SHOW_UBITS(re, gb, bits_left)))
            break;

        DECODE_CODEWORD(run, run_to_cb[FFMIN(run, 15)]);
        pos += run + 1;
        if (pos >= max_coeffs) {
            av_log(avctx, AV_LOG_ERROR, "ac tex damaged %d, %d\n", pos, max_coeffs);
            return AVERROR_INVALIDDATA;
        }

        DECODE_CODEWORD(level, lev_to_cb[FFMIN(level, 9)]);
        level += 1;

        i = pos >> log2_block_count;

        sign = SHOW_SBITS(re, gb, 1);
        SKIP_BITS(re, gb, 1);
        out[((pos & block_mask) << 6) + ctx->scan[i]] = ((level ^ sign) - sign);
    }

    CLOSE_READER(re, gb);
    return 0;
}
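/* Decode one luma slice: entropy-decode the DC and AC coefficients of the four
 * luma blocks of every macroblock, then dequantise and inverse-transform them
 * into the destination plane via prodsp.idct_put(). */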
static int decode_slice_luma(AVCodecContext *avctx, SliceContext *slice,
                             uint16_t *dst, int dst_stride,
                             const uint8_t *buf, unsigned buf_size,
                             const int16_t *qmat)
{
    ProresContext *ctx = avctx->priv_data;
    LOCAL_ALIGNED_16(int16_t, blocks, [8*4*64]);
    int16_t *block;
    GetBitContext gb;
    int i, blocks_per_slice = slice->mb_count << 2;
    int ret;

    for (i = 0; i < blocks_per_slice; i++)
        ctx->bdsp.clear_block(blocks + (i << 6));

    init_get_bits(&gb, buf, buf_size << 3);

    decode_dc_coeffs(&gb, blocks, blocks_per_slice);
    if ((ret = decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice)) < 0)
        return ret;

    block = blocks;
    for (i = 0; i < slice->mb_count; i++) {
        ctx->prodsp.idct_put(dst,                     dst_stride, block + (0 << 6), qmat);
        ctx->prodsp.idct_put(dst + 8,                 dst_stride, block + (1 << 6), qmat);
        ctx->prodsp.idct_put(dst + 4*dst_stride,      dst_stride, block + (2 << 6), qmat);
        ctx->prodsp.idct_put(dst + 4*dst_stride + 8,  dst_stride, block + (3 << 6), qmat);
        block += 4*64;
        dst += 16;
    }
    return 0;
}
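/* Decode one chroma slice; depending on log2_blocks_per_mb each macroblock
 * carries two (4:2:2) or four (4:4:4) chroma blocks. */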
static int decode_slice_chroma(AVCodecContext *avctx, SliceContext *slice,
                               uint16_t *dst, int dst_stride,
                               const uint8_t *buf, unsigned buf_size,
                               const int16_t *qmat, int log2_blocks_per_mb)
{
    ProresContext *ctx = avctx->priv_data;
    LOCAL_ALIGNED_16(int16_t, blocks, [8*4*64]);
    int16_t *block;
    GetBitContext gb;
    int i, j, blocks_per_slice = slice->mb_count << log2_blocks_per_mb;
    int ret;

    for (i = 0; i < blocks_per_slice; i++)
        ctx->bdsp.clear_block(blocks + (i << 6));

    init_get_bits(&gb, buf, buf_size << 3);

    decode_dc_coeffs(&gb, blocks, blocks_per_slice);
    if ((ret = decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice)) < 0)
        return ret;

    block = blocks;
    for (i = 0; i < slice->mb_count; i++) {
        for (j = 0; j < log2_blocks_per_mb; j++) {
            ctx->prodsp.idct_put(dst,                  dst_stride, block + (0 << 6), qmat);
            ctx->prodsp.idct_put(dst + 4*dst_stride,   dst_stride, block + (1 << 6), qmat);
            block += 2*64;
            dst += 8;
        }
    }
    return 0;
}
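/* Expand the run-length / difference coded alpha data to 10-bit samples
 * (the coded source depth is either 8 or 16 bits). */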
static void unpack_alpha(GetBitContext *gb, uint16_t *dst, int num_coeffs,
                         const int num_bits)
{
    const int mask = (1 << num_bits) - 1;
    int i, idx, val, alpha_val;

    idx       = 0;
    alpha_val = mask;

    do {
        do {
            if (get_bits1(gb)) {
                val = get_bits(gb, num_bits);
            } else {
                int sign;
                val  = get_bits(gb, num_bits == 16 ? 7 : 4);
                sign = val & 1;
                val  = (val + 2) >> 1;
                if (sign)
                    val = -val;
            }
            alpha_val = (alpha_val + val) & mask;
            if (num_bits == 16) {
                dst[idx++] = alpha_val >> 6;
            } else {
                dst[idx++] = (alpha_val << 2) | (alpha_val >> 6);
            }
            if (idx >= num_coeffs)
                break;
        } while (get_bits_left(gb) > 0 && get_bits1(gb));
        val = get_bits(gb, 4);
        if (!val)
            val = get_bits(gb, 11);
        if (idx + val > num_coeffs)
            val = num_coeffs - idx;
        if (num_bits == 16) {
            for (i = 0; i < val; i++)
                dst[idx++] = alpha_val >> 6;
        } else {
            for (i = 0; i < val; i++)
                dst[idx++] = (alpha_val << 2) | (alpha_val >> 6);
        }
    } while (idx < num_coeffs);
}
/**
 * Decode alpha slice plane.
 */
static void decode_slice_alpha(ProresContext *ctx,
                               uint16_t *dst, int dst_stride,
                               const uint8_t *buf, int buf_size,
                               int blocks_per_slice)
{
    GetBitContext gb;
    int i;
    LOCAL_ALIGNED_16(int16_t, blocks, [8*4*64]);
    int16_t *block;

    for (i = 0; i < blocks_per_slice << 2; i++)
        ctx->bdsp.clear_block(blocks + (i << 6));

    init_get_bits(&gb, buf, buf_size << 3);

    if (ctx->alpha_info == 2) {
        unpack_alpha(&gb, blocks, blocks_per_slice * 4 * 64, 16);
    } else {
        unpack_alpha(&gb, blocks, blocks_per_slice * 4 * 64, 8);
    }

    block = blocks;
    for (i = 0; i < 16; i++) {
        memcpy(dst, block, 16 * blocks_per_slice * sizeof(*dst));
        dst   += dst_stride >> 1;
        block += 16 * blocks_per_slice;
    }
}
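/* Decode a single slice (one job of the slice-threaded picture decode):
 * parse the slice header, scale the quantisation matrices by the slice qscale,
 * then decode the luma, chroma and (if present) alpha planes. */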
static int decode_slice_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
    ProresContext *ctx = avctx->priv_data;
    SliceContext *slice = &ctx->slices[jobnr];
    const uint8_t *buf = slice->data;
    AVFrame *pic = ctx->frame;
    int i, hdr_size, qscale, log2_chroma_blocks_per_mb;
    int luma_stride, chroma_stride;
    int y_data_size, u_data_size, v_data_size, a_data_size;
    uint8_t *dest_y, *dest_u, *dest_v, *dest_a;
    int16_t qmat_luma_scaled[64];
    int16_t qmat_chroma_scaled[64];
    int mb_x_shift;
    int ret;

    slice->ret = -1;
    //av_log(avctx, AV_LOG_INFO, "slice %d mb width %d mb x %d y %d\n",
    //       jobnr, slice->mb_count, slice->mb_x, slice->mb_y);

    // slice header
    hdr_size = buf[0] >> 3;
    qscale   = av_clip(buf[1], 1, 224);
    qscale   = qscale > 128 ? (qscale - 96) << 2 : qscale;
    y_data_size = AV_RB16(buf + 2);
    u_data_size = AV_RB16(buf + 4);
    v_data_size = slice->data_size - y_data_size - u_data_size - hdr_size;
    if (hdr_size > 7) v_data_size = AV_RB16(buf + 6);
    a_data_size = slice->data_size - y_data_size - u_data_size -
                  v_data_size - hdr_size;

    if (y_data_size < 0 || u_data_size < 0 || v_data_size < 0
        || hdr_size + y_data_size + u_data_size + v_data_size > slice->data_size) {
        av_log(avctx, AV_LOG_ERROR, "invalid plane data size\n");
        return AVERROR_INVALIDDATA;
    }

    buf += hdr_size;

    for (i = 0; i < 64; i++) {
        qmat_luma_scaled  [i] = ctx->qmat_luma  [i] * qscale;
        qmat_chroma_scaled[i] = ctx->qmat_chroma[i] * qscale;
    }

    if (ctx->frame_type == 0) {
        luma_stride   = pic->linesize[0];
        chroma_stride = pic->linesize[1];
    } else {
        luma_stride   = pic->linesize[0] << 1;
        chroma_stride = pic->linesize[1] << 1;
    }

    if (avctx->pix_fmt == AV_PIX_FMT_YUV444P10 || avctx->pix_fmt == AV_PIX_FMT_YUVA444P10) {
        mb_x_shift = 5;
        log2_chroma_blocks_per_mb = 2;
    } else {
        mb_x_shift = 4;
        log2_chroma_blocks_per_mb = 1;
    }

    dest_y = pic->data[0] + (slice->mb_y << 4) * luma_stride   + (slice->mb_x << 5);
    dest_u = pic->data[1] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);
    dest_v = pic->data[2] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);
    dest_a = pic->data[3] + (slice->mb_y << 4) * luma_stride   + (slice->mb_x << 5);

    if (ctx->frame_type && ctx->first_field ^ ctx->frame->top_field_first) {
        dest_y += pic->linesize[0];
        dest_u += pic->linesize[1];
        dest_v += pic->linesize[2];
        dest_a += pic->linesize[3];
    }

    ret = decode_slice_luma(avctx, slice, (uint16_t*)dest_y, luma_stride,
                            buf, y_data_size, qmat_luma_scaled);
    if (ret < 0)
        return ret;

    if (!(avctx->flags & AV_CODEC_FLAG_GRAY)) {
        ret = decode_slice_chroma(avctx, slice, (uint16_t*)dest_u, chroma_stride,
                                  buf + y_data_size, u_data_size,
                                  qmat_chroma_scaled, log2_chroma_blocks_per_mb);
        if (ret < 0)
            return ret;

        ret = decode_slice_chroma(avctx, slice, (uint16_t*)dest_v, chroma_stride,
                                  buf + y_data_size + u_data_size, v_data_size,
                                  qmat_chroma_scaled, log2_chroma_blocks_per_mb);
        if (ret < 0)
            return ret;
    }

    /* decode alpha plane if available */
    if (ctx->alpha_info && pic->data[3] && a_data_size)
        decode_slice_alpha(ctx, (uint16_t*)dest_a, luma_stride,
                           buf + y_data_size + u_data_size + v_data_size,
                           a_data_size, slice->mb_count);

    slice->ret = 0;
    return 0;
}
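/* Decode all slices of the current picture in parallel and report the first
 * slice error, if any. */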
static int decode_picture(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;
    int i;

    avctx->execute2(avctx, decode_slice_thread, NULL, NULL, ctx->slice_count);

    for (i = 0; i < ctx->slice_count; i++)
        if (ctx->slices[i].ret < 0)
            return ctx->slices[i].ret;

    return 0;
}
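/* Decode one ProRes frame: validate the 'icpf' atom, parse the frame header,
 * allocate the output frame and decode one picture (two for interlaced content). */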
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    ProresContext *ctx = avctx->priv_data;
    AVFrame *frame = data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int frame_hdr_size, pic_size, ret;

    if (buf_size < 28 || AV_RL32(buf + 4) != AV_RL32("icpf")) {
        av_log(avctx, AV_LOG_ERROR, "invalid frame header\n");
        return AVERROR_INVALIDDATA;
    }

    ctx->frame = frame;
    ctx->frame->pict_type = AV_PICTURE_TYPE_I;
    ctx->frame->key_frame = 1;
    ctx->first_field = 1;

    buf += 8;
    buf_size -= 8;

    frame_hdr_size = decode_frame_header(ctx, buf, buf_size, avctx);
    if (frame_hdr_size < 0)
        return frame_hdr_size;

    buf += frame_hdr_size;
    buf_size -= frame_hdr_size;

    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;

 decode_picture:
    pic_size = decode_picture_header(avctx, buf, buf_size);
    if (pic_size < 0) {
        av_log(avctx, AV_LOG_ERROR, "error decoding picture header\n");
        return pic_size;
    }

    if ((ret = decode_picture(avctx)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "error decoding picture\n");
        return ret;
    }

    buf += pic_size;
    buf_size -= pic_size;

    if (ctx->frame_type && buf_size > 0 && ctx->first_field) {
        ctx->first_field = 0;
        goto decode_picture;
    }

    *got_frame = 1;

    return avpkt->size;
}
static av_cold int decode_close(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;

    av_freep(&ctx->slices);

    return 0;
}
AVCodec ff_prores_decoder = {
    .name           = "prores",
    .long_name      = NULL_IF_CONFIG_SMALL("ProRes"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_PRORES,
    .priv_data_size = sizeof(ProresContext),
    .init           = decode_init,
    .close          = decode_close,
    .decode         = decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS,
};