You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

771 lines
25KB

  1. /*
  2. * Copyright (c) 2010-2011 Maxim Poliakovski
  3. * Copyright (c) 2010-2011 Elvis Presley
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
* Known FOURCCs: 'apch' (HQ), 'apcn' (SD), 'apcs' (LT), 'apco' (Proxy), 'ap4h' (4444), 'ap4x' (XQ)
  24. */
  25. //#define DEBUG
  26. #define LONG_BITSTREAM_READER
  27. #include "libavutil/internal.h"
  28. #include "avcodec.h"
  29. #include "get_bits.h"
  30. #include "idctdsp.h"
  31. #include "internal.h"
  32. #include "profiles.h"
  33. #include "simple_idct.h"
  34. #include "proresdec.h"
  35. #include "proresdata.h"
  36. #include "thread.h"
  37. static void permute(uint8_t *dst, const uint8_t *src, const uint8_t permutation[64])
  38. {
  39. int i;
  40. for (i = 0; i < 64; i++)
  41. dst[i] = permutation[src[i]];
  42. }
/**
 * Unpack run/delta coded alpha samples into 10-bit output values.
 *
 * Values are coded as deltas against the running alpha value, followed by
 * optional runs of repeats. num_bits is the stored alpha depth (the code
 * distinguishes 16 from non-16, i.e. 8); the output is always rescaled to
 * 10 bits per sample.
 *
 * @param gb         bitstream reader positioned at the alpha data
 * @param dst        output buffer for num_coeffs 10-bit samples
 * @param num_coeffs total number of samples to produce
 * @param num_bits   stored sample depth (8 or 16)
 */
static void unpack_alpha_10(GetBitContext *gb, uint16_t *dst, int num_coeffs,
                            const int num_bits)
{
    const int mask = (1 << num_bits) - 1;
    int i, idx, val, alpha_val;

    idx = 0;
    alpha_val = mask; /* running value starts at the maximum */

    do {
        do {
            /* one flag bit: full-width delta, or a short signed delta */
            if (get_bits1(gb)) {
                val = get_bits(gb, num_bits);
            } else {
                int sign;
                val  = get_bits(gb, num_bits == 16 ? 7 : 4);
                sign = val & 1;
                val  = (val + 2) >> 1;
                if (sign)
                    val = -val;
            }
            alpha_val = (alpha_val + val) & mask;
            /* emit the updated value, converted to 10 bits:
             * 16-bit source is truncated, 8-bit source is replicated up */
            if (num_bits == 16) {
                dst[idx++] = alpha_val >> 6;
            } else {
                dst[idx++] = (alpha_val << 2) | (alpha_val >> 6);
            }
            if (idx >= num_coeffs)
                break;
        } while (get_bits_left(gb)>0 && get_bits1(gb));
        /* run of repeated samples: 4-bit count, 0 escapes to 11 bits */
        val = get_bits(gb, 4);
        if (!val)
            val = get_bits(gb, 11);
        if (idx + val > num_coeffs)
            val = num_coeffs - idx; /* clamp runs at the buffer end */
        if (num_bits == 16) {
            for (i = 0; i < val; i++)
                dst[idx++] = alpha_val >> 6;
        } else {
            for (i = 0; i < val; i++)
                dst[idx++] = (alpha_val << 2) | (alpha_val >> 6);
        }
    } while (idx < num_coeffs);
}
  85. static av_cold int decode_init(AVCodecContext *avctx)
  86. {
  87. int ret = 0;
  88. ProresContext *ctx = avctx->priv_data;
  89. uint8_t idct_permutation[64];
  90. avctx->bits_per_raw_sample = 10;
  91. switch (avctx->codec_tag) {
  92. case MKTAG('a','p','c','o'):
  93. avctx->profile = FF_PROFILE_PRORES_PROXY;
  94. break;
  95. case MKTAG('a','p','c','s'):
  96. avctx->profile = FF_PROFILE_PRORES_LT;
  97. break;
  98. case MKTAG('a','p','c','n'):
  99. avctx->profile = FF_PROFILE_PRORES_STANDARD;
  100. break;
  101. case MKTAG('a','p','c','h'):
  102. avctx->profile = FF_PROFILE_PRORES_HQ;
  103. break;
  104. case MKTAG('a','p','4','h'):
  105. avctx->profile = FF_PROFILE_PRORES_4444;
  106. break;
  107. case MKTAG('a','p','4','x'):
  108. avctx->profile = FF_PROFILE_PRORES_XQ;
  109. break;
  110. default:
  111. avctx->profile = FF_PROFILE_UNKNOWN;
  112. av_log(avctx, AV_LOG_WARNING, "Unknown prores profile %d\n", avctx->codec_tag);
  113. }
  114. ff_blockdsp_init(&ctx->bdsp, avctx);
  115. ret = ff_proresdsp_init(&ctx->prodsp, avctx);
  116. if (ret < 0) {
  117. av_log(avctx, AV_LOG_ERROR, "Fail to init proresdsp for bits per raw sample %d\n", avctx->bits_per_raw_sample);
  118. return ret;
  119. }
  120. ff_init_scantable_permutation(idct_permutation,
  121. ctx->prodsp.idct_permutation_type);
  122. permute(ctx->progressive_scan, ff_prores_progressive_scan, idct_permutation);
  123. permute(ctx->interlaced_scan, ff_prores_interlaced_scan, idct_permutation);
  124. if (avctx->bits_per_raw_sample == 10){
  125. ctx->unpack_alpha = unpack_alpha_10;
  126. } else {
  127. av_log(avctx, AV_LOG_ERROR, "Fail to set unpack_alpha for bits per raw sample %d\n", avctx->bits_per_raw_sample);
  128. return AVERROR_BUG;
  129. }
  130. return ret;
  131. }
/**
 * Parse the ProRes frame header.
 *
 * Extracts frame type (progressive/interlaced), alpha mode, colorimetry,
 * the pixel format and optional custom quantization matrices.
 *
 * @param ctx       decoder context, updated on success
 * @param buf       frame header bytes, starting at the header-size field
 * @param data_size bytes available in buf
 * @param avctx     codec context; pix_fmt and color properties are set here
 * @return header size in bytes on success, negative AVERROR on failure
 */
static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
                               const int data_size, AVCodecContext *avctx)
{
    int hdr_size, width, height, flags;
    int version;
    const uint8_t *ptr;

    hdr_size = AV_RB16(buf);
    ff_dlog(avctx, "header size %d\n", hdr_size);
    if (hdr_size > data_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong header size\n");
        return AVERROR_INVALIDDATA;
    }

    version = AV_RB16(buf + 2);
    ff_dlog(avctx, "%.4s version %d\n", buf+4, version);
    if (version > 1) {
        av_log(avctx, AV_LOG_ERROR, "unsupported version: %d\n", version);
        return AVERROR_PATCHWELCOME;
    }

    width  = AV_RB16(buf + 8);
    height = AV_RB16(buf + 10);
    /* mid-stream resolution changes are not supported */
    if (width != avctx->width || height != avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "picture resolution change: %dx%d -> %dx%d\n",
               avctx->width, avctx->height, width, height);
        return AVERROR_PATCHWELCOME;
    }

    ctx->frame_type = (buf[12] >> 2) & 3; /* 0 = progressive, 1/2 = interlaced field order */
    ctx->alpha_info = buf[17] & 0xf;      /* 0 = none, 1 = 8-bit, 2 = 16-bit (see decode_slice_alpha) */

    if (ctx->alpha_info > 2) {
        av_log(avctx, AV_LOG_ERROR, "Invalid alpha mode %d\n", ctx->alpha_info);
        return AVERROR_INVALIDDATA;
    }
    if (avctx->skip_alpha) ctx->alpha_info = 0;

    ff_dlog(avctx, "frame type %d\n", ctx->frame_type);

    if (ctx->frame_type == 0) {
        ctx->scan = ctx->progressive_scan; // permuted
    } else {
        ctx->scan = ctx->interlaced_scan; // permuted
        ctx->frame->interlaced_frame = 1;
        ctx->frame->top_field_first = ctx->frame_type == 1;
    }

    /* top bits of buf[12] select 4:4:4 vs 4:2:2 chroma sampling */
    if (ctx->alpha_info) {
        avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUVA444P10 : AV_PIX_FMT_YUVA422P10;
    } else {
        avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_YUV422P10;
    }

    avctx->color_primaries = buf[14];
    avctx->color_trc       = buf[15];
    avctx->colorspace      = buf[16];
    avctx->color_range     = AVCOL_RANGE_MPEG;

    ptr   = buf + 20;
    flags = buf[19];
    ff_dlog(avctx, "flags %x\n", flags);

    /* flag bit 1: a 64-byte custom luma quant matrix follows */
    if (flags & 2) {
        if(buf + data_size - ptr < 64) {
            av_log(avctx, AV_LOG_ERROR, "Header truncated\n");
            return AVERROR_INVALIDDATA;
        }
        /* store the matrix in IDCT-permuted order */
        permute(ctx->qmat_luma, ctx->prodsp.idct_permutation, ptr);
        ptr += 64;
    } else {
        memset(ctx->qmat_luma, 4, 64); /* default flat matrix */
    }

    /* flag bit 0: a 64-byte custom chroma quant matrix follows */
    if (flags & 1) {
        if(buf + data_size - ptr < 64) {
            av_log(avctx, AV_LOG_ERROR, "Header truncated\n");
            return AVERROR_INVALIDDATA;
        }
        permute(ctx->qmat_chroma, ctx->prodsp.idct_permutation, ptr);
    } else {
        memset(ctx->qmat_chroma, 4, 64);
    }

    return hdr_size;
}
/**
 * Parse the picture header and build the per-slice table.
 *
 * Computes the macroblock grid, (re)allocates ctx->slices and fills in each
 * slice's position, width and data pointer from the slice-size index that
 * follows the header.
 *
 * @return picture data size in bytes on success, negative AVERROR otherwise
 */
static int decode_picture_header(AVCodecContext *avctx, const uint8_t *buf, const int buf_size)
{
    ProresContext *ctx = avctx->priv_data;
    int i, hdr_size, slice_count;
    unsigned pic_data_size;
    int log2_slice_mb_width, log2_slice_mb_height;
    int slice_mb_count, mb_x, mb_y;
    const uint8_t *data_ptr, *index_ptr;

    hdr_size = buf[0] >> 3;
    if (hdr_size < 8 || hdr_size > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong picture header size\n");
        return AVERROR_INVALIDDATA;
    }

    pic_data_size = AV_RB32(buf + 1);
    if (pic_data_size > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong picture data size\n");
        return AVERROR_INVALIDDATA;
    }

    log2_slice_mb_width  = buf[7] >> 4;
    log2_slice_mb_height = buf[7] & 0xF;
    /* only 1-MB-high slices up to 8 MBs wide are supported */
    if (log2_slice_mb_width > 3 || log2_slice_mb_height) {
        av_log(avctx, AV_LOG_ERROR, "unsupported slice resolution: %dx%d\n",
               1 << log2_slice_mb_width, 1 << log2_slice_mb_height);
        return AVERROR_INVALIDDATA;
    }

    ctx->mb_width = (avctx->width + 15) >> 4;
    if (ctx->frame_type)
        /* interlaced: each field picture covers half the frame height */
        ctx->mb_height = (avctx->height + 31) >> 5;
    else
        ctx->mb_height = (avctx->height + 15) >> 4;

    // QT ignores the written value
    // slice_count = AV_RB16(buf + 5);
    /* recompute: full-width slices per row plus one progressively narrower
     * slice for each set bit of the leftover macroblock count */
    slice_count = ctx->mb_height * ((ctx->mb_width >> log2_slice_mb_width) +
                                    av_popcount(ctx->mb_width & (1 << log2_slice_mb_width) - 1));

    if (ctx->slice_count != slice_count || !ctx->slices) {
        av_freep(&ctx->slices);
        ctx->slice_count = 0;
        ctx->slices = av_mallocz_array(slice_count, sizeof(*ctx->slices));
        if (!ctx->slices)
            return AVERROR(ENOMEM);
        ctx->slice_count = slice_count;
    }

    if (!slice_count)
        return AVERROR(EINVAL);

    if (hdr_size + slice_count*2 > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong slice count\n");
        return AVERROR_INVALIDDATA;
    }

    // parse slice information
    index_ptr = buf + hdr_size;
    data_ptr  = index_ptr + slice_count*2; /* slice data follows the 16-bit size table */

    slice_mb_count = 1 << log2_slice_mb_width;
    mb_x = 0;
    mb_y = 0;

    for (i = 0; i < slice_count; i++) {
        SliceContext *slice = &ctx->slices[i];

        slice->data = data_ptr;
        data_ptr += AV_RB16(index_ptr + i*2);

        /* halve the slice width until it fits at the right picture edge */
        while (ctx->mb_width - mb_x < slice_mb_count)
            slice_mb_count >>= 1;

        slice->mb_x = mb_x;
        slice->mb_y = mb_y;
        slice->mb_count = slice_mb_count;
        slice->data_size = data_ptr - slice->data;

        if (slice->data_size < 6) {
            av_log(avctx, AV_LOG_ERROR, "error, wrong slice data size\n");
            return AVERROR_INVALIDDATA;
        }

        mb_x += slice_mb_count;
        if (mb_x == ctx->mb_width) {
            /* row complete: reset to full-width slices for the next row */
            slice_mb_count = 1 << log2_slice_mb_width;
            mb_x = 0;
            mb_y++;
        }
        if (data_ptr > buf + buf_size) {
            av_log(avctx, AV_LOG_ERROR, "error, slice out of bounds\n");
            return AVERROR_INVALIDDATA;
        }
    }

    /* all rows must be exactly filled */
    if (mb_x || mb_y != ctx->mb_height) {
        av_log(avctx, AV_LOG_ERROR, "error wrong mb count y %d h %d\n",
               mb_y, ctx->mb_height);
        return AVERROR_INVALIDDATA;
    }

    return pic_data_size;
}
/*
 * Decode one codeword from a hybrid Rice / exp-Golomb code.
 *
 * The 8-bit codebook descriptor packs three fields:
 *   bits 0-1: switch_bits -- unary prefix length at which the code switches
 *                            from Rice to exp-Golomb
 *   bits 2-4: exp_order   -- exp-Golomb order
 *   bits 5-7: rice_order  -- Rice code order
 *
 * SKIP selects the skip macro (SKIP_BITS or LAST_SKIP_BITS) applied to the
 * final bit consumption; requires an OPEN_READER(re, gb) context in the
 * caller. Returns AVERROR_INVALIDDATA from the enclosing function if the
 * codeword would exceed the bitstream cache.
 */
#define DECODE_CODEWORD(val, codebook, SKIP) \
do { \
unsigned int rice_order, exp_order, switch_bits; \
unsigned int q, buf, bits; \
\
UPDATE_CACHE(re, gb); \
buf = GET_CACHE(re, gb); \
\
/* number of bits to switch between rice and exp golomb */ \
switch_bits = codebook & 3; \
rice_order = codebook >> 5; \
exp_order = (codebook >> 2) & 7; \
\
q = 31 - av_log2(buf); \
\
if (q > switch_bits) { /* exp golomb */ \
bits = exp_order - switch_bits + (q<<1); \
if (bits > FFMIN(MIN_CACHE_BITS, 31)) \
return AVERROR_INVALIDDATA; \
val = SHOW_UBITS(re, gb, bits) - (1 << exp_order) + \
((switch_bits + 1) << rice_order); \
SKIP(re, gb, bits); \
} else if (rice_order) { \
SKIP_BITS(re, gb, q+1); \
val = (q << rice_order) + SHOW_UBITS(re, gb, rice_order); \
SKIP(re, gb, rice_order); \
} else { \
val = q; \
SKIP(re, gb, q+1); \
} \
} while (0)

/* map unsigned codeword to signed value: 0,1,2,3,... -> 0,-1,1,-2,... */
#define TOSIGNED(x) (((x) >> 1) ^ (-((x) & 1)))

/* codebook descriptor for the first DC coefficient of a slice */
#define FIRST_DC_CB 0xB8

/* DC codebook chosen from the previous DC codeword (clamped to 6) */
static const uint8_t dc_codebook[7] = { 0x04, 0x28, 0x28, 0x4D, 0x4D, 0x70, 0x70};
/**
 * Decode DC coefficients for all blocks of a slice.
 *
 * The first DC is coded standalone with FIRST_DC_CB; subsequent DCs are
 * coded as signed deltas against the previous block's DC, with the
 * codebook adapted to the previous codeword value. One coefficient is
 * written per 64-entry block in 'out'.
 *
 * @return 0 on success, AVERROR_INVALIDDATA on a malformed codeword
 *         (returned from within DECODE_CODEWORD)
 */
static av_always_inline int decode_dc_coeffs(GetBitContext *gb, int16_t *out,
                                             int blocks_per_slice)
{
    int16_t prev_dc;
    int code, i, sign;

    OPEN_READER(re, gb);

    DECODE_CODEWORD(code, FIRST_DC_CB, LAST_SKIP_BITS);
    prev_dc = TOSIGNED(code);
    out[0] = prev_dc;

    out += 64; // dc coeff for the next block

    code = 5;
    sign = 0;
    for (i = 1; i < blocks_per_slice; i++, out += 64) {
        DECODE_CODEWORD(code, dc_codebook[FFMIN(code, 6U)], LAST_SKIP_BITS);
        /* sign alternates on odd codes and resets on zero */
        if(code) sign ^= -(code & 1);
        else sign = 0;
        prev_dc += (((code + 1) >> 1) ^ sign) - sign;
        out[0] = prev_dc;
    }
    CLOSE_READER(re, gb);
    return 0;
}
  347. // adaptive codebook switching lut according to previous run/level values
  348. static const uint8_t run_to_cb[16] = { 0x06, 0x06, 0x05, 0x05, 0x04, 0x29, 0x29, 0x29, 0x29, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x4C };
  349. static const uint8_t lev_to_cb[10] = { 0x04, 0x0A, 0x05, 0x06, 0x04, 0x28, 0x28, 0x28, 0x28, 0x4C };
  350. static av_always_inline int decode_ac_coeffs(AVCodecContext *avctx, GetBitContext *gb,
  351. int16_t *out, int blocks_per_slice)
  352. {
  353. ProresContext *ctx = avctx->priv_data;
  354. int block_mask, sign;
  355. unsigned pos, run, level;
  356. int max_coeffs, i, bits_left;
  357. int log2_block_count = av_log2(blocks_per_slice);
  358. OPEN_READER(re, gb);
  359. UPDATE_CACHE(re, gb); \
  360. run = 4;
  361. level = 2;
  362. max_coeffs = 64 << log2_block_count;
  363. block_mask = blocks_per_slice - 1;
  364. for (pos = block_mask;;) {
  365. bits_left = gb->size_in_bits - re_index;
  366. if (!bits_left || (bits_left < 32 && !SHOW_UBITS(re, gb, bits_left)))
  367. break;
  368. DECODE_CODEWORD(run, run_to_cb[FFMIN(run, 15)], LAST_SKIP_BITS);
  369. pos += run + 1;
  370. if (pos >= max_coeffs) {
  371. av_log(avctx, AV_LOG_ERROR, "ac tex damaged %d, %d\n", pos, max_coeffs);
  372. return AVERROR_INVALIDDATA;
  373. }
  374. DECODE_CODEWORD(level, lev_to_cb[FFMIN(level, 9)], SKIP_BITS);
  375. level += 1;
  376. i = pos >> log2_block_count;
  377. sign = SHOW_SBITS(re, gb, 1);
  378. SKIP_BITS(re, gb, 1);
  379. out[((pos & block_mask) << 6) + ctx->scan[i]] = ((level ^ sign) - sign);
  380. }
  381. CLOSE_READER(re, gb);
  382. return 0;
  383. }
/**
 * Decode the luma plane of one slice.
 *
 * Each macroblock carries four 8x8 luma blocks (mb_count << 2 per slice):
 * DC and AC coefficients are entropy-decoded for all blocks, then each
 * block is inverse-transformed into its quadrant of the macroblock.
 *
 * @param dst        output plane position of the slice (10-bit samples)
 * @param dst_stride plane stride in bytes (dst is uint16_t*, hence the
 *                   4*dst_stride offsets reach the lower half of the MB)
 * @param qmat       quantization matrix pre-scaled by the slice qscale
 * @return 0 on success, negative AVERROR on entropy decoding failure
 */
static int decode_slice_luma(AVCodecContext *avctx, SliceContext *slice,
                             uint16_t *dst, int dst_stride,
                             const uint8_t *buf, unsigned buf_size,
                             const int16_t *qmat)
{
    ProresContext *ctx = avctx->priv_data;
    LOCAL_ALIGNED_32(int16_t, blocks, [8*4*64]);
    int16_t *block;
    GetBitContext gb;
    int i, blocks_per_slice = slice->mb_count<<2;
    int ret;

    for (i = 0; i < blocks_per_slice; i++)
        ctx->bdsp.clear_block(blocks+(i<<6));

    init_get_bits(&gb, buf, buf_size << 3);

    if ((ret = decode_dc_coeffs(&gb, blocks, blocks_per_slice)) < 0)
        return ret;
    if ((ret = decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice)) < 0)
        return ret;

    block = blocks;
    for (i = 0; i < slice->mb_count; i++) {
        /* four 8x8 transforms per MB: top-left, top-right, bottom-left,
         * bottom-right */
        ctx->prodsp.idct_put(dst, dst_stride, block+(0<<6), qmat);
        ctx->prodsp.idct_put(dst +8, dst_stride, block+(1<<6), qmat);
        ctx->prodsp.idct_put(dst+4*dst_stride , dst_stride, block+(2<<6), qmat);
        ctx->prodsp.idct_put(dst+4*dst_stride+8, dst_stride, block+(3<<6), qmat);
        block += 4*64;
        dst += 16;
    }

    return 0;
}
/**
 * Decode one chroma plane of a slice.
 *
 * log2_blocks_per_mb is 1 for 4:2:2 and 2 for 4:4:4 (set in
 * decode_slice_thread). Each inner iteration places two vertically
 * stacked 8x8 transforms.
 *
 * @param dst        output plane position of the slice
 * @param dst_stride plane stride in bytes
 * @param qmat       quantization matrix pre-scaled by the slice qscale
 * @return 0 on success, negative AVERROR on entropy decoding failure
 */
static int decode_slice_chroma(AVCodecContext *avctx, SliceContext *slice,
                               uint16_t *dst, int dst_stride,
                               const uint8_t *buf, unsigned buf_size,
                               const int16_t *qmat, int log2_blocks_per_mb)
{
    ProresContext *ctx = avctx->priv_data;
    LOCAL_ALIGNED_32(int16_t, blocks, [8*4*64]);
    int16_t *block;
    GetBitContext gb;
    int i, j, blocks_per_slice = slice->mb_count << log2_blocks_per_mb;
    int ret;

    for (i = 0; i < blocks_per_slice; i++)
        ctx->bdsp.clear_block(blocks+(i<<6));

    init_get_bits(&gb, buf, buf_size << 3);

    if ((ret = decode_dc_coeffs(&gb, blocks, blocks_per_slice)) < 0)
        return ret;
    if ((ret = decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice)) < 0)
        return ret;

    block = blocks;
    for (i = 0; i < slice->mb_count; i++) {
        for (j = 0; j < log2_blocks_per_mb; j++) {
            /* two vertically stacked 8x8 transforms */
            ctx->prodsp.idct_put(dst, dst_stride, block+(0<<6), qmat);
            ctx->prodsp.idct_put(dst+4*dst_stride, dst_stride, block+(1<<6), qmat);
            block += 2*64;
            dst += 8;
        }
    }

    return 0;
}
/**
 * Decode alpha slice plane.
 *
 * Alpha is not transform-coded: samples are unpacked via ctx->unpack_alpha
 * (16-bit source when alpha_info == 2, 8-bit otherwise) and copied row by
 * row into the destination plane.
 */
static void decode_slice_alpha(ProresContext *ctx,
                               uint16_t *dst, int dst_stride,
                               const uint8_t *buf, int buf_size,
                               int blocks_per_slice)
{
    GetBitContext gb;
    int i;
    LOCAL_ALIGNED_32(int16_t, blocks, [8*4*64]);
    int16_t *block;

    for (i = 0; i < blocks_per_slice<<2; i++)
        ctx->bdsp.clear_block(blocks+(i<<6));

    init_get_bits(&gb, buf, buf_size << 3);

    if (ctx->alpha_info == 2) {
        ctx->unpack_alpha(&gb, blocks, blocks_per_slice * 4 * 64, 16);
    } else {
        ctx->unpack_alpha(&gb, blocks, blocks_per_slice * 4 * 64, 8);
    }

    block = blocks;

    /* copy 16 rows of 16*blocks_per_slice samples to the output plane */
    for (i = 0; i < 16; i++) {
        memcpy(dst, block, 16 * blocks_per_slice * sizeof(*dst));
        dst += dst_stride >> 1; /* stride is in bytes, dst is uint16_t */
        block += 16 * blocks_per_slice;
    }
}
/**
 * Decode one slice: parse the slice header, then decode luma, chroma and
 * (optionally) alpha planes into the target frame.
 *
 * Runs as an execute2() job; jobnr selects the slice. The per-slice result
 * is stored in slice->ret (-1 until the slice fully decodes) so that
 * decode_picture() can count failures.
 *
 * @return 0 on success, negative AVERROR on failure
 */
static int decode_slice_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
    ProresContext *ctx = avctx->priv_data;
    SliceContext *slice = &ctx->slices[jobnr];
    const uint8_t *buf = slice->data;
    AVFrame *pic = ctx->frame;
    int i, hdr_size, qscale, log2_chroma_blocks_per_mb;
    int luma_stride, chroma_stride;
    int y_data_size, u_data_size, v_data_size, a_data_size;
    uint8_t *dest_y, *dest_u, *dest_v, *dest_a;
    LOCAL_ALIGNED_16(int16_t, qmat_luma_scaled, [64]);
    LOCAL_ALIGNED_16(int16_t, qmat_chroma_scaled,[64]);
    int mb_x_shift;
    int ret;

    /* mark the slice failed until decoding completes */
    slice->ret = -1;
    //av_log(avctx, AV_LOG_INFO, "slice %d mb width %d mb x %d y %d\n",
    //       jobnr, slice->mb_count, slice->mb_x, slice->mb_y);

    // slice header
    hdr_size = buf[0] >> 3;
    qscale = av_clip(buf[1], 1, 224);
    /* values above 128 are remapped to a coarser scale: (q - 96) << 2 */
    qscale = qscale > 128 ? qscale - 96 << 2: qscale;
    y_data_size = AV_RB16(buf + 2);
    u_data_size = AV_RB16(buf + 4);
    /* V size is explicit only in longer headers; otherwise it is the rest */
    v_data_size = slice->data_size - y_data_size - u_data_size - hdr_size;
    if (hdr_size > 7) v_data_size = AV_RB16(buf + 6);
    a_data_size = slice->data_size - y_data_size - u_data_size -
                  v_data_size - hdr_size;

    if (y_data_size < 0 || u_data_size < 0 || v_data_size < 0
        || hdr_size+y_data_size+u_data_size+v_data_size > slice->data_size){
        av_log(avctx, AV_LOG_ERROR, "invalid plane data size\n");
        return AVERROR_INVALIDDATA;
    }

    buf += hdr_size;

    /* scale the quant matrices by the per-slice quantizer */
    for (i = 0; i < 64; i++) {
        qmat_luma_scaled [i] = ctx->qmat_luma [i] * qscale;
        qmat_chroma_scaled[i] = ctx->qmat_chroma[i] * qscale;
    }

    if (ctx->frame_type == 0) {
        luma_stride = pic->linesize[0];
        chroma_stride = pic->linesize[1];
    } else {
        /* interlaced: a field occupies every second frame line */
        luma_stride = pic->linesize[0] << 1;
        chroma_stride = pic->linesize[1] << 1;
    }

    if (avctx->pix_fmt == AV_PIX_FMT_YUV444P10 || avctx->pix_fmt == AV_PIX_FMT_YUVA444P10) {
        mb_x_shift = 5;
        log2_chroma_blocks_per_mb = 2;
    } else {
        mb_x_shift = 4;
        log2_chroma_blocks_per_mb = 1;
    }

    dest_y = pic->data[0] + (slice->mb_y << 4) * luma_stride + (slice->mb_x << 5);
    dest_u = pic->data[1] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);
    dest_v = pic->data[2] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);
    dest_a = pic->data[3] + (slice->mb_y << 4) * luma_stride + (slice->mb_x << 5);

    /* the second field starts one frame line below the first */
    if (ctx->frame_type && ctx->first_field ^ ctx->frame->top_field_first) {
        dest_y += pic->linesize[0];
        dest_u += pic->linesize[1];
        dest_v += pic->linesize[2];
        dest_a += pic->linesize[3];
    }

    ret = decode_slice_luma(avctx, slice, (uint16_t*)dest_y, luma_stride,
                            buf, y_data_size, qmat_luma_scaled);
    if (ret < 0)
        return ret;

    if (!(avctx->flags & AV_CODEC_FLAG_GRAY) && (u_data_size + v_data_size) > 0) {
        ret = decode_slice_chroma(avctx, slice, (uint16_t*)dest_u, chroma_stride,
                                  buf + y_data_size, u_data_size,
                                  qmat_chroma_scaled, log2_chroma_blocks_per_mb);
        if (ret < 0)
            return ret;
        ret = decode_slice_chroma(avctx, slice, (uint16_t*)dest_v, chroma_stride,
                                  buf + y_data_size + u_data_size, v_data_size,
                                  qmat_chroma_scaled, log2_chroma_blocks_per_mb);
        if (ret < 0)
            return ret;
    }
    else {
        /* gray output or no chroma data: fill chroma with the 10-bit
         * mid-range value 511 */
        size_t mb_max_x = slice->mb_count << (mb_x_shift - 1);
        size_t i, j;

        for (i = 0; i < 16; ++i)
            for (j = 0; j < mb_max_x; ++j) {
                *(uint16_t*)(dest_u + (i * chroma_stride) + (j << 1)) = 511;
                *(uint16_t*)(dest_v + (i * chroma_stride) + (j << 1)) = 511;
            }
    }

    /* decode alpha plane if available */
    if (ctx->alpha_info && pic->data[3] && a_data_size)
        decode_slice_alpha(ctx, (uint16_t*)dest_a, luma_stride,
                           buf + y_data_size + u_data_size + v_data_size,
                           a_data_size, slice->mb_count);

    slice->ret = 0;
    return 0;
}
  563. static int decode_picture(AVCodecContext *avctx)
  564. {
  565. ProresContext *ctx = avctx->priv_data;
  566. int i;
  567. int error = 0;
  568. avctx->execute2(avctx, decode_slice_thread, NULL, NULL, ctx->slice_count);
  569. for (i = 0; i < ctx->slice_count; i++)
  570. error += ctx->slices[i].ret < 0;
  571. if (error)
  572. ctx->frame->decode_error_flags = FF_DECODE_ERROR_INVALID_BITSTREAM;
  573. if (error < ctx->slice_count)
  574. return 0;
  575. return ctx->slices[0].ret;
  576. }
/**
 * Decode a complete ProRes frame from an AVPacket.
 *
 * Packet layout: 8-byte atom header (size + 'icpf' fourcc), frame header,
 * then one picture (progressive) or two field pictures (interlaced).
 *
 * @return consumed packet size on success, negative AVERROR on failure
 */
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    ProresContext *ctx = avctx->priv_data;
    ThreadFrame tframe = { .f = data };
    AVFrame *frame = data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int frame_hdr_size, pic_size, ret;

    if (buf_size < 28 || AV_RL32(buf + 4) != AV_RL32("icpf")) {
        av_log(avctx, AV_LOG_ERROR, "invalid frame header\n");
        return AVERROR_INVALIDDATA;
    }

    ctx->frame = frame;
    /* ProRes frames are coded independently */
    ctx->frame->pict_type = AV_PICTURE_TYPE_I;
    ctx->frame->key_frame = 1;
    ctx->first_field = 1;

    buf += 8;       /* skip the atom size + fourcc */
    buf_size -= 8;

    frame_hdr_size = decode_frame_header(ctx, buf, buf_size, avctx);
    if (frame_hdr_size < 0)
        return frame_hdr_size;

    buf += frame_hdr_size;
    buf_size -= frame_hdr_size;

    if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
        return ret;

 decode_picture:
    pic_size = decode_picture_header(avctx, buf, buf_size);
    if (pic_size < 0) {
        av_log(avctx, AV_LOG_ERROR, "error decoding picture header\n");
        return pic_size;
    }

    if ((ret = decode_picture(avctx)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "error decoding picture\n");
        return ret;
    }

    buf += pic_size;
    buf_size -= pic_size;

    /* interlaced streams carry the second field as a separate picture */
    if (ctx->frame_type && buf_size > 0 && ctx->first_field) {
        ctx->first_field = 0;
        goto decode_picture;
    }

    *got_frame = 1;

    return avpkt->size;
}
  622. #if HAVE_THREADS
  623. static int decode_init_thread_copy(AVCodecContext *avctx)
  624. {
  625. ProresContext *ctx = avctx->priv_data;
  626. ctx->slices = NULL;
  627. return 0;
  628. }
  629. #endif
  630. static av_cold int decode_close(AVCodecContext *avctx)
  631. {
  632. ProresContext *ctx = avctx->priv_data;
  633. av_freep(&ctx->slices);
  634. return 0;
  635. }
/* Decoder registration: slices decode independently (see execute2() in
 * decode_picture), enabling both slice and frame threading. */
AVCodec ff_prores_decoder = {
    .name             = "prores",
    .long_name        = NULL_IF_CONFIG_SMALL("ProRes (iCodec Pro)"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_PRORES,
    .priv_data_size   = sizeof(ProresContext),
    .init             = decode_init,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .close            = decode_close,
    .decode           = decode_frame,
    .capabilities     = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
    .profiles         = NULL_IF_CONFIG_SMALL(ff_prores_profiles),
};