/*
 * Copyright (c) 2010-2011 Maxim Poliakovski
 * Copyright (c) 2010-2011 Elvis Presley
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation;
 * version 2 of the License.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Known FOURCCs: 'apch' (HQ), 'apcn' (SD), 'apcs' (LT), 'apco' (Proxy), 'ap4h' (4444)
 */

//#define DEBUG

#define A32_BITSTREAM_READER

#include "avcodec.h"
#include "get_bits.h"
#include "dsputil.h"
#include "simple_idct.h"

typedef struct {
    const uint8_t *data;
    unsigned mb_x;
    unsigned mb_y;
    unsigned mb_count;
    unsigned data_size;
} SliceContext;

typedef struct {
    AVFrame frame;
    DSPContext dsp;
    int frame_type;              ///< 0 = progressive, 1 = tff, 2 = bff
    uint8_t qmat_luma[64];
    uint8_t qmat_chroma[64];
    SliceContext *slices;
    int slice_count;             ///< number of slices in the current picture
    unsigned mb_width;           ///< width of the current picture in mb
    unsigned mb_height;          ///< height of the current picture in mb
    uint8_t progressive_scan[64];
    uint8_t interlaced_scan[64];
    const uint8_t *scan;
    int first_field;
    void (*idct_put)(DCTELEM *, uint8_t *restrict, int);
} ProresContext;

static void permute(uint8_t *dst, const uint8_t *src, const uint8_t permutation[64])
{
    int i;
    for (i = 0; i < 64; i++)
        dst[i] = permutation[src[i]];
}

static av_always_inline void put_pixels(const DCTELEM *block, uint8_t *restrict pixels, int stride)
{
    int16_t *p = (int16_t*)pixels;
    int i, j;

    stride >>= 1;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            p[j] = av_clip(block[j], 4, 1019);
        }
        p += stride;
        block += 8;
    }
}

static void idct_put(DCTELEM *block, uint8_t *restrict pixels, int stride)
{
    ff_simple_idct_10(block);
    put_pixels(block, pixels, stride);
}
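
/*
 * 8x8 coefficient scan orders for progressive and interlaced frames.
 * The frame header selects one of them; AC coefficients and the custom
 * quantisation matrices transmitted in the frame header are addressed
 * through the selected table.
 */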
static const uint8_t progressive_scan[64] = {
     0,  1,  8,  9,  2,  3, 10, 11,
    16, 17, 24, 25, 18, 19, 26, 27,
     4,  5, 12, 20, 13,  6,  7, 14,
    21, 28, 29, 22, 15, 23, 30, 31,
    32, 33, 40, 48, 41, 34, 35, 42,
    49, 56, 57, 50, 43, 36, 37, 44,
    51, 58, 59, 52, 45, 38, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63
};

static const uint8_t interlaced_scan[64] = {
     0,  8,  1,  9, 16, 24, 17, 25,
     2, 10,  3, 11, 18, 26, 19, 27,
    32, 40, 33, 34, 41, 48, 56, 49,
    42, 35, 43, 50, 57, 58, 51, 59,
     4, 12,  5,  6, 13, 20, 28, 21,
    14,  7, 15, 22, 29, 36, 44, 37,
    30, 23, 31, 38, 45, 52, 60, 53,
    46, 39, 47, 54, 61, 62, 55, 63,
};

static av_cold int decode_init(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;

    avctx->bits_per_raw_sample = 10;

    dsputil_init(&ctx->dsp, avctx);

    avctx->coded_frame = &ctx->frame;
    ctx->frame.type = FF_I_TYPE;
    ctx->frame.key_frame = 1;

    ctx->idct_put = idct_put;
    memcpy(ctx->progressive_scan, progressive_scan, sizeof(progressive_scan));
    memcpy(ctx->interlaced_scan, interlaced_scan, sizeof(interlaced_scan));

    return 0;
}
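
/*
 * Parse the frame header: size/version check, frame dimensions, frame
 * structure (progressive vs. interlaced), pixel format and the optional
 * custom quantisation matrices. Returns the header size on success or a
 * negative value on error.
 */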
static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
                               const int data_size, AVCodecContext *avctx)
{
    int hdr_size, width, height, flags;
    int version;
    const uint8_t *ptr;
    const uint8_t *scan;

    hdr_size = AV_RB16(buf);
    av_dlog(avctx, "header size %d\n", hdr_size);
    if (hdr_size > data_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong header size\n");
        return -1;
    }

    version = AV_RB16(buf + 2);
    av_dlog(avctx, "%.4s version %d\n", buf+4, version);
    if (version > 1) {
        av_log(avctx, AV_LOG_ERROR, "unsupported version: %d\n", version);
        return -1;
    }

    width  = AV_RB16(buf + 8);
    height = AV_RB16(buf + 10);
    if (width != avctx->width || height != avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "picture resolution change: %dx%d -> %dx%d\n",
               avctx->width, avctx->height, width, height);
        return -1;
    }

    ctx->frame_type = (buf[12] >> 2) & 3;
    av_dlog(avctx, "frame type %d\n", ctx->frame_type);

    if (ctx->frame_type == 0) {
        scan = progressive_scan;
        ctx->scan = ctx->progressive_scan; // permuted
    } else {
        scan = interlaced_scan;
        ctx->scan = ctx->interlaced_scan; // permuted
        ctx->frame.interlaced_frame = 1;
        ctx->frame.top_field_first = ctx->frame_type == 1;
    }

    avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? PIX_FMT_YUV444P10 : PIX_FMT_YUV422P10;

    ptr   = buf + 20;
    flags = buf[19];
    av_dlog(avctx, "flags %x\n", flags);

    if (flags & 2) {
        permute(ctx->qmat_luma, scan, ptr);
        ptr += 64;
    } else {
        memset(ctx->qmat_luma, 4, 64);
    }

    if (flags & 1) {
        permute(ctx->qmat_chroma, scan, ptr);
    } else {
        memset(ctx->qmat_chroma, 4, 64);
    }

    return hdr_size;
}
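
/*
 * Parse the picture header and the slice index table that follows it.
 * A slice normally covers (1 << log2_slice_mb_width) macroblocks; the
 * trailing slices of each macroblock row are progressively halved so the
 * row is covered exactly. Returns the picture data size on success or a
 * negative value on error.
 */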
static int decode_picture_header(AVCodecContext *avctx, const uint8_t *buf, const int buf_size)
{
    ProresContext *ctx = avctx->priv_data;
    int i, hdr_size, slice_count;
    unsigned pic_data_size;
    int log2_slice_mb_width, log2_slice_mb_height;
    int slice_mb_count, mb_x, mb_y;
    const uint8_t *data_ptr, *index_ptr;

    hdr_size = buf[0] >> 3;
    if (hdr_size < 8 || hdr_size > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong picture header size\n");
        return -1;
    }

    pic_data_size = AV_RB32(buf + 1);
    if (pic_data_size > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong picture data size\n");
        return -1;
    }

    log2_slice_mb_width  = buf[7] >> 4;
    log2_slice_mb_height = buf[7] & 0xF;
    if (log2_slice_mb_width > 3 || log2_slice_mb_height) {
        av_log(avctx, AV_LOG_ERROR, "unsupported slice resolution: %dx%d\n",
               1 << log2_slice_mb_width, 1 << log2_slice_mb_height);
        return -1;
    }

    ctx->mb_width = (avctx->width + 15) >> 4;
    if (ctx->frame_type)
        ctx->mb_height = (avctx->height + 31) >> 5;
    else
        ctx->mb_height = (avctx->height + 15) >> 4;

    slice_count = AV_RB16(buf + 5);

    if (ctx->slice_count != slice_count || !ctx->slices) {
        av_freep(&ctx->slices);
        ctx->slices = av_mallocz(slice_count * sizeof(*ctx->slices));
        if (!ctx->slices)
            return AVERROR(ENOMEM);
        ctx->slice_count = slice_count;
    }

    if (!slice_count)
        return AVERROR(EINVAL);

    if (hdr_size + slice_count*2 > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong slice count\n");
        return -1;
    }

    // parse slice information
    index_ptr = buf + hdr_size;
    data_ptr  = index_ptr + slice_count*2;

    slice_mb_count = 1 << log2_slice_mb_width;
    mb_x = 0;
    mb_y = 0;

    for (i = 0; i < slice_count; i++) {
        SliceContext *slice = &ctx->slices[i];

        slice->data = data_ptr;
        data_ptr += AV_RB16(index_ptr + i*2);

        while (ctx->mb_width - mb_x < slice_mb_count)
            slice_mb_count >>= 1;

        slice->mb_x = mb_x;
        slice->mb_y = mb_y;
        slice->mb_count = slice_mb_count;
        slice->data_size = data_ptr - slice->data;

        if (slice->data_size < 6) {
            av_log(avctx, AV_LOG_ERROR, "error, wrong slice data size\n");
            return -1;
        }

        mb_x += slice_mb_count;
        if (mb_x == ctx->mb_width) {
            slice_mb_count = 1 << log2_slice_mb_width;
            mb_x = 0;
            mb_y++;
        }
        if (data_ptr > buf + buf_size) {
            av_log(avctx, AV_LOG_ERROR, "error, slice out of bounds\n");
            return -1;
        }
    }

    if (mb_x || mb_y != ctx->mb_height) {
        av_log(avctx, AV_LOG_ERROR, "error wrong mb count y %d h %d\n",
               mb_y, ctx->mb_height);
        return -1;
    }

    return pic_data_size;
}
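
/*
 * Decode a single codeword. The codebook descriptor packs three fields:
 * bits 5-7 hold the Rice order, bits 2-4 the Exp-Golomb order and bits 0-1
 * the number of leading zeroes at which the decoder switches from Rice to
 * Exp-Golomb coding. For example, FIRST_DC_CB = 0xB8 gives rice_order 5,
 * exp_order 6 and switch_bits 0.
 */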
#define DECODE_CODEWORD(val, codebook)                                  \
    do {                                                                \
        unsigned int rice_order, exp_order, switch_bits;                \
        unsigned int q, buf, bits;                                      \
                                                                        \
        UPDATE_CACHE(re, gb);                                           \
        buf = GET_CACHE(re, gb);                                        \
                                                                        \
        /* number of bits to switch between rice and exp golomb */      \
        switch_bits =  codebook & 3;                                    \
        rice_order  =  codebook >> 5;                                   \
        exp_order   = (codebook >> 2) & 7;                              \
                                                                        \
        q = 31 - av_log2(buf);                                          \
                                                                        \
        if (q > switch_bits) { /* exp golomb */                         \
            bits = exp_order - switch_bits + (q<<1);                    \
            val = SHOW_UBITS(re, gb, bits) - (1 << exp_order) +         \
                  ((switch_bits + 1) << rice_order);                    \
            SKIP_BITS(re, gb, bits);                                    \
        } else if (rice_order) {                                        \
            SKIP_BITS(re, gb, q+1);                                     \
            val = (q << rice_order) + SHOW_UBITS(re, gb, rice_order);   \
            SKIP_BITS(re, gb, rice_order);                              \
        } else {                                                        \
            val = q;                                                    \
            SKIP_BITS(re, gb, q+1);                                     \
        }                                                               \
    } while (0)

#define TOSIGNED(x) (((x) >> 1) ^ (-((x) & 1)))

#define FIRST_DC_CB 0xB8

static const uint8_t dc_codebook[7] = { 0x04, 0x28, 0x28, 0x4D, 0x4D, 0x70, 0x70 };
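
/*
 * Decode the DC coefficients of all blocks in a slice. The first DC is
 * coded with a fixed codebook; every following DC is coded as a signed
 * difference from the previous one, with the codebook picked adaptively
 * from the previously decoded code value.
 */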
static av_always_inline void decode_dc_coeffs(GetBitContext *gb, DCTELEM *out,
                                              int blocks_per_slice, const int *qmat)
{
    DCTELEM prev_dc;
    int code, i, sign;

    OPEN_READER(re, gb);

    DECODE_CODEWORD(code, FIRST_DC_CB);
    prev_dc = TOSIGNED(code);
    out[0] = 4096 + ((prev_dc * qmat[0]) >> 2);

    out += 64; // dc coeff for the next block

    code = 5;
    sign = 0;
    for (i = 1; i < blocks_per_slice; i++, out += 64) {
        DECODE_CODEWORD(code, dc_codebook[FFMIN(code, 6)]);
        if (code) sign ^= -(code & 1);
        else      sign  = 0;
        prev_dc += (((code + 1) >> 1) ^ sign) - sign;
        out[0] = 4096 + ((prev_dc * qmat[0]) >> 2);
    }
    CLOSE_READER(re, gb);
}

// adaptive codebook switching lut according to previous run/level values
static const uint8_t run_to_cb[16] = { 0x06, 0x06, 0x05, 0x05, 0x04, 0x29, 0x29, 0x29, 0x29, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x4C };
static const uint8_t lev_to_cb[10] = { 0x04, 0x0A, 0x05, 0x06, 0x04, 0x28, 0x28, 0x28, 0x28, 0x4C };
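
/*
 * Decode the AC coefficients of all blocks in a slice. Coefficients are
 * interleaved across the blocks of the slice: 'pos' enumerates one scan
 * position of every block before moving on to the next position, so
 * (pos & block_mask) selects the block and (pos >> log2_block_count) the
 * scan index. Run and level codebooks are switched adaptively based on
 * the previously decoded run and level values.
 */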
static av_always_inline void decode_ac_coeffs(AVCodecContext *avctx, GetBitContext *gb,
                                              DCTELEM *out, int blocks_per_slice,
                                              const int *qmat)
{
    ProresContext *ctx = avctx->priv_data;
    int block_mask, sign;
    unsigned pos, run, level;
    int max_coeffs, i, bits_left;
    int log2_block_count = av_log2(blocks_per_slice);

    OPEN_READER(re, gb);

    run   = 4;
    level = 2;

    max_coeffs = 64 << log2_block_count;
    block_mask = blocks_per_slice - 1;

    for (pos = block_mask;;) {
        bits_left = gb->size_in_bits - (((uint8_t*)re_buffer_ptr - gb->buffer)*8 - 32 + re_bit_count);
        if (!bits_left || (bits_left < 32 && !SHOW_UBITS(re, gb, bits_left)))
            break;

        DECODE_CODEWORD(run, run_to_cb[FFMIN(run, 15)]);
        pos += run + 1;
        if (pos >= max_coeffs) {
            av_log(avctx, AV_LOG_ERROR, "ac tex damaged %d, %d\n", pos, max_coeffs);
            return;
        }

        DECODE_CODEWORD(level, lev_to_cb[FFMIN(level, 9)]);
        level += 1;

        i = pos >> log2_block_count;

        sign = SHOW_SBITS(re, gb, 1);
        SKIP_BITS(re, gb, 1);
        out[((pos & block_mask) << 6) + ctx->scan[i]] = (((level ^ sign) - sign) * qmat[i]) >> 2;
    }

    CLOSE_READER(re, gb);
}
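
/*
 * Decode the luma plane of one slice. Each macroblock carries four 8x8
 * luma blocks arranged 2x2; samples are stored as 16-bit values, hence
 * the byte offsets of 16 (one block to the right) and 32 (one macroblock)
 * into the destination. The chroma variant below handles two (4:2:2) or
 * four (4:4:4) 8x8 blocks per macroblock, stacked in columns of two.
 */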
static void decode_slice_luma(AVCodecContext *avctx, SliceContext *slice,
                              uint8_t *dst, int dst_stride,
                              const uint8_t *buf, unsigned buf_size,
                              const int *qmat)
{
    ProresContext *ctx = avctx->priv_data;
    LOCAL_ALIGNED_16(DCTELEM, blocks, [8*4*64]);
    DCTELEM *block;
    GetBitContext gb;
    int i, blocks_per_slice = slice->mb_count<<2;

    for (i = 0; i < blocks_per_slice; i++)
        ctx->dsp.clear_block(blocks+(i<<6));

    init_get_bits(&gb, buf, buf_size << 3);

    decode_dc_coeffs(&gb, blocks, blocks_per_slice, qmat);
    decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice, qmat);

    block = blocks;
    for (i = 0; i < slice->mb_count; i++) {
        ctx->idct_put(block+(0<<6), dst, dst_stride);
        ctx->idct_put(block+(1<<6), dst+16, dst_stride);
        ctx->idct_put(block+(2<<6), dst+8*dst_stride, dst_stride);
        ctx->idct_put(block+(3<<6), dst+8*dst_stride+16, dst_stride);
        block += 4*64;
        dst += 32;
    }
}

static void decode_slice_chroma(AVCodecContext *avctx, SliceContext *slice,
                                uint8_t *dst, int dst_stride,
                                const uint8_t *buf, unsigned buf_size,
                                const int *qmat, int log2_blocks_per_mb)
{
    ProresContext *ctx = avctx->priv_data;
    LOCAL_ALIGNED_16(DCTELEM, blocks, [8*4*64]);
    DCTELEM *block;
    GetBitContext gb;
    int i, j, blocks_per_slice = slice->mb_count << log2_blocks_per_mb;

    for (i = 0; i < blocks_per_slice; i++)
        ctx->dsp.clear_block(blocks+(i<<6));

    init_get_bits(&gb, buf, buf_size << 3);

    decode_dc_coeffs(&gb, blocks, blocks_per_slice, qmat);
    decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice, qmat);

    block = blocks;
    for (i = 0; i < slice->mb_count; i++) {
        for (j = 0; j < log2_blocks_per_mb; j++) {
            ctx->idct_put(block+(0<<6), dst, dst_stride);
            ctx->idct_put(block+(1<<6), dst+8*dst_stride, dst_stride);
            block += 2*64;
            dst += 16;
        }
    }
}
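
/*
 * Decode one complete slice; invoked per slice by the slice threading
 * framework. The slice header carries its own size, a quantiser scale and
 * the coded sizes of the luma and chroma planes, which follow back to back
 * after the header.
 */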
static int decode_slice_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
    ProresContext *ctx = avctx->priv_data;
    SliceContext *slice = &ctx->slices[jobnr];
    const uint8_t *buf = slice->data;
    AVFrame *pic = avctx->coded_frame;
    int i, hdr_size, qscale, log2_chroma_blocks_per_mb;
    int luma_stride, chroma_stride;
    int y_data_size, u_data_size, v_data_size;
    uint8_t *dest_y, *dest_u, *dest_v;
    int qmat_luma_scaled[64];
    int qmat_chroma_scaled[64];
    int mb_x_shift;

    //av_log(avctx, AV_LOG_INFO, "slice %d mb width %d mb x %d y %d\n",
    //       jobnr, slice->mb_count, slice->mb_x, slice->mb_y);

    // slice header
    hdr_size = buf[0] >> 3;
    qscale = av_clip(buf[1], 1, 224);
    qscale = qscale > 128 ? (qscale - 96) << 2 : qscale;
    y_data_size = AV_RB16(buf + 2);
    u_data_size = AV_RB16(buf + 4);
    v_data_size = slice->data_size - y_data_size - u_data_size - hdr_size;
    if (hdr_size > 7) v_data_size = AV_RB16(buf + 6);

    if (y_data_size < 0 || u_data_size < 0 || v_data_size < 0) {
        av_log(avctx, AV_LOG_ERROR, "invalid plane data size\n");
        return -1;
    }

    buf += hdr_size;

    for (i = 0; i < 64; i++) {
        qmat_luma_scaled[i]   = ctx->qmat_luma[i]   * qscale;
        qmat_chroma_scaled[i] = ctx->qmat_chroma[i] * qscale;
    }

    if (ctx->frame_type == 0) {
        luma_stride   = pic->linesize[0];
        chroma_stride = pic->linesize[1];
    } else {
        luma_stride   = pic->linesize[0] << 1;
        chroma_stride = pic->linesize[1] << 1;
    }

    if (avctx->pix_fmt == PIX_FMT_YUV444P10) {
        mb_x_shift = 5;
        log2_chroma_blocks_per_mb = 2;
    } else {
        mb_x_shift = 4;
        log2_chroma_blocks_per_mb = 1;
    }

    dest_y = pic->data[0] + (slice->mb_y << 4) * luma_stride   + (slice->mb_x << 5);
    dest_u = pic->data[1] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);
    dest_v = pic->data[2] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);

    if (ctx->frame_type && (ctx->first_field ^ ctx->frame.top_field_first)) {
        dest_y += pic->linesize[0];
        dest_u += pic->linesize[1];
        dest_v += pic->linesize[2];
    }

    decode_slice_luma(avctx, slice, dest_y, luma_stride,
                      buf, y_data_size, qmat_luma_scaled);

    if (!(avctx->flags & CODEC_FLAG_GRAY)) {
        decode_slice_chroma(avctx, slice, dest_u, chroma_stride,
                            buf + y_data_size, u_data_size,
                            qmat_chroma_scaled, log2_chroma_blocks_per_mb);
        decode_slice_chroma(avctx, slice, dest_v, chroma_stride,
                            buf + y_data_size + u_data_size, v_data_size,
                            qmat_chroma_scaled, log2_chroma_blocks_per_mb);
    }

    return 0;
}

static int decode_picture(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;
    int i, threads_ret[ctx->slice_count];

    avctx->execute2(avctx, decode_slice_thread, NULL, threads_ret, ctx->slice_count);

    for (i = 0; i < ctx->slice_count; i++)
        if (threads_ret[i] < 0)
            return threads_ret[i];

    return 0;
}
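
/*
 * Decode a whole frame. The packet starts with an 8-byte header (size plus
 * the 'icpf' tag), followed by the frame header and one picture for
 * progressive material or two field pictures for interlaced material; the
 * second field is handled by looping back to the decode_picture label.
 */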
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        AVPacket *avpkt)
{
    ProresContext *ctx = avctx->priv_data;
    AVFrame *frame = avctx->coded_frame;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int frame_hdr_size, pic_size;

    if (buf_size < 28 || AV_RL32(buf + 4) != AV_RL32("icpf")) {
        av_log(avctx, AV_LOG_ERROR, "invalid frame header\n");
        return -1;
    }

    ctx->first_field = 1;

    buf += 8;
    buf_size -= 8;

    frame_hdr_size = decode_frame_header(ctx, buf, buf_size, avctx);
    if (frame_hdr_size < 0)
        return -1;

    buf += frame_hdr_size;
    buf_size -= frame_hdr_size;

    if (frame->data[0])
        avctx->release_buffer(avctx, frame);

    if (avctx->get_buffer(avctx, frame) < 0)
        return -1;

 decode_picture:
    pic_size = decode_picture_header(avctx, buf, buf_size);
    if (pic_size < 0) {
        av_log(avctx, AV_LOG_ERROR, "error decoding picture header\n");
        return -1;
    }

    if (decode_picture(avctx)) {
        av_log(avctx, AV_LOG_ERROR, "error decoding picture\n");
        return -1;
    }

    buf += pic_size;
    buf_size -= pic_size;

    if (ctx->frame_type && buf_size > 0 && ctx->first_field) {
        ctx->first_field = 0;
        goto decode_picture;
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = *frame;

    return avpkt->size;
}

static av_cold int decode_close(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;
    AVFrame *frame = avctx->coded_frame;

    if (frame->data[0])
        avctx->release_buffer(avctx, frame);
    av_freep(&ctx->slices);

    return 0;
}

AVCodec ff_prores_gpl_decoder = {
    .name           = "prores_gpl",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_PRORES,
    .priv_data_size = sizeof(ProresContext),
    .init           = decode_init,
    .close          = decode_close,
    .decode         = decode_frame,
    .long_name      = NULL_IF_CONFIG_SMALL("ProRes"),
    .capabilities   = CODEC_CAP_SLICE_THREADS,
};