/*
 * Copyright (c) 2010-2011 Maxim Poliakovski
 * Copyright (c) 2010-2011 Elvis Presley
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation;
 * version 2 of the License.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Known FOURCCs: 'apch' (HQ), 'apcn' (SD), 'apcs' (LT), 'apco' (Proxy), 'ap4h' (4444)
 */

//#define DEBUG

#define A32_BITSTREAM_READER

#include "avcodec.h"
#include "get_bits.h"
#include "dsputil.h"
#include "simple_idct.h"

typedef struct {
    const uint8_t *data;
    unsigned mb_x;
    unsigned mb_y;
    unsigned mb_count;
    unsigned data_size;
} SliceContext;

typedef struct {
    AVFrame frame;
    DSPContext dsp;
    int frame_type;              ///< 0 = progressive, 1 = tff, 2 = bff
    uint8_t qmat_luma[64];
    uint8_t qmat_chroma[64];
    SliceContext *slices;
    int slice_count;             ///< number of slices in the current picture
    unsigned mb_width;           ///< width of the current picture in mb
    unsigned mb_height;          ///< height of the current picture in mb
    uint8_t progressive_scan[64];
    uint8_t interlaced_scan[64];
    const uint8_t *scan;
    int first_field;
    void (*idct_put)(DCTELEM *, uint8_t *restrict, int);
} ProresContext;

static void permute(uint8_t *dst, const uint8_t *src, const uint8_t permutation[64])
{
    int i;
    for (i = 0; i < 64; i++)
        dst[i] = permutation[src[i]];
}

static av_always_inline void put_pixels(const DCTELEM *block, uint8_t *restrict pixels, int stride)
{
    int16_t *p = (int16_t*)pixels;
    int i, j;

    stride >>= 1;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            p[j] = av_clip(block[j], 4, 1019);
        }
        p += stride;
        block += 8;
    }
}

static void idct_put(DCTELEM *block, uint8_t *restrict pixels, int stride)
{
    ff_simple_idct_10(block);
    put_pixels(block, pixels, stride);
}

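/*
 * ProRes scan orders for progressive and interlaced macroblocks. decode_init
 * copies them unchanged into the context so that ctx->scan can point at
 * whichever variant the frame header selects.
 */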
static const uint8_t progressive_scan[64] = {
     0,  1,  8,  9,  2,  3, 10, 11,
    16, 17, 24, 25, 18, 19, 26, 27,
     4,  5, 12, 20, 13,  6,  7, 14,
    21, 28, 29, 22, 15, 23, 30, 31,
    32, 33, 40, 48, 41, 34, 35, 42,
    49, 56, 57, 50, 43, 36, 37, 44,
    51, 58, 59, 52, 45, 38, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63
};

static const uint8_t interlaced_scan[64] = {
     0,  8,  1,  9, 16, 24, 17, 25,
     2, 10,  3, 11, 18, 26, 19, 27,
    32, 40, 33, 34, 41, 48, 56, 49,
    42, 35, 43, 50, 57, 58, 51, 59,
     4, 12,  5,  6, 13, 20, 28, 21,
    14,  7, 15, 22, 29, 36, 44, 37,
    30, 23, 31, 38, 45, 52, 60, 53,
    46, 39, 47, 54, 61, 62, 55, 63,
};

static av_cold int decode_init(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;

    avctx->bits_per_raw_sample = 10;

    dsputil_init(&ctx->dsp, avctx);

    avctx->coded_frame = &ctx->frame;
    ctx->frame.type = FF_I_TYPE;
    ctx->frame.key_frame = 1;

    ctx->idct_put = idct_put;
    memcpy(ctx->progressive_scan, progressive_scan, sizeof(progressive_scan));
    memcpy(ctx->interlaced_scan, interlaced_scan, sizeof(interlaced_scan));

    return 0;
}

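/*
 * Frame header layout, as parsed below: 16-bit header size, 16-bit version,
 * 4-byte creator tag, 16-bit width/height at offsets 8/10, a frame-flags byte
 * at offset 12 (interlacing mode in bits 2-3, chroma format in bits 6-7), a
 * quant-matrix-flags byte at offset 19 and optional 64-byte luma/chroma
 * quantisation matrices starting at offset 20.
 */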
static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
                               const int data_size, AVCodecContext *avctx)
{
    int hdr_size, width, height, flags;
    int version;
    const uint8_t *ptr;
    const uint8_t *scan;

    hdr_size = AV_RB16(buf);
    av_dlog(avctx, "header size %d\n", hdr_size);
    if (hdr_size > data_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong header size\n");
        return -1;
    }

    version = AV_RB16(buf + 2);
    av_dlog(avctx, "%.4s version %d\n", buf+4, version);
    if (version > 1) {
        av_log(avctx, AV_LOG_ERROR, "unsupported version: %d\n", version);
        return -1;
    }

    width  = AV_RB16(buf + 8);
    height = AV_RB16(buf + 10);
    if (width != avctx->width || height != avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "picture resolution change: %dx%d -> %dx%d\n",
               avctx->width, avctx->height, width, height);
        return -1;
    }

    ctx->frame_type = (buf[12] >> 2) & 3;
    av_dlog(avctx, "frame type %d\n", ctx->frame_type);

    if (ctx->frame_type == 0) {
        scan = progressive_scan;
        ctx->scan = ctx->progressive_scan; // permuted
    } else {
        scan = interlaced_scan;
        ctx->scan = ctx->interlaced_scan; // permuted
        ctx->frame.interlaced_frame = 1;
        ctx->frame.top_field_first = ctx->frame_type == 1;
    }

    avctx->pix_fmt = ((buf[12] & 0xC0) == 0xC0) ? PIX_FMT_YUV444P10 : PIX_FMT_YUV422P10;

    ptr   = buf + 20;
    flags = buf[19];
    av_dlog(avctx, "flags %x\n", flags);

    if (flags & 2) {
        permute(ctx->qmat_luma, scan, ptr);
        ptr += 64;
    } else {
        memset(ctx->qmat_luma, 4, 64);
    }

    if (flags & 1) {
        permute(ctx->qmat_chroma, scan, ptr);
    } else {
        memset(ctx->qmat_chroma, 4, 64);
    }

    return hdr_size;
}

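/*
 * Picture header layout, as parsed below: header size in the top 5 bits of
 * the first byte, 32-bit picture data size, 16-bit slice count and a byte
 * packing log2 of the slice width/height in macroblocks. The header is
 * followed by an index of 16-bit slice sizes, from which each slice's data
 * pointer, position and macroblock count are derived.
 */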
static int decode_picture_header(AVCodecContext *avctx, const uint8_t *buf, const int buf_size)
{
    ProresContext *ctx = avctx->priv_data;
    int i, hdr_size, slice_count;
    unsigned pic_data_size;
    int log2_slice_mb_width, log2_slice_mb_height;
    int slice_mb_count, mb_x, mb_y;
    const uint8_t *data_ptr, *index_ptr;

    hdr_size = buf[0] >> 3;
    if (hdr_size < 8 || hdr_size > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong picture header size\n");
        return -1;
    }

    pic_data_size = AV_RB32(buf + 1);
    if (pic_data_size > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong picture data size\n");
        return -1;
    }

    log2_slice_mb_width  = buf[7] >> 4;
    log2_slice_mb_height = buf[7] & 0xF;
    if (log2_slice_mb_width > 3 || log2_slice_mb_height) {
        av_log(avctx, AV_LOG_ERROR, "unsupported slice resolution: %dx%d\n",
               1 << log2_slice_mb_width, 1 << log2_slice_mb_height);
        return -1;
    }

    ctx->mb_width  = (avctx->width  + 15) >> 4;
    ctx->mb_height = (avctx->height + 15) >> 4;

    slice_count = AV_RB16(buf + 5);

    if (ctx->slice_count != slice_count || !ctx->slices) {
        av_freep(&ctx->slices);
        ctx->slices = av_mallocz(slice_count * sizeof(*ctx->slices));
        if (!ctx->slices)
            return AVERROR(ENOMEM);
        ctx->slice_count = slice_count;
    }

    if (!slice_count)
        return AVERROR(EINVAL);

    if (hdr_size + slice_count*2 > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong slice count\n");
        return -1;
    }

    // parse slice information
    index_ptr = buf + hdr_size;
    data_ptr  = index_ptr + slice_count*2;

    slice_mb_count = 1 << log2_slice_mb_width;
    mb_x = 0;
    mb_y = 0;

    for (i = 0; i < slice_count; i++) {
        SliceContext *slice = &ctx->slices[i];

        slice->data = data_ptr;
        data_ptr += AV_RB16(index_ptr + i*2);

        while (ctx->mb_width - mb_x < slice_mb_count)
            slice_mb_count >>= 1;

        slice->mb_x = mb_x;
        slice->mb_y = mb_y;
        slice->mb_count = slice_mb_count;
        slice->data_size = data_ptr - slice->data;

        if (slice->data_size < 6) {
            av_log(avctx, AV_LOG_ERROR, "error, wrong slice data size\n");
            return -1;
        }

        mb_x += slice_mb_count;
        if (mb_x == ctx->mb_width) {
            slice_mb_count = 1 << log2_slice_mb_width;
            mb_x = 0;
            mb_y++;
        }
        if (data_ptr > buf + buf_size) {
            av_log(avctx, AV_LOG_ERROR, "error, slice out of bounds\n");
            return -1;
        }
    }

    return pic_data_size;
}

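/*
 * Entropy decoding helper. Each codebook descriptor byte packs the Rice order
 * in bits 5-7, the Exp-Golomb order in bits 2-4 and the Rice/Exp-Golomb switch
 * threshold in bits 0-1; codewords with more leading zeroes than the threshold
 * are decoded as Exp-Golomb, the rest as Rice codes.
 */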
#define DECODE_CODEWORD(val, codebook)                                  \
    do {                                                                \
        unsigned int rice_order, exp_order, switch_bits;                \
        unsigned int q, buf, bits;                                      \
                                                                        \
        UPDATE_CACHE(re, gb);                                           \
        buf = GET_CACHE(re, gb);                                        \
                                                                        \
        /* number of bits to switch between rice and exp golomb */      \
        switch_bits =  codebook & 3;                                    \
        rice_order  =  codebook >> 5;                                   \
        exp_order   = (codebook >> 2) & 7;                              \
                                                                        \
        q = 31 - av_log2(buf);                                          \
                                                                        \
        if (q > switch_bits) { /* exp golomb */                         \
            bits = exp_order - switch_bits + (q<<1);                    \
            val = SHOW_UBITS(re, gb, bits) - (1 << exp_order) +         \
                  ((switch_bits + 1) << rice_order);                    \
            SKIP_BITS(re, gb, bits);                                    \
        } else if (rice_order) {                                        \
            SKIP_BITS(re, gb, q+1);                                     \
            val = (q << rice_order) + SHOW_UBITS(re, gb, rice_order);   \
            SKIP_BITS(re, gb, rice_order);                              \
        } else {                                                        \
            val = q;                                                    \
            SKIP_BITS(re, gb, q+1);                                     \
        }                                                               \
    } while (0)

#define TOSIGNED(x) (((x) >> 1) ^ (-((x) & 1)))

#define FIRST_DC_CB 0xB8

static const uint8_t dc_codebook[7] = { 0x04, 0x28, 0x28, 0x4D, 0x4D, 0x70, 0x70 };

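/*
 * DC coefficients: the first block's DC is coded directly, the remaining ones
 * as signed differences from the previous DC, with the codebook chosen
 * adaptively from the previous codeword (dc_codebook, clamped to 6).
 */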
static av_always_inline void decode_dc_coeffs(GetBitContext *gb, DCTELEM *out,
                                              int blocks_per_slice, const int *qmat)
{
    DCTELEM prev_dc;
    int code, i, sign;

    OPEN_READER(re, gb);

    DECODE_CODEWORD(code, FIRST_DC_CB);
    prev_dc = TOSIGNED(code);
    out[0] = 4096 + ((prev_dc * qmat[0]) >> 2);

    out += 64; // dc coeff for the next block

    code = 5;
    sign = 0;
    for (i = 1; i < blocks_per_slice; i++, out += 64) {
        DECODE_CODEWORD(code, dc_codebook[FFMIN(code, 6)]);
        if (code) sign ^= -(code & 1);
        else      sign  = 0;
        prev_dc += (((code + 1) >> 1) ^ sign) - sign;
        out[0] = 4096 + ((prev_dc * qmat[0]) >> 2);
    }
    CLOSE_READER(re, gb);
}

// adaptive codebook switching lut according to previous run/level values
static const uint8_t run_to_cb[16] = { 0x06, 0x06, 0x05, 0x05, 0x04, 0x29, 0x29, 0x29, 0x29, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x4C };
static const uint8_t lev_to_cb[10] = { 0x04, 0x0A, 0x05, 0x06, 0x04, 0x28, 0x28, 0x28, 0x28, 0x4C };

static av_always_inline void decode_ac_coeffs(AVCodecContext *avctx, GetBitContext *gb,
                                              DCTELEM *out, int blocks_per_slice,
                                              const int *qmat)
{
    ProresContext *ctx = avctx->priv_data;
    int block_mask, sign;
    unsigned pos, run, level;
    int max_coeffs, i, bits_left;
    int log2_block_count = av_log2(blocks_per_slice);

    OPEN_READER(re, gb);

    run   = 4;
    level = 2;

    max_coeffs = 64 << log2_block_count;
    block_mask = blocks_per_slice - 1;

    for (pos = block_mask;;) {
        bits_left = gb->size_in_bits - (((uint8_t*)re_buffer_ptr - gb->buffer)*8 - 32 + re_bit_count);
        if (!bits_left || (bits_left < 32 && !SHOW_UBITS(re, gb, bits_left)))
            break;

        DECODE_CODEWORD(run, run_to_cb[FFMIN(run, 15)]);
        pos += run + 1;
        if (pos >= max_coeffs) {
            av_log(avctx, AV_LOG_ERROR, "ac tex damaged %d, %d\n", pos, max_coeffs);
            return;
        }

        DECODE_CODEWORD(level, lev_to_cb[FFMIN(level, 9)]);
        level += 1;

        i = pos >> log2_block_count;

        sign = SHOW_SBITS(re, gb, 1);
        SKIP_BITS(re, gb, 1);
        out[((pos & block_mask) << 6) + ctx->scan[i]] = (((level ^ sign) - sign) * qmat[i]) >> 2;
    }

    CLOSE_READER(re, gb);
}

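/*
 * A luma macroblock is four 8x8 blocks in a 2x2 layout (16x16 pixels); the
 * destination offsets below are in bytes, with two bytes per 10-bit sample,
 * so +16 steps one block to the right and +32 one macroblock.
 */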
static void decode_slice_luma(AVCodecContext *avctx, SliceContext *slice,
                              uint8_t *dst, int dst_stride,
                              const uint8_t *buf, unsigned buf_size,
                              const int *qmat)
{
    ProresContext *ctx = avctx->priv_data;
    LOCAL_ALIGNED_16(DCTELEM, blocks, [8*4*64]);
    DCTELEM *block;
    GetBitContext gb;
    int i, blocks_per_slice = slice->mb_count << 2;

    for (i = 0; i < blocks_per_slice; i++)
        ctx->dsp.clear_block(blocks+(i<<6));

    init_get_bits(&gb, buf, buf_size << 3);

    decode_dc_coeffs(&gb, blocks, blocks_per_slice, qmat);
    decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice, qmat);

    block = blocks;
    for (i = 0; i < slice->mb_count; i++) {
        ctx->idct_put(block+(0<<6), dst, dst_stride);
        ctx->idct_put(block+(1<<6), dst+16, dst_stride);
        ctx->idct_put(block+(2<<6), dst+8*dst_stride, dst_stride);
        ctx->idct_put(block+(3<<6), dst+8*dst_stride+16, dst_stride);
        block += 4*64;
        dst += 32;
    }
}

static void decode_slice_chroma(AVCodecContext *avctx, SliceContext *slice,
                                uint8_t *dst, int dst_stride,
                                const uint8_t *buf, unsigned buf_size,
                                const int *qmat, int log2_blocks_per_mb)
{
    ProresContext *ctx = avctx->priv_data;
    LOCAL_ALIGNED_16(DCTELEM, blocks, [8*4*64]);
    DCTELEM *block;
    GetBitContext gb;
    int i, j, blocks_per_slice = slice->mb_count << log2_blocks_per_mb;

    for (i = 0; i < blocks_per_slice; i++)
        ctx->dsp.clear_block(blocks+(i<<6));

    init_get_bits(&gb, buf, buf_size << 3);

    decode_dc_coeffs(&gb, blocks, blocks_per_slice, qmat);
    decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice, qmat);

    block = blocks;
    for (i = 0; i < slice->mb_count; i++) {
        for (j = 0; j < log2_blocks_per_mb; j++) {
            ctx->idct_put(block+(0<<6), dst, dst_stride);
            ctx->idct_put(block+(1<<6), dst+8*dst_stride, dst_stride);
            block += 2*64;
            dst += 16;
        }
    }
}

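/*
 * Slice header layout, as parsed below: header size in the top 5 bits of the
 * first byte, a quantiser byte (values above 128 select a coarser scale),
 * then 16-bit luma and chroma-U data sizes; the V size is either read from
 * the header or inferred from the remaining slice bytes.
 */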
static int decode_slice_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
    ProresContext *ctx = avctx->priv_data;
    SliceContext *slice = &ctx->slices[jobnr];
    const uint8_t *buf = slice->data;
    AVFrame *pic = avctx->coded_frame;
    int i, hdr_size, qscale, log2_chroma_blocks_per_mb;
    int luma_stride, chroma_stride;
    int y_data_size, u_data_size, v_data_size;
    uint8_t *dest_y, *dest_u, *dest_v;
    int qmat_luma_scaled[64];
    int qmat_chroma_scaled[64];
    int mb_x_shift;

    //av_log(avctx, AV_LOG_INFO, "slice %d mb width %d mb x %d y %d\n",
    //       jobnr, slice->mb_count, slice->mb_x, slice->mb_y);

    // slice header
    hdr_size = buf[0] >> 3;
    qscale = av_clip(buf[1], 1, 224);
    qscale = qscale > 128 ? (qscale - 96) << 2 : qscale;
    y_data_size = AV_RB16(buf + 2);
    u_data_size = AV_RB16(buf + 4);
    v_data_size = slice->data_size - y_data_size - u_data_size - hdr_size;
    if (hdr_size > 7) v_data_size = AV_RB16(buf + 6);

    if (y_data_size < 0 || u_data_size < 0 || v_data_size < 0) {
        av_log(avctx, AV_LOG_ERROR, "invalid plane data size\n");
        return -1;
    }

    buf += hdr_size;

    for (i = 0; i < 64; i++) {
        qmat_luma_scaled[i]   = ctx->qmat_luma[i]   * qscale;
        qmat_chroma_scaled[i] = ctx->qmat_chroma[i] * qscale;
    }

    if (ctx->frame_type == 0) {
        luma_stride   = pic->linesize[0];
        chroma_stride = pic->linesize[1];
    } else {
        luma_stride   = pic->linesize[0] << 1;
        chroma_stride = pic->linesize[1] << 1;
    }

    if (avctx->pix_fmt == PIX_FMT_YUV444P10) {
        mb_x_shift = 5;
        log2_chroma_blocks_per_mb = 2;
    } else {
        mb_x_shift = 4;
        log2_chroma_blocks_per_mb = 1;
    }

    dest_y = pic->data[0] + (slice->mb_y << 4) * luma_stride   + (slice->mb_x << 5);
    dest_u = pic->data[1] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);
    dest_v = pic->data[2] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);

    if (ctx->frame_type && (ctx->first_field ^ ctx->frame.top_field_first)) {
        dest_y += pic->linesize[0];
        dest_u += pic->linesize[1];
        dest_v += pic->linesize[2];
    }

    decode_slice_luma(avctx, slice, dest_y, luma_stride,
                      buf, y_data_size, qmat_luma_scaled);

    if (!(avctx->flags & CODEC_FLAG_GRAY)) {
        decode_slice_chroma(avctx, slice, dest_u, chroma_stride,
                            buf + y_data_size, u_data_size,
                            qmat_chroma_scaled, log2_chroma_blocks_per_mb);
        decode_slice_chroma(avctx, slice, dest_v, chroma_stride,
                            buf + y_data_size + u_data_size, v_data_size,
                            qmat_chroma_scaled, log2_chroma_blocks_per_mb);
    }

    return 0;
}

static int decode_picture(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;
    int i, threads_ret[ctx->slice_count];

    avctx->execute2(avctx, decode_slice_thread, NULL, threads_ret, ctx->slice_count);

    for (i = 0; i < ctx->slice_count; i++)
        if (threads_ret[i] < 0)
            return threads_ret[i];

    return 0;
}

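/*
 * A coded frame starts with an 8-byte atom header whose tag must be 'icpf',
 * followed by the frame header and one picture, or two pictures (one per
 * field) when the frame is interlaced.
 */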
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        AVPacket *avpkt)
{
    ProresContext *ctx = avctx->priv_data;
    AVFrame *frame = avctx->coded_frame;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int frame_hdr_size, pic_size;

    if (buf_size < 28 || AV_RL32(buf + 4) != AV_RL32("icpf")) {
        av_log(avctx, AV_LOG_ERROR, "invalid frame header\n");
        return -1;
    }

    ctx->first_field = 1;

    buf += 8;
    buf_size -= 8;

    frame_hdr_size = decode_frame_header(ctx, buf, buf_size, avctx);
    if (frame_hdr_size < 0)
        return -1;

    buf += frame_hdr_size;
    buf_size -= frame_hdr_size;

    // acquire the frame buffer once per frame, before the picture loop,
    // so that both fields of an interlaced frame land in the same picture
    if (frame->data[0])
        avctx->release_buffer(avctx, frame);

    if (avctx->get_buffer(avctx, frame) < 0)
        return -1;

 decode_picture:
    pic_size = decode_picture_header(avctx, buf, buf_size);
    if (pic_size < 0) {
        av_log(avctx, AV_LOG_ERROR, "error decoding picture header\n");
        return -1;
    }

    if (decode_picture(avctx)) {
        av_log(avctx, AV_LOG_ERROR, "error decoding picture\n");
        return -1;
    }

    buf += pic_size;
    buf_size -= pic_size;

    if (ctx->frame_type && buf_size > 0 && ctx->first_field) {
        ctx->first_field = 0;
        goto decode_picture;
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = *frame;

    return avpkt->size;
}

static av_cold int decode_close(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;
    AVFrame *frame = avctx->coded_frame;

    if (frame->data[0])
        avctx->release_buffer(avctx, frame);

    av_freep(&ctx->slices);

    return 0;
}

AVCodec ff_prores_gpl_decoder = {
    .name           = "prores_gpl",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_PRORES,
    .priv_data_size = sizeof(ProresContext),
    .init           = decode_init,
    .close          = decode_close,
    .decode         = decode_frame,
    .long_name      = NULL_IF_CONFIG_SMALL("ProRes"),
    .capabilities   = CODEC_CAP_SLICE_THREADS,
};