/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation;
 * version 2 of the License.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/**
 * @file libavcodec/proresdec.c
 * Known FOURCCs: 'apch' (HQ), 'apcn' (SD), 'apcs' (LT), 'apco' (Proxy), 'ap4h' (4444)
 */
//#define DEBUG

#define A32_BITSTREAM_READER

#include "avcodec.h"
#include "get_bits.h"
#include "dsputil.h"
#include "simple_idct.h"

typedef struct {
    const uint8_t *data;   ///< start of the compressed slice data
    unsigned mb_x;         ///< horizontal position of the slice in macroblocks
    unsigned mb_y;         ///< vertical position of the slice in macroblocks
    unsigned mb_count;     ///< number of macroblocks in the slice
    unsigned data_size;    ///< size of the compressed slice data in bytes
} SliceContext;

typedef struct {
    AVFrame frame;
    DSPContext dsp;
    int frame_type;               ///< 0 = progressive, 1 = tff, 2 = bff
    uint8_t qmat_luma[64];
    uint8_t qmat_chroma[64];
    SliceContext *slices;
    int slice_count;              ///< number of slices in the current picture
    unsigned mb_width;            ///< width of the current picture in mb
    unsigned mb_height;           ///< height of the current picture in mb
    uint8_t progressive_scan[64];
    uint8_t interlaced_scan[64];
    const uint8_t *scan;
    int first_field;
    void (*idct_put)(DCTELEM *, uint8_t *restrict, int);
} ProresContext;

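/* Copy a 64-entry table through the given permutation: dst[i] = permutation[src[i]]. */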
static void permute(uint8_t *dst, const uint8_t *src, const uint8_t permutation[64])
{
    int i;
    for (i = 0; i < 64; i++)
        dst[i] = permutation[src[i]];
}

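/* Write an 8x8 block of 10-bit samples, clipped to the legal range [4, 1019]. */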
static av_always_inline void put_pixels(const DCTELEM *block, uint8_t *restrict pixels, int stride)
{
    int16_t *p = (int16_t*)pixels;
    int i, j;

    stride >>= 1;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            p[j] = av_clip(block[j], 4, 1019);
        }
        p += stride;
        block += 8;
    }
}

static void idct_put(DCTELEM *block, uint8_t *restrict pixels, int stride)
{
    ff_simple_idct_10(block);
    put_pixels(block, pixels, stride);
}

static const uint8_t progressive_scan[64] = {
     0,  1,  8,  9,  2,  3, 10, 11,
    16, 17, 24, 25, 18, 19, 26, 27,
     4,  5, 12, 20, 13,  6,  7, 14,
    21, 28, 29, 22, 15, 23, 30, 31,
    32, 33, 40, 48, 41, 34, 35, 42,
    49, 56, 57, 50, 43, 36, 37, 44,
    51, 58, 59, 52, 45, 38, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63
};

static const uint8_t interlaced_scan[64] = {
     0,  8,  1,  9, 16, 24, 17, 25,
     2, 10,  3, 11, 18, 26, 19, 27,
    32, 40, 33, 34, 41, 48, 56, 49,
    42, 35, 43, 50, 57, 58, 51, 59,
     4, 12,  5,  6, 13, 20, 28, 21,
    14,  7, 15, 22, 29, 36, 44, 37,
    30, 23, 31, 38, 45, 52, 60, 53,
    46, 39, 47, 54, 61, 62, 55, 63,
};

static av_cold int decode_init(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;

    avctx->bits_per_raw_sample = 10;

    dsputil_init(&ctx->dsp, avctx);

    avctx->coded_frame = &ctx->frame;
    ctx->frame.type = FF_I_TYPE;
    ctx->frame.key_frame = 1;

    ctx->idct_put = idct_put;
    memcpy(ctx->progressive_scan, progressive_scan, sizeof(progressive_scan));
    memcpy(ctx->interlaced_scan, interlaced_scan, sizeof(interlaced_scan));

    return 0;
}

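/* Parse the frame header: size, bitstream version, frame dimensions, frame type
 * (progressive/interlaced), pixel format and the optional quantisation matrices.
 * Returns the header size on success, a negative value on error. */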
static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
                               const int data_size, AVCodecContext *avctx)
{
    int hdr_size, width, height, flags;
    int version;
    const uint8_t *ptr;
    const uint8_t *scan;

    hdr_size = AV_RB16(buf);
    av_dlog(avctx, "header size %d\n", hdr_size);
    if (hdr_size > data_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong header size\n");
        return -1;
    }

    version = AV_RB16(buf + 2);
    av_dlog(avctx, "%.4s version %d\n", buf+4, version);
    if (version > 1) {
        av_log(avctx, AV_LOG_ERROR, "unsupported version: %d\n", version);
        return -1;
    }

    width  = AV_RB16(buf + 8);
    height = AV_RB16(buf + 10);
    if (width != avctx->width || height != avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "picture resolution change: %dx%d -> %dx%d\n",
               avctx->width, avctx->height, width, height);
        return -1;
    }

    ctx->frame_type = (buf[12] >> 2) & 3;
    av_dlog(avctx, "frame type %d\n", ctx->frame_type);

    if (ctx->frame_type == 0) {
        scan = progressive_scan;
        ctx->scan = ctx->progressive_scan; // permuted
    } else {
        scan = interlaced_scan;
        ctx->scan = ctx->interlaced_scan; // permuted
        ctx->frame.interlaced_frame = 1;
        ctx->frame.top_field_first  = ctx->frame_type == 1;
    }

    avctx->pix_fmt = ((buf[12] & 0xC0) == 0xC0) ? PIX_FMT_YUV444P10 : PIX_FMT_YUV422P10;

    ptr   = buf + 20;
    flags = buf[19];
    av_dlog(avctx, "flags %x\n", flags);

    if (flags & 2) {
        permute(ctx->qmat_luma, scan, ptr);
        ptr += 64;
    } else {
        memset(ctx->qmat_luma, 4, 64);
    }

    if (flags & 1) {
        permute(ctx->qmat_chroma, scan, ptr);
    } else {
        memset(ctx->qmat_chroma, 4, 64);
    }

    return hdr_size;
}

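/* Parse the picture header and the slice index table, filling in the data
 * pointer, macroblock coordinates and data size of every slice.
 * Returns the picture data size on success, a negative value on error. */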
static int decode_picture_header(AVCodecContext *avctx, const uint8_t *buf, const int buf_size)
{
    ProresContext *ctx = avctx->priv_data;
    int i, hdr_size, slice_count;
    unsigned pic_data_size;
    int log2_slice_mb_width, log2_slice_mb_height;
    int slice_mb_count, mb_x, mb_y;
    const uint8_t *data_ptr, *index_ptr;

    hdr_size = buf[0] >> 3;
    if (hdr_size < 8 || hdr_size > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong picture header size\n");
        return -1;
    }

    pic_data_size = AV_RB32(buf + 1);
    if (pic_data_size > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong picture data size\n");
        return -1;
    }

    log2_slice_mb_width  = buf[7] >> 4;
    log2_slice_mb_height = buf[7] & 0xF;
    if (log2_slice_mb_width > 3 || log2_slice_mb_height) {
        av_log(avctx, AV_LOG_ERROR, "unsupported slice resolution: %dx%d\n",
               1 << log2_slice_mb_width, 1 << log2_slice_mb_height);
        return -1;
    }

    ctx->mb_width  = (avctx->width  + 15) >> 4;
    ctx->mb_height = (avctx->height + 15) >> 4;

    slice_count = AV_RB16(buf + 5);

    if (ctx->slice_count != slice_count || !ctx->slices) {
        av_freep(&ctx->slices);
        ctx->slices = av_mallocz(slice_count * sizeof(*ctx->slices));
        if (!ctx->slices)
            return AVERROR(ENOMEM);
        ctx->slice_count = slice_count;
    }

    if (!slice_count)
        return AVERROR(EINVAL);

    if (hdr_size + slice_count*2 > buf_size) {
        av_log(avctx, AV_LOG_ERROR, "error, wrong slice count\n");
        return -1;
    }

    // parse slice information
    index_ptr = buf + hdr_size;
    data_ptr  = index_ptr + slice_count*2;

    slice_mb_count = 1 << log2_slice_mb_width;
    mb_x = 0;
    mb_y = 0;

    for (i = 0; i < slice_count; i++) {
        SliceContext *slice = &ctx->slices[i];

        slice->data = data_ptr;
        data_ptr += AV_RB16(index_ptr + i*2);

        while (ctx->mb_width - mb_x < slice_mb_count)
            slice_mb_count >>= 1;

        slice->mb_x = mb_x;
        slice->mb_y = mb_y;
        slice->mb_count = slice_mb_count;
        slice->data_size = data_ptr - slice->data;

        if (slice->data_size < 6) {
            av_log(avctx, AV_LOG_ERROR, "error, wrong slice data size\n");
            return -1;
        }

        mb_x += slice_mb_count;
        if (mb_x == ctx->mb_width) {
            slice_mb_count = 1 << log2_slice_mb_width;
            mb_x = 0;
            mb_y++;
        }
        if (data_ptr > buf + buf_size) {
            av_log(avctx, AV_LOG_ERROR, "error, slice out of bounds\n");
            return -1;
        }
    }

    return pic_data_size;
}

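/* Decode a single codeword from the bit reader 're'/'gb'. The 8-bit codebook
 * descriptor packs three parameters: bits 0-1 give the Rice/Exp-Golomb switch
 * point, bits 2-4 the Exp-Golomb order and bits 5-7 the Rice order. */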
#define DECODE_CODEWORD(val, codebook)                                  \
    do {                                                                \
        unsigned int rice_order, exp_order, switch_bits;                \
        unsigned int q, buf, bits;                                      \
                                                                        \
        UPDATE_CACHE(re, gb);                                           \
        buf = GET_CACHE(re, gb);                                        \
                                                                        \
        /* number of bits to switch between rice and exp golomb */      \
        switch_bits = codebook & 3;                                     \
        rice_order  = codebook >> 5;                                    \
        exp_order   = (codebook >> 2) & 7;                              \
                                                                        \
        q = 31 - av_log2(buf);                                          \
                                                                        \
        if (q > switch_bits) { /* exp golomb */                         \
            bits = exp_order - switch_bits + (q<<1);                    \
            val = SHOW_UBITS(re, gb, bits) - (1 << exp_order) +         \
                ((switch_bits + 1) << rice_order);                      \
            SKIP_BITS(re, gb, bits);                                    \
        } else if (rice_order) {                                        \
            SKIP_BITS(re, gb, q+1);                                     \
            val = (q << rice_order) + SHOW_UBITS(re, gb, rice_order);   \
            SKIP_BITS(re, gb, rice_order);                              \
        } else {                                                        \
            val = q;                                                    \
            SKIP_BITS(re, gb, q+1);                                     \
        }                                                               \
    } while (0)

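/* Map a folded unsigned code back to a signed value (even -> non-negative,
 * odd -> negative). FIRST_DC_CB is the codebook descriptor used for the
 * first DC coefficient of each slice. */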
#define TOSIGNED(x) (((x) >> 1) ^ (-((x) & 1)))

#define FIRST_DC_CB 0xB8

static const uint8_t dc_codebook[7] = { 0x04, 0x28, 0x28, 0x4D, 0x4D, 0x70, 0x70 };

static av_always_inline void decode_dc_coeffs(GetBitContext *gb, DCTELEM *out,
                                              int blocks_per_slice, const int *qmat)
{
    DCTELEM prev_dc;
    int code, i, sign;

    OPEN_READER(re, gb);

    DECODE_CODEWORD(code, FIRST_DC_CB);
    prev_dc = TOSIGNED(code);
    out[0] = 4096 + ((prev_dc * qmat[0]) >> 2);

    out += 64; // dc coeff for the next block

    code = 5;
    sign = 0;
    for (i = 1; i < blocks_per_slice; i++, out += 64) {
        DECODE_CODEWORD(code, dc_codebook[FFMIN(code, 6)]);
        if (code) sign ^= -(code & 1);
        else      sign  = 0;
        prev_dc += (((code + 1) >> 1) ^ sign) - sign;
        out[0] = 4096 + ((prev_dc * qmat[0]) >> 2);
    }
    CLOSE_READER(re, gb);
}

// adaptive codebook switching lut according to previous run/level values
static const uint8_t run_to_cb[16] = { 0x06, 0x06, 0x05, 0x05, 0x04, 0x29, 0x29, 0x29, 0x29, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x4C };
static const uint8_t lev_to_cb[10] = { 0x04, 0x0A, 0x05, 0x06, 0x04, 0x28, 0x28, 0x28, 0x28, 0x4C };

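/* Decode the run/level coded AC coefficients of a whole slice, choosing the
 * codebook for each symbol from the previous run and level values. */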
static av_always_inline void decode_ac_coeffs(AVCodecContext *avctx, GetBitContext *gb,
                                              DCTELEM *out, int blocks_per_slice,
                                              const int *qmat)
{
    ProresContext *ctx = avctx->priv_data;
    int block_mask, sign;
    unsigned pos, run, level;
    int max_coeffs, i, bits_left;
    int log2_block_count = av_log2(blocks_per_slice);

    OPEN_READER(re, gb);

    run   = 4;
    level = 2;

    max_coeffs = 64 << log2_block_count;
    block_mask = blocks_per_slice - 1;

    for (pos = block_mask;;) {
        bits_left = gb->size_in_bits - (((uint8_t*)re_buffer_ptr - gb->buffer)*8 - 32 + re_bit_count);
        if (!bits_left || (bits_left < 32 && !SHOW_UBITS(re, gb, bits_left)))
            break;

        DECODE_CODEWORD(run, run_to_cb[FFMIN(run, 15)]);
        pos += run + 1;
        if (pos >= max_coeffs) {
            av_log(avctx, AV_LOG_ERROR, "ac tex damaged %d, %d\n", pos, max_coeffs);
            return;
        }

        DECODE_CODEWORD(level, lev_to_cb[FFMIN(level, 9)]);
        level += 1;

        i = pos >> log2_block_count;

        sign = SHOW_SBITS(re, gb, 1);
        SKIP_BITS(re, gb, 1);
        out[((pos & block_mask) << 6) + ctx->scan[i]] = (((level ^ sign) - sign) * qmat[i]) >> 2;
    }

    CLOSE_READER(re, gb);
}

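/* Decode the luma plane of one slice: four 8x8 blocks per macroblock,
 * IDCT-ed and written out in 2x2 block order (left/right, top/bottom). */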
static void decode_slice_luma(AVCodecContext *avctx, SliceContext *slice,
                              uint8_t *dst, int dst_stride,
                              const uint8_t *buf, unsigned buf_size,
                              const int *qmat)
{
    ProresContext *ctx = avctx->priv_data;
    LOCAL_ALIGNED_16(DCTELEM, blocks, [8*4*64]);
    DCTELEM *block;
    GetBitContext gb;
    int i, blocks_per_slice = slice->mb_count<<2;

    for (i = 0; i < blocks_per_slice; i++)
        ctx->dsp.clear_block(blocks+(i<<6));

    init_get_bits(&gb, buf, buf_size << 3);

    decode_dc_coeffs(&gb, blocks, blocks_per_slice, qmat);
    decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice, qmat);

    block = blocks;
    for (i = 0; i < slice->mb_count; i++) {
        ctx->idct_put(block+(0<<6), dst, dst_stride);
        ctx->idct_put(block+(1<<6), dst+16, dst_stride);
        ctx->idct_put(block+(2<<6), dst+8*dst_stride, dst_stride);
        ctx->idct_put(block+(3<<6), dst+8*dst_stride+16, dst_stride);
        block += 4*64;
        dst += 32;
    }
}

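/* Decode one chroma plane of a slice. log2_blocks_per_mb is 1 for 4:2:2
 * (two 8x8 blocks per macroblock) and 2 for 4:4:4 (four blocks). */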
static void decode_slice_chroma(AVCodecContext *avctx, SliceContext *slice,
                                uint8_t *dst, int dst_stride,
                                const uint8_t *buf, unsigned buf_size,
                                const int *qmat, int log2_blocks_per_mb)
{
    ProresContext *ctx = avctx->priv_data;
    LOCAL_ALIGNED_16(DCTELEM, blocks, [8*4*64]);
    DCTELEM *block;
    GetBitContext gb;
    int i, j, blocks_per_slice = slice->mb_count << log2_blocks_per_mb;

    for (i = 0; i < blocks_per_slice; i++)
        ctx->dsp.clear_block(blocks+(i<<6));

    init_get_bits(&gb, buf, buf_size << 3);

    decode_dc_coeffs(&gb, blocks, blocks_per_slice, qmat);
    decode_ac_coeffs(avctx, &gb, blocks, blocks_per_slice, qmat);

    block = blocks;
    for (i = 0; i < slice->mb_count; i++) {
        for (j = 0; j < log2_blocks_per_mb; j++) {
            ctx->idct_put(block+(0<<6), dst, dst_stride);
            ctx->idct_put(block+(1<<6), dst+8*dst_stride, dst_stride);
            block += 2*64;
            dst += 16;
        }
    }
}

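/* Per-slice worker for avctx->execute2(): parse the slice header, scale the
 * quantisation matrices by the slice qscale and decode the luma and both
 * chroma planes into the output frame. */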
static int decode_slice_thread(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
{
    ProresContext *ctx = avctx->priv_data;
    SliceContext *slice = &ctx->slices[jobnr];
    const uint8_t *buf = slice->data;
    AVFrame *pic = avctx->coded_frame;
    int i, hdr_size, qscale;
    int luma_stride, chroma_stride;
    int y_data_size, u_data_size, v_data_size;
    uint8_t *dest_y, *dest_u, *dest_v;
    int qmat_luma_scaled[64];
    int qmat_chroma_scaled[64];
    int mb_x_shift;

    //av_log(avctx, AV_LOG_INFO, "slice %d mb width %d mb x %d y %d\n",
    //       jobnr, slice->mb_count, slice->mb_x, slice->mb_y);

    // slice header
    hdr_size = buf[0] >> 3;
    qscale = av_clip(buf[1], 1, 224);
    qscale = qscale > 128 ? (qscale - 96) << 2 : qscale;
    y_data_size = AV_RB16(buf + 2);
    u_data_size = AV_RB16(buf + 4);
    v_data_size = slice->data_size - y_data_size - u_data_size - hdr_size;

    if (y_data_size < 0 || u_data_size < 0 || v_data_size < 0) {
        av_log(avctx, AV_LOG_ERROR, "invalid plane data size\n");
        return -1;
    }

    buf += hdr_size;

    for (i = 0; i < 64; i++) {
        qmat_luma_scaled[i]   = ctx->qmat_luma[i]   * qscale;
        qmat_chroma_scaled[i] = ctx->qmat_chroma[i] * qscale;
    }

    if (ctx->frame_type == 0) {
        luma_stride   = pic->linesize[0];
        chroma_stride = pic->linesize[1];
    } else {
        luma_stride   = pic->linesize[0] << 1;
        chroma_stride = pic->linesize[1] << 1;
    }

    mb_x_shift = (avctx->pix_fmt == PIX_FMT_YUV444P10) ? 5 : 4;

    dest_y = pic->data[0] + (slice->mb_y << 4) * luma_stride   + (slice->mb_x << 5);
    dest_u = pic->data[1] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);
    dest_v = pic->data[2] + (slice->mb_y << 4) * chroma_stride + (slice->mb_x << mb_x_shift);

    if (ctx->frame_type && (ctx->first_field ^ ctx->frame.top_field_first)) {
        dest_y += pic->linesize[0];
        dest_u += pic->linesize[1];
        dest_v += pic->linesize[2];
    }

    decode_slice_luma(avctx, slice, dest_y, luma_stride,
                      buf, y_data_size, qmat_luma_scaled);

    if ((avctx->flags & CODEC_FLAG_GRAY)) {
        // grayscale output requested: skip chroma decoding
    } else if (avctx->pix_fmt == PIX_FMT_YUV444P10) {
        decode_slice_chroma(avctx, slice, dest_u, chroma_stride,
                            buf + y_data_size, u_data_size,
                            qmat_chroma_scaled, 2);
        decode_slice_chroma(avctx, slice, dest_v, chroma_stride,
                            buf + y_data_size + u_data_size, v_data_size,
                            qmat_chroma_scaled, 2);
    } else {
        decode_slice_chroma(avctx, slice, dest_u, chroma_stride,
                            buf + y_data_size, u_data_size,
                            qmat_chroma_scaled, 1);
        decode_slice_chroma(avctx, slice, dest_v, chroma_stride,
                            buf + y_data_size + u_data_size, v_data_size,
                            qmat_chroma_scaled, 1);
    }

    return 0;
}

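/* Decode all slices of the current picture in parallel and report the first
 * slice error, if any. */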
static int decode_picture(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;
    int i, threads_ret[ctx->slice_count];

    avctx->execute2(avctx, decode_slice_thread, NULL, threads_ret, ctx->slice_count);

    for (i = 0; i < ctx->slice_count; i++)
        if (threads_ret[i] < 0)
            return threads_ret[i];

    return 0;
}

static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        AVPacket *avpkt)
{
    ProresContext *ctx = avctx->priv_data;
    AVFrame *frame = avctx->coded_frame;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int frame_hdr_size, pic_size;

    if (buf_size < 28 || AV_RL32(buf + 4) != AV_RL32("icpf")) {
        av_log(avctx, AV_LOG_ERROR, "invalid frame header\n");
        return -1;
    }

    ctx->first_field = 1;

    buf += 8;
    buf_size -= 8;

    frame_hdr_size = decode_frame_header(ctx, buf, buf_size, avctx);
    if (frame_hdr_size < 0)
        return -1;

    buf += frame_hdr_size;
    buf_size -= frame_hdr_size;

    // allocate the output frame once per packet, before the picture loop,
    // so the second field of an interlaced frame lands in the same buffer
    if (frame->data[0])
        avctx->release_buffer(avctx, frame);

    if (avctx->get_buffer(avctx, frame) < 0)
        return -1;

 decode_picture:
    pic_size = decode_picture_header(avctx, buf, buf_size);
    if (pic_size < 0) {
        av_log(avctx, AV_LOG_ERROR, "error decoding picture header\n");
        return -1;
    }

    if (decode_picture(avctx)) {
        av_log(avctx, AV_LOG_ERROR, "error decoding picture\n");
        return -1;
    }

    buf += pic_size;
    buf_size -= pic_size;

    if (ctx->frame_type && buf_size > 0 && ctx->first_field) {
        ctx->first_field = 0;
        goto decode_picture;
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = *frame;

    return avpkt->size;
}

static av_cold int decode_close(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;
    AVFrame *frame = avctx->coded_frame;

    if (frame->data[0])
        avctx->release_buffer(avctx, frame);

    av_freep(&ctx->slices);

    return 0;
}

AVCodec ff_prores_decoder = {
    .name           = "prores",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_PRORES,
    .priv_data_size = sizeof(ProresContext),
    .init           = decode_init,
    .close          = decode_close,
    .decode         = decode_frame,
    .long_name      = NULL_IF_CONFIG_SMALL("ProRes"),
    .capabilities   = CODEC_CAP_SLICE_THREADS,
};