/*
 * Apple ProRes encoder
 *
 * Copyright (c) 2011 Anatoliy Wasserman
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Apple ProRes encoder (Anatoliy Wasserman version)
 * Known FOURCCs: 'apch' (HQ), 'apcn' (SD), 'apcs' (LT), 'apco' (Proxy)
 */

#include "avcodec.h"
#include "internal.h"
#include "put_bits.h"
#include "bytestream.h"
#include "dsputil.h"

#define DEFAULT_SLICE_MB_WIDTH 8

#define FF_PROFILE_PRORES_PROXY     0
#define FF_PROFILE_PRORES_LT        1
#define FF_PROFILE_PRORES_STANDARD  2
#define FF_PROFILE_PRORES_HQ        3

static const AVProfile profiles[] = {
    { FF_PROFILE_PRORES_PROXY,    "apco"},
    { FF_PROFILE_PRORES_LT,       "apcs"},
    { FF_PROFILE_PRORES_STANDARD, "apcn"},
    { FF_PROFILE_PRORES_HQ,       "apch"},
    { FF_PROFILE_UNKNOWN }
};

static const int qp_start_table[4] = { 4, 1, 1, 1 };
static const int qp_end_table[4]   = { 8, 9, 6, 6 };
static const int bitrate_table[5]  = { 1000, 2100, 3500, 5400 };

static const uint8_t progressive_scan[64] = {
     0,  1,  8,  9,  2,  3, 10, 11,
    16, 17, 24, 25, 18, 19, 26, 27,
     4,  5, 12, 20, 13,  6,  7, 14,
    21, 28, 29, 22, 15, 23, 30, 31,
    32, 33, 40, 48, 41, 34, 35, 42,
    49, 56, 57, 50, 43, 36, 37, 44,
    51, 58, 59, 52, 45, 38, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63
};

static const uint8_t QMAT_LUMA[4][64] = {
    {
         4,  7,  9, 11, 13, 14, 15, 63,
         7,  7, 11, 12, 14, 15, 63, 63,
         9, 11, 13, 14, 15, 63, 63, 63,
        11, 11, 13, 14, 63, 63, 63, 63,
        11, 13, 14, 63, 63, 63, 63, 63,
        13, 14, 63, 63, 63, 63, 63, 63,
        13, 63, 63, 63, 63, 63, 63, 63,
        63, 63, 63, 63, 63, 63, 63, 63
    }, {
         4,  5,  6,  7,  9, 11, 13, 15,
         5,  5,  7,  8, 11, 13, 15, 17,
         6,  7,  9, 11, 13, 15, 15, 17,
         7,  7,  9, 11, 13, 15, 17, 19,
         7,  9, 11, 13, 14, 16, 19, 23,
         9, 11, 13, 14, 16, 19, 23, 29,
         9, 11, 13, 15, 17, 21, 28, 35,
        11, 13, 16, 17, 21, 28, 35, 41
    }, {
         4,  4,  5,  5,  6,  7,  7,  9,
         4,  4,  5,  6,  7,  7,  9,  9,
         5,  5,  6,  7,  7,  9,  9, 10,
         5,  5,  6,  7,  7,  9,  9, 10,
         5,  6,  7,  7,  8,  9, 10, 12,
         6,  7,  7,  8,  9, 10, 12, 15,
         6,  7,  7,  9, 10, 11, 14, 17,
         7,  7,  9, 10, 11, 14, 17, 21
    }, {
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  5,
         4,  4,  4,  4,  4,  4,  5,  5,
         4,  4,  4,  4,  4,  5,  5,  6,
         4,  4,  4,  4,  5,  5,  6,  7,
         4,  4,  4,  4,  5,  6,  7,  7
    }
};

static const uint8_t QMAT_CHROMA[4][64] = {
    {
         4,  7,  9, 11, 13, 14, 63, 63,
         7,  7, 11, 12, 14, 63, 63, 63,
         9, 11, 13, 14, 63, 63, 63, 63,
        11, 11, 13, 14, 63, 63, 63, 63,
        11, 13, 14, 63, 63, 63, 63, 63,
        13, 14, 63, 63, 63, 63, 63, 63,
        13, 63, 63, 63, 63, 63, 63, 63,
        63, 63, 63, 63, 63, 63, 63, 63
    }, {
         4,  5,  6,  7,  9, 11, 13, 15,
         5,  5,  7,  8, 11, 13, 15, 17,
         6,  7,  9, 11, 13, 15, 15, 17,
         7,  7,  9, 11, 13, 15, 17, 19,
         7,  9, 11, 13, 14, 16, 19, 23,
         9, 11, 13, 14, 16, 19, 23, 29,
         9, 11, 13, 15, 17, 21, 28, 35,
        11, 13, 16, 17, 21, 28, 35, 41
    }, {
         4,  4,  5,  5,  6,  7,  7,  9,
         4,  4,  5,  6,  7,  7,  9,  9,
         5,  5,  6,  7,  7,  9,  9, 10,
         5,  5,  6,  7,  7,  9,  9, 10,
         5,  6,  7,  7,  8,  9, 10, 12,
         6,  7,  7,  8,  9, 10, 12, 15,
         6,  7,  7,  9, 10, 11, 14, 17,
         7,  7,  9, 10, 11, 14, 17, 21
    }, {
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  4,
         4,  4,  4,  4,  4,  4,  4,  5,
         4,  4,  4,  4,  4,  4,  5,  5,
         4,  4,  4,  4,  4,  5,  5,  6,
         4,  4,  4,  4,  5,  5,  6,  7,
         4,  4,  4,  4,  5,  6,  7,  7
    }
};

typedef struct {
    uint8_t *fill_y;
    uint8_t *fill_u;
    uint8_t *fill_v;

    int qmat_luma[16][64];
    int qmat_chroma[16][64];
} ProresContext;

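/*
 * Write one codeword using the hybrid Rice / Exp-Golomb scheme.
 * The codebook descriptor packs three fields: switch_bits (bits 0-1),
 * exp_order (bits 2-4) and rice_order (bits 5-7). Values below the
 * switch point are coded with the Rice part (unary when rice_order is 0),
 * larger values fall through to Exp-Golomb.
 */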
static void encode_codeword(PutBitContext *pb, int val, int codebook)
{
    unsigned int rice_order, exp_order, switch_bits, first_exp, exp, zeros,
            mask;

    /* number of bits to switch between rice and exp golomb */
    switch_bits = codebook & 3;
    rice_order  = codebook >> 5;
    exp_order   = (codebook >> 2) & 7;

    first_exp = ((switch_bits + 1) << rice_order);

    if (val >= first_exp) { /* exp golomb */
        val -= first_exp;
        val += (1 << exp_order);
        exp = av_log2(val);
        zeros = exp - exp_order + switch_bits + 1;
        put_bits(pb, zeros, 0);
        put_bits(pb, exp + 1, val);
    } else if (rice_order) {
        mask = (1 << rice_order) - 1;
        put_bits(pb, (val >> rice_order), 0);
        put_bits(pb, 1, 1);
        put_bits(pb, rice_order, val & mask);
    } else {
        put_bits(pb, val, 0);
        put_bits(pb, 1, 1);
    }
}

#define QSCALE(qmat,ind,val) ((val) / (qmat[ind]))
#define TO_GOLOMB(val) ((val << 1) ^ (val >> 31))
#define DIFF_SIGN(val, sign) ((val >> 31) ^ sign)
#define IS_NEGATIVE(val) (((val >> 31) ^ -1) + 1)
#define TO_GOLOMB2(val,sign) (val==0 ? 0 : (val << 1) + sign)

static av_always_inline int get_level(int val)
{
    int sign = (val >> 31);
    return (val ^ sign) - sign;
}

#define FIRST_DC_CB 0xB8

static const uint8_t dc_codebook[7] = { 0x04, 0x28, 0x28, 0x4D, 0x4D, 0x70, 0x70 };

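/*
 * Encode the DC coefficients of all blocks in a slice. The first DC is
 * coded directly with FIRST_DC_CB; the remaining ones are coded as
 * sign-adjusted differences to the previous DC, with the codebook chosen
 * from dc_codebook based on the previously written code.
 */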
static void encode_dc_coeffs(PutBitContext *pb, int16_t *in,
        int blocks_per_slice, int *qmat)
{
    int prev_dc, code;
    int i, sign, idx;
    int new_dc, delta, diff_sign, new_code;

    prev_dc = QSCALE(qmat, 0, in[0] - 16384);
    code = TO_GOLOMB(prev_dc);
    encode_codeword(pb, code, FIRST_DC_CB);

    code = 5; sign = 0; idx = 64;
    for (i = 1; i < blocks_per_slice; i++, idx += 64) {
        new_dc = QSCALE(qmat, 0, in[idx] - 16384);
        delta = new_dc - prev_dc;
        diff_sign = DIFF_SIGN(delta, sign);
        new_code = TO_GOLOMB2(get_level(delta), diff_sign);

        encode_codeword(pb, new_code, dc_codebook[FFMIN(code, 6)]);

        code = new_code;
        sign = delta >> 31;
        prev_dc = new_dc;
    }
}

static const uint8_t run_to_cb[16] = { 0x06, 0x06, 0x05, 0x05, 0x04, 0x29,
        0x29, 0x29, 0x29, 0x28, 0x28, 0x28, 0x28, 0x28, 0x28, 0x4C };
static const uint8_t lev_to_cb[10] = { 0x04, 0x0A, 0x05, 0x06, 0x04, 0x28,
        0x28, 0x28, 0x28, 0x4C };

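/*
 * Encode the AC coefficients of a slice. Each scan position of
 * progressive_scan is visited across all blocks of the slice, and nonzero
 * values are written as (run, level) pairs followed by a sign bit; the
 * codebooks adapt on the previously coded run and level via run_to_cb and
 * lev_to_cb.
 */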
static void encode_ac_coeffs(AVCodecContext *avctx, PutBitContext *pb,
        int16_t *in, int blocks_per_slice, int *qmat)
{
    int prev_run = 4;
    int prev_level = 2;

    int run = 0, level, code, i, j;
    for (i = 1; i < 64; i++) {
        int indp = progressive_scan[i];
        for (j = 0; j < blocks_per_slice; j++) {
            int val = QSCALE(qmat, indp, in[(j << 6) + indp]);
            if (val) {
                encode_codeword(pb, run, run_to_cb[FFMIN(prev_run, 15)]);

                prev_run = run;
                run      = 0;
                level    = get_level(val);
                code     = level - 1;

                encode_codeword(pb, code, lev_to_cb[FFMIN(prev_level, 9)]);

                prev_level = level;

                put_bits(pb, 1, IS_NEGATIVE(val));
            } else {
                ++run;
            }
        }
    }
}

static void get(uint8_t *pixels, int stride, int16_t *block)
{
    int16_t *p = (int16_t *)pixels;
    int i, j;

    stride >>= 1;
    for (i = 0; i < 8; i++) {
        for (j = 0; j < 8; j++) {
            block[j] = p[j];
        }
        p += stride;
        block += 8;
    }
}

static void fdct_get(uint8_t *pixels, int stride, int16_t *block)
{
    get(pixels, stride, block);
    ff_jpeg_fdct_islow_10(block);
}

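/*
 * Encode one plane of a slice: forward-DCT every 8x8 block of the
 * mb_count macroblocks (four blocks per luma macroblock, two per chroma
 * macroblock), then entropy code the DC and AC coefficients into buf.
 * Returns the number of bytes written.
 */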
static int encode_slice_plane(AVCodecContext *avctx, int mb_count,
        uint8_t *src, int src_stride, uint8_t *buf, unsigned buf_size,
        int *qmat, int chroma)
{
    DECLARE_ALIGNED(16, int16_t, blocks)[DEFAULT_SLICE_MB_WIDTH << 8], *block;
    int i, blocks_per_slice;
    PutBitContext pb;

    block = blocks;
    for (i = 0; i < mb_count; i++) {
        fdct_get(src,                  src_stride, block + (0 << 6));
        fdct_get(src + 8 * src_stride, src_stride, block + ((2 - chroma) << 6));
        if (!chroma) {
            fdct_get(src + 16,                  src_stride, block + (1 << 6));
            fdct_get(src + 16 + 8 * src_stride, src_stride, block + (3 << 6));
        }

        block += (256 >> chroma);
        src   += (32  >> chroma);
    }

    blocks_per_slice = mb_count << (2 - chroma);

    init_put_bits(&pb, buf, buf_size << 3);

    encode_dc_coeffs(&pb, blocks, blocks_per_slice, qmat);
    encode_ac_coeffs(avctx, &pb, blocks, blocks_per_slice, qmat);

    flush_put_bits(&pb);
    return put_bits_ptr(&pb) - pb.buf;
}

static av_always_inline unsigned encode_slice_data(AVCodecContext *avctx,
        uint8_t *dest_y, uint8_t *dest_u, uint8_t *dest_v, int luma_stride,
        int chroma_stride, unsigned mb_count, uint8_t *buf, unsigned data_size,
        unsigned *y_data_size, unsigned *u_data_size, unsigned *v_data_size,
        int qp)
{
    ProresContext *ctx = avctx->priv_data;

    *y_data_size = encode_slice_plane(avctx, mb_count, dest_y, luma_stride,
            buf, data_size, ctx->qmat_luma[qp - 1], 0);

    if (!(avctx->flags & CODEC_FLAG_GRAY)) {
        *u_data_size = encode_slice_plane(avctx, mb_count, dest_u,
                chroma_stride, buf + *y_data_size, data_size - *y_data_size,
                ctx->qmat_chroma[qp - 1], 1);

        *v_data_size = encode_slice_plane(avctx, mb_count, dest_v,
                chroma_stride, buf + *y_data_size + *u_data_size,
                data_size - *y_data_size - *u_data_size,
                ctx->qmat_chroma[qp - 1], 1);
    }

    return *y_data_size + *u_data_size + *v_data_size;
}

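/*
 * Copy a sub-rectangle of the source plane into a contiguous buffer of
 * dst_width x dst_height samples, replicating the last column and last
 * row to pad slices that reach past the right or bottom picture edge.
 */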
static void subimage_with_fill(uint16_t *src, unsigned x, unsigned y,
        unsigned stride, unsigned width, unsigned height, uint16_t *dst,
        unsigned dst_width, unsigned dst_height)
{
    int box_width  = FFMIN(width  - x, dst_width);
    int box_height = FFMIN(height - y, dst_height);
    int i, j, src_stride = stride >> 1;
    uint16_t last_pix, *last_line;

    src += y * src_stride + x;
    for (i = 0; i < box_height; ++i) {
        for (j = 0; j < box_width; ++j) {
            dst[j] = src[j];
        }
        last_pix = dst[j - 1];
        for (; j < dst_width; j++)
            dst[j] = last_pix;
        src += src_stride;
        dst += dst_width;
    }
    last_line = dst - dst_width;
    for (; i < dst_height; i++) {
        for (j = 0; j < dst_width; ++j) {
            dst[j] = last_line[j];
        }
        dst += dst_width;
    }
}

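/*
 * Encode one slice of up to DEFAULT_SLICE_MB_WIDTH macroblocks. Slices
 * that touch the right or bottom picture edge are first padded with
 * subimage_with_fill(). For interior slices the quantizer is nudged up or
 * down until the coded size falls within roughly +/- 12% of the per-slice
 * byte budget derived from bitrate_table, bounded by the profile's qp
 * range. A 6-byte slice header (header size, qp, Y and U data sizes)
 * precedes the coded data.
 */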
static int encode_slice(AVCodecContext *avctx, const AVFrame *pic, int mb_x,
        int mb_y, unsigned mb_count, uint8_t *buf, unsigned data_size,
        int unsafe, int *qp)
{
    int luma_stride, chroma_stride;
    int hdr_size = 6, slice_size;
    uint8_t *dest_y, *dest_u, *dest_v;
    unsigned y_data_size = 0, u_data_size = 0, v_data_size = 0;
    ProresContext *ctx = avctx->priv_data;
    int tgt_bits   = (mb_count * bitrate_table[avctx->profile]) >> 2;
    int low_bytes  = (tgt_bits - (tgt_bits >> 3)) >> 3; // 12% bitrate fluctuation
    int high_bytes = (tgt_bits + (tgt_bits >> 3)) >> 3;

    luma_stride   = pic->linesize[0];
    chroma_stride = pic->linesize[1];

    dest_y = pic->data[0] + (mb_y << 4) * luma_stride   + (mb_x << 5);
    dest_u = pic->data[1] + (mb_y << 4) * chroma_stride + (mb_x << 4);
    dest_v = pic->data[2] + (mb_y << 4) * chroma_stride + (mb_x << 4);

    if (unsafe) {
        subimage_with_fill((uint16_t *) pic->data[0], mb_x << 4, mb_y << 4,
                luma_stride, avctx->width, avctx->height,
                (uint16_t *) ctx->fill_y, mb_count << 4, 16);
        subimage_with_fill((uint16_t *) pic->data[1], mb_x << 3, mb_y << 4,
                chroma_stride, avctx->width >> 1, avctx->height,
                (uint16_t *) ctx->fill_u, mb_count << 3, 16);
        subimage_with_fill((uint16_t *) pic->data[2], mb_x << 3, mb_y << 4,
                chroma_stride, avctx->width >> 1, avctx->height,
                (uint16_t *) ctx->fill_v, mb_count << 3, 16);

        encode_slice_data(avctx, ctx->fill_y, ctx->fill_u, ctx->fill_v,
                mb_count << 5, mb_count << 4, mb_count, buf + hdr_size,
                data_size - hdr_size, &y_data_size, &u_data_size, &v_data_size,
                *qp);
    } else {
        slice_size = encode_slice_data(avctx, dest_y, dest_u, dest_v,
                luma_stride, chroma_stride, mb_count, buf + hdr_size,
                data_size - hdr_size, &y_data_size, &u_data_size, &v_data_size,
                *qp);

        if (slice_size > high_bytes && *qp < qp_end_table[avctx->profile]) {
            do {
                *qp += 1;
                slice_size = encode_slice_data(avctx, dest_y, dest_u, dest_v,
                        luma_stride, chroma_stride, mb_count, buf + hdr_size,
                        data_size - hdr_size, &y_data_size, &u_data_size,
                        &v_data_size, *qp);
            } while (slice_size > high_bytes && *qp < qp_end_table[avctx->profile]);
        } else if (slice_size < low_bytes && *qp
                > qp_start_table[avctx->profile]) {
            do {
                *qp -= 1;
                slice_size = encode_slice_data(avctx, dest_y, dest_u, dest_v,
                        luma_stride, chroma_stride, mb_count, buf + hdr_size,
                        data_size - hdr_size, &y_data_size, &u_data_size,
                        &v_data_size, *qp);
            } while (slice_size < low_bytes && *qp > qp_start_table[avctx->profile]);
        }
    }

    buf[0] = hdr_size << 3;
    buf[1] = *qp;
    AV_WB16(buf + 2, y_data_size);
    AV_WB16(buf + 4, u_data_size);

    return hdr_size + y_data_size + u_data_size + v_data_size;
}

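/*
 * Encode the picture layer: an 8-byte picture header, a table of
 * big-endian 16-bit slice sizes, then the slice data itself. Each
 * macroblock row is split into slices of DEFAULT_SLICE_MB_WIDTH
 * macroblocks, with progressively smaller power-of-two slices covering
 * the remainder of the row.
 */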
static int prores_encode_picture(AVCodecContext *avctx, const AVFrame *pic,
        uint8_t *buf, const int buf_size)
{
    int mb_width = (avctx->width + 15) >> 4;
    int mb_height = (avctx->height + 15) >> 4;
    int hdr_size, sl_size, i;
    int mb_y, sl_data_size, qp;
    int unsafe_bot, unsafe_right;
    uint8_t *sl_data, *sl_data_sizes;
    int slice_per_line = 0, rem = mb_width;

    for (i = av_log2(DEFAULT_SLICE_MB_WIDTH); i >= 0; --i) {
        slice_per_line += rem >> i;
        rem &= (1 << i) - 1;
    }

    qp = qp_start_table[avctx->profile];
    hdr_size = 8; sl_data_size = buf_size - hdr_size;
    sl_data_sizes = buf + hdr_size;
    sl_data = sl_data_sizes + (slice_per_line * mb_height * 2);
    for (mb_y = 0; mb_y < mb_height; mb_y++) {
        int mb_x = 0;
        int slice_mb_count = DEFAULT_SLICE_MB_WIDTH;
        while (mb_x < mb_width) {
            while (mb_width - mb_x < slice_mb_count)
                slice_mb_count >>= 1;

            unsafe_bot   = (avctx->height & 0xf) && (mb_y == mb_height - 1);
            unsafe_right = (avctx->width  & 0xf) && (mb_x + slice_mb_count == mb_width);

            sl_size = encode_slice(avctx, pic, mb_x, mb_y, slice_mb_count,
                    sl_data, sl_data_size, unsafe_bot || unsafe_right, &qp);

            bytestream_put_be16(&sl_data_sizes, sl_size);
            sl_data      += sl_size;
            sl_data_size -= sl_size;
            mb_x         += slice_mb_count;
        }
    }

    buf[0] = hdr_size << 3;
    AV_WB32(buf + 1, sl_data - buf);
    AV_WB16(buf + 5, slice_per_line * mb_height);
    buf[7] = av_log2(DEFAULT_SLICE_MB_WIDTH) << 4;

    return sl_data - buf;
}

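/*
 * Write one complete frame: the 8-byte frame atom (size plus 'icpf'), the
 * 148-byte frame header carrying the creator tag, dimensions, flags and
 * the luma/chroma quantization matrices, followed by the picture data.
 */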
static int prores_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                               const AVFrame *pict, int *got_packet)
{
    int header_size = 148;
    uint8_t *buf;
    int pic_size, ret;
    int frame_size = FFALIGN(avctx->width, 16) * FFALIGN(avctx->height, 16) * 16 + 500 + FF_MIN_BUFFER_SIZE; //FIXME choose tighter limit

    if ((ret = ff_alloc_packet2(avctx, pkt, frame_size + FF_MIN_BUFFER_SIZE)) < 0)
        return ret;

    buf = pkt->data;
    pic_size = prores_encode_picture(avctx, pict, buf + header_size + 8,
            pkt->size - header_size - 8);

    bytestream_put_be32(&buf, pic_size + 8 + header_size);
    bytestream_put_buffer(&buf, "icpf", 4);

    bytestream_put_be16(&buf, header_size);
    bytestream_put_be16(&buf, 0);
    bytestream_put_buffer(&buf, "fmpg", 4);
    bytestream_put_be16(&buf, avctx->width);
    bytestream_put_be16(&buf, avctx->height);
    *buf++ = 0x83; // {10}(422){00}{00}(frame){11}
    *buf++ = 0;
    *buf++ = 2;
    *buf++ = 2;
    *buf++ = 6;
    *buf++ = 32;
    *buf++ = 0;
    *buf++ = 3;

    bytestream_put_buffer(&buf, QMAT_LUMA[avctx->profile],   64);
    bytestream_put_buffer(&buf, QMAT_CHROMA[avctx->profile], 64);

    pkt->flags |= AV_PKT_FLAG_KEY;
    pkt->size = pic_size + 8 + header_size;
    *got_packet = 1;

    return 0;
}

static void scale_mat(const uint8_t *src, int *dst, int scale)
{
    int i;
    for (i = 0; i < 64; i++)
        dst[i] = src[i] * scale;
}

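/*
 * Encoder init: validate the pixel format and frame width, allocate the
 * edge-fill buffers when the dimensions are not multiples of 16, pick a
 * default profile, and pre-scale the profile's quantization matrices for
 * every quantizer value from 1 to 16.
 */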
static av_cold int prores_encode_init(AVCodecContext *avctx)
{
    int i;
    ProresContext *ctx = avctx->priv_data;

    if (avctx->pix_fmt != AV_PIX_FMT_YUV422P10) {
        av_log(avctx, AV_LOG_ERROR, "need YUV422P10\n");
        return -1;
    }
    if (avctx->width & 0x1) {
        av_log(avctx, AV_LOG_ERROR,
                "frame width needs to be multiple of 2\n");
        return -1;
    }

    if ((avctx->height & 0xf) || (avctx->width & 0xf)) {
        ctx->fill_y = av_malloc(4 * (DEFAULT_SLICE_MB_WIDTH << 8));
        if (!ctx->fill_y)
            return AVERROR(ENOMEM);
        ctx->fill_u = ctx->fill_y + (DEFAULT_SLICE_MB_WIDTH << 9);
        ctx->fill_v = ctx->fill_u + (DEFAULT_SLICE_MB_WIDTH << 8);
    }

    if (avctx->profile == FF_PROFILE_UNKNOWN) {
        avctx->profile = FF_PROFILE_PRORES_STANDARD;
        av_log(avctx, AV_LOG_INFO,
                "encoding with ProRes standard (apcn) profile\n");
    } else if (avctx->profile < FF_PROFILE_PRORES_PROXY
            || avctx->profile > FF_PROFILE_PRORES_HQ) {
        av_log(avctx, AV_LOG_ERROR,
                "unknown profile %d, use [0 - apco, 1 - apcs, 2 - apcn (default), 3 - apch]\n",
                avctx->profile);
        return -1;
    }

    avctx->codec_tag = AV_RL32((const uint8_t*)profiles[avctx->profile].name);

    for (i = 1; i <= 16; i++) {
        scale_mat(QMAT_LUMA[avctx->profile],   ctx->qmat_luma[i - 1],   i);
        scale_mat(QMAT_CHROMA[avctx->profile], ctx->qmat_chroma[i - 1], i);
    }

    avctx->coded_frame = avcodec_alloc_frame();
    avctx->coded_frame->key_frame = 1;
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;

    return 0;
}

static av_cold int prores_encode_close(AVCodecContext *avctx)
{
    ProresContext *ctx = avctx->priv_data;
    av_freep(&avctx->coded_frame);
    av_freep(&ctx->fill_y);

    return 0;
}

AVCodec ff_prores_anatoliy_encoder = {
    .name           = "prores_anatoliy",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_PRORES,
    .priv_data_size = sizeof(ProresContext),
    .init           = prores_encode_init,
    .close          = prores_encode_close,
    .encode2        = prores_encode_frame,
    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUV422P10, AV_PIX_FMT_NONE},
    .long_name      = NULL_IF_CONFIG_SMALL("Apple ProRes"),
    .capabilities   = CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
    .profiles       = profiles
};

AVCodec ff_prores_encoder = {
    .name           = "prores",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_PRORES,
    .priv_data_size = sizeof(ProresContext),
    .init           = prores_encode_init,
    .close          = prores_encode_close,
    .encode2        = prores_encode_frame,
    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUV422P10, AV_PIX_FMT_NONE},
    .long_name      = NULL_IF_CONFIG_SMALL("Apple ProRes"),
    .capabilities   = CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
    .profiles       = profiles
};