/*
 * MJPEG encoder
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2003 Alex Beregszaszi
 * Copyright (c) 2003-2004 Michael Niedermayer
 *
 * Support for external huffman table, various fixes (AVID workaround),
 * aspecting, new decode_frame mechanism and apple mjpeg-b support
 * by Alex Beregszaszi
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * MJPEG encoder.
 */
#include "libavutil/pixdesc.h"

#include "avcodec.h"
#include "jpegtables.h"
#include "mjpegenc_common.h"
#include "mjpegenc_huffman.h"
#include "mpegvideo.h"
#include "mjpeg.h"
#include "mjpegenc.h"
#include "profiles.h"
static av_cold void init_uni_ac_vlc(const uint8_t huff_size_ac[256],
                                    uint8_t *uni_ac_vlc_len)
{
    for (int i = 0; i < 128; i++) {
        int level = i - 64;
        if (!level)
            continue;

        for (int run = 0; run < 64; run++) {
            int len, code, nbits;
            int alevel = FFABS(level);

            // Runs of 16 or more zeros cost one ZRL (0xF0) code per 16 zeros.
            len = (run >> 4) * huff_size_ac[0xf0];

            nbits = av_log2_16bit(alevel) + 1;
            code  = ((15 & run) << 4) | nbits;

            len += huff_size_ac[code] + nbits;

            uni_ac_vlc_len[UNI_AC_ENC_INDEX(run, i)] = len;
            // We ignore EOB as it's just a constant which does not change generally.
        }
    }
}

#if CONFIG_MJPEG_ENCODER
/**
 * Encodes and outputs the entire frame in the JPEG format.
 *
 * @param s The MpegEncContext.
 */
static void mjpeg_encode_picture_frame(MpegEncContext *s)
{
    int nbits, code, table_id;
    MJpegContext *m = s->mjpeg_ctx;
    uint8_t  *huff_size[4] = { m->huff_size_dc_luminance,
                               m->huff_size_dc_chrominance,
                               m->huff_size_ac_luminance,
                               m->huff_size_ac_chrominance };
    uint16_t *huff_code[4] = { m->huff_code_dc_luminance,
                               m->huff_code_dc_chrominance,
                               m->huff_code_ac_luminance,
                               m->huff_code_ac_chrominance };
    size_t total_bits = 0;
    size_t bytes_needed;

    s->header_bits = get_bits_diff(s);

    // Estimate the total size first
    for (int i = 0; i < m->huff_ncode; i++) {
        table_id = m->huff_buffer[i].table_id;
        code = m->huff_buffer[i].code;
        nbits = code & 0xf;

        total_bits += huff_size[table_id][code] + nbits;
    }

    bytes_needed = (total_bits + 7) / 8;
    ff_mpv_reallocate_putbitbuffer(s, bytes_needed, bytes_needed);
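
    // Flush the buffered codes: emit each Huffman code, followed by the
    // amplitude (mantissa) bits whenever the code's size nibble is non-zero.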
    for (int i = 0; i < m->huff_ncode; i++) {
        table_id = m->huff_buffer[i].table_id;
        code = m->huff_buffer[i].code;
        nbits = code & 0xf;

        put_bits(&s->pb, huff_size[table_id][code], huff_code[table_id][code]);
        if (nbits != 0) {
            put_sbits(&s->pb, nbits, m->huff_buffer[i].mant);
        }
    }

    m->huff_ncode = 0;
    s->i_tex_bits = get_bits_diff(s);
}

/**
 * Builds all 4 optimal Huffman tables.
 *
 * Uses the data stored in the JPEG buffer to compute the tables.
 * Stores the Huffman tables in the bits_* and val_* arrays in the MJpegContext.
 *
 * @param m MJpegContext containing the JPEG buffer.
 */
static void mjpeg_build_optimal_huffman(MJpegContext *m)
{
    MJpegEncHuffmanContext dc_luminance_ctx;
    MJpegEncHuffmanContext dc_chrominance_ctx;
    MJpegEncHuffmanContext ac_luminance_ctx;
    MJpegEncHuffmanContext ac_chrominance_ctx;
    MJpegEncHuffmanContext *ctx[4] = { &dc_luminance_ctx,
                                       &dc_chrominance_ctx,
                                       &ac_luminance_ctx,
                                       &ac_chrominance_ctx };
    for (int i = 0; i < 4; i++)
        ff_mjpeg_encode_huffman_init(ctx[i]);

    for (int i = 0; i < m->huff_ncode; i++) {
        int table_id = m->huff_buffer[i].table_id;
        int code     = m->huff_buffer[i].code;

        ff_mjpeg_encode_huffman_increment(ctx[table_id], code);
    }

    ff_mjpeg_encode_huffman_close(&dc_luminance_ctx,
                                  m->bits_dc_luminance,
                                  m->val_dc_luminance, 12);
    ff_mjpeg_encode_huffman_close(&dc_chrominance_ctx,
                                  m->bits_dc_chrominance,
                                  m->val_dc_chrominance, 12);
    ff_mjpeg_encode_huffman_close(&ac_luminance_ctx,
                                  m->bits_ac_luminance,
                                  m->val_ac_luminance, 256);
    ff_mjpeg_encode_huffman_close(&ac_chrominance_ctx,
                                  m->bits_ac_chrominance,
                                  m->val_ac_chrominance, 256);

    ff_mjpeg_build_huffman_codes(m->huff_size_dc_luminance,
                                 m->huff_code_dc_luminance,
                                 m->bits_dc_luminance,
                                 m->val_dc_luminance);
    ff_mjpeg_build_huffman_codes(m->huff_size_dc_chrominance,
                                 m->huff_code_dc_chrominance,
                                 m->bits_dc_chrominance,
                                 m->val_dc_chrominance);
    ff_mjpeg_build_huffman_codes(m->huff_size_ac_luminance,
                                 m->huff_code_ac_luminance,
                                 m->bits_ac_luminance,
                                 m->val_ac_luminance);
    ff_mjpeg_build_huffman_codes(m->huff_size_ac_chrominance,
                                 m->huff_code_ac_chrominance,
                                 m->bits_ac_chrominance,
                                 m->val_ac_chrominance);
}
#endif

/**
 * Writes the complete JPEG frame when optimal huffman tables are enabled,
 * otherwise writes the stuffing.
 *
 * Header + values + stuffing.
 *
 * @param s The MpegEncContext.
 * @return int Error code, 0 if successful.
 */
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
{
    PutBitContext *pbc = &s->pb;
    int mb_y = s->mb_y - !s->mb_x;
    int ret;

#if CONFIG_MJPEG_ENCODER
    if (s->huffman == HUFFMAN_TABLE_OPTIMAL) {
        MJpegContext *m = s->mjpeg_ctx;

        mjpeg_build_optimal_huffman(m);

        // Replace the VLCs with the optimal ones.
        // The default ones may be used for trellis during quantization.
        init_uni_ac_vlc(m->huff_size_ac_luminance,   m->uni_ac_vlc_len);
        init_uni_ac_vlc(m->huff_size_ac_chrominance, m->uni_chroma_ac_vlc_len);
        s->intra_ac_vlc_length      =
        s->intra_ac_vlc_last_length = m->uni_ac_vlc_len;
        s->intra_chroma_ac_vlc_length      =
        s->intra_chroma_ac_vlc_last_length = m->uni_chroma_ac_vlc_len;

        ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
                                       s->pred, s->intra_matrix, s->chroma_intra_matrix);
        mjpeg_encode_picture_frame(s);
    }
#endif

    ret = ff_mpv_reallocate_putbitbuffer(s, put_bits_count(&s->pb) / 8 + 100,
                                            put_bits_count(&s->pb) / 4 + 1000);
    if (ret < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Buffer reallocation failed\n");
        goto fail;
    }

    ff_mjpeg_escape_FF(pbc, s->esc_pos);
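
    /* Under slice threading, every slice except the last is terminated with a
     * restart marker; the marker index cycles through RST0..RST7 by row. */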
    if ((s->avctx->active_thread_type & FF_THREAD_SLICE) && mb_y < s->mb_height - 1)
        put_marker(pbc, RST0 + (mb_y & 7));
    s->esc_pos = put_bytes_count(pbc, 0);

fail:
    for (int i = 0; i < 3; i++)
        s->last_dc[i] = 128 << s->intra_dc_precision;

    return ret;
}

static int alloc_huffman(MpegEncContext *s)
{
    MJpegContext *m = s->mjpeg_ctx;
    size_t num_mbs, num_blocks, num_codes;
    int blocks_per_mb;

    // We need to init this here as the mjpeg init is called before the common init.
    s->mb_width  = (s->width  + 15) / 16;
    s->mb_height = (s->height + 15) / 16;

    switch (s->chroma_format) {
    case CHROMA_420: blocks_per_mb =  6; break;
    case CHROMA_422: blocks_per_mb =  8; break;
    case CHROMA_444: blocks_per_mb = 12; break;
    default: av_assert0(0);
    }

    // Make sure we have enough space to hold this frame.
    num_mbs    = s->mb_width * s->mb_height;
    num_blocks = num_mbs * blocks_per_mb;
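    // Worst case: all 64 coefficients of an 8x8 block produce a code.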
    num_codes  = num_blocks * 64;

    m->huff_buffer = av_malloc_array(num_codes, sizeof(MJpegHuffmanCode));
    if (!m->huff_buffer)
        return AVERROR(ENOMEM);
    return 0;
}

av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
{
    MJpegContext *m;
    int ret;

    av_assert0(s->slice_context_count == 1);

    /* The following check is automatically true for AMV,
     * but it doesn't hurt either. */
    ret = ff_mjpeg_encode_check_pix_fmt(s->avctx);
    if (ret < 0)
        return ret;

    if (s->width > 65500 || s->height > 65500) {
        av_log(s, AV_LOG_ERROR, "JPEG does not support resolutions above 65500x65500\n");
        return AVERROR(EINVAL);
    }

    m = av_mallocz(sizeof(MJpegContext));
    if (!m)
        return AVERROR(ENOMEM);

    s->min_qcoeff = -1023;
    s->max_qcoeff =  1023;

    // Build default Huffman tables.
    // These may be overwritten later with more optimal Huffman tables, but
    // they are needed at least right now for some processes like trellis.
    ff_mjpeg_build_huffman_codes(m->huff_size_dc_luminance,
                                 m->huff_code_dc_luminance,
                                 avpriv_mjpeg_bits_dc_luminance,
                                 avpriv_mjpeg_val_dc);
    ff_mjpeg_build_huffman_codes(m->huff_size_dc_chrominance,
                                 m->huff_code_dc_chrominance,
                                 avpriv_mjpeg_bits_dc_chrominance,
                                 avpriv_mjpeg_val_dc);
    ff_mjpeg_build_huffman_codes(m->huff_size_ac_luminance,
                                 m->huff_code_ac_luminance,
                                 avpriv_mjpeg_bits_ac_luminance,
                                 avpriv_mjpeg_val_ac_luminance);
    ff_mjpeg_build_huffman_codes(m->huff_size_ac_chrominance,
                                 m->huff_code_ac_chrominance,
                                 avpriv_mjpeg_bits_ac_chrominance,
                                 avpriv_mjpeg_val_ac_chrominance);

    init_uni_ac_vlc(m->huff_size_ac_luminance,   m->uni_ac_vlc_len);
    init_uni_ac_vlc(m->huff_size_ac_chrominance, m->uni_chroma_ac_vlc_len);
    s->intra_ac_vlc_length      =
    s->intra_ac_vlc_last_length = m->uni_ac_vlc_len;
    s->intra_chroma_ac_vlc_length      =
    s->intra_chroma_ac_vlc_last_length = m->uni_chroma_ac_vlc_len;

    // Buffers start out empty.
    m->huff_ncode = 0;
    s->mjpeg_ctx = m;

    if (s->huffman == HUFFMAN_TABLE_OPTIMAL)
        return alloc_huffman(s);

    return 0;
}

av_cold void ff_mjpeg_encode_close(MpegEncContext *s)
{
    if (s->mjpeg_ctx) {
        av_freep(&s->mjpeg_ctx->huff_buffer);
        av_freep(&s->mjpeg_ctx);
    }
}

/**
 * Add code and table_id to the JPEG buffer.
 *
 * @param s The MJpegContext which contains the JPEG buffer.
 * @param table_id Which Huffman table the code belongs to.
 * @param code The encoded exponent of the coefficients and the run-bits.
 */
static inline void ff_mjpeg_encode_code(MJpegContext *s, uint8_t table_id, int code)
{
    MJpegHuffmanCode *c = &s->huff_buffer[s->huff_ncode++];
    c->table_id = table_id;
    c->code = code;
}

/**
 * Add the coefficient's data to the JPEG buffer.
 *
 * @param s The MJpegContext which contains the JPEG buffer.
 * @param table_id Which Huffman table the code belongs to.
 * @param val The coefficient.
 * @param run The run-bits.
 */
static void ff_mjpeg_encode_coef(MJpegContext *s, uint8_t table_id, int val, int run)
{
    int mant, code;

    if (val == 0) {
        av_assert0(run == 0);
        ff_mjpeg_encode_code(s, table_id, 0);
    } else {
        mant = val;
        if (val < 0) {
            val = -val;
            mant--;
        }
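
        // JPEG run/size byte: high nibble holds the zero run, low nibble the
        // number of magnitude bits; negative amplitudes are stored as val - 1.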
        code = (run << 4) | (av_log2_16bit(val) + 1);

        s->huff_buffer[s->huff_ncode].mant = mant;
        ff_mjpeg_encode_code(s, table_id, code);
    }
}

/**
 * Add the block's data into the JPEG buffer.
 *
 * @param s The MpegEncContext that contains the JPEG buffer.
 * @param block The block.
 * @param n The block's index or number.
 */
static void record_block(MpegEncContext *s, int16_t *block, int n)
{
    int i, j, table_id;
    int component, dc, last_index, val, run;
    MJpegContext *m = s->mjpeg_ctx;

    /* DC coef */
    component = (n <= 3 ? 0 : (n & 1) + 1);
    table_id  = (n <= 3 ? 0 : 1);
    dc = block[0]; /* overflow is impossible */
    val = dc - s->last_dc[component];

    ff_mjpeg_encode_coef(m, table_id, val, 0);

    s->last_dc[component] = dc;

    /* AC coefs */
    run = 0;
    last_index = s->block_last_index[n];
    table_id |= 2;

    for (i = 1; i <= last_index; i++) {
        j = s->intra_scantable.permutated[i];
        val = block[j];

        if (val == 0) {
            run++;
        } else {
            while (run >= 16) {
                ff_mjpeg_encode_code(m, table_id, 0xf0);
                run -= 16;
            }
            ff_mjpeg_encode_coef(m, table_id, val, run);
            run = 0;
        }
    }

    /* output EOB only if not already 64 values */
    if (last_index < 63 || run != 0)
        ff_mjpeg_encode_code(m, table_id, 0);
}

static void encode_block(MpegEncContext *s, int16_t *block, int n)
{
    int mant, nbits, code, i, j;
    int component, dc, run, last_index, val;
    MJpegContext *m = s->mjpeg_ctx;
    uint8_t *huff_size_ac;
    uint16_t *huff_code_ac;

    /* DC coef */
    component = (n <= 3 ? 0 : (n & 1) + 1);
    dc = block[0]; /* overflow is impossible */
    val = dc - s->last_dc[component];
    if (n < 4) {
        ff_mjpeg_encode_dc(&s->pb, val, m->huff_size_dc_luminance, m->huff_code_dc_luminance);
        huff_size_ac = m->huff_size_ac_luminance;
        huff_code_ac = m->huff_code_ac_luminance;
    } else {
        ff_mjpeg_encode_dc(&s->pb, val, m->huff_size_dc_chrominance, m->huff_code_dc_chrominance);
        huff_size_ac = m->huff_size_ac_chrominance;
        huff_code_ac = m->huff_code_ac_chrominance;
    }
    s->last_dc[component] = dc;

    /* AC coefs */
    run = 0;
    last_index = s->block_last_index[n];
    for (i = 1; i <= last_index; i++) {
        j = s->intra_scantable.permutated[i];
        val = block[j];
        if (val == 0) {
            run++;
        } else {
            while (run >= 16) {
                put_bits(&s->pb, huff_size_ac[0xf0], huff_code_ac[0xf0]);
                run -= 16;
            }
            mant = val;
            if (val < 0) {
                val = -val;
                mant--;
            }

            nbits = av_log2_16bit(val) + 1;
            code = (run << 4) | nbits;

            put_bits(&s->pb, huff_size_ac[code], huff_code_ac[code]);
            put_sbits(&s->pb, nbits, mant);
            run = 0;
        }
    }

    /* output EOB only if not already 64 values */
    if (last_index < 63 || run != 0)
        put_bits(&s->pb, huff_size_ac[0], huff_code_ac[0]);
}

void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
{
    int i;
    if (s->huffman == HUFFMAN_TABLE_OPTIMAL) {
        if (s->chroma_format == CHROMA_444) {
            record_block(s, block[0], 0);
            record_block(s, block[2], 2);
            record_block(s, block[4], 4);
            record_block(s, block[8], 8);
            record_block(s, block[5], 5);
            record_block(s, block[9], 9);

            if (16 * s->mb_x + 8 < s->width) {
                record_block(s, block[1], 1);
                record_block(s, block[3], 3);
                record_block(s, block[6], 6);
                record_block(s, block[10], 10);
                record_block(s, block[7], 7);
                record_block(s, block[11], 11);
            }
        } else {
            for (i = 0; i < 5; i++) {
                record_block(s, block[i], i);
            }
            if (s->chroma_format == CHROMA_420) {
                record_block(s, block[5], 5);
            } else {
                record_block(s, block[6], 6);
                record_block(s, block[5], 5);
                record_block(s, block[7], 7);
            }
        }
    } else {
        if (s->chroma_format == CHROMA_444) {
            encode_block(s, block[0], 0);
            encode_block(s, block[2], 2);
            encode_block(s, block[4], 4);
            encode_block(s, block[8], 8);
            encode_block(s, block[5], 5);
            encode_block(s, block[9], 9);

            if (16 * s->mb_x + 8 < s->width) {
                encode_block(s, block[1], 1);
                encode_block(s, block[3], 3);
                encode_block(s, block[6], 6);
                encode_block(s, block[10], 10);
                encode_block(s, block[7], 7);
                encode_block(s, block[11], 11);
            }
        } else {
            for (i = 0; i < 5; i++) {
                encode_block(s, block[i], i);
            }
            if (s->chroma_format == CHROMA_420) {
                encode_block(s, block[5], 5);
            } else {
                encode_block(s, block[6], 6);
                encode_block(s, block[5], 5);
                encode_block(s, block[7], 7);
            }
        }

        s->i_tex_bits += get_bits_diff(s);
    }
}

#if CONFIG_AMV_ENCODER
// maximum over s->mjpeg_vsample[i]
#define V_MAX 2
static int amv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *pic_arg, int *got_packet)
{
    MpegEncContext *s = avctx->priv_data;
    AVFrame *pic;
    int i, ret;
    int chroma_h_shift, chroma_v_shift;

    av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);

    if ((avctx->height & 15) && avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
        av_log(avctx, AV_LOG_ERROR,
               "Heights which are not a multiple of 16 might fail with some decoders, "
               "use vstrict=-1 / -strict -1 to use %d anyway.\n", avctx->height);
        av_log(avctx, AV_LOG_WARNING, "If you have a device that plays AMV videos, please test if videos "
               "with such heights work with it and report your findings to ffmpeg-devel@ffmpeg.org\n");
        return AVERROR_EXPERIMENTAL;
    }

    pic = av_frame_clone(pic_arg);
    if (!pic)
        return AVERROR(ENOMEM);
    // picture should be flipped upside-down
    for (i = 0; i < 3; i++) {
        int vsample = i ? 2 >> chroma_v_shift : 2;

        pic->data[i] += pic->linesize[i] * (vsample * s->height / V_MAX - 1);
        pic->linesize[i] *= -1;
    }
    ret = ff_mpv_encode_picture(avctx, pkt, pic, got_packet);
    av_frame_free(&pic);
    return ret;
}
#endif

#define OFFSET(x) offsetof(MpegEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    FF_MPV_COMMON_OPTS
    { "pred", "Prediction method", OFFSET(pred), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 3, VE, "pred" },
        { "left",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, "pred" },
        { "plane",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 2 }, INT_MIN, INT_MAX, VE, "pred" },
        { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 3 }, INT_MIN, INT_MAX, VE, "pred" },
    { "huffman", "Huffman table strategy", OFFSET(huffman), AV_OPT_TYPE_INT, { .i64 = HUFFMAN_TABLE_OPTIMAL }, 0, NB_HUFFMAN_TABLE_OPTION - 1, VE, "huffman" },
        { "default", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = HUFFMAN_TABLE_DEFAULT }, INT_MIN, INT_MAX, VE, "huffman" },
        { "optimal", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = HUFFMAN_TABLE_OPTIMAL }, INT_MIN, INT_MAX, VE, "huffman" },
    { NULL },
};

#if CONFIG_MJPEG_ENCODER
static const AVClass mjpeg_class = {
    .class_name = "mjpeg encoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_mjpeg_encoder = {
    .name           = "mjpeg",
    .long_name      = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MJPEG,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_mpv_encode_init,
    .encode2        = ff_mpv_encode_picture,
    .close          = ff_mpv_encode_end,
    .capabilities   = AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
    .pix_fmts       = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
        AV_PIX_FMT_YUV420P,  AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_NONE
    },
    .priv_class     = &mjpeg_class,
    .profiles       = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles),
};
#endif

#if CONFIG_AMV_ENCODER
static const AVClass amv_class = {
    .class_name = "amv encoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_amv_encoder = {
    .name           = "amv",
    .long_name      = NULL_IF_CONFIG_SMALL("AMV Video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_AMV,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_mpv_encode_init,
    .encode2        = amv_encode_picture,
    .close          = ff_mpv_encode_end,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
    .pix_fmts       = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_NONE
    },
    .priv_class     = &amv_class,
};
#endif
  569. #endif