/*
 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
 * the algorithm used
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * huffyuv encoder
 */

#include <assert.h>

#include "avcodec.h"
#include "huffyuv.h"
#include "huffman.h"
#include "put_bits.h"

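/* Left-predict one row: store the difference of each sample against its
 * left neighbour in dst and return the last sample of the row, which
 * becomes the starting "left" value for the next row. Rows of 32 pixels
 * or more are handed to the (possibly SIMD) diff_bytes() after a scalar
 * 16-pixel prologue. */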
static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
                                      uint8_t *src, int w, int left)
{
    int i;
    if (w < 32) {
        for (i = 0; i < w; i++) {
            const int temp = src[i];
            dst[i] = temp - left;
            left   = temp;
        }
        return left;
    } else {
        for (i = 0; i < 16; i++) {
            const int temp = src[i];
            dst[i] = temp - left;
            left   = temp;
        }
        s->dsp.diff_bytes(dst + 16, src + 16, src + 15, w - 16);
        return src[w - 1];
    }
}

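/* Left prediction for packed 32-bit RGB: each of the R, G and B channels
 * is differenced against its left neighbour independently. The first four
 * pixels (16 bytes) are done in C, the remainder via diff_bytes() with a
 * one-pixel (4-byte) displacement. */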
static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
                                             uint8_t *src, int w,
                                             int *red, int *green, int *blue)
{
    int i;
    int r, g, b;
    r = *red;
    g = *green;
    b = *blue;

    for (i = 0; i < FFMIN(w, 4); i++) {
        const int rt = src[i * 4 + R];
        const int gt = src[i * 4 + G];
        const int bt = src[i * 4 + B];
        dst[i * 4 + R] = rt - r;
        dst[i * 4 + G] = gt - g;
        dst[i * 4 + B] = bt - b;
        r = rt;
        g = gt;
        b = bt;
    }

    s->dsp.diff_bytes(dst + 16, src + 16, src + 12, w * 4 - 16);

    *red   = src[(w - 1) * 4 + R];
    *green = src[(w - 1) * 4 + G];
    *blue  = src[(w - 1) * 4 + B];
}

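/* Run-length encode one 256-entry Huffman code-length table into buf.
 * Runs longer than 7 are stored as a (length, repeat) byte pair; shorter
 * runs are packed into a single byte as length | (repeat << 5). Returns
 * the number of bytes written. */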
static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
{
    int i;
    int index = 0;

    for (i = 0; i < 256;) {
        int val    = len[i];
        int repeat = 0;

        for (; i < 256 && len[i] == val && repeat < 255; i++)
            repeat++;

        assert(val < 32 && val > 0 && repeat < 256 && repeat > 0);
        if (repeat > 7) {
            buf[index++] = val;
            buf[index++] = repeat;
        } else {
            buf[index++] = val | (repeat << 5);
        }
    }

    return index;
}

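/* Initialize the encoder: derive the bitstream format from the pixel
 * format, write the 4-byte global header and the three run-length coded
 * code-length tables into extradata, and seed the symbol statistics from
 * a first-pass stats file if present, otherwise from a fixed peaked
 * distribution. */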
static av_cold int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;

    ff_huffyuv_common_init(avctx);

    avctx->extradata = av_mallocz(1024*30); // 256*3+4 == 772
    avctx->stats_out = av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
    s->version = 2;

    avctx->coded_frame = &s->picture;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
        s->bitstream_bpp = 12;
        break;
    case AV_PIX_FMT_YUV422P:
        s->bitstream_bpp = 16;
        break;
    case AV_PIX_FMT_RGB32:
        s->bitstream_bpp = 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return -1;
    }
    avctx->bits_per_coded_sample = s->bitstream_bpp;
    s->decorrelate = s->bitstream_bpp >= 24;
    s->predictor   = avctx->prediction_method;
    s->interlaced  = avctx->flags & CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if (avctx->context_model == 1) {
        s->context = avctx->context_model;
        if (s->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {
            av_log(avctx, AV_LOG_ERROR,
                   "context=1 is not compatible with "
                   "2 pass huffyuv encoding\n");
            return -1;
        }
    } else
        s->context = 0;

    if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
        if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error: YV12 is not supported by huffyuv; use "
                   "vcodec=ffvhuff or format=422p\n");
            return -1;
        }
        if (avctx->context_model) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error: per-frame huffman tables are not supported "
                   "by huffyuv; use vcodec=ffvhuff\n");
            return -1;
        }
        if (s->interlaced != (s->height > 288))
            av_log(avctx, AV_LOG_INFO,
                   "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN) {
        av_log(avctx, AV_LOG_ERROR,
               "Error: RGB is incompatible with median predictor\n");
        return -1;
    }

    ((uint8_t*)avctx->extradata)[0] = s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[1] = s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2] = s->interlaced ? 0x10 : 0x20;
    if (s->context)
        ((uint8_t*)avctx->extradata)[2] |= 0x40;
    ((uint8_t*)avctx->extradata)[3] = 0;
    s->avctx->extradata_size = 4;

    if (avctx->stats_in) {
        char *p = avctx->stats_in;

        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++)
                s->stats[i][j] = 1;

        for (;;) {
            for (i = 0; i < 3; i++) {
                char *next;

                for (j = 0; j < 256; j++) {
                    s->stats[i][j] += strtol(p, &next, 0);
                    if (next == p) return -1;
                    p = next;
                }
            }
            if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
        }
    } else {
        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++) {
                int d = FFMIN(j, 256 - j);

                s->stats[i][j] = 100000000 / (d + 1);
            }
    }

    for (i = 0; i < 3; i++) {
        ff_huff_gen_len_table(s->len[i], s->stats[i]);

        if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i]) < 0) {
            return -1;
        }

        s->avctx->extradata_size +=
            store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if (s->context) {
        for (i = 0; i < 3; i++) {
            int pels = s->width * s->height / (i ? 40 : 10);

            for (j = 0; j < 256; j++) {
                int d = FFMIN(j, 256 - j);

                s->stats[i][j] = pels / (d + 1);
            }
        }
    } else {
        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++)
                s->stats[i][j] = 0;
    }

    ff_huffyuv_alloc_temp(s);

    s->picture_number = 0;

    return 0;
}

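/* Huffman-code one row of 4:2:2-ordered samples (Y0 U Y1 V) starting at
 * the given offset; with per-frame context enabled the symbol statistics
 * are updated while writing. */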
static int encode_422_bitstream(HYuvContext *s, int offset, int count)
{
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset / 2;
    const uint8_t *v = s->temp[2] + offset / 2;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 2 * 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4\
    int y0 = y[2 * i];\
    int y1 = y[2 * i + 1];\
    int u0 = u[i];\
    int v0 = v[i];

    count /= 2;

    if (s->flags & CODEC_FLAG_PASS1) {
        for (i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if (s->context) {
        for (i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}

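/* Huffman-code one luma-only row; used for the chroma-less lines that a
 * 4:2:0 frame produces in this 4:2:2-oriented bitstream. */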
static int encode_gray_bitstream(HYuvContext *s, int count)
{
    int i;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2\
    int y0 = s->temp[0][2 * i];\
    int y1 = s->temp[0][2 * i + 1];
#define STAT2\
    s->stats[0][y0]++;\
    s->stats[0][y1]++;
#define WRITE2\
    put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
    put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count /= 2;

    if (s->flags & CODEC_FLAG_PASS1) {
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
        }
    }
    if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if (s->context) {
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
            WRITE2;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}

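/* Huffman-code one row of RGB samples. LOAD3 subtracts green from the
 * blue and red channels first, matching the decorrelate flag written
 * into byte 0 of the extradata. */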
static int encode_bgr_bitstream(HYuvContext *s, int count)
{
    int i;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 3 * 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD3\
    int g = s->temp[0][4 * i + G];\
    int b = (s->temp[0][4 * i + B] - g) & 0xff;\
    int r = (s->temp[0][4 * i + R] - g) & 0xff;
#define STAT3\
    s->stats[0][b]++;\
    s->stats[1][g]++;\
    s->stats[2][r]++;
#define WRITE3\
    put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
    put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
    put_bits(&s->pb, s->len[2][r], s->bits[2][r]);

    if ((s->flags & CODEC_FLAG_PASS1) &&
        (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
        for (i = 0; i < count; i++) {
            LOAD3;
            STAT3;
        }
    } else if (s->context || (s->flags & CODEC_FLAG_PASS1)) {
        for (i = 0; i < count; i++) {
            LOAD3;
            STAT3;
            WRITE3;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD3;
            WRITE3;
        }
    }
    return 0;
}

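/* Encode one frame: with per-frame context, emit fresh code tables and
 * halve the accumulated statistics; then apply the selected predictor
 * (left, plane or median) row by row, entropy-code the residuals, pad to
 * a 32-bit boundary and byte-swap the bitstream into the output packet. */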
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    HYuvContext *s = avctx->priv_data;
    const int width  = s->width;
    const int width2 = s->width >> 1;
    const int height = s->height;
    const int fake_ystride = s->interlaced ? pict->linesize[0] * 2 : pict->linesize[0];
    const int fake_ustride = s->interlaced ? pict->linesize[1] * 2 : pict->linesize[1];
    const int fake_vstride = s->interlaced ? pict->linesize[2] * 2 : pict->linesize[2];
    AVFrame * const p = &s->picture;
    int i, j, size = 0, ret;

    if (!pkt->data &&
        (ret = av_new_packet(pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error allocating output packet.\n");
        return ret;
    }

    *p = *pict;
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    if (s->context) {
        for (i = 0; i < 3; i++) {
            ff_huff_gen_len_table(s->len[i], s->stats[i]);
            if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i]) < 0)
                return -1;
            size += store_table(s, s->len[i], &pkt->data[size]);
        }

        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, pkt->data + size, pkt->size - size);

    if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
        avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
        int lefty, leftu, leftv, y, cy;

        put_bits(&s->pb, 8, leftv = p->data[2][0]);
        put_bits(&s->pb, 8, lefty = p->data[0][1]);
        put_bits(&s->pb, 8, leftu = p->data[1][0]);
        put_bits(&s->pb, 8, p->data[0][0]);

        lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
        leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);

        encode_422_bitstream(s, 2, width - 2);

        if (s->predictor == MEDIAN) {
            int lefttopy, lefttopu, lefttopv;
            cy = y = 1;
            if (s->interlaced) {
                lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
                leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
                leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);

                encode_422_bitstream(s, 0, width);
                y++; cy++;
            }

            lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
            leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
            leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);

            encode_422_bitstream(s, 0, 4);

            lefttopy = p->data[0][3];
            lefttopu = p->data[1][1];
            lefttopv = p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0] + 4, p->data[0] + fake_ystride + 4, width  - 4, &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1] + 2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2] + 2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width - 4);
            y++; cy++;

            for (; y < height; y++, cy++) {
                uint8_t *ydst, *udst, *vdst;

                if (s->bitstream_bpp == 12) {
                    while (2 * cy > y) {
                        ydst = p->data[0] + p->linesize[0] * y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if (y >= height) break;
                }
                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width,  &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, 0, width);
            }
        } else {
            for (cy = y = 1; y < height; y++, cy++) {
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if (s->bitstream_bpp == 12) {
                    ydst = p->data[0] + p->linesize[0] * y;

                    if (s->predictor == PLANE && s->interlaced < y) {
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                        lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width, lefty);
                    } else {
                        lefty = sub_left_prediction(s, s->temp[0], ydst, width, lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if (y >= height) break;
                }

                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                if (s->predictor == PLANE && s->interlaced < cy) {
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width,  lefty);
                    leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                } else {
                    lefty = sub_left_prediction(s, s->temp[0], ydst, width,  lefty);
                    leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, 0, width);
            }
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB32) {
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr = data[R]);
        put_bits(&s->pb, 8, leftg = data[G]);
        put_bits(&s->pb, 8, leftb = data[B]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1, &leftr, &leftg, &leftb);
        encode_bgr_bitstream(s, width - 1);

        for (y = 1; y < s->height; y++) {
            uint8_t *dst = data + y * stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
            } else {
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            }
            encode_bgr_bitstream(s, width);
        }
    } else {
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    size += (put_bits_count(&s->pb) + 31) / 8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size /= 4;

    if ((s->flags & CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
        int j;
        char *p = avctx->stats_out;
        char *end = p + 1024*30;
        for (i = 0; i < 3; i++) {
            for (j = 0; j < 256; j++) {
                snprintf(p, end - p, "%"PRIu64" ", s->stats[i][j]);
                p += strlen(p);
                s->stats[i][j] = 0;
            }
            snprintf(p, end - p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';
    if (!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
        flush_put_bits(&s->pb);
        s->dsp.bswap_buf((uint32_t*)pkt->data, (uint32_t*)pkt->data, size);
    }

    s->picture_number++;

    pkt->size   = size * 4;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}

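/* Free the shared huffyuv state along with the extradata and two-pass
 * statistics buffers allocated in encode_init(). */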
static av_cold int encode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    ff_huffyuv_common_end(s);

    av_freep(&avctx->extradata);
    av_freep(&avctx->stats_out);

    return 0;
}

#if CONFIG_HUFFYUV_ENCODER
AVCodec ff_huffyuv_encoder = {
    .name           = "huffyuv",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_HUFFYUV,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_ENCODER
AVCodec ff_ffvhuff_encoder = {
    .name           = "ffvhuff",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FFVHUFF,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif