/*
 * Copyright (c) 2002-2014 Michael Niedermayer <michaelni@gmx.at>
 *
 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
 * the algorithm used
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * yuva, gray, 4:4:4, 4:1:1, 4:1:0 and >8 bit per sample support sponsored by NOA
 */

/**
 * @file
 * huffyuv encoder
 */
#include "avcodec.h"
#include "huffyuv.h"
#include "huffman.h"
#include "huffyuvencdsp.h"
#include "internal.h"
#include "lossless_videoencdsp.h"
#include "put_bits.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
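
/* Elementwise dst = src0 - src1, dispatching to the 8-bit byte routine or
 * the 16-bit routine (masked to s->n - 1) depending on the sample depth. */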
static inline void diff_bytes(HYuvContext *s, uint8_t *dst,
                              const uint8_t *src0, const uint8_t *src1, int w)
{
    if (s->bps <= 8) {
        s->llvidencdsp.diff_bytes(dst, src0, src1, w);
    } else {
        s->hencdsp.diff_int16((uint16_t *)dst, (const uint16_t *)src0,
                              (const uint16_t *)src1, s->n - 1, w);
    }
}
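
/* Left prediction: dst[i] = src[i] - src[i-1], seeded with the incoming
 * left value.  The first FFMIN(w, 32) samples run in a scalar loop,
 * presumably so the (possibly SIMD) DSP routine only ever starts at
 * offset 32; returns the row's last sample as the next left value. */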
static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
                                      const uint8_t *src, int w, int left)
{
    int i;
    int min_width = FFMIN(w, 32);

    if (s->bps <= 8) {
        for (i = 0; i < min_width; i++) { /* scalar loop before dsp call */
            const int temp = src[i];
            dst[i] = temp - left;
            left   = temp;
        }
        if (w < 32)
            return left;
        s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 31, w - 32);
        return src[w-1];
    } else {
        const uint16_t *src16 = (const uint16_t *)src;
        uint16_t       *dst16 = (uint16_t *)dst;
        for (i = 0; i < min_width; i++) { /* scalar loop before dsp call */
            const int temp = src16[i];
            dst16[i] = temp - left;
            left     = temp;
        }
        if (w < 32)
            return left;
        s->hencdsp.diff_int16(dst16 + 32, src16 + 32, src16 + 31, s->n - 1, w - 32);
        return src16[w-1];
    }
}
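
/* Left prediction over packed BGRA: each of the four channels is differenced
 * against its value in the previous pixel.  The first FFMIN(w, 8) pixels
 * (32 bytes) run in scalar code, the remainder through diff_bytes. */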
static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
                                             const uint8_t *src, int w,
                                             int *red, int *green, int *blue,
                                             int *alpha)
{
    int i;
    int r, g, b, a;
    int min_width = FFMIN(w, 8);
    r = *red;
    g = *green;
    b = *blue;
    a = *alpha;

    for (i = 0; i < min_width; i++) {
        const int rt = src[i * 4 + R];
        const int gt = src[i * 4 + G];
        const int bt = src[i * 4 + B];
        const int at = src[i * 4 + A];
        dst[i * 4 + R] = rt - r;
        dst[i * 4 + G] = gt - g;
        dst[i * 4 + B] = bt - b;
        dst[i * 4 + A] = at - a;
        r = rt;
        g = gt;
        b = bt;
        a = at;
    }

    s->llvidencdsp.diff_bytes(dst + 32, src + 32, src + 32 - 4, w * 4 - 32);

    *red   = src[(w - 1) * 4 + R];
    *green = src[(w - 1) * 4 + G];
    *blue  = src[(w - 1) * 4 + B];
    *alpha = src[(w - 1) * 4 + A];
}
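
/* Same left prediction for packed RGB24: 16 scalar pixels (48 bytes) first,
 * then diff_bytes for the rest of the row. */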
static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst,
                                             uint8_t *src, int w,
                                             int *red, int *green, int *blue)
{
    int i;
    int r, g, b;
    r = *red;
    g = *green;
    b = *blue;
    for (i = 0; i < FFMIN(w, 16); i++) {
        const int rt = src[i * 3 + 0];
        const int gt = src[i * 3 + 1];
        const int bt = src[i * 3 + 2];
        dst[i * 3 + 0] = rt - r;
        dst[i * 3 + 1] = gt - g;
        dst[i * 3 + 2] = bt - b;
        r = rt;
        g = gt;
        b = bt;
    }

    s->llvidencdsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w * 3 - 48);

    *red   = src[(w - 1) * 3 + 0];
    *green = src[(w - 1) * 3 + 1];
    *blue  = src[(w - 1) * 3 + 2];
}
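
/* Median prediction against the row above, again split into an 8-bit and a
 * 16-bit DSP path on the sample depth. */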
static void sub_median_prediction(HYuvContext *s, uint8_t *dst,
                                  const uint8_t *src1, const uint8_t *src2,
                                  int w, int *left, int *left_top)
{
    if (s->bps <= 8) {
        s->llvidencdsp.sub_median_pred(dst, src1, src2, w, left, left_top);
    } else {
        s->hencdsp.sub_hfyu_median_pred_int16((uint16_t *)dst,
                                              (const uint16_t *)src1,
                                              (const uint16_t *)src2,
                                              s->n - 1, w, left, left_top);
    }
}
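
/* Run-length encode one table of code lengths into buf: runs longer than 7
 * are stored as a (length, repeat) byte pair, shorter runs pack the repeat
 * count into the upper 3 bits of the length byte.  Returns bytes written. */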
static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
{
    int i;
    int index = 0;
    int n = s->vlc_n;

    for (i = 0; i < n;) {
        int val = len[i];
        int repeat = 0;

        for (; i < n && len[i] == val && repeat < 255; i++)
            repeat++;

        av_assert0(val < 32 && val > 0 && repeat < 256 && repeat > 0);
        if (repeat > 7) {
            buf[index++] = val;
            buf[index++] = repeat;
        } else {
            buf[index++] = val | (repeat << 5);
        }
    }

    return index;
}
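
/* Build the per-plane code-length tables from the gathered stats, derive
 * the bit patterns, and serialize each table with store_table().  Version 3
 * streams store one table per coded plane instead of a fixed three. */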
static int store_huffman_tables(HYuvContext *s, uint8_t *buf)
{
    int i, ret;
    int size = 0;
    int count = 3;

    if (s->version > 2)
        count = 1 + s->alpha + 2*s->chroma;

    for (i = 0; i < count; i++) {
        if ((ret = ff_huff_gen_len_table(s->len[i], s->stats[i], s->vlc_n, 0)) < 0)
            return ret;

        if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n) < 0) {
            return -1;
        }

        size += store_table(s, s->len[i], buf + size);
    }
    return size;
}
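
/* Encoder init: validate the pixel format, pick the stream version, write
 * the 4-byte extradata header plus the global Huffman tables, and seed the
 * symbol statistics (from stats_in in a two-pass setup, otherwise with a
 * 1/(d*d+1) prior around zero). */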
static av_cold int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;
    int ret;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);

    ff_huffyuv_common_init(avctx);
    ff_huffyuvencdsp_init(&s->hencdsp, avctx);
    ff_llvidencdsp_init(&s->llvidencdsp);

    avctx->extradata = av_mallocz(3*MAX_N + 4);
    if (s->flags & AV_CODEC_FLAG_PASS1) {
#define STATS_OUT_SIZE 21*MAX_N*3 + 4
        avctx->stats_out = av_mallocz(STATS_OUT_SIZE); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
        if (!avctx->stats_out)
            return AVERROR(ENOMEM);
    }
    s->version = 2;

    if (!avctx->extradata)
        return AVERROR(ENOMEM);

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->context_model == 1)
        s->context = avctx->context_model;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    s->bps = desc->comp[0].depth;
    s->yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components >= 2;
    s->chroma = desc->nb_components > 2;
    s->alpha = !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);
    av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt,
                                     &s->chroma_h_shift,
                                     &s->chroma_v_shift);

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
        if (s->width & 1) {
            av_log(avctx, AV_LOG_ERROR, "Width must be even for this colorspace.\n");
            return AVERROR(EINVAL);
        }
        s->bitstream_bpp = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 12 : 16;
        break;
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_YUV440P:
    case AV_PIX_FMT_GBRP:
    case AV_PIX_FMT_GBRP9:
    case AV_PIX_FMT_GBRP10:
    case AV_PIX_FMT_GBRP12:
    case AV_PIX_FMT_GBRP14:
    case AV_PIX_FMT_GBRP16:
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_GRAY16:
    case AV_PIX_FMT_YUVA444P:
    case AV_PIX_FMT_YUVA420P:
    case AV_PIX_FMT_YUVA422P:
    case AV_PIX_FMT_GBRAP:
    case AV_PIX_FMT_YUV420P9:
    case AV_PIX_FMT_YUV420P10:
    case AV_PIX_FMT_YUV420P12:
    case AV_PIX_FMT_YUV420P14:
    case AV_PIX_FMT_YUV420P16:
    case AV_PIX_FMT_YUV422P9:
    case AV_PIX_FMT_YUV422P10:
    case AV_PIX_FMT_YUV422P12:
    case AV_PIX_FMT_YUV422P14:
    case AV_PIX_FMT_YUV422P16:
    case AV_PIX_FMT_YUV444P9:
    case AV_PIX_FMT_YUV444P10:
    case AV_PIX_FMT_YUV444P12:
    case AV_PIX_FMT_YUV444P14:
    case AV_PIX_FMT_YUV444P16:
    case AV_PIX_FMT_YUVA420P9:
    case AV_PIX_FMT_YUVA420P10:
    case AV_PIX_FMT_YUVA420P16:
    case AV_PIX_FMT_YUVA422P9:
    case AV_PIX_FMT_YUVA422P10:
    case AV_PIX_FMT_YUVA422P16:
    case AV_PIX_FMT_YUVA444P9:
    case AV_PIX_FMT_YUVA444P10:
    case AV_PIX_FMT_YUVA444P16:
        s->version = 3;
        break;
    case AV_PIX_FMT_RGB32:
        s->bitstream_bpp = 32;
        break;
    case AV_PIX_FMT_RGB24:
        s->bitstream_bpp = 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return AVERROR(EINVAL);
    }
    s->n = 1 << s->bps;
    s->vlc_n = FFMIN(s->n, MAX_VLC_N);

    avctx->bits_per_coded_sample = s->bitstream_bpp;
    s->decorrelate = s->bitstream_bpp >= 24 && !s->yuv && !(desc->flags & AV_PIX_FMT_FLAG_PLANAR);
#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->prediction_method)
        s->predictor = avctx->prediction_method;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    s->interlaced = avctx->flags & AV_CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if (s->context) {
        if (s->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) {
            av_log(avctx, AV_LOG_ERROR,
                   "context=1 is not compatible with "
                   "2 pass huffyuv encoding\n");
            return AVERROR(EINVAL);
        }
    }

    if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
        if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error: YV12 is not supported by huffyuv; use "
                   "vcodec=ffvhuff or format=422p\n");
            return AVERROR(EINVAL);
        }
#if FF_API_PRIVATE_OPT
        if (s->context) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error: per-frame huffman tables are not supported "
                   "by huffyuv; use vcodec=ffvhuff\n");
            return AVERROR(EINVAL);
        }
        if (s->version > 2) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error: ver>2 is not supported "
                   "by huffyuv; use vcodec=ffvhuff\n");
            return AVERROR(EINVAL);
        }
#endif
        if (s->interlaced != (s->height > 288))
            av_log(avctx, AV_LOG_INFO,
                   "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if (s->version > 3 && avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(avctx, AV_LOG_ERROR, "Ver > 3 is under development, files encoded with it may not be decodable with future versions!!!\n"
               "Use vstrict=-2 / -strict -2 to use it anyway.\n");
        return AVERROR(EINVAL);
    }

    if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN && s->version <= 2) {
        av_log(avctx, AV_LOG_ERROR,
               "Error: RGB is incompatible with median predictor\n");
        return AVERROR(EINVAL);
    }

    ((uint8_t*)avctx->extradata)[0] = s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[2] = s->interlaced ? 0x10 : 0x20;
    if (s->context)
        ((uint8_t*)avctx->extradata)[2] |= 0x40;
    if (s->version < 3) {
        ((uint8_t*)avctx->extradata)[1] = s->bitstream_bpp;
        ((uint8_t*)avctx->extradata)[3] = 0;
    } else {
        ((uint8_t*)avctx->extradata)[1] = ((s->bps-1)<<4) | s->chroma_h_shift | (s->chroma_v_shift<<2);
        if (s->chroma)
            ((uint8_t*)avctx->extradata)[2] |= s->yuv ? 1 : 2;
        if (s->alpha)
            ((uint8_t*)avctx->extradata)[2] |= 4;
        ((uint8_t*)avctx->extradata)[3] = 1;
    }
    s->avctx->extradata_size = 4;

    if (avctx->stats_in) {
        char *p = avctx->stats_in;

        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++)
                s->stats[i][j] = 1;

        for (;;) {
            for (i = 0; i < 4; i++) {
                char *next;

                for (j = 0; j < s->vlc_n; j++) {
                    s->stats[i][j] += strtol(p, &next, 0);
                    if (next == p) return -1;
                    p = next;
                }
            }
            if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
        }
    } else {
        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++) {
                int d = FFMIN(j, s->vlc_n - j);

                s->stats[i][j] = 100000000 / (d*d + 1);
            }
    }

    ret = store_huffman_tables(s, s->avctx->extradata + s->avctx->extradata_size);
    if (ret < 0)
        return ret;
    s->avctx->extradata_size += ret;

    if (s->context) {
        for (i = 0; i < 4; i++) {
            int pels = s->width * s->height / (i ? 40 : 10);

            for (j = 0; j < s->vlc_n; j++) {
                int d = FFMIN(j, s->vlc_n - j);

                s->stats[i][j] = pels/(d*d + 1);
            }
        }
    } else {
        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++)
                s->stats[i][j] = 0;
    }

    if (ff_huffyuv_alloc_temp(s)) {
        ff_huffyuv_common_end(s);
        return AVERROR(ENOMEM);
    }

    s->picture_number = 0;

    return 0;
}
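
/* Entropy-code one run of 4:2:2 residuals from s->temp as Y0 U Y1 V groups.
 * PASS1 only gathers stats; context=1 writes bits and updates stats. */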
static int encode_422_bitstream(HYuvContext *s, int offset, int count)
{
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset / 2;
    const uint8_t *v = s->temp[2] + offset / 2;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 2 * 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4\
            int y0 = y[2 * i];\
            int y1 = y[2 * i + 1];\
            int u0 = u[i];\
            int v0 = v[i];

    count /= 2;

    if (s->flags & AV_CODEC_FLAG_PASS1) {
        for (i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if (s->context) {
        for (i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}
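
/* Entropy-code one plane row for version-3 streams.  Samples deeper than
 * 14 bits are coded as a VLC on the top bits plus 2 raw low bits; 9-14 bit
 * samples are masked to the valid range (s->n - 1) before the table lookup. */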
static int encode_plane_bitstream(HYuvContext *s, int width, int plane)
{
    int i, count = width/2;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < count * s->bps / 2) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOADEND\
            int y0 = s->temp[0][width-1];
#define LOADEND_14\
            int y0 = s->temp16[0][width-1] & mask;
#define LOADEND_16\
            int y0 = s->temp16[0][width-1];
#define STATEND\
            s->stats[plane][y0]++;
#define STATEND_16\
            s->stats[plane][y0>>2]++;
#define WRITEEND\
            put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);
#define WRITEEND_16\
            put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
            put_bits(&s->pb, 2, y0&3);

#define LOAD2\
            int y0 = s->temp[0][2 * i];\
            int y1 = s->temp[0][2 * i + 1];
#define LOAD2_14\
            int y0 = s->temp16[0][2 * i] & mask;\
            int y1 = s->temp16[0][2 * i + 1] & mask;
#define LOAD2_16\
            int y0 = s->temp16[0][2 * i];\
            int y1 = s->temp16[0][2 * i + 1];
#define STAT2\
            s->stats[plane][y0]++;\
            s->stats[plane][y1]++;
#define STAT2_16\
            s->stats[plane][y0>>2]++;\
            s->stats[plane][y1>>2]++;
#define WRITE2\
            put_bits(&s->pb, s->len[plane][y0], s->bits[plane][y0]);\
            put_bits(&s->pb, s->len[plane][y1], s->bits[plane][y1]);
#define WRITE2_16\
            put_bits(&s->pb, s->len[plane][y0>>2], s->bits[plane][y0>>2]);\
            put_bits(&s->pb, 2, y0&3);\
            put_bits(&s->pb, s->len[plane][y1>>2], s->bits[plane][y1>>2]);\
            put_bits(&s->pb, 2, y1&3);

    if (s->bps <= 8) {
        if (s->flags & AV_CODEC_FLAG_PASS1) {
            for (i = 0; i < count; i++) {
                LOAD2;
                STAT2;
            }
            if (width&1) {
                LOADEND;
                STATEND;
            }
        }
        if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
            return 0;

        if (s->context) {
            for (i = 0; i < count; i++) {
                LOAD2;
                STAT2;
                WRITE2;
            }
            if (width&1) {
                LOADEND;
                STATEND;
                WRITEEND;
            }
        } else {
            for (i = 0; i < count; i++) {
                LOAD2;
                WRITE2;
            }
            if (width&1) {
                LOADEND;
                WRITEEND;
            }
        }
    } else if (s->bps <= 14) {
        int mask = s->n - 1;
        if (s->flags & AV_CODEC_FLAG_PASS1) {
            for (i = 0; i < count; i++) {
                LOAD2_14;
                STAT2;
            }
            if (width&1) {
                LOADEND_14;
                STATEND;
            }
        }
        if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
            return 0;

        if (s->context) {
            for (i = 0; i < count; i++) {
                LOAD2_14;
                STAT2;
                WRITE2;
            }
            if (width&1) {
                LOADEND_14;
                STATEND;
                WRITEEND;
            }
        } else {
            for (i = 0; i < count; i++) {
                LOAD2_14;
                WRITE2;
            }
            if (width&1) {
                LOADEND_14;
                WRITEEND;
            }
        }
    } else {
        if (s->flags & AV_CODEC_FLAG_PASS1) {
            for (i = 0; i < count; i++) {
                LOAD2_16;
                STAT2_16;
            }
            if (width&1) {
                LOADEND_16;
                STATEND_16;
            }
        }
        if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
            return 0;

        if (s->context) {
            for (i = 0; i < count; i++) {
                LOAD2_16;
                STAT2_16;
                WRITE2_16;
            }
            if (width&1) {
                LOADEND_16;
                STATEND_16;
                WRITEEND_16;
            }
        } else {
            for (i = 0; i < count; i++) {
                LOAD2_16;
                WRITE2_16;
            }
            if (width&1) {
                LOADEND_16;
                WRITEEND_16;
            }
        }
    }
#undef LOAD2
#undef STAT2
#undef WRITE2
    return 0;
}
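
/* Entropy-code a luma-only (gray) row, two samples per iteration. */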
static int encode_gray_bitstream(HYuvContext *s, int count)
{
    int i;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2\
            int y0 = s->temp[0][2 * i];\
            int y1 = s->temp[0][2 * i + 1];
#define STAT2\
            s->stats[0][y0]++;\
            s->stats[0][y1]++;
#define WRITE2\
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count /= 2;

    if (s->flags & AV_CODEC_FLAG_PASS1) {
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
        }
    }
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if (s->context) {
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
            WRITE2;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
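
/* Entropy-code packed RGB(A): green is coded directly, red and blue are
 * decorrelated against green; alpha (planes == 4) shares table 2 with red. */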
static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes)
{
    int i;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
        4 * planes * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD_GBRA \
    int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G]; \
    int b =(s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g) & 0xFF;\
    int r =(s->temp[0][planes == 3 ? 3 * i + 0 : 4 * i + R] - g) & 0xFF;\
    int a = s->temp[0][planes * i + A];

#define STAT_BGRA \
    s->stats[0][b]++; \
    s->stats[1][g]++; \
    s->stats[2][r]++; \
    if (planes == 4) \
        s->stats[2][a]++;

#define WRITE_GBRA \
    put_bits(&s->pb, s->len[1][g], s->bits[1][g]); \
    put_bits(&s->pb, s->len[0][b], s->bits[0][b]); \
    put_bits(&s->pb, s->len[2][r], s->bits[2][r]); \
    if (planes == 4) \
        put_bits(&s->pb, s->len[2][a], s->bits[2][a]);

    if ((s->flags & AV_CODEC_FLAG_PASS1) &&
        (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            STAT_BGRA;
        }
    } else if (s->context || (s->flags & AV_CODEC_FLAG_PASS1)) {
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            STAT_BGRA;
            WRITE_GBRA;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            WRITE_GBRA;
        }
    }
    return 0;
}
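
/* Encode one frame: optionally store per-frame tables (context=1), apply
 * the selected predictor along each format path, entropy-code the residuals,
 * and byte-swap the finished bitstream into the 32-bit word order the
 * format expects. */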
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    HYuvContext *s = avctx->priv_data;
    const int width = s->width;
    const int width2 = s->width>>1;
    const int height = s->height;
    const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    const AVFrame * const p = pict;
    int i, j, size = 0, ret;

    if ((ret = ff_alloc_packet2(avctx, pkt, width * height * 3 * 4 + AV_INPUT_BUFFER_MIN_SIZE, 0)) < 0)
        return ret;

    if (s->context) {
        size = store_huffman_tables(s, pkt->data);
        if (size < 0)
            return size;

        for (i = 0; i < 4; i++)
            for (j = 0; j < s->vlc_n; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, pkt->data + size, pkt->size - size);

    if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
        avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
        int lefty, leftu, leftv, y, cy;

        put_bits(&s->pb, 8, leftv = p->data[2][0]);
        put_bits(&s->pb, 8, lefty = p->data[0][1]);
        put_bits(&s->pb, 8, leftu = p->data[1][0]);
        put_bits(&s->pb, 8, p->data[0][0]);

        lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
        leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);

        encode_422_bitstream(s, 2, width-2);

        if (s->predictor == MEDIAN) {
            int lefttopy, lefttopu, lefttopv;
            cy = y = 1;
            if (s->interlaced) {
                lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
                leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
                leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);

                encode_422_bitstream(s, 0, width);
                y++; cy++;
            }

            lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
            leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
            leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);

            encode_422_bitstream(s, 0, 4);

            lefttopy = p->data[0][3];
            lefttopu = p->data[1][1];
            lefttopv = p->data[2][1];
            s->llvidencdsp.sub_median_pred(s->temp[0], p->data[0] + 4, p->data[0] + fake_ystride + 4, width - 4, &lefty, &lefttopy);
            s->llvidencdsp.sub_median_pred(s->temp[1], p->data[1] + 2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
            s->llvidencdsp.sub_median_pred(s->temp[2], p->data[2] + 2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width - 4);
            y++; cy++;

            for (; y < height; y++,cy++) {
                uint8_t *ydst, *udst, *vdst;

                if (s->bitstream_bpp == 12) {
                    while (2 * cy > y) {
                        ydst = p->data[0] + p->linesize[0] * y;
                        s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if (y >= height) break;
                }
                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                s->llvidencdsp.sub_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
                s->llvidencdsp.sub_median_pred(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->llvidencdsp.sub_median_pred(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, 0, width);
            }
        } else {
            for (cy = y = 1; y < height; y++, cy++) {
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if (s->bitstream_bpp == 12) {
                    ydst = p->data[0] + p->linesize[0] * y;

                    if (s->predictor == PLANE && s->interlaced < y) {
                        s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    } else {
                        lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if (y >= height) break;
                }

                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                if (s->predictor == PLANE && s->interlaced < cy) {
                    s->llvidencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->llvidencdsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->llvidencdsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                } else {
                    lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, 0, width);
            }
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB32) {
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb, lefta;

        put_bits(&s->pb, 8, lefta = data[A]);
        put_bits(&s->pb, 8, leftr = data[R]);
        put_bits(&s->pb, 8, leftg = data[G]);
        put_bits(&s->pb, 8, leftb = data[B]);

        sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1,
                                  &leftr, &leftg, &leftb, &lefta);
        encode_bgra_bitstream(s, width - 1, 4);

        for (y = 1; y < s->height; y++) {
            uint8_t *dst = data + y*stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width,
                                          &leftr, &leftg, &leftb, &lefta);
            } else {
                sub_left_prediction_bgr32(s, s->temp[0], dst, width,
                                          &leftr, &leftg, &leftb, &lefta);
            }
            encode_bgra_bitstream(s, width, 4);
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr = data[0]);
        put_bits(&s->pb, 8, leftg = data[1]);
        put_bits(&s->pb, 8, leftb = data[2]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_rgb24(s, s->temp[0], data + 3, width - 1,
                                  &leftr, &leftg, &leftb);
        encode_bgra_bitstream(s, width-1, 3);

        for (y = 1; y < s->height; y++) {
            uint8_t *dst = data + y * stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->llvidencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride,
                                          width * 3);
                sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width,
                                          &leftr, &leftg, &leftb);
            } else {
                sub_left_prediction_rgb24(s, s->temp[0], dst, width,
                                          &leftr, &leftg, &leftb);
            }
            encode_bgra_bitstream(s, width, 3);
        }
    } else if (s->version > 2) {
        int plane;
        for (plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) {
            int left, y;
            int w = width;
            int h = height;
            int fake_stride = fake_ystride;

            if (s->chroma && (plane == 1 || plane == 2)) {
                w >>= s->chroma_h_shift;
                h >>= s->chroma_v_shift;
                fake_stride = plane == 1 ? fake_ustride : fake_vstride;
            }

            left = sub_left_prediction(s, s->temp[0], p->data[plane], w , 0);

            encode_plane_bitstream(s, w, plane);

            if (s->predictor == MEDIAN) {
                int lefttop;
                y = 1;
                if (s->interlaced) {
                    left = sub_left_prediction(s, s->temp[0], p->data[plane] + p->linesize[plane], w , left);

                    encode_plane_bitstream(s, w, plane);
                    y++;
                }

                lefttop = p->data[plane][0];

                for (; y < h; y++) {
                    uint8_t *dst = p->data[plane] + p->linesize[plane] * y;

                    sub_median_prediction(s, s->temp[0], dst - fake_stride, dst, w , &left, &lefttop);

                    encode_plane_bitstream(s, w, plane);
                }
            } else {
                for (y = 1; y < h; y++) {
                    uint8_t *dst = p->data[plane] + p->linesize[plane] * y;

                    if (s->predictor == PLANE && s->interlaced < y) {
                        diff_bytes(s, s->temp[1], dst, dst - fake_stride, w);

                        left = sub_left_prediction(s, s->temp[0], s->temp[1], w , left);
                    } else {
                        left = sub_left_prediction(s, s->temp[0], dst, w , left);
                    }

                    encode_plane_bitstream(s, w, plane);
                }
            }
        }
    } else {
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    size += (put_bits_count(&s->pb) + 31) / 8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size /= 4;

    if ((s->flags & AV_CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
        int j;
        char *p = avctx->stats_out;
        char *end = p + STATS_OUT_SIZE;
        for (i = 0; i < 4; i++) {
            for (j = 0; j < s->vlc_n; j++) {
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p += strlen(p);
                s->stats[i][j] = 0;
            }
            snprintf(p, end-p, "\n");
            p++;
            if (end <= p)
                return AVERROR(ENOMEM);
        }
    } else if (avctx->stats_out)
        avctx->stats_out[0] = '\0';

    if (!(s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
        flush_put_bits(&s->pb);
        s->bdsp.bswap_buf((uint32_t *) pkt->data, (uint32_t *) pkt->data, size);
    }

    s->picture_number++;

    pkt->size   = size * 4;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
static av_cold int encode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    ff_huffyuv_common_end(s);

    av_freep(&avctx->extradata);
    av_freep(&avctx->stats_out);

    return 0;
}
#define OFFSET(x) offsetof(HYuvContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

#define COMMON_OPTIONS \
    { "non_deterministic", "Allow multithreading for e.g. context=1 at the expense of determinism", \
      OFFSET(non_determ), AV_OPT_TYPE_BOOL, { .i64 = 1 }, \
      0, 1, VE }, \
    { "pred", "Prediction method", OFFSET(predictor), AV_OPT_TYPE_INT, { .i64 = LEFT }, LEFT, MEDIAN, VE, "pred" }, \
        { "left",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LEFT },   INT_MIN, INT_MAX, VE, "pred" }, \
        { "plane",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PLANE },  INT_MIN, INT_MAX, VE, "pred" }, \
        { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MEDIAN }, INT_MIN, INT_MAX, VE, "pred" },

static const AVOption normal_options[] = {
    COMMON_OPTIONS
    { NULL },
};

static const AVOption ff_options[] = {
    COMMON_OPTIONS
    { "context", "Set per-frame huffman tables", OFFSET(context), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { NULL },
};

static const AVClass normal_class = {
    .class_name = "huffyuv",
    .item_name  = av_default_item_name,
    .option     = normal_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const AVClass ff_class = {
    .class_name = "ffvhuff",
    .item_name  = av_default_item_name,
    .option     = ff_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
AVCodec ff_huffyuv_encoder = {
    .name           = "huffyuv",
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_HUFFYUV,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .capabilities   = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
    .priv_class     = &normal_class,
    .pix_fmts       = (const enum AVPixelFormat[]){
        AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24,
        AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
};

#if CONFIG_FFVHUFF_ENCODER
AVCodec ff_ffvhuff_encoder = {
    .name           = "ffvhuff",
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FFVHUFF,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .capabilities   = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
    .priv_class     = &ff_class,
    .pix_fmts       = (const enum AVPixelFormat[]){
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_GBRP,
        AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
        AV_PIX_FMT_GBRAP,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV420P16,
        AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV422P16,
        AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
        AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P16,
        AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P16,
        AV_PIX_FMT_RGB24,
        AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
};
#endif