/*
 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
 * the algorithm used
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * huffyuv encoder
 */
/* Standard headers for assert(), PRIu64, snprintf(), strtol() and strlen();
 * in the full tree these typically also arrive transitively through the
 * libav headers below, but including them explicitly keeps the file
 * self-contained. */
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "avcodec.h"
#include "huffyuv.h"
#include "huffman.h"
#include "huffyuvencdsp.h"
#include "internal.h"
#include "put_bits.h"
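
/**
 * Left-predict one row of samples: dst[i] = src[i] - previous sample
 * (mod 256), seeded with @p left for the first sample. Short rows are
 * differenced entirely in scalar code; otherwise only the first 16
 * samples are, so that the (possibly SIMD-accelerated) diff_bytes()
 * can handle the remainder. Returns the last raw sample, to be carried
 * into the next row as the new left value.
 */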
static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
                                      uint8_t *src, int w, int left)
{
    int i;
    if (w < 32) {
        for (i = 0; i < w; i++) {
            const int temp = src[i];
            dst[i] = temp - left;
            left   = temp;
        }
        return left;
    } else {
        for (i = 0; i < 16; i++) {
            const int temp = src[i];
            dst[i] = temp - left;
            left   = temp;
        }
        s->hencdsp.diff_bytes(dst + 16, src + 16, src + 15, w - 16);
        return src[w - 1];
    }
}
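
/**
 * Left-predict a packed RGB32 row: each of the four channels is
 * differenced against the same channel of the previous pixel. The first
 * four pixels (16 bytes) are handled in scalar code, the rest through
 * diff_bytes() with a one-pixel (4-byte) backward offset; the last
 * pixel's channel values are written back for the next row.
 */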
static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
                                             uint8_t *src, int w,
                                             int *red, int *green, int *blue,
                                             int *alpha)
{
    int i;
    int r, g, b, a;
    r = *red;
    g = *green;
    b = *blue;
    a = *alpha;

    for (i = 0; i < FFMIN(w, 4); i++) {
        const int rt = src[i * 4 + R];
        const int gt = src[i * 4 + G];
        const int bt = src[i * 4 + B];
        const int at = src[i * 4 + A];
        dst[i * 4 + R] = rt - r;
        dst[i * 4 + G] = gt - g;
        dst[i * 4 + B] = bt - b;
        dst[i * 4 + A] = at - a;
        r = rt;
        g = gt;
        b = bt;
        a = at;
    }

    s->hencdsp.diff_bytes(dst + 16, src + 16, src + 12, w * 4 - 16);

    *red   = src[(w - 1) * 4 + R];
    *green = src[(w - 1) * 4 + G];
    *blue  = src[(w - 1) * 4 + B];
    *alpha = src[(w - 1) * 4 + A];
}
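
/**
 * Same scheme for packed RGB24: the first 16 pixels (48 bytes) are
 * differenced in scalar code, the rest through diff_bytes() with a
 * one-pixel (3-byte) backward offset.
 */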
static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst,
                                             uint8_t *src, int w,
                                             int *red, int *green, int *blue)
{
    int i;
    int r, g, b;
    r = *red;
    g = *green;
    b = *blue;

    for (i = 0; i < FFMIN(w, 16); i++) {
        const int rt = src[i * 3 + 0];
        const int gt = src[i * 3 + 1];
        const int bt = src[i * 3 + 2];
        dst[i * 3 + 0] = rt - r;
        dst[i * 3 + 1] = gt - g;
        dst[i * 3 + 2] = bt - b;
        r = rt;
        g = gt;
        b = bt;
    }

    s->hencdsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w * 3 - 48);

    *red   = src[(w - 1) * 3 + 0];
    *green = src[(w - 1) * 3 + 1];
    *blue  = src[(w - 1) * 3 + 2];
}
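
/**
 * Run-length encode a 256-entry Huffman code-length table into @p buf.
 * Runs of up to 7 equal lengths are packed into one byte (length in
 * bits 0-4, repeat count in bits 5-7); longer runs (up to 255) are
 * stored as two bytes, a plain length byte with zero repeat bits
 * followed by the repeat count. Returns the number of bytes written.
 */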
static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
{
    int i;
    int index = 0;

    for (i = 0; i < 256;) {
        int val    = len[i];
        int repeat = 0;

        for (; i < 256 && len[i] == val && repeat < 255; i++)
            repeat++;

        assert(val < 32 && val > 0 && repeat < 256 && repeat > 0);
        if (repeat > 7) {
            buf[index++] = val;
            buf[index++] = repeat;
        } else {
            buf[index++] = val | (repeat << 5);
        }
    }

    return index;
}
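
/**
 * Set up the encoder: pick the bitstream bpp from the pixel format,
 * build the global Huffman tables (from two-pass stats_in when present,
 * otherwise from a fixed distribution favouring small signed residues)
 * and store the 4-byte header plus the three RLE-coded length tables
 * in extradata.
 */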
static av_cold int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;

    ff_huffyuv_common_init(avctx);
    ff_huffyuvencdsp_init(&s->hencdsp);

    avctx->extradata = av_mallocz(1024 * 30); // 256*3+4 == 772
    avctx->stats_out = av_mallocz(1024 * 30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
    s->version = 2;

    if (!avctx->extradata || !avctx->stats_out)
        return AVERROR(ENOMEM);

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
        if (s->width & 1) {
            av_log(avctx, AV_LOG_ERROR, "Width must be even for this colorspace.\n");
            return -1;
        }
        s->bitstream_bpp = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 12 : 16;
        break;
    case AV_PIX_FMT_RGB32:
        s->bitstream_bpp = 32;
        break;
    case AV_PIX_FMT_RGB24:
        s->bitstream_bpp = 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return -1;
    }

    avctx->bits_per_coded_sample = s->bitstream_bpp;
    s->decorrelate = s->bitstream_bpp >= 24;
    s->predictor   = avctx->prediction_method;
    s->interlaced  = avctx->flags & AV_CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if (avctx->context_model == 1) {
        s->context = avctx->context_model;
        if (s->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) {
            av_log(avctx, AV_LOG_ERROR,
                   "context=1 is not compatible with "
                   "2 pass huffyuv encoding\n");
            return -1;
        }
    } else
        s->context = 0;

    if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
        if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error: YV12 is not supported by huffyuv; use "
                   "vcodec=ffvhuff or format=422p\n");
            return -1;
        }
        if (avctx->context_model) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error: per-frame huffman tables are not supported "
                   "by huffyuv; use vcodec=ffvhuff\n");
            return -1;
        }
        if (s->interlaced != (s->height > 288))
            av_log(avctx, AV_LOG_INFO,
                   "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN) {
        av_log(avctx, AV_LOG_ERROR,
               "Error: RGB is incompatible with median predictor\n");
        return -1;
    }

    ((uint8_t *)avctx->extradata)[0] = s->predictor | (s->decorrelate << 6);
    ((uint8_t *)avctx->extradata)[1] = s->bitstream_bpp;
    ((uint8_t *)avctx->extradata)[2] = s->interlaced ? 0x10 : 0x20;
    if (s->context)
        ((uint8_t *)avctx->extradata)[2] |= 0x40;
    ((uint8_t *)avctx->extradata)[3] = 0;
    s->avctx->extradata_size = 4;

    if (avctx->stats_in) {
        char *p = avctx->stats_in;

        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++)
                s->stats[i][j] = 1;

        for (;;) {
            for (i = 0; i < 3; i++) {
                char *next;

                for (j = 0; j < 256; j++) {
                    s->stats[i][j] += strtol(p, &next, 0);
                    if (next == p)
                        return -1;
                    p = next;
                }
            }
            if (p[0] == 0 || p[1] == 0 || p[2] == 0)
                break;
        }
    } else {
        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++) {
                int d = FFMIN(j, 256 - j);

                s->stats[i][j] = 100000000 / (d + 1);
            }
    }

    for (i = 0; i < 3; i++) {
        ff_huff_gen_len_table(s->len[i], s->stats[i]);

        if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i]) < 0)
            return -1;

        s->avctx->extradata_size +=
            store_table(s, s->len[i], &((uint8_t *)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if (s->context) {
        for (i = 0; i < 3; i++) {
            int pels = s->width * s->height / (i ? 40 : 10);

            for (j = 0; j < 256; j++) {
                int d = FFMIN(j, 256 - j);

                s->stats[i][j] = pels / (d + 1);
            }
        }
    } else {
        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++)
                s->stats[i][j] = 0;
    }

    ff_huffyuv_alloc_temp(s);

    s->picture_number = 0;

    return 0;
}
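
/**
 * Entropy-code @p count luma samples (count/2 chroma pairs) starting at
 * @p offset in s->temp[] as Y0 U Y1 V tuples. Gathers symbol statistics
 * in pass-1 mode and keeps updating them while coding when per-frame
 * (context) tables are in use.
 */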
static int encode_422_bitstream(HYuvContext *s, int offset, int count)
{
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset / 2;
    const uint8_t *v = s->temp[2] + offset / 2;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 2 * 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4 \
    int y0 = y[2 * i];     \
    int y1 = y[2 * i + 1]; \
    int u0 = u[i];         \
    int v0 = v[i];

    count /= 2;

    if (s->flags & AV_CODEC_FLAG_PASS1) {
        for (i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if (s->context) {
        for (i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }

    return 0;
}
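
/**
 * Entropy-code a luma-only run of @p count samples from s->temp[0];
 * used for the chroma-less lines of 4:2:0 material.
 */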
static int encode_gray_bitstream(HYuvContext *s, int count)
{
    int i;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2 \
    int y0 = s->temp[0][2 * i];     \
    int y1 = s->temp[0][2 * i + 1];

#define STAT2 \
    s->stats[0][y0]++; \
    s->stats[0][y1]++;

#define WRITE2 \
    put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]); \
    put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count /= 2;

    if (s->flags & AV_CODEC_FLAG_PASS1) {
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
        }
    }
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if (s->context) {
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
            WRITE2;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD2;
            WRITE2;
        }
    }

    return 0;
}
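
/**
 * Entropy-code @p count RGB24/RGB32 pixels from s->temp[0]. Green is
 * coded as-is; blue and red are decorrelated by subtracting green
 * (mod 256) before coding; alpha, when present, shares the red table.
 */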
static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes)
{
    int i;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
        4 * planes * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD_GBRA \
    int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G];              \
    int b = (s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g) & 0xFF; \
    int r = (s->temp[0][planes == 3 ? 3 * i + 0 : 4 * i + R] - g) & 0xFF; \
    int a = s->temp[0][planes * i + A];

#define STAT_BGRA \
    s->stats[0][b]++; \
    s->stats[1][g]++; \
    s->stats[2][r]++; \
    if (planes == 4)  \
        s->stats[2][a]++;

#define WRITE_GBRA \
    put_bits(&s->pb, s->len[1][g], s->bits[1][g]); \
    put_bits(&s->pb, s->len[0][b], s->bits[0][b]); \
    put_bits(&s->pb, s->len[2][r], s->bits[2][r]); \
    if (planes == 4)                               \
        put_bits(&s->pb, s->len[2][a], s->bits[2][a]);

    if ((s->flags & AV_CODEC_FLAG_PASS1) &&
        (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            STAT_BGRA;
        }
    } else if (s->context || (s->flags & AV_CODEC_FLAG_PASS1)) {
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            STAT_BGRA;
            WRITE_GBRA;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            WRITE_GBRA;
        }
    }

    return 0;
}
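
/**
 * Encode one frame. In context mode the per-frame tables are stored
 * first and the statistics are halved (exponential aging). The first
 * samples of each plane are stored raw to seed the predictors, then
 * each line is predicted (left, plane or median), differenced into
 * s->temp[] and entropy-coded; RGB frames are processed bottom-up via
 * a negative stride. The stream is padded to a whole number of 32-bit
 * words, which are byteswapped before output.
 */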
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    HYuvContext *s = avctx->priv_data;
    const int width  = s->width;
    const int width2 = s->width >> 1;
    const int height = s->height;
    const int fake_ystride = s->interlaced ? pict->linesize[0] * 2 : pict->linesize[0];
    const int fake_ustride = s->interlaced ? pict->linesize[1] * 2 : pict->linesize[1];
    const int fake_vstride = s->interlaced ? pict->linesize[2] * 2 : pict->linesize[2];
    const AVFrame * const p = pict;
    int i, j, size = 0, ret;

    if (!pkt->data &&
        (ret = av_new_packet(pkt, width * height * 3 * 4 + AV_INPUT_BUFFER_MIN_SIZE)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error allocating output packet.\n");
        return ret;
    }

    if (s->context) {
        for (i = 0; i < 3; i++) {
            ff_huff_gen_len_table(s->len[i], s->stats[i]);
            if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i]) < 0)
                return -1;
            size += store_table(s, s->len[i], &pkt->data[size]);
        }

        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, pkt->data + size, pkt->size - size);
    if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
        avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
        int lefty, leftu, leftv, y, cy;

        /* Seed the predictors: the first four samples are stored raw. */
        put_bits(&s->pb, 8, leftv = p->data[2][0]);
        put_bits(&s->pb, 8, lefty = p->data[0][1]);
        put_bits(&s->pb, 8, leftu = p->data[1][0]);
        put_bits(&s->pb, 8, p->data[0][0]);

        lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
        leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);

        encode_422_bitstream(s, 2, width - 2);

        if (s->predictor == MEDIAN) {
            int lefttopy, lefttopu, lefttopv;
            cy = y = 1;
            if (s->interlaced) {
                lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
                leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
                leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);

                encode_422_bitstream(s, 0, width);
                y++; cy++;
            }

            /* The first few samples of the second coded line still use
             * left prediction before the median predictor takes over. */
            lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
            leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
            leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);

            encode_422_bitstream(s, 0, 4);

            lefttopy = p->data[0][3];
            lefttopu = p->data[1][1];
            lefttopv = p->data[2][1];
            s->hencdsp.sub_hfyu_median_pred(s->temp[0], p->data[0] + 4, p->data[0] + fake_ystride + 4, width - 4, &lefty, &lefttopy);
            s->hencdsp.sub_hfyu_median_pred(s->temp[1], p->data[1] + 2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
            s->hencdsp.sub_hfyu_median_pred(s->temp[2], p->data[2] + 2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width - 4);
            y++; cy++;

            for (; y < height; y++, cy++) {
                uint8_t *ydst, *udst, *vdst;

                if (s->bitstream_bpp == 12) {
                    /* 4:2:0: catch up on the luma-only lines between
                     * chroma lines. */
                    while (2 * cy > y) {
                        ydst = p->data[0] + p->linesize[0] * y;
                        s->hencdsp.sub_hfyu_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if (y >= height)
                        break;
                }

                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                s->hencdsp.sub_hfyu_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
                s->hencdsp.sub_hfyu_median_pred(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->hencdsp.sub_hfyu_median_pred(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, 0, width);
            }
        } else {
            for (cy = y = 1; y < height; y++, cy++) {
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if (s->bitstream_bpp == 12) {
                    ydst = p->data[0] + p->linesize[0] * y;

                    if (s->predictor == PLANE && s->interlaced < y) {
                        s->hencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    } else {
                        lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if (y >= height)
                        break;
                }

                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                if (s->predictor == PLANE && s->interlaced < cy) {
                    s->hencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->hencdsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->hencdsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                } else {
                    lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, 0, width);
            }
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB32) {
        /* RGB frames are coded bottom-up, hence the negative stride. */
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb, lefta;

        put_bits(&s->pb, 8, lefta = data[A]);
        put_bits(&s->pb, 8, leftr = data[R]);
        put_bits(&s->pb, 8, leftg = data[G]);
        put_bits(&s->pb, 8, leftb = data[B]);

        sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1,
                                  &leftr, &leftg, &leftb, &lefta);
        encode_bgra_bitstream(s, width - 1, 4);

        for (y = 1; y < s->height; y++) {
            uint8_t *dst = data + y * stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->hencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width,
                                          &leftr, &leftg, &leftb, &lefta);
            } else {
                sub_left_prediction_bgr32(s, s->temp[0], dst, width,
                                          &leftr, &leftg, &leftb, &lefta);
            }
            encode_bgra_bitstream(s, width, 4);
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr = data[0]);
        put_bits(&s->pb, 8, leftg = data[1]);
        put_bits(&s->pb, 8, leftb = data[2]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_rgb24(s, s->temp[0], data + 3, width - 1,
                                  &leftr, &leftg, &leftb);
        encode_bgra_bitstream(s, width - 1, 3);

        for (y = 1; y < s->height; y++) {
            uint8_t *dst = data + y * stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->hencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride,
                                      width * 3);
                sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width,
                                          &leftr, &leftg, &leftb);
            } else {
                sub_left_prediction_rgb24(s, s->temp[0], dst, width,
                                          &leftr, &leftg, &leftb);
            }
            encode_bgra_bitstream(s, width, 3);
        }
    } else {
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    /* The 31 zero bits written below force the final partial word out;
     * the +31 rounds the bit count up to whole 32-bit words. */
    size += (put_bits_count(&s->pb) + 31) / 8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size /= 4;

    if ((s->flags & AV_CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
        int j;
        char *p   = avctx->stats_out;
        char *end = p + 1024 * 30;

        for (i = 0; i < 3; i++) {
            for (j = 0; j < 256; j++) {
                snprintf(p, end - p, "%"PRIu64" ", s->stats[i][j]);
                p += strlen(p);
                s->stats[i][j] = 0;
            }
            snprintf(p, end - p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';

    if (!(s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
        flush_put_bits(&s->pb);
        s->bdsp.bswap_buf((uint32_t *) pkt->data, (uint32_t *) pkt->data, size);
    }

    s->picture_number++;

    pkt->size   = size * 4;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
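
/**
 * Free the shared huffyuv state and the extradata/stats buffers
 * allocated in encode_init().
 */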
static av_cold int encode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    ff_huffyuv_common_end(s);

    av_freep(&avctx->extradata);
    av_freep(&avctx->stats_out);

    return 0;
}
AVCodec ff_huffyuv_encoder = {
    .name           = "huffyuv",
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_HUFFYUV,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24,
        AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
};

#if CONFIG_FFVHUFF_ENCODER
AVCodec ff_ffvhuff_encoder = {
    .name           = "ffvhuff",
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FFVHUFF,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24,
        AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
};
#endif