You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

715 lines
24KB

  1. /*
  2. * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
  3. *
  4. * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
  5. * the algorithm used
  6. *
  7. * This file is part of Libav.
  8. *
  9. * Libav is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * Libav is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with Libav; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. */
  23. /**
  24. * @file
  25. * huffyuv encoder
  26. */
  27. #include "avcodec.h"
  28. #include "huffyuv.h"
  29. #include "huffman.h"
  30. #include "put_bits.h"
  31. static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
  32. uint8_t *src, int w, int left)
  33. {
  34. int i;
  35. if (w < 32) {
  36. for (i = 0; i < w; i++) {
  37. const int temp = src[i];
  38. dst[i] = temp - left;
  39. left = temp;
  40. }
  41. return left;
  42. } else {
  43. for (i = 0; i < 16; i++) {
  44. const int temp = src[i];
  45. dst[i] = temp - left;
  46. left = temp;
  47. }
  48. s->dsp.diff_bytes(dst + 16, src + 16, src + 15, w - 16);
  49. return src[w-1];
  50. }
  51. }
  52. static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
  53. uint8_t *src, int w,
  54. int *red, int *green, int *blue,
  55. int *alpha)
  56. {
  57. int i;
  58. int r, g, b, a;
  59. r = *red;
  60. g = *green;
  61. b = *blue;
  62. a = *alpha;
  63. for (i = 0; i < FFMIN(w, 4); i++) {
  64. const int rt = src[i * 4 + R];
  65. const int gt = src[i * 4 + G];
  66. const int bt = src[i * 4 + B];
  67. const int at = src[i * 4 + A];
  68. dst[i * 4 + R] = rt - r;
  69. dst[i * 4 + G] = gt - g;
  70. dst[i * 4 + B] = bt - b;
  71. dst[i * 4 + A] = at - a;
  72. r = rt;
  73. g = gt;
  74. b = bt;
  75. a = at;
  76. }
  77. s->dsp.diff_bytes(dst + 16, src + 16, src + 12, w * 4 - 16);
  78. *red = src[(w - 1) * 4 + R];
  79. *green = src[(w - 1) * 4 + G];
  80. *blue = src[(w - 1) * 4 + B];
  81. *alpha = src[(w - 1) * 4 + A];
  82. }
  83. static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst,
  84. uint8_t *src, int w,
  85. int *red, int *green, int *blue)
  86. {
  87. int i;
  88. int r, g, b;
  89. r = *red;
  90. g = *green;
  91. b = *blue;
  92. for (i = 0; i < FFMIN(w, 16); i++) {
  93. const int rt = src[i * 3 + 0];
  94. const int gt = src[i * 3 + 1];
  95. const int bt = src[i * 3 + 2];
  96. dst[i * 3 + 0] = rt - r;
  97. dst[i * 3 + 1] = gt - g;
  98. dst[i * 3 + 2] = bt - b;
  99. r = rt;
  100. g = gt;
  101. b = bt;
  102. }
  103. s->dsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w * 3 - 48);
  104. *red = src[(w - 1) * 3 + 0];
  105. *green = src[(w - 1) * 3 + 1];
  106. *blue = src[(w - 1) * 3 + 2];
  107. }
  108. static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
  109. {
  110. int i;
  111. int index = 0;
  112. for (i = 0; i < 256;) {
  113. int val = len[i];
  114. int repeat = 0;
  115. for (; i < 256 && len[i] == val && repeat < 255; i++)
  116. repeat++;
  117. assert(val < 32 && val >0 && repeat<256 && repeat>0);
  118. if ( repeat > 7) {
  119. buf[index++] = val;
  120. buf[index++] = repeat;
  121. } else {
  122. buf[index++] = val | (repeat << 5);
  123. }
  124. }
  125. return index;
  126. }
  127. static av_cold int encode_init(AVCodecContext *avctx)
  128. {
  129. HYuvContext *s = avctx->priv_data;
  130. int i, j;
  131. ff_huffyuv_common_init(avctx);
  132. avctx->extradata = av_mallocz(1024*30); // 256*3+4 == 772
  133. avctx->stats_out = av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
  134. s->version = 2;
  135. avctx->coded_frame = &s->picture;
  136. switch (avctx->pix_fmt) {
  137. case AV_PIX_FMT_YUV420P:
  138. case AV_PIX_FMT_YUV422P:
  139. if (s->width & 1) {
  140. av_log(avctx, AV_LOG_ERROR, "Width must be even for this colorspace.\n");
  141. return -1;
  142. }
  143. s->bitstream_bpp = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 12 : 16;
  144. break;
  145. case AV_PIX_FMT_RGB32:
  146. s->bitstream_bpp = 32;
  147. break;
  148. case AV_PIX_FMT_RGB24:
  149. s->bitstream_bpp = 24;
  150. break;
  151. default:
  152. av_log(avctx, AV_LOG_ERROR, "format not supported\n");
  153. return -1;
  154. }
  155. avctx->bits_per_coded_sample = s->bitstream_bpp;
  156. s->decorrelate = s->bitstream_bpp >= 24;
  157. s->predictor = avctx->prediction_method;
  158. s->interlaced = avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
  159. if (avctx->context_model == 1) {
  160. s->context = avctx->context_model;
  161. if (s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)) {
  162. av_log(avctx, AV_LOG_ERROR,
  163. "context=1 is not compatible with "
  164. "2 pass huffyuv encoding\n");
  165. return -1;
  166. }
  167. }else s->context= 0;
  168. if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
  169. if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
  170. av_log(avctx, AV_LOG_ERROR,
  171. "Error: YV12 is not supported by huffyuv; use "
  172. "vcodec=ffvhuff or format=422p\n");
  173. return -1;
  174. }
  175. if (avctx->context_model) {
  176. av_log(avctx, AV_LOG_ERROR,
  177. "Error: per-frame huffman tables are not supported "
  178. "by huffyuv; use vcodec=ffvhuff\n");
  179. return -1;
  180. }
  181. if (s->interlaced != ( s->height > 288 ))
  182. av_log(avctx, AV_LOG_INFO,
  183. "using huffyuv 2.2.0 or newer interlacing flag\n");
  184. }
  185. if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN) {
  186. av_log(avctx, AV_LOG_ERROR,
  187. "Error: RGB is incompatible with median predictor\n");
  188. return -1;
  189. }
  190. ((uint8_t*)avctx->extradata)[0] = s->predictor | (s->decorrelate << 6);
  191. ((uint8_t*)avctx->extradata)[1] = s->bitstream_bpp;
  192. ((uint8_t*)avctx->extradata)[2] = s->interlaced ? 0x10 : 0x20;
  193. if (s->context)
  194. ((uint8_t*)avctx->extradata)[2] |= 0x40;
  195. ((uint8_t*)avctx->extradata)[3] = 0;
  196. s->avctx->extradata_size = 4;
  197. if (avctx->stats_in) {
  198. char *p = avctx->stats_in;
  199. for (i = 0; i < 3; i++)
  200. for (j = 0; j < 256; j++)
  201. s->stats[i][j] = 1;
  202. for (;;) {
  203. for (i = 0; i < 3; i++) {
  204. char *next;
  205. for (j = 0; j < 256; j++) {
  206. s->stats[i][j] += strtol(p, &next, 0);
  207. if (next == p) return -1;
  208. p = next;
  209. }
  210. }
  211. if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
  212. }
  213. } else {
  214. for (i = 0; i < 3; i++)
  215. for (j = 0; j < 256; j++) {
  216. int d = FFMIN(j, 256 - j);
  217. s->stats[i][j] = 100000000 / (d + 1);
  218. }
  219. }
  220. for (i = 0; i < 3; i++) {
  221. ff_huff_gen_len_table(s->len[i], s->stats[i]);
  222. if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i]) < 0) {
  223. return -1;
  224. }
  225. s->avctx->extradata_size +=
  226. store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
  227. }
  228. if (s->context) {
  229. for (i = 0; i < 3; i++) {
  230. int pels = s->width * s->height / (i ? 40 : 10);
  231. for (j = 0; j < 256; j++) {
  232. int d = FFMIN(j, 256 - j);
  233. s->stats[i][j] = pels/(d + 1);
  234. }
  235. }
  236. } else {
  237. for (i = 0; i < 3; i++)
  238. for (j = 0; j < 256; j++)
  239. s->stats[i][j]= 0;
  240. }
  241. ff_huffyuv_alloc_temp(s);
  242. s->picture_number=0;
  243. return 0;
  244. }
/**
 * Huffman-code one row of 4:2:2 residuals (Y in s->temp[0], U in
 * s->temp[1], V in s->temp[2]), optionally gathering per-symbol
 * statistics for 2-pass or adaptive (context) coding.
 *
 * @param s      encoder context
 * @param offset first luma sample to code; chroma starts at offset/2
 * @param count  number of luma samples, coded as Y0,U,Y1,V groups
 * @return 0 on success, -1 if the packet buffer could overflow
 */
static int encode_422_bitstream(HYuvContext *s, int offset, int count)
{
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset / 2;
    const uint8_t *v = s->temp[2] + offset / 2;

    /* conservative space check: 2 bytes per sample, 4 samples per pair
     * (NOTE(review): assumes code lengths never exceed 16 bits — confirm) */
    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 2 * 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* fetch the Y0,Y1,U,V samples of pixel pair i */
#define LOAD4\
    int y0 = y[2 * i];\
    int y1 = y[2 * i + 1];\
    int u0 = u[i];\
    int v0 = v[i];

    count /= 2;

    if (s->flags & CODEC_FLAG_PASS1) {
        /* first pass of 2-pass coding: only accumulate statistics */
        for(i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    /* statistics-only run requested: nothing is written */
    if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if (s->context) {
        /* adaptive-table mode: keep updating stats while writing codes */
        for (i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    } else {
        /* static tables: just emit the codes */
        for(i = 0; i < count; i++) {
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}
/**
 * Huffman-code one luma-only row from s->temp[0] (used for the extra
 * luma lines of 4:2:0 frames), two samples per iteration.
 *
 * @param s     encoder context
 * @param count number of luma samples to encode (must be even)
 * @return 0 on success, -1 if the packet buffer could overflow
 */
static int encode_gray_bitstream(HYuvContext *s, int count)
{
    int i;

    /* conservative space check: up to 4 bytes (32 bits) per sample */
    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* fetch a pair of luma samples */
#define LOAD2\
    int y0 = s->temp[0][2 * i];\
    int y1 = s->temp[0][2 * i + 1];
/* accumulate symbol statistics for the pair */
#define STAT2\
    s->stats[0][y0]++;\
    s->stats[0][y1]++;
/* emit the Huffman codes for the pair */
#define WRITE2\
    put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
    put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count /= 2;

    if (s->flags & CODEC_FLAG_PASS1) {
        /* first pass of 2-pass coding: statistics only */
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
        }
    }
    /* statistics-only run requested: nothing is written */
    if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if (s->context) {
        /* adaptive-table mode: update stats while writing */
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
            WRITE2;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
/**
 * Huffman-code one row of packed RGB(A) residuals from s->temp[0].
 *
 * Red and blue are decorrelated against green before coding; green uses
 * table 1, blue table 0, red table 2, and alpha shares table 2 with red.
 *
 * @param s      encoder context
 * @param count  number of pixels
 * @param planes 3 for RGB24, 4 for RGB32 (with alpha)
 * @return 0 on success, -1 if the packet buffer could overflow
 */
static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes)
{
    int i;

    /* conservative space check: up to 4 bytes per coded component */
    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
        4 * planes * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* Load one pixel and decorrelate b/r against g.
 * Note the precedence: "x - g & 0xFF" parses as "(x - g) & 0xFF".
 * NOTE(review): for planes == 3 the 'a' load reads byte 3*i + A, which
 * is the next pixel's first component (unused in that case, but an
 * out-of-bounds read for the last pixel — confirm buffer padding). */
#define LOAD_GBRA \
    int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G]; \
    int b = s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g & 0xFF; \
    int r = s->temp[0][planes == 3 ? 3 * i + 0 : 4 * i + R] - g & 0xFF; \
    int a = s->temp[0][planes * i + A];
/* accumulate statistics; alpha shares table 2 with red */
#define STAT_BGRA \
    s->stats[0][b]++; \
    s->stats[1][g]++; \
    s->stats[2][r]++; \
    if (planes == 4) \
        s->stats[2][a]++;
/* emit codes in g, b, r(, a) order */
#define WRITE_GBRA \
    put_bits(&s->pb, s->len[1][g], s->bits[1][g]); \
    put_bits(&s->pb, s->len[0][b], s->bits[0][b]); \
    put_bits(&s->pb, s->len[2][r], s->bits[2][r]); \
    if (planes == 4) \
        put_bits(&s->pb, s->len[2][a], s->bits[2][a]);

    if ((s->flags & CODEC_FLAG_PASS1) &&
        (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
        /* statistics-only run: gather stats, write nothing */
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            STAT_BGRA;
        }
    } else if (s->context || (s->flags & CODEC_FLAG_PASS1)) {
        /* stats and output together (adaptive tables or pass-1 + output) */
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            STAT_BGRA;
            WRITE_GBRA;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            WRITE_GBRA;
        }
    }
    return 0;
}
/**
 * Encode one frame into pkt.
 *
 * Layout of the produced packet: optional per-frame Huffman tables (only
 * in context/adaptive mode), followed by the raw left-prediction values
 * of the first pixel, then the Huffman-coded residual rows.  The whole
 * packet is finally byte-swapped to 32-bit words by dsp.bswap_buf.
 *
 * @return 0 on success (got_packet set), negative on error
 */
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    HYuvContext *s = avctx->priv_data;
    const int width = s->width;
    const int width2 = s->width>>1;            /* chroma width (4:2:x) */
    const int height = s->height;
    /* when interlaced, prediction references the line two rows up */
    const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    AVFrame * const p = &s->picture;
    int i, j, size = 0, ret;

    if (!pkt->data &&
        (ret = av_new_packet(pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error allocating output packet.\n");
        return ret;
    }

    *p = *pict;
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    if (s->context) {
        /* adaptive mode: rebuild tables from the running stats and store
         * them at the start of the packet, then decay the stats by half */
        for (i = 0; i < 3; i++) {
            ff_huff_gen_len_table(s->len[i], s->stats[i]);
            if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i]) < 0)
                return -1;
            size += store_table(s, s->len[i], &pkt->data[size]);
        }
        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, pkt->data + size, pkt->size - size);

    if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
        avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
        int lefty, leftu, leftv, y, cy;

        /* store the first pixel values raw; note the v,y1,u,y0 order and
         * that lefty is seeded from data[0][1] (the second luma sample) */
        put_bits(&s->pb, 8, leftv = p->data[2][0]);
        put_bits(&s->pb, 8, lefty = p->data[0][1]);
        put_bits(&s->pb, 8, leftu = p->data[1][0]);
        put_bits(&s->pb, 8, p->data[0][0]);

        lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
        leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);

        /* first row: skip the 2 luma / 1 chroma samples stored raw above */
        encode_422_bitstream(s, 2, width-2);

        if (s->predictor==MEDIAN) {
            int lefttopy, lefttopu, lefttopv;
            cy = y = 1;
            if (s->interlaced) {
                /* second field's first row is still left-predicted */
                lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
                leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
                leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);
                encode_422_bitstream(s, 0, width);
                y++; cy++;
            }

            /* first few samples of the next row are left-predicted before
             * the median predictor can kick in */
            lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
            leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
            leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);
            encode_422_bitstream(s, 0, 4);

            lefttopy = p->data[0][3];
            lefttopu = p->data[1][1];
            lefttopv = p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride + 4, width - 4 , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width - 4);
            y++; cy++;

            for (; y < height; y++,cy++) {
                uint8_t *ydst, *udst, *vdst;

                if (s->bitstream_bpp == 12) {
                    /* 4:2:0: encode luma-only lines until y catches up
                     * with twice the chroma line counter */
                    while (2 * cy > y) {
                        ydst = p->data[0] + p->linesize[0] * y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if (y >= height) break;
                }
                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, 0, width);
            }
        } else {
            /* LEFT or PLANE predictor */
            for (cy = y = 1; y < height; y++, cy++) {
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if (s->bitstream_bpp == 12) {
                    ydst = p->data[0] + p->linesize[0] * y;

                    /* interlaced: the first field line has no line above
                     * it within the field, so fall back to left prediction */
                    if (s->predictor == PLANE && s->interlaced < y) {
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    } else {
                        lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if (y >= height) break;
                }

                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                if (s->predictor == PLANE && s->interlaced < cy) {
                    /* vertical difference first, then left-predict the
                     * result; temp[2] holds U then V packed back to back */
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                } else {
                    lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, 0, width);
            }
        }
    } else if(avctx->pix_fmt == AV_PIX_FMT_RGB32) {
        /* RGB is stored bottom-up: start at the last line, negative stride */
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb, lefta;

        /* first pixel stored raw, component by component */
        put_bits(&s->pb, 8, lefta = data[A]);
        put_bits(&s->pb, 8, leftr = data[R]);
        put_bits(&s->pb, 8, leftg = data[G]);
        put_bits(&s->pb, 8, leftb = data[B]);

        sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1,
                                  &leftr, &leftg, &leftb, &lefta);
        encode_bgra_bitstream(s, width - 1, 4);

        for (y = 1; y < s->height; y++) {
            uint8_t *dst = data + y*stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width,
                                          &leftr, &leftg, &leftb, &lefta);
            } else {
                sub_left_prediction_bgr32(s, s->temp[0], dst, width,
                                          &leftr, &leftg, &leftb, &lefta);
            }
            encode_bgra_bitstream(s, width, 4);
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        /* first pixel raw; fourth byte is a zero pad (no alpha in RGB24) */
        put_bits(&s->pb, 8, leftr = data[0]);
        put_bits(&s->pb, 8, leftg = data[1]);
        put_bits(&s->pb, 8, leftb = data[2]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_rgb24(s, s->temp[0], data + 3, width - 1,
                                  &leftr, &leftg, &leftb);
        encode_bgra_bitstream(s, width-1, 3);

        for (y = 1; y < s->height; y++) {
            uint8_t *dst = data + y * stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride,
                                  width * 3);
                sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width,
                                          &leftr, &leftg, &leftb);
            } else {
                sub_left_prediction_rgb24(s, s->temp[0], dst, width,
                                          &leftr, &leftg, &leftb);
            }
            encode_bgra_bitstream(s, width, 3);
        }
    } else {
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    /* size in bytes, rounded up to the next 32-bit word boundary; the
     * 16+15 zero bits written below pad out that last partial word */
    size += (put_bits_count(&s->pb) + 31) / 8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size /= 4;  /* from here on, size is counted in 32-bit words */

    if ((s->flags&CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
        /* dump (and reset) the accumulated statistics every 32 frames;
         * note: these locals intentionally shadow the outer j and p */
        int j;
        char *p = avctx->stats_out;
        char *end = p + 1024*30;
        for (i = 0; i < 3; i++) {
            for (j = 0; j < 256; j++) {
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p += strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';
    if (!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
        flush_put_bits(&s->pb);
        /* bitstream is stored as byte-swapped 32-bit words */
        s->dsp.bswap_buf((uint32_t*)pkt->data, (uint32_t*)pkt->data, size);
    }

    s->picture_number++;

    pkt->size   = size * 4;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
  581. static av_cold int encode_end(AVCodecContext *avctx)
  582. {
  583. HYuvContext *s = avctx->priv_data;
  584. ff_huffyuv_common_end(s);
  585. av_freep(&avctx->extradata);
  586. av_freep(&avctx->stats_out);
  587. return 0;
  588. }
#if CONFIG_HUFFYUV_ENCODER
/* Registration of the classic huffyuv encoder.
 * YUV420P is absent on purpose: encode_init() rejects it for this codec
 * id (not representable in the original Win32 HuffYUV bitstream). */
AVCodec ff_huffyuv_encoder = {
    .name           = "huffyuv",
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_HUFFYUV,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){
        AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24,
        AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
};
#endif
#if CONFIG_FFVHUFF_ENCODER
/* Registration of the FFmpeg huffyuv variant, which additionally
 * supports YUV420P and per-frame (context) Huffman tables. */
AVCodec ff_ffvhuff_encoder = {
    .name           = "ffvhuff",
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FFVHUFF,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24,
        AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
};
#endif