/*
 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
 * the algorithm used
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * huffyuv encoder
 */

#include "avcodec.h"
#include "huffyuv.h"
#include "huffman.h"
#include "internal.h"
#include "put_bits.h"
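
/* Left-neighbor prediction on one plane: dst[i] = src[i] - previous sample,
 * with "left" seeding the first pixel; returns the last source sample so the
 * caller can carry it into the next row. Rows of 32+ pixels handle the first
 * 16 samples in C, presumably so dsp.diff_bytes() starts at a convenient
 * offset for its SIMD implementations. */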
static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
                                      const uint8_t *src, int w, int left)
{
    int i;
    if (w < 32) {
        for (i = 0; i < w; i++) {
            const int temp = src[i];
            dst[i] = temp - left;
            left   = temp;
        }
        return left;
    } else {
        for (i = 0; i < 16; i++) {
            const int temp = src[i];
            dst[i] = temp - left;
            left   = temp;
        }
        s->dsp.diff_bytes(dst + 16, src + 16, src + 15, w - 16);
        return src[w - 1];
    }
}
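
/* Same idea for packed BGR32: each of R, G, B, A is differenced against the
 * matching component of the previous pixel. After the first four pixels are
 * handled in C, a single dsp.diff_bytes() call covers the rest, since a
 * byte-wise difference at a 4-byte offset is exactly a per-component pixel
 * difference. */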
static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
                                             const uint8_t *src, int w,
                                             int *red, int *green, int *blue,
                                             int *alpha)
{
    int i;
    int r, g, b, a;
    r = *red;
    g = *green;
    b = *blue;
    a = *alpha;

    for (i = 0; i < FFMIN(w, 4); i++) {
        const int rt = src[i * 4 + R];
        const int gt = src[i * 4 + G];
        const int bt = src[i * 4 + B];
        const int at = src[i * 4 + A];
        dst[i * 4 + R] = rt - r;
        dst[i * 4 + G] = gt - g;
        dst[i * 4 + B] = bt - b;
        dst[i * 4 + A] = at - a;
        r = rt;
        g = gt;
        b = bt;
        a = at;
    }

    s->dsp.diff_bytes(dst + 16, src + 16, src + 12, w * 4 - 16);

    *red   = src[(w - 1) * 4 + R];
    *green = src[(w - 1) * 4 + G];
    *blue  = src[(w - 1) * 4 + B];
    *alpha = src[(w - 1) * 4 + A];
}
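
/* RGB24 variant: 3 bytes per pixel, so the first 16 pixels (48 bytes) are
 * differenced in C and dsp.diff_bytes() runs from byte 48 onward with a
 * 3-byte offset. */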
static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst,
                                             const uint8_t *src, int w,
                                             int *red, int *green, int *blue)
{
    int i;
    int r, g, b;
    r = *red;
    g = *green;
    b = *blue;
    for (i = 0; i < FFMIN(w, 16); i++) {
        const int rt = src[i * 3 + 0];
        const int gt = src[i * 3 + 1];
        const int bt = src[i * 3 + 2];
        dst[i * 3 + 0] = rt - r;
        dst[i * 3 + 1] = gt - g;
        dst[i * 3 + 2] = bt - b;
        r = rt;
        g = gt;
        b = bt;
    }

    s->dsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w * 3 - 48);

    *red   = src[(w - 1) * 3 + 0];
    *green = src[(w - 1) * 3 + 1];
    *blue  = src[(w - 1) * 3 + 2];
}
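
/* Run-length encode one 256-entry Huffman code-length table into the header.
 * A run of up to 7 equal lengths packs into a single byte as
 * val | (repeat << 5) (val < 32 leaves the top three bits free); longer runs
 * store val with a zero repeat field followed by an explicit repeat-count
 * byte. Returns the number of bytes written. */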
static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
{
    int i;
    int index = 0;

    for (i = 0; i < 256;) {
        int val = len[i];
        int repeat = 0;

        for (; i < 256 && len[i] == val && repeat < 255; i++)
            repeat++;

        av_assert0(val < 32 && val > 0 && repeat < 256 && repeat > 0);
        if (repeat > 7) {
            buf[index++] = val;
            buf[index++] = repeat;
        } else {
            buf[index++] = val | (repeat << 5);
        }
    }

    return index;
}
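
/* Encoder setup: validate the pixel format, pick the predictor, decorrelate,
 * interlace and context settings, write the 4-byte global header into
 * extradata, then build the initial Huffman tables, either from two-pass
 * stats in avctx->stats_in or from a generic distribution that favors small
 * residuals, and append the three run-length-coded length tables to the
 * extradata. */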
static av_cold int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;

    ff_huffyuv_common_init(avctx);

    avctx->extradata = av_mallocz(1024*30); // 256*3+4 == 772
    avctx->stats_out = av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
    if (!avctx->extradata || !avctx->stats_out) {
        av_freep(&avctx->stats_out);
        return AVERROR(ENOMEM);
    }
    s->version = 2;

    avctx->coded_frame = &s->picture;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
        if (s->width & 1) {
            av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
            return AVERROR(EINVAL);
        }
        s->bitstream_bpp = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 12 : 16;
        break;
    case AV_PIX_FMT_RGB32:
        s->bitstream_bpp = 32;
        break;
    case AV_PIX_FMT_RGB24:
        s->bitstream_bpp = 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return AVERROR(EINVAL);
    }
    avctx->bits_per_coded_sample = s->bitstream_bpp;
    s->decorrelate = s->bitstream_bpp >= 24;
    s->predictor = avctx->prediction_method;
    s->interlaced = avctx->flags & CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if (avctx->context_model == 1) {
        s->context = avctx->context_model;
        if (s->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {
            av_log(avctx, AV_LOG_ERROR,
                   "context=1 is not compatible with "
                   "2 pass huffyuv encoding\n");
            return AVERROR(EINVAL);
        }
    } else
        s->context = 0;

    if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
        if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error: YV12 is not supported by huffyuv; use "
                   "vcodec=ffvhuff or format=422p\n");
            return AVERROR(EINVAL);
        }
        if (avctx->context_model) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error: per-frame huffman tables are not supported "
                   "by huffyuv; use vcodec=ffvhuff\n");
            return AVERROR(EINVAL);
        }
        if (s->interlaced != (s->height > 288))
            av_log(avctx, AV_LOG_INFO,
                   "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN) {
        av_log(avctx, AV_LOG_ERROR,
               "Error: RGB is incompatible with median predictor\n");
        return AVERROR(EINVAL);
    }

    ((uint8_t*)avctx->extradata)[0] = s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[1] = s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2] = s->interlaced ? 0x10 : 0x20;
    if (s->context)
        ((uint8_t*)avctx->extradata)[2] |= 0x40;
    ((uint8_t*)avctx->extradata)[3] = 0;
    s->avctx->extradata_size = 4;

    if (avctx->stats_in) {
        char *p = avctx->stats_in;

        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++)
                s->stats[i][j] = 1;

        for (;;) {
            for (i = 0; i < 3; i++) {
                char *next;

                for (j = 0; j < 256; j++) {
                    s->stats[i][j] += strtol(p, &next, 0);
                    if (next == p) return -1;
                    p = next;
                }
            }
            if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
        }
    } else {
        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++) {
                int d = FFMIN(j, 256 - j);

                s->stats[i][j] = 100000000 / (d + 1);
            }
    }

    for (i = 0; i < 3; i++) {
        ff_huff_gen_len_table(s->len[i], s->stats[i]);

        if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i]) < 0) {
            return -1;
        }

        s->avctx->extradata_size +=
            store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if (s->context) {
        for (i = 0; i < 3; i++) {
            int pels = s->width * s->height / (i ? 40 : 10);

            for (j = 0; j < 256; j++) {
                int d = FFMIN(j, 256 - j);
                s->stats[i][j] = pels / (d + 1);
            }
        }
    } else {
        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++)
                s->stats[i][j] = 0;
    }

    if (ff_huffyuv_alloc_temp(s)) {
        ff_huffyuv_common_end(s);
        return AVERROR(ENOMEM);
    }

    s->picture_number = 0;

    return 0;
}
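
/* Write count luma samples (and count/2 samples per chroma plane) from
 * s->temp as Y0 U Y1 V tuples, starting at the given sample offset. In
 * pass-1 mode only symbol statistics are gathered; in adaptive-context mode
 * stats are updated while writing so the tables can be re-derived per
 * frame. */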
static int encode_422_bitstream(HYuvContext *s, int offset, int count)
{
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset / 2;
    const uint8_t *v = s->temp[2] + offset / 2;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 2 * 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4\
    int y0 = y[2 * i];\
    int y1 = y[2 * i + 1];\
    int u0 = u[i];\
    int v0 = v[i];

    count /= 2;

    if (s->flags & CODEC_FLAG_PASS1) {
        for (i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if (s->context) {
        for (i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }

    return 0;
}
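
/* Luma-only variant, used for the extra Y lines of 4:2:0 material
 * (bitstream_bpp == 12), where every second row carries no chroma in the
 * 4:2:2-ordered bitstream. */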
static int encode_gray_bitstream(HYuvContext *s, int count)
{
    int i;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2\
    int y0 = s->temp[0][2 * i];\
    int y1 = s->temp[0][2 * i + 1];
#define STAT2\
    s->stats[0][y0]++;\
    s->stats[0][y1]++;
#define WRITE2\
    put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
    put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count /= 2;

    if (s->flags & CODEC_FLAG_PASS1) {
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
        }
    }
    if (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if (s->context) {
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
            WRITE2;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
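
/* RGB path: codes one row of B/G/R (and A when planes == 4) symbols. G is
 * coded directly while B and R are decorrelated against G; alpha shares the
 * R context and tables. Covers the stats-only, stats+write and write-only
 * cases like the functions above. */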
static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes)
{
    int i;

    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 4 * planes * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD3\
    int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G];\
    int b = (s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g) & 0xff;\
    int r = (s->temp[0][planes == 3 ? 3 * i + 0 : 4 * i + R] - g) & 0xff;\
    int a = s->temp[0][planes * i + A];
#define STAT3\
    s->stats[0][b]++;\
    s->stats[1][g]++;\
    s->stats[2][r]++;\
    if (planes == 4) s->stats[2][a]++;
#define WRITE3\
    put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
    put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
    put_bits(&s->pb, s->len[2][r], s->bits[2][r]);\
    if (planes == 4) put_bits(&s->pb, s->len[2][a], s->bits[2][a]);

    if ((s->flags & CODEC_FLAG_PASS1) &&
        (s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
        for (i = 0; i < count; i++) {
            LOAD3;
            STAT3;
        }
    } else if (s->context || (s->flags & CODEC_FLAG_PASS1)) {
        for (i = 0; i < count; i++) {
            LOAD3;
            STAT3;
            WRITE3;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD3;
            WRITE3;
        }
    }
    return 0;
}
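
/* Per-frame driver: allocate the packet, in context mode re-derive and store
 * the Huffman tables from the running stats (then halve the stats so old
 * frames decay), emit the raw first-pixel seeds, and run the selected
 * predictor (left, plane or median) over every row. The bitstream is padded
 * and byte-swapped into little-endian 32-bit words; pass-1 stats are dumped
 * to stats_out every 32 frames. */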
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    HYuvContext *s = avctx->priv_data;
    const int width = s->width;
    const int width2 = s->width >> 1;
    const int height = s->height;
    const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    AVFrame * const p = &s->picture;
    int i, j, size = 0, ret;

    if ((ret = ff_alloc_packet2(avctx, pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0)
        return ret;

    *p = *pict;
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    if (s->context) {
        for (i = 0; i < 3; i++) {
            ff_huff_gen_len_table(s->len[i], s->stats[i]);
            if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i]) < 0)
                return -1;
            size += store_table(s, s->len[i], &pkt->data[size]);
        }

        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, pkt->data + size, pkt->size - size);

    if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
        avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
        int lefty, leftu, leftv, y, cy;

        put_bits(&s->pb, 8, leftv = p->data[2][0]);
        put_bits(&s->pb, 8, lefty = p->data[0][1]);
        put_bits(&s->pb, 8, leftu = p->data[1][0]);
        put_bits(&s->pb, 8,         p->data[0][0]);

        lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
        leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);

        encode_422_bitstream(s, 2, width - 2);

        if (s->predictor == MEDIAN) {
            int lefttopy, lefttopu, lefttopv;
            cy = y = 1;
            if (s->interlaced) {
                lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
                leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
                leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);

                encode_422_bitstream(s, 0, width);
                y++; cy++;
            }

            lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
            leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
            leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);

            encode_422_bitstream(s, 0, 4);

            lefttopy = p->data[0][3];
            lefttopu = p->data[1][1];
            lefttopv = p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0] + 4, p->data[0] + fake_ystride + 4, width - 4 , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1] + 2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2] + 2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width - 4);
            y++; cy++;

            for (; y < height; y++, cy++) {
                uint8_t *ydst, *udst, *vdst;

                if (s->bitstream_bpp == 12) {
                    while (2 * cy > y) {
                        ydst = p->data[0] + p->linesize[0] * y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if (y >= height) break;
                }
                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, 0, width);
            }
        } else {
            for (cy = y = 1; y < height; y++, cy++) {
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if (s->bitstream_bpp == 12) {
                    ydst = p->data[0] + p->linesize[0] * y;

                    if (s->predictor == PLANE && s->interlaced < y) {
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    } else {
                        lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if (y >= height) break;
                }

                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                if (s->predictor == PLANE && s->interlaced < cy) {
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                } else {
                    lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, 0, width);
            }
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB32) {
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb, lefta;

        put_bits(&s->pb, 8, lefta = data[A]);
        put_bits(&s->pb, 8, leftr = data[R]);
        put_bits(&s->pb, 8, leftg = data[G]);
        put_bits(&s->pb, 8, leftb = data[B]);

        sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1,
                                  &leftr, &leftg, &leftb, &lefta);
        encode_bgra_bitstream(s, width - 1, 4);

        for (y = 1; y < s->height; y++) {
            uint8_t *dst = data + y*stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width,
                                          &leftr, &leftg, &leftb, &lefta);
            } else {
                sub_left_prediction_bgr32(s, s->temp[0], dst, width,
                                          &leftr, &leftg, &leftb, &lefta);
            }
            encode_bgra_bitstream(s, width, 4);
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr = data[0]);
        put_bits(&s->pb, 8, leftg = data[1]);
        put_bits(&s->pb, 8, leftb = data[2]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_rgb24(s, s->temp[0], data + 3, width - 1,
                                  &leftr, &leftg, &leftb);
        encode_bgra_bitstream(s, width - 1, 3);

        for (y = 1; y < s->height; y++) {
            uint8_t *dst = data + y * stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 3);
                sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width,
                                          &leftr, &leftg, &leftb);
            } else {
                sub_left_prediction_rgb24(s, s->temp[0], dst, width,
                                          &leftr, &leftg, &leftb);
            }
            encode_bgra_bitstream(s, width, 3);
        }
    } else {
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    size += (put_bits_count(&s->pb) + 31) / 8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size /= 4;

    if ((s->flags & CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
        int j;
        char *p = avctx->stats_out;
        char *end = p + 1024*30;
        for (i = 0; i < 3; i++) {
            for (j = 0; j < 256; j++) {
                snprintf(p, end - p, "%"PRIu64" ", s->stats[i][j]);
                p += strlen(p);
                s->stats[i][j] = 0;
            }
            snprintf(p, end - p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';
    if (!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
        flush_put_bits(&s->pb);
        s->dsp.bswap_buf((uint32_t*)pkt->data, (uint32_t*)pkt->data, size);
    }

    s->picture_number++;

    pkt->size   = size * 4;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}

static av_cold int encode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    ff_huffyuv_common_end(s);

    av_freep(&avctx->extradata);
    av_freep(&avctx->stats_out);

    return 0;
}

#if CONFIG_HUFFYUV_ENCODER
AVCodec ff_huffyuv_encoder = {
    .name           = "huffyuv",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_HUFFYUV,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){
        AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_ENCODER
AVCodec ff_ffvhuff_encoder = {
    .name           = "ffvhuff",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FFVHUFF,
    .priv_data_size = sizeof(HYuvContext),
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif