You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

772 lines
26KB
  1. /*
  2. * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
  3. *
  4. * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
  5. * the algorithm used
  6. *
  7. * This file is part of Libav.
  8. *
  9. * Libav is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * Libav is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with Libav; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. */
  23. /**
  24. * @file
  25. * huffyuv encoder
  26. */
  27. #include "libavutil/opt.h"
  28. #include "avcodec.h"
  29. #include "huffyuv.h"
  30. #include "huffman.h"
  31. #include "huffyuvencdsp.h"
  32. #include "internal.h"
  33. #include "put_bits.h"
  34. static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
  35. uint8_t *src, int w, int left)
  36. {
  37. int i;
  38. if (w < 32) {
  39. for (i = 0; i < w; i++) {
  40. const int temp = src[i];
  41. dst[i] = temp - left;
  42. left = temp;
  43. }
  44. return left;
  45. } else {
  46. for (i = 0; i < 16; i++) {
  47. const int temp = src[i];
  48. dst[i] = temp - left;
  49. left = temp;
  50. }
  51. s->hencdsp.diff_bytes(dst + 16, src + 16, src + 15, w - 16);
  52. return src[w-1];
  53. }
  54. }
  55. static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
  56. uint8_t *src, int w,
  57. int *red, int *green, int *blue,
  58. int *alpha)
  59. {
  60. int i;
  61. int r, g, b, a;
  62. r = *red;
  63. g = *green;
  64. b = *blue;
  65. a = *alpha;
  66. for (i = 0; i < FFMIN(w, 4); i++) {
  67. const int rt = src[i * 4 + R];
  68. const int gt = src[i * 4 + G];
  69. const int bt = src[i * 4 + B];
  70. const int at = src[i * 4 + A];
  71. dst[i * 4 + R] = rt - r;
  72. dst[i * 4 + G] = gt - g;
  73. dst[i * 4 + B] = bt - b;
  74. dst[i * 4 + A] = at - a;
  75. r = rt;
  76. g = gt;
  77. b = bt;
  78. a = at;
  79. }
  80. s->hencdsp.diff_bytes(dst + 16, src + 16, src + 12, w * 4 - 16);
  81. *red = src[(w - 1) * 4 + R];
  82. *green = src[(w - 1) * 4 + G];
  83. *blue = src[(w - 1) * 4 + B];
  84. *alpha = src[(w - 1) * 4 + A];
  85. }
  86. static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst,
  87. uint8_t *src, int w,
  88. int *red, int *green, int *blue)
  89. {
  90. int i;
  91. int r, g, b;
  92. r = *red;
  93. g = *green;
  94. b = *blue;
  95. for (i = 0; i < FFMIN(w, 16); i++) {
  96. const int rt = src[i * 3 + 0];
  97. const int gt = src[i * 3 + 1];
  98. const int bt = src[i * 3 + 2];
  99. dst[i * 3 + 0] = rt - r;
  100. dst[i * 3 + 1] = gt - g;
  101. dst[i * 3 + 2] = bt - b;
  102. r = rt;
  103. g = gt;
  104. b = bt;
  105. }
  106. s->hencdsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w * 3 - 48);
  107. *red = src[(w - 1) * 3 + 0];
  108. *green = src[(w - 1) * 3 + 1];
  109. *blue = src[(w - 1) * 3 + 2];
  110. }
  111. static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
  112. {
  113. int i;
  114. int index = 0;
  115. for (i = 0; i < 256;) {
  116. int val = len[i];
  117. int repeat = 0;
  118. for (; i < 256 && len[i] == val && repeat < 255; i++)
  119. repeat++;
  120. assert(val < 32 && val >0 && repeat<256 && repeat>0);
  121. if ( repeat > 7) {
  122. buf[index++] = val;
  123. buf[index++] = repeat;
  124. } else {
  125. buf[index++] = val | (repeat << 5);
  126. }
  127. }
  128. return index;
  129. }
/**
 * Initialize the huffyuv/ffvhuff encoder: validate the pixel format,
 * write the 4-byte global header plus the three run-length-encoded
 * Huffman length tables into extradata, and seed the symbol statistics.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure, -1 on
 *         invalid configuration or malformed stats_in
 */
static av_cold int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;

    ff_huffyuv_common_init(avctx);
    ff_huffyuvencdsp_init(&s->hencdsp);

    /* generously sized: actual extradata is 4 + 3 RLE tables; stats_out
     * worst case is estimated in the comment below */
    avctx->extradata = av_mallocz(1024*30); // 256*3+4 == 772
    avctx->stats_out = av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
    s->version = 2;

    if (!avctx->extradata || !avctx->stats_out)
        return AVERROR(ENOMEM);

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
    /* legacy global option maps onto the private "context" option */
    if (avctx->context_model == 1)
        s->context = avctx->context_model;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
        /* 4:2:x subsampling requires an even luma width */
        if (s->width & 1) {
            av_log(avctx, AV_LOG_ERROR, "Width must be even for this colorspace.\n");
            return -1;
        }
        s->bitstream_bpp = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 12 : 16;
        break;
    case AV_PIX_FMT_RGB32:
        s->bitstream_bpp = 32;
        break;
    case AV_PIX_FMT_RGB24:
        s->bitstream_bpp = 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return -1;
    }
    avctx->bits_per_coded_sample = s->bitstream_bpp;
    /* RGB variants store channels decorrelated against green */
    s->decorrelate = s->bitstream_bpp >= 24;
#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->prediction_method)
        s->predictor = avctx->prediction_method;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    s->interlaced = avctx->flags & AV_CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if (s->context) {
        /* adaptive tables mutate the stats during encoding, which would
         * corrupt 2-pass statistics */
        if (s->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2)) {
            av_log(avctx, AV_LOG_ERROR,
                   "context=1 is not compatible with "
                   "2 pass huffyuv encoding\n");
            return -1;
        }
    }

    if (avctx->codec->id == AV_CODEC_ID_HUFFYUV) {
        /* restrictions for compatibility with the original huffyuv codec */
        if (avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error: YV12 is not supported by huffyuv; use "
                   "vcodec=ffvhuff or format=422p\n");
            return -1;
        }
#if FF_API_PRIVATE_OPT
        if (s->context) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error: per-frame huffman tables are not supported "
                   "by huffyuv; use vcodec=ffvhuff\n");
            return -1;
        }
#endif
        if (s->interlaced != ( s->height > 288 ))
            av_log(avctx, AV_LOG_INFO,
                   "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if (s->bitstream_bpp >= 24 && s->predictor == MEDIAN) {
        av_log(avctx, AV_LOG_ERROR,
               "Error: RGB is incompatible with median predictor\n");
        return -1;
    }

    /* 4-byte global header: predictor/decorrelate, bpp, flags byte, pad */
    ((uint8_t*)avctx->extradata)[0] = s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[1] = s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2] = s->interlaced ? 0x10 : 0x20;
    if (s->context)
        ((uint8_t*)avctx->extradata)[2] |= 0x40;
    ((uint8_t*)avctx->extradata)[3] = 0;
    s->avctx->extradata_size = 4;

    if (avctx->stats_in) {
        /* 2nd pass: accumulate the stats dumped by the first pass */
        char *p = avctx->stats_in;

        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++)
                s->stats[i][j] = 1;

        for (;;) {
            for (i = 0; i < 3; i++) {
                char *next;

                for (j = 0; j < 256; j++) {
                    s->stats[i][j] += strtol(p, &next, 0);
                    if (next == p) return -1;   /* malformed stats */
                    p = next;
                }
            }
            if (p[0] == 0 || p[1] == 0 || p[2] == 0) break;
        }
    } else {
        /* no stats: seed with a generic distribution favoring small
         * (and wrapped-around) residuals */
        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++) {
                int d = FFMIN(j, 256 - j);

                s->stats[i][j] = 100000000 / (d + 1);
            }
    }

    /* build per-plane Huffman tables and append them to extradata */
    for (i = 0; i < 3; i++) {
        ff_huff_gen_len_table(s->len[i], s->stats[i]);

        if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i]) < 0) {
            return -1;
        }

        s->avctx->extradata_size +=
            store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if (s->context) {
        /* re-seed stats scaled to the frame size for adaptive updating */
        for (i = 0; i < 3; i++) {
            int pels = s->width * s->height / (i ? 40 : 10);

            for (j = 0; j < 256; j++) {
                int d = FFMIN(j, 256 - j);

                s->stats[i][j] = pels/(d + 1);
            }
        }
    } else {
        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++)
                s->stats[i][j]= 0;
    }

    ff_huffyuv_alloc_temp(s);

    s->picture_number=0;

    return 0;
}
/**
 * Entropy-code one line of 4:2:2 samples from s->temp[].
 *
 * @param s      encoder context
 * @param offset starting luma sample (chroma is read from offset/2)
 * @param count  number of luma samples to code (assumed even)
 * @return 0 on success, -1 if the worst-case output would overflow
 */
static int encode_422_bitstream(HYuvContext *s, int offset, int count)
{
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset / 2;
    const uint8_t *v = s->temp[2] + offset / 2;

    /* worst case: each of the 4 codes per sample pair is up to 32 bits */
    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 2 * 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* load two luma samples and one chroma pair for iteration i */
#define LOAD4\
    int y0 = y[2 * i];\
    int y1 = y[2 * i + 1];\
    int u0 = u[i];\
    int v0 = v[i];

    count /= 2;

    if (s->flags & AV_CODEC_FLAG_PASS1) {
        /* first pass: gather symbol statistics only */
        for(i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if (s->context) {
        /* adaptive-table mode: update statistics while writing */
        for (i = 0; i < count; i++) {
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    } else {
        /* static-table mode: write only */
        for(i = 0; i < count; i++) {
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}
/**
 * Entropy-code one line of luma-only samples from s->temp[0]
 * (used for the extra luma lines of 4:2:0 content).
 *
 * @param s     encoder context
 * @param count number of samples to code (assumed even)
 * @return 0 on success, -1 if the worst-case output would overflow
 */
static int encode_gray_bitstream(HYuvContext *s, int count)
{
    int i;

    /* worst case: 2 codes per sample pair, each up to 32 bits */
    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) < 4 * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* load / count / write one pair of luma samples for iteration i */
#define LOAD2\
    int y0 = s->temp[0][2 * i];\
    int y1 = s->temp[0][2 * i + 1];
#define STAT2\
    s->stats[0][y0]++;\
    s->stats[0][y1]++;
#define WRITE2\
    put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
    put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count /= 2;

    if (s->flags & AV_CODEC_FLAG_PASS1) {
        /* first pass: statistics only */
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
        }
    }
    if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if (s->context) {
        /* adaptive tables: keep counting while writing */
        for (i = 0; i < count; i++) {
            LOAD2;
            STAT2;
            WRITE2;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
/**
 * Entropy-code one line of packed RGB(A) pixels from s->temp[0].
 * Red and blue are decorrelated against green before coding; green uses
 * plane 1's table, blue plane 0's, red and alpha plane 2's.
 *
 * @param s      encoder context
 * @param count  number of pixels
 * @param planes 3 for RGB24, 4 for RGB32 (with alpha)
 * @return 0 on success, -1 if the worst-case output would overflow
 */
static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes)
{
    int i;

    /* worst case: one up-to-32-bit code per channel per pixel */
    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
        4 * planes * count) {
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

/* load pixel i; b and r are stored as (value - g) & 0xFF (note: '-'
 * binds tighter than '&', so the masking applies to the difference) */
#define LOAD_GBRA \
    int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G]; \
    int b = s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g & 0xFF; \
    int r = s->temp[0][planes == 3 ? 3 * i + 0 : 4 * i + R] - g & 0xFF; \
    int a = s->temp[0][planes * i + A];

#define STAT_BGRA \
    s->stats[0][b]++; \
    s->stats[1][g]++; \
    s->stats[2][r]++; \
    if (planes == 4) \
        s->stats[2][a]++;

/* green first, then blue/red differences, then alpha (if present) */
#define WRITE_GBRA \
    put_bits(&s->pb, s->len[1][g], s->bits[1][g]); \
    put_bits(&s->pb, s->len[0][b], s->bits[0][b]); \
    put_bits(&s->pb, s->len[2][r], s->bits[2][r]); \
    if (planes == 4) \
        put_bits(&s->pb, s->len[2][a], s->bits[2][a]);

    if ((s->flags & AV_CODEC_FLAG_PASS1) &&
        (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
        /* first pass without output: statistics only */
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            STAT_BGRA;
        }
    } else if (s->context || (s->flags & AV_CODEC_FLAG_PASS1)) {
        /* adaptive tables or pass-1 with output: count and write */
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            STAT_BGRA;
            WRITE_GBRA;
        }
    } else {
        for (i = 0; i < count; i++) {
            LOAD_GBRA;
            WRITE_GBRA;
        }
    }
    return 0;
}
/**
 * Encode one frame. For context mode the per-frame Huffman tables are
 * stored at the head of the packet; the bitstream proper starts with the
 * raw predictor seed samples, then per-line predicted residuals.
 *
 * @return 0 on success, a negative error code on allocation failure
 */
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    HYuvContext *s = avctx->priv_data;
    const int width = s->width;
    const int width2 = s->width>>1;   /* chroma width for 4:2:x */
    const int height = s->height;
    /* when interlaced, predict from the same field (two lines up) */
    const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    const AVFrame * const p = pict;
    int i, j, size = 0, ret;

    if (!pkt->data &&
        (ret = av_new_packet(pkt, width * height * 3 * 4 + AV_INPUT_BUFFER_MIN_SIZE)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error allocating output packet.\n");
        return ret;
    }

    if (s->context) {
        /* per-frame tables: rebuild from current stats, prepend them to
         * the packet, then decay the stats for the next frame */
        for (i = 0; i < 3; i++) {
            ff_huff_gen_len_table(s->len[i], s->stats[i]);
            if (ff_huffyuv_generate_bits_table(s->bits[i], s->len[i]) < 0)
                return -1;
            size += store_table(s, s->len[i], &pkt->data[size]);
        }

        for (i = 0; i < 3; i++)
            for (j = 0; j < 256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, pkt->data + size, pkt->size - size);

    if (avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
        avctx->pix_fmt == AV_PIX_FMT_YUV420P) {
        int lefty, leftu, leftv, y, cy;

        /* raw seed samples so the decoder can start its prediction */
        put_bits(&s->pb, 8, leftv = p->data[2][0]);
        put_bits(&s->pb, 8, lefty = p->data[0][1]);
        put_bits(&s->pb, 8, leftu = p->data[1][0]);
        put_bits(&s->pb, 8, p->data[0][0]);

        /* line 0 is always left-predicted */
        lefty = sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
        leftu = sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv = sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);

        encode_422_bitstream(s, 2, width-2);

        if (s->predictor==MEDIAN) {
            int lefttopy, lefttopu, lefttopv;
            cy = y = 1;
            if (s->interlaced) {
                /* second field's first line is also left-predicted */
                lefty = sub_left_prediction(s, s->temp[0], p->data[0] + p->linesize[0], width , lefty);
                leftu = sub_left_prediction(s, s->temp[1], p->data[1] + p->linesize[1], width2, leftu);
                leftv = sub_left_prediction(s, s->temp[2], p->data[2] + p->linesize[2], width2, leftv);

                encode_422_bitstream(s, 0, width);
                y++; cy++;
            }

            /* median warm-up: first 4 luma / 2 chroma samples of the next
             * line still use left prediction */
            lefty = sub_left_prediction(s, s->temp[0], p->data[0] + fake_ystride, 4, lefty);
            leftu = sub_left_prediction(s, s->temp[1], p->data[1] + fake_ustride, 2, leftu);
            leftv = sub_left_prediction(s, s->temp[2], p->data[2] + fake_vstride, 2, leftv);

            encode_422_bitstream(s, 0, 4);

            lefttopy = p->data[0][3];
            lefttopu = p->data[1][1];
            lefttopv = p->data[2][1];
            s->hencdsp.sub_hfyu_median_pred(s->temp[0], p->data[0] + 4, p->data[0] + fake_ystride + 4, width - 4, &lefty, &lefttopy);
            s->hencdsp.sub_hfyu_median_pred(s->temp[1], p->data[1] + 2, p->data[1] + fake_ustride + 2, width2 - 2, &leftu, &lefttopu);
            s->hencdsp.sub_hfyu_median_pred(s->temp[2], p->data[2] + 2, p->data[2] + fake_vstride + 2, width2 - 2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width - 4);
            y++; cy++;

            for (; y < height; y++,cy++) {
                uint8_t *ydst, *udst, *vdst;

                if (s->bitstream_bpp == 12) {
                    /* 4:2:0: code luma-only lines until luma catches up
                     * with the chroma line counter */
                    while (2 * cy > y) {
                        ydst = p->data[0] + p->linesize[0] * y;
                        s->hencdsp.sub_hfyu_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if (y >= height) break;
                }
                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                s->hencdsp.sub_hfyu_median_pred(s->temp[0], ydst - fake_ystride, ydst, width, &lefty, &lefttopy);
                s->hencdsp.sub_hfyu_median_pred(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->hencdsp.sub_hfyu_median_pred(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, 0, width);
            }
        } else {
            /* LEFT or PLANE predictor */
            for (cy = y = 1; y < height; y++, cy++) {
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if (s->bitstream_bpp == 12) {
                    ydst = p->data[0] + p->linesize[0] * y;

                    if (s->predictor == PLANE && s->interlaced < y) {
                        s->hencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    } else {
                        lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if (y >= height) break;
                }

                ydst = p->data[0] + p->linesize[0] * y;
                udst = p->data[1] + p->linesize[1] * cy;
                vdst = p->data[2] + p->linesize[2] * cy;

                if (s->predictor == PLANE && s->interlaced < cy) {
                    /* PLANE: vertical difference first, then left-predict */
                    s->hencdsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->hencdsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->hencdsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty = sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu = sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                } else {
                    lefty = sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu = sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv = sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, 0, width);
            }
        }
    } else if(avctx->pix_fmt == AV_PIX_FMT_RGB32) {
        /* RGB is stored bottom-up: start at the last line, negative stride */
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb, lefta;

        /* raw seed pixel */
        put_bits(&s->pb, 8, lefta = data[A]);
        put_bits(&s->pb, 8, leftr = data[R]);
        put_bits(&s->pb, 8, leftg = data[G]);
        put_bits(&s->pb, 8, leftb = data[B]);

        sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1,
                                  &leftr, &leftg, &leftb, &lefta);
        encode_bgra_bitstream(s, width - 1, 4);

        for (y = 1; y < s->height; y++) {
            uint8_t *dst = data + y*stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->hencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width,
                                          &leftr, &leftg, &leftb, &lefta);
            } else {
                sub_left_prediction_bgr32(s, s->temp[0], dst, width,
                                          &leftr, &leftg, &leftb, &lefta);
            }
            encode_bgra_bitstream(s, width, 4);
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
        uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        /* raw seed pixel; fourth byte is padding (no alpha in RGB24) */
        put_bits(&s->pb, 8, leftr = data[0]);
        put_bits(&s->pb, 8, leftg = data[1]);
        put_bits(&s->pb, 8, leftb = data[2]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_rgb24(s, s->temp[0], data + 3, width - 1,
                                  &leftr, &leftg, &leftb);

        encode_bgra_bitstream(s, width-1, 3);

        for (y = 1; y < s->height; y++) {
            uint8_t *dst = data + y * stride;
            if (s->predictor == PLANE && s->interlaced < y) {
                s->hencdsp.diff_bytes(s->temp[1], dst, dst - fake_stride,
                                      width * 3);
                sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width,
                                          &leftr, &leftg, &leftb);
            } else {
                sub_left_prediction_rgb24(s, s->temp[0], dst, width,
                                          &leftr, &leftg, &leftb);
            }
            encode_bgra_bitstream(s, width, 3);
        }
    } else {
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    /* round up to whole 32-bit words (combined with size /= 4 below) */
    size += (put_bits_count(&s->pb) + 31) / 8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size /= 4;

    if ((s->flags & AV_CODEC_FLAG_PASS1) && (s->picture_number & 31) == 0) {
        /* dump accumulated statistics every 32 frames for 2-pass use */
        int j;
        char *p = avctx->stats_out;
        char *end = p + 1024*30;
        for (i = 0; i < 3; i++) {
            for (j = 0; j < 256; j++) {
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p += strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';

    if (!(s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT)) {
        flush_put_bits(&s->pb);
        /* huffyuv stores the bitstream as byte-swapped 32-bit words */
        s->bdsp.bswap_buf((uint32_t *) pkt->data, (uint32_t *) pkt->data, size);
    }

    s->picture_number++;

    pkt->size   = size * 4;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
  601. static av_cold int encode_end(AVCodecContext *avctx)
  602. {
  603. HYuvContext *s = avctx->priv_data;
  604. ff_huffyuv_common_end(s);
  605. av_freep(&avctx->extradata);
  606. av_freep(&avctx->stats_out);
  607. return 0;
  608. }
#define OFFSET(x) offsetof(HYuvContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

/* Declare a static AVClass named <variant>_class bound to <variant>_options. */
#define HUFF_CLASS(variant) \
static const AVClass variant ## _class = { \
    .class_name = # variant, \
    .item_name  = av_default_item_name, \
    .option     = variant ## _options, \
    .version    = LIBAVUTIL_VERSION_INT, \
}

/* Options shared by the huffyuv and ffvhuff encoders: the prediction
 * method and its named constants. */
#define FF_HUFFYUV_COMMON_OPTS \
    { "pred", "Prediction method", OFFSET(predictor), AV_OPT_TYPE_INT, { .i64 = LEFT }, LEFT, MEDIAN, VE, "pred" }, \
        { "left",   NULL, 0, AV_OPT_TYPE_CONST, { .i64 = LEFT },   INT_MIN, INT_MAX, VE, "pred" }, \
        { "plane",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = PLANE },  INT_MIN, INT_MAX, VE, "pred" }, \
        { "median", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = MEDIAN }, INT_MIN, INT_MAX, VE, "pred" }

static const AVOption huffyuv_options[] = {
    FF_HUFFYUV_COMMON_OPTS,
    { NULL},
};
HUFF_CLASS(huffyuv);

/* Encoder for the original huffyuv codec: no 4:2:0, no per-frame tables. */
AVCodec ff_huffyuv_encoder = {
    .name           = "huffyuv",
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_HUFFYUV,
    .priv_data_size = sizeof(HYuvContext),
    .priv_class     = &huffyuv_class,
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){
        AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24,
        AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
};
#if CONFIG_FFVHUFF_ENCODER
/* ffvhuff additionally exposes the per-frame ("context") table option. */
static const AVOption ffhuffyuv_options[] = {
    FF_HUFFYUV_COMMON_OPTS,
    { "context", "Set per-frame huffman tables", OFFSET(context), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE },
    { NULL }
};

HUFF_CLASS(ffhuffyuv);

/* FFmpeg variant of huffyuv: also supports YUV420P and context mode. */
AVCodec ff_ffvhuff_encoder = {
    .name           = "ffvhuff",
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FFVHUFF,
    .priv_data_size = sizeof(HYuvContext),
    .priv_class     = &ffhuffyuv_class,
    .init           = encode_init,
    .encode2        = encode_frame,
    .close          = encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]){
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24,
        AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
    },
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
};
#endif