  1. /*
  2. * huffyuv decoder
  3. *
  4. * Copyright (c) 2002-2014 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
  7. * the algorithm used
  8. *
  9. * This file is part of FFmpeg.
  10. *
  11. * FFmpeg is free software; you can redistribute it and/or
  12. * modify it under the terms of the GNU Lesser General Public
  13. * License as published by the Free Software Foundation; either
  14. * version 2.1 of the License, or (at your option) any later version.
  15. *
  16. * FFmpeg is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  19. * Lesser General Public License for more details.
  20. *
  21. * You should have received a copy of the GNU Lesser General Public
  22. * License along with FFmpeg; if not, write to the Free Software
  23. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  24. *
  25. * yuva, gray, 4:4:4, 4:1:1, 4:1:0 and >8 bit per sample support sponsored by NOA
  26. */
  27. /**
  28. * @file
  29. * huffyuv decoder
  30. */
  31. #define UNCHECKED_BITSTREAM_READER 1
  32. #include "avcodec.h"
  33. #include "get_bits.h"
  34. #include "huffyuv.h"
  35. #include "huffyuvdsp.h"
  36. #include "thread.h"
  37. #include "libavutil/imgutils.h"
  38. #include "libavutil/pixdesc.h"
/* RLE-coded code-length tables for the built-in "classic" Huffman tables,
 * used by old (version 0/1) streams that carry no tables in extradata.
 * They are parsed with read_len_table() in read_old_huffman_tables();
 * the trailing zero bytes are bitstream-reader padding. */
#define classic_shift_luma_table_size 42
static const unsigned char classic_shift_luma[classic_shift_luma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
    34, 36, 35, 69, 135, 232, 9, 16, 10, 24, 11, 23, 12, 16, 13, 10,
    14, 8, 15, 8, 16, 8, 17, 20, 16, 10, 207, 206, 205, 236, 11, 8,
    10, 21, 9, 23, 8, 8, 199, 70, 69, 68, 0,
    0,0,0,0,0,0,0,0,
};
#define classic_shift_chroma_table_size 59
static const unsigned char classic_shift_chroma[classic_shift_chroma_table_size + FF_INPUT_BUFFER_PADDING_SIZE] = {
    66, 36, 37, 38, 39, 40, 41, 75, 76, 77, 110, 239, 144, 81, 82, 83,
    84, 85, 118, 183, 56, 57, 88, 89, 56, 89, 154, 57, 58, 57, 26, 141,
    57, 56, 58, 57, 58, 57, 184, 119, 214, 245, 116, 83, 82, 49, 80, 79,
    78, 77, 44, 75, 41, 40, 39, 38, 37, 36, 34, 0,
    0,0,0,0,0,0,0,0,
};
/* Code bits for the classic tables, one entry per symbol; copied into
 * s->bits[0]/s->bits[1] in read_old_huffman_tables(). */
static const unsigned char classic_add_luma[256] = {
     3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
    73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
    68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
    35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
    37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
    35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
    27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
    15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
    12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
    12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
    18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
    28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
    28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
    62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
    54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
    46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
};
static const unsigned char classic_add_chroma[256] = {
      3,   1,   2,   2,   2,   2,   3,   3,   7,   5,   7,   5,   8,   6,  11,   9,
      7,  13,  11,  10,   9,   8,   7,   5,   9,   7,   6,   4,   7,   5,   8,   7,
     11,   8,  13,  11,  19,  15,  22,  23,  20,  33,  32,  28,  27,  29,  51,  77,
     43,  45,  76,  81,  46,  82,  75,  55,  56, 144,  58,  80,  60,  74, 147,  63,
    143,  65,  66,  67,  68,  69,  70,  71,  72,  73,  74,  75,  76,  77,  78,  79,
     80,  81,  82,  83,  84,  85,  86,  87,  88,  89,  90,  91,  27,  30,  21,  22,
     17,  14,   5,   6, 100,  54,  47,  50,  51,  53, 106, 107, 108, 109, 110, 111,
    112, 113, 114, 115,   4, 117, 118,  92,  94, 121, 122,   3, 124, 103,   2,   1,
      0, 129, 130, 131, 120, 119, 126, 125, 136, 137, 138, 139, 140, 141, 142, 134,
    135, 132, 133, 104,  64, 101,  62,  57, 102,  95,  93,  59,  61,  28,  97,  96,
     52,  49,  48,  29,  32,  25,  24,  46,  23,  98,  45,  44,  43,  20,  42,  41,
     19,  18,  99,  40,  15,  39,  38,  16,  13,  12,  11,  37,  10,   9,   8,  36,
      7, 128, 127, 105, 123, 116,  35,  34,  33, 145,  31,  79,  42, 146,  78,  26,
     83,  48,  49,  50,  44,  47,  26,  31,  30,  18,  17,  19,  21,  24,  25,  13,
     14,  16,  17,  18,  20,  21,  12,  14,  15,   9,  10,   6,   9,   6,   5,   8,
      6,  12,   8,  10,   7,   9,   6,   4,   6,   2,   2,   3,   3,   3,   3,   2,
};
  90. static int read_len_table(uint8_t *dst, GetBitContext *gb, int n)
  91. {
  92. int i, val, repeat;
  93. for (i = 0; i < n;) {
  94. repeat = get_bits(gb, 3);
  95. val = get_bits(gb, 5);
  96. if (repeat == 0)
  97. repeat = get_bits(gb, 8);
  98. if (i + repeat > n || get_bits_left(gb) < 0) {
  99. av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
  100. return AVERROR_INVALIDDATA;
  101. }
  102. while (repeat--)
  103. dst[i++] = val;
  104. }
  105. return 0;
  106. }
/**
 * Build the joint (pair) VLC tables s->vlc[4..7] that decode two symbols
 * in one table lookup, from the per-plane tables already stored in
 * s->len[] / s->bits[].
 *
 * @return 0 on success, negative AVERROR on allocation or VLC-init failure
 */
static int generate_joint_tables(HYuvContext *s)
{
    int ret;
    /* One scratch buffer split three ways:
     * 2*(1<<VLC_BITS) bytes of symbols, same of bits, 1<<VLC_BITS of len. */
    uint16_t *symbols = av_mallocz(5 << VLC_BITS);
    uint16_t *bits;
    uint8_t *len;
    if (!symbols)
        return AVERROR(ENOMEM);
    bits = symbols + (1 << VLC_BITS);
    len = (uint8_t *)(bits + (1 << VLC_BITS));
    if (s->bitstream_bpp < 24 || s->version > 2) {
        /* YUV / v3 path: one joint table per plane pairing two consecutive
         * symbols of the same plane (v3) or of the shared table (v2). */
        int p, i, y, u;
        for (p = 0; p < 4; p++) {
            int p0 = s->version > 2 ? p : 0;
            for (i = y = 0; y < s->vlc_n; y++) {
                int len0 = s->len[p0][y];
                int limit = VLC_BITS - len0;
                /* Only pairs whose combined length fits in one first-level
                 * lookup are entered into the joint table. */
                if (limit <= 0 || !len0)
                    continue;
                /* Skip symbols whose sign-extended value does not round-trip
                 * (out-of-range codes for the current vlc_n). */
                if ((sign_extend(y, 8) & (s->vlc_n-1)) != y)
                    continue;
                for (u = 0; u < s->vlc_n; u++) {
                    int len1 = s->len[p][u];
                    if (len1 > limit || !len1)
                        continue;
                    if ((sign_extend(u, 8) & (s->vlc_n-1)) != u)
                        continue;
                    av_assert0(i < (1 << VLC_BITS));
                    len[i] = len0 + len1;
                    bits[i] = (s->bits[p0][y] << len1) + s->bits[p][u];
                    /* Joint symbol: first sample in the high byte. */
                    symbols[i] = (y << 8) + (u & 0xFF);
                    i++;
                }
            }
            ff_free_vlc(&s->vlc[4 + p]);
            if ((ret = ff_init_vlc_sparse(&s->vlc[4 + p], VLC_BITS, i, len, 1, 1,
                                          bits, 2, 2, symbols, 2, 2, 0)) < 0)
                goto out;
        }
    } else {
        /* RGB path: joint table over (G,B,R) triples; the decoded triple is
         * looked up in pix_bgr_map instead of a symbol table. */
        uint8_t (*map)[4] = (uint8_t(*)[4]) s->pix_bgr_map;
        int i, b, g, r, code;
        int p0 = s->decorrelate;
        int p1 = !s->decorrelate;
        /* Restrict the range to +/-16 because that's pretty much guaranteed
         * to cover all the combinations that fit in 11 bits total, and it
         * does not matter if we miss a few rare codes. */
        for (i = 0, g = -16; g < 16; g++) {
            int len0 = s->len[p0][g & 255];
            int limit0 = VLC_BITS - len0;
            if (limit0 < 2 || !len0)
                continue;
            for (b = -16; b < 16; b++) {
                int len1 = s->len[p1][b & 255];
                int limit1 = limit0 - len1;
                if (limit1 < 1 || !len1)
                    continue;
                code = (s->bits[p0][g & 255] << len1) + s->bits[p1][b & 255];
                for (r = -16; r < 16; r++) {
                    int len2 = s->len[2][r & 255];
                    if (len2 > limit1 || !len2)
                        continue;
                    av_assert0(i < (1 << VLC_BITS));
                    len[i] = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r & 255];
                    if (s->decorrelate) {
                        /* Decorrelated streams code B and R relative to G. */
                        map[i][G] = g;
                        map[i][B] = g + b;
                        map[i][R] = g + r;
                    } else {
                        map[i][B] = g;
                        map[i][G] = b;
                        map[i][R] = r;
                    }
                    i++;
                }
            }
        }
        ff_free_vlc(&s->vlc[4]);
        if ((ret = init_vlc(&s->vlc[4], VLC_BITS, i, len, 1, 1,
                            bits, 2, 2, 0)) < 0)
            goto out;
    }
    ret = 0;
out:
    av_freep(&symbols);
    return ret;
}
  195. static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length)
  196. {
  197. GetBitContext gb;
  198. int i, ret;
  199. int count = 3;
  200. if ((ret = init_get_bits(&gb, src, length * 8)) < 0)
  201. return ret;
  202. if (s->version > 2)
  203. count = 1 + s->alpha + 2*s->chroma;
  204. for (i = 0; i < count; i++) {
  205. if ((ret = read_len_table(s->len[i], &gb, s->vlc_n)) < 0)
  206. return ret;
  207. if ((ret = ff_huffyuv_generate_bits_table(s->bits[i], s->len[i], s->vlc_n)) < 0)
  208. return ret;
  209. ff_free_vlc(&s->vlc[i]);
  210. if ((ret = init_vlc(&s->vlc[i], VLC_BITS, s->vlc_n, s->len[i], 1, 1,
  211. s->bits[i], 4, 4, 0)) < 0)
  212. return ret;
  213. }
  214. if ((ret = generate_joint_tables(s)) < 0)
  215. return ret;
  216. return (get_bits_count(&gb) + 7) / 8;
  217. }
  218. static int read_old_huffman_tables(HYuvContext *s)
  219. {
  220. GetBitContext gb;
  221. int i, ret;
  222. init_get_bits(&gb, classic_shift_luma,
  223. classic_shift_luma_table_size * 8);
  224. if ((ret = read_len_table(s->len[0], &gb, 256)) < 0)
  225. return ret;
  226. init_get_bits(&gb, classic_shift_chroma,
  227. classic_shift_chroma_table_size * 8);
  228. if ((ret = read_len_table(s->len[1], &gb, 256)) < 0)
  229. return ret;
  230. for (i = 0; i < 256; i++)
  231. s->bits[0][i] = classic_add_luma[i];
  232. for (i = 0; i < 256; i++)
  233. s->bits[1][i] = classic_add_chroma[i];
  234. if (s->bitstream_bpp >= 24) {
  235. memcpy(s->bits[1], s->bits[0], 256 * sizeof(uint32_t));
  236. memcpy(s->len[1], s->len[0], 256 * sizeof(uint8_t));
  237. }
  238. memcpy(s->bits[2], s->bits[1], 256 * sizeof(uint32_t));
  239. memcpy(s->len[2], s->len[1], 256 * sizeof(uint8_t));
  240. for (i = 0; i < 4; i++) {
  241. ff_free_vlc(&s->vlc[i]);
  242. if ((ret = init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1,
  243. s->bits[i], 4, 4, 0)) < 0)
  244. return ret;
  245. }
  246. if ((ret = generate_joint_tables(s)) < 0)
  247. return ret;
  248. return 0;
  249. }
/**
 * Decoder init: parse extradata (or fall back to the classic tables),
 * derive the stream version, predictor, bit depth and subsampling, and
 * select the output pixel format.
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int ret;

    ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
    if (ret < 0)
        return ret;

    ff_huffyuvdsp_init(&s->hdsp);
    /* NOTE(review): only the first 4 of the 8 VLCs are cleared here; the
     * remaining ones appear to rely on priv_data being zero-initialized
     * by the caller — confirm. */
    memset(s->vlc, 0, 4 * sizeof(VLC));

    s->interlaced = avctx->height > 288;
    s->bgr32 = 1;

    /* Derive the stream version from extradata layout. */
    if (avctx->extradata_size) {
        if ((avctx->bits_per_coded_sample & 7) &&
            avctx->bits_per_coded_sample != 12)
            s->version = 1; // do such files exist at all?
        else if (avctx->extradata_size > 3 && avctx->extradata[3] == 0)
            s->version = 2;
        else
            s->version = 3;
    } else
        s->version = 0;

    s->bps = 8;
    s->n = 1<<s->bps;
    s->vlc_n = FFMIN(s->n, MAX_VLC_N);
    s->chroma = 1;
    if (s->version >= 2) {
        int method, interlace;

        if (avctx->extradata_size < 4)
            return AVERROR_INVALIDDATA;

        /* extradata[0]: bit 6 = decorrelate flag, low 6 bits = predictor. */
        method = avctx->extradata[0];
        s->decorrelate = method & 64 ? 1 : 0;
        s->predictor = method & 63;
        if (s->version == 2) {
            s->bitstream_bpp = avctx->extradata[1];
            if (s->bitstream_bpp == 0)
                s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
        } else {
            /* v3: extradata[1] packs bit depth and chroma subsampling,
             * extradata[2] packs the yuv/chroma/alpha flags. */
            s->bps = (avctx->extradata[1] >> 4) + 1;
            s->n = 1<<s->bps;
            s->vlc_n = FFMIN(s->n, MAX_VLC_N);
            s->chroma_h_shift = avctx->extradata[1] & 3;
            s->chroma_v_shift = (avctx->extradata[1] >> 2) & 3;
            s->yuv = !!(avctx->extradata[2] & 1);
            s->chroma= !!(avctx->extradata[2] & 3);
            s->alpha = !!(avctx->extradata[2] & 4);
        }
        /* interlace: 1 = forced on, 2 = forced off, else keep height guess. */
        interlace = (avctx->extradata[2] & 0x30) >> 4;
        s->interlaced = (interlace == 1) ? 1 : (interlace == 2) ? 0 : s->interlaced;
        s->context = avctx->extradata[2] & 0x40 ? 1 : 0;

        if ((ret = read_huffman_tables(s, avctx->extradata + 4,
                                       avctx->extradata_size - 4)) < 0)
            return ret;
    } else {
        /* Pre-v2: predictor/decorrelate encoded in the low bits of
         * bits_per_coded_sample. */
        switch (avctx->bits_per_coded_sample & 7) {
        case 1:
            s->predictor = LEFT;
            s->decorrelate = 0;
            break;
        case 2:
            s->predictor = LEFT;
            s->decorrelate = 1;
            break;
        case 3:
            s->predictor = PLANE;
            s->decorrelate = avctx->bits_per_coded_sample >= 24;
            break;
        case 4:
            s->predictor = MEDIAN;
            s->decorrelate = 0;
            break;
        default:
            s->predictor = LEFT; // OLD
            s->decorrelate = 0;
            break;
        }
        s->bitstream_bpp = avctx->bits_per_coded_sample & ~7;
        s->context = 0;

        if ((ret = read_old_huffman_tables(s)) < 0)
            return ret;
    }

    if (s->version <= 2) {
        switch (s->bitstream_bpp) {
        case 12:
            avctx->pix_fmt = AV_PIX_FMT_YUV420P;
            s->yuv = 1;
            break;
        case 16:
            if (s->yuy2)
                avctx->pix_fmt = AV_PIX_FMT_YUYV422;
            else
                avctx->pix_fmt = AV_PIX_FMT_YUV422P;
            s->yuv = 1;
            break;
        case 24:
            if (s->bgr32)
                avctx->pix_fmt = AV_PIX_FMT_0RGB32;
            else
                avctx->pix_fmt = AV_PIX_FMT_BGR24;
            break;
        case 32:
            av_assert0(s->bgr32);
            avctx->pix_fmt = AV_PIX_FMT_RGB32;
            s->alpha = 1;
            break;
        default:
            return AVERROR_INVALIDDATA;
        }
        av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt,
                                         &s->chroma_h_shift,
                                         &s->chroma_v_shift);
    } else {
        /* v3: select pix_fmt from a packed key:
         * chroma<<10 | yuv<<9 | alpha<<8 | (bps-1)<<4 |
         * chroma_v_shift<<2 | chroma_h_shift. */
        switch ( (s->chroma<<10) | (s->yuv<<9) | (s->alpha<<8) | ((s->bps-1)<<4) | s->chroma_h_shift | (s->chroma_v_shift<<2)) {
        case 0x070:
            avctx->pix_fmt = AV_PIX_FMT_GRAY8;
            break;
        case 0x0F0:
            avctx->pix_fmt = AV_PIX_FMT_GRAY16;
            break;
        case 0x170:
            avctx->pix_fmt = AV_PIX_FMT_GRAY8A;
            break;
        case 0x470:
            avctx->pix_fmt = AV_PIX_FMT_GBRP;
            break;
        case 0x480:
            avctx->pix_fmt = AV_PIX_FMT_GBRP9;
            break;
        case 0x490:
            avctx->pix_fmt = AV_PIX_FMT_GBRP10;
            break;
        case 0x4B0:
            avctx->pix_fmt = AV_PIX_FMT_GBRP12;
            break;
        case 0x4D0:
            avctx->pix_fmt = AV_PIX_FMT_GBRP14;
            break;
        case 0x4F0:
            avctx->pix_fmt = AV_PIX_FMT_GBRP16;
            break;
        case 0x570:
            avctx->pix_fmt = AV_PIX_FMT_GBRAP;
            break;
        case 0x670:
            avctx->pix_fmt = AV_PIX_FMT_YUV444P;
            break;
        case 0x680:
            avctx->pix_fmt = AV_PIX_FMT_YUV444P9;
            break;
        case 0x690:
            avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
            break;
        case 0x6B0:
            avctx->pix_fmt = AV_PIX_FMT_YUV444P12;
            break;
        case 0x6D0:
            avctx->pix_fmt = AV_PIX_FMT_YUV444P14;
            break;
        case 0x6F0:
            avctx->pix_fmt = AV_PIX_FMT_YUV444P16;
            break;
        case 0x671:
            avctx->pix_fmt = AV_PIX_FMT_YUV422P;
            break;
        case 0x681:
            avctx->pix_fmt = AV_PIX_FMT_YUV422P9;
            break;
        case 0x691:
            avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
            break;
        case 0x6B1:
            avctx->pix_fmt = AV_PIX_FMT_YUV422P12;
            break;
        case 0x6D1:
            avctx->pix_fmt = AV_PIX_FMT_YUV422P14;
            break;
        case 0x6F1:
            avctx->pix_fmt = AV_PIX_FMT_YUV422P16;
            break;
        case 0x672:
            avctx->pix_fmt = AV_PIX_FMT_YUV411P;
            break;
        case 0x674:
            avctx->pix_fmt = AV_PIX_FMT_YUV440P;
            break;
        case 0x675:
            avctx->pix_fmt = AV_PIX_FMT_YUV420P;
            break;
        case 0x685:
            avctx->pix_fmt = AV_PIX_FMT_YUV420P9;
            break;
        case 0x695:
            avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
            break;
        case 0x6B5:
            avctx->pix_fmt = AV_PIX_FMT_YUV420P12;
            break;
        case 0x6D5:
            avctx->pix_fmt = AV_PIX_FMT_YUV420P14;
            break;
        case 0x6F5:
            avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
            break;
        case 0x67A:
            avctx->pix_fmt = AV_PIX_FMT_YUV410P;
            break;
        case 0x770:
            avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
            break;
        case 0x780:
            avctx->pix_fmt = AV_PIX_FMT_YUVA444P9;
            break;
        case 0x790:
            avctx->pix_fmt = AV_PIX_FMT_YUVA444P10;
            break;
        case 0x7F0:
            avctx->pix_fmt = AV_PIX_FMT_YUVA444P16;
            break;
        case 0x771:
            avctx->pix_fmt = AV_PIX_FMT_YUVA422P;
            break;
        case 0x781:
            avctx->pix_fmt = AV_PIX_FMT_YUVA422P9;
            break;
        case 0x791:
            avctx->pix_fmt = AV_PIX_FMT_YUVA422P10;
            break;
        case 0x7F1:
            avctx->pix_fmt = AV_PIX_FMT_YUVA422P16;
            break;
        case 0x775:
            avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
            break;
        case 0x785:
            avctx->pix_fmt = AV_PIX_FMT_YUVA420P9;
            break;
        case 0x795:
            avctx->pix_fmt = AV_PIX_FMT_YUVA420P10;
            break;
        case 0x7F5:
            avctx->pix_fmt = AV_PIX_FMT_YUVA420P16;
            break;
        default:
            return AVERROR_INVALIDDATA;
        }
    }

    ff_huffyuv_common_init(avctx);

    /* Subsampled chroma requires an even luma width. */
    if ((avctx->pix_fmt == AV_PIX_FMT_YUV422P || avctx->pix_fmt == AV_PIX_FMT_YUV420P) && avctx->width & 1) {
        av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
        return AVERROR_INVALIDDATA;
    }
    if (s->predictor == MEDIAN && avctx->pix_fmt == AV_PIX_FMT_YUV422P &&
        avctx->width % 4) {
        av_log(avctx, AV_LOG_ERROR, "width must be a multiple of 4 "
               "for this combination of colorspace and predictor type.\n");
        return AVERROR_INVALIDDATA;
    }

    if ((ret = ff_huffyuv_alloc_temp(s)) < 0) {
        ff_huffyuv_common_end(s);
        return ret;
    }

    return 0;
}
  512. static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
  513. {
  514. HYuvContext *s = avctx->priv_data;
  515. int i, ret;
  516. if ((ret = ff_huffyuv_alloc_temp(s)) < 0) {
  517. ff_huffyuv_common_end(s);
  518. return ret;
  519. }
  520. for (i = 0; i < 8; i++)
  521. s->vlc[i].table = NULL;
  522. if (s->version >= 2) {
  523. if ((ret = read_huffman_tables(s, avctx->extradata + 4,
  524. avctx->extradata_size)) < 0)
  525. return ret;
  526. } else {
  527. if ((ret = read_old_huffman_tables(s)) < 0)
  528. return ret;
  529. }
  530. return 0;
  531. }
/** Subset of GET_VLC for use in hand-rolled VLC code.
 *  Resolves one code starting from a first-level table index already in
 *  `index`, following up to max_depth table levels, then stores the symbol
 *  in dst and skips its bits. Expects `code`, `n`, `nb_bits` and `index`
 *  to be in scope. */
#define VLC_INTERN(dst, table, gb, name, bits, max_depth) \
    code = table[index][0]; \
    n = table[index][1]; \
    if (max_depth > 1 && n < 0) { \
        LAST_SKIP_BITS(name, gb, bits); \
        UPDATE_CACHE(name, gb); \
 \
        nb_bits = -n; \
        index = SHOW_UBITS(name, gb, nb_bits) + code; \
        code = table[index][0]; \
        n = table[index][1]; \
        if (max_depth > 2 && n < 0) { \
            LAST_SKIP_BITS(name, gb, nb_bits); \
            UPDATE_CACHE(name, gb); \
 \
            nb_bits = -n; \
            index = SHOW_UBITS(name, gb, nb_bits) + code; \
            code = table[index][0]; \
            n = table[index][1]; \
        } \
    } \
    dst = code; \
    LAST_SKIP_BITS(name, gb, n)

/** Decode two symbols at once: try the joint table dtable first (one lookup
 *  yields both), and on a miss (n <= 0) fall back to decoding each symbol
 *  from its individual table. */
#define GET_VLC_DUAL(dst0, dst1, name, gb, dtable, table1, table2, \
                     bits, max_depth, OP) \
    do { \
        unsigned int index = SHOW_UBITS(name, gb, bits); \
        int code, n = dtable[index][1]; \
 \
        if (n<=0) { \
            int nb_bits; \
            VLC_INTERN(dst0, table1, gb, name, bits, max_depth); \
 \
            UPDATE_CACHE(re, gb); \
            index = SHOW_UBITS(name, gb, bits); \
            VLC_INTERN(dst1, table2, gb, name, bits, max_depth); \
        } else { \
            code = dtable[index][0]; \
            OP(dst0, dst1, code); \
            LAST_SKIP_BITS(name, gb, n); \
        } \
    } while (0)

/* Split a joint 16-bit symbol into two 8-bit samples. */
#define OP8bits(dst0, dst1, code) dst0 = code>>8; dst1 = code

/* Decode one luma/chroma pair via the joint table for plane1. */
#define READ_2PIX(dst0, dst1, plane1) \
    UPDATE_CACHE(re, &s->gb); \
    GET_VLC_DUAL(dst0, dst1, re, &s->gb, s->vlc[4+plane1].table, \
                 s->vlc[0].table, s->vlc[plane1].table, VLC_BITS, 3, OP8bits)
/**
 * Decode one line of 4:2:2 samples into s->temp[0] (Y), s->temp[1] (U)
 * and s->temp[2] (V). Y/U and Y/V pairs are decoded jointly.
 */
static void decode_422_bitstream(HYuvContext *s, int count)
{
    int i, icount;
    OPEN_READER(re, &s->gb);
    count /= 2;

    /* Number of iterations guaranteed to fit in the remaining input
     * (worst case 4 reads of up to 32 bits per iteration). */
    icount = get_bits_left(&s->gb) / (32 * 4);
    if (count >= icount) {
        /* Fast path while input is guaranteed sufficient... */
        for (i = 0; i < icount; i++) {
            READ_2PIX(s->temp[0][2 * i], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
        }
        /* ...then check remaining bits before every read... */
        for (; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
            READ_2PIX(s->temp[0][2 * i ], s->temp[1][i], 1);
            if (BITS_LEFT(re, &s->gb) <= 0) break;
            READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
        }
        /* ...and zero-fill whatever the truncated input did not cover. */
        for (; i < count; i++)
            s->temp[0][2 * i ] = s->temp[1][i] =
            s->temp[0][2 * i + 1] = s->temp[2][i] = 0;
    } else {
        /* Whole line fits: no per-read bounds checks needed. */
        for (i = 0; i < count; i++) {
            READ_2PIX(s->temp[0][2 * i], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2 * i + 1], s->temp[2][i], 2);
        }
    }
    CLOSE_READER(re, &s->gb);
}
/* Decode two consecutive samples of one plane via its joint table. */
#define READ_2PIX_PLANE(dst0, dst1, plane, OP) \
    UPDATE_CACHE(re, &s->gb); \
    GET_VLC_DUAL(dst0, dst1, re, &s->gb, s->vlc[4+plane].table, \
                 s->vlc[plane].table, s->vlc[plane].table, VLC_BITS, 3, OP)

/* 9-14 bit samples: the second sample of a joint symbol is signed. */
#define OP14bits(dst0, dst1, code) dst0 = code>>8; dst1 = sign_extend(code, 8)

/* TODO instead of restarting the read when the code isn't in the first level
 * of the joint table, jump into the 2nd level of the individual table. */
/* 15/16-bit samples: VLC codes the top bits, 2 raw LSBs follow each one. */
#define READ_2PIX_PLANE16(dst0, dst1, plane){\
    dst0 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;\
    dst0 += get_bits(&s->gb, 2);\
    dst1 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;\
    dst1 += get_bits(&s->gb, 2);\
}
/**
 * Decode one line of a single plane (v3 path) into s->temp[0] (8-bit)
 * or s->temp16[0] (>8-bit), two samples at a time, with a trailing
 * single-sample read for odd widths.
 */
static void decode_plane_bitstream(HYuvContext *s, int width, int plane)
{
    int i, count = width/2;

    if (s->bps <= 8) {
        OPEN_READER(re, &s->gb);
        /* If the remaining input may run out, bounds-check every pair. */
        if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
            for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
                READ_2PIX_PLANE(s->temp[0][2 * i], s->temp[0][2 * i + 1], plane, OP8bits);
            }
        } else {
            for(i=0; i<count; i++){
                READ_2PIX_PLANE(s->temp[0][2 * i], s->temp[0][2 * i + 1], plane, OP8bits);
            }
        }
        /* Odd width: decode the final lone sample from the single table. */
        if( width&1 && BITS_LEFT(re, &s->gb)>0 ) {
            unsigned int index;
            int nb_bits, code, n;
            UPDATE_CACHE(re, &s->gb);
            index = SHOW_UBITS(re, &s->gb, VLC_BITS);
            VLC_INTERN(s->temp[0][width-1], s->vlc[plane].table,
                       &s->gb, re, VLC_BITS, 3);
        }
        CLOSE_READER(re, &s->gb);
    } else if (s->bps <= 14) {
        /* 9-14 bit: same structure, 16-bit output, signed second sample. */
        OPEN_READER(re, &s->gb);
        if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
            for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
                READ_2PIX_PLANE(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane, OP14bits);
            }
        } else {
            for(i=0; i<count; i++){
                READ_2PIX_PLANE(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane, OP14bits);
            }
        }
        if( width&1 && BITS_LEFT(re, &s->gb)>0 ) {
            unsigned int index;
            int nb_bits, code, n;
            UPDATE_CACHE(re, &s->gb);
            index = SHOW_UBITS(re, &s->gb, VLC_BITS);
            VLC_INTERN(s->temp16[0][width-1], s->vlc[plane].table,
                       &s->gb, re, VLC_BITS, 3);
        }
        CLOSE_READER(re, &s->gb);
    } else {
        /* 15/16 bit: VLC for the top bits plus 2 raw LSBs per sample. */
        if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
            for (i = 0; i < count && get_bits_left(&s->gb) > 0; i++) {
                READ_2PIX_PLANE16(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane);
            }
        } else {
            for(i=0; i<count; i++){
                READ_2PIX_PLANE16(s->temp16[0][2 * i], s->temp16[0][2 * i + 1], plane);
            }
        }
        if( width&1 && get_bits_left(&s->gb)>0 ) {
            int dst = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;
            s->temp16[0][width-1] = dst + get_bits(&s->gb, 2);
        }
    }
}
/**
 * Decode one line of gray (luma-only) samples into s->temp[0],
 * two samples at a time via the luma joint table.
 */
static void decode_gray_bitstream(HYuvContext *s, int count)
{
    int i;
    OPEN_READER(re, &s->gb);
    count /= 2;

    /* If the remaining input may run out, bounds-check every pair. */
    if (count >= (get_bits_left(&s->gb)) / (32 * 2)) {
        for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
            READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
        }
    } else {
        for (i = 0; i < count; i++) {
            READ_2PIX(s->temp[0][2 * i], s->temp[0][2 * i + 1], 0);
        }
    }
    CLOSE_READER(re, &s->gb);
}
  695. static av_always_inline void decode_bgr_1(HYuvContext *s, int count,
  696. int decorrelate, int alpha)
  697. {
  698. int i;
  699. OPEN_READER(re, &s->gb);
  700. for (i = 0; i < count && BITS_LEFT(re, &s->gb) > 0; i++) {
  701. unsigned int index;
  702. int code, n, nb_bits;
  703. UPDATE_CACHE(re, &s->gb);
  704. index = SHOW_UBITS(re, &s->gb, VLC_BITS);
  705. n = s->vlc[4].table[index][1];
  706. if (n>0) {
  707. code = s->vlc[4].table[index][0];
  708. *(uint32_t *) &s->temp[0][4 * i] = s->pix_bgr_map[code];
  709. LAST_SKIP_BITS(re, &s->gb, n);
  710. } else {
  711. if (decorrelate) {
  712. VLC_INTERN(s->temp[0][4 * i + G], s->vlc[1].table,
  713. &s->gb, re, VLC_BITS, 3);
  714. UPDATE_CACHE(re, &s->gb);
  715. index = SHOW_UBITS(re, &s->gb, VLC_BITS);
  716. VLC_INTERN(code, s->vlc[0].table, &s->gb, re, VLC_BITS, 3);
  717. s->temp[0][4 * i + B] = code + s->temp[0][4 * i + G];
  718. UPDATE_CACHE(re, &s->gb);
  719. index = SHOW_UBITS(re, &s->gb, VLC_BITS);
  720. VLC_INTERN(code, s->vlc[2].table, &s->gb, re, VLC_BITS, 3);
  721. s->temp[0][4 * i + R] = code + s->temp[0][4 * i + G];
  722. } else {
  723. VLC_INTERN(s->temp[0][4 * i + B], s->vlc[0].table,
  724. &s->gb, re, VLC_BITS, 3);
  725. UPDATE_CACHE(re, &s->gb);
  726. index = SHOW_UBITS(re, &s->gb, VLC_BITS);
  727. VLC_INTERN(s->temp[0][4 * i + G], s->vlc[1].table,
  728. &s->gb, re, VLC_BITS, 3);
  729. UPDATE_CACHE(re, &s->gb);
  730. index = SHOW_UBITS(re, &s->gb, VLC_BITS);
  731. VLC_INTERN(s->temp[0][4 * i + R], s->vlc[2].table,
  732. &s->gb, re, VLC_BITS, 3);
  733. }
  734. }
  735. if (alpha) {
  736. UPDATE_CACHE(re, &s->gb);
  737. index = SHOW_UBITS(re, &s->gb, VLC_BITS);
  738. VLC_INTERN(s->temp[0][4 * i + A], s->vlc[2].table,
  739. &s->gb, re, VLC_BITS, 3);
  740. } else
  741. s->temp[0][4 * i + A] = 0;
  742. }
  743. CLOSE_READER(re, &s->gb);
  744. }
  745. static void decode_bgr_bitstream(HYuvContext *s, int count)
  746. {
  747. if (s->decorrelate) {
  748. if (s->bitstream_bpp == 24)
  749. decode_bgr_1(s, count, 1, 0);
  750. else
  751. decode_bgr_1(s, count, 1, 1);
  752. } else {
  753. if (s->bitstream_bpp == 24)
  754. decode_bgr_1(s, count, 0, 0);
  755. else
  756. decode_bgr_1(s, count, 0, 1);
  757. }
  758. }
  759. static void draw_slice(HYuvContext *s, AVFrame *frame, int y)
  760. {
  761. int h, cy, i;
  762. int offset[AV_NUM_DATA_POINTERS];
  763. if (!s->avctx->draw_horiz_band)
  764. return;
  765. h = y - s->last_slice_end;
  766. y -= h;
  767. if (s->bitstream_bpp == 12)
  768. cy = y >> 1;
  769. else
  770. cy = y;
  771. offset[0] = frame->linesize[0] * y;
  772. offset[1] = frame->linesize[1] * cy;
  773. offset[2] = frame->linesize[2] * cy;
  774. for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
  775. offset[i] = 0;
  776. emms_c();
  777. s->avctx->draw_horiz_band(s->avctx, frame, offset, y, 3, h);
  778. s->last_slice_end = y + h;
  779. }
  780. static int left_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int acc)
  781. {
  782. if (s->bps <= 8) {
  783. return s->hdsp.add_hfyu_left_pred(dst, src, w, acc);
  784. } else {
  785. return s->llviddsp.add_hfyu_left_pred_int16(( uint16_t *)dst, (const uint16_t *)src, s->n-1, w, acc);
  786. }
  787. }
  788. static void add_bytes(HYuvContext *s, uint8_t *dst, uint8_t *src, int w)
  789. {
  790. if (s->bps <= 8) {
  791. s->hdsp.add_bytes(dst, src, w);
  792. } else {
  793. s->llviddsp.add_int16((uint16_t*)dst, (const uint16_t*)src, s->n - 1, w);
  794. }
  795. }
  796. static void add_median_prediction(HYuvContext *s, uint8_t *dst, const uint8_t *src, const uint8_t *diff, int w, int *left, int *left_top)
  797. {
  798. if (s->bps <= 8) {
  799. s->hdsp.add_hfyu_median_pred(dst, src, diff, w, left, left_top);
  800. } else {
  801. s->llviddsp.add_hfyu_median_pred_int16((uint16_t *)dst, (const uint16_t *)src, (const uint16_t *)diff, s->n-1, w, left, left_top);
  802. }
  803. }
  804. static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
  805. AVPacket *avpkt)
  806. {
  807. const uint8_t *buf = avpkt->data;
  808. int buf_size = avpkt->size;
  809. HYuvContext *s = avctx->priv_data;
  810. const int width = s->width;
  811. const int width2 = s->width >> 1;
  812. const int height = s->height;
  813. int fake_ystride, fake_ustride, fake_vstride;
  814. ThreadFrame frame = { .f = data };
  815. AVFrame *const p = data;
  816. int table_size = 0, ret;
  817. av_fast_padded_malloc(&s->bitstream_buffer,
  818. &s->bitstream_buffer_size,
  819. buf_size);
  820. if (!s->bitstream_buffer)
  821. return AVERROR(ENOMEM);
  822. s->bdsp.bswap_buf((uint32_t *) s->bitstream_buffer,
  823. (const uint32_t *) buf, buf_size / 4);
  824. if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
  825. return ret;
  826. if (s->context) {
  827. table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
  828. if (table_size < 0)
  829. return table_size;
  830. }
  831. if ((unsigned) (buf_size - table_size) >= INT_MAX / 8)
  832. return AVERROR_INVALIDDATA;
  833. if ((ret = init_get_bits(&s->gb, s->bitstream_buffer + table_size,
  834. (buf_size - table_size) * 8)) < 0)
  835. return ret;
  836. fake_ystride = s->interlaced ? p->linesize[0] * 2 : p->linesize[0];
  837. fake_ustride = s->interlaced ? p->linesize[1] * 2 : p->linesize[1];
  838. fake_vstride = s->interlaced ? p->linesize[2] * 2 : p->linesize[2];
  839. s->last_slice_end = 0;
  840. if (s->version > 2) {
  841. int plane;
  842. for(plane = 0; plane < 1 + 2*s->chroma + s->alpha; plane++) {
  843. int left, lefttop, y;
  844. int w = width;
  845. int h = height;
  846. int fake_stride = fake_ystride;
  847. if (s->chroma && (plane == 1 || plane == 2)) {
  848. w >>= s->chroma_h_shift;
  849. h >>= s->chroma_v_shift;
  850. fake_stride = plane == 1 ? fake_ustride : fake_vstride;
  851. }
  852. switch (s->predictor) {
  853. case LEFT:
  854. case PLANE:
  855. decode_plane_bitstream(s, w, plane);
  856. left = left_prediction(s, p->data[plane], s->temp[0], w, 0);
  857. for (y = 1; y < h; y++) {
  858. uint8_t *dst = p->data[plane] + p->linesize[plane]*y;
  859. decode_plane_bitstream(s, w, plane);
  860. left = left_prediction(s, dst, s->temp[0], w, left);
  861. if (s->predictor == PLANE) {
  862. if (y > s->interlaced) {
  863. add_bytes(s, dst, dst - fake_stride, w);
  864. }
  865. }
  866. }
  867. break;
  868. case MEDIAN:
  869. decode_plane_bitstream(s, w, plane);
  870. left= left_prediction(s, p->data[plane], s->temp[0], w, 0);
  871. y = 1;
  872. /* second line is left predicted for interlaced case */
  873. if (s->interlaced) {
  874. decode_plane_bitstream(s, w, plane);
  875. left = left_prediction(s, p->data[plane] + p->linesize[plane], s->temp[0], w, left);
  876. y++;
  877. }
  878. lefttop = p->data[plane][0];
  879. decode_plane_bitstream(s, w, plane);
  880. add_median_prediction(s, p->data[plane] + fake_stride, p->data[plane], s->temp[0], w, &left, &lefttop);
  881. y++;
  882. for (; y<h; y++) {
  883. uint8_t *dst;
  884. decode_plane_bitstream(s, w, plane);
  885. dst = p->data[plane] + p->linesize[plane] * y;
  886. add_median_prediction(s, dst, dst - fake_stride, s->temp[0], w, &left, &lefttop);
  887. }
  888. break;
  889. }
  890. }
  891. draw_slice(s, p, height);
  892. } else if (s->bitstream_bpp < 24) {
  893. int y, cy;
  894. int lefty, leftu, leftv;
  895. int lefttopy, lefttopu, lefttopv;
  896. if (s->yuy2) {
  897. p->data[0][3] = get_bits(&s->gb, 8);
  898. p->data[0][2] = get_bits(&s->gb, 8);
  899. p->data[0][1] = get_bits(&s->gb, 8);
  900. p->data[0][0] = get_bits(&s->gb, 8);
  901. av_log(avctx, AV_LOG_ERROR,
  902. "YUY2 output is not implemented yet\n");
  903. return AVERROR_PATCHWELCOME;
  904. } else {
  905. leftv =
  906. p->data[2][0] = get_bits(&s->gb, 8);
  907. lefty =
  908. p->data[0][1] = get_bits(&s->gb, 8);
  909. leftu =
  910. p->data[1][0] = get_bits(&s->gb, 8);
  911. p->data[0][0] = get_bits(&s->gb, 8);
  912. switch (s->predictor) {
  913. case LEFT:
  914. case PLANE:
  915. decode_422_bitstream(s, width - 2);
  916. lefty = s->hdsp.add_hfyu_left_pred(p->data[0] + 2, s->temp[0],
  917. width - 2, lefty);
  918. if (!(s->flags & CODEC_FLAG_GRAY)) {
  919. leftu = s->hdsp.add_hfyu_left_pred(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
  920. leftv = s->hdsp.add_hfyu_left_pred(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
  921. }
  922. for (cy = y = 1; y < s->height; y++, cy++) {
  923. uint8_t *ydst, *udst, *vdst;
  924. if (s->bitstream_bpp == 12) {
  925. decode_gray_bitstream(s, width);
  926. ydst = p->data[0] + p->linesize[0] * y;
  927. lefty = s->hdsp.add_hfyu_left_pred(ydst, s->temp[0],
  928. width, lefty);
  929. if (s->predictor == PLANE) {
  930. if (y > s->interlaced)
  931. s->hdsp.add_bytes(ydst, ydst - fake_ystride, width);
  932. }
  933. y++;
  934. if (y >= s->height)
  935. break;
  936. }
  937. draw_slice(s, p, y);
  938. ydst = p->data[0] + p->linesize[0] * y;
  939. udst = p->data[1] + p->linesize[1] * cy;
  940. vdst = p->data[2] + p->linesize[2] * cy;
  941. decode_422_bitstream(s, width);
  942. lefty = s->hdsp.add_hfyu_left_pred(ydst, s->temp[0],
  943. width, lefty);
  944. if (!(s->flags & CODEC_FLAG_GRAY)) {
  945. leftu = s->hdsp.add_hfyu_left_pred(udst, s->temp[1], width2, leftu);
  946. leftv = s->hdsp.add_hfyu_left_pred(vdst, s->temp[2], width2, leftv);
  947. }
  948. if (s->predictor == PLANE) {
  949. if (cy > s->interlaced) {
  950. s->hdsp.add_bytes(ydst, ydst - fake_ystride, width);
  951. if (!(s->flags & CODEC_FLAG_GRAY)) {
  952. s->hdsp.add_bytes(udst, udst - fake_ustride, width2);
  953. s->hdsp.add_bytes(vdst, vdst - fake_vstride, width2);
  954. }
  955. }
  956. }
  957. }
  958. draw_slice(s, p, height);
  959. break;
  960. case MEDIAN:
  961. /* first line except first 2 pixels is left predicted */
  962. decode_422_bitstream(s, width - 2);
  963. lefty = s->hdsp.add_hfyu_left_pred(p->data[0] + 2, s->temp[0],
  964. width - 2, lefty);
  965. if (!(s->flags & CODEC_FLAG_GRAY)) {
  966. leftu = s->hdsp.add_hfyu_left_pred(p->data[1] + 1, s->temp[1], width2 - 1, leftu);
  967. leftv = s->hdsp.add_hfyu_left_pred(p->data[2] + 1, s->temp[2], width2 - 1, leftv);
  968. }
  969. cy = y = 1;
  970. /* second line is left predicted for interlaced case */
  971. if (s->interlaced) {
  972. decode_422_bitstream(s, width);
  973. lefty = s->hdsp.add_hfyu_left_pred(p->data[0] + p->linesize[0],
  974. s->temp[0], width, lefty);
  975. if (!(s->flags & CODEC_FLAG_GRAY)) {
  976. leftu = s->hdsp.add_hfyu_left_pred(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
  977. leftv = s->hdsp.add_hfyu_left_pred(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
  978. }
  979. y++;
  980. cy++;
  981. }
  982. /* next 4 pixels are left predicted too */
  983. decode_422_bitstream(s, 4);
  984. lefty = s->hdsp.add_hfyu_left_pred(p->data[0] + fake_ystride,
  985. s->temp[0], 4, lefty);
  986. if (!(s->flags & CODEC_FLAG_GRAY)) {
  987. leftu = s->hdsp.add_hfyu_left_pred(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
  988. leftv = s->hdsp.add_hfyu_left_pred(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
  989. }
  990. /* next line except the first 4 pixels is median predicted */
  991. lefttopy = p->data[0][3];
  992. decode_422_bitstream(s, width - 4);
  993. s->hdsp.add_hfyu_median_pred(p->data[0] + fake_ystride + 4,
  994. p->data[0] + 4, s->temp[0],
  995. width - 4, &lefty, &lefttopy);
  996. if (!(s->flags & CODEC_FLAG_GRAY)) {
  997. lefttopu = p->data[1][1];
  998. lefttopv = p->data[2][1];
  999. s->hdsp.add_hfyu_median_pred(p->data[1] + fake_ustride + 2, p->data[1] + 2, s->temp[1], width2 - 2, &leftu, &lefttopu);
  1000. s->hdsp.add_hfyu_median_pred(p->data[2] + fake_vstride + 2, p->data[2] + 2, s->temp[2], width2 - 2, &leftv, &lefttopv);
  1001. }
  1002. y++;
  1003. cy++;
  1004. for (; y < height; y++, cy++) {
  1005. uint8_t *ydst, *udst, *vdst;
  1006. if (s->bitstream_bpp == 12) {
  1007. while (2 * cy > y) {
  1008. decode_gray_bitstream(s, width);
  1009. ydst = p->data[0] + p->linesize[0] * y;
  1010. s->hdsp.add_hfyu_median_pred(ydst, ydst - fake_ystride,
  1011. s->temp[0], width,
  1012. &lefty, &lefttopy);
  1013. y++;
  1014. }
  1015. if (y >= height)
  1016. break;
  1017. }
  1018. draw_slice(s, p, y);
  1019. decode_422_bitstream(s, width);
  1020. ydst = p->data[0] + p->linesize[0] * y;
  1021. udst = p->data[1] + p->linesize[1] * cy;
  1022. vdst = p->data[2] + p->linesize[2] * cy;
  1023. s->hdsp.add_hfyu_median_pred(ydst, ydst - fake_ystride,
  1024. s->temp[0], width,
  1025. &lefty, &lefttopy);
  1026. if (!(s->flags & CODEC_FLAG_GRAY)) {
  1027. s->hdsp.add_hfyu_median_pred(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
  1028. s->hdsp.add_hfyu_median_pred(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
  1029. }
  1030. }
  1031. draw_slice(s, p, height);
  1032. break;
  1033. }
  1034. }
  1035. } else {
  1036. int y;
  1037. uint8_t left[4];
  1038. const int last_line = (height - 1) * p->linesize[0];
  1039. if (s->bitstream_bpp == 32) {
  1040. left[A] = p->data[0][last_line + A] = get_bits(&s->gb, 8);
  1041. left[R] = p->data[0][last_line + R] = get_bits(&s->gb, 8);
  1042. left[G] = p->data[0][last_line + G] = get_bits(&s->gb, 8);
  1043. left[B] = p->data[0][last_line + B] = get_bits(&s->gb, 8);
  1044. } else {
  1045. left[R] = p->data[0][last_line + R] = get_bits(&s->gb, 8);
  1046. left[G] = p->data[0][last_line + G] = get_bits(&s->gb, 8);
  1047. left[B] = p->data[0][last_line + B] = get_bits(&s->gb, 8);
  1048. left[A] = p->data[0][last_line + A] = 255;
  1049. skip_bits(&s->gb, 8);
  1050. }
  1051. if (s->bgr32) {
  1052. switch (s->predictor) {
  1053. case LEFT:
  1054. case PLANE:
  1055. decode_bgr_bitstream(s, width - 1);
  1056. s->hdsp.add_hfyu_left_pred_bgr32(p->data[0] + last_line + 4,
  1057. s->temp[0], width - 1, left);
  1058. for (y = s->height - 2; y >= 0; y--) { // Yes it is stored upside down.
  1059. decode_bgr_bitstream(s, width);
  1060. s->hdsp.add_hfyu_left_pred_bgr32(p->data[0] + p->linesize[0] * y,
  1061. s->temp[0], width, left);
  1062. if (s->predictor == PLANE) {
  1063. if (s->bitstream_bpp != 32)
  1064. left[A] = 0;
  1065. if ((y & s->interlaced) == 0 &&
  1066. y < s->height - 1 - s->interlaced) {
  1067. s->hdsp.add_bytes(p->data[0] + p->linesize[0] * y,
  1068. p->data[0] + p->linesize[0] * y +
  1069. fake_ystride, fake_ystride);
  1070. }
  1071. }
  1072. }
  1073. // just 1 large slice as this is not possible in reverse order
  1074. draw_slice(s, p, height);
  1075. break;
  1076. default:
  1077. av_log(avctx, AV_LOG_ERROR,
  1078. "prediction type not supported!\n");
  1079. }
  1080. } else {
  1081. av_log(avctx, AV_LOG_ERROR,
  1082. "BGR24 output is not implemented yet\n");
  1083. return AVERROR_PATCHWELCOME;
  1084. }
  1085. }
  1086. emms_c();
  1087. *got_frame = 1;
  1088. return (get_bits_count(&s->gb) + 31) / 32 * 4 + table_size;
  1089. }
  1090. static av_cold int decode_end(AVCodecContext *avctx)
  1091. {
  1092. HYuvContext *s = avctx->priv_data;
  1093. int i;
  1094. ff_huffyuv_common_end(s);
  1095. av_freep(&s->bitstream_buffer);
  1096. for (i = 0; i < 8; i++)
  1097. ff_free_vlc(&s->vlc[i]);
  1098. return 0;
  1099. }
/* Registration entry for the original Huffyuv decoder (AV_CODEC_ID_HUFFYUV).
 * Advertises direct rendering (DR1), slice/draw-horiz-band output, and
 * frame-level threading; decode_init_thread_copy is used only when
 * threading support is compiled in. */
AVCodec ff_huffyuv_decoder = {
    .name             = "huffyuv",
    .long_name        = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_HUFFYUV,
    .priv_data_size   = sizeof(HYuvContext),
    .init             = decode_init,
    .close            = decode_end,
    .decode           = decode_frame,
    .capabilities     = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
                        CODEC_CAP_FRAME_THREADS,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
};
#if CONFIG_FFVHUFF_DECODER
/* Registration entry for the FFmpeg variant of Huffyuv
 * (AV_CODEC_ID_FFVHUFF). Shares init/decode/close callbacks and the
 * capability flags with the huffyuv decoder above; only built when
 * CONFIG_FFVHUFF_DECODER is enabled. */
AVCodec ff_ffvhuff_decoder = {
    .name             = "ffvhuff",
    .long_name        = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_FFVHUFF,
    .priv_data_size   = sizeof(HYuvContext),
    .init             = decode_init,
    .close            = decode_end,
    .decode           = decode_frame,
    .capabilities     = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND |
                        CODEC_CAP_FRAME_THREADS,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
};
#endif /* CONFIG_FFVHUFF_DECODER */