/*
 * WebP (.webp) image decoder
 * Copyright (c) 2013 Aneesh Dogra <aneesh@sugarlabs.org>
 * Copyright (c) 2013 Justin Ruggles <justin.ruggles@gmail.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * WebP image decoder
 *
 * @author Aneesh Dogra <aneesh@sugarlabs.org>
 * Container and Lossy decoding
 *
 * @author Justin Ruggles <justin.ruggles@gmail.com>
 * Lossless decoder
 * Compressed alpha for lossy
 *
 * Unimplemented:
 *   - Animation
 *   - ICC profile
 *   - Exif and XMP metadata
 */
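
/* The VP8L lossless bitstream packs values LSB-first, so the little-endian
 * bitstream reader must be selected before get_bits.h is included. */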
#define BITSTREAM_READER_LE
#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"
#include "get_bits.h"
#include "thread.h"
#include "vp8.h"

#define VP8X_FLAG_ANIMATION             0x02
#define VP8X_FLAG_XMP_METADATA          0x04
#define VP8X_FLAG_EXIF_METADATA         0x08
#define VP8X_FLAG_ALPHA                 0x10
#define VP8X_FLAG_ICC                   0x20

#define MAX_PALETTE_SIZE                256
#define MAX_CACHE_BITS                  11
#define NUM_CODE_LENGTH_CODES           19
#define HUFFMAN_CODES_PER_META_CODE     5
#define NUM_LITERAL_CODES               256
#define NUM_LENGTH_CODES                24
#define NUM_DISTANCE_CODES              40
#define NUM_SHORT_DISTANCES             120
#define MAX_HUFFMAN_CODE_LENGTH         15

static const uint16_t alphabet_sizes[HUFFMAN_CODES_PER_META_CODE] = {
    NUM_LITERAL_CODES + NUM_LENGTH_CODES,
    NUM_LITERAL_CODES, NUM_LITERAL_CODES, NUM_LITERAL_CODES,
    NUM_DISTANCE_CODES
};

static const uint8_t code_length_code_order[NUM_CODE_LENGTH_CODES] = {
    17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};

static const int8_t lz77_distance_offsets[NUM_SHORT_DISTANCES][2] = {
    { 0, 1 }, { 1, 0 }, { 1, 1 }, { -1, 1 }, { 0, 2 }, { 2, 0 }, { 1, 2 }, { -1, 2 },
    { 2, 1 }, { -2, 1 }, { 2, 2 }, { -2, 2 }, { 0, 3 }, { 3, 0 }, { 1, 3 }, { -1, 3 },
    { 3, 1 }, { -3, 1 }, { 2, 3 }, { -2, 3 }, { 3, 2 }, { -3, 2 }, { 0, 4 }, { 4, 0 },
    { 1, 4 }, { -1, 4 }, { 4, 1 }, { -4, 1 }, { 3, 3 }, { -3, 3 }, { 2, 4 }, { -2, 4 },
    { 4, 2 }, { -4, 2 }, { 0, 5 }, { 3, 4 }, { -3, 4 }, { 4, 3 }, { -4, 3 }, { 5, 0 },
    { 1, 5 }, { -1, 5 }, { 5, 1 }, { -5, 1 }, { 2, 5 }, { -2, 5 }, { 5, 2 }, { -5, 2 },
    { 4, 4 }, { -4, 4 }, { 3, 5 }, { -3, 5 }, { 5, 3 }, { -5, 3 }, { 0, 6 }, { 6, 0 },
    { 1, 6 }, { -1, 6 }, { 6, 1 }, { -6, 1 }, { 2, 6 }, { -2, 6 }, { 6, 2 }, { -6, 2 },
    { 4, 5 }, { -4, 5 }, { 5, 4 }, { -5, 4 }, { 3, 6 }, { -3, 6 }, { 6, 3 }, { -6, 3 },
    { 0, 7 }, { 7, 0 }, { 1, 7 }, { -1, 7 }, { 5, 5 }, { -5, 5 }, { 7, 1 }, { -7, 1 },
    { 4, 6 }, { -4, 6 }, { 6, 4 }, { -6, 4 }, { 2, 7 }, { -2, 7 }, { 7, 2 }, { -7, 2 },
    { 3, 7 }, { -3, 7 }, { 7, 3 }, { -7, 3 }, { 5, 6 }, { -5, 6 }, { 6, 5 }, { -6, 5 },
    { 8, 0 }, { 4, 7 }, { -4, 7 }, { 7, 4 }, { -7, 4 }, { 8, 1 }, { 8, 2 }, { 6, 6 },
    { -6, 6 }, { 8, 3 }, { 5, 7 }, { -5, 7 }, { 7, 5 }, { -7, 5 }, { 8, 4 }, { 6, 7 },
    { -6, 7 }, { 7, 6 }, { -7, 6 }, { 8, 5 }, { 7, 7 }, { -7, 7 }, { 8, 6 }, { 8, 7 }
};
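
/* LZ77 distance prefix codes 1..NUM_SHORT_DISTANCES do not encode a linear
 * distance; they index the (dx, dy) offsets in the table above, which favor
 * pixels spatially close to the current one. For example, code 1 maps to
 * { 0, 1 }, i.e. the pixel directly above. Larger codes encode the linear
 * distance minus NUM_SHORT_DISTANCES. */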

enum AlphaCompression {
    ALPHA_COMPRESSION_NONE,
    ALPHA_COMPRESSION_VP8L,
};

enum AlphaFilter {
    ALPHA_FILTER_NONE,
    ALPHA_FILTER_HORIZONTAL,
    ALPHA_FILTER_VERTICAL,
    ALPHA_FILTER_GRADIENT,
};

enum TransformType {
    PREDICTOR_TRANSFORM      = 0,
    COLOR_TRANSFORM          = 1,
    SUBTRACT_GREEN           = 2,
    COLOR_INDEXING_TRANSFORM = 3,
};

enum PredictionMode {
    PRED_MODE_BLACK,
    PRED_MODE_L,
    PRED_MODE_T,
    PRED_MODE_TR,
    PRED_MODE_TL,
    PRED_MODE_AVG_T_AVG_L_TR,
    PRED_MODE_AVG_L_TL,
    PRED_MODE_AVG_L_T,
    PRED_MODE_AVG_TL_T,
    PRED_MODE_AVG_T_TR,
    PRED_MODE_AVG_AVG_L_TL_AVG_T_TR,
    PRED_MODE_SELECT,
    PRED_MODE_ADD_SUBTRACT_FULL,
    PRED_MODE_ADD_SUBTRACT_HALF,
};

enum HuffmanIndex {
    HUFF_IDX_GREEN = 0,
    HUFF_IDX_RED   = 1,
    HUFF_IDX_BLUE  = 2,
    HUFF_IDX_ALPHA = 3,
    HUFF_IDX_DIST  = 4
};

/* The structure of WebP lossless is an optional series of transformation data,
 * followed by the primary image. The primary image also optionally contains
 * an entropy group mapping if there are multiple entropy groups. There is a
 * basic image type called an "entropy coded image" that is used for all of
 * these. The type of each entropy coded image is referred to by the
 * specification as its role. */
enum ImageRole {
    /* Primary Image: Stores the actual pixels of the image. */
    IMAGE_ROLE_ARGB,

    /* Entropy Image: Defines which Huffman group to use for different areas of
     * the primary image. */
    IMAGE_ROLE_ENTROPY,

    /* Predictors: Defines which predictor type to use for different areas of
     * the primary image. */
    IMAGE_ROLE_PREDICTOR,

    /* Color Transform Data: Defines the color transformation for different
     * areas of the primary image. */
    IMAGE_ROLE_COLOR_TRANSFORM,

    /* Color Index: Stored as an image of height == 1. */
    IMAGE_ROLE_COLOR_INDEXING,

    IMAGE_ROLE_NB,
};

typedef struct HuffReader {
    VLC vlc;                            /* Huffman decoder context */
    int simple;                         /* whether to use simple mode */
    int nb_symbols;                     /* number of coded symbols */
    uint16_t simple_symbols[2];         /* symbols for simple mode */
} HuffReader;

typedef struct ImageContext {
    enum ImageRole role;                /* role of this image */
    AVFrame *frame;                     /* AVFrame for data */
    int color_cache_bits;               /* color cache size, log2 */
    uint32_t *color_cache;              /* color cache data */
    int nb_huffman_groups;              /* number of huffman groups */
    HuffReader *huffman_groups;         /* reader for each huffman group */
    int size_reduction;                 /* relative size compared to primary image, log2 */
    int is_alpha_primary;
} ImageContext;

typedef struct WebPContext {
    VP8Context v;                       /* VP8 Context used for lossy decoding */
    GetBitContext gb;                   /* bitstream reader for main image chunk */
    AVFrame *alpha_frame;               /* AVFrame for alpha data decompressed from VP8L */
    AVCodecContext *avctx;              /* parent AVCodecContext */
    int initialized;                    /* set once the VP8 context is initialized */
    int has_alpha;                      /* has a separate alpha chunk */
    enum AlphaCompression alpha_compression; /* compression type for alpha chunk */
    enum AlphaFilter alpha_filter;      /* filtering method for alpha chunk */
    uint8_t *alpha_data;                /* alpha chunk data */
    int alpha_data_size;                /* alpha chunk data size */
    int width;                          /* image width */
    int height;                         /* image height */
    int lossless;                       /* indicates lossless or lossy */

    int nb_transforms;                  /* number of transforms */
    enum TransformType transforms[4];   /* transformations used in the image, in order */
    int reduced_width;                  /* reduced width for index image, if applicable */
    int nb_huffman_groups;              /* number of huffman groups in the primary image */
    ImageContext image[IMAGE_ROLE_NB];  /* image context for each role */
} WebPContext;

#define GET_PIXEL(frame, x, y) \
    ((frame)->data[0] + (y) * frame->linesize[0] + 4 * (x))

#define GET_PIXEL_COMP(frame, x, y, c) \
    (*((frame)->data[0] + (y) * frame->linesize[0] + 4 * (x) + c))
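
/* Pixels are stored in AV_PIX_FMT_ARGB byte order, so for a pointer p
 * returned by GET_PIXEL, p[0] is alpha, p[1] red, p[2] green and p[3] blue.
 * The green channel (p[2]) carries the literal/length symbols in VP8L. */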

static void image_ctx_free(ImageContext *img)
{
    int i, j;

    av_free(img->color_cache);
    if (img->role != IMAGE_ROLE_ARGB && !img->is_alpha_primary)
        av_frame_free(&img->frame);
    if (img->huffman_groups) {
        for (i = 0; i < img->nb_huffman_groups; i++) {
            for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++)
                ff_free_vlc(&img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE + j].vlc);
        }
        av_free(img->huffman_groups);
    }
    memset(img, 0, sizeof(*img));
}

/* Differs from get_vlc2() in the following ways:
 *   - codes are bit-reversed
 *   - assumes 8-bit table to make reversal simpler
 *   - assumes max depth of 2 since the max code length for WebP is 15
 */
static av_always_inline int webp_get_vlc(GetBitContext *gb, VLC_TYPE (*table)[2])
{
    int n, nb_bits;
    unsigned int index;
    int code;

    OPEN_READER(re, gb);
    UPDATE_CACHE(re, gb);

    index = SHOW_UBITS(re, gb, 8);
    index = ff_reverse[index];
    code  = table[index][0];
    n     = table[index][1];

    if (n < 0) {
        LAST_SKIP_BITS(re, gb, 8);
        UPDATE_CACHE(re, gb);

        nb_bits = -n;

        index = SHOW_UBITS(re, gb, nb_bits);
        index = (ff_reverse[index] >> (8 - nb_bits)) + code;
        code  = table[index][0];
        n     = table[index][1];
    }
    SKIP_BITS(re, gb, n);
    CLOSE_READER(re, gb);

    return code;
}

static int huff_reader_get_symbol(HuffReader *r, GetBitContext *gb)
{
    if (r->simple) {
        if (r->nb_symbols == 1)
            return r->simple_symbols[0];
        else
            return r->simple_symbols[get_bits1(gb)];
    } else
        return webp_get_vlc(gb, r->vlc.table);
}
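
/* Codes are assigned canonically: shorter lengths get numerically smaller
 * codes, and within one length codes increase in symbol order. As an
 * illustrative example (not from the spec text), lengths {A:2, B:1, C:3, D:3}
 * yield the codes B=0, A=10, C=110, D=111. */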
static int huff_reader_build_canonical(HuffReader *r, int *code_lengths,
                                       int alphabet_size)
{
    int len = 0, sym, code = 0, ret;
    int max_code_length = 0;
    uint16_t *codes;

    /* special-case 1 symbol since the vlc reader cannot handle it */
    for (sym = 0; sym < alphabet_size; sym++) {
        if (code_lengths[sym] > 0) {
            len++;
            code = sym;
            if (len > 1)
                break;
        }
    }
    if (len == 1) {
        r->nb_symbols = 1;
        r->simple_symbols[0] = code;
        r->simple = 1;
        return 0;
    }

    for (sym = 0; sym < alphabet_size; sym++)
        max_code_length = FFMAX(max_code_length, code_lengths[sym]);

    if (max_code_length == 0 || max_code_length > MAX_HUFFMAN_CODE_LENGTH)
        return AVERROR(EINVAL);

    codes = av_malloc(alphabet_size * sizeof(*codes));
    if (!codes)
        return AVERROR(ENOMEM);

    code = 0;
    r->nb_symbols = 0;
    for (len = 1; len <= max_code_length; len++) {
        for (sym = 0; sym < alphabet_size; sym++) {
            if (code_lengths[sym] != len)
                continue;
            codes[sym] = code++;
            r->nb_symbols++;
        }
        code <<= 1;
    }
    if (!r->nb_symbols) {
        av_free(codes);
        return AVERROR_INVALIDDATA;
    }

    ret = init_vlc(&r->vlc, 8, alphabet_size,
                   code_lengths, sizeof(*code_lengths), sizeof(*code_lengths),
                   codes, sizeof(*codes), sizeof(*codes), 0);
    if (ret < 0) {
        av_free(codes);
        return ret;
    }
    r->simple = 0;

    av_free(codes);
    return 0;
}

static void read_huffman_code_simple(WebPContext *s, HuffReader *hc)
{
    hc->nb_symbols = get_bits1(&s->gb) + 1;

    if (get_bits1(&s->gb))
        hc->simple_symbols[0] = get_bits(&s->gb, 8);
    else
        hc->simple_symbols[0] = get_bits1(&s->gb);

    if (hc->nb_symbols == 2)
        hc->simple_symbols[1] = get_bits(&s->gb, 8);

    hc->simple = 1;
}

static int read_huffman_code_normal(WebPContext *s, HuffReader *hc,
                                    int alphabet_size)
{
    HuffReader code_len_hc = { { 0 }, 0, 0, { 0 } };
    int *code_lengths = NULL;
    int code_length_code_lengths[NUM_CODE_LENGTH_CODES] = { 0 };
    int i, symbol, max_symbol, prev_code_len, ret;
    int num_codes = 4 + get_bits(&s->gb, 4);

    if (num_codes > NUM_CODE_LENGTH_CODES)
        return AVERROR_INVALIDDATA;

    for (i = 0; i < num_codes; i++)
        code_length_code_lengths[code_length_code_order[i]] = get_bits(&s->gb, 3);

    ret = huff_reader_build_canonical(&code_len_hc, code_length_code_lengths,
                                      NUM_CODE_LENGTH_CODES);
    if (ret < 0)
        goto finish;

    code_lengths = av_mallocz_array(alphabet_size, sizeof(*code_lengths));
    if (!code_lengths) {
        ret = AVERROR(ENOMEM);
        goto finish;
    }

    if (get_bits1(&s->gb)) {
        int bits   = 2 + 2 * get_bits(&s->gb, 3);
        max_symbol = 2 + get_bits(&s->gb, bits);
        if (max_symbol > alphabet_size) {
            av_log(s->avctx, AV_LOG_ERROR, "max symbol %d > alphabet size %d\n",
                   max_symbol, alphabet_size);
            ret = AVERROR_INVALIDDATA;
            goto finish;
        }
    } else {
        max_symbol = alphabet_size;
    }

    prev_code_len = 8;
    symbol        = 0;
    while (symbol < alphabet_size) {
        int code_len;

        if (!max_symbol--)
            break;
        code_len = huff_reader_get_symbol(&code_len_hc, &s->gb);
        if (code_len < 16) {
            /* Code length code [0..15] indicates literal code lengths. */
            code_lengths[symbol++] = code_len;
            if (code_len)
                prev_code_len = code_len;
        } else {
            int repeat = 0, length = 0;
            switch (code_len) {
            case 16:
                /* Code 16 repeats the previous non-zero value [3..6] times,
                 * i.e., 3 + ReadBits(2) times. If code 16 is used before a
                 * non-zero value has been emitted, a value of 8 is repeated. */
                repeat = 3 + get_bits(&s->gb, 2);
                length = prev_code_len;
                break;
            case 17:
                /* Code 17 emits a streak of zeros [3..10], i.e.,
                 * 3 + ReadBits(3) times. */
                repeat = 3 + get_bits(&s->gb, 3);
                break;
            case 18:
                /* Code 18 emits a streak of zeros of length [11..138], i.e.,
                 * 11 + ReadBits(7) times. */
                repeat = 11 + get_bits(&s->gb, 7);
                break;
            }
            if (symbol + repeat > alphabet_size) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "invalid symbol %d + repeat %d > alphabet size %d\n",
                       symbol, repeat, alphabet_size);
                ret = AVERROR_INVALIDDATA;
                goto finish;
            }
            while (repeat-- > 0)
                code_lengths[symbol++] = length;
        }
    }

    ret = huff_reader_build_canonical(hc, code_lengths, alphabet_size);

finish:
    ff_free_vlc(&code_len_hc.vlc);
    av_free(code_lengths);
    return ret;
}

static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role,
                                      int w, int h);

#define PARSE_BLOCK_SIZE(w, h) do {                                         \
    block_bits = get_bits(&s->gb, 3) + 2;                                   \
    blocks_w   = FFALIGN((w), 1 << block_bits) >> block_bits;               \
    blocks_h   = FFALIGN((h), 1 << block_bits) >> block_bits;               \
} while (0)
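
/* block_bits is read as a 3-bit value plus 2, so block sizes range from
 * 2^2 to 2^9 pixels. For example, a 500-pixel-wide image with
 * block_bits == 4 is covered by FFALIGN(500, 16) >> 4 == 32 blocks. */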

static int decode_entropy_image(WebPContext *s)
{
    ImageContext *img;
    int ret, block_bits, width, blocks_w, blocks_h, x, y, max;

    width = s->width;
    if (s->reduced_width > 0)
        width = s->reduced_width;

    PARSE_BLOCK_SIZE(width, s->height);

    ret = decode_entropy_coded_image(s, IMAGE_ROLE_ENTROPY, blocks_w, blocks_h);
    if (ret < 0)
        return ret;

    img = &s->image[IMAGE_ROLE_ENTROPY];
    img->size_reduction = block_bits;

    /* the number of huffman groups is determined by the maximum group number
     * coded in the entropy image */
    max = 0;
    for (y = 0; y < img->frame->height; y++) {
        for (x = 0; x < img->frame->width; x++) {
            int p0 = GET_PIXEL_COMP(img->frame, x, y, 1);
            int p1 = GET_PIXEL_COMP(img->frame, x, y, 2);
            int p  = p0 << 8 | p1;
            max = FFMAX(max, p);
        }
    }
    s->nb_huffman_groups = max + 1;

    return 0;
}

static int parse_transform_predictor(WebPContext *s)
{
    int block_bits, blocks_w, blocks_h, ret;

    PARSE_BLOCK_SIZE(s->width, s->height);

    ret = decode_entropy_coded_image(s, IMAGE_ROLE_PREDICTOR, blocks_w,
                                     blocks_h);
    if (ret < 0)
        return ret;

    s->image[IMAGE_ROLE_PREDICTOR].size_reduction = block_bits;

    return 0;
}

static int parse_transform_color(WebPContext *s)
{
    int block_bits, blocks_w, blocks_h, ret;

    PARSE_BLOCK_SIZE(s->width, s->height);

    ret = decode_entropy_coded_image(s, IMAGE_ROLE_COLOR_TRANSFORM, blocks_w,
                                     blocks_h);
    if (ret < 0)
        return ret;

    s->image[IMAGE_ROLE_COLOR_TRANSFORM].size_reduction = block_bits;

    return 0;
}

static int parse_transform_color_indexing(WebPContext *s)
{
    ImageContext *img;
    int width_bits, index_size, ret, x;
    uint8_t *ct;

    index_size = get_bits(&s->gb, 8) + 1;

    if (index_size <= 2)
        width_bits = 3;
    else if (index_size <= 4)
        width_bits = 2;
    else if (index_size <= 16)
        width_bits = 1;
    else
        width_bits = 0;

    ret = decode_entropy_coded_image(s, IMAGE_ROLE_COLOR_INDEXING,
                                     index_size, 1);
    if (ret < 0)
        return ret;

    img = &s->image[IMAGE_ROLE_COLOR_INDEXING];
    img->size_reduction = width_bits;
    if (width_bits > 0)
        s->reduced_width = (s->width + ((1 << width_bits) - 1)) >> width_bits;

    /* color index values are delta-coded */
    ct = img->frame->data[0] + 4;
    for (x = 4; x < img->frame->width * 4; x++, ct++)
        ct[0] += ct[-4];

    return 0;
}

static HuffReader *get_huffman_group(WebPContext *s, ImageContext *img,
                                     int x, int y)
{
    ImageContext *gimg = &s->image[IMAGE_ROLE_ENTROPY];
    int group = 0;

    if (gimg->size_reduction > 0) {
        int group_x = x >> gimg->size_reduction;
        int group_y = y >> gimg->size_reduction;
        int g0      = GET_PIXEL_COMP(gimg->frame, group_x, group_y, 1);
        int g1      = GET_PIXEL_COMP(gimg->frame, group_x, group_y, 2);
        group       = g0 << 8 | g1;
    }

    return &img->huffman_groups[group * HUFFMAN_CODES_PER_META_CODE];
}
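
/* The color cache is a small hash table of recently seen ARGB values; the
 * index is derived by multiplying with the constant 0x1E35A7BD from the WebP
 * lossless specification and keeping the top color_cache_bits bits. */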
static av_always_inline void color_cache_put(ImageContext *img, uint32_t c)
{
    uint32_t cache_idx = (0x1E35A7BD * c) >> (32 - img->color_cache_bits);
    img->color_cache[cache_idx] = c;
}

static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role,
                                      int w, int h)
{
    ImageContext *img;
    HuffReader *hg;
    int i, j, ret, x, y, width;

    img       = &s->image[role];
    img->role = role;

    if (!img->frame) {
        img->frame = av_frame_alloc();
        if (!img->frame)
            return AVERROR(ENOMEM);
    }

    img->frame->format = AV_PIX_FMT_ARGB;
    img->frame->width  = w;
    img->frame->height = h;

    if (role == IMAGE_ROLE_ARGB && !img->is_alpha_primary) {
        ThreadFrame pt = { .f = img->frame };
        ret = ff_thread_get_buffer(s->avctx, &pt, 0);
    } else
        ret = av_frame_get_buffer(img->frame, 1);
    if (ret < 0)
        return ret;

    if (get_bits1(&s->gb)) {
        img->color_cache_bits = get_bits(&s->gb, 4);
        if (img->color_cache_bits < 1 || img->color_cache_bits > 11) {
            av_log(s->avctx, AV_LOG_ERROR, "invalid color cache bits: %d\n",
                   img->color_cache_bits);
            return AVERROR_INVALIDDATA;
        }
        img->color_cache = av_mallocz_array(1 << img->color_cache_bits,
                                            sizeof(*img->color_cache));
        if (!img->color_cache)
            return AVERROR(ENOMEM);
    } else {
        img->color_cache_bits = 0;
    }

    img->nb_huffman_groups = 1;
    if (role == IMAGE_ROLE_ARGB && get_bits1(&s->gb)) {
        ret = decode_entropy_image(s);
        if (ret < 0)
            return ret;
        img->nb_huffman_groups = s->nb_huffman_groups;
    }
    img->huffman_groups = av_mallocz_array(img->nb_huffman_groups *
                                           HUFFMAN_CODES_PER_META_CODE,
                                           sizeof(*img->huffman_groups));
    if (!img->huffman_groups)
        return AVERROR(ENOMEM);

    for (i = 0; i < img->nb_huffman_groups; i++) {
        hg = &img->huffman_groups[i * HUFFMAN_CODES_PER_META_CODE];
        for (j = 0; j < HUFFMAN_CODES_PER_META_CODE; j++) {
            int alphabet_size = alphabet_sizes[j];
            if (!j && img->color_cache_bits > 0)
                alphabet_size += 1 << img->color_cache_bits;

            if (get_bits1(&s->gb)) {
                read_huffman_code_simple(s, &hg[j]);
            } else {
                ret = read_huffman_code_normal(s, &hg[j], alphabet_size);
                if (ret < 0)
                    return ret;
            }
        }
    }

    width = img->frame->width;
    if (role == IMAGE_ROLE_ARGB && s->reduced_width > 0)
        width = s->reduced_width;

    x = 0; y = 0;
    while (y < img->frame->height) {
        int v;

        hg = get_huffman_group(s, img, x, y);
        v = huff_reader_get_symbol(&hg[HUFF_IDX_GREEN], &s->gb);
        if (v < NUM_LITERAL_CODES) {
            /* literal pixel values */
            uint8_t *p = GET_PIXEL(img->frame, x, y);
            p[2] = v;
            p[1] = huff_reader_get_symbol(&hg[HUFF_IDX_RED],   &s->gb);
            p[3] = huff_reader_get_symbol(&hg[HUFF_IDX_BLUE],  &s->gb);
            p[0] = huff_reader_get_symbol(&hg[HUFF_IDX_ALPHA], &s->gb);
            if (img->color_cache_bits)
                color_cache_put(img, AV_RB32(p));
            x++;
            if (x == width) {
                x = 0;
                y++;
            }
        } else if (v < NUM_LITERAL_CODES + NUM_LENGTH_CODES) {
            /* LZ77 backwards mapping */
            int prefix_code, length, distance, ref_x, ref_y;

            /* parse length and distance */
            prefix_code = v - NUM_LITERAL_CODES;
            if (prefix_code < 4) {
                length = prefix_code + 1;
            } else {
                int extra_bits = (prefix_code - 2) >> 1;
                int offset     = 2 + (prefix_code & 1) << extra_bits;
                length = offset + get_bits(&s->gb, extra_bits) + 1;
            }
            prefix_code = huff_reader_get_symbol(&hg[HUFF_IDX_DIST], &s->gb);
            if (prefix_code < 4) {
                distance = prefix_code + 1;
            } else {
                int extra_bits = prefix_code - 2 >> 1;
                int offset     = 2 + (prefix_code & 1) << extra_bits;
                distance = offset + get_bits(&s->gb, extra_bits) + 1;
            }

            /* find reference location */
            if (distance <= NUM_SHORT_DISTANCES) {
                int xi = lz77_distance_offsets[distance - 1][0];
                int yi = lz77_distance_offsets[distance - 1][1];
                distance = FFMAX(1, xi + yi * width);
            } else {
                distance -= NUM_SHORT_DISTANCES;
            }
            ref_x = x;
            ref_y = y;
            if (distance <= x) {
                ref_x -= distance;
                distance = 0;
            } else {
                ref_x = 0;
                distance -= x;
            }
            while (distance >= width) {
                ref_y--;
                distance -= width;
            }
            if (distance > 0) {
                ref_x = width - distance;
                ref_y--;
            }
            ref_x = FFMAX(0, ref_x);
            ref_y = FFMAX(0, ref_y);

            /* copy pixels
             * source and dest regions can overlap and wrap lines, so just
             * copy per-pixel */
            for (i = 0; i < length; i++) {
                uint8_t *p_ref = GET_PIXEL(img->frame, ref_x, ref_y);
                uint8_t *p     = GET_PIXEL(img->frame,     x,     y);

                AV_COPY32(p, p_ref);
                if (img->color_cache_bits)
                    color_cache_put(img, AV_RB32(p));
                x++;
                ref_x++;
                if (x == width) {
                    x = 0;
                    y++;
                }
                if (ref_x == width) {
                    ref_x = 0;
                    ref_y++;
                }
                if (y == img->frame->height || ref_y == img->frame->height)
                    break;
            }
        } else {
            /* read from color cache */
            uint8_t *p = GET_PIXEL(img->frame, x, y);
            int cache_idx = v - (NUM_LITERAL_CODES + NUM_LENGTH_CODES);

            if (!img->color_cache_bits) {
                av_log(s->avctx, AV_LOG_ERROR, "color cache not found\n");
                return AVERROR_INVALIDDATA;
            }
            if (cache_idx >= 1 << img->color_cache_bits) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "color cache index out-of-bounds\n");
                return AVERROR_INVALIDDATA;
            }
            AV_WB32(p, img->color_cache[cache_idx]);
            x++;
            if (x == width) {
                x = 0;
                y++;
            }
        }
    }

    return 0;
}
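
/* The 14 inverse predictors below compute a per-channel prediction from the
 * left (L), top-left (TL), top (T) and top-right (TR) neighbors; the decoded
 * residual is then added to that prediction in inverse_prediction(). */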

/* PRED_MODE_BLACK */
static void inv_predict_0(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    AV_WB32(p, 0xFF000000);
}

/* PRED_MODE_L */
static void inv_predict_1(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    AV_COPY32(p, p_l);
}

/* PRED_MODE_T */
static void inv_predict_2(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    AV_COPY32(p, p_t);
}

/* PRED_MODE_TR */
static void inv_predict_3(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    AV_COPY32(p, p_tr);
}

/* PRED_MODE_TL */
static void inv_predict_4(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    AV_COPY32(p, p_tl);
}

/* PRED_MODE_AVG_T_AVG_L_TR */
static void inv_predict_5(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = p_t[0] + (p_l[0] + p_tr[0] >> 1) >> 1;
    p[1] = p_t[1] + (p_l[1] + p_tr[1] >> 1) >> 1;
    p[2] = p_t[2] + (p_l[2] + p_tr[2] >> 1) >> 1;
    p[3] = p_t[3] + (p_l[3] + p_tr[3] >> 1) >> 1;
}

/* PRED_MODE_AVG_L_TL */
static void inv_predict_6(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = p_l[0] + p_tl[0] >> 1;
    p[1] = p_l[1] + p_tl[1] >> 1;
    p[2] = p_l[2] + p_tl[2] >> 1;
    p[3] = p_l[3] + p_tl[3] >> 1;
}

/* PRED_MODE_AVG_L_T */
static void inv_predict_7(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = p_l[0] + p_t[0] >> 1;
    p[1] = p_l[1] + p_t[1] >> 1;
    p[2] = p_l[2] + p_t[2] >> 1;
    p[3] = p_l[3] + p_t[3] >> 1;
}

/* PRED_MODE_AVG_TL_T */
static void inv_predict_8(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = p_tl[0] + p_t[0] >> 1;
    p[1] = p_tl[1] + p_t[1] >> 1;
    p[2] = p_tl[2] + p_t[2] >> 1;
    p[3] = p_tl[3] + p_t[3] >> 1;
}

/* PRED_MODE_AVG_T_TR */
static void inv_predict_9(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                          const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = p_t[0] + p_tr[0] >> 1;
    p[1] = p_t[1] + p_tr[1] >> 1;
    p[2] = p_t[2] + p_tr[2] >> 1;
    p[3] = p_t[3] + p_tr[3] >> 1;
}

/* PRED_MODE_AVG_AVG_L_TL_AVG_T_TR */
static void inv_predict_10(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = (p_l[0] + p_tl[0] >> 1) + (p_t[0] + p_tr[0] >> 1) >> 1;
    p[1] = (p_l[1] + p_tl[1] >> 1) + (p_t[1] + p_tr[1] >> 1) >> 1;
    p[2] = (p_l[2] + p_tl[2] >> 1) + (p_t[2] + p_tr[2] >> 1) >> 1;
    p[3] = (p_l[3] + p_tl[3] >> 1) + (p_t[3] + p_tr[3] >> 1) >> 1;
}

/* PRED_MODE_SELECT */
static void inv_predict_11(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    int diff = (FFABS(p_l[0] - p_tl[0]) - FFABS(p_t[0] - p_tl[0])) +
               (FFABS(p_l[1] - p_tl[1]) - FFABS(p_t[1] - p_tl[1])) +
               (FFABS(p_l[2] - p_tl[2]) - FFABS(p_t[2] - p_tl[2])) +
               (FFABS(p_l[3] - p_tl[3]) - FFABS(p_t[3] - p_tl[3]));
    if (diff <= 0)
        AV_COPY32(p, p_t);
    else
        AV_COPY32(p, p_l);
}

/* PRED_MODE_ADD_SUBTRACT_FULL */
static void inv_predict_12(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = av_clip_uint8(p_l[0] + p_t[0] - p_tl[0]);
    p[1] = av_clip_uint8(p_l[1] + p_t[1] - p_tl[1]);
    p[2] = av_clip_uint8(p_l[2] + p_t[2] - p_tl[2]);
    p[3] = av_clip_uint8(p_l[3] + p_t[3] - p_tl[3]);
}

static av_always_inline uint8_t clamp_add_subtract_half(int a, int b, int c)
{
    int d = a + b >> 1;
    return av_clip_uint8(d + (d - c) / 2);
}

/* PRED_MODE_ADD_SUBTRACT_HALF */
static void inv_predict_13(uint8_t *p, const uint8_t *p_l, const uint8_t *p_tl,
                           const uint8_t *p_t, const uint8_t *p_tr)
{
    p[0] = clamp_add_subtract_half(p_l[0], p_t[0], p_tl[0]);
    p[1] = clamp_add_subtract_half(p_l[1], p_t[1], p_tl[1]);
    p[2] = clamp_add_subtract_half(p_l[2], p_t[2], p_tl[2]);
    p[3] = clamp_add_subtract_half(p_l[3], p_t[3], p_tl[3]);
}

typedef void (*inv_predict_func)(uint8_t *p, const uint8_t *p_l,
                                 const uint8_t *p_tl, const uint8_t *p_t,
                                 const uint8_t *p_tr);

static const inv_predict_func inverse_predict[14] = {
    inv_predict_0,  inv_predict_1,  inv_predict_2,  inv_predict_3,
    inv_predict_4,  inv_predict_5,  inv_predict_6,  inv_predict_7,
    inv_predict_8,  inv_predict_9,  inv_predict_10, inv_predict_11,
    inv_predict_12, inv_predict_13,
};

static void inverse_prediction(AVFrame *frame, enum PredictionMode m, int x, int y)
{
    uint8_t *dec, *p_l, *p_tl, *p_t, *p_tr;
    uint8_t p[4];

    dec  = GET_PIXEL(frame, x,     y);
    p_l  = GET_PIXEL(frame, x - 1, y);
    p_tl = GET_PIXEL(frame, x - 1, y - 1);
    p_t  = GET_PIXEL(frame, x,     y - 1);
    if (x == frame->width - 1)
        p_tr = GET_PIXEL(frame, 0, y);
    else
        p_tr = GET_PIXEL(frame, x + 1, y - 1);

    inverse_predict[m](p, p_l, p_tl, p_t, p_tr);

    dec[0] += p[0];
    dec[1] += p[1];
    dec[2] += p[2];
    dec[3] += p[3];
}

static int apply_predictor_transform(WebPContext *s)
{
    ImageContext *img  = &s->image[IMAGE_ROLE_ARGB];
    ImageContext *pimg = &s->image[IMAGE_ROLE_PREDICTOR];
    int x, y;

    for (y = 0; y < img->frame->height; y++) {
        for (x = 0; x < img->frame->width; x++) {
            int tx = x >> pimg->size_reduction;
            int ty = y >> pimg->size_reduction;
            enum PredictionMode m = GET_PIXEL_COMP(pimg->frame, tx, ty, 2);

            if (x == 0) {
                if (y == 0)
                    m = PRED_MODE_BLACK;
                else
                    m = PRED_MODE_T;
            } else if (y == 0)
                m = PRED_MODE_L;

            if (m > 13) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "invalid predictor mode: %d\n", m);
                return AVERROR_INVALIDDATA;
            }
            inverse_prediction(img->frame, m, x, y);
        }
    }
    return 0;
}
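
/* Per the lossless spec, a color transform delta is the product of two
 * values interpreted as signed 8-bit integers, scaled down by 5 bits:
 * delta = ((int8_t)color_pred * (int8_t)color) >> 5. */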
static av_always_inline uint8_t color_transform_delta(uint8_t color_pred,
                                                      uint8_t color)
{
    return (int)ff_u8_to_s8(color_pred) * ff_u8_to_s8(color) >> 5;
}

static int apply_color_transform(WebPContext *s)
{
    ImageContext *img, *cimg;
    int x, y, cx, cy;
    uint8_t *p, *cp;

    img  = &s->image[IMAGE_ROLE_ARGB];
    cimg = &s->image[IMAGE_ROLE_COLOR_TRANSFORM];

    for (y = 0; y < img->frame->height; y++) {
        for (x = 0; x < img->frame->width; x++) {
            cx = x >> cimg->size_reduction;
            cy = y >> cimg->size_reduction;
            cp = GET_PIXEL(cimg->frame, cx, cy);
            p  = GET_PIXEL(img->frame,   x,  y);

            p[1] += color_transform_delta(cp[3], p[2]);
            p[3] += color_transform_delta(cp[2], p[2]) +
                    color_transform_delta(cp[1], p[1]);
        }
    }
    return 0;
}

static int apply_subtract_green_transform(WebPContext *s)
{
    int x, y;
    ImageContext *img = &s->image[IMAGE_ROLE_ARGB];

    for (y = 0; y < img->frame->height; y++) {
        for (x = 0; x < img->frame->width; x++) {
            uint8_t *p = GET_PIXEL(img->frame, x, y);
            p[1] += p[2];
            p[3] += p[2];
        }
    }
    return 0;
}
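
/* For palettes with at most 16 colors, several indices are bundled into one
 * green byte (size_reduction is the log2 bundling factor), so the indices
 * must be unpacked before the palette lookup below. */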
static int apply_color_indexing_transform(WebPContext *s)
{
    ImageContext *img;
    ImageContext *pal;
    int i, x, y;
    uint8_t *p, *pi;

    img = &s->image[IMAGE_ROLE_ARGB];
    pal = &s->image[IMAGE_ROLE_COLOR_INDEXING];

    if (pal->size_reduction > 0) {
        GetBitContext gb_g;
        uint8_t *line;
        int pixel_bits = 8 >> pal->size_reduction;

        line = av_malloc(img->frame->linesize[0]);
        if (!line)
            return AVERROR(ENOMEM);

        for (y = 0; y < img->frame->height; y++) {
            p = GET_PIXEL(img->frame, 0, y);
            memcpy(line, p, img->frame->linesize[0]);
            init_get_bits(&gb_g, line, img->frame->linesize[0] * 8);
            skip_bits(&gb_g, 16);
            i = 0;
            for (x = 0; x < img->frame->width; x++) {
                p    = GET_PIXEL(img->frame, x, y);
                p[2] = get_bits(&gb_g, pixel_bits);
                i++;
                if (i == 1 << pal->size_reduction) {
                    skip_bits(&gb_g, 24);
                    i = 0;
                }
            }
        }
        av_free(line);
    }

    for (y = 0; y < img->frame->height; y++) {
        for (x = 0; x < img->frame->width; x++) {
            p = GET_PIXEL(img->frame, x, y);
            i = p[2];
            if (i >= pal->frame->width) {
                av_log(s->avctx, AV_LOG_ERROR, "invalid palette index %d\n", i);
                return AVERROR_INVALIDDATA;
            }
            pi = GET_PIXEL(pal->frame, i, 0);
            AV_COPY32(p, pi);
        }
    }

    return 0;
}

static int vp8_lossless_decode_frame(AVCodecContext *avctx, AVFrame *p,
                                     int *got_frame, uint8_t *data_start,
                                     unsigned int data_size, int is_alpha_chunk)
{
    WebPContext *s = avctx->priv_data;
    int w, h, ret, i;

    if (!is_alpha_chunk) {
        s->lossless = 1;
        avctx->pix_fmt = AV_PIX_FMT_ARGB;
    }

    ret = init_get_bits(&s->gb, data_start, data_size * 8);
    if (ret < 0)
        return ret;

    if (!is_alpha_chunk) {
        if (get_bits(&s->gb, 8) != 0x2F) {
            av_log(avctx, AV_LOG_ERROR, "Invalid WebP Lossless signature\n");
            return AVERROR_INVALIDDATA;
        }

        w = get_bits(&s->gb, 14) + 1;
        h = get_bits(&s->gb, 14) + 1;
        if (s->width && s->width != w) {
            av_log(avctx, AV_LOG_WARNING, "Width mismatch. %d != %d\n",
                   s->width, w);
        }
        s->width = w;
        if (s->height && s->height != h) {
            av_log(avctx, AV_LOG_WARNING, "Height mismatch. %d != %d\n",
                   s->height, h);
        }
        s->height = h;

        ret = ff_set_dimensions(avctx, s->width, s->height);
        if (ret < 0)
            return ret;

        s->has_alpha = get_bits1(&s->gb);

        if (get_bits(&s->gb, 3) != 0x0) {
            av_log(avctx, AV_LOG_ERROR, "Invalid WebP Lossless version\n");
            return AVERROR_INVALIDDATA;
        }
    } else {
        if (!s->width || !s->height)
            return AVERROR_BUG;
        w = s->width;
        h = s->height;
    }

    /* parse transformations */
    s->nb_transforms = 0;
    s->reduced_width = 0;
    while (get_bits1(&s->gb)) {
        enum TransformType transform = get_bits(&s->gb, 2);
        s->transforms[s->nb_transforms++] = transform;
        switch (transform) {
        case PREDICTOR_TRANSFORM:
            ret = parse_transform_predictor(s);
            break;
        case COLOR_TRANSFORM:
            ret = parse_transform_color(s);
            break;
        case COLOR_INDEXING_TRANSFORM:
            ret = parse_transform_color_indexing(s);
            break;
        }
        if (ret < 0)
            goto free_and_return;
    }

    /* decode primary image */
    s->image[IMAGE_ROLE_ARGB].frame = p;
    if (is_alpha_chunk)
        s->image[IMAGE_ROLE_ARGB].is_alpha_primary = 1;
    ret = decode_entropy_coded_image(s, IMAGE_ROLE_ARGB, w, h);
    if (ret < 0)
        goto free_and_return;

    /* apply transformations */
    for (i = s->nb_transforms - 1; i >= 0; i--) {
        switch (s->transforms[i]) {
        case PREDICTOR_TRANSFORM:
            ret = apply_predictor_transform(s);
            break;
        case COLOR_TRANSFORM:
            ret = apply_color_transform(s);
            break;
        case SUBTRACT_GREEN:
            ret = apply_subtract_green_transform(s);
            break;
        case COLOR_INDEXING_TRANSFORM:
            ret = apply_color_indexing_transform(s);
            break;
        }
        if (ret < 0)
            goto free_and_return;
    }

    *got_frame   = 1;
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;
    ret          = data_size;

free_and_return:
    for (i = 0; i < IMAGE_ROLE_NB; i++)
        image_ctx_free(&s->image[i]);

    return ret;
}

static void alpha_inverse_prediction(AVFrame *frame, enum AlphaFilter m)
{
    int x, y, ls;
    uint8_t *dec;

    ls = frame->linesize[3];

    /* filter first row using horizontal filter */
    dec = frame->data[3] + 1;
    for (x = 1; x < frame->width; x++, dec++)
        *dec += *(dec - 1);

    /* filter first column using vertical filter */
    dec = frame->data[3] + ls;
    for (y = 1; y < frame->height; y++, dec += ls)
        *dec += *(dec - ls);

    /* filter the rest using the specified filter */
    switch (m) {
    case ALPHA_FILTER_HORIZONTAL:
        for (y = 1; y < frame->height; y++) {
            dec = frame->data[3] + y * ls + 1;
            for (x = 1; x < frame->width; x++, dec++)
                *dec += *(dec - 1);
        }
        break;
    case ALPHA_FILTER_VERTICAL:
        for (y = 1; y < frame->height; y++) {
            dec = frame->data[3] + y * ls + 1;
            for (x = 1; x < frame->width; x++, dec++)
                *dec += *(dec - ls);
        }
        break;
    case ALPHA_FILTER_GRADIENT:
        for (y = 1; y < frame->height; y++) {
            dec = frame->data[3] + y * ls + 1;
            for (x = 1; x < frame->width; x++, dec++)
                dec[0] += av_clip_uint8(*(dec - 1) + *(dec - ls) - *(dec - ls - 1));
        }
        break;
    }
}

static int vp8_lossy_decode_alpha(AVCodecContext *avctx, AVFrame *p,
                                  uint8_t *data_start,
                                  unsigned int data_size)
{
    WebPContext *s = avctx->priv_data;
    int x, y, ret;

    if (s->alpha_compression == ALPHA_COMPRESSION_NONE) {
        GetByteContext gb;

        bytestream2_init(&gb, data_start, data_size);
        for (y = 0; y < s->height; y++)
            bytestream2_get_buffer(&gb, p->data[3] + p->linesize[3] * y,
                                   s->width);
    } else if (s->alpha_compression == ALPHA_COMPRESSION_VP8L) {
        uint8_t *ap, *pp;
        int alpha_got_frame = 0;

        s->alpha_frame = av_frame_alloc();
        if (!s->alpha_frame)
            return AVERROR(ENOMEM);

        ret = vp8_lossless_decode_frame(avctx, s->alpha_frame, &alpha_got_frame,
                                        data_start, data_size, 1);
        if (ret < 0) {
            av_frame_free(&s->alpha_frame);
            return ret;
        }
        if (!alpha_got_frame) {
            av_frame_free(&s->alpha_frame);
            return AVERROR_INVALIDDATA;
        }

        /* copy green component of alpha image to alpha plane of primary image */
        for (y = 0; y < s->height; y++) {
            ap = GET_PIXEL(s->alpha_frame, 0, y) + 2;
            pp = p->data[3] + p->linesize[3] * y;
            for (x = 0; x < s->width; x++) {
                *pp = *ap;
                pp++;
                ap += 4;
            }
        }
        av_frame_free(&s->alpha_frame);
    }

    /* apply alpha filtering */
    if (s->alpha_filter)
        alpha_inverse_prediction(p, s->alpha_filter);

    return 0;
}

static int vp8_lossy_decode_frame(AVCodecContext *avctx, AVFrame *p,
                                  int *got_frame, uint8_t *data_start,
                                  unsigned int data_size)
{
    WebPContext *s = avctx->priv_data;
    AVPacket pkt;
    int ret;

    if (!s->initialized) {
        ff_vp8_decode_init(avctx);
        s->initialized = 1;
        if (s->has_alpha)
            avctx->pix_fmt = AV_PIX_FMT_YUVA420P;
    }
    s->lossless = 0;

    if (data_size > INT_MAX) {
        av_log(avctx, AV_LOG_ERROR, "unsupported chunk size\n");
        return AVERROR_PATCHWELCOME;
    }

    av_init_packet(&pkt);
    pkt.data = data_start;
    pkt.size = data_size;

    ret = ff_vp8_decode_frame(avctx, p, got_frame, &pkt);
    /* do not attempt alpha decoding on a frame that failed to decode */
    if (ret < 0)
        return ret;

    if (s->has_alpha) {
        ret = vp8_lossy_decode_alpha(avctx, p, s->alpha_data,
                                     s->alpha_data_size);
        if (ret < 0)
            return ret;
    }
    return ret;
}

static int webp_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                             AVPacket *avpkt)
{
    AVFrame * const p = data;
    WebPContext *s = avctx->priv_data;
    GetByteContext gb;
    int ret;
    uint32_t chunk_type, chunk_size;
    int vp8x_flags = 0;

    s->avctx     = avctx;
    s->width     = 0;
    s->height    = 0;
    *got_frame   = 0;
    s->has_alpha = 0;
    bytestream2_init(&gb, avpkt->data, avpkt->size);

    if (bytestream2_get_bytes_left(&gb) < 12)
        return AVERROR_INVALIDDATA;

    if (bytestream2_get_le32(&gb) != MKTAG('R', 'I', 'F', 'F')) {
        av_log(avctx, AV_LOG_ERROR, "missing RIFF tag\n");
        return AVERROR_INVALIDDATA;
    }

    chunk_size = bytestream2_get_le32(&gb);
    if (bytestream2_get_bytes_left(&gb) < chunk_size)
        return AVERROR_INVALIDDATA;

    if (bytestream2_get_le32(&gb) != MKTAG('W', 'E', 'B', 'P')) {
        av_log(avctx, AV_LOG_ERROR, "missing WEBP tag\n");
        return AVERROR_INVALIDDATA;
    }

    while (bytestream2_get_bytes_left(&gb) > 0) {
        char chunk_str[5] = { 0 };

        chunk_type = bytestream2_get_le32(&gb);
        chunk_size = bytestream2_get_le32(&gb);
        if (chunk_size == UINT32_MAX)
            return AVERROR_INVALIDDATA;
        chunk_size += chunk_size & 1;

        if (bytestream2_get_bytes_left(&gb) < chunk_size)
            return AVERROR_INVALIDDATA;

        switch (chunk_type) {
        case MKTAG('V', 'P', '8', ' '):
            if (!*got_frame) {
                ret = vp8_lossy_decode_frame(avctx, p, got_frame,
                                             avpkt->data + bytestream2_tell(&gb),
                                             chunk_size);
                if (ret < 0)
                    return ret;
            }
            bytestream2_skip(&gb, chunk_size);
            break;
        case MKTAG('V', 'P', '8', 'L'):
            if (!*got_frame) {
                ret = vp8_lossless_decode_frame(avctx, p, got_frame,
                                                avpkt->data + bytestream2_tell(&gb),
                                                chunk_size, 0);
                if (ret < 0)
                    return ret;
            }
            bytestream2_skip(&gb, chunk_size);
            break;
        case MKTAG('V', 'P', '8', 'X'):
            vp8x_flags = bytestream2_get_byte(&gb);
            bytestream2_skip(&gb, 3);
            s->width  = bytestream2_get_le24(&gb) + 1;
            s->height = bytestream2_get_le24(&gb) + 1;
            ret = av_image_check_size(s->width, s->height, 0, avctx);
            if (ret < 0)
                return ret;
            break;
        case MKTAG('A', 'L', 'P', 'H'): {
            int alpha_header, filter_m, compression;

            if (!(vp8x_flags & VP8X_FLAG_ALPHA)) {
                av_log(avctx, AV_LOG_WARNING,
                       "ALPHA chunk present, but alpha bit not set in the "
                       "VP8X header\n");
            }
            if (chunk_size == 0) {
                av_log(avctx, AV_LOG_ERROR, "invalid ALPHA chunk size\n");
                return AVERROR_INVALIDDATA;
            }
            alpha_header       = bytestream2_get_byte(&gb);
            s->alpha_data      = avpkt->data + bytestream2_tell(&gb);
            s->alpha_data_size = chunk_size - 1;
            bytestream2_skip(&gb, s->alpha_data_size);

            filter_m    = (alpha_header >> 2) & 0x03;
            compression =  alpha_header       & 0x03;

            if (compression > ALPHA_COMPRESSION_VP8L) {
                av_log(avctx, AV_LOG_VERBOSE,
                       "skipping unsupported ALPHA chunk\n");
            } else {
                s->has_alpha         = 1;
                s->alpha_compression = compression;
                s->alpha_filter      = filter_m;
            }

            break;
        }
        case MKTAG('I', 'C', 'C', 'P'):
        case MKTAG('A', 'N', 'I', 'M'):
        case MKTAG('A', 'N', 'M', 'F'):
        case MKTAG('E', 'X', 'I', 'F'):
        case MKTAG('X', 'M', 'P', ' '):
            AV_WL32(chunk_str, chunk_type);
            av_log(avctx, AV_LOG_VERBOSE, "skipping unsupported chunk: %s\n",
                   chunk_str);
            bytestream2_skip(&gb, chunk_size);
            break;
        default:
            AV_WL32(chunk_str, chunk_type);
            av_log(avctx, AV_LOG_VERBOSE, "skipping unknown chunk: %s\n",
                   chunk_str);
            bytestream2_skip(&gb, chunk_size);
            break;
        }
    }

    if (!*got_frame) {
        av_log(avctx, AV_LOG_ERROR, "image data not found\n");
        return AVERROR_INVALIDDATA;
    }

    return avpkt->size;
}

static av_cold int webp_decode_close(AVCodecContext *avctx)
{
    WebPContext *s = avctx->priv_data;

    if (s->initialized)
        return ff_vp8_decode_free(avctx);

    return 0;
}

AVCodec ff_webp_decoder = {
    .name           = "webp",
    .long_name      = NULL_IF_CONFIG_SMALL("WebP image"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_WEBP,
    .priv_data_size = sizeof(WebPContext),
    .decode         = webp_decode_frame,
    .close          = webp_decode_close,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
};