  1. /*
  2. * PNG image format
  3. * Copyright (c) 2003 Fabrice Bellard
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. //#define DEBUG
  22. #include "libavutil/avassert.h"
  23. #include "libavutil/bprint.h"
  24. #include "libavutil/imgutils.h"
  25. #include "libavutil/stereo3d.h"
  26. #include "avcodec.h"
  27. #include "bytestream.h"
  28. #include "internal.h"
  29. #include "apng.h"
  30. #include "png.h"
  31. #include "pngdsp.h"
  32. #include "thread.h"
  33. #include <zlib.h>
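/*
 * Decoder context. Of the three ThreadFrames, 'picture' is the frame being
 * decoded, 'last_picture' is the previously decoded frame (used for PNG/MPNG
 * P-frames and as the APNG disposal source), and 'previous_picture' holds the
 * saved canvas that APNG_DISPOSE_OP_PREVIOUS reverts to. The cur_*, last_*
 * and *_offset fields describe the current and previous APNG frame
 * rectangles within the full canvas.
 */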
  34. typedef struct PNGDecContext {
  35. PNGDSPContext dsp;
  36. AVCodecContext *avctx;
  37. GetByteContext gb;
  38. ThreadFrame previous_picture;
  39. ThreadFrame last_picture;
  40. ThreadFrame picture;
  41. int state;
  42. int width, height;
  43. int cur_w, cur_h;
  44. int last_w, last_h;
  45. int x_offset, y_offset;
  46. int last_x_offset, last_y_offset;
  47. uint8_t dispose_op, blend_op;
  48. uint8_t last_dispose_op;
  49. int bit_depth;
  50. int color_type;
  51. int compression_type;
  52. int interlace_type;
  53. int filter_type;
  54. int channels;
  55. int bits_per_pixel;
  56. int bpp;
  57. int has_trns;
  58. uint8_t transparent_color_be[6];
  59. uint8_t *image_buf;
  60. int image_linesize;
  61. uint32_t palette[256];
  62. uint8_t *crow_buf;
  63. uint8_t *last_row;
  64. unsigned int last_row_size;
  65. uint8_t *tmp_row;
  66. unsigned int tmp_row_size;
  67. uint8_t *buffer;
  68. int buffer_size;
  69. int pass;
  70. int crow_size; /* compressed row size (includes the filter type byte) */
  71. int row_size; /* decompressed row size */
  72. int pass_row_size; /* decompressed row size of the current pass */
  73. int y;
  74. z_stream zstream;
  75. } PNGDecContext;
  76. /* Mask to determine which pixels are valid in a pass */
  77. static const uint8_t png_pass_mask[NB_PASSES] = {
  78. 0x01, 0x01, 0x11, 0x11, 0x55, 0x55, 0xff,
  79. };
  80. /* Mask to determine which y pixels can be written in a pass */
  81. static const uint8_t png_pass_dsp_ymask[NB_PASSES] = {
  82. 0xff, 0xff, 0x0f, 0xff, 0x33, 0xff, 0x55,
  83. };
  84. /* Mask to determine which pixels to overwrite while displaying */
  85. static const uint8_t png_pass_dsp_mask[NB_PASSES] = {
  86. 0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff
  87. };
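/*
 * All three tables are indexed by the Adam7 pass and read bit by bit:
 * '(mask << (x & 7)) & 0x80' tests bit (x & 7) of the mask, MSB first.
 * png_pass_mask selects which source pixels are present in a pass, while the
 * dsp masks select which destination pixels may be written or overwritten so
 * that earlier passes are progressively refined by later ones.
 */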
  88. /* NOTE: we try to construct a good looking image at each pass. width
  89. * is the original image width. We also do pixel format conversion at
  90. * this stage */
  91. static void png_put_interlaced_row(uint8_t *dst, int width,
  92. int bits_per_pixel, int pass,
  93. int color_type, const uint8_t *src)
  94. {
  95. int x, mask, dsp_mask, j, src_x, b, bpp;
  96. uint8_t *d;
  97. const uint8_t *s;
  98. mask = png_pass_mask[pass];
  99. dsp_mask = png_pass_dsp_mask[pass];
  100. switch (bits_per_pixel) {
  101. case 1:
  102. src_x = 0;
  103. for (x = 0; x < width; x++) {
  104. j = (x & 7);
  105. if ((dsp_mask << j) & 0x80) {
  106. b = (src[src_x >> 3] >> (7 - (src_x & 7))) & 1;
  107. dst[x >> 3] &= 0xFF7F>>j;
  108. dst[x >> 3] |= b << (7 - j);
  109. }
  110. if ((mask << j) & 0x80)
  111. src_x++;
  112. }
  113. break;
  114. case 2:
  115. src_x = 0;
  116. for (x = 0; x < width; x++) {
  117. int j2 = 2 * (x & 3);
  118. j = (x & 7);
  119. if ((dsp_mask << j) & 0x80) {
  120. b = (src[src_x >> 2] >> (6 - 2*(src_x & 3))) & 3;
  121. dst[x >> 2] &= 0xFF3F>>j2;
  122. dst[x >> 2] |= b << (6 - j2);
  123. }
  124. if ((mask << j) & 0x80)
  125. src_x++;
  126. }
  127. break;
  128. case 4:
  129. src_x = 0;
  130. for (x = 0; x < width; x++) {
  131. int j2 = 4*(x&1);
  132. j = (x & 7);
  133. if ((dsp_mask << j) & 0x80) {
  134. b = (src[src_x >> 1] >> (4 - 4*(src_x & 1))) & 15;
  135. dst[x >> 1] &= 0xFF0F>>j2;
  136. dst[x >> 1] |= b << (4 - j2);
  137. }
  138. if ((mask << j) & 0x80)
  139. src_x++;
  140. }
  141. break;
  142. default:
  143. bpp = bits_per_pixel >> 3;
  144. d = dst;
  145. s = src;
  146. for (x = 0; x < width; x++) {
  147. j = x & 7;
  148. if ((dsp_mask << j) & 0x80) {
  149. memcpy(d, s, bpp);
  150. }
  151. d += bpp;
  152. if ((mask << j) & 0x80)
  153. s += bpp;
  154. }
  155. break;
  156. }
  157. }
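/*
 * Scalar Paeth predictor (PNG filter type 4): predict p = a + b - c from the
 * left (a), above (b) and upper-left (c) neighbours and pick whichever of
 * a, b, c is closest to p, ties resolved in that order. The pa/pb/pc values
 * below are the algebraically simplified distances |b-c|, |a-c| and
 * |a+b-2c|. This also serves as the C fallback and tail handler for
 * PNGDSPContext.add_paeth_prediction.
 */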
  158. void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top,
  159. int w, int bpp)
  160. {
  161. int i;
  162. for (i = 0; i < w; i++) {
  163. int a, b, c, p, pa, pb, pc;
  164. a = dst[i - bpp];
  165. b = top[i];
  166. c = top[i - bpp];
  167. p = b - c;
  168. pc = a - c;
  169. pa = abs(p);
  170. pb = abs(pc);
  171. pc = abs(p + pc);
  172. if (pa <= pb && pa <= pc)
  173. p = a;
  174. else if (pb <= pc)
  175. p = b;
  176. else
  177. p = c;
  178. dst[i] = p + src[i];
  179. }
  180. }
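/*
 * UNROLL1/UNROLL_FILTER unroll the filter loop by the pixel size (1..4
 * bytes) so each channel's running value is kept in a local (r, g, b, a)
 * instead of being reloaded from dst[i - bpp]; the trailing loop in
 * UNROLL_FILTER handles any remaining bytes and the general bpp case.
 */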
  181. #define UNROLL1(bpp, op) \
  182. { \
  183. r = dst[0]; \
  184. if (bpp >= 2) \
  185. g = dst[1]; \
  186. if (bpp >= 3) \
  187. b = dst[2]; \
  188. if (bpp >= 4) \
  189. a = dst[3]; \
  190. for (; i <= size - bpp; i += bpp) { \
  191. dst[i + 0] = r = op(r, src[i + 0], last[i + 0]); \
  192. if (bpp == 1) \
  193. continue; \
  194. dst[i + 1] = g = op(g, src[i + 1], last[i + 1]); \
  195. if (bpp == 2) \
  196. continue; \
  197. dst[i + 2] = b = op(b, src[i + 2], last[i + 2]); \
  198. if (bpp == 3) \
  199. continue; \
  200. dst[i + 3] = a = op(a, src[i + 3], last[i + 3]); \
  201. } \
  202. }
  203. #define UNROLL_FILTER(op) \
  204. if (bpp == 1) { \
  205. UNROLL1(1, op) \
  206. } else if (bpp == 2) { \
  207. UNROLL1(2, op) \
  208. } else if (bpp == 3) { \
  209. UNROLL1(3, op) \
  210. } else if (bpp == 4) { \
  211. UNROLL1(4, op) \
  212. } \
  213. for (; i < size; i++) { \
  214. dst[i] = op(dst[i - bpp], src[i], last[i]); \
  215. }
  216. /* NOTE: 'dst' can be equal to 'last' */
  217. static void png_filter_row(PNGDSPContext *dsp, uint8_t *dst, int filter_type,
  218. uint8_t *src, uint8_t *last, int size, int bpp)
  219. {
  220. int i, p, r, g, b, a;
  221. switch (filter_type) {
  222. case PNG_FILTER_VALUE_NONE:
  223. memcpy(dst, src, size);
  224. break;
  225. case PNG_FILTER_VALUE_SUB:
  226. for (i = 0; i < bpp; i++)
  227. dst[i] = src[i];
  228. if (bpp == 4) {
  229. p = *(int *)dst;
  230. for (; i < size; i += bpp) {
  231. unsigned s = *(int *)(src + i);
  232. p = ((s & 0x7f7f7f7f) + (p & 0x7f7f7f7f)) ^ ((s ^ p) & 0x80808080);
  233. *(int *)(dst + i) = p;
  234. }
  235. } else {
  236. #define OP_SUB(x, s, l) ((x) + (s))
  237. UNROLL_FILTER(OP_SUB);
  238. }
  239. break;
  240. case PNG_FILTER_VALUE_UP:
  241. dsp->add_bytes_l2(dst, src, last, size);
  242. break;
  243. case PNG_FILTER_VALUE_AVG:
  244. for (i = 0; i < bpp; i++) {
  245. p = (last[i] >> 1);
  246. dst[i] = p + src[i];
  247. }
  248. #define OP_AVG(x, s, l) (((((x) + (l)) >> 1) + (s)) & 0xff)
  249. UNROLL_FILTER(OP_AVG);
  250. break;
  251. case PNG_FILTER_VALUE_PAETH:
  252. for (i = 0; i < bpp; i++) {
  253. p = last[i];
  254. dst[i] = p + src[i];
  255. }
  256. if (bpp > 2 && size > 4) {
  257. /* would write off the end of the array if we let it process
  258. * the last pixel with bpp=3 */
  259. int w = (bpp & 3) ? size - 3 : size;
  260. if (w > i) {
  261. dsp->add_paeth_prediction(dst + i, src + i, last + i, w - i, bpp);
  262. i = w;
  263. }
  264. }
  265. ff_add_png_paeth_prediction(dst + i, src + i, last + i, size - i, bpp);
  266. break;
  267. }
  268. }
  269. /* This used to be called "deloco" in FFmpeg
  270. * and is actually an inverse reversible colorspace transformation */
  271. #define YUV2RGB(NAME, TYPE) \
  272. static void deloco_ ## NAME(TYPE *dst, int size, int alpha) \
  273. { \
  274. int i; \
  275. for (i = 0; i < size; i += 3 + alpha) { \
  276. int g = dst [i + 1]; \
  277. dst[i + 0] += g; \
  278. dst[i + 2] += g; \
  279. } \
  280. }
  281. YUV2RGB(rgb8, uint8_t)
  282. YUV2RGB(rgb16, uint16_t)
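/*
 * The deloco_* functions undo the intra-pixel differencing of
 * PNG_FILTER_TYPE_LOCO (an MNG extension): R and B are stored as offsets
 * from G, so the inverse simply adds G back to channels 0 and 2 of every
 * pixel, leaving alpha untouched when present. png_handle_row() applies it
 * one row behind the row filters and once more on the final row.
 */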
  283. /* process exactly one decompressed row */
  284. static void png_handle_row(PNGDecContext *s)
  285. {
  286. uint8_t *ptr, *last_row;
  287. int got_line;
  288. if (!s->interlace_type) {
  289. ptr = s->image_buf + s->image_linesize * (s->y + s->y_offset) + s->x_offset * s->bpp;
  290. if (s->y == 0)
  291. last_row = s->last_row;
  292. else
  293. last_row = ptr - s->image_linesize;
  294. png_filter_row(&s->dsp, ptr, s->crow_buf[0], s->crow_buf + 1,
  295. last_row, s->row_size, s->bpp);
  296. /* loco lags by 1 row so that it doesn't interfere with top prediction */
  297. if (s->filter_type == PNG_FILTER_TYPE_LOCO && s->y > 0) {
  298. if (s->bit_depth == 16) {
  299. deloco_rgb16((uint16_t *)(ptr - s->image_linesize), s->row_size / 2,
  300. s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
  301. } else {
  302. deloco_rgb8(ptr - s->image_linesize, s->row_size,
  303. s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
  304. }
  305. }
  306. s->y++;
  307. if (s->y == s->cur_h) {
  308. s->state |= PNG_ALLIMAGE;
  309. if (s->filter_type == PNG_FILTER_TYPE_LOCO) {
  310. if (s->bit_depth == 16) {
  311. deloco_rgb16((uint16_t *)ptr, s->row_size / 2,
  312. s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
  313. } else {
  314. deloco_rgb8(ptr, s->row_size,
  315. s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
  316. }
  317. }
  318. }
  319. } else {
  320. got_line = 0;
  321. for (;;) {
  322. ptr = s->image_buf + s->image_linesize * (s->y + s->y_offset) + s->x_offset * s->bpp;
  323. if ((ff_png_pass_ymask[s->pass] << (s->y & 7)) & 0x80) {
  324. /* if we already read one row, it is time to stop to
  325. * wait for the next one */
  326. if (got_line)
  327. break;
  328. png_filter_row(&s->dsp, s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
  329. s->last_row, s->pass_row_size, s->bpp);
  330. FFSWAP(uint8_t *, s->last_row, s->tmp_row);
  331. FFSWAP(unsigned int, s->last_row_size, s->tmp_row_size);
  332. got_line = 1;
  333. }
  334. if ((png_pass_dsp_ymask[s->pass] << (s->y & 7)) & 0x80) {
  335. png_put_interlaced_row(ptr, s->cur_w, s->bits_per_pixel, s->pass,
  336. s->color_type, s->last_row);
  337. }
  338. s->y++;
  339. if (s->y == s->cur_h) {
  340. memset(s->last_row, 0, s->row_size);
  341. for (;;) {
  342. if (s->pass == NB_PASSES - 1) {
  343. s->state |= PNG_ALLIMAGE;
  344. goto the_end;
  345. } else {
  346. s->pass++;
  347. s->y = 0;
  348. s->pass_row_size = ff_png_pass_row_size(s->pass,
  349. s->bits_per_pixel,
  350. s->cur_w);
  351. s->crow_size = s->pass_row_size + 1;
  352. if (s->pass_row_size != 0)
  353. break;
  354. /* skip pass if empty row */
  355. }
  356. }
  357. }
  358. }
  359. the_end:;
  360. }
  361. }
  362. static int png_decode_idat(PNGDecContext *s, int length)
  363. {
  364. int ret;
  365. s->zstream.avail_in = FFMIN(length, bytestream2_get_bytes_left(&s->gb));
  366. s->zstream.next_in = (unsigned char *)s->gb.buffer;
  367. bytestream2_skip(&s->gb, length);
  368. /* decode one line if possible */
  369. while (s->zstream.avail_in > 0) {
  370. ret = inflate(&s->zstream, Z_PARTIAL_FLUSH);
  371. if (ret != Z_OK && ret != Z_STREAM_END) {
  372. av_log(s->avctx, AV_LOG_ERROR, "inflate returned error %d\n", ret);
  373. return AVERROR_EXTERNAL;
  374. }
  375. if (s->zstream.avail_out == 0) {
  376. if (!(s->state & PNG_ALLIMAGE)) {
  377. png_handle_row(s);
  378. }
  379. s->zstream.avail_out = s->crow_size;
  380. s->zstream.next_out = s->crow_buf;
  381. }
  382. if (ret == Z_STREAM_END && s->zstream.avail_in > 0) {
  383. av_log(NULL, AV_LOG_WARNING,
  384. "%d undecompressed bytes left in buffer\n", s->zstream.avail_in);
  385. return 0;
  386. }
  387. }
  388. return 0;
  389. }
  390. static int decode_zbuf(AVBPrint *bp, const uint8_t *data,
  391. const uint8_t *data_end)
  392. {
  393. z_stream zstream;
  394. unsigned char *buf;
  395. unsigned buf_size;
  396. int ret;
  397. zstream.zalloc = ff_png_zalloc;
  398. zstream.zfree = ff_png_zfree;
  399. zstream.opaque = NULL;
  400. if (inflateInit(&zstream) != Z_OK)
  401. return AVERROR_EXTERNAL;
  402. zstream.next_in = (unsigned char *)data;
  403. zstream.avail_in = data_end - data;
  404. av_bprint_init(bp, 0, AV_BPRINT_SIZE_UNLIMITED);
  405. while (zstream.avail_in > 0) {
  406. av_bprint_get_buffer(bp, 2, &buf, &buf_size);
  407. if (buf_size < 2) {
  408. ret = AVERROR(ENOMEM);
  409. goto fail;
  410. }
  411. zstream.next_out = buf;
  412. zstream.avail_out = buf_size - 1;
  413. ret = inflate(&zstream, Z_PARTIAL_FLUSH);
  414. if (ret != Z_OK && ret != Z_STREAM_END) {
  415. ret = AVERROR_EXTERNAL;
  416. goto fail;
  417. }
  418. bp->len += zstream.next_out - buf;
  419. if (ret == Z_STREAM_END)
  420. break;
  421. }
  422. inflateEnd(&zstream);
  423. bp->str[bp->len] = 0;
  424. return 0;
  425. fail:
  426. inflateEnd(&zstream);
  427. av_bprint_finalize(bp, NULL);
  428. return ret;
  429. }
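/*
 * tEXt/zTXt payloads are ISO 8859-1 while the metadata dictionary expects
 * UTF-8. Bytes below 0x80 map to themselves; every byte >= 0x80 expands to
 * the two-byte sequence 0xC0 | (c >> 6), 0x80 | (c & 0x3F) (e.g. 0xE9
 * becomes 0xC3 0xA9), which is why the output buffer needs
 * size_in + extra + 1 bytes.
 */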
  430. static uint8_t *iso88591_to_utf8(const uint8_t *in, size_t size_in)
  431. {
  432. size_t extra = 0, i;
  433. uint8_t *out, *q;
  434. for (i = 0; i < size_in; i++)
  435. extra += in[i] >= 0x80;
  436. if (size_in == SIZE_MAX || extra > SIZE_MAX - size_in - 1)
  437. return NULL;
  438. q = out = av_malloc(size_in + extra + 1);
  439. if (!out)
  440. return NULL;
  441. for (i = 0; i < size_in; i++) {
  442. if (in[i] >= 0x80) {
  443. *(q++) = 0xC0 | (in[i] >> 6);
  444. *(q++) = 0x80 | (in[i] & 0x3F);
  445. } else {
  446. *(q++) = in[i];
  447. }
  448. }
  449. *(q++) = 0;
  450. return out;
  451. }
  452. static int decode_text_chunk(PNGDecContext *s, uint32_t length, int compressed,
  453. AVDictionary **dict)
  454. {
  455. int ret, method;
  456. const uint8_t *data = s->gb.buffer;
  457. const uint8_t *data_end = data + length;
  458. const uint8_t *keyword = data;
  459. const uint8_t *keyword_end = memchr(keyword, 0, data_end - keyword);
  460. uint8_t *kw_utf8 = NULL, *text, *txt_utf8 = NULL;
  461. unsigned text_len;
  462. AVBPrint bp;
  463. if (!keyword_end)
  464. return AVERROR_INVALIDDATA;
  465. data = keyword_end + 1;
  466. if (compressed) {
  467. if (data == data_end)
  468. return AVERROR_INVALIDDATA;
  469. method = *(data++);
  470. if (method)
  471. return AVERROR_INVALIDDATA;
  472. if ((ret = decode_zbuf(&bp, data, data_end)) < 0)
  473. return ret;
  474. text_len = bp.len;
  475. av_bprint_finalize(&bp, (char **)&text);
  476. if (!text)
  477. return AVERROR(ENOMEM);
  478. } else {
  479. text = (uint8_t *)data;
  480. text_len = data_end - text;
  481. }
  482. kw_utf8 = iso88591_to_utf8(keyword, keyword_end - keyword);
  483. txt_utf8 = iso88591_to_utf8(text, text_len);
  484. if (text != data)
  485. av_free(text);
  486. if (!(kw_utf8 && txt_utf8)) {
  487. av_free(kw_utf8);
  488. av_free(txt_utf8);
  489. return AVERROR(ENOMEM);
  490. }
  491. av_dict_set(dict, kw_utf8, txt_utf8,
  492. AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
  493. return 0;
  494. }
  495. static int decode_ihdr_chunk(AVCodecContext *avctx, PNGDecContext *s,
  496. uint32_t length)
  497. {
  498. if (length != 13)
  499. return AVERROR_INVALIDDATA;
  500. if (s->state & PNG_IDAT) {
  501. av_log(avctx, AV_LOG_ERROR, "IHDR after IDAT\n");
  502. return AVERROR_INVALIDDATA;
  503. }
  504. if (s->state & PNG_IHDR) {
  505. av_log(avctx, AV_LOG_ERROR, "Multiple IHDR\n");
  506. return AVERROR_INVALIDDATA;
  507. }
  508. s->width = s->cur_w = bytestream2_get_be32(&s->gb);
  509. s->height = s->cur_h = bytestream2_get_be32(&s->gb);
  510. if (av_image_check_size(s->width, s->height, 0, avctx)) {
  511. s->cur_w = s->cur_h = s->width = s->height = 0;
  512. av_log(avctx, AV_LOG_ERROR, "Invalid image size\n");
  513. return AVERROR_INVALIDDATA;
  514. }
  515. s->bit_depth = bytestream2_get_byte(&s->gb);
  516. if (s->bit_depth != 1 && s->bit_depth != 2 && s->bit_depth != 4 &&
  517. s->bit_depth != 8 && s->bit_depth != 16) {
  518. av_log(avctx, AV_LOG_ERROR, "Invalid bit depth\n");
  519. goto error;
  520. }
  521. s->color_type = bytestream2_get_byte(&s->gb);
  522. s->compression_type = bytestream2_get_byte(&s->gb);
  523. s->filter_type = bytestream2_get_byte(&s->gb);
  524. s->interlace_type = bytestream2_get_byte(&s->gb);
  525. bytestream2_skip(&s->gb, 4); /* crc */
  526. s->state |= PNG_IHDR;
  527. if (avctx->debug & FF_DEBUG_PICT_INFO)
  528. av_log(avctx, AV_LOG_DEBUG, "width=%d height=%d depth=%d color_type=%d "
  529. "compression_type=%d filter_type=%d interlace_type=%d\n",
  530. s->width, s->height, s->bit_depth, s->color_type,
  531. s->compression_type, s->filter_type, s->interlace_type);
  532. return 0;
  533. error:
  534. s->cur_w = s->cur_h = s->width = s->height = 0;
  535. s->bit_depth = 8;
  536. return AVERROR_INVALIDDATA;
  537. }
  538. static int decode_phys_chunk(AVCodecContext *avctx, PNGDecContext *s)
  539. {
  540. if (s->state & PNG_IDAT) {
  541. av_log(avctx, AV_LOG_ERROR, "pHYs after IDAT\n");
  542. return AVERROR_INVALIDDATA;
  543. }
  544. avctx->sample_aspect_ratio.num = bytestream2_get_be32(&s->gb);
  545. avctx->sample_aspect_ratio.den = bytestream2_get_be32(&s->gb);
  546. if (avctx->sample_aspect_ratio.num < 0 || avctx->sample_aspect_ratio.den < 0)
  547. avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
  548. bytestream2_skip(&s->gb, 1); /* unit specifier */
  549. bytestream2_skip(&s->gb, 4); /* crc */
  550. return 0;
  551. }
  552. static int decode_idat_chunk(AVCodecContext *avctx, PNGDecContext *s,
  553. uint32_t length, AVFrame *p)
  554. {
  555. int ret;
  556. size_t byte_depth = s->bit_depth > 8 ? 2 : 1;
  557. if (!(s->state & PNG_IHDR)) {
  558. av_log(avctx, AV_LOG_ERROR, "IDAT without IHDR\n");
  559. return AVERROR_INVALIDDATA;
  560. }
  561. if (!(s->state & PNG_IDAT)) {
  562. /* init image info */
  563. avctx->width = s->width;
  564. avctx->height = s->height;
  565. s->channels = ff_png_get_nb_channels(s->color_type);
  566. s->bits_per_pixel = s->bit_depth * s->channels;
  567. s->bpp = (s->bits_per_pixel + 7) >> 3;
  568. s->row_size = (s->cur_w * s->bits_per_pixel + 7) >> 3;
  569. if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
  570. s->color_type == PNG_COLOR_TYPE_RGB) {
  571. avctx->pix_fmt = AV_PIX_FMT_RGB24;
  572. } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
  573. s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
  574. avctx->pix_fmt = AV_PIX_FMT_RGBA;
  575. } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
  576. s->color_type == PNG_COLOR_TYPE_GRAY) {
  577. avctx->pix_fmt = AV_PIX_FMT_GRAY8;
  578. } else if (s->bit_depth == 16 &&
  579. s->color_type == PNG_COLOR_TYPE_GRAY) {
  580. avctx->pix_fmt = AV_PIX_FMT_GRAY16BE;
  581. } else if (s->bit_depth == 16 &&
  582. s->color_type == PNG_COLOR_TYPE_RGB) {
  583. avctx->pix_fmt = AV_PIX_FMT_RGB48BE;
  584. } else if (s->bit_depth == 16 &&
  585. s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
  586. avctx->pix_fmt = AV_PIX_FMT_RGBA64BE;
  587. } else if ((s->bits_per_pixel == 1 || s->bits_per_pixel == 2 || s->bits_per_pixel == 4 || s->bits_per_pixel == 8) &&
  588. s->color_type == PNG_COLOR_TYPE_PALETTE) {
  589. avctx->pix_fmt = AV_PIX_FMT_PAL8;
  590. } else if (s->bit_depth == 1 && s->bits_per_pixel == 1 && avctx->codec_id != AV_CODEC_ID_APNG) {
  591. avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
  592. } else if (s->bit_depth == 8 &&
  593. s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
  594. avctx->pix_fmt = AV_PIX_FMT_YA8;
  595. } else if (s->bit_depth == 16 &&
  596. s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
  597. avctx->pix_fmt = AV_PIX_FMT_YA16BE;
  598. } else {
  599. av_log(avctx, AV_LOG_ERROR, "unsupported bit depth %d "
  600. "and color type %d\n",
  601. s->bit_depth, s->color_type);
  602. return AVERROR_INVALIDDATA;
  603. }
  604. if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) {
  605. switch (avctx->pix_fmt) {
  606. case AV_PIX_FMT_RGB24:
  607. avctx->pix_fmt = AV_PIX_FMT_RGBA;
  608. break;
  609. case AV_PIX_FMT_RGB48BE:
  610. avctx->pix_fmt = AV_PIX_FMT_RGBA64BE;
  611. break;
  612. case AV_PIX_FMT_GRAY8:
  613. avctx->pix_fmt = AV_PIX_FMT_YA8;
  614. break;
  615. case AV_PIX_FMT_GRAY16BE:
  616. avctx->pix_fmt = AV_PIX_FMT_YA16BE;
  617. break;
  618. default:
  619. avpriv_request_sample(avctx, "bit depth %d "
  620. "and color type %d with TRNS",
  621. s->bit_depth, s->color_type);
  622. return AVERROR_INVALIDDATA;
  623. }
  624. s->bpp += byte_depth;
  625. }
  626. if ((ret = ff_thread_get_buffer(avctx, &s->picture, AV_GET_BUFFER_FLAG_REF)) < 0)
  627. return ret;
  628. if (avctx->codec_id == AV_CODEC_ID_APNG && s->last_dispose_op != APNG_DISPOSE_OP_PREVIOUS) {
  629. ff_thread_release_buffer(avctx, &s->previous_picture);
  630. if ((ret = ff_thread_get_buffer(avctx, &s->previous_picture, AV_GET_BUFFER_FLAG_REF)) < 0)
  631. return ret;
  632. }
  633. ff_thread_finish_setup(avctx);
  634. p->pict_type = AV_PICTURE_TYPE_I;
  635. p->key_frame = 1;
  636. p->interlaced_frame = !!s->interlace_type;
  637. /* compute the compressed row size */
  638. if (!s->interlace_type) {
  639. s->crow_size = s->row_size + 1;
  640. } else {
  641. s->pass = 0;
  642. s->pass_row_size = ff_png_pass_row_size(s->pass,
  643. s->bits_per_pixel,
  644. s->cur_w);
  645. s->crow_size = s->pass_row_size + 1;
  646. }
  647. ff_dlog(avctx, "row_size=%d crow_size=%d\n",
  648. s->row_size, s->crow_size);
  649. s->image_buf = p->data[0];
  650. s->image_linesize = p->linesize[0];
  651. /* copy the palette if needed */
  652. if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
  653. memcpy(p->data[1], s->palette, 256 * sizeof(uint32_t));
  654. /* empty row is used if differencing to the first row */
  655. av_fast_padded_mallocz(&s->last_row, &s->last_row_size, s->row_size);
  656. if (!s->last_row)
  657. return AVERROR_INVALIDDATA;
  658. if (s->interlace_type ||
  659. s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
  660. av_fast_padded_malloc(&s->tmp_row, &s->tmp_row_size, s->row_size);
  661. if (!s->tmp_row)
  662. return AVERROR_INVALIDDATA;
  663. }
  664. /* compressed row */
  665. av_fast_padded_malloc(&s->buffer, &s->buffer_size, s->row_size + 16);
  666. if (!s->buffer)
  667. return AVERROR(ENOMEM);
  668. /* we want crow_buf+1 to be 16-byte aligned */
  669. s->crow_buf = s->buffer + 15;
  670. s->zstream.avail_out = s->crow_size;
  671. s->zstream.next_out = s->crow_buf;
  672. }
  673. s->state |= PNG_IDAT;
  674. /* set image to non-transparent bpp while decompressing */
  675. if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE)
  676. s->bpp -= byte_depth;
  677. ret = png_decode_idat(s, length);
  678. if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE)
  679. s->bpp += byte_depth;
  680. if (ret < 0)
  681. return ret;
  682. bytestream2_skip(&s->gb, 4); /* crc */
  683. return 0;
  684. }
  685. static int decode_plte_chunk(AVCodecContext *avctx, PNGDecContext *s,
  686. uint32_t length)
  687. {
  688. int n, i, r, g, b;
  689. if ((length % 3) != 0 || length > 256 * 3)
  690. return AVERROR_INVALIDDATA;
  691. /* read the palette */
  692. n = length / 3;
  693. for (i = 0; i < n; i++) {
  694. r = bytestream2_get_byte(&s->gb);
  695. g = bytestream2_get_byte(&s->gb);
  696. b = bytestream2_get_byte(&s->gb);
  697. s->palette[i] = (0xFFU << 24) | (r << 16) | (g << 8) | b;
  698. }
  699. for (; i < 256; i++)
  700. s->palette[i] = (0xFFU << 24);
  701. s->state |= PNG_PLTE;
  702. bytestream2_skip(&s->gb, 4); /* crc */
  703. return 0;
  704. }
  705. static int decode_trns_chunk(AVCodecContext *avctx, PNGDecContext *s,
  706. uint32_t length)
  707. {
  708. int v, i;
  709. if (!(s->state & PNG_IHDR)) {
  710. av_log(avctx, AV_LOG_ERROR, "trns before IHDR\n");
  711. return AVERROR_INVALIDDATA;
  712. }
  713. if (s->state & PNG_IDAT) {
  714. av_log(avctx, AV_LOG_ERROR, "trns after IDAT\n");
  715. return AVERROR_INVALIDDATA;
  716. }
  717. if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
  718. if (length > 256 || !(s->state & PNG_PLTE))
  719. return AVERROR_INVALIDDATA;
  720. for (i = 0; i < length; i++) {
  721. unsigned v = bytestream2_get_byte(&s->gb);
  722. s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);
  723. }
  724. } else if (s->color_type == PNG_COLOR_TYPE_GRAY || s->color_type == PNG_COLOR_TYPE_RGB) {
  725. if ((s->color_type == PNG_COLOR_TYPE_GRAY && length != 2) ||
  726. (s->color_type == PNG_COLOR_TYPE_RGB && length != 6) ||
  727. s->bit_depth == 1)
  728. return AVERROR_INVALIDDATA;
  729. for (i = 0; i < length / 2; i++) {
  730. /* only use the least significant bits */
  731. v = av_mod_uintp2(bytestream2_get_be16(&s->gb), s->bit_depth);
  732. if (s->bit_depth > 8)
  733. AV_WB16(&s->transparent_color_be[2 * i], v);
  734. else
  735. s->transparent_color_be[i] = v;
  736. }
  737. } else {
  738. return AVERROR_INVALIDDATA;
  739. }
  740. bytestream2_skip(&s->gb, 4); /* crc */
  741. s->has_trns = 1;
  742. return 0;
  743. }
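/*
 * Expand 1/2/4-bit samples to one byte per sample, in place. Each row is
 * walked right to left so a packed byte is read before the bytes it expands
 * into are written. Palette indices are widened as-is; grayscale samples are
 * scaled to the full 8-bit range (2-bit values by 0x55, 4-bit values by
 * 0x11).
 */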
  744. static void handle_small_bpp(PNGDecContext *s, AVFrame *p)
  745. {
  746. if (s->bits_per_pixel == 1 && s->color_type == PNG_COLOR_TYPE_PALETTE) {
  747. int i, j, k;
  748. uint8_t *pd = p->data[0];
  749. for (j = 0; j < s->height; j++) {
  750. i = s->width / 8;
  751. for (k = 7; k >= 1; k--)
  752. if ((s->width&7) >= k)
  753. pd[8*i + k - 1] = (pd[i] >> (8 - k)) & 1;
  754. for (i--; i >= 0; i--) {
  755. pd[8*i + 7]= pd[i] & 1;
  756. pd[8*i + 6]= (pd[i]>>1) & 1;
  757. pd[8*i + 5]= (pd[i]>>2) & 1;
  758. pd[8*i + 4]= (pd[i]>>3) & 1;
  759. pd[8*i + 3]= (pd[i]>>4) & 1;
  760. pd[8*i + 2]= (pd[i]>>5) & 1;
  761. pd[8*i + 1]= (pd[i]>>6) & 1;
  762. pd[8*i + 0]= pd[i]>>7;
  763. }
  764. pd += s->image_linesize;
  765. }
  766. } else if (s->bits_per_pixel == 2) {
  767. int i, j;
  768. uint8_t *pd = p->data[0];
  769. for (j = 0; j < s->height; j++) {
  770. i = s->width / 4;
  771. if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
  772. if ((s->width&3) >= 3) pd[4*i + 2]= (pd[i] >> 2) & 3;
  773. if ((s->width&3) >= 2) pd[4*i + 1]= (pd[i] >> 4) & 3;
  774. if ((s->width&3) >= 1) pd[4*i + 0]= pd[i] >> 6;
  775. for (i--; i >= 0; i--) {
  776. pd[4*i + 3]= pd[i] & 3;
  777. pd[4*i + 2]= (pd[i]>>2) & 3;
  778. pd[4*i + 1]= (pd[i]>>4) & 3;
  779. pd[4*i + 0]= pd[i]>>6;
  780. }
  781. } else {
  782. if ((s->width&3) >= 3) pd[4*i + 2]= ((pd[i]>>2) & 3)*0x55;
  783. if ((s->width&3) >= 2) pd[4*i + 1]= ((pd[i]>>4) & 3)*0x55;
  784. if ((s->width&3) >= 1) pd[4*i + 0]= ( pd[i]>>6 )*0x55;
  785. for (i--; i >= 0; i--) {
  786. pd[4*i + 3]= ( pd[i] & 3)*0x55;
  787. pd[4*i + 2]= ((pd[i]>>2) & 3)*0x55;
  788. pd[4*i + 1]= ((pd[i]>>4) & 3)*0x55;
  789. pd[4*i + 0]= ( pd[i]>>6 )*0x55;
  790. }
  791. }
  792. pd += s->image_linesize;
  793. }
  794. } else if (s->bits_per_pixel == 4) {
  795. int i, j;
  796. uint8_t *pd = p->data[0];
  797. for (j = 0; j < s->height; j++) {
  798. i = s->width/2;
  799. if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
  800. if (s->width&1) pd[2*i+0]= pd[i]>>4;
  801. for (i--; i >= 0; i--) {
  802. pd[2*i + 1] = pd[i] & 15;
  803. pd[2*i + 0] = pd[i] >> 4;
  804. }
  805. } else {
  806. if (s->width & 1) pd[2*i + 0]= (pd[i] >> 4) * 0x11;
  807. for (i--; i >= 0; i--) {
  808. pd[2*i + 1] = (pd[i] & 15) * 0x11;
  809. pd[2*i + 0] = (pd[i] >> 4) * 0x11;
  810. }
  811. }
  812. pd += s->image_linesize;
  813. }
  814. }
  815. }
  816. static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s,
  817. uint32_t length)
  818. {
  819. uint32_t sequence_number;
  820. int cur_w, cur_h, x_offset, y_offset, dispose_op, blend_op;
  821. if (length != 26)
  822. return AVERROR_INVALIDDATA;
  823. if (!(s->state & PNG_IHDR)) {
  824. av_log(avctx, AV_LOG_ERROR, "fctl before IHDR\n");
  825. return AVERROR_INVALIDDATA;
  826. }
  827. s->last_w = s->cur_w;
  828. s->last_h = s->cur_h;
  829. s->last_x_offset = s->x_offset;
  830. s->last_y_offset = s->y_offset;
  831. s->last_dispose_op = s->dispose_op;
  832. sequence_number = bytestream2_get_be32(&s->gb);
  833. cur_w = bytestream2_get_be32(&s->gb);
  834. cur_h = bytestream2_get_be32(&s->gb);
  835. x_offset = bytestream2_get_be32(&s->gb);
  836. y_offset = bytestream2_get_be32(&s->gb);
  837. bytestream2_skip(&s->gb, 4); /* delay_num (2), delay_den (2) */
  838. dispose_op = bytestream2_get_byte(&s->gb);
  839. blend_op = bytestream2_get_byte(&s->gb);
  840. bytestream2_skip(&s->gb, 4); /* crc */
  841. if (sequence_number == 0 &&
  842. (cur_w != s->width ||
  843. cur_h != s->height ||
  844. x_offset != 0 ||
  845. y_offset != 0) ||
  846. cur_w <= 0 || cur_h <= 0 ||
  847. x_offset < 0 || y_offset < 0 ||
  848. cur_w > s->width - x_offset|| cur_h > s->height - y_offset)
  849. return AVERROR_INVALIDDATA;
  850. if (blend_op != APNG_BLEND_OP_OVER && blend_op != APNG_BLEND_OP_SOURCE) {
  851. av_log(avctx, AV_LOG_ERROR, "Invalid blend_op %d\n", blend_op);
  852. return AVERROR_INVALIDDATA;
  853. }
  854. if (sequence_number == 0 && dispose_op == APNG_DISPOSE_OP_PREVIOUS) {
  855. // No previous frame to revert to for the first frame
  856. // Spec says to just treat it as an APNG_DISPOSE_OP_BACKGROUND
  857. dispose_op = APNG_DISPOSE_OP_BACKGROUND;
  858. }
  859. if (blend_op == APNG_BLEND_OP_OVER && !s->has_trns && (
  860. avctx->pix_fmt == AV_PIX_FMT_RGB24 ||
  861. avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
  862. avctx->pix_fmt == AV_PIX_FMT_PAL8 ||
  863. avctx->pix_fmt == AV_PIX_FMT_GRAY8 ||
  864. avctx->pix_fmt == AV_PIX_FMT_GRAY16BE ||
  865. avctx->pix_fmt == AV_PIX_FMT_MONOBLACK
  866. )) {
  867. // APNG_BLEND_OP_OVER is the same as APNG_BLEND_OP_SOURCE when there is no alpha channel
  868. blend_op = APNG_BLEND_OP_SOURCE;
  869. }
  870. s->cur_w = cur_w;
  871. s->cur_h = cur_h;
  872. s->x_offset = x_offset;
  873. s->y_offset = y_offset;
  874. s->dispose_op = dispose_op;
  875. s->blend_op = blend_op;
  876. return 0;
  877. }
  878. static void handle_p_frame_png(PNGDecContext *s, AVFrame *p)
  879. {
  880. int i, j;
  881. uint8_t *pd = p->data[0];
  882. uint8_t *pd_last = s->last_picture.f->data[0];
  883. int ls = FFMIN(av_image_get_linesize(p->format, s->width, 0), s->width * s->bpp);
  884. ff_thread_await_progress(&s->last_picture, INT_MAX, 0);
  885. for (j = 0; j < s->height; j++) {
  886. for (i = 0; i < ls; i++)
  887. pd[i] += pd_last[i];
  888. pd += s->image_linesize;
  889. pd_last += s->image_linesize;
  890. }
  891. }
  892. // divide by 255 and round to nearest
  893. // apply a fast variant: (X+127)/255 = ((X+127)*257+257)>>16 = ((X+128)*257)>>16
  894. #define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)
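/*
 * Worked examples of the identity: x = 65025 (255*255) gives
 * (65025 + 128) * 257 = 16744321 and 16744321 >> 16 = 255; x = 127 gives 0
 * and x = 128 gives 1, matching x / 255 rounded to nearest. 65025 is also
 * the largest value the blending code below ever passes in.
 */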
  895. static int handle_p_frame_apng(AVCodecContext *avctx, PNGDecContext *s,
  896. AVFrame *p)
  897. {
  898. size_t x, y;
  899. uint8_t *buffer;
  900. if (s->blend_op == APNG_BLEND_OP_OVER &&
  901. avctx->pix_fmt != AV_PIX_FMT_RGBA &&
  902. avctx->pix_fmt != AV_PIX_FMT_GRAY8A &&
  903. avctx->pix_fmt != AV_PIX_FMT_PAL8) {
  904. avpriv_request_sample(avctx, "Blending with pixel format %s",
  905. av_get_pix_fmt_name(avctx->pix_fmt));
  906. return AVERROR_PATCHWELCOME;
  907. }
  908. buffer = av_malloc_array(s->image_linesize, s->height);
  909. if (!buffer)
  910. return AVERROR(ENOMEM);
  911. // Do the disposal operation specified by the last frame on the frame
  912. if (s->last_dispose_op != APNG_DISPOSE_OP_PREVIOUS) {
  913. ff_thread_await_progress(&s->last_picture, INT_MAX, 0);
  914. memcpy(buffer, s->last_picture.f->data[0], s->image_linesize * s->height);
  915. if (s->last_dispose_op == APNG_DISPOSE_OP_BACKGROUND)
  916. for (y = s->last_y_offset; y < s->last_y_offset + s->last_h; ++y)
  917. memset(buffer + s->image_linesize * y + s->bpp * s->last_x_offset, 0, s->bpp * s->last_w);
  918. memcpy(s->previous_picture.f->data[0], buffer, s->image_linesize * s->height);
  919. ff_thread_report_progress(&s->previous_picture, INT_MAX, 0);
  920. } else {
  921. ff_thread_await_progress(&s->previous_picture, INT_MAX, 0);
  922. memcpy(buffer, s->previous_picture.f->data[0], s->image_linesize * s->height);
  923. }
  924. // Perform blending
  925. if (s->blend_op == APNG_BLEND_OP_SOURCE) {
  926. for (y = s->y_offset; y < s->y_offset + s->cur_h; ++y) {
  927. size_t row_start = s->image_linesize * y + s->bpp * s->x_offset;
  928. memcpy(buffer + row_start, p->data[0] + row_start, s->bpp * s->cur_w);
  929. }
  930. } else { // APNG_BLEND_OP_OVER
  931. for (y = s->y_offset; y < s->y_offset + s->cur_h; ++y) {
  932. uint8_t *foreground = p->data[0] + s->image_linesize * y + s->bpp * s->x_offset;
  933. uint8_t *background = buffer + s->image_linesize * y + s->bpp * s->x_offset;
  934. for (x = s->x_offset; x < s->x_offset + s->cur_w; ++x, foreground += s->bpp, background += s->bpp) {
  935. size_t b;
  936. uint8_t foreground_alpha, background_alpha, output_alpha;
  937. uint8_t output[10];
  938. // Since we might be blending alpha onto alpha, we use the following equations:
  939. // output_alpha = foreground_alpha + (1 - foreground_alpha) * background_alpha
  940. // output = (foreground_alpha * foreground + (1 - foreground_alpha) * background_alpha * background) / output_alpha
  941. switch (avctx->pix_fmt) {
  942. case AV_PIX_FMT_RGBA:
  943. foreground_alpha = foreground[3];
  944. background_alpha = background[3];
  945. break;
  946. case AV_PIX_FMT_GRAY8A:
  947. foreground_alpha = foreground[1];
  948. background_alpha = background[1];
  949. break;
  950. case AV_PIX_FMT_PAL8:
  951. foreground_alpha = s->palette[foreground[0]] >> 24;
  952. background_alpha = s->palette[background[0]] >> 24;
  953. break;
  954. }
  955. if (foreground_alpha == 0)
  956. continue;
  957. if (foreground_alpha == 255) {
  958. memcpy(background, foreground, s->bpp);
  959. continue;
  960. }
  961. if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
  962. // TODO: Alpha blending with PAL8 will likely need the entire image converted over to RGBA first
  963. avpriv_request_sample(avctx, "Alpha blending palette samples");
  964. background[0] = foreground[0];
  965. continue;
  966. }
  967. output_alpha = foreground_alpha + FAST_DIV255((255 - foreground_alpha) * background_alpha);
  968. av_assert0(s->bpp <= 10);
  969. for (b = 0; b < s->bpp - 1; ++b) {
  970. if (output_alpha == 0) {
  971. output[b] = 0;
  972. } else if (background_alpha == 255) {
  973. output[b] = FAST_DIV255(foreground_alpha * foreground[b] + (255 - foreground_alpha) * background[b]);
  974. } else {
  975. output[b] = (255 * foreground_alpha * foreground[b] + (255 - foreground_alpha) * background_alpha * background[b]) / (255 * output_alpha);
  976. }
  977. }
  978. output[b] = output_alpha;
  979. memcpy(background, output, s->bpp);
  980. }
  981. }
  982. }
  983. // Copy blended buffer into the frame and free
  984. memcpy(p->data[0], buffer, s->image_linesize * s->height);
  985. av_free(buffer);
  986. return 0;
  987. }
  988. static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s,
  989. AVFrame *p, AVPacket *avpkt)
  990. {
  991. AVDictionary *metadata = NULL;
  992. uint32_t tag, length;
  993. int decode_next_dat = 0;
  994. int ret;
  995. for (;;) {
  996. length = bytestream2_get_bytes_left(&s->gb);
  997. if (length <= 0) {
  998. if (avctx->codec_id == AV_CODEC_ID_PNG &&
  999. avctx->skip_frame == AVDISCARD_ALL) {
  1000. av_frame_set_metadata(p, metadata);
  1001. return 0;
  1002. }
  1003. if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && length == 0) {
  1004. if (!(s->state & PNG_IDAT))
  1005. return 0;
  1006. else
  1007. goto exit_loop;
  1008. }
  1009. av_log(avctx, AV_LOG_ERROR, "%d bytes left\n", length);
  1010. if ( s->state & PNG_ALLIMAGE
  1011. && avctx->strict_std_compliance <= FF_COMPLIANCE_NORMAL)
  1012. goto exit_loop;
  1013. ret = AVERROR_INVALIDDATA;
  1014. goto fail;
  1015. }
  1016. length = bytestream2_get_be32(&s->gb);
  1017. if (length > 0x7fffffff || length > bytestream2_get_bytes_left(&s->gb)) {
  1018. av_log(avctx, AV_LOG_ERROR, "chunk too big\n");
  1019. ret = AVERROR_INVALIDDATA;
  1020. goto fail;
  1021. }
  1022. tag = bytestream2_get_le32(&s->gb);
  1023. if (avctx->debug & FF_DEBUG_STARTCODE)
  1024. av_log(avctx, AV_LOG_DEBUG, "png: tag=%c%c%c%c length=%u\n",
  1025. (tag & 0xff),
  1026. ((tag >> 8) & 0xff),
  1027. ((tag >> 16) & 0xff),
  1028. ((tag >> 24) & 0xff), length);
  1029. if (avctx->codec_id == AV_CODEC_ID_PNG &&
  1030. avctx->skip_frame == AVDISCARD_ALL) {
  1031. switch(tag) {
  1032. case MKTAG('I', 'H', 'D', 'R'):
  1033. case MKTAG('p', 'H', 'Y', 's'):
  1034. case MKTAG('t', 'E', 'X', 't'):
  1035. case MKTAG('I', 'D', 'A', 'T'):
  1036. case MKTAG('t', 'R', 'N', 'S'):
  1037. break;
  1038. default:
  1039. goto skip_tag;
  1040. }
  1041. }
  1042. switch (tag) {
  1043. case MKTAG('I', 'H', 'D', 'R'):
  1044. if ((ret = decode_ihdr_chunk(avctx, s, length)) < 0)
  1045. goto fail;
  1046. break;
  1047. case MKTAG('p', 'H', 'Y', 's'):
  1048. if ((ret = decode_phys_chunk(avctx, s)) < 0)
  1049. goto fail;
  1050. break;
  1051. case MKTAG('f', 'c', 'T', 'L'):
  1052. if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG)
  1053. goto skip_tag;
  1054. if ((ret = decode_fctl_chunk(avctx, s, length)) < 0)
  1055. goto fail;
  1056. decode_next_dat = 1;
  1057. break;
  1058. case MKTAG('f', 'd', 'A', 'T'):
  1059. if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG)
  1060. goto skip_tag;
  1061. if (!decode_next_dat) {
  1062. ret = AVERROR_INVALIDDATA;
  1063. goto fail;
  1064. }
  1065. bytestream2_get_be32(&s->gb);
  1066. length -= 4;
  1067. /* fallthrough */
  1068. case MKTAG('I', 'D', 'A', 'T'):
  1069. if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && !decode_next_dat)
  1070. goto skip_tag;
  1071. if ((ret = decode_idat_chunk(avctx, s, length, p)) < 0)
  1072. goto fail;
  1073. break;
  1074. case MKTAG('P', 'L', 'T', 'E'):
  1075. if (decode_plte_chunk(avctx, s, length) < 0)
  1076. goto skip_tag;
  1077. break;
  1078. case MKTAG('t', 'R', 'N', 'S'):
  1079. if (decode_trns_chunk(avctx, s, length) < 0)
  1080. goto skip_tag;
  1081. break;
  1082. case MKTAG('t', 'E', 'X', 't'):
  1083. if (decode_text_chunk(s, length, 0, &metadata) < 0)
  1084. av_log(avctx, AV_LOG_WARNING, "Broken tEXt chunk\n");
  1085. bytestream2_skip(&s->gb, length + 4);
  1086. break;
  1087. case MKTAG('z', 'T', 'X', 't'):
  1088. if (decode_text_chunk(s, length, 1, &metadata) < 0)
  1089. av_log(avctx, AV_LOG_WARNING, "Broken zTXt chunk\n");
  1090. bytestream2_skip(&s->gb, length + 4);
  1091. break;
  1092. case MKTAG('s', 'T', 'E', 'R'): {
  1093. int mode = bytestream2_get_byte(&s->gb);
  1094. AVStereo3D *stereo3d = av_stereo3d_create_side_data(p);
  1095. if (!stereo3d)
  1096. goto fail;
  1097. if (mode == 0 || mode == 1) {
  1098. stereo3d->type = AV_STEREO3D_SIDEBYSIDE;
  1099. stereo3d->flags = mode ? 0 : AV_STEREO3D_FLAG_INVERT;
  1100. } else {
  1101. av_log(avctx, AV_LOG_WARNING,
  1102. "Unknown value in sTER chunk (%d)\n", mode);
  1103. }
  1104. bytestream2_skip(&s->gb, 4); /* crc */
  1105. break;
  1106. }
  1107. case MKTAG('I', 'E', 'N', 'D'):
  1108. if (!(s->state & PNG_ALLIMAGE))
  1109. av_log(avctx, AV_LOG_ERROR, "IEND without all image\n");
  1110. if (!(s->state & (PNG_ALLIMAGE|PNG_IDAT))) {
  1111. ret = AVERROR_INVALIDDATA;
  1112. goto fail;
  1113. }
  1114. bytestream2_skip(&s->gb, 4); /* crc */
  1115. goto exit_loop;
  1116. default:
  1117. /* skip tag */
  1118. skip_tag:
  1119. bytestream2_skip(&s->gb, length + 4);
  1120. break;
  1121. }
  1122. }
  1123. exit_loop:
  1124. if (avctx->codec_id == AV_CODEC_ID_PNG &&
  1125. avctx->skip_frame == AVDISCARD_ALL) {
  1126. av_frame_set_metadata(p, metadata);
  1127. return 0;
  1128. }
  1129. if (s->bits_per_pixel <= 4)
  1130. handle_small_bpp(s, p);
  1131. /* apply transparency if needed */
  1132. if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) {
  1133. size_t byte_depth = s->bit_depth > 8 ? 2 : 1;
  1134. size_t raw_bpp = s->bpp - byte_depth;
  1135. unsigned x, y;
  1136. av_assert0(s->bit_depth > 1);
  1137. for (y = 0; y < s->height; ++y) {
  1138. uint8_t *row = &s->image_buf[s->image_linesize * y];
  1139. /* since we're updating in-place, we have to go from right to left */
  1140. for (x = s->width; x > 0; --x) {
  1141. uint8_t *pixel = &row[s->bpp * (x - 1)];
  1142. memmove(pixel, &row[raw_bpp * (x - 1)], raw_bpp);
  1143. if (!memcmp(pixel, s->transparent_color_be, raw_bpp)) {
  1144. memset(&pixel[raw_bpp], 0, byte_depth);
  1145. } else {
  1146. memset(&pixel[raw_bpp], 0xff, byte_depth);
  1147. }
  1148. }
  1149. }
  1150. }
  1151. /* handle P-frames only if a predecessor frame is available */
  1152. if (s->last_picture.f->data[0]) {
  1153. if ( !(avpkt->flags & AV_PKT_FLAG_KEY) && avctx->codec_tag != AV_RL32("MPNG")
  1154. && s->last_picture.f->width == p->width
  1155. && s->last_picture.f->height== p->height
  1156. && s->last_picture.f->format== p->format
  1157. ) {
  1158. if (CONFIG_PNG_DECODER && avctx->codec_id != AV_CODEC_ID_APNG)
  1159. handle_p_frame_png(s, p);
  1160. else if (CONFIG_APNG_DECODER &&
  1161. avctx->codec_id == AV_CODEC_ID_APNG &&
  1162. (ret = handle_p_frame_apng(avctx, s, p)) < 0)
  1163. goto fail;
  1164. }
  1165. }
  1166. ff_thread_report_progress(&s->picture, INT_MAX, 0);
  1167. ff_thread_report_progress(&s->previous_picture, INT_MAX, 0);
  1168. av_frame_set_metadata(p, metadata);
  1169. metadata = NULL;
  1170. return 0;
  1171. fail:
  1172. av_dict_free(&metadata);
  1173. ff_thread_report_progress(&s->picture, INT_MAX, 0);
  1174. ff_thread_report_progress(&s->previous_picture, INT_MAX, 0);
  1175. return ret;
  1176. }
  1177. #if CONFIG_PNG_DECODER
  1178. static int decode_frame_png(AVCodecContext *avctx,
  1179. void *data, int *got_frame,
  1180. AVPacket *avpkt)
  1181. {
  1182. PNGDecContext *const s = avctx->priv_data;
  1183. const uint8_t *buf = avpkt->data;
  1184. int buf_size = avpkt->size;
  1185. AVFrame *p;
  1186. int64_t sig;
  1187. int ret;
  1188. ff_thread_release_buffer(avctx, &s->last_picture);
  1189. FFSWAP(ThreadFrame, s->picture, s->last_picture);
  1190. p = s->picture.f;
  1191. bytestream2_init(&s->gb, buf, buf_size);
  1192. /* check signature */
  1193. sig = bytestream2_get_be64(&s->gb);
  1194. if (sig != PNGSIG &&
  1195. sig != MNGSIG) {
  1196. av_log(avctx, AV_LOG_ERROR, "Invalid PNG signature 0x%08"PRIX64".\n", sig);
  1197. return AVERROR_INVALIDDATA;
  1198. }
  1199. s->y = s->state = s->has_trns = 0;
  1200. /* init the zlib */
  1201. s->zstream.zalloc = ff_png_zalloc;
  1202. s->zstream.zfree = ff_png_zfree;
  1203. s->zstream.opaque = NULL;
  1204. ret = inflateInit(&s->zstream);
  1205. if (ret != Z_OK) {
  1206. av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
  1207. return AVERROR_EXTERNAL;
  1208. }
  1209. if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
  1210. goto the_end;
  1211. if (avctx->skip_frame == AVDISCARD_ALL) {
  1212. *got_frame = 0;
  1213. ret = bytestream2_tell(&s->gb);
  1214. goto the_end;
  1215. }
  1216. if ((ret = av_frame_ref(data, s->picture.f)) < 0)
  1217. return ret;
  1218. *got_frame = 1;
  1219. ret = bytestream2_tell(&s->gb);
  1220. the_end:
  1221. inflateEnd(&s->zstream);
  1222. s->crow_buf = NULL;
  1223. return ret;
  1224. }
  1225. #endif
  1226. #if CONFIG_APNG_DECODER
  1227. static int decode_frame_apng(AVCodecContext *avctx,
  1228. void *data, int *got_frame,
  1229. AVPacket *avpkt)
  1230. {
  1231. PNGDecContext *const s = avctx->priv_data;
  1232. int ret;
  1233. AVFrame *p;
  1234. ff_thread_release_buffer(avctx, &s->last_picture);
  1235. FFSWAP(ThreadFrame, s->picture, s->last_picture);
  1236. p = s->picture.f;
  1237. if (!(s->state & PNG_IHDR)) {
  1238. if (!avctx->extradata_size)
  1239. return AVERROR_INVALIDDATA;
  1240. /* only init fields, there is no zlib use in extradata */
  1241. s->zstream.zalloc = ff_png_zalloc;
  1242. s->zstream.zfree = ff_png_zfree;
  1243. bytestream2_init(&s->gb, avctx->extradata, avctx->extradata_size);
  1244. if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
  1245. goto end;
  1246. }
  1247. /* reset state for a new frame */
  1248. if ((ret = inflateInit(&s->zstream)) != Z_OK) {
  1249. av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
  1250. ret = AVERROR_EXTERNAL;
  1251. goto end;
  1252. }
  1253. s->y = 0;
  1254. s->state &= ~(PNG_IDAT | PNG_ALLIMAGE);
  1255. bytestream2_init(&s->gb, avpkt->data, avpkt->size);
  1256. if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
  1257. goto end;
  1258. if (!(s->state & PNG_ALLIMAGE))
  1259. av_log(avctx, AV_LOG_WARNING, "Frame did not contain a complete image\n");
  1260. if (!(s->state & (PNG_ALLIMAGE|PNG_IDAT))) {
  1261. ret = AVERROR_INVALIDDATA;
  1262. goto end;
  1263. }
  1264. if ((ret = av_frame_ref(data, s->picture.f)) < 0)
  1265. goto end;
  1266. *got_frame = 1;
  1267. ret = bytestream2_tell(&s->gb);
  1268. end:
  1269. inflateEnd(&s->zstream);
  1270. return ret;
  1271. }
  1272. #endif
  1273. #if HAVE_THREADS
  1274. static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
  1275. {
  1276. PNGDecContext *psrc = src->priv_data;
  1277. PNGDecContext *pdst = dst->priv_data;
  1278. int ret;
  1279. if (dst == src)
  1280. return 0;
  1281. ff_thread_release_buffer(dst, &pdst->picture);
  1282. if (psrc->picture.f->data[0] &&
  1283. (ret = ff_thread_ref_frame(&pdst->picture, &psrc->picture)) < 0)
  1284. return ret;
  1285. if (CONFIG_APNG_DECODER && dst->codec_id == AV_CODEC_ID_APNG) {
  1286. pdst->width = psrc->width;
  1287. pdst->height = psrc->height;
  1288. pdst->bit_depth = psrc->bit_depth;
  1289. pdst->color_type = psrc->color_type;
  1290. pdst->compression_type = psrc->compression_type;
  1291. pdst->interlace_type = psrc->interlace_type;
  1292. pdst->filter_type = psrc->filter_type;
  1293. pdst->cur_w = psrc->cur_w;
  1294. pdst->cur_h = psrc->cur_h;
  1295. pdst->x_offset = psrc->x_offset;
  1296. pdst->y_offset = psrc->y_offset;
  1297. pdst->has_trns = psrc->has_trns;
  1298. memcpy(pdst->transparent_color_be, psrc->transparent_color_be, sizeof(pdst->transparent_color_be));
  1299. pdst->dispose_op = psrc->dispose_op;
  1300. memcpy(pdst->palette, psrc->palette, sizeof(pdst->palette));
  1301. pdst->state |= psrc->state & (PNG_IHDR | PNG_PLTE);
  1302. ff_thread_release_buffer(dst, &pdst->last_picture);
  1303. if (psrc->last_picture.f->data[0] &&
  1304. (ret = ff_thread_ref_frame(&pdst->last_picture, &psrc->last_picture)) < 0)
  1305. return ret;
  1306. ff_thread_release_buffer(dst, &pdst->previous_picture);
  1307. if (psrc->previous_picture.f->data[0] &&
  1308. (ret = ff_thread_ref_frame(&pdst->previous_picture, &psrc->previous_picture)) < 0)
  1309. return ret;
  1310. }
  1311. return 0;
  1312. }
  1313. #endif
  1314. static av_cold int png_dec_init(AVCodecContext *avctx)
  1315. {
  1316. PNGDecContext *s = avctx->priv_data;
  1317. avctx->color_range = AVCOL_RANGE_JPEG;
  1318. s->avctx = avctx;
  1319. s->previous_picture.f = av_frame_alloc();
  1320. s->last_picture.f = av_frame_alloc();
  1321. s->picture.f = av_frame_alloc();
  1322. if (!s->previous_picture.f || !s->last_picture.f || !s->picture.f) {
  1323. av_frame_free(&s->previous_picture.f);
  1324. av_frame_free(&s->last_picture.f);
  1325. av_frame_free(&s->picture.f);
  1326. return AVERROR(ENOMEM);
  1327. }
  1328. if (!avctx->internal->is_copy) {
  1329. avctx->internal->allocate_progress = 1;
  1330. ff_pngdsp_init(&s->dsp);
  1331. }
  1332. return 0;
  1333. }
  1334. static av_cold int png_dec_end(AVCodecContext *avctx)
  1335. {
  1336. PNGDecContext *s = avctx->priv_data;
  1337. ff_thread_release_buffer(avctx, &s->previous_picture);
  1338. av_frame_free(&s->previous_picture.f);
  1339. ff_thread_release_buffer(avctx, &s->last_picture);
  1340. av_frame_free(&s->last_picture.f);
  1341. ff_thread_release_buffer(avctx, &s->picture);
  1342. av_frame_free(&s->picture.f);
  1343. av_freep(&s->buffer);
  1344. s->buffer_size = 0;
  1345. av_freep(&s->last_row);
  1346. s->last_row_size = 0;
  1347. av_freep(&s->tmp_row);
  1348. s->tmp_row_size = 0;
  1349. return 0;
  1350. }
  1351. #if CONFIG_APNG_DECODER
  1352. AVCodec ff_apng_decoder = {
  1353. .name = "apng",
  1354. .long_name = NULL_IF_CONFIG_SMALL("APNG (Animated Portable Network Graphics) image"),
  1355. .type = AVMEDIA_TYPE_VIDEO,
  1356. .id = AV_CODEC_ID_APNG,
  1357. .priv_data_size = sizeof(PNGDecContext),
  1358. .init = png_dec_init,
  1359. .close = png_dec_end,
  1360. .decode = decode_frame_apng,
  1361. .init_thread_copy = ONLY_IF_THREADS_ENABLED(png_dec_init),
  1362. .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
  1363. .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
  1364. };
  1365. #endif
  1366. #if CONFIG_PNG_DECODER
  1367. AVCodec ff_png_decoder = {
  1368. .name = "png",
  1369. .long_name = NULL_IF_CONFIG_SMALL("PNG (Portable Network Graphics) image"),
  1370. .type = AVMEDIA_TYPE_VIDEO,
  1371. .id = AV_CODEC_ID_PNG,
  1372. .priv_data_size = sizeof(PNGDecContext),
  1373. .init = png_dec_init,
  1374. .close = png_dec_end,
  1375. .decode = decode_frame_png,
  1376. .init_thread_copy = ONLY_IF_THREADS_ENABLED(png_dec_init),
  1377. .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
  1378. .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
  1379. .caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
  1380. };
  1381. #endif