/*
 * PNG image format
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

//#define DEBUG

#include "libavutil/avassert.h"
#include "libavutil/bprint.h"
#include "libavutil/imgutils.h"
#include "libavutil/stereo3d.h"
#include "libavutil/mastering_display_metadata.h"

#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"
#include "apng.h"
#include "png.h"
#include "pngdsp.h"
#include "thread.h"

#include <zlib.h>

enum PNGHeaderState {
    PNG_IHDR = 1 << 0,
    PNG_PLTE = 1 << 1,
};

enum PNGImageState {
    PNG_IDAT     = 1 << 0,
    PNG_ALLIMAGE = 1 << 1,
};

typedef struct PNGDecContext {
    PNGDSPContext dsp;
    AVCodecContext *avctx;

    GetByteContext gb;
    ThreadFrame previous_picture;
    ThreadFrame last_picture;
    ThreadFrame picture;

    enum PNGHeaderState hdr_state;
    enum PNGImageState pic_state;
    int width, height;
    int cur_w, cur_h;
    int last_w, last_h;
    int x_offset, y_offset;
    int last_x_offset, last_y_offset;
    uint8_t dispose_op, blend_op;
    uint8_t last_dispose_op;
    int bit_depth;
    int color_type;
    int compression_type;
    int interlace_type;
    int filter_type;
    int channels;
    int bits_per_pixel;
    int bpp;
    int has_trns;
    uint8_t transparent_color_be[6];

    uint8_t *image_buf;
    int image_linesize;
    uint32_t palette[256];
    uint8_t *crow_buf;
    uint8_t *last_row;
    unsigned int last_row_size;
    uint8_t *tmp_row;
    unsigned int tmp_row_size;
    uint8_t *buffer;
    int buffer_size;
    int pass;
    int crow_size;     /* compressed row size (includes the filter type byte) */
    int row_size;      /* decompressed row size */
    int pass_row_size; /* decompressed row size of the current pass */
    int y;
    z_stream zstream;
} PNGDecContext;

/* Mask to determine which pixels are valid in a pass */
static const uint8_t png_pass_mask[NB_PASSES] = {
    0x01, 0x01, 0x11, 0x11, 0x55, 0x55, 0xff,
};

/* Mask to determine which y pixels can be written in a pass */
static const uint8_t png_pass_dsp_ymask[NB_PASSES] = {
    0xff, 0xff, 0x0f, 0xff, 0x33, 0xff, 0x55,
};

/* Mask to determine which pixels to overwrite while displaying */
static const uint8_t png_pass_dsp_mask[NB_PASSES] = {
    0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff
};
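
/*
 * Each mask above is indexed by the Adam7 pass number and describes an
 * 8-pixel horizontal group: a pixel at offset j = x & 7 within the group is
 * selected when bit (7 - j) of the mask is set, i.e. when
 * ((mask << j) & 0x80) is nonzero, which is exactly the test used in
 * png_put_interlaced_row() below.
 */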

/* NOTE: we try to construct a good looking image at each pass. width
 * is the original image width. We also do pixel format conversion at
 * this stage */
static void png_put_interlaced_row(uint8_t *dst, int width,
                                   int bits_per_pixel, int pass,
                                   int color_type, const uint8_t *src)
{
    int x, mask, dsp_mask, j, src_x, b, bpp;
    uint8_t *d;
    const uint8_t *s;

    mask     = png_pass_mask[pass];
    dsp_mask = png_pass_dsp_mask[pass];

    switch (bits_per_pixel) {
    case 1:
        src_x = 0;
        for (x = 0; x < width; x++) {
            j = (x & 7);
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 3] >> (7 - (src_x & 7))) & 1;
                dst[x >> 3] &= 0xFF7F >> j;
                dst[x >> 3] |= b << (7 - j);
            }
            if ((mask << j) & 0x80)
                src_x++;
        }
        break;
    case 2:
        src_x = 0;
        for (x = 0; x < width; x++) {
            int j2 = 2 * (x & 3);
            j = (x & 7);
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 2] >> (6 - 2 * (src_x & 3))) & 3;
                dst[x >> 2] &= 0xFF3F >> j2;
                dst[x >> 2] |= b << (6 - j2);
            }
            if ((mask << j) & 0x80)
                src_x++;
        }
        break;
    case 4:
        src_x = 0;
        for (x = 0; x < width; x++) {
            int j2 = 4 * (x & 1);
            j = (x & 7);
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 1] >> (4 - 4 * (src_x & 1))) & 15;
                dst[x >> 1] &= 0xFF0F >> j2;
                dst[x >> 1] |= b << (4 - j2);
            }
            if ((mask << j) & 0x80)
                src_x++;
        }
        break;
    default:
        bpp = bits_per_pixel >> 3;
        d   = dst;
        s   = src;
        for (x = 0; x < width; x++) {
            j = x & 7;
            if ((dsp_mask << j) & 0x80) {
                memcpy(d, s, bpp);
            }
            d += bpp;
            if ((mask << j) & 0x80)
                s += bpp;
        }
        break;
    }
}
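
/*
 * Paeth predictor (PNG filter type 4): for each byte, look at the left (a),
 * above (b) and upper-left (c) neighbours and predict from whichever of
 * a, b, c is closest to a + b - c (ties broken in that order), then add the
 * coded delta from src.
 */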
void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top,
                                 int w, int bpp)
{
    int i;

    for (i = 0; i < w; i++) {
        int a, b, c, p, pa, pb, pc;

        a = dst[i - bpp];
        b = top[i];
        c = top[i - bpp];

        p  = b - c;
        pc = a - c;

        pa = abs(p);
        pb = abs(pc);
        pc = abs(p + pc);

        if (pa <= pb && pa <= pc)
            p = a;
        else if (pb <= pc)
            p = b;
        else
            p = c;
        dst[i] = p + src[i];
    }
}

#define UNROLL1(bpp, op) \
    { \
        r = dst[0]; \
        if (bpp >= 2) \
            g = dst[1]; \
        if (bpp >= 3) \
            b = dst[2]; \
        if (bpp >= 4) \
            a = dst[3]; \
        for (; i <= size - bpp; i += bpp) { \
            dst[i + 0] = r = op(r, src[i + 0], last[i + 0]); \
            if (bpp == 1) \
                continue; \
            dst[i + 1] = g = op(g, src[i + 1], last[i + 1]); \
            if (bpp == 2) \
                continue; \
            dst[i + 2] = b = op(b, src[i + 2], last[i + 2]); \
            if (bpp == 3) \
                continue; \
            dst[i + 3] = a = op(a, src[i + 3], last[i + 3]); \
        } \
    }

#define UNROLL_FILTER(op) \
    if (bpp == 1) { \
        UNROLL1(1, op) \
    } else if (bpp == 2) { \
        UNROLL1(2, op) \
    } else if (bpp == 3) { \
        UNROLL1(3, op) \
    } else if (bpp == 4) { \
        UNROLL1(4, op) \
    } \
    for (; i < size; i++) { \
        dst[i] = op(dst[i - bpp], src[i], last[i]); \
    }

/* NOTE: 'dst' can be equal to 'last' */
static void png_filter_row(PNGDSPContext *dsp, uint8_t *dst, int filter_type,
                           uint8_t *src, uint8_t *last, int size, int bpp)
{
    int i, p, r, g, b, a;

    switch (filter_type) {
    case PNG_FILTER_VALUE_NONE:
        memcpy(dst, src, size);
        break;
    case PNG_FILTER_VALUE_SUB:
        for (i = 0; i < bpp; i++)
            dst[i] = src[i];
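
        /* With 4 bytes per pixel, the SUB filter can be applied to all four
         * channels at once: mask off the top bit of every byte, add the two
         * 32-bit words, then restore the carry-less top bits with XOR.
         * This is a SIMD-within-a-register byte-wise addition modulo 256. */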
        if (bpp == 4) {
            p = *(int *)dst;
            for (; i < size; i += bpp) {
                unsigned s = *(int *)(src + i);
                p = ((s & 0x7f7f7f7f) + (p & 0x7f7f7f7f)) ^ ((s ^ p) & 0x80808080);
                *(int *)(dst + i) = p;
            }
        } else {
#define OP_SUB(x, s, l) ((x) + (s))
            UNROLL_FILTER(OP_SUB);
        }
        break;
    case PNG_FILTER_VALUE_UP:
        dsp->add_bytes_l2(dst, src, last, size);
        break;
    case PNG_FILTER_VALUE_AVG:
        for (i = 0; i < bpp; i++) {
            p      = (last[i] >> 1);
            dst[i] = p + src[i];
        }
#define OP_AVG(x, s, l) (((((x) + (l)) >> 1) + (s)) & 0xff)
        UNROLL_FILTER(OP_AVG);
        break;
    case PNG_FILTER_VALUE_PAETH:
        for (i = 0; i < bpp; i++) {
            p      = last[i];
            dst[i] = p + src[i];
        }
        if (bpp > 2 && size > 4) {
            /* would write off the end of the array if we let it process
             * the last pixel with bpp=3 */
            int w = (bpp & 3) ? size - 3 : size;

            if (w > i) {
                dsp->add_paeth_prediction(dst + i, src + i, last + i, size - i, bpp);
                i = w;
            }
        }
        ff_add_png_paeth_prediction(dst + i, src + i, last + i, size - i, bpp);
        break;
    }
}

/* This used to be called "deloco" in FFmpeg
 * and is actually an inverse reversible colorspace transformation */
#define YUV2RGB(NAME, TYPE) \
static void deloco_ ## NAME(TYPE *dst, int size, int alpha) \
{ \
    int i; \
    for (i = 0; i < size; i += 3 + alpha) { \
        int g = dst[i + 1]; \
        dst[i + 0] += g; \
        dst[i + 2] += g; \
    } \
}

YUV2RGB(rgb8, uint8_t)
YUV2RGB(rgb16, uint16_t)

/* process exactly one decompressed row */
static void png_handle_row(PNGDecContext *s)
{
    uint8_t *ptr, *last_row;
    int got_line;

    if (!s->interlace_type) {
        ptr = s->image_buf + s->image_linesize * (s->y + s->y_offset) + s->x_offset * s->bpp;
        if (s->y == 0)
            last_row = s->last_row;
        else
            last_row = ptr - s->image_linesize;

        png_filter_row(&s->dsp, ptr, s->crow_buf[0], s->crow_buf + 1,
                       last_row, s->row_size, s->bpp);
        /* loco lags by 1 row so that it doesn't interfere with top prediction */
        if (s->filter_type == PNG_FILTER_TYPE_LOCO && s->y > 0) {
            if (s->bit_depth == 16) {
                deloco_rgb16((uint16_t *)(ptr - s->image_linesize), s->row_size / 2,
                             s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
            } else {
                deloco_rgb8(ptr - s->image_linesize, s->row_size,
                            s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
            }
        }
        s->y++;
        if (s->y == s->cur_h) {
            s->pic_state |= PNG_ALLIMAGE;
            if (s->filter_type == PNG_FILTER_TYPE_LOCO) {
                if (s->bit_depth == 16) {
                    deloco_rgb16((uint16_t *)ptr, s->row_size / 2,
                                 s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
                } else {
                    deloco_rgb8(ptr, s->row_size,
                                s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
                }
            }
        }
    } else {
        got_line = 0;
        for (;;) {
            ptr = s->image_buf + s->image_linesize * (s->y + s->y_offset) + s->x_offset * s->bpp;
            if ((ff_png_pass_ymask[s->pass] << (s->y & 7)) & 0x80) {
                /* if we already read one row, it is time to stop to
                 * wait for the next one */
                if (got_line)
                    break;
                png_filter_row(&s->dsp, s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
                               s->last_row, s->pass_row_size, s->bpp);
                FFSWAP(uint8_t *, s->last_row, s->tmp_row);
                FFSWAP(unsigned int, s->last_row_size, s->tmp_row_size);
                got_line = 1;
            }
            if ((png_pass_dsp_ymask[s->pass] << (s->y & 7)) & 0x80) {
                png_put_interlaced_row(ptr, s->cur_w, s->bits_per_pixel, s->pass,
                                       s->color_type, s->last_row);
            }
            s->y++;
            if (s->y == s->cur_h) {
                memset(s->last_row, 0, s->row_size);
                for (;;) {
                    if (s->pass == NB_PASSES - 1) {
                        s->pic_state |= PNG_ALLIMAGE;
                        goto the_end;
                    } else {
                        s->pass++;
                        s->y = 0;
                        s->pass_row_size = ff_png_pass_row_size(s->pass,
                                                                s->bits_per_pixel,
                                                                s->cur_w);
                        s->crow_size = s->pass_row_size + 1;
                        if (s->pass_row_size != 0)
                            break;
                        /* skip pass if empty row */
                    }
                }
            }
        }
the_end:;
    }
}

static int png_decode_idat(PNGDecContext *s, int length)
{
    int ret;

    s->zstream.avail_in = FFMIN(length, bytestream2_get_bytes_left(&s->gb));
    s->zstream.next_in  = (unsigned char *)s->gb.buffer;
    bytestream2_skip(&s->gb, length);

    /* decode one line if possible */
    while (s->zstream.avail_in > 0) {
        ret = inflate(&s->zstream, Z_PARTIAL_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END) {
            av_log(s->avctx, AV_LOG_ERROR, "inflate returned error %d\n", ret);
            return AVERROR_EXTERNAL;
        }
        if (s->zstream.avail_out == 0) {
            if (!(s->pic_state & PNG_ALLIMAGE)) {
                png_handle_row(s);
            }
            s->zstream.avail_out = s->crow_size;
            s->zstream.next_out  = s->crow_buf;
        }
        if (ret == Z_STREAM_END && s->zstream.avail_in > 0) {
            av_log(NULL, AV_LOG_WARNING,
                   "%d undecompressed bytes left in buffer\n", s->zstream.avail_in);
            return 0;
        }
    }
    return 0;
}

static int decode_zbuf(AVBPrint *bp, const uint8_t *data,
                       const uint8_t *data_end)
{
    z_stream zstream;
    unsigned char *buf;
    unsigned buf_size;
    int ret;

    zstream.zalloc = ff_png_zalloc;
    zstream.zfree  = ff_png_zfree;
    zstream.opaque = NULL;
    if (inflateInit(&zstream) != Z_OK)
        return AVERROR_EXTERNAL;
    zstream.next_in  = (unsigned char *)data;
    zstream.avail_in = data_end - data;
    av_bprint_init(bp, 0, -1);

    while (zstream.avail_in > 0) {
        av_bprint_get_buffer(bp, 2, &buf, &buf_size);
        if (buf_size < 2) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        zstream.next_out  = buf;
        zstream.avail_out = buf_size - 1;
        ret = inflate(&zstream, Z_PARTIAL_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END) {
            ret = AVERROR_EXTERNAL;
            goto fail;
        }
        bp->len += zstream.next_out - buf;
        if (ret == Z_STREAM_END)
            break;
    }
    inflateEnd(&zstream);
    bp->str[bp->len] = 0;
    return 0;

fail:
    inflateEnd(&zstream);
    av_bprint_finalize(bp, NULL);
    return ret;
}
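
/*
 * PNG tEXt/zTXt payloads are Latin-1 (ISO 8859-1), while FFmpeg metadata is
 * expected to be UTF-8. Bytes below 0x80 are copied unchanged; every byte
 * >= 0x80 expands to a two-byte UTF-8 sequence, hence the extra allocation.
 */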
static uint8_t *iso88591_to_utf8(const uint8_t *in, size_t size_in)
{
    size_t extra = 0, i;
    uint8_t *out, *q;

    for (i = 0; i < size_in; i++)
        extra += in[i] >= 0x80;
    if (size_in == SIZE_MAX || extra > SIZE_MAX - size_in - 1)
        return NULL;
    q = out = av_malloc(size_in + extra + 1);
    if (!out)
        return NULL;
    for (i = 0; i < size_in; i++) {
        if (in[i] >= 0x80) {
            *(q++) = 0xC0 | (in[i] >> 6);
            *(q++) = 0x80 | (in[i] & 0x3F);
        } else {
            *(q++) = in[i];
        }
    }
    *(q++) = 0;
    return out;
}

static int decode_text_chunk(PNGDecContext *s, uint32_t length, int compressed,
                             AVDictionary **dict)
{
    int ret, method;
    const uint8_t *data        = s->gb.buffer;
    const uint8_t *data_end    = data + length;
    const uint8_t *keyword     = data;
    const uint8_t *keyword_end = memchr(keyword, 0, data_end - keyword);
    uint8_t *kw_utf8 = NULL, *text, *txt_utf8 = NULL;
    unsigned text_len;
    AVBPrint bp;

    if (!keyword_end)
        return AVERROR_INVALIDDATA;
    data = keyword_end + 1;

    if (compressed) {
        if (data == data_end)
            return AVERROR_INVALIDDATA;
        method = *(data++);
        if (method)
            return AVERROR_INVALIDDATA;
        if ((ret = decode_zbuf(&bp, data, data_end)) < 0)
            return ret;
        text_len = bp.len;
        ret = av_bprint_finalize(&bp, (char **)&text);
        if (ret < 0)
            return ret;
    } else {
        text     = (uint8_t *)data;
        text_len = data_end - text;
    }

    kw_utf8  = iso88591_to_utf8(keyword, keyword_end - keyword);
    txt_utf8 = iso88591_to_utf8(text, text_len);
    if (text != data)
        av_free(text);
    if (!(kw_utf8 && txt_utf8)) {
        av_free(kw_utf8);
        av_free(txt_utf8);
        return AVERROR(ENOMEM);
    }

    av_dict_set(dict, kw_utf8, txt_utf8,
                AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
    return 0;
}

static int decode_ihdr_chunk(AVCodecContext *avctx, PNGDecContext *s,
                             uint32_t length)
{
    if (length != 13)
        return AVERROR_INVALIDDATA;

    if (s->pic_state & PNG_IDAT) {
        av_log(avctx, AV_LOG_ERROR, "IHDR after IDAT\n");
        return AVERROR_INVALIDDATA;
    }

    if (s->hdr_state & PNG_IHDR) {
        av_log(avctx, AV_LOG_ERROR, "Multiple IHDR\n");
        return AVERROR_INVALIDDATA;
    }

    s->width  = s->cur_w = bytestream2_get_be32(&s->gb);
    s->height = s->cur_h = bytestream2_get_be32(&s->gb);
    if (av_image_check_size(s->width, s->height, 0, avctx)) {
        s->cur_w = s->cur_h = s->width = s->height = 0;
        av_log(avctx, AV_LOG_ERROR, "Invalid image size\n");
        return AVERROR_INVALIDDATA;
    }
    s->bit_depth = bytestream2_get_byte(&s->gb);
    if (s->bit_depth != 1 && s->bit_depth != 2 && s->bit_depth != 4 &&
        s->bit_depth != 8 && s->bit_depth != 16) {
        av_log(avctx, AV_LOG_ERROR, "Invalid bit depth\n");
        goto error;
    }
    s->color_type       = bytestream2_get_byte(&s->gb);
    s->compression_type = bytestream2_get_byte(&s->gb);
    s->filter_type      = bytestream2_get_byte(&s->gb);
    s->interlace_type   = bytestream2_get_byte(&s->gb);
    bytestream2_skip(&s->gb, 4); /* crc */
    s->hdr_state |= PNG_IHDR;

    if (avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(avctx, AV_LOG_DEBUG, "width=%d height=%d depth=%d color_type=%d "
               "compression_type=%d filter_type=%d interlace_type=%d\n",
               s->width, s->height, s->bit_depth, s->color_type,
               s->compression_type, s->filter_type, s->interlace_type);

    return 0;
error:
    s->cur_w = s->cur_h = s->width = s->height = 0;
    s->bit_depth = 8;
    return AVERROR_INVALIDDATA;
}

static int decode_phys_chunk(AVCodecContext *avctx, PNGDecContext *s)
{
    if (s->pic_state & PNG_IDAT) {
        av_log(avctx, AV_LOG_ERROR, "pHYs after IDAT\n");
        return AVERROR_INVALIDDATA;
    }
    avctx->sample_aspect_ratio.num = bytestream2_get_be32(&s->gb);
    avctx->sample_aspect_ratio.den = bytestream2_get_be32(&s->gb);
    if (avctx->sample_aspect_ratio.num < 0 || avctx->sample_aspect_ratio.den < 0)
        avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
    bytestream2_skip(&s->gb, 1); /* unit specifier */
    bytestream2_skip(&s->gb, 4); /* crc */

    return 0;
}

static int decode_idat_chunk(AVCodecContext *avctx, PNGDecContext *s,
                             uint32_t length, AVFrame *p)
{
    int ret;
    size_t byte_depth = s->bit_depth > 8 ? 2 : 1;

    if (!(s->hdr_state & PNG_IHDR)) {
        av_log(avctx, AV_LOG_ERROR, "IDAT without IHDR\n");
        return AVERROR_INVALIDDATA;
    }
    if (!(s->pic_state & PNG_IDAT)) {
        /* init image info */
        ret = ff_set_dimensions(avctx, s->width, s->height);
        if (ret < 0)
            return ret;

        s->channels       = ff_png_get_nb_channels(s->color_type);
        s->bits_per_pixel = s->bit_depth * s->channels;
        s->bpp            = (s->bits_per_pixel + 7) >> 3;
        s->row_size       = (s->cur_w * s->bits_per_pixel + 7) >> 3;

        if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
            s->color_type == PNG_COLOR_TYPE_RGB) {
            avctx->pix_fmt = AV_PIX_FMT_RGB24;
        } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
                   s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
            avctx->pix_fmt = AV_PIX_FMT_RGBA;
        } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
                   s->color_type == PNG_COLOR_TYPE_GRAY) {
            avctx->pix_fmt = AV_PIX_FMT_GRAY8;
        } else if (s->bit_depth == 16 &&
                   s->color_type == PNG_COLOR_TYPE_GRAY) {
            avctx->pix_fmt = AV_PIX_FMT_GRAY16BE;
        } else if (s->bit_depth == 16 &&
                   s->color_type == PNG_COLOR_TYPE_RGB) {
            avctx->pix_fmt = AV_PIX_FMT_RGB48BE;
        } else if (s->bit_depth == 16 &&
                   s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
            avctx->pix_fmt = AV_PIX_FMT_RGBA64BE;
        } else if ((s->bits_per_pixel == 1 || s->bits_per_pixel == 2 || s->bits_per_pixel == 4 || s->bits_per_pixel == 8) &&
                   s->color_type == PNG_COLOR_TYPE_PALETTE) {
            avctx->pix_fmt = AV_PIX_FMT_PAL8;
        } else if (s->bit_depth == 1 && s->bits_per_pixel == 1 && avctx->codec_id != AV_CODEC_ID_APNG) {
            avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
        } else if (s->bit_depth == 8 &&
                   s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
            avctx->pix_fmt = AV_PIX_FMT_YA8;
        } else if (s->bit_depth == 16 &&
                   s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
            avctx->pix_fmt = AV_PIX_FMT_YA16BE;
        } else {
            avpriv_report_missing_feature(avctx,
                                          "Bit depth %d color type %d",
                                          s->bit_depth, s->color_type);
            return AVERROR_PATCHWELCOME;
        }

        if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) {
            switch (avctx->pix_fmt) {
            case AV_PIX_FMT_RGB24:
                avctx->pix_fmt = AV_PIX_FMT_RGBA;
                break;
            case AV_PIX_FMT_RGB48BE:
                avctx->pix_fmt = AV_PIX_FMT_RGBA64BE;
                break;
            case AV_PIX_FMT_GRAY8:
                avctx->pix_fmt = AV_PIX_FMT_YA8;
                break;
            case AV_PIX_FMT_GRAY16BE:
                avctx->pix_fmt = AV_PIX_FMT_YA16BE;
                break;
            default:
                avpriv_request_sample(avctx, "bit depth %d "
                                      "and color type %d with TRNS",
                                      s->bit_depth, s->color_type);
                return AVERROR_INVALIDDATA;
            }

            s->bpp += byte_depth;
        }

        if ((ret = ff_thread_get_buffer(avctx, &s->picture, AV_GET_BUFFER_FLAG_REF)) < 0)
            return ret;
        if (avctx->codec_id == AV_CODEC_ID_APNG && s->last_dispose_op != APNG_DISPOSE_OP_PREVIOUS) {
            ff_thread_release_buffer(avctx, &s->previous_picture);
            if ((ret = ff_thread_get_buffer(avctx, &s->previous_picture, AV_GET_BUFFER_FLAG_REF)) < 0)
                return ret;
        }
        p->pict_type        = AV_PICTURE_TYPE_I;
        p->key_frame        = 1;
        p->interlaced_frame = !!s->interlace_type;

        ff_thread_finish_setup(avctx);

        /* compute the compressed row size */
        if (!s->interlace_type) {
            s->crow_size = s->row_size + 1;
        } else {
            s->pass          = 0;
            s->pass_row_size = ff_png_pass_row_size(s->pass,
                                                    s->bits_per_pixel,
                                                    s->cur_w);
            s->crow_size = s->pass_row_size + 1;
        }
        ff_dlog(avctx, "row_size=%d crow_size =%d\n",
                s->row_size, s->crow_size);
        s->image_buf      = p->data[0];
        s->image_linesize = p->linesize[0];
        /* copy the palette if needed */
        if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
            memcpy(p->data[1], s->palette, 256 * sizeof(uint32_t));
        /* empty row is used if differencing to the first row */
        av_fast_padded_mallocz(&s->last_row, &s->last_row_size, s->row_size);
        if (!s->last_row)
            return AVERROR_INVALIDDATA;
        if (s->interlace_type ||
            s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
            av_fast_padded_malloc(&s->tmp_row, &s->tmp_row_size, s->row_size);
            if (!s->tmp_row)
                return AVERROR_INVALIDDATA;
        }
        /* compressed row */
        av_fast_padded_malloc(&s->buffer, &s->buffer_size, s->row_size + 16);
        if (!s->buffer)
            return AVERROR(ENOMEM);

        /* we want crow_buf+1 to be 16-byte aligned */
        s->crow_buf          = s->buffer + 15;
        s->zstream.avail_out = s->crow_size;
        s->zstream.next_out  = s->crow_buf;
    }

    s->pic_state |= PNG_IDAT;

    /* set image to non-transparent bpp while decompressing */
    if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE)
        s->bpp -= byte_depth;

    ret = png_decode_idat(s, length);

    if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE)
        s->bpp += byte_depth;

    if (ret < 0)
        return ret;

    bytestream2_skip(&s->gb, 4); /* crc */

    return 0;
}

static int decode_plte_chunk(AVCodecContext *avctx, PNGDecContext *s,
                             uint32_t length)
{
    int n, i, r, g, b;

    if ((length % 3) != 0 || length > 256 * 3)
        return AVERROR_INVALIDDATA;
    /* read the palette */
    n = length / 3;
    for (i = 0; i < n; i++) {
        r = bytestream2_get_byte(&s->gb);
        g = bytestream2_get_byte(&s->gb);
        b = bytestream2_get_byte(&s->gb);
        s->palette[i] = (0xFFU << 24) | (r << 16) | (g << 8) | b;
    }
    for (; i < 256; i++)
        s->palette[i] = (0xFFU << 24);
    s->hdr_state |= PNG_PLTE;
    bytestream2_skip(&s->gb, 4); /* crc */

    return 0;
}

static int decode_trns_chunk(AVCodecContext *avctx, PNGDecContext *s,
                             uint32_t length)
{
    int v, i;

    if (!(s->hdr_state & PNG_IHDR)) {
        av_log(avctx, AV_LOG_ERROR, "trns before IHDR\n");
        return AVERROR_INVALIDDATA;
    }

    if (s->pic_state & PNG_IDAT) {
        av_log(avctx, AV_LOG_ERROR, "trns after IDAT\n");
        return AVERROR_INVALIDDATA;
    }

    if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
        if (length > 256 || !(s->hdr_state & PNG_PLTE))
            return AVERROR_INVALIDDATA;

        for (i = 0; i < length; i++) {
            unsigned v = bytestream2_get_byte(&s->gb);
            s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);
        }
    } else if (s->color_type == PNG_COLOR_TYPE_GRAY || s->color_type == PNG_COLOR_TYPE_RGB) {
        if ((s->color_type == PNG_COLOR_TYPE_GRAY && length != 2) ||
            (s->color_type == PNG_COLOR_TYPE_RGB && length != 6) ||
            s->bit_depth == 1)
            return AVERROR_INVALIDDATA;

        for (i = 0; i < length / 2; i++) {
            /* only use the least significant bits */
            v = av_mod_uintp2(bytestream2_get_be16(&s->gb), s->bit_depth);

            if (s->bit_depth > 8)
                AV_WB16(&s->transparent_color_be[2 * i], v);
            else
                s->transparent_color_be[i] = v;
        }
    } else {
        return AVERROR_INVALIDDATA;
    }

    bytestream2_skip(&s->gb, 4); /* crc */
    s->has_trns = 1;

    return 0;
}

static int decode_iccp_chunk(PNGDecContext *s, int length, AVFrame *f)
{
    int ret, cnt = 0;
    uint8_t *data, profile_name[82];
    AVBPrint bp;
    AVFrameSideData *sd;

    while ((profile_name[cnt++] = bytestream2_get_byte(&s->gb)) && cnt < 81);
    if (cnt > 80) {
        av_log(s->avctx, AV_LOG_ERROR, "iCCP with invalid name!\n");
        return AVERROR_INVALIDDATA;
    }

    length = FFMAX(length - cnt, 0);

    if (bytestream2_get_byte(&s->gb) != 0) {
        av_log(s->avctx, AV_LOG_ERROR, "iCCP with invalid compression!\n");
        return AVERROR_INVALIDDATA;
    }

    length = FFMAX(length - 1, 0);

    if ((ret = decode_zbuf(&bp, s->gb.buffer, s->gb.buffer + length)) < 0)
        return ret;

    ret = av_bprint_finalize(&bp, (char **)&data);
    if (ret < 0)
        return ret;

    sd = av_frame_new_side_data(f, AV_FRAME_DATA_ICC_PROFILE, bp.len);
    if (!sd) {
        av_free(data);
        return AVERROR(ENOMEM);
    }

    av_dict_set(&sd->metadata, "name", profile_name, 0);
    memcpy(sd->data, data, bp.len);
    av_free(data);

    /* ICC compressed data and CRC */
    bytestream2_skip(&s->gb, length + 4);

    return 0;
}
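
/*
 * Rows with 1, 2 or 4 bits per pixel are decoded packed; expand them in
 * place to one byte per pixel (for grayscale, replicate the value to fill
 * the byte). Each row is processed from right to left so that no source
 * byte is overwritten before it has been read.
 */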
static void handle_small_bpp(PNGDecContext *s, AVFrame *p)
{
    if (s->bits_per_pixel == 1 && s->color_type == PNG_COLOR_TYPE_PALETTE) {
        int i, j, k;
        uint8_t *pd = p->data[0];
        for (j = 0; j < s->height; j++) {
            i = s->width / 8;
            for (k = 7; k >= 1; k--)
                if ((s->width&7) >= k)
                    pd[8*i + k - 1] = (pd[i]>>8-k) & 1;
            for (i--; i >= 0; i--) {
                pd[8*i + 7]=  pd[i]     & 1;
                pd[8*i + 6]= (pd[i]>>1) & 1;
                pd[8*i + 5]= (pd[i]>>2) & 1;
                pd[8*i + 4]= (pd[i]>>3) & 1;
                pd[8*i + 3]= (pd[i]>>4) & 1;
                pd[8*i + 2]= (pd[i]>>5) & 1;
                pd[8*i + 1]= (pd[i]>>6) & 1;
                pd[8*i + 0]=  pd[i]>>7;
            }
            pd += s->image_linesize;
        }
    } else if (s->bits_per_pixel == 2) {
        int i, j;
        uint8_t *pd = p->data[0];
        for (j = 0; j < s->height; j++) {
            i = s->width / 4;
            if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
                if ((s->width&3) >= 3) pd[4*i + 2]= (pd[i] >> 2) & 3;
                if ((s->width&3) >= 2) pd[4*i + 1]= (pd[i] >> 4) & 3;
                if ((s->width&3) >= 1) pd[4*i + 0]=  pd[i] >> 6;
                for (i--; i >= 0; i--) {
                    pd[4*i + 3]=  pd[i]     & 3;
                    pd[4*i + 2]= (pd[i]>>2) & 3;
                    pd[4*i + 1]= (pd[i]>>4) & 3;
                    pd[4*i + 0]=  pd[i]>>6;
                }
            } else {
                if ((s->width&3) >= 3) pd[4*i + 2]= ((pd[i]>>2) & 3)*0x55;
                if ((s->width&3) >= 2) pd[4*i + 1]= ((pd[i]>>4) & 3)*0x55;
                if ((s->width&3) >= 1) pd[4*i + 0]= ( pd[i]>>6     )*0x55;
                for (i--; i >= 0; i--) {
                    pd[4*i + 3]= ( pd[i]     & 3)*0x55;
                    pd[4*i + 2]= ((pd[i]>>2) & 3)*0x55;
                    pd[4*i + 1]= ((pd[i]>>4) & 3)*0x55;
                    pd[4*i + 0]= ( pd[i]>>6     )*0x55;
                }
            }
            pd += s->image_linesize;
        }
    } else if (s->bits_per_pixel == 4) {
        int i, j;
        uint8_t *pd = p->data[0];
        for (j = 0; j < s->height; j++) {
            i = s->width / 2;
            if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
                if (s->width&1) pd[2*i + 0]= pd[i]>>4;
                for (i--; i >= 0; i--) {
                    pd[2*i + 1] = pd[i] & 15;
                    pd[2*i + 0] = pd[i] >> 4;
                }
            } else {
                if (s->width & 1) pd[2*i + 0]= (pd[i] >> 4) * 0x11;
                for (i--; i >= 0; i--) {
                    pd[2*i + 1] = (pd[i] & 15) * 0x11;
                    pd[2*i + 0] = (pd[i] >> 4) * 0x11;
                }
            }
            pd += s->image_linesize;
        }
    }
}

static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s,
                             uint32_t length)
{
    uint32_t sequence_number;
    int cur_w, cur_h, x_offset, y_offset, dispose_op, blend_op;

    if (length != 26)
        return AVERROR_INVALIDDATA;

    if (!(s->hdr_state & PNG_IHDR)) {
        av_log(avctx, AV_LOG_ERROR, "fctl before IHDR\n");
        return AVERROR_INVALIDDATA;
    }

    s->last_w = s->cur_w;
    s->last_h = s->cur_h;
    s->last_x_offset = s->x_offset;
    s->last_y_offset = s->y_offset;
    s->last_dispose_op = s->dispose_op;

    sequence_number = bytestream2_get_be32(&s->gb);
    cur_w           = bytestream2_get_be32(&s->gb);
    cur_h           = bytestream2_get_be32(&s->gb);
    x_offset        = bytestream2_get_be32(&s->gb);
    y_offset        = bytestream2_get_be32(&s->gb);
    bytestream2_skip(&s->gb, 4); /* delay_num (2), delay_den (2) */
    dispose_op      = bytestream2_get_byte(&s->gb);
    blend_op        = bytestream2_get_byte(&s->gb);
    bytestream2_skip(&s->gb, 4); /* crc */

    if (sequence_number == 0 &&
        (cur_w != s->width ||
         cur_h != s->height ||
         x_offset != 0 ||
         y_offset != 0) ||
        cur_w <= 0 || cur_h <= 0 ||
        x_offset < 0 || y_offset < 0 ||
        cur_w > s->width - x_offset || cur_h > s->height - y_offset)
            return AVERROR_INVALIDDATA;

    if (blend_op != APNG_BLEND_OP_OVER && blend_op != APNG_BLEND_OP_SOURCE) {
        av_log(avctx, AV_LOG_ERROR, "Invalid blend_op %d\n", blend_op);
        return AVERROR_INVALIDDATA;
    }

    if ((sequence_number == 0 || !s->previous_picture.f->data[0]) &&
        dispose_op == APNG_DISPOSE_OP_PREVIOUS) {
        // No previous frame to revert to for the first frame
        // Spec says to just treat it as an APNG_DISPOSE_OP_BACKGROUND
        dispose_op = APNG_DISPOSE_OP_BACKGROUND;
    }

    if (blend_op == APNG_BLEND_OP_OVER && !s->has_trns && (
            avctx->pix_fmt == AV_PIX_FMT_RGB24 ||
            avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
            avctx->pix_fmt == AV_PIX_FMT_PAL8 ||
            avctx->pix_fmt == AV_PIX_FMT_GRAY8 ||
            avctx->pix_fmt == AV_PIX_FMT_GRAY16BE ||
            avctx->pix_fmt == AV_PIX_FMT_MONOBLACK
        )) {
        // APNG_BLEND_OP_OVER is the same as APNG_BLEND_OP_SOURCE when there is no alpha channel
        blend_op = APNG_BLEND_OP_SOURCE;
    }

    s->cur_w      = cur_w;
    s->cur_h      = cur_h;
    s->x_offset   = x_offset;
    s->y_offset   = y_offset;
    s->dispose_op = dispose_op;
    s->blend_op   = blend_op;

    return 0;
}

static void handle_p_frame_png(PNGDecContext *s, AVFrame *p)
{
    int i, j;
    uint8_t *pd      = p->data[0];
    uint8_t *pd_last = s->last_picture.f->data[0];
    int ls = FFMIN(av_image_get_linesize(p->format, s->width, 0), s->width * s->bpp);

    ff_thread_await_progress(&s->last_picture, INT_MAX, 0);
    for (j = 0; j < s->height; j++) {
        for (i = 0; i < ls; i++)
            pd[i] += pd_last[i];
        pd      += s->image_linesize;
        pd_last += s->image_linesize;
    }
}

// divide by 255 and round to nearest
// apply a fast variant: (X+127)/255 = ((X+127)*257+257)>>16 = ((X+128)*257)>>16
#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)

static int handle_p_frame_apng(AVCodecContext *avctx, PNGDecContext *s,
                               AVFrame *p)
{
    size_t x, y;
    uint8_t *buffer;

    if (s->blend_op == APNG_BLEND_OP_OVER &&
        avctx->pix_fmt != AV_PIX_FMT_RGBA &&
        avctx->pix_fmt != AV_PIX_FMT_GRAY8A &&
        avctx->pix_fmt != AV_PIX_FMT_PAL8) {
        avpriv_request_sample(avctx, "Blending with pixel format %s",
                              av_get_pix_fmt_name(avctx->pix_fmt));
        return AVERROR_PATCHWELCOME;
    }

    buffer = av_malloc_array(s->image_linesize, s->height);
    if (!buffer)
        return AVERROR(ENOMEM);

    // Do the disposal operation specified by the last frame on the frame
    if (s->last_dispose_op != APNG_DISPOSE_OP_PREVIOUS) {
        ff_thread_await_progress(&s->last_picture, INT_MAX, 0);
        memcpy(buffer, s->last_picture.f->data[0], s->image_linesize * s->height);

        if (s->last_dispose_op == APNG_DISPOSE_OP_BACKGROUND)
            for (y = s->last_y_offset; y < s->last_y_offset + s->last_h; ++y)
                memset(buffer + s->image_linesize * y + s->bpp * s->last_x_offset, 0, s->bpp * s->last_w);

        memcpy(s->previous_picture.f->data[0], buffer, s->image_linesize * s->height);
        ff_thread_report_progress(&s->previous_picture, INT_MAX, 0);
    } else {
        ff_thread_await_progress(&s->previous_picture, INT_MAX, 0);
        memcpy(buffer, s->previous_picture.f->data[0], s->image_linesize * s->height);
    }

    // Perform blending
    if (s->blend_op == APNG_BLEND_OP_SOURCE) {
        for (y = s->y_offset; y < s->y_offset + s->cur_h; ++y) {
            size_t row_start = s->image_linesize * y + s->bpp * s->x_offset;
            memcpy(buffer + row_start, p->data[0] + row_start, s->bpp * s->cur_w);
        }
    } else { // APNG_BLEND_OP_OVER
        for (y = s->y_offset; y < s->y_offset + s->cur_h; ++y) {
            uint8_t *foreground = p->data[0] + s->image_linesize * y + s->bpp * s->x_offset;
            uint8_t *background = buffer + s->image_linesize * y + s->bpp * s->x_offset;
            for (x = s->x_offset; x < s->x_offset + s->cur_w; ++x, foreground += s->bpp, background += s->bpp) {
                size_t b;
                uint8_t foreground_alpha, background_alpha, output_alpha;
                uint8_t output[10];

                // Since we might be blending alpha onto alpha, we use the following equations:
                // output_alpha = foreground_alpha + (1 - foreground_alpha) * background_alpha
                // output = (foreground_alpha * foreground + (1 - foreground_alpha) * background_alpha * background) / output_alpha

                switch (avctx->pix_fmt) {
                case AV_PIX_FMT_RGBA:
                    foreground_alpha = foreground[3];
                    background_alpha = background[3];
                    break;
                case AV_PIX_FMT_GRAY8A:
                    foreground_alpha = foreground[1];
                    background_alpha = background[1];
                    break;
                case AV_PIX_FMT_PAL8:
                    foreground_alpha = s->palette[foreground[0]] >> 24;
                    background_alpha = s->palette[background[0]] >> 24;
                    break;
                }

                if (foreground_alpha == 0)
                    continue;

                if (foreground_alpha == 255) {
                    memcpy(background, foreground, s->bpp);
                    continue;
                }

                if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
                    // TODO: Alpha blending with PAL8 will likely need the entire image converted over to RGBA first
                    avpriv_request_sample(avctx, "Alpha blending palette samples");
                    background[0] = foreground[0];
                    continue;
                }

                output_alpha = foreground_alpha + FAST_DIV255((255 - foreground_alpha) * background_alpha);

                av_assert0(s->bpp <= 10);

                for (b = 0; b < s->bpp - 1; ++b) {
                    if (output_alpha == 0) {
                        output[b] = 0;
                    } else if (background_alpha == 255) {
                        output[b] = FAST_DIV255(foreground_alpha * foreground[b] + (255 - foreground_alpha) * background[b]);
                    } else {
                        output[b] = (255 * foreground_alpha * foreground[b] + (255 - foreground_alpha) * background_alpha * background[b]) / (255 * output_alpha);
                    }
                }
                output[b] = output_alpha;
                memcpy(background, output, s->bpp);
            }
        }
    }

    // Copy blended buffer into the frame and free
    memcpy(p->data[0], buffer, s->image_linesize * s->height);
    av_free(buffer);

    return 0;
}

static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s,
                               AVFrame *p, AVPacket *avpkt)
{
    AVDictionary **metadatap = NULL;
    uint32_t tag, length;
    int decode_next_dat = 0;
    int i, ret;

    for (;;) {
        length = bytestream2_get_bytes_left(&s->gb);
        if (length <= 0) {

            if (avctx->codec_id == AV_CODEC_ID_PNG &&
                avctx->skip_frame == AVDISCARD_ALL) {
                return 0;
            }

            if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && length == 0) {
                if (!(s->pic_state & PNG_IDAT))
                    return 0;
                else
                    goto exit_loop;
            }
            av_log(avctx, AV_LOG_ERROR, "%d bytes left\n", length);
            if (   s->pic_state & PNG_ALLIMAGE
                && avctx->strict_std_compliance <= FF_COMPLIANCE_NORMAL)
                goto exit_loop;
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }

        length = bytestream2_get_be32(&s->gb);
        if (length > 0x7fffffff || length > bytestream2_get_bytes_left(&s->gb)) {
            av_log(avctx, AV_LOG_ERROR, "chunk too big\n");
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }
        tag = bytestream2_get_le32(&s->gb);
        if (avctx->debug & FF_DEBUG_STARTCODE)
            av_log(avctx, AV_LOG_DEBUG, "png: tag=%s length=%u\n",
                   av_fourcc2str(tag), length);

        if (avctx->codec_id == AV_CODEC_ID_PNG &&
            avctx->skip_frame == AVDISCARD_ALL) {
            switch (tag) {
            case MKTAG('I', 'H', 'D', 'R'):
            case MKTAG('p', 'H', 'Y', 's'):
            case MKTAG('t', 'E', 'X', 't'):
            case MKTAG('I', 'D', 'A', 'T'):
            case MKTAG('t', 'R', 'N', 'S'):
                break;
            default:
                goto skip_tag;
            }
        }

        metadatap = &p->metadata;
        switch (tag) {
        case MKTAG('I', 'H', 'D', 'R'):
            if ((ret = decode_ihdr_chunk(avctx, s, length)) < 0)
                goto fail;
            break;
        case MKTAG('p', 'H', 'Y', 's'):
            if ((ret = decode_phys_chunk(avctx, s)) < 0)
                goto fail;
            break;
        case MKTAG('f', 'c', 'T', 'L'):
            if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG)
                goto skip_tag;
            if ((ret = decode_fctl_chunk(avctx, s, length)) < 0)
                goto fail;
            decode_next_dat = 1;
            break;
        case MKTAG('f', 'd', 'A', 'T'):
            if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG)
                goto skip_tag;
            if (!decode_next_dat) {
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            bytestream2_get_be32(&s->gb);
            length -= 4;
            /* fallthrough */
        case MKTAG('I', 'D', 'A', 'T'):
            if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && !decode_next_dat)
                goto skip_tag;
            if ((ret = decode_idat_chunk(avctx, s, length, p)) < 0)
                goto fail;
            break;
        case MKTAG('P', 'L', 'T', 'E'):
            if (decode_plte_chunk(avctx, s, length) < 0)
                goto skip_tag;
            break;
        case MKTAG('t', 'R', 'N', 'S'):
            if (decode_trns_chunk(avctx, s, length) < 0)
                goto skip_tag;
            break;
        case MKTAG('t', 'E', 'X', 't'):
            if (decode_text_chunk(s, length, 0, metadatap) < 0)
                av_log(avctx, AV_LOG_WARNING, "Broken tEXt chunk\n");
            bytestream2_skip(&s->gb, length + 4);
            break;
        case MKTAG('z', 'T', 'X', 't'):
            if (decode_text_chunk(s, length, 1, metadatap) < 0)
                av_log(avctx, AV_LOG_WARNING, "Broken zTXt chunk\n");
            bytestream2_skip(&s->gb, length + 4);
            break;
        case MKTAG('s', 'T', 'E', 'R'): {
            int mode = bytestream2_get_byte(&s->gb);
            AVStereo3D *stereo3d = av_stereo3d_create_side_data(p);
            if (!stereo3d) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }

            if (mode == 0 || mode == 1) {
                stereo3d->type  = AV_STEREO3D_SIDEBYSIDE;
                stereo3d->flags = mode ? 0 : AV_STEREO3D_FLAG_INVERT;
            } else {
                av_log(avctx, AV_LOG_WARNING,
                       "Unknown value in sTER chunk (%d)\n", mode);
            }
            bytestream2_skip(&s->gb, 4); /* crc */
            break;
        }
        case MKTAG('i', 'C', 'C', 'P'): {
            if ((ret = decode_iccp_chunk(s, length, p)) < 0)
                goto fail;
            break;
        }
        case MKTAG('c', 'H', 'R', 'M'): {
            AVMasteringDisplayMetadata *mdm = av_mastering_display_metadata_create_side_data(p);
            if (!mdm) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }

            mdm->white_point[0] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
            mdm->white_point[1] = av_make_q(bytestream2_get_be32(&s->gb), 100000);

            /* RGB Primaries */
            for (i = 0; i < 3; i++) {
                mdm->display_primaries[i][0] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
                mdm->display_primaries[i][1] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
            }

            mdm->has_primaries = 1;
            bytestream2_skip(&s->gb, 4); /* crc */
            break;
        }
        case MKTAG('g', 'A', 'M', 'A'): {
            AVBPrint bp;
            char *gamma_str;
            int num = bytestream2_get_be32(&s->gb);

            av_bprint_init(&bp, 0, -1);
            av_bprintf(&bp, "%i/%i", num, 100000);
            ret = av_bprint_finalize(&bp, &gamma_str);
            if (ret < 0)
                return ret;

            av_dict_set(&p->metadata, "gamma", gamma_str, AV_DICT_DONT_STRDUP_VAL);

            bytestream2_skip(&s->gb, 4); /* crc */
            break;
        }
        case MKTAG('I', 'E', 'N', 'D'):
            if (!(s->pic_state & PNG_ALLIMAGE))
                av_log(avctx, AV_LOG_ERROR, "IEND without all image\n");
            if (!(s->pic_state & (PNG_ALLIMAGE|PNG_IDAT))) {
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            bytestream2_skip(&s->gb, 4); /* crc */
            goto exit_loop;
        default:
            /* skip tag */
skip_tag:
            bytestream2_skip(&s->gb, length + 4);
            break;
        }
    }

exit_loop:
    if (avctx->codec_id == AV_CODEC_ID_PNG &&
        avctx->skip_frame == AVDISCARD_ALL) {
        return 0;
    }

    if (s->bits_per_pixel <= 4)
        handle_small_bpp(s, p);

    /* apply transparency if needed */
    if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) {
        size_t byte_depth = s->bit_depth > 8 ? 2 : 1;
        size_t raw_bpp = s->bpp - byte_depth;
        unsigned x, y;

        av_assert0(s->bit_depth > 1);

        for (y = 0; y < s->height; ++y) {
            uint8_t *row = &s->image_buf[s->image_linesize * y];

            /* since we're updating in-place, we have to go from right to left */
            for (x = s->width; x > 0; --x) {
                uint8_t *pixel = &row[s->bpp * (x - 1)];
                memmove(pixel, &row[raw_bpp * (x - 1)], raw_bpp);

                if (!memcmp(pixel, s->transparent_color_be, raw_bpp)) {
                    memset(&pixel[raw_bpp], 0, byte_depth);
                } else {
                    memset(&pixel[raw_bpp], 0xff, byte_depth);
                }
            }
        }
    }

    /* handle P-frames only if a predecessor frame is available */
    if (s->last_picture.f->data[0]) {
        if (   !(avpkt->flags & AV_PKT_FLAG_KEY) && avctx->codec_tag != AV_RL32("MPNG")
            && s->last_picture.f->width  == p->width
            && s->last_picture.f->height == p->height
            && s->last_picture.f->format == p->format
           ) {
            if (CONFIG_PNG_DECODER && avctx->codec_id != AV_CODEC_ID_APNG)
                handle_p_frame_png(s, p);
            else if (CONFIG_APNG_DECODER &&
                     avctx->codec_id == AV_CODEC_ID_APNG &&
                     (ret = handle_p_frame_apng(avctx, s, p)) < 0)
                goto fail;
        }
    }
    ff_thread_report_progress(&s->picture, INT_MAX, 0);
    ff_thread_report_progress(&s->previous_picture, INT_MAX, 0);

    return 0;

fail:
    ff_thread_report_progress(&s->picture, INT_MAX, 0);
    ff_thread_report_progress(&s->previous_picture, INT_MAX, 0);
    return ret;
}

#if CONFIG_PNG_DECODER
static int decode_frame_png(AVCodecContext *avctx,
                            void *data, int *got_frame,
                            AVPacket *avpkt)
{
    PNGDecContext *const s = avctx->priv_data;
    const uint8_t *buf     = avpkt->data;
    int buf_size           = avpkt->size;
    AVFrame *p;
    int64_t sig;
    int ret;

    ff_thread_release_buffer(avctx, &s->last_picture);
    FFSWAP(ThreadFrame, s->picture, s->last_picture);
    p = s->picture.f;

    bytestream2_init(&s->gb, buf, buf_size);

    /* check signature */
    sig = bytestream2_get_be64(&s->gb);
    if (sig != PNGSIG &&
        sig != MNGSIG) {
        av_log(avctx, AV_LOG_ERROR, "Invalid PNG signature 0x%08"PRIX64".\n", sig);
        return AVERROR_INVALIDDATA;
    }

    s->y = s->has_trns = 0;
    s->hdr_state = 0;
    s->pic_state = 0;

    /* init the zlib */
    s->zstream.zalloc = ff_png_zalloc;
    s->zstream.zfree  = ff_png_zfree;
    s->zstream.opaque = NULL;
    ret = inflateInit(&s->zstream);
    if (ret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
        return AVERROR_EXTERNAL;
    }

    if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
        goto the_end;

    if (avctx->skip_frame == AVDISCARD_ALL) {
        *got_frame = 0;
        ret = bytestream2_tell(&s->gb);
        goto the_end;
    }

    if ((ret = av_frame_ref(data, s->picture.f)) < 0)
        goto the_end;

    *got_frame = 1;

    ret = bytestream2_tell(&s->gb);
the_end:
    inflateEnd(&s->zstream);
    s->crow_buf = NULL;
    return ret;
}
#endif

#if CONFIG_APNG_DECODER
static int decode_frame_apng(AVCodecContext *avctx,
                             void *data, int *got_frame,
                             AVPacket *avpkt)
{
    PNGDecContext *const s = avctx->priv_data;
    int ret;
    AVFrame *p;

    ff_thread_release_buffer(avctx, &s->last_picture);
    FFSWAP(ThreadFrame, s->picture, s->last_picture);
    p = s->picture.f;

    if (!(s->hdr_state & PNG_IHDR)) {
        if (!avctx->extradata_size)
            return AVERROR_INVALIDDATA;

        /* only init fields, there is no zlib use in extradata */
        s->zstream.zalloc = ff_png_zalloc;
        s->zstream.zfree  = ff_png_zfree;

        bytestream2_init(&s->gb, avctx->extradata, avctx->extradata_size);
        if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
            goto end;
    }

    /* reset state for a new frame */
    if ((ret = inflateInit(&s->zstream)) != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
        ret = AVERROR_EXTERNAL;
        goto end;
    }
    s->y = 0;
    s->pic_state = 0;
    bytestream2_init(&s->gb, avpkt->data, avpkt->size);
    if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
        goto end;

    if (!(s->pic_state & PNG_ALLIMAGE))
        av_log(avctx, AV_LOG_WARNING, "Frame did not contain a complete image\n");
    if (!(s->pic_state & (PNG_ALLIMAGE|PNG_IDAT))) {
        ret = AVERROR_INVALIDDATA;
        goto end;
    }
    if ((ret = av_frame_ref(data, s->picture.f)) < 0)
        goto end;
    *got_frame = 1;
    ret = bytestream2_tell(&s->gb);

end:
    inflateEnd(&s->zstream);
    return ret;
}
#endif

#if HAVE_THREADS
static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    PNGDecContext *psrc = src->priv_data;
    PNGDecContext *pdst = dst->priv_data;
    int ret;

    if (dst == src)
        return 0;

    ff_thread_release_buffer(dst, &pdst->picture);
    if (psrc->picture.f->data[0] &&
        (ret = ff_thread_ref_frame(&pdst->picture, &psrc->picture)) < 0)
        return ret;
    if (CONFIG_APNG_DECODER && dst->codec_id == AV_CODEC_ID_APNG) {
        pdst->width            = psrc->width;
        pdst->height           = psrc->height;
        pdst->bit_depth        = psrc->bit_depth;
        pdst->color_type       = psrc->color_type;
        pdst->compression_type = psrc->compression_type;
        pdst->interlace_type   = psrc->interlace_type;
        pdst->filter_type      = psrc->filter_type;
        pdst->cur_w    = psrc->cur_w;
        pdst->cur_h    = psrc->cur_h;
        pdst->x_offset = psrc->x_offset;
        pdst->y_offset = psrc->y_offset;
        pdst->has_trns = psrc->has_trns;
        memcpy(pdst->transparent_color_be, psrc->transparent_color_be, sizeof(pdst->transparent_color_be));

        pdst->dispose_op = psrc->dispose_op;

        memcpy(pdst->palette, psrc->palette, sizeof(pdst->palette));

        pdst->hdr_state |= psrc->hdr_state;

        ff_thread_release_buffer(dst, &pdst->last_picture);
        if (psrc->last_picture.f->data[0] &&
            (ret = ff_thread_ref_frame(&pdst->last_picture, &psrc->last_picture)) < 0)
            return ret;

        ff_thread_release_buffer(dst, &pdst->previous_picture);
        if (psrc->previous_picture.f->data[0] &&
            (ret = ff_thread_ref_frame(&pdst->previous_picture, &psrc->previous_picture)) < 0)
            return ret;
    }

    return 0;
}
#endif

static av_cold int png_dec_init(AVCodecContext *avctx)
{
    PNGDecContext *s = avctx->priv_data;

    avctx->color_range = AVCOL_RANGE_JPEG;

    s->avctx = avctx;
    s->previous_picture.f = av_frame_alloc();
    s->last_picture.f     = av_frame_alloc();
    s->picture.f          = av_frame_alloc();
    if (!s->previous_picture.f || !s->last_picture.f || !s->picture.f) {
        av_frame_free(&s->previous_picture.f);
        av_frame_free(&s->last_picture.f);
        av_frame_free(&s->picture.f);
        return AVERROR(ENOMEM);
    }

    if (!avctx->internal->is_copy) {
        avctx->internal->allocate_progress = 1;
        ff_pngdsp_init(&s->dsp);
    }

    return 0;
}

static av_cold int png_dec_end(AVCodecContext *avctx)
{
    PNGDecContext *s = avctx->priv_data;

    ff_thread_release_buffer(avctx, &s->previous_picture);
    av_frame_free(&s->previous_picture.f);
    ff_thread_release_buffer(avctx, &s->last_picture);
    av_frame_free(&s->last_picture.f);
    ff_thread_release_buffer(avctx, &s->picture);
    av_frame_free(&s->picture.f);
    av_freep(&s->buffer);
    s->buffer_size = 0;
    av_freep(&s->last_row);
    s->last_row_size = 0;
    av_freep(&s->tmp_row);
    s->tmp_row_size = 0;

    return 0;
}

#if CONFIG_APNG_DECODER
AVCodec ff_apng_decoder = {
    .name           = "apng",
    .long_name      = NULL_IF_CONFIG_SMALL("APNG (Animated Portable Network Graphics) image"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_APNG,
    .priv_data_size = sizeof(PNGDecContext),
    .init           = png_dec_init,
    .close          = png_dec_end,
    .decode         = decode_frame_apng,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(png_dec_init),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
};
#endif

#if CONFIG_PNG_DECODER
AVCodec ff_png_decoder = {
    .name           = "png",
    .long_name      = NULL_IF_CONFIG_SMALL("PNG (Portable Network Graphics) image"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_PNG,
    .priv_data_size = sizeof(PNGDecContext),
    .init           = png_dec_init,
    .close          = png_dec_end,
    .decode         = decode_frame_png,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(png_dec_init),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
    .caps_internal  = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM | FF_CODEC_CAP_INIT_THREADSAFE,
};
#endif