You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1851 lines
62KB

  1. /*
  2. * PNG image format
  3. * Copyright (c) 2003 Fabrice Bellard
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. //#define DEBUG
  22. #include "libavutil/avassert.h"
  23. #include "libavutil/bprint.h"
  24. #include "libavutil/imgutils.h"
  25. #include "libavutil/intreadwrite.h"
  26. #include "libavutil/stereo3d.h"
  27. #include "libavutil/mastering_display_metadata.h"
  28. #include "avcodec.h"
  29. #include "bytestream.h"
  30. #include "internal.h"
  31. #include "apng.h"
  32. #include "png.h"
  33. #include "pngdsp.h"
  34. #include "thread.h"
  35. #include <zlib.h>
/* Bitmask of header chunks (IHDR/PLTE) seen so far; kept across frames. */
enum PNGHeaderState {
    PNG_IHDR = 1 << 0,
    PNG_PLTE = 1 << 1,
};
/* Bitmask of per-picture decode progress: IDAT seen / all rows decoded. */
enum PNGImageState {
    PNG_IDAT     = 1 << 0,
    PNG_ALLIMAGE = 1 << 1,
};
/* Decoder state shared by the PNG and APNG decoders. */
typedef struct PNGDecContext {
    PNGDSPContext dsp;          /* SIMD helpers for row filtering */
    AVCodecContext *avctx;

    GetByteContext gb;          /* reader over the current packet */
    ThreadFrame previous_picture; /* APNG: frame to revert to on DISPOSE_OP_PREVIOUS */
    ThreadFrame last_picture;   /* previously output frame (P-frame reference) */
    ThreadFrame picture;        /* frame currently being decoded */

    enum PNGHeaderState hdr_state;
    enum PNGImageState pic_state;
    int width, height;          /* canvas size from IHDR */
    int cur_w, cur_h;           /* current (sub)frame size; differs for APNG fcTL */
    int last_w, last_h;         /* previous frame's region, for dispose handling */
    int x_offset, y_offset;     /* current frame's placement on the canvas */
    int last_x_offset, last_y_offset;
    uint8_t dispose_op, blend_op;   /* APNG fcTL fields for the current frame */
    uint8_t last_dispose_op;
    int bit_depth;              /* bits per channel (1/2/4/8/16) */
    int color_type;
    int compression_type;       /* must be 0 (deflate) */
    int interlace_type;         /* 0 = none, 1 = Adam7 */
    int filter_type;            /* PNG_FILTER_TYPE_LOCO marks MNG-style RGB->"YUV" rows */
    int channels;
    int bits_per_pixel;         /* bit_depth * channels */
    int bpp;                    /* bytes per pixel, rounded up */
    int has_trns;               /* a tRNS chunk was parsed */
    uint8_t transparent_color_be[6]; /* big-endian transparent color (gray or RGB) */

    uint8_t *image_buf;         /* points into picture.f->data[0] */
    int image_linesize;
    uint32_t palette[256];      /* ARGB palette for PAL8 */
    uint8_t *crow_buf;          /* one compressed row: filter byte + row data */
    uint8_t *last_row;          /* previous decoded row (filter reference) */
    unsigned int last_row_size;
    uint8_t *tmp_row;           /* scratch row for interlaced passes */
    unsigned int tmp_row_size;
    uint8_t *buffer;            /* backing allocation for crow_buf (alignment pad) */
    int buffer_size;
    int pass;                   /* current Adam7 pass */
    int crow_size;  /* compressed row size (include filter type) */
    int row_size;   /* decompressed row size */
    int pass_row_size; /* decompress row size of the current pass */
    int y;          /* next row to be produced */
    z_stream zstream;
} PNGDecContext;
/* Adam7 pass tables: each mask is indexed by (x or y) & 7 via a left shift,
 * testing bit 7 — a set bit means the pixel/row participates in that pass. */

/* Mask to determine which pixels are valid in a pass */
static const uint8_t png_pass_mask[NB_PASSES] = {
    0x01, 0x01, 0x11, 0x11, 0x55, 0x55, 0xff,
};

/* Mask to determine which y pixels can be written in a pass */
static const uint8_t png_pass_dsp_ymask[NB_PASSES] = {
    0xff, 0xff, 0x0f, 0xff, 0x33, 0xff, 0x55,
};

/* Mask to determine which pixels to overwrite while displaying */
static const uint8_t png_pass_dsp_mask[NB_PASSES] = {
    0xff, 0x0f, 0xff, 0x33, 0xff, 0x55, 0xff
};
/* NOTE: we try to construct a good looking image at each pass. width
 * is the original image width. We also do pixel format conversion at
 * this stage.
 *
 * Scatter the packed pixels of one interlace-pass row (src) into the
 * full-width destination row (dst): 'mask' selects which x positions the
 * pass actually carries (advances src_x), 'dsp_mask' selects which x
 * positions should be (over)written for progressive display. Sub-byte
 * depths are handled with explicit bit surgery; >= 8 bpp uses memcpy. */
static void png_put_interlaced_row(uint8_t *dst, int width,
                                   int bits_per_pixel, int pass,
                                   int color_type, const uint8_t *src)
{
    int x, mask, dsp_mask, j, src_x, b, bpp;
    uint8_t *d;
    const uint8_t *s;

    mask     = png_pass_mask[pass];
    dsp_mask = png_pass_dsp_mask[pass];

    switch (bits_per_pixel) {
    case 1:
        src_x = 0;
        for (x = 0; x < width; x++) {
            j = (x & 7);
            if ((dsp_mask << j) & 0x80) {
                /* extract bit src_x from src, store it at bit position j of dst */
                b = (src[src_x >> 3] >> (7 - (src_x & 7))) & 1;
                dst[x >> 3] &= 0xFF7F>>j;      /* clear target bit */
                dst[x >> 3] |= b << (7 - j);
            }
            if ((mask << j) & 0x80)
                src_x++;
        }
        break;
    case 2:
        src_x = 0;
        for (x = 0; x < width; x++) {
            int j2 = 2 * (x & 3);              /* bit offset of the 2-bit pixel */
            j = (x & 7);
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 2] >> (6 - 2*(src_x & 3))) & 3;
                dst[x >> 2] &= 0xFF3F>>j2;
                dst[x >> 2] |= b << (6 - j2);
            }
            if ((mask << j) & 0x80)
                src_x++;
        }
        break;
    case 4:
        src_x = 0;
        for (x = 0; x < width; x++) {
            int j2 = 4*(x&1);                  /* bit offset of the 4-bit pixel */
            j = (x & 7);
            if ((dsp_mask << j) & 0x80) {
                b = (src[src_x >> 1] >> (4 - 4*(src_x & 1))) & 15;
                dst[x >> 1] &= 0xFF0F>>j2;
                dst[x >> 1] |= b << (4 - j2);
            }
            if ((mask << j) & 0x80)
                src_x++;
        }
        break;
    default:
        /* whole bytes per pixel: plain copy of selected pixels */
        bpp = bits_per_pixel >> 3;
        d   = dst;
        s   = src;
        for (x = 0; x < width; x++) {
            j = x & 7;
            if ((dsp_mask << j) & 0x80) {
                memcpy(d, s, bpp);
            }
            d += bpp;
            if ((mask << j) & 0x80)
                s += bpp;
        }
        break;
    }
}
  169. void ff_add_png_paeth_prediction(uint8_t *dst, uint8_t *src, uint8_t *top,
  170. int w, int bpp)
  171. {
  172. int i;
  173. for (i = 0; i < w; i++) {
  174. int a, b, c, p, pa, pb, pc;
  175. a = dst[i - bpp];
  176. b = top[i];
  177. c = top[i - bpp];
  178. p = b - c;
  179. pc = a - c;
  180. pa = abs(p);
  181. pb = abs(pc);
  182. pc = abs(p + pc);
  183. if (pa <= pb && pa <= pc)
  184. p = a;
  185. else if (pb <= pc)
  186. p = b;
  187. else
  188. p = c;
  189. dst[i] = p + src[i];
  190. }
  191. }
/* Apply filter 'op' over the row for a fixed bpp (1..4), keeping the running
 * per-channel values r/g/b/a in locals so each output byte's left neighbour
 * never has to be reloaded from dst. Expects i, size, dst, src, last in
 * scope; leaves i at the first unprocessed index for the caller's tail loop. */
#define UNROLL1(bpp, op)                                                      \
    {                                                                         \
        r = dst[0];                                                           \
        if (bpp >= 2)                                                         \
            g = dst[1];                                                       \
        if (bpp >= 3)                                                         \
            b = dst[2];                                                       \
        if (bpp >= 4)                                                         \
            a = dst[3];                                                       \
        for (; i <= size - bpp; i += bpp) {                                   \
            dst[i + 0] = r = op(r, src[i + 0], last[i + 0]);                  \
            if (bpp == 1)                                                     \
                continue;                                                     \
            dst[i + 1] = g = op(g, src[i + 1], last[i + 1]);                  \
            if (bpp == 2)                                                     \
                continue;                                                     \
            dst[i + 2] = b = op(b, src[i + 2], last[i + 2]);                  \
            if (bpp == 3)                                                     \
                continue;                                                     \
            dst[i + 3] = a = op(a, src[i + 3], last[i + 3]);                  \
        }                                                                     \
    }
/* Dispatch to the specialized UNROLL1 body for bpp 1..4, then finish any
 * remaining bytes (and any bpp > 4) with a generic per-byte loop. */
#define UNROLL_FILTER(op)                                                     \
    if (bpp == 1) {                                                           \
        UNROLL1(1, op)                                                        \
    } else if (bpp == 2) {                                                    \
        UNROLL1(2, op)                                                        \
    } else if (bpp == 3) {                                                    \
        UNROLL1(3, op)                                                        \
    } else if (bpp == 4) {                                                    \
        UNROLL1(4, op)                                                        \
    }                                                                         \
    for (; i < size; i++) {                                                   \
        dst[i] = op(dst[i - bpp], src[i], last[i]);                           \
    }
/* NOTE: 'dst' can be equal to 'last' */
/* Undo one PNG row filter (None/Sub/Up/Average/Paeth), reconstructing 'dst'
 * from the filtered bytes 'src' and the previous reconstructed row 'last'. */
static void png_filter_row(PNGDSPContext *dsp, uint8_t *dst, int filter_type,
                           uint8_t *src, uint8_t *last, int size, int bpp)
{
    int i, p, r, g, b, a;

    switch (filter_type) {
    case PNG_FILTER_VALUE_NONE:
        memcpy(dst, src, size);
        break;
    case PNG_FILTER_VALUE_SUB:
        /* first pixel has no left neighbour: copy through */
        for (i = 0; i < bpp; i++)
            dst[i] = src[i];
        if (bpp == 4) {
            /* SWAR byte-wise add of 4 bytes at a time without carry between
             * lanes. NOTE(review): the int-pointer loads type-pun the byte
             * buffers and assume no alignment traps — long-standing FFmpeg
             * idiom, but technically relies on lax aliasing/alignment. */
            p = *(int *)dst;
            for (; i < size; i += bpp) {
                unsigned s = *(int *)(src + i);
                p = ((s & 0x7f7f7f7f) + (p & 0x7f7f7f7f)) ^ ((s ^ p) & 0x80808080);
                *(int *)(dst + i) = p;
            }
        } else {
#define OP_SUB(x, s, l) ((x) + (s))
            UNROLL_FILTER(OP_SUB);
        }
        break;
    case PNG_FILTER_VALUE_UP:
        dsp->add_bytes_l2(dst, src, last, size);
        break;
    case PNG_FILTER_VALUE_AVG:
        /* first pixel: average of (0, above) */
        for (i = 0; i < bpp; i++) {
            p      = (last[i] >> 1);
            dst[i] = p + src[i];
        }
#define OP_AVG(x, s, l) (((((x) + (l)) >> 1) + (s)) & 0xff)
        UNROLL_FILTER(OP_AVG);
        break;
    case PNG_FILTER_VALUE_PAETH:
        /* first pixel: predictor degenerates to 'above' */
        for (i = 0; i < bpp; i++) {
            p      = last[i];
            dst[i] = p + src[i];
        }
        if (bpp > 2 && size > 4) {
            /* would write off the end of the array if we let it process
             * the last pixel with bpp=3 */
            int w = (bpp & 3) ? size - 3 : size;
            if (w > i) {
                /* fast (possibly SIMD) path for the bulk of the row */
                dsp->add_paeth_prediction(dst + i, src + i, last + i, size - i, bpp);
                i = w;
            }
        }
        /* scalar tail (or whole row when the dsp path is unusable) */
        ff_add_png_paeth_prediction(dst + i, src + i, last + i, size - i, bpp);
        break;
    }
}
/* This used to be called "deloco" in FFmpeg
 * and is actually an inverse reversible colorspace transformation:
 * adds the green channel back onto R and B in place, stepping over
 * 3 (RGB) or 4 (RGBA, alpha untouched) components per pixel. */
#define YUV2RGB(NAME, TYPE) \
static void deloco_ ## NAME(TYPE *dst, int size, int alpha) \
{ \
    int i; \
    for (i = 0; i < size; i += 3 + alpha) { \
        int g = dst [i + 1]; \
        dst[i + 0] += g; \
        dst[i + 2] += g; \
    } \
}

YUV2RGB(rgb8, uint8_t)
YUV2RGB(rgb16, uint16_t)
  294. static int percent_missing(PNGDecContext *s)
  295. {
  296. if (s->interlace_type) {
  297. return 100 - 100 * s->pass / (NB_PASSES - 1);
  298. } else {
  299. return 100 - 100 * s->y / s->cur_h;
  300. }
  301. }
/* process exactly one decompressed row */
/* Consumes the row sitting in s->crow_buf (filter byte at [0], data at +1):
 * unfilters it into the output frame (or into last_row for interlaced
 * images, which is then scattered into place), advances s->y / s->pass,
 * and sets PNG_ALLIMAGE once the final row lands. */
static void png_handle_row(PNGDecContext *s)
{
    uint8_t *ptr, *last_row;
    int got_line;

    if (!s->interlace_type) {
        ptr = s->image_buf + s->image_linesize * (s->y + s->y_offset) + s->x_offset * s->bpp;
        if (s->y == 0)
            last_row = s->last_row;          /* all-zero row for the first line */
        else
            last_row = ptr - s->image_linesize;

        png_filter_row(&s->dsp, ptr, s->crow_buf[0], s->crow_buf + 1,
                       last_row, s->row_size, s->bpp);
        /* loco lags by 1 row so that it doesn't interfere with top prediction */
        if (s->filter_type == PNG_FILTER_TYPE_LOCO && s->y > 0) {
            if (s->bit_depth == 16) {
                deloco_rgb16((uint16_t *)(ptr - s->image_linesize), s->row_size / 2,
                             s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
            } else {
                deloco_rgb8(ptr - s->image_linesize, s->row_size,
                            s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
            }
        }
        s->y++;
        if (s->y == s->cur_h) {
            s->pic_state |= PNG_ALLIMAGE;
            /* final row never got its lagged loco pass: do it now */
            if (s->filter_type == PNG_FILTER_TYPE_LOCO) {
                if (s->bit_depth == 16) {
                    deloco_rgb16((uint16_t *)ptr, s->row_size / 2,
                                 s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
                } else {
                    deloco_rgb8(ptr, s->row_size,
                                s->color_type == PNG_COLOR_TYPE_RGB_ALPHA);
                }
            }
        }
    } else {
        got_line = 0;
        for (;;) {
            ptr = s->image_buf + s->image_linesize * (s->y + s->y_offset) + s->x_offset * s->bpp;
            if ((ff_png_pass_ymask[s->pass] << (s->y & 7)) & 0x80) {
                /* if we already read one row, it is time to stop to
                 * wait for the next one */
                if (got_line)
                    break;
                png_filter_row(&s->dsp, s->tmp_row, s->crow_buf[0], s->crow_buf + 1,
                               s->last_row, s->pass_row_size, s->bpp);
                FFSWAP(uint8_t *, s->last_row, s->tmp_row);
                FFSWAP(unsigned int, s->last_row_size, s->tmp_row_size);
                got_line = 1;
            }
            if ((png_pass_dsp_ymask[s->pass] << (s->y & 7)) & 0x80) {
                /* progressive display: expand the pass row over the canvas */
                png_put_interlaced_row(ptr, s->cur_w, s->bits_per_pixel, s->pass,
                                       s->color_type, s->last_row);
            }
            s->y++;
            if (s->y == s->cur_h) {
                memset(s->last_row, 0, s->row_size);
                for (;;) {
                    if (s->pass == NB_PASSES - 1) {
                        s->pic_state |= PNG_ALLIMAGE;
                        goto the_end;
                    } else {
                        s->pass++;
                        s->y = 0;
                        s->pass_row_size = ff_png_pass_row_size(s->pass,
                                                                s->bits_per_pixel,
                                                                s->cur_w);
                        s->crow_size = s->pass_row_size + 1;
                        if (s->pass_row_size != 0)
                            break;
                        /* skip pass if empty row */
                    }
                }
            }
        }
the_end:;
    }
}
/* Feed up to 'length' bytes of IDAT payload from the bytestream into the
 * zlib inflater, handing every completed row to png_handle_row().
 * Returns 0 on success (including trailing garbage after Z_STREAM_END,
 * which is only warned about) or AVERROR_EXTERNAL on a zlib error. */
static int png_decode_idat(PNGDecContext *s, int length)
{
    int ret;
    s->zstream.avail_in = FFMIN(length, bytestream2_get_bytes_left(&s->gb));
    s->zstream.next_in  = s->gb.buffer;
    bytestream2_skip(&s->gb, length);

    /* decode one line if possible */
    while (s->zstream.avail_in > 0) {
        ret = inflate(&s->zstream, Z_PARTIAL_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END) {
            av_log(s->avctx, AV_LOG_ERROR, "inflate returned error %d\n", ret);
            return AVERROR_EXTERNAL;
        }
        if (s->zstream.avail_out == 0) {
            /* a full (filter byte + data) row is in crow_buf */
            if (!(s->pic_state & PNG_ALLIMAGE)) {
                png_handle_row(s);
            }
            s->zstream.avail_out = s->crow_size;
            s->zstream.next_out  = s->crow_buf;
        }
        if (ret == Z_STREAM_END && s->zstream.avail_in > 0) {
            av_log(s->avctx, AV_LOG_WARNING,
                   "%d undecompressed bytes left in buffer\n", s->zstream.avail_in);
            return 0;
        }
    }
    return 0;
}
/* Inflate the zlib stream [data, data_end) into a fresh AVBPrint 'bp'.
 * On success bp holds the NUL-terminated output (caller finalizes/frees);
 * on failure bp is finalized here and an AVERROR code is returned. */
static int decode_zbuf(AVBPrint *bp, const uint8_t *data,
                       const uint8_t *data_end)
{
    z_stream zstream;
    unsigned char *buf;
    unsigned buf_size;
    int ret;

    zstream.zalloc = ff_png_zalloc;
    zstream.zfree  = ff_png_zfree;
    zstream.opaque = NULL;
    if (inflateInit(&zstream) != Z_OK)
        return AVERROR_EXTERNAL;
    zstream.next_in  = data;
    zstream.avail_in = data_end - data;
    av_bprint_init(bp, 0, AV_BPRINT_SIZE_UNLIMITED);

    while (zstream.avail_in > 0) {
        av_bprint_get_buffer(bp, 2, &buf, &buf_size);
        if (buf_size < 2) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        zstream.next_out  = buf;
        /* keep one byte spare for the final NUL terminator */
        zstream.avail_out = buf_size - 1;
        ret = inflate(&zstream, Z_PARTIAL_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END) {
            ret = AVERROR_EXTERNAL;
            goto fail;
        }
        bp->len += zstream.next_out - buf;
        if (ret == Z_STREAM_END)
            break;
    }
    inflateEnd(&zstream);
    bp->str[bp->len] = 0;
    return 0;

fail:
    inflateEnd(&zstream);
    av_bprint_finalize(bp, NULL);
    return ret;
}
  449. static uint8_t *iso88591_to_utf8(const uint8_t *in, size_t size_in)
  450. {
  451. size_t extra = 0, i;
  452. uint8_t *out, *q;
  453. for (i = 0; i < size_in; i++)
  454. extra += in[i] >= 0x80;
  455. if (size_in == SIZE_MAX || extra > SIZE_MAX - size_in - 1)
  456. return NULL;
  457. q = out = av_malloc(size_in + extra + 1);
  458. if (!out)
  459. return NULL;
  460. for (i = 0; i < size_in; i++) {
  461. if (in[i] >= 0x80) {
  462. *(q++) = 0xC0 | (in[i] >> 6);
  463. *(q++) = 0x80 | (in[i] & 0x3F);
  464. } else {
  465. *(q++) = in[i];
  466. }
  467. }
  468. *(q++) = 0;
  469. return out;
  470. }
/* Parse a tEXt/zTXt chunk body: "keyword\0[method]text", optionally
 * zlib-compressed, transcode keyword and text from Latin-1 to UTF-8 and
 * store the pair into 'dict' (which takes ownership of both strings). */
static int decode_text_chunk(PNGDecContext *s, uint32_t length, int compressed,
                             AVDictionary **dict)
{
    int ret, method;
    const uint8_t *data        = s->gb.buffer;
    const uint8_t *data_end    = data + length;
    const uint8_t *keyword     = data;
    const uint8_t *keyword_end = memchr(keyword, 0, data_end - keyword);
    uint8_t *kw_utf8 = NULL, *text, *txt_utf8 = NULL;
    unsigned text_len;
    AVBPrint bp;

    if (!keyword_end)
        return AVERROR_INVALIDDATA;
    data = keyword_end + 1;

    if (compressed) {
        if (data == data_end)
            return AVERROR_INVALIDDATA;
        method = *(data++);
        if (method)                     /* only method 0 (deflate) is defined */
            return AVERROR_INVALIDDATA;
        if ((ret = decode_zbuf(&bp, data, data_end)) < 0)
            return ret;
        text_len = bp.len;
        ret      = av_bprint_finalize(&bp, (char **)&text);
        if (ret < 0)
            return ret;
    } else {
        text     = (uint8_t *)data;
        text_len = data_end - text;
    }

    kw_utf8  = iso88591_to_utf8(keyword, keyword_end - keyword);
    txt_utf8 = iso88591_to_utf8(text, text_len);
    if (text != data)                   /* free only the decompressed copy */
        av_free(text);
    if (!(kw_utf8 && txt_utf8)) {
        av_free(kw_utf8);
        av_free(txt_utf8);
        return AVERROR(ENOMEM);
    }

    /* dictionary takes ownership of both UTF-8 strings */
    av_dict_set(dict, kw_utf8, txt_utf8,
                AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
    return 0;
}
/* Parse the 13-byte IHDR chunk: dimensions, bit depth, color type,
 * compression/filter/interlace method. Rejects duplicates, IHDR after
 * IDAT, bad sizes and unsupported bit depths; on error the context's
 * dimensions are zeroed so later chunks cannot use stale values. */
static int decode_ihdr_chunk(AVCodecContext *avctx, PNGDecContext *s,
                             uint32_t length)
{
    if (length != 13)
        return AVERROR_INVALIDDATA;

    if (s->pic_state & PNG_IDAT) {
        av_log(avctx, AV_LOG_ERROR, "IHDR after IDAT\n");
        return AVERROR_INVALIDDATA;
    }

    if (s->hdr_state & PNG_IHDR) {
        av_log(avctx, AV_LOG_ERROR, "Multiple IHDR\n");
        return AVERROR_INVALIDDATA;
    }

    s->width  = s->cur_w = bytestream2_get_be32(&s->gb);
    s->height = s->cur_h = bytestream2_get_be32(&s->gb);
    if (av_image_check_size(s->width, s->height, 0, avctx)) {
        s->cur_w = s->cur_h = s->width = s->height = 0;
        av_log(avctx, AV_LOG_ERROR, "Invalid image size\n");
        return AVERROR_INVALIDDATA;
    }
    s->bit_depth = bytestream2_get_byte(&s->gb);
    if (s->bit_depth != 1 && s->bit_depth != 2 && s->bit_depth != 4 &&
        s->bit_depth != 8 && s->bit_depth != 16) {
        av_log(avctx, AV_LOG_ERROR, "Invalid bit depth\n");
        goto error;
    }
    s->color_type       = bytestream2_get_byte(&s->gb);
    s->compression_type = bytestream2_get_byte(&s->gb);
    if (s->compression_type) {
        av_log(avctx, AV_LOG_ERROR, "Invalid compression method %d\n", s->compression_type);
        goto error;
    }
    s->filter_type      = bytestream2_get_byte(&s->gb);
    s->interlace_type   = bytestream2_get_byte(&s->gb);
    bytestream2_skip(&s->gb, 4); /* crc */
    s->hdr_state |= PNG_IHDR;
    if (avctx->debug & FF_DEBUG_PICT_INFO)
        av_log(avctx, AV_LOG_DEBUG, "width=%d height=%d depth=%d color_type=%d "
                "compression_type=%d filter_type=%d interlace_type=%d\n",
                s->width, s->height, s->bit_depth, s->color_type,
                s->compression_type, s->filter_type, s->interlace_type);
    return 0;
error:
    /* leave the context in a safe, empty state */
    s->cur_w = s->cur_h = s->width = s->height = 0;
    s->bit_depth = 8;
    return AVERROR_INVALIDDATA;
}
/* Parse a pHYs chunk into the sample aspect ratio; the unit specifier is
 * ignored since only the x/y ratio matters for SAR. */
static int decode_phys_chunk(AVCodecContext *avctx, PNGDecContext *s)
{
    if (s->pic_state & PNG_IDAT) {
        av_log(avctx, AV_LOG_ERROR, "pHYs after IDAT\n");
        return AVERROR_INVALIDDATA;
    }
    /* be32 stored into int: values >= 2^31 come out negative and are
     * rejected by the check below */
    avctx->sample_aspect_ratio.num = bytestream2_get_be32(&s->gb);
    avctx->sample_aspect_ratio.den = bytestream2_get_be32(&s->gb);
    if (avctx->sample_aspect_ratio.num < 0 || avctx->sample_aspect_ratio.den < 0)
        avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
    bytestream2_skip(&s->gb, 1); /* unit specifier */
    bytestream2_skip(&s->gb, 4); /* crc */
    return 0;
}
/* Handle an IDAT (or APNG fdAT payload) chunk. On the first IDAT of a
 * picture this also performs all per-picture setup: pixel format
 * selection from bit depth/color type (with tRNS promotion to an alpha
 * format), frame buffer allocation, row buffers and zlib output wiring.
 * Then decompresses 'length' bytes of image data. */
static int decode_idat_chunk(AVCodecContext *avctx, PNGDecContext *s,
                             uint32_t length, AVFrame *p)
{
    int ret;
    size_t byte_depth = s->bit_depth > 8 ? 2 : 1;

    if (!(s->hdr_state & PNG_IHDR)) {
        av_log(avctx, AV_LOG_ERROR, "IDAT without IHDR\n");
        return AVERROR_INVALIDDATA;
    }
    if (!(s->pic_state & PNG_IDAT)) {
        /* init image info */
        ret = ff_set_dimensions(avctx, s->width, s->height);
        if (ret < 0)
            return ret;

        s->channels       = ff_png_get_nb_channels(s->color_type);
        s->bits_per_pixel = s->bit_depth * s->channels;
        s->bpp            = (s->bits_per_pixel + 7) >> 3;
        s->row_size       = (s->cur_w * s->bits_per_pixel + 7) >> 3;

        /* map (bit_depth, color_type) to an output pixel format */
        if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
            s->color_type == PNG_COLOR_TYPE_RGB) {
            avctx->pix_fmt = AV_PIX_FMT_RGB24;
        } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
                   s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
            avctx->pix_fmt = AV_PIX_FMT_RGBA;
        } else if ((s->bit_depth == 2 || s->bit_depth == 4 || s->bit_depth == 8) &&
                   s->color_type == PNG_COLOR_TYPE_GRAY) {
            avctx->pix_fmt = AV_PIX_FMT_GRAY8;
        } else if (s->bit_depth == 16 &&
                   s->color_type == PNG_COLOR_TYPE_GRAY) {
            avctx->pix_fmt = AV_PIX_FMT_GRAY16BE;
        } else if (s->bit_depth == 16 &&
                   s->color_type == PNG_COLOR_TYPE_RGB) {
            avctx->pix_fmt = AV_PIX_FMT_RGB48BE;
        } else if (s->bit_depth == 16 &&
                   s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
            avctx->pix_fmt = AV_PIX_FMT_RGBA64BE;
        } else if ((s->bits_per_pixel == 1 || s->bits_per_pixel == 2 || s->bits_per_pixel == 4 || s->bits_per_pixel == 8) &&
                   s->color_type == PNG_COLOR_TYPE_PALETTE) {
            avctx->pix_fmt = AV_PIX_FMT_PAL8;
        } else if (s->bit_depth == 1 && s->bits_per_pixel == 1 && avctx->codec_id != AV_CODEC_ID_APNG) {
            avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
        } else if (s->bit_depth == 8 &&
                   s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
            avctx->pix_fmt = AV_PIX_FMT_YA8;
        } else if (s->bit_depth == 16 &&
                   s->color_type == PNG_COLOR_TYPE_GRAY_ALPHA) {
            avctx->pix_fmt = AV_PIX_FMT_YA16BE;
        } else {
            avpriv_report_missing_feature(avctx,
                                          "Bit depth %d color type %d",
                                          s->bit_depth, s->color_type);
            return AVERROR_PATCHWELCOME;
        }

        /* a non-palette tRNS chunk promotes the format to its alpha variant */
        if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) {
            switch (avctx->pix_fmt) {
            case AV_PIX_FMT_RGB24:
                avctx->pix_fmt = AV_PIX_FMT_RGBA;
                break;
            case AV_PIX_FMT_RGB48BE:
                avctx->pix_fmt = AV_PIX_FMT_RGBA64BE;
                break;
            case AV_PIX_FMT_GRAY8:
                avctx->pix_fmt = AV_PIX_FMT_YA8;
                break;
            case AV_PIX_FMT_GRAY16BE:
                avctx->pix_fmt = AV_PIX_FMT_YA16BE;
                break;
            default:
                avpriv_request_sample(avctx, "bit depth %d "
                                      "and color type %d with TRNS",
                                      s->bit_depth, s->color_type);
                return AVERROR_INVALIDDATA;
            }
            s->bpp += byte_depth;   /* output rows carry the extra alpha byte(s) */
        }

        if ((ret = ff_thread_get_buffer(avctx, &s->picture, AV_GET_BUFFER_FLAG_REF)) < 0)
            return ret;
        /* APNG: keep a snapshot for DISPOSE_OP_PREVIOUS unless the last
         * frame itself disposed to previous */
        if (avctx->codec_id == AV_CODEC_ID_APNG && s->last_dispose_op != APNG_DISPOSE_OP_PREVIOUS) {
            ff_thread_release_buffer(avctx, &s->previous_picture);
            if ((ret = ff_thread_get_buffer(avctx, &s->previous_picture, AV_GET_BUFFER_FLAG_REF)) < 0)
                return ret;
        }
        p->pict_type        = AV_PICTURE_TYPE_I;
        p->key_frame        = 1;
        p->interlaced_frame = !!s->interlace_type;

        ff_thread_finish_setup(avctx);

        /* compute the compressed row size */
        if (!s->interlace_type) {
            s->crow_size = s->row_size + 1;
        } else {
            s->pass          = 0;
            s->pass_row_size = ff_png_pass_row_size(s->pass,
                                                    s->bits_per_pixel,
                                                    s->cur_w);
            s->crow_size = s->pass_row_size + 1;
        }
        ff_dlog(avctx, "row_size=%d crow_size =%d\n",
                s->row_size, s->crow_size);
        s->image_buf      = p->data[0];
        s->image_linesize = p->linesize[0];
        /* copy the palette if needed */
        if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
            memcpy(p->data[1], s->palette, 256 * sizeof(uint32_t));
        /* empty row is used if differencing to the first row */
        av_fast_padded_mallocz(&s->last_row, &s->last_row_size, s->row_size);
        if (!s->last_row)
            return AVERROR_INVALIDDATA;
        if (s->interlace_type ||
            s->color_type == PNG_COLOR_TYPE_RGB_ALPHA) {
            av_fast_padded_malloc(&s->tmp_row, &s->tmp_row_size, s->row_size);
            if (!s->tmp_row)
                return AVERROR_INVALIDDATA;
        }
        /* compressed row */
        av_fast_padded_malloc(&s->buffer, &s->buffer_size, s->row_size + 16);
        if (!s->buffer)
            return AVERROR(ENOMEM);

        /* we want crow_buf+1 to be 16-byte aligned */
        s->crow_buf          = s->buffer + 15;
        s->zstream.avail_out = s->crow_size;
        s->zstream.next_out  = s->crow_buf;
    }

    s->pic_state |= PNG_IDAT;

    /* set image to non-transparent bpp while decompressing */
    if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE)
        s->bpp -= byte_depth;

    ret = png_decode_idat(s, length);

    if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE)
        s->bpp += byte_depth;

    if (ret < 0)
        return ret;

    bytestream2_skip(&s->gb, 4); /* crc */

    return 0;
}
  709. static int decode_plte_chunk(AVCodecContext *avctx, PNGDecContext *s,
  710. uint32_t length)
  711. {
  712. int n, i, r, g, b;
  713. if ((length % 3) != 0 || length > 256 * 3)
  714. return AVERROR_INVALIDDATA;
  715. /* read the palette */
  716. n = length / 3;
  717. for (i = 0; i < n; i++) {
  718. r = bytestream2_get_byte(&s->gb);
  719. g = bytestream2_get_byte(&s->gb);
  720. b = bytestream2_get_byte(&s->gb);
  721. s->palette[i] = (0xFFU << 24) | (r << 16) | (g << 8) | b;
  722. }
  723. for (; i < 256; i++)
  724. s->palette[i] = (0xFFU << 24);
  725. s->hdr_state |= PNG_PLTE;
  726. bytestream2_skip(&s->gb, 4); /* crc */
  727. return 0;
  728. }
/* Parse a tRNS chunk: per-entry alpha for palette images, or a single
 * transparent color (stored big-endian in transparent_color_be) for
 * gray/RGB images. Must come after IHDR (and PLTE) and before IDAT. */
static int decode_trns_chunk(AVCodecContext *avctx, PNGDecContext *s,
                             uint32_t length)
{
    int v, i;

    if (!(s->hdr_state & PNG_IHDR)) {
        av_log(avctx, AV_LOG_ERROR, "trns before IHDR\n");
        return AVERROR_INVALIDDATA;
    }

    if (s->pic_state & PNG_IDAT) {
        av_log(avctx, AV_LOG_ERROR, "trns after IDAT\n");
        return AVERROR_INVALIDDATA;
    }

    if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
        if (length > 256 || !(s->hdr_state & PNG_PLTE))
            return AVERROR_INVALIDDATA;
        /* one alpha byte per palette entry, remaining entries stay opaque */
        for (i = 0; i < length; i++) {
            unsigned v = bytestream2_get_byte(&s->gb);
            s->palette[i] = (s->palette[i] & 0x00ffffff) | (v << 24);
        }
    } else if (s->color_type == PNG_COLOR_TYPE_GRAY || s->color_type == PNG_COLOR_TYPE_RGB) {
        if ((s->color_type == PNG_COLOR_TYPE_GRAY && length != 2) ||
            (s->color_type == PNG_COLOR_TYPE_RGB && length != 6) ||
            s->bit_depth == 1)
            return AVERROR_INVALIDDATA;
        for (i = 0; i < length / 2; i++) {
            /* only use the least significant bits */
            v = av_mod_uintp2(bytestream2_get_be16(&s->gb), s->bit_depth);
            if (s->bit_depth > 8)
                AV_WB16(&s->transparent_color_be[2 * i], v);
            else
                s->transparent_color_be[i] = v;
        }
    } else {
        return AVERROR_INVALIDDATA;
    }

    bytestream2_skip(&s->gb, 4); /* crc */
    s->has_trns = 1;

    return 0;
}
/* Parse an iCCP chunk: "profile name\0" + compression method byte +
 * zlib-compressed ICC profile, attached to the frame as
 * AV_FRAME_DATA_ICC_PROFILE side data with the name in its metadata. */
static int decode_iccp_chunk(PNGDecContext *s, int length, AVFrame *f)
{
    int ret, cnt = 0;
    uint8_t *data, profile_name[82];
    AVBPrint bp;
    AVFrameSideData *sd;

    /* profile name: 1-79 bytes plus NUL terminator (spec limit) */
    while ((profile_name[cnt++] = bytestream2_get_byte(&s->gb)) && cnt < 81);
    if (cnt > 80) {
        av_log(s->avctx, AV_LOG_ERROR, "iCCP with invalid name!\n");
        return AVERROR_INVALIDDATA;
    }

    length = FFMAX(length - cnt, 0);

    if (bytestream2_get_byte(&s->gb) != 0) {
        av_log(s->avctx, AV_LOG_ERROR, "iCCP with invalid compression!\n");
        return AVERROR_INVALIDDATA;
    }

    length = FFMAX(length - 1, 0);

    if ((ret = decode_zbuf(&bp, s->gb.buffer, s->gb.buffer + length)) < 0)
        return ret;

    ret = av_bprint_finalize(&bp, (char **)&data);
    if (ret < 0)
        return ret;

    /* NOTE(review): bp.len is read after av_bprint_finalize(); works with the
     * current bprint implementation but relies on len surviving finalize. */
    sd = av_frame_new_side_data(f, AV_FRAME_DATA_ICC_PROFILE, bp.len);
    if (!sd) {
        av_free(data);
        return AVERROR(ENOMEM);
    }

    av_dict_set(&sd->metadata, "name", profile_name, 0);
    memcpy(sd->data, data, bp.len);
    av_free(data);

    /* ICC compressed data and CRC */
    bytestream2_skip(&s->gb, length + 4);

    return 0;
}
/* Expand rows stored packed at 1/2/4 bits per pixel into one byte per
 * pixel, in place (the frame buffer is already wide enough). Work runs
 * right-to-left within each row so unexpanded source bytes are never
 * overwritten before they are read. Non-palette gray values are scaled
 * to full 8-bit range (x0x55 for 2-bit, x0x11 for 4-bit). */
static void handle_small_bpp(PNGDecContext *s, AVFrame *p)
{
    if (s->bits_per_pixel == 1 && s->color_type == PNG_COLOR_TYPE_PALETTE) {
        int i, j, k;
        uint8_t *pd = p->data[0];
        for (j = 0; j < s->height; j++) {
            i = s->width / 8;
            /* tail: the partial final byte, expanded high-bit first */
            for (k = 7; k >= 1; k--)
                if ((s->width&7) >= k)
                    pd[8*i + k - 1] = (pd[i]>>8-k) & 1;
            for (i--; i >= 0; i--) {
                pd[8*i + 7]=  pd[i]     & 1;
                pd[8*i + 6]= (pd[i]>>1) & 1;
                pd[8*i + 5]= (pd[i]>>2) & 1;
                pd[8*i + 4]= (pd[i]>>3) & 1;
                pd[8*i + 3]= (pd[i]>>4) & 1;
                pd[8*i + 2]= (pd[i]>>5) & 1;
                pd[8*i + 1]= (pd[i]>>6) & 1;
                pd[8*i + 0]=  pd[i]>>7;
            }
            pd += s->image_linesize;
        }
    } else if (s->bits_per_pixel == 2) {
        int i, j;
        uint8_t *pd = p->data[0];
        for (j = 0; j < s->height; j++) {
            i = s->width / 4;
            if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
                /* palette indices: keep raw 2-bit values */
                if ((s->width&3) >= 3) pd[4*i + 2]= (pd[i] >> 2) & 3;
                if ((s->width&3) >= 2) pd[4*i + 1]= (pd[i] >> 4) & 3;
                if ((s->width&3) >= 1) pd[4*i + 0]=  pd[i] >> 6;
                for (i--; i >= 0; i--) {
                    pd[4*i + 3]=  pd[i]     & 3;
                    pd[4*i + 2]= (pd[i]>>2) & 3;
                    pd[4*i + 1]= (pd[i]>>4) & 3;
                    pd[4*i + 0]=  pd[i]>>6;
                }
            } else {
                /* grayscale: scale 0..3 to 0..255 */
                if ((s->width&3) >= 3) pd[4*i + 2]= ((pd[i]>>2) & 3)*0x55;
                if ((s->width&3) >= 2) pd[4*i + 1]= ((pd[i]>>4) & 3)*0x55;
                if ((s->width&3) >= 1) pd[4*i + 0]= ( pd[i]>>6     )*0x55;
                for (i--; i >= 0; i--) {
                    pd[4*i + 3]= ( pd[i]     & 3)*0x55;
                    pd[4*i + 2]= ((pd[i]>>2) & 3)*0x55;
                    pd[4*i + 1]= ((pd[i]>>4) & 3)*0x55;
                    pd[4*i + 0]= ( pd[i]>>6     )*0x55;
                }
            }
            pd += s->image_linesize;
        }
    } else if (s->bits_per_pixel == 4) {
        int i, j;
        uint8_t *pd = p->data[0];
        for (j = 0; j < s->height; j++) {
            i = s->width/2;
            if (s->color_type == PNG_COLOR_TYPE_PALETTE) {
                if (s->width&1) pd[2*i+0]= pd[i]>>4;
                for (i--; i >= 0; i--) {
                    pd[2*i + 1] = pd[i] & 15;
                    pd[2*i + 0] = pd[i] >> 4;
                }
            } else {
                /* grayscale: scale 0..15 to 0..255 */
                if (s->width & 1) pd[2*i + 0]= (pd[i] >> 4) * 0x11;
                for (i--; i >= 0; i--) {
                    pd[2*i + 1] = (pd[i] & 15) * 0x11;
                    pd[2*i + 0] = (pd[i] >> 4) * 0x11;
                }
            }
            pd += s->image_linesize;
        }
    }
}
/* Parse an APNG fcTL (frame control) chunk: validate the sub-frame
 * rectangle against the canvas, normalize dispose/blend ops, and save
 * the previous frame's geometry for dispose handling. */
static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s,
                             uint32_t length)
{
    uint32_t sequence_number;
    int cur_w, cur_h, x_offset, y_offset, dispose_op, blend_op;

    if (length != 26)
        return AVERROR_INVALIDDATA;

    if (!(s->hdr_state & PNG_IHDR)) {
        av_log(avctx, AV_LOG_ERROR, "fctl before IHDR\n");
        return AVERROR_INVALIDDATA;
    }

    /* remember the outgoing frame's placement for dispose handling */
    s->last_w = s->cur_w;
    s->last_h = s->cur_h;
    s->last_x_offset = s->x_offset;
    s->last_y_offset = s->y_offset;
    s->last_dispose_op = s->dispose_op;

    sequence_number = bytestream2_get_be32(&s->gb);
    cur_w           = bytestream2_get_be32(&s->gb);
    cur_h           = bytestream2_get_be32(&s->gb);
    x_offset        = bytestream2_get_be32(&s->gb);
    y_offset        = bytestream2_get_be32(&s->gb);
    bytestream2_skip(&s->gb, 4); /* delay_num (2), delay_den (2) */
    dispose_op      = bytestream2_get_byte(&s->gb);
    blend_op        = bytestream2_get_byte(&s->gb);
    bytestream2_skip(&s->gb, 4); /* crc */

    /* frame 0 must cover the whole canvas; every frame must fit inside it
     * (&& binds tighter than ||: the sequence-0 check is one alternative) */
    if (sequence_number == 0 &&
        (cur_w != s->width ||
         cur_h != s->height ||
         x_offset != 0 ||
         y_offset != 0) ||
        cur_w <= 0 || cur_h <= 0 ||
        x_offset < 0 || y_offset < 0 ||
        cur_w > s->width - x_offset|| cur_h > s->height - y_offset)
            return AVERROR_INVALIDDATA;

    if (blend_op != APNG_BLEND_OP_OVER && blend_op != APNG_BLEND_OP_SOURCE) {
        av_log(avctx, AV_LOG_ERROR, "Invalid blend_op %d\n", blend_op);
        return AVERROR_INVALIDDATA;
    }

    if ((sequence_number == 0 || !s->previous_picture.f->data[0]) &&
        dispose_op == APNG_DISPOSE_OP_PREVIOUS) {
        // No previous frame to revert to for the first frame
        // Spec says to just treat it as a APNG_DISPOSE_OP_BACKGROUND
        dispose_op = APNG_DISPOSE_OP_BACKGROUND;
    }

    if (blend_op == APNG_BLEND_OP_OVER && !s->has_trns && (
            avctx->pix_fmt == AV_PIX_FMT_RGB24 ||
            avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
            avctx->pix_fmt == AV_PIX_FMT_PAL8 ||
            avctx->pix_fmt == AV_PIX_FMT_GRAY8 ||
            avctx->pix_fmt == AV_PIX_FMT_GRAY16BE ||
            avctx->pix_fmt == AV_PIX_FMT_MONOBLACK
        )) {
        // APNG_BLEND_OP_OVER is the same as APNG_BLEND_OP_SOURCE when there is no alpha channel
        blend_op = APNG_BLEND_OP_SOURCE;
    }

    s->cur_w      = cur_w;
    s->cur_h      = cur_h;
    s->x_offset   = x_offset;
    s->y_offset   = y_offset;
    s->dispose_op = dispose_op;
    s->blend_op   = blend_op;

    return 0;
}
  937. static void handle_p_frame_png(PNGDecContext *s, AVFrame *p)
  938. {
  939. int i, j;
  940. uint8_t *pd = p->data[0];
  941. uint8_t *pd_last = s->last_picture.f->data[0];
  942. int ls = FFMIN(av_image_get_linesize(p->format, s->width, 0), s->width * s->bpp);
  943. ff_thread_await_progress(&s->last_picture, INT_MAX, 0);
  944. for (j = 0; j < s->height; j++) {
  945. for (i = 0; i < ls; i++)
  946. pd[i] += pd_last[i];
  947. pd += s->image_linesize;
  948. pd_last += s->image_linesize;
  949. }
  950. }
// divide by 255 and round to nearest
// apply a fast variant: (X+127)/255 = ((X+127)*257+257)>>16 = ((X+128)*257)>>16
#define FAST_DIV255(x) ((((x) + 128) * 257) >> 16)

/**
 * Composite the current APNG frame rectangle onto the output canvas.
 *
 * First applies the previous frame's disposal operation (NONE/BACKGROUND
 * restore from last_picture, PREVIOUS restores from previous_picture) into
 * a temporary canvas, then blends the newly decoded rectangle onto it with
 * either APNG_BLEND_OP_SOURCE (plain copy) or APNG_BLEND_OP_OVER
 * (alpha compositing), and finally copies the result back into *p.
 *
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure, or
 * AVERROR_PATCHWELCOME for unsupported blend/pixel-format combinations.
 */
static int handle_p_frame_apng(AVCodecContext *avctx, PNGDecContext *s,
                               AVFrame *p)
{
    size_t x, y;
    uint8_t *buffer;

    /* OVER blending is only implemented for these alpha-capable formats;
     * decode_fctl_chunk already downgrades OVER to SOURCE for the
     * alpha-less formats, so anything else is unsupported. */
    if (s->blend_op == APNG_BLEND_OP_OVER &&
        avctx->pix_fmt != AV_PIX_FMT_RGBA &&
        avctx->pix_fmt != AV_PIX_FMT_GRAY8A &&
        avctx->pix_fmt != AV_PIX_FMT_PAL8) {
        avpriv_request_sample(avctx, "Blending with pixel format %s",
                              av_get_pix_fmt_name(avctx->pix_fmt));
        return AVERROR_PATCHWELCOME;
    }

    /* Scratch canvas holding the disposed previous output. */
    buffer = av_malloc_array(s->image_linesize, s->height);
    if (!buffer)
        return AVERROR(ENOMEM);

    // Do the disposal operation specified by the last frame on the frame
    if (s->last_dispose_op != APNG_DISPOSE_OP_PREVIOUS) {
        ff_thread_await_progress(&s->last_picture, INT_MAX, 0);
        memcpy(buffer, s->last_picture.f->data[0], s->image_linesize * s->height);

        /* BACKGROUND: clear the previous frame's rectangle to transparent. */
        if (s->last_dispose_op == APNG_DISPOSE_OP_BACKGROUND)
            for (y = s->last_y_offset; y < s->last_y_offset + s->last_h; ++y)
                memset(buffer + s->image_linesize * y + s->bpp * s->last_x_offset, 0, s->bpp * s->last_w);

        /* Save the disposed canvas for a future DISPOSE_OP_PREVIOUS. */
        memcpy(s->previous_picture.f->data[0], buffer, s->image_linesize * s->height);
        ff_thread_report_progress(&s->previous_picture, INT_MAX, 0);
    } else {
        /* PREVIOUS: start from the saved pre-frame canvas. */
        ff_thread_await_progress(&s->previous_picture, INT_MAX, 0);
        memcpy(buffer, s->previous_picture.f->data[0], s->image_linesize * s->height);
    }

    // Perform blending
    if (s->blend_op == APNG_BLEND_OP_SOURCE) {
        /* SOURCE: overwrite the rectangle with the new frame's pixels. */
        for (y = s->y_offset; y < s->y_offset + s->cur_h; ++y) {
            size_t row_start = s->image_linesize * y + s->bpp * s->x_offset;
            memcpy(buffer + row_start, p->data[0] + row_start, s->bpp * s->cur_w);
        }
    } else { // APNG_BLEND_OP_OVER
        for (y = s->y_offset; y < s->y_offset + s->cur_h; ++y) {
            uint8_t *foreground = p->data[0] + s->image_linesize * y + s->bpp * s->x_offset;
            uint8_t *background = buffer + s->image_linesize * y + s->bpp * s->x_offset;
            for (x = s->x_offset; x < s->x_offset + s->cur_w; ++x, foreground += s->bpp, background += s->bpp) {
                size_t b;
                uint8_t foreground_alpha, background_alpha, output_alpha;
                uint8_t output[10];

                // Since we might be blending alpha onto alpha, we use the following equations:
                // output_alpha = foreground_alpha + (1 - foreground_alpha) * background_alpha
                // output = (foreground_alpha * foreground + (1 - foreground_alpha) * background_alpha * background) / output_alpha
                switch (avctx->pix_fmt) {
                case AV_PIX_FMT_RGBA:
                    foreground_alpha = foreground[3];
                    background_alpha = background[3];
                    break;
                case AV_PIX_FMT_GRAY8A:
                    foreground_alpha = foreground[1];
                    background_alpha = background[1];
                    break;
                case AV_PIX_FMT_PAL8:
                    /* For palettized data the alpha lives in the palette entry. */
                    foreground_alpha = s->palette[foreground[0]] >> 24;
                    background_alpha = s->palette[background[0]] >> 24;
                    break;
                }

                /* Fully transparent source pixel: background is unchanged. */
                if (foreground_alpha == 0)
                    continue;

                /* Fully opaque source pixel: plain copy, no arithmetic needed. */
                if (foreground_alpha == 255) {
                    memcpy(background, foreground, s->bpp);
                    continue;
                }

                if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
                    // TODO: Alpha blending with PAL8 will likely need the entire image converted over to RGBA first
                    avpriv_request_sample(avctx, "Alpha blending palette samples");
                    background[0] = foreground[0];
                    continue;
                }

                output_alpha = foreground_alpha + FAST_DIV255((255 - foreground_alpha) * background_alpha);

                av_assert0(s->bpp <= 10);

                /* Blend every color component; the alpha byte is the last one. */
                for (b = 0; b < s->bpp - 1; ++b) {
                    if (output_alpha == 0) {
                        output[b] = 0;
                    } else if (background_alpha == 255) {
                        /* Opaque background: the divide by output_alpha cancels. */
                        output[b] = FAST_DIV255(foreground_alpha * foreground[b] + (255 - foreground_alpha) * background[b]);
                    } else {
                        output[b] = (255 * foreground_alpha * foreground[b] + (255 - foreground_alpha) * background_alpha * background[b]) / (255 * output_alpha);
                    }
                }
                output[b] = output_alpha;
                memcpy(background, output, s->bpp);
            }
        }
    }

    // Copy blended buffer into the frame and free
    memcpy(p->data[0], buffer, s->image_linesize * s->height);
    av_free(buffer);

    return 0;
}
/**
 * Walk all chunks of a (A)PNG stream from s->gb, decoding headers, metadata
 * and image payload into *p, then run post-processing: small-bpp expansion,
 * tRNS transparency application and P-frame reconstruction.
 *
 * Returns 0 on success or a negative AVERROR code; thread progress on
 * s->picture and s->previous_picture is reported on both paths.
 */
static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s,
                               AVFrame *p, AVPacket *avpkt)
{
    AVDictionary **metadatap = NULL;
    uint32_t tag, length;
    int decode_next_dat = 0;   /* set by fcTL; gates fdAT/IDAT in APNG */
    int i, ret;

    for (;;) {
        /* Bytes remaining in the packet (unsigned, so <= 0 means exactly 0). */
        length = bytestream2_get_bytes_left(&s->gb);
        if (length <= 0) {
            if (avctx->codec_id == AV_CODEC_ID_PNG &&
                avctx->skip_frame == AVDISCARD_ALL) {
                return 0;
            }

            if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && length == 0) {
                /* APNG packets may legitimately end without IEND. */
                if (!(s->pic_state & PNG_IDAT))
                    return 0;
                else
                    goto exit_loop;
            }
            av_log(avctx, AV_LOG_ERROR, "%d bytes left\n", length);
            /* Truncated input: accept the partial image in non-strict mode. */
            if (   s->pic_state & PNG_ALLIMAGE
                && avctx->strict_std_compliance <= FF_COMPLIANCE_NORMAL)
                goto exit_loop;
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }

        /* Chunk layout: length (4), tag (4), payload, crc (4). */
        length = bytestream2_get_be32(&s->gb);
        if (length > 0x7fffffff || length > bytestream2_get_bytes_left(&s->gb)) {
            av_log(avctx, AV_LOG_ERROR, "chunk too big\n");
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }
        tag = bytestream2_get_le32(&s->gb);
        if (avctx->debug & FF_DEBUG_STARTCODE)
            av_log(avctx, AV_LOG_DEBUG, "png: tag=%s length=%u\n",
                   av_fourcc2str(tag), length);

        /* When discarding everything, only parse the chunks needed to fill
         * stream parameters; skip all other chunk types. */
        if (avctx->codec_id == AV_CODEC_ID_PNG &&
            avctx->skip_frame == AVDISCARD_ALL) {
            switch(tag) {
            case MKTAG('I', 'H', 'D', 'R'):
            case MKTAG('p', 'H', 'Y', 's'):
            case MKTAG('t', 'E', 'X', 't'):
            case MKTAG('I', 'D', 'A', 'T'):
            case MKTAG('t', 'R', 'N', 'S'):
                break;
            default:
                goto skip_tag;
            }
        }

        metadatap = &p->metadata;
        switch (tag) {
        case MKTAG('I', 'H', 'D', 'R'):
            if ((ret = decode_ihdr_chunk(avctx, s, length)) < 0)
                goto fail;
            break;
        case MKTAG('p', 'H', 'Y', 's'):
            if ((ret = decode_phys_chunk(avctx, s)) < 0)
                goto fail;
            break;
        case MKTAG('f', 'c', 'T', 'L'):
            if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG)
                goto skip_tag;
            if ((ret = decode_fctl_chunk(avctx, s, length)) < 0)
                goto fail;
            /* The following fdAT/IDAT chunks belong to this frame. */
            decode_next_dat = 1;
            break;
        case MKTAG('f', 'd', 'A', 'T'):
            if (!CONFIG_APNG_DECODER || avctx->codec_id != AV_CODEC_ID_APNG)
                goto skip_tag;
            if (!decode_next_dat || length < 4) {
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            /* fdAT carries a 4-byte sequence number before the IDAT data. */
            bytestream2_get_be32(&s->gb);
            length -= 4;
            /* fallthrough */
        case MKTAG('I', 'D', 'A', 'T'):
            if (CONFIG_APNG_DECODER && avctx->codec_id == AV_CODEC_ID_APNG && !decode_next_dat)
                goto skip_tag;
            if ((ret = decode_idat_chunk(avctx, s, length, p)) < 0)
                goto fail;
            break;
        case MKTAG('P', 'L', 'T', 'E'):
            /* Ancillary-style tolerance: a broken palette chunk is skipped. */
            if (decode_plte_chunk(avctx, s, length) < 0)
                goto skip_tag;
            break;
        case MKTAG('t', 'R', 'N', 'S'):
            if (decode_trns_chunk(avctx, s, length) < 0)
                goto skip_tag;
            break;
        case MKTAG('t', 'E', 'X', 't'):
            if (decode_text_chunk(s, length, 0, metadatap) < 0)
                av_log(avctx, AV_LOG_WARNING, "Broken tEXt chunk\n");
            bytestream2_skip(&s->gb, length + 4);
            break;
        case MKTAG('z', 'T', 'X', 't'):
            if (decode_text_chunk(s, length, 1, metadatap) < 0)
                av_log(avctx, AV_LOG_WARNING, "Broken zTXt chunk\n");
            bytestream2_skip(&s->gb, length + 4);
            break;
        case MKTAG('s', 'T', 'E', 'R'): {
            /* Stereo indicator: 0/1 select side-by-side layouts. */
            int mode = bytestream2_get_byte(&s->gb);
            AVStereo3D *stereo3d = av_stereo3d_create_side_data(p);
            if (!stereo3d) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }

            if (mode == 0 || mode == 1) {
                stereo3d->type  = AV_STEREO3D_SIDEBYSIDE;
                stereo3d->flags = mode ? 0 : AV_STEREO3D_FLAG_INVERT;
            } else {
                av_log(avctx, AV_LOG_WARNING,
                       "Unknown value in sTER chunk (%d)\n", mode);
            }
            bytestream2_skip(&s->gb, 4); /* crc */
            break;
        }
        case MKTAG('i', 'C', 'C', 'P'): {
            if ((ret = decode_iccp_chunk(s, length, p)) < 0)
                goto fail;
            break;
        }
        case MKTAG('c', 'H', 'R', 'M'): {
            /* Chromaticities: values are stored scaled by 100000. */
            AVMasteringDisplayMetadata *mdm = av_mastering_display_metadata_create_side_data(p);
            if (!mdm) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }

            mdm->white_point[0] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
            mdm->white_point[1] = av_make_q(bytestream2_get_be32(&s->gb), 100000);

            /* RGB Primaries */
            for (i = 0; i < 3; i++) {
                mdm->display_primaries[i][0] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
                mdm->display_primaries[i][1] = av_make_q(bytestream2_get_be32(&s->gb), 100000);
            }

            mdm->has_primaries = 1;
            bytestream2_skip(&s->gb, 4); /* crc */
            break;
        }
        case MKTAG('g', 'A', 'M', 'A'): {
            /* Gamma is exported as a "num/100000" metadata string. */
            AVBPrint bp;
            char *gamma_str;
            int num = bytestream2_get_be32(&s->gb);

            av_bprint_init(&bp, 0, AV_BPRINT_SIZE_UNLIMITED);
            av_bprintf(&bp, "%i/%i", num, 100000);
            ret = av_bprint_finalize(&bp, &gamma_str);
            if (ret < 0)
                return ret;

            av_dict_set(&p->metadata, "gamma", gamma_str, AV_DICT_DONT_STRDUP_VAL);

            bytestream2_skip(&s->gb, 4); /* crc */
            break;
        }
        case MKTAG('I', 'E', 'N', 'D'):
            if (!(s->pic_state & PNG_ALLIMAGE))
                av_log(avctx, AV_LOG_ERROR, "IEND without all image\n");
            if (!(s->pic_state & (PNG_ALLIMAGE|PNG_IDAT))) {
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            bytestream2_skip(&s->gb, 4); /* crc */
            goto exit_loop;
        default:
            /* skip tag */
skip_tag:
            bytestream2_skip(&s->gb, length + 4);
            break;
        }
    }
exit_loop:

    if (avctx->codec_id == AV_CODEC_ID_PNG &&
        avctx->skip_frame == AVDISCARD_ALL) {
        return 0;
    }

    if (percent_missing(s) > avctx->discard_damaged_percentage)
        return AVERROR_INVALIDDATA;

    /* Expand 1/2/4-bit samples to full bytes. */
    if (s->bits_per_pixel <= 4)
        handle_small_bpp(s, p);

    /* apply transparency if needed */
    if (s->has_trns && s->color_type != PNG_COLOR_TYPE_PALETTE) {
        size_t byte_depth = s->bit_depth > 8 ? 2 : 1;
        size_t raw_bpp = s->bpp - byte_depth;   /* bytes per pixel w/o alpha */
        unsigned x, y;

        av_assert0(s->bit_depth > 1);

        for (y = 0; y < s->height; ++y) {
            uint8_t *row = &s->image_buf[s->image_linesize * y];

            if (s->bpp == 2 && byte_depth == 1) {
                /* Fast path: gray8 -> gray8+alpha, expanding in place. */
                uint8_t *pixel = &row[2 * s->width - 1];
                uint8_t *rowp  = &row[1 * s->width - 1];
                int tcolor = s->transparent_color_be[0];
                for (x = s->width; x > 0; --x) {
                    *pixel-- = *rowp == tcolor ? 0 : 0xff;
                    *pixel-- = *rowp--;
                }
            } else if (s->bpp == 4 && byte_depth == 1) {
                /* Fast path: rgb24 -> rgba, expanding in place. */
                uint8_t *pixel = &row[4 * s->width - 1];
                uint8_t *rowp  = &row[3 * s->width - 1];
                int tcolor = AV_RL24(s->transparent_color_be);
                for (x = s->width; x > 0; --x) {
                    *pixel-- = AV_RL24(rowp-2) == tcolor ? 0 : 0xff;
                    *pixel-- = *rowp--;
                    *pixel-- = *rowp--;
                    *pixel-- = *rowp--;
                }
            } else {
                /* since we're updating in-place, we have to go from right to left */
                for (x = s->width; x > 0; --x) {
                    uint8_t *pixel = &row[s->bpp * (x - 1)];
                    memmove(pixel, &row[raw_bpp * (x - 1)], raw_bpp);

                    if (!memcmp(pixel, s->transparent_color_be, raw_bpp)) {
                        memset(&pixel[raw_bpp], 0, byte_depth);
                    } else {
                        memset(&pixel[raw_bpp], 0xff, byte_depth);
                    }
                }
            }
        }
    }

    /* handle P-frames only if a predecessor frame is available */
    if (s->last_picture.f->data[0]) {
        if (   !(avpkt->flags & AV_PKT_FLAG_KEY) && avctx->codec_tag != AV_RL32("MPNG")
            && s->last_picture.f->width == p->width
            && s->last_picture.f->height== p->height
            && s->last_picture.f->format== p->format
         ) {
            if (CONFIG_PNG_DECODER && avctx->codec_id != AV_CODEC_ID_APNG)
                handle_p_frame_png(s, p);
            else if (CONFIG_APNG_DECODER &&
                     s->previous_picture.f->width == p->width  &&
                     s->previous_picture.f->height== p->height &&
                     s->previous_picture.f->format== p->format &&
                     avctx->codec_id == AV_CODEC_ID_APNG &&
                     (ret = handle_p_frame_apng(avctx, s, p)) < 0)
                goto fail;
        }
    }
    ff_thread_report_progress(&s->picture, INT_MAX, 0);
    ff_thread_report_progress(&s->previous_picture, INT_MAX, 0);

    return 0;

fail:
    ff_thread_report_progress(&s->picture, INT_MAX, 0);
    ff_thread_report_progress(&s->previous_picture, INT_MAX, 0);
    return ret;
}
#if CONFIG_PNG_DECODER
/**
 * Decode one standalone PNG (or MNG) image packet.
 *
 * Swaps the previous output into last_picture (so P-frames can reference
 * it), checks the 8-byte signature, sets up a fresh zlib inflate stream
 * and hands chunk parsing to decode_frame_common().
 *
 * Returns the number of bytes consumed on success or a negative AVERROR.
 */
static int decode_frame_png(AVCodecContext *avctx,
                            void *data, int *got_frame,
                            AVPacket *avpkt)
{
    PNGDecContext *const s = avctx->priv_data;
    const uint8_t *buf     = avpkt->data;
    int buf_size           = avpkt->size;
    AVFrame *p;
    int64_t sig;
    int ret;

    ff_thread_release_buffer(avctx, &s->last_picture);
    FFSWAP(ThreadFrame, s->picture, s->last_picture);
    p = s->picture.f;

    bytestream2_init(&s->gb, buf, buf_size);

    /* check signature */
    sig = bytestream2_get_be64(&s->gb);
    if (sig != PNGSIG &&
        sig != MNGSIG) {
        av_log(avctx, AV_LOG_ERROR, "Invalid PNG signature 0x%08"PRIX64".\n", sig);
        return AVERROR_INVALIDDATA;
    }

    /* Reset per-frame decoding state. */
    s->y = s->has_trns = 0;
    s->hdr_state = 0;
    s->pic_state = 0;

    /* init the zlib */
    s->zstream.zalloc = ff_png_zalloc;
    s->zstream.zfree  = ff_png_zfree;
    s->zstream.opaque = NULL;
    ret = inflateInit(&s->zstream);
    if (ret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
        return AVERROR_EXTERNAL;
    }

    if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
        goto the_end;

    if (avctx->skip_frame == AVDISCARD_ALL) {
        /* Parameters were filled but no frame is output. */
        *got_frame = 0;
        ret = bytestream2_tell(&s->gb);
        goto the_end;
    }

    if ((ret = av_frame_ref(data, s->picture.f)) < 0)
        goto the_end;

    *got_frame = 1;

    ret = bytestream2_tell(&s->gb);
the_end:
    inflateEnd(&s->zstream);
    s->crow_buf = NULL;
    return ret;
}
#endif
#if CONFIG_APNG_DECODER
/**
 * Decode one APNG frame packet.
 *
 * On the first call the stream header (IHDR etc.) is parsed from
 * extradata; afterwards each packet carries fcTL + fdAT/IDAT chunks for
 * exactly one frame.
 *
 * Returns the number of bytes consumed on success or a negative AVERROR.
 */
static int decode_frame_apng(AVCodecContext *avctx,
                             void *data, int *got_frame,
                             AVPacket *avpkt)
{
    PNGDecContext *const s = avctx->priv_data;
    int ret;
    AVFrame *p;

    ff_thread_release_buffer(avctx, &s->last_picture);
    FFSWAP(ThreadFrame, s->picture, s->last_picture);
    p = s->picture.f;

    if (!(s->hdr_state & PNG_IHDR)) {
        if (!avctx->extradata_size)
            return AVERROR_INVALIDDATA;

        /* only init fields, there is no zlib use in extradata */
        s->zstream.zalloc = ff_png_zalloc;
        s->zstream.zfree  = ff_png_zfree;

        bytestream2_init(&s->gb, avctx->extradata, avctx->extradata_size);
        if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
            goto end;
    }

    /* reset state for a new frame */
    if ((ret = inflateInit(&s->zstream)) != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
        ret = AVERROR_EXTERNAL;
        goto end;
    }
    s->y = 0;
    s->pic_state = 0;

    bytestream2_init(&s->gb, avpkt->data, avpkt->size);
    if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0)
        goto end;

    if (!(s->pic_state & PNG_ALLIMAGE))
        av_log(avctx, AV_LOG_WARNING, "Frame did not contain a complete image\n");
    if (!(s->pic_state & (PNG_ALLIMAGE|PNG_IDAT))) {
        ret = AVERROR_INVALIDDATA;
        goto end;
    }
    if ((ret = av_frame_ref(data, s->picture.f)) < 0)
        goto end;

    *got_frame = 1;
    ret = bytestream2_tell(&s->gb);

end:
    inflateEnd(&s->zstream);
    return ret;
}
#endif
#if CONFIG_LSCR_DECODER
/**
 * Decode one LEAD Screen Capture packet.
 *
 * A packet is a list of independently zlib-compressed rectangular blocks
 * (header: count, then per-block x/y/x2/y2 + size records, then IDAT-tagged
 * compressed data). Each block is decoded bottom-up into the frame, on top
 * of a copy of the previous output frame.
 *
 * Returns avpkt->size on success or a negative AVERROR.
 */
static int decode_frame_lscr(AVCodecContext *avctx,
                             void *data, int *got_frame,
                             AVPacket *avpkt)
{
    PNGDecContext *const s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    AVFrame *frame = data;
    int ret, nb_blocks, offset = 0;

    if (avpkt->size < 2)
        return AVERROR_INVALIDDATA;

    bytestream2_init(gb, avpkt->data, avpkt->size);

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;

    nb_blocks = bytestream2_get_le16(gb);
    if (bytestream2_get_bytes_left(gb) < 2 + nb_blocks * (12 + 8))
        return AVERROR_INVALIDDATA;

    /* Inter frame: start from the previous output. */
    if (s->last_picture.f->data[0]) {
        ret = av_frame_copy(frame, s->last_picture.f);
        if (ret < 0)
            return ret;
    }

    for (int b = 0; b < nb_blocks; b++) {
        int x, y, x2, y2, w, h, left;
        uint32_t csize, size;

        /* Fresh zlib stream for each block. */
        s->zstream.zalloc = ff_png_zalloc;
        s->zstream.zfree  = ff_png_zfree;
        s->zstream.opaque = NULL;

        if ((ret = inflateInit(&s->zstream)) != Z_OK) {
            av_log(avctx, AV_LOG_ERROR, "inflateInit returned error %d\n", ret);
            ret = AVERROR_EXTERNAL;
            goto end;
        }

        /* Block geometry lives in the header table at the packet start. */
        bytestream2_seek(gb, 2 + b * 12, SEEK_SET);

        x = bytestream2_get_le16(gb);
        y = bytestream2_get_le16(gb);
        x2 = bytestream2_get_le16(gb);
        y2 = bytestream2_get_le16(gb);
        s->width  = s->cur_w = w = x2-x;
        s->height = s->cur_h = h = y2-y;

        if (w <= 0 || x < 0 || x >= avctx->width || w + x > avctx->width ||
            h <= 0 || y < 0 || y >= avctx->height || h + y > avctx->height) {
            ret = AVERROR_INVALIDDATA;
            goto end;
        }

        size = bytestream2_get_le32(gb);

        /* A single full-canvas block at the origin is a keyframe. */
        frame->key_frame = (nb_blocks == 1) &&
                           (w == avctx->width) &&
                           (h == avctx->height) &&
                           (x == 0) && (y == 0);

        /* Jump to this block's compressed payload. */
        bytestream2_seek(gb, 2 + nb_blocks * 12 + offset, SEEK_SET);
        csize = bytestream2_get_be32(gb);
        if (bytestream2_get_le32(gb) != MKTAG('I', 'D', 'A', 'T')) {
            ret = AVERROR_INVALIDDATA;
            goto end;
        }

        offset += size;
        left = size;

        s->y                 = 0;
        s->row_size          = w * 3;

        av_fast_padded_malloc(&s->buffer, &s->buffer_size, s->row_size + 16);
        if (!s->buffer) {
            ret = AVERROR(ENOMEM);
            goto end;
        }

        av_fast_padded_malloc(&s->last_row, &s->last_row_size, s->row_size);
        if (!s->last_row) {
            ret = AVERROR(ENOMEM);
            goto end;
        }

        s->crow_size         = w * 3 + 1;
        s->crow_buf          = s->buffer + 15;
        s->zstream.avail_out = s->crow_size;
        s->zstream.next_out  = s->crow_buf;
        /* Negative linesize: the block is stored bottom-up in the frame. */
        s->image_buf         = frame->data[0] + (avctx->height - y - 1) * frame->linesize[0] + x * 3;
        s->image_linesize    =-frame->linesize[0];
        s->bpp               = 3;
        s->pic_state         = 0;

        /* Each IDAT sub-chunk carries 16 bytes of framing overhead. */
        while (left > 16) {
            ret = png_decode_idat(s, csize);
            if (ret < 0)
                goto end;
            left -= csize + 16;
            if (left > 16) {
                bytestream2_skip(gb, 4);
                csize = bytestream2_get_be32(gb);
                if (bytestream2_get_le32(gb) != MKTAG('I', 'D', 'A', 'T')) {
                    ret = AVERROR_INVALIDDATA;
                    goto end;
                }
            }
        }

        inflateEnd(&s->zstream);
    }

    frame->pict_type = frame->key_frame ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;

    av_frame_unref(s->last_picture.f);
    if ((ret = av_frame_ref(s->last_picture.f, frame)) < 0)
        return ret;

    *got_frame = 1;
end:
    /* NOTE(review): on the success path the stream was already ended inside
     * the loop; this second inflateEnd() looks redundant (zlib should just
     * return an error for an already-ended stream) — confirm and simplify. */
    inflateEnd(&s->zstream);

    if (ret < 0)
        return ret;
    return avpkt->size;
}
  1494. static void decode_flush(AVCodecContext *avctx)
  1495. {
  1496. PNGDecContext *s = avctx->priv_data;
  1497. av_frame_unref(s->last_picture.f);
  1498. }
  1499. #endif
#if HAVE_THREADS
/**
 * Frame-threading context update: copy the decoding state a future frame
 * thread needs from src to dst (reference frames always; the full APNG
 * inter-frame state only for the APNG decoder, since plain PNG frames are
 * independent apart from last_picture).
 */
static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    PNGDecContext *psrc = src->priv_data;
    PNGDecContext *pdst = dst->priv_data;
    int ret;

    if (dst == src)
        return 0;

    ff_thread_release_buffer(dst, &pdst->picture);
    if (psrc->picture.f->data[0] &&
        (ret = ff_thread_ref_frame(&pdst->picture, &psrc->picture)) < 0)
        return ret;
    if (CONFIG_APNG_DECODER && dst->codec_id == AV_CODEC_ID_APNG) {
        /* Stream geometry and per-frame fcTL state. */
        pdst->width             = psrc->width;
        pdst->height            = psrc->height;
        pdst->bit_depth         = psrc->bit_depth;
        pdst->color_type        = psrc->color_type;
        pdst->compression_type  = psrc->compression_type;
        pdst->interlace_type    = psrc->interlace_type;
        pdst->filter_type       = psrc->filter_type;
        pdst->cur_w = psrc->cur_w;
        pdst->cur_h = psrc->cur_h;
        pdst->x_offset = psrc->x_offset;
        pdst->y_offset = psrc->y_offset;
        pdst->has_trns = psrc->has_trns;
        memcpy(pdst->transparent_color_be, psrc->transparent_color_be, sizeof(pdst->transparent_color_be));
        pdst->dispose_op = psrc->dispose_op;

        memcpy(pdst->palette, psrc->palette, sizeof(pdst->palette));

        /* Accumulate header flags; IHDR is only parsed once (extradata). */
        pdst->hdr_state |= psrc->hdr_state;

        ff_thread_release_buffer(dst, &pdst->last_picture);
        if (psrc->last_picture.f->data[0] &&
            (ret = ff_thread_ref_frame(&pdst->last_picture, &psrc->last_picture)) < 0)
            return ret;

        ff_thread_release_buffer(dst, &pdst->previous_picture);
        if (psrc->previous_picture.f->data[0] &&
            (ret = ff_thread_ref_frame(&pdst->previous_picture, &psrc->previous_picture)) < 0)
            return ret;
    }

    return 0;
}
#endif
  1541. static av_cold int png_dec_init(AVCodecContext *avctx)
  1542. {
  1543. PNGDecContext *s = avctx->priv_data;
  1544. avctx->color_range = AVCOL_RANGE_JPEG;
  1545. if (avctx->codec_id == AV_CODEC_ID_LSCR)
  1546. avctx->pix_fmt = AV_PIX_FMT_BGR24;
  1547. s->avctx = avctx;
  1548. s->previous_picture.f = av_frame_alloc();
  1549. s->last_picture.f = av_frame_alloc();
  1550. s->picture.f = av_frame_alloc();
  1551. if (!s->previous_picture.f || !s->last_picture.f || !s->picture.f) {
  1552. av_frame_free(&s->previous_picture.f);
  1553. av_frame_free(&s->last_picture.f);
  1554. av_frame_free(&s->picture.f);
  1555. return AVERROR(ENOMEM);
  1556. }
  1557. ff_pngdsp_init(&s->dsp);
  1558. return 0;
  1559. }
  1560. static av_cold int png_dec_end(AVCodecContext *avctx)
  1561. {
  1562. PNGDecContext *s = avctx->priv_data;
  1563. ff_thread_release_buffer(avctx, &s->previous_picture);
  1564. av_frame_free(&s->previous_picture.f);
  1565. ff_thread_release_buffer(avctx, &s->last_picture);
  1566. av_frame_free(&s->last_picture.f);
  1567. ff_thread_release_buffer(avctx, &s->picture);
  1568. av_frame_free(&s->picture.f);
  1569. av_freep(&s->buffer);
  1570. s->buffer_size = 0;
  1571. av_freep(&s->last_row);
  1572. s->last_row_size = 0;
  1573. av_freep(&s->tmp_row);
  1574. s->tmp_row_size = 0;
  1575. return 0;
  1576. }
#if CONFIG_APNG_DECODER
/* Registration entry for the Animated PNG decoder. */
AVCodec ff_apng_decoder = {
    .name           = "apng",
    .long_name      = NULL_IF_CONFIG_SMALL("APNG (Animated Portable Network Graphics) image"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_APNG,
    .priv_data_size = sizeof(PNGDecContext),
    .init           = png_dec_init,
    .close          = png_dec_end,
    .decode         = decode_frame_apng,
    .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_ALLOCATE_PROGRESS,
};
#endif
#if CONFIG_PNG_DECODER
/* Registration entry for the standard PNG decoder. */
AVCodec ff_png_decoder = {
    .name           = "png",
    .long_name      = NULL_IF_CONFIG_SMALL("PNG (Portable Network Graphics) image"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_PNG,
    .priv_data_size = sizeof(PNGDecContext),
    .init           = png_dec_init,
    .close          = png_dec_end,
    .decode         = decode_frame_png,
    .update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
    .caps_internal  = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM | FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_ALLOCATE_PROGRESS,
};
#endif
#if CONFIG_LSCR_DECODER
/* Registration entry for the LEAD Screen Capture decoder. */
AVCodec ff_lscr_decoder = {
    .name           = "lscr",
    .long_name      = NULL_IF_CONFIG_SMALL("LEAD Screen Capture"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_LSCR,
    .priv_data_size = sizeof(PNGDecContext),
    .init           = png_dec_init,
    .close          = png_dec_end,
    .decode         = decode_frame_lscr,
    .flush          = decode_flush,
    .capabilities   = AV_CODEC_CAP_DR1 /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
    .caps_internal  = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM | FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_ALLOCATE_PROGRESS,
};
#endif