/*
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * TIFF image decoder
 * @author Konstantin Shishkov
 */

#include "config.h"
#if CONFIG_ZLIB
#include <zlib.h>
#endif

#include "libavutil/attributes.h"
#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "bytestream.h"
#include "faxcompr.h"
#include "internal.h"
#include "lzw.h"
#include "mathops.h"
#include "tiff.h"
#include "tiff_data.h"
#include "thread.h"

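/* Decoder context: image geometry and per-frame tag values parsed from the
 * IFD, strip layout information, and scratch buffers reused between frames. */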
typedef struct TiffContext {
    AVCodecContext *avctx;
    GetByteContext gb;

    int width, height;
    unsigned int bpp, bppcount;
    uint32_t palette[256];
    int palette_is_set;
    int le;
    enum TiffCompr compr;
    enum TiffPhotometric photometric;
    int planar;
    int subsampling[2];
    int fax_opts;
    int predictor;
    int fill_order;
    uint32_t res[4];

    int strips, rps, sstype;
    int sot;
    int stripsizesoff, stripsize, stripoff, strippos;
    LZWState *lzw;

    uint8_t *deinvert_buf;
    int deinvert_buf_size;
    uint8_t *yuv_line;
    unsigned int yuv_line_size;

    int geotag_count;
    TiffGeoTag *geotags;
} TiffContext;

static void free_geotags(TiffContext *const s)
{
    int i;
    for (i = 0; i < s->geotag_count; i++) {
        if (s->geotags[i].val)
            av_freep(&s->geotags[i].val);
    }
    av_freep(&s->geotags);
    s->geotag_count = 0;
}

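/* Look up a GeoTIFF key in one of the name/type tables from tiff_data.h and
 * return the requested field if the key falls into that table's id range;
 * otherwise fall through to the next table. */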
#define RET_GEOKEY(TYPE, array, element)\
    if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
        key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_name_type_map))\
        return ff_tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].element;

static const char *get_geokey_name(int key)
{
    RET_GEOKEY(VERT, vert, name);
    RET_GEOKEY(PROJ, proj, name);
    RET_GEOKEY(GEOG, geog, name);
    RET_GEOKEY(CONF, conf, name);

    return NULL;
}

static int get_geokey_type(int key)
{
    RET_GEOKEY(VERT, vert, type);
    RET_GEOKEY(PROJ, proj, type);
    RET_GEOKEY(GEOG, geog, type);
    RET_GEOKEY(CONF, conf, type);

    return AVERROR_INVALIDDATA;
}

static int cmp_id_key(const void *id, const void *k)
{
    return *(const int*)id - ((const TiffGeoTagKeyName*)k)->key;
}

static const char *search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
{
    TiffGeoTagKeyName *r = bsearch(&id, keys, n, sizeof(keys[0]), cmp_id_key);
    if (r)
        return r->name;
    return NULL;
}

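/* Translate a GeoTIFF key/value pair into a newly allocated string. Known
 * codes come from the tables in tiff_data.h, everything else becomes
 * "Unknown-<val>". The caller owns the returned buffer. */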
static char *get_geokey_val(int key, int val)
{
    char *ap;

    if (val == TIFF_GEO_KEY_UNDEFINED)
        return av_strdup("undefined");
    if (val == TIFF_GEO_KEY_USER_DEFINED)
        return av_strdup("User-Defined");

#define RET_GEOKEY_VAL(TYPE, array)\
    if (val >= TIFF_##TYPE##_OFFSET &&\
        val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_codes))\
        return av_strdup(ff_tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET]);

    switch (key) {
    case TIFF_GT_MODEL_TYPE_GEOKEY:
        RET_GEOKEY_VAL(GT_MODEL_TYPE, gt_model_type);
        break;
    case TIFF_GT_RASTER_TYPE_GEOKEY:
        RET_GEOKEY_VAL(GT_RASTER_TYPE, gt_raster_type);
        break;
    case TIFF_GEOG_LINEAR_UNITS_GEOKEY:
    case TIFF_PROJ_LINEAR_UNITS_GEOKEY:
    case TIFF_VERTICAL_UNITS_GEOKEY:
        RET_GEOKEY_VAL(LINEAR_UNIT, linear_unit);
        break;
    case TIFF_GEOG_ANGULAR_UNITS_GEOKEY:
    case TIFF_GEOG_AZIMUTH_UNITS_GEOKEY:
        RET_GEOKEY_VAL(ANGULAR_UNIT, angular_unit);
        break;
    case TIFF_GEOGRAPHIC_TYPE_GEOKEY:
        RET_GEOKEY_VAL(GCS_TYPE, gcs_type);
        RET_GEOKEY_VAL(GCSE_TYPE, gcse_type);
        break;
    case TIFF_GEOG_GEODETIC_DATUM_GEOKEY:
        RET_GEOKEY_VAL(GEODETIC_DATUM, geodetic_datum);
        RET_GEOKEY_VAL(GEODETIC_DATUM_E, geodetic_datum_e);
        break;
    case TIFF_GEOG_ELLIPSOID_GEOKEY:
        RET_GEOKEY_VAL(ELLIPSOID, ellipsoid);
        break;
    case TIFF_GEOG_PRIME_MERIDIAN_GEOKEY:
        RET_GEOKEY_VAL(PRIME_MERIDIAN, prime_meridian);
        break;
    case TIFF_PROJECTED_CS_TYPE_GEOKEY:
        ap = av_strdup(search_keyval(ff_tiff_proj_cs_type_codes, FF_ARRAY_ELEMS(ff_tiff_proj_cs_type_codes), val));
        if (ap) return ap;
        break;
    case TIFF_PROJECTION_GEOKEY:
        ap = av_strdup(search_keyval(ff_tiff_projection_codes, FF_ARRAY_ELEMS(ff_tiff_projection_codes), val));
        if (ap) return ap;
        break;
    case TIFF_PROJ_COORD_TRANS_GEOKEY:
        RET_GEOKEY_VAL(COORD_TRANS, coord_trans);
        break;
    case TIFF_VERTICAL_CS_TYPE_GEOKEY:
        RET_GEOKEY_VAL(VERT_CS, vert_cs);
        RET_GEOKEY_VAL(ORTHO_VERT_CS, ortho_vert_cs);
        break;
    }

    ap = av_malloc(14);
    if (ap)
        snprintf(ap, 14, "Unknown-%d", val);
    return ap;
}

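/* Print an array of doubles as one separator-joined string (used for GeoTIFF
 * double-valued metadata). Returns NULL on overflow or allocation failure. */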
static char *doubles2str(double *dp, int count, const char *sep)
{
    int i;
    char *ap, *ap0;
    uint64_t component_len;
    if (!sep) sep = ", ";
    component_len = 24LL + strlen(sep);
    if (count >= (INT_MAX - 1) / component_len)
        return NULL;
    ap = av_malloc(component_len * count + 1);
    if (!ap)
        return NULL;
    ap0   = ap;
    ap[0] = '\0';
    for (i = 0; i < count; i++) {
        unsigned l = snprintf(ap, component_len, "%.15g%s", dp[i], sep);
        if (l >= component_len) {
            av_free(ap0);
            return NULL;
        }
        ap += l;
    }
    ap0[strlen(ap0) - strlen(sep)] = '\0';
    return ap0;
}

static int add_metadata(int count, int type,
                        const char *name, const char *sep, TiffContext *s, AVFrame *frame)
{
    switch (type) {
    case TIFF_DOUBLE: return ff_tadd_doubles_metadata(count, name, sep, &s->gb, s->le, avpriv_frame_get_metadatap(frame));
    case TIFF_SHORT : return ff_tadd_shorts_metadata(count, name, sep, &s->gb, s->le, 0, avpriv_frame_get_metadatap(frame));
    case TIFF_STRING: return ff_tadd_string_metadata(count, name, &s->gb, s->le, avpriv_frame_get_metadatap(frame));
    default         : return AVERROR_INVALIDDATA;
    };
}

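/* Expand sub-byte samples (1, 2 or 4 bits per pixel) to one byte per sample;
 * at 8 bpp and above this degenerates to a plain copy or fill. 'usePtr'
 * selects between reading from 'src' and repeating the constant 'c'. */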
static void av_always_inline horizontal_fill(unsigned int bpp, uint8_t* dst,
                                             int usePtr, const uint8_t *src,
                                             uint8_t c, int width, int offset)
{
    switch (bpp) {
    case 1:
        while (--width >= 0) {
            dst[(width+offset)*8+7] = (usePtr ? src[width] : c)      & 0x1;
            dst[(width+offset)*8+6] = (usePtr ? src[width] : c) >> 1 & 0x1;
            dst[(width+offset)*8+5] = (usePtr ? src[width] : c) >> 2 & 0x1;
            dst[(width+offset)*8+4] = (usePtr ? src[width] : c) >> 3 & 0x1;
            dst[(width+offset)*8+3] = (usePtr ? src[width] : c) >> 4 & 0x1;
            dst[(width+offset)*8+2] = (usePtr ? src[width] : c) >> 5 & 0x1;
            dst[(width+offset)*8+1] = (usePtr ? src[width] : c) >> 6 & 0x1;
            dst[(width+offset)*8+0] = (usePtr ? src[width] : c) >> 7;
        }
        break;
    case 2:
        while (--width >= 0) {
            dst[(width+offset)*4+3] = (usePtr ? src[width] : c)      & 0x3;
            dst[(width+offset)*4+2] = (usePtr ? src[width] : c) >> 2 & 0x3;
            dst[(width+offset)*4+1] = (usePtr ? src[width] : c) >> 4 & 0x3;
            dst[(width+offset)*4+0] = (usePtr ? src[width] : c) >> 6;
        }
        break;
    case 4:
        while (--width >= 0) {
            dst[(width+offset)*2+1] = (usePtr ? src[width] : c) & 0xF;
            dst[(width+offset)*2+0] = (usePtr ? src[width] : c) >> 4;
        }
        break;
    default:
        if (usePtr) {
            memcpy(dst + offset, src, width);
        } else {
            memset(dst + offset, c, width);
        }
    }
}

static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
{
    int i;

    av_fast_padded_malloc(&s->deinvert_buf, &s->deinvert_buf_size, size);
    if (!s->deinvert_buf)
        return AVERROR(ENOMEM);
    for (i = 0; i < size; i++)
        s->deinvert_buf[i] = ff_reverse[src[i]];

    return 0;
}

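/* Scatter one interleaved YCbCr line group into the planar output frame; the
 * slow path clips coordinates when the image size is not a multiple of the
 * subsampling factors. */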
static void unpack_yuv(TiffContext *s, AVFrame *p,
                       const uint8_t *src, int lnum)
{
    int i, j, k;
    int w       = (s->width - 1) / s->subsampling[0] + 1;
    uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
    uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];
    if (s->width % s->subsampling[0] || s->height % s->subsampling[1]) {
        for (i = 0; i < w; i++) {
            for (j = 0; j < s->subsampling[1]; j++)
                for (k = 0; k < s->subsampling[0]; k++)
                    p->data[0][FFMIN(lnum + j, s->height-1) * p->linesize[0] +
                               FFMIN(i * s->subsampling[0] + k, s->width-1)] = *src++;
            *pu++ = *src++;
            *pv++ = *src++;
        }
    } else {
        for (i = 0; i < w; i++) {
            for (j = 0; j < s->subsampling[1]; j++)
                for (k = 0; k < s->subsampling[0]; k++)
                    p->data[0][(lnum + j) * p->linesize[0] +
                               i * s->subsampling[0] + k] = *src++;
            *pu++ = *src++;
            *pv++ = *src++;
        }
    }
}

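/* Deflate-compressed strips: inflate into a temporary buffer, then distribute
 * the rows (and, for YCbCr, the chroma samples) into the output frame. */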
#if CONFIG_ZLIB
static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src,
                           int size)
{
    z_stream zstream = { 0 };
    int zret;

    zstream.next_in   = (uint8_t *)src;
    zstream.avail_in  = size;
    zstream.next_out  = dst;
    zstream.avail_out = *len;
    zret              = inflateInit(&zstream);
    if (zret != Z_OK) {
        av_log(NULL, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
        return zret;
    }
    zret = inflate(&zstream, Z_SYNC_FLUSH);
    inflateEnd(&zstream);
    *len = zstream.total_out;
    return zret == Z_STREAM_END ? Z_OK : zret;
}

static int tiff_unpack_zlib(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
                            const uint8_t *src, int size, int width, int lines,
                            int strip_start, int is_yuv)
{
    uint8_t *zbuf;
    unsigned long outlen;
    int ret, line;
    outlen = width * lines;
    zbuf   = av_malloc(outlen);
    if (!zbuf)
        return AVERROR(ENOMEM);
    if (s->fill_order) {
        if ((ret = deinvert_buffer(s, src, size)) < 0) {
            av_free(zbuf);
            return ret;
        }
        src = s->deinvert_buf;
    }
    ret = tiff_uncompress(zbuf, &outlen, src, size);
    if (ret != Z_OK) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Uncompressing failed (%lu of %lu) with error %d\n", outlen,
               (unsigned long)width * lines, ret);
        av_free(zbuf);
        return AVERROR_UNKNOWN;
    }
    src = zbuf;
    for (line = 0; line < lines; line++) {
        if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
            horizontal_fill(s->bpp, dst, 1, src, 0, width, 0);
        } else {
            memcpy(dst, src, width);
        }
        if (is_yuv) {
            unpack_yuv(s, p, dst, strip_start + line);
            line += s->subsampling[1] - 1;
        }
        dst += stride;
        src += width;
    }
    av_free(zbuf);
    return 0;
}
#endif

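/* CCITT G3/G4/RLE strips are handled by the shared fax decoder in faxcompr.c;
 * the input is first copied (or bit-reversed for FillOrder 2) into a padded
 * temporary buffer. */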
static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride,
                           const uint8_t *src, int size, int width, int lines)
{
    int i, ret = 0;
    int line;
    uint8_t *src2 = av_malloc((unsigned)size +
                              FF_INPUT_BUFFER_PADDING_SIZE);

    if (!src2) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Error allocating temporary buffer\n");
        return AVERROR(ENOMEM);
    }
    if (s->fax_opts & 2) {
        avpriv_request_sample(s->avctx, "Uncompressed fax mode");
        av_free(src2);
        return AVERROR_PATCHWELCOME;
    }
    if (!s->fill_order) {
        memcpy(src2, src, size);
    } else {
        for (i = 0; i < size; i++)
            src2[i] = ff_reverse[src[i]];
    }
    memset(src2 + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    ret = ff_ccitt_unpack(s->avctx, src2, size, dst, lines, stride,
                          s->compr, s->fax_opts);
    if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
        for (line = 0; line < lines; line++) {
            horizontal_fill(s->bpp, dst, 1, dst, 0, width, 0);
            dst += stride;
        }
    av_free(src2);
    return ret;
}

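/* Decode one strip with whatever compression the IFD declared and write
 * 'lines' rows starting at 'strip_start' into 'dst'. For subsampled YCbCr the
 * strip is first unpacked into s->yuv_line and then split into planes. */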
static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
                             const uint8_t *src, int size, int strip_start, int lines)
{
    PutByteContext pb;
    int c, line, pixels, code, ret;
    const uint8_t *ssrc = src;
    int width = ((s->width * s->bpp) + 7) >> 3;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(p->format);
    int is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components >= 2;

    if (s->planar)
        width /= s->bppcount;

    if (size <= 0)
        return AVERROR_INVALIDDATA;

    if (is_yuv) {
        int bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
                             s->subsampling[0] * s->subsampling[1] + 7) >> 3;
        av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row);
        if (s->yuv_line == NULL) {
            av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
            return AVERROR(ENOMEM);
        }
        dst    = s->yuv_line;
        stride = 0;

        width = s->width * s->subsampling[1] + 2 * (s->width / s->subsampling[0]);
        av_assert0(width <= bytes_per_row);
        av_assert0(s->bpp == 24);
    }

    if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
#if CONFIG_ZLIB
        return tiff_unpack_zlib(s, p, dst, stride, src, size, width, lines,
                                strip_start, is_yuv);
#else
        av_log(s->avctx, AV_LOG_ERROR,
               "zlib support not enabled, "
               "deflate compression not supported\n");
        return AVERROR(ENOSYS);
#endif
    }
    if (s->compr == TIFF_LZW) {
        if (s->fill_order) {
            if ((ret = deinvert_buffer(s, src, size)) < 0)
                return ret;
            ssrc = src = s->deinvert_buf;
        }
        if (size > 1 && !src[0] && (src[1]&1)) {
            av_log(s->avctx, AV_LOG_ERROR, "Old style LZW is unsupported\n");
        }
        if ((ret = ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF)) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
            return ret;
        }
        for (line = 0; line < lines; line++) {
            pixels = ff_lzw_decode(s->lzw, dst, width);
            if (pixels < width) {
                av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
                       pixels, width);
                return AVERROR_INVALIDDATA;
            }
            if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
                horizontal_fill(s->bpp, dst, 1, dst, 0, width, 0);
            if (is_yuv) {
                unpack_yuv(s, p, dst, strip_start + line);
                line += s->subsampling[1] - 1;
            }
            dst += stride;
        }
        return 0;
    }
    if (s->compr == TIFF_CCITT_RLE ||
        s->compr == TIFF_G3        ||
        s->compr == TIFF_G4) {
        if (is_yuv)
            return AVERROR_INVALIDDATA;

        return tiff_unpack_fax(s, dst, stride, src, size, width, lines);
    }

    bytestream2_init(&s->gb, src, size);
    bytestream2_init_writer(&pb, dst, is_yuv ? s->yuv_line_size : (stride * lines));

    for (line = 0; line < lines; line++) {
        if (src - ssrc > size) {
            av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
            return AVERROR_INVALIDDATA;
        }

        if (bytestream2_get_bytes_left(&s->gb) == 0 || bytestream2_get_eof(&pb))
            break;
        bytestream2_seek_p(&pb, stride * line, SEEK_SET);
        switch (s->compr) {
        case TIFF_RAW:
            if (ssrc + size - src < width)
                return AVERROR_INVALIDDATA;

            if (!s->fill_order) {
                horizontal_fill(s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
                                dst, 1, src, 0, width, 0);
            } else {
                int i;
                for (i = 0; i < width; i++)
                    dst[i] = ff_reverse[src[i]];
            }
            src += width;
            break;
        case TIFF_PACKBITS:
            for (pixels = 0; pixels < width;) {
                if (ssrc + size - src < 2) {
                    av_log(s->avctx, AV_LOG_ERROR, "Read went out of bounds\n");
                    return AVERROR_INVALIDDATA;
                }
                code = s->fill_order ? (int8_t) ff_reverse[*src++]: (int8_t) *src++;
                if (code >= 0) {
                    code++;
                    if (pixels + code > width ||
                        ssrc + size - src < code) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "Copy went out of bounds\n");
                        return AVERROR_INVALIDDATA;
                    }
                    horizontal_fill(s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
                                    dst, 1, src, 0, code, pixels);
                    src    += code;
                    pixels += code;
                } else if (code != -128) { // -127..-1
                    code = (-code) + 1;
                    if (pixels + code > width) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "Run went out of bounds\n");
                        return AVERROR_INVALIDDATA;
                    }
                    c = *src++;
                    horizontal_fill(s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
                                    dst, 0, NULL, c, code, pixels);
                    pixels += code;
                }
            }
            if (s->fill_order) {
                int i;
                for (i = 0; i < width; i++)
                    dst[i] = ff_reverse[dst[i]];
            }
            break;
        }
        if (is_yuv) {
            unpack_yuv(s, p, dst, strip_start + line);
            line += s->subsampling[1] - 1;
        }
        dst += stride;
    }
    return 0;
}

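/* Pick the output pixel format from planarity, bits per pixel and component
 * count, then allocate the frame buffer and copy in the palette if needed. */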
static int init_image(TiffContext *s, ThreadFrame *frame)
{
    int ret;

    switch (s->planar * 1000 + s->bpp * 10 + s->bppcount) {
    case 11:
        if (!s->palette_is_set) {
            s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
            break;
        }
    case 21:
    case 41:
    case 81:
        s->avctx->pix_fmt = s->palette_is_set ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
        break;
    case 243:
        if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
            if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV444P;
            } else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
            } else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV411P;
            } else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV440P;
            } else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV420P;
            } else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV410P;
            } else {
                av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
                return AVERROR_PATCHWELCOME;
            }
        } else
            s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
        break;
    case 161:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE;
        break;
    case 162:
        s->avctx->pix_fmt = AV_PIX_FMT_YA8;
        break;
    case 322:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_YA16LE : AV_PIX_FMT_YA16BE;
        break;
    case 324:
        s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
        break;
    case 483:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE;
        break;
    case 644:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE;
        break;
    case 1243:
        s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
        break;
    case 1324:
        s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
        break;
    case 1483:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE;
        break;
    case 1644:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE;
        break;
    default:
        av_log(s->avctx, AV_LOG_ERROR,
               "This format is not supported (bpp=%d, bppcount=%d)\n",
               s->bpp, s->bppcount);
        return AVERROR_INVALIDDATA;
    }

    if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
        if ((desc->flags & AV_PIX_FMT_FLAG_RGB) || desc->nb_components < 3) {
            av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr variant\n");
            return AVERROR_INVALIDDATA;
        }
    }

    if (s->width != s->avctx->width || s->height != s->avctx->height) {
        ret = ff_set_dimensions(s->avctx, s->width, s->height);
        if (ret < 0)
            return ret;
    }
    if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
        return ret;
    if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        memcpy(frame->f->data[1], s->palette, sizeof(s->palette));
    }
    return 0;
}

static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
{
    int offset = tag == TIFF_YRES ? 2 : 0;
    s->res[offset++] = num;
    s->res[offset]   = den;
    if (s->res[0] && s->res[1] && s->res[2] && s->res[3])
        av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
                  s->res[2] * (uint64_t)s->res[1], s->res[0] * (uint64_t)s->res[3], INT32_MAX);
}

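/* Parse a single IFD entry and update the decoder state; unknown tags are
 * ignored unless AV_EF_EXPLODE is set. */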
static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
{
    unsigned tag, type, count, off, value = 0, value2 = 0;
    int i, start;
    int pos;
    int ret;
    double *dp;

    ret = ff_tread_tag(&s->gb, s->le, &tag, &type, &count, &start);
    if (ret < 0) {
        goto end;
    }

    off = bytestream2_tell(&s->gb);
    if (count == 1) {
        switch (type) {
        case TIFF_BYTE:
        case TIFF_SHORT:
        case TIFF_LONG:
            value = ff_tget(&s->gb, type, s->le);
            break;
        case TIFF_RATIONAL:
            value  = ff_tget(&s->gb, TIFF_LONG, s->le);
            value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
            break;
        case TIFF_STRING:
            if (count <= 4) {
                break;
            }
        default:
            value = UINT_MAX;
        }
    }

    switch (tag) {
    case TIFF_WIDTH:
        s->width = value;
        break;
    case TIFF_HEIGHT:
        s->height = value;
        break;
    case TIFF_BPP:
        s->bppcount = count;
        if (count > 4) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "This format is not supported (bpp=%d, %d components)\n",
                   s->bpp, count);
            return AVERROR_INVALIDDATA;
        }
        if (count == 1)
            s->bpp = value;
        else {
            switch (type) {
            case TIFF_BYTE:
            case TIFF_SHORT:
            case TIFF_LONG:
                s->bpp = 0;
                if (bytestream2_get_bytes_left(&s->gb) < type_sizes[type] * count)
                    return AVERROR_INVALIDDATA;
                for (i = 0; i < count; i++)
                    s->bpp += ff_tget(&s->gb, type, s->le);
                break;
            default:
                s->bpp = -1;
            }
        }
        break;
    case TIFF_SAMPLES_PER_PIXEL:
        if (count != 1) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Samples per pixel requires a single value, many provided\n");
            return AVERROR_INVALIDDATA;
        }
        if (value > 4U) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Samples per pixel %d is too large\n", value);
            return AVERROR_INVALIDDATA;
        }
        if (s->bppcount == 1)
            s->bpp *= value;
        s->bppcount = value;
        break;
    case TIFF_COMPR:
        s->compr     = value;
        s->predictor = 0;
        switch (s->compr) {
        case TIFF_RAW:
        case TIFF_PACKBITS:
        case TIFF_LZW:
        case TIFF_CCITT_RLE:
            break;
        case TIFF_G3:
        case TIFF_G4:
            s->fax_opts = 0;
            break;
        case TIFF_DEFLATE:
        case TIFF_ADOBE_DEFLATE:
#if CONFIG_ZLIB
            break;
#else
            av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
            return AVERROR(ENOSYS);
#endif
        case TIFF_JPEG:
        case TIFF_NEWJPEG:
            avpriv_report_missing_feature(s->avctx, "JPEG compression");
            return AVERROR_PATCHWELCOME;
        case TIFF_LZMA:
            avpriv_report_missing_feature(s->avctx, "LZMA compression");
            return AVERROR_PATCHWELCOME;
        default:
            av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n",
                   s->compr);
            return AVERROR_INVALIDDATA;
        }
        break;
    case TIFF_ROWSPERSTRIP:
        if (!value || (type == TIFF_LONG && value == UINT_MAX))
            value = s->height;
        s->rps = FFMIN(value, s->height);
        break;
    case TIFF_STRIP_OFFS:
        if (count == 1) {
            s->strippos = 0;
            s->stripoff = value;
        } else
            s->strippos = off;
        s->strips = count;
        if (s->strips == 1)
            s->rps = s->height;
        s->sot = type;
        break;
    case TIFF_STRIP_SIZE:
        if (count == 1) {
            s->stripsizesoff = 0;
            s->stripsize     = value;
            s->strips        = 1;
        } else {
            s->stripsizesoff = off;
        }
        s->strips = count;
        s->sstype = type;
        break;
    case TIFF_XRES:
    case TIFF_YRES:
        set_sar(s, tag, value, value2);
        break;
    case TIFF_TILE_BYTE_COUNTS:
    case TIFF_TILE_LENGTH:
    case TIFF_TILE_OFFSETS:
    case TIFF_TILE_WIDTH:
        av_log(s->avctx, AV_LOG_ERROR, "Tiled images are not supported\n");
        return AVERROR_PATCHWELCOME;
        break;
    case TIFF_PREDICTOR:
        s->predictor = value;
        break;
    case TIFF_PHOTOMETRIC:
        switch (value) {
        case TIFF_PHOTOMETRIC_WHITE_IS_ZERO:
        case TIFF_PHOTOMETRIC_BLACK_IS_ZERO:
        case TIFF_PHOTOMETRIC_RGB:
        case TIFF_PHOTOMETRIC_PALETTE:
        case TIFF_PHOTOMETRIC_YCBCR:
            s->photometric = value;
            break;
        case TIFF_PHOTOMETRIC_ALPHA_MASK:
        case TIFF_PHOTOMETRIC_SEPARATED:
        case TIFF_PHOTOMETRIC_CIE_LAB:
        case TIFF_PHOTOMETRIC_ICC_LAB:
        case TIFF_PHOTOMETRIC_ITU_LAB:
        case TIFF_PHOTOMETRIC_CFA:
        case TIFF_PHOTOMETRIC_LOG_L:
        case TIFF_PHOTOMETRIC_LOG_LUV:
        case TIFF_PHOTOMETRIC_LINEAR_RAW:
            avpriv_report_missing_feature(s->avctx,
                                          "PhotometricInterpretation 0x%04X",
                                          value);
            return AVERROR_PATCHWELCOME;
        default:
            av_log(s->avctx, AV_LOG_ERROR, "PhotometricInterpretation %u is "
                   "unknown\n", value);
            return AVERROR_INVALIDDATA;
        }
        break;
    case TIFF_FILL_ORDER:
        if (value < 1 || value > 2) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Unknown FillOrder value %d, trying default one\n", value);
            value = 1;
        }
        s->fill_order = value - 1;
        break;
    case TIFF_PAL: {
        GetByteContext pal_gb[3];
        off = type_sizes[type];
        if (count / 3 > 256 ||
            bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
            return AVERROR_INVALIDDATA;

        pal_gb[0] = pal_gb[1] = pal_gb[2] = s->gb;
        bytestream2_skip(&pal_gb[1], count / 3 * off);
        bytestream2_skip(&pal_gb[2], count / 3 * off * 2);

        off = (type_sizes[type] - 1) << 3;
        for (i = 0; i < count / 3; i++) {
            uint32_t p = 0xFF000000;
            p |= (ff_tget(&pal_gb[0], type, s->le) >> off) << 16;
            p |= (ff_tget(&pal_gb[1], type, s->le) >> off) << 8;
            p |=  ff_tget(&pal_gb[2], type, s->le) >> off;
            s->palette[i] = p;
        }
        s->palette_is_set = 1;
        break;
    }
    case TIFF_PLANAR:
        s->planar = value == 2;
        break;
    case TIFF_YCBCR_SUBSAMPLING:
        if (count != 2) {
            av_log(s->avctx, AV_LOG_ERROR, "subsample count invalid\n");
            return AVERROR_INVALIDDATA;
        }
        for (i = 0; i < count; i++)
            s->subsampling[i] = ff_tget(&s->gb, type, s->le);
        break;
    case TIFF_T4OPTIONS:
        if (s->compr == TIFF_G3)
            s->fax_opts = value;
        break;
    case TIFF_T6OPTIONS:
        if (s->compr == TIFF_G4)
            s->fax_opts = value;
        break;
#define ADD_METADATA(count, name, sep)\
    if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\
        av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\
        goto end;\
    }
    case TIFF_MODEL_PIXEL_SCALE:
        ADD_METADATA(count, "ModelPixelScaleTag", NULL);
        break;
    case TIFF_MODEL_TRANSFORMATION:
        ADD_METADATA(count, "ModelTransformationTag", NULL);
        break;
    case TIFF_MODEL_TIEPOINT:
        ADD_METADATA(count, "ModelTiepointTag", NULL);
        break;
    case TIFF_GEO_KEY_DIRECTORY:
        ADD_METADATA(1, "GeoTIFF_Version", NULL);
        ADD_METADATA(2, "GeoTIFF_Key_Revision", ".");
        s->geotag_count = ff_tget_short(&s->gb, s->le);
        if (s->geotag_count > count / 4 - 1) {
            s->geotag_count = count / 4 - 1;
            av_log(s->avctx, AV_LOG_WARNING, "GeoTIFF key directory buffer shorter than specified\n");
        }
        if (bytestream2_get_bytes_left(&s->gb) < s->geotag_count * sizeof(int16_t) * 4) {
            s->geotag_count = 0;
            return -1;
        }
        s->geotags = av_mallocz_array(s->geotag_count, sizeof(TiffGeoTag));
        if (!s->geotags) {
            av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
            s->geotag_count = 0;
            goto end;
        }
        for (i = 0; i < s->geotag_count; i++) {
            s->geotags[i].key   = ff_tget_short(&s->gb, s->le);
            s->geotags[i].type  = ff_tget_short(&s->gb, s->le);
            s->geotags[i].count = ff_tget_short(&s->gb, s->le);

            if (!s->geotags[i].type)
                s->geotags[i].val    = get_geokey_val(s->geotags[i].key, ff_tget_short(&s->gb, s->le));
            else
                s->geotags[i].offset = ff_tget_short(&s->gb, s->le);
        }
        break;
    case TIFF_GEO_DOUBLE_PARAMS:
        if (count >= INT_MAX / sizeof(int64_t))
            return AVERROR_INVALIDDATA;
        if (bytestream2_get_bytes_left(&s->gb) < count * sizeof(int64_t))
            return AVERROR_INVALIDDATA;
        dp = av_malloc_array(count, sizeof(double));
        if (!dp) {
            av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
            goto end;
        }
        for (i = 0; i < count; i++)
            dp[i] = ff_tget_double(&s->gb, s->le);
        for (i = 0; i < s->geotag_count; i++) {
            if (s->geotags[i].type == TIFF_GEO_DOUBLE_PARAMS) {
                if (s->geotags[i].count == 0
                    || s->geotags[i].offset + s->geotags[i].count > count) {
                    av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
                } else {
                    char *ap = doubles2str(&dp[s->geotags[i].offset], s->geotags[i].count, ", ");
                    if (!ap) {
                        av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
                        av_freep(&dp);
                        return AVERROR(ENOMEM);
                    }
                    s->geotags[i].val = ap;
                }
            }
        }
        av_freep(&dp);
        break;
    case TIFF_GEO_ASCII_PARAMS:
        pos = bytestream2_tell(&s->gb);
        for (i = 0; i < s->geotag_count; i++) {
            if (s->geotags[i].type == TIFF_GEO_ASCII_PARAMS) {
                if (s->geotags[i].count == 0
                    || s->geotags[i].offset + s->geotags[i].count > count) {
                    av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
                } else {
                    char *ap;

                    bytestream2_seek(&s->gb, pos + s->geotags[i].offset, SEEK_SET);
                    if (bytestream2_get_bytes_left(&s->gb) < s->geotags[i].count)
                        return AVERROR_INVALIDDATA;
                    ap = av_malloc(s->geotags[i].count);
                    if (!ap) {
                        av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
                        return AVERROR(ENOMEM);
                    }
                    bytestream2_get_bufferu(&s->gb, ap, s->geotags[i].count);
                    ap[s->geotags[i].count - 1] = '\0'; //replace the "|" delimiter with a 0 byte
                    s->geotags[i].val = ap;
                }
            }
        }
        break;
    case TIFF_ARTIST:
        ADD_METADATA(count, "artist", NULL);
        break;
    case TIFF_COPYRIGHT:
        ADD_METADATA(count, "copyright", NULL);
        break;
    case TIFF_DATE:
        ADD_METADATA(count, "date", NULL);
        break;
    case TIFF_DOCUMENT_NAME:
        ADD_METADATA(count, "document_name", NULL);
        break;
    case TIFF_HOST_COMPUTER:
        ADD_METADATA(count, "computer", NULL);
        break;
    case TIFF_IMAGE_DESCRIPTION:
        ADD_METADATA(count, "description", NULL);
        break;
    case TIFF_MAKE:
        ADD_METADATA(count, "make", NULL);
        break;
    case TIFF_MODEL:
        ADD_METADATA(count, "model", NULL);
        break;
    case TIFF_PAGE_NAME:
        ADD_METADATA(count, "page_name", NULL);
        break;
    case TIFF_PAGE_NUMBER:
        ADD_METADATA(count, "page_number", " / ");
        break;
    case TIFF_SOFTWARE_NAME:
        ADD_METADATA(count, "software", NULL);
        break;
    default:
        if (s->avctx->err_recognition & AV_EF_EXPLODE) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Unknown or unsupported tag %d/0X%0X\n",
                   tag, tag);
            return AVERROR_INVALIDDATA;
        }
    }
end:
    bytestream2_seek(&s->gb, start, SEEK_SET);
    return 0;
}

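/* Main decode entry point: parse the header and IFD, allocate the frame,
 * decode every strip for every plane, then apply the horizontal-differencing
 * predictor and the WhiteIsZero inversion. */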
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame, AVPacket *avpkt)
{
    TiffContext *const s = avctx->priv_data;
    AVFrame *const p = data;
    ThreadFrame frame = { .f = data };
    unsigned off;
    int le, ret, plane, planes;
    int i, j, entries, stride;
    unsigned soff, ssize;
    uint8_t *dst;
    GetByteContext stripsizes;
    GetByteContext stripdata;

    bytestream2_init(&s->gb, avpkt->data, avpkt->size);

    // parse image header
    if ((ret = ff_tdecode_header(&s->gb, &le, &off))) {
        av_log(avctx, AV_LOG_ERROR, "Invalid TIFF header\n");
        return ret;
    } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
        av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
        return AVERROR_INVALIDDATA;
    }
    s->le          = le;
    // TIFF_BPP is not a required tag and defaults to 1
    s->bppcount    = s->bpp = 1;
    s->photometric = TIFF_PHOTOMETRIC_NONE;
    s->compr       = TIFF_RAW;
    s->fill_order  = 0;
    free_geotags(s);

    // Reset these offsets so we can tell if they were set this frame
    s->stripsizesoff = s->strippos = 0;
    /* parse image file directory */
    bytestream2_seek(&s->gb, off, SEEK_SET);
    entries = ff_tget_short(&s->gb, le);
    if (bytestream2_get_bytes_left(&s->gb) < entries * 12)
        return AVERROR_INVALIDDATA;
    for (i = 0; i < entries; i++) {
        if ((ret = tiff_decode_tag(s, p)) < 0)
            return ret;
    }

    for (i = 0; i < s->geotag_count; i++) {
        const char *keyname = get_geokey_name(s->geotags[i].key);
        if (!keyname) {
            av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key);
            continue;
        }
        if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) {
            av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key);
            continue;
        }
        ret = av_dict_set(avpriv_frame_get_metadatap(p), keyname, s->geotags[i].val, 0);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname);
            return ret;
        }
    }

    if (!s->strippos && !s->stripoff) {
        av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
        return AVERROR_INVALIDDATA;
    }

    /* now we have the data and may start decoding */
    if ((ret = init_image(s, &frame)) < 0)
        return ret;

    if (s->strips == 1 && !s->stripsize) {
        av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
        s->stripsize = avpkt->size - s->stripoff;
    }

    if (s->stripsizesoff) {
        if (s->stripsizesoff >= (unsigned)avpkt->size)
            return AVERROR_INVALIDDATA;
        bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,
                         avpkt->size - s->stripsizesoff);
    }
    if (s->strippos) {
        if (s->strippos >= (unsigned)avpkt->size)
            return AVERROR_INVALIDDATA;
        bytestream2_init(&stripdata, avpkt->data + s->strippos,
                         avpkt->size - s->strippos);
    }

    if (s->rps <= 0) {
        av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps);
        return AVERROR_INVALIDDATA;
    }

    planes = s->planar ? s->bppcount : 1;
    for (plane = 0; plane < planes; plane++) {
        stride = p->linesize[plane];
        dst    = p->data[plane];
        for (i = 0; i < s->height; i += s->rps) {
            if (s->stripsizesoff)
                ssize = ff_tget(&stripsizes, s->sstype, le);
            else
                ssize = s->stripsize;

            if (s->strippos)
                soff = ff_tget(&stripdata, s->sot, le);
            else
                soff = s->stripoff;

            if (soff > avpkt->size || ssize > avpkt->size - soff) {
                av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
                return AVERROR_INVALIDDATA;
            }
            if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i,
                                         FFMIN(s->rps, s->height - i))) < 0) {
                if (avctx->err_recognition & AV_EF_EXPLODE)
                    return ret;
                break;
            }
            dst += s->rps * stride;
        }
        if (s->predictor == 2) {
            if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
                av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported\n");
                return AVERROR_PATCHWELCOME;
            }
            dst  = p->data[plane];
            soff = s->bpp >> 3;
            if (s->planar)
                soff = FFMAX(soff / s->bppcount, 1);
            ssize = s->width * soff;
            if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE  ||
                s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE ||
                s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE ||
                s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) {
                for (i = 0; i < s->height; i++) {
                    for (j = soff; j < ssize; j += 2)
                        AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));
                    dst += stride;
                }
            } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE  ||
                       s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE ||
                       s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE ||
                       s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) {
                for (i = 0; i < s->height; i++) {
                    for (j = soff; j < ssize; j += 2)
                        AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));
                    dst += stride;
                }
            } else {
                for (i = 0; i < s->height; i++) {
                    for (j = soff; j < ssize; j++)
                        dst[j] += dst[j - soff];
                    dst += stride;
                }
            }
        }

        if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) {
            dst = p->data[plane];
            for (i = 0; i < s->height; i++) {
                for (j = 0; j < p->linesize[plane]; j++)
                    dst[j] = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1 << s->bpp) - 1 : 255) - dst[j];
                dst += stride;
            }
        }
    }

    if (s->planar && s->bppcount > 2) {
        FFSWAP(uint8_t*, p->data[0],     p->data[2]);
        FFSWAP(int,      p->linesize[0], p->linesize[2]);
        FFSWAP(uint8_t*, p->data[0],     p->data[1]);
        FFSWAP(int,      p->linesize[0], p->linesize[1]);
    }

    *got_frame = 1;

    return avpkt->size;
}

static av_cold int tiff_init(AVCodecContext *avctx)
{
    TiffContext *s = avctx->priv_data;

    s->width  = 0;
    s->height = 0;
    s->subsampling[0] =
    s->subsampling[1] = 1;
    s->avctx  = avctx;
    ff_lzw_decode_open(&s->lzw);
    ff_ccitt_unpack_init();

    return 0;
}

static av_cold int tiff_end(AVCodecContext *avctx)
{
    TiffContext *const s = avctx->priv_data;

    free_geotags(s);

    ff_lzw_decode_close(&s->lzw);
    av_freep(&s->deinvert_buf);
    return 0;
}

AVCodec ff_tiff_decoder = {
    .name             = "tiff",
    .long_name        = NULL_IF_CONFIG_SMALL("TIFF image"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_TIFF,
    .priv_data_size   = sizeof(TiffContext),
    .init             = tiff_init,
    .close            = tiff_end,
    .decode           = decode_frame,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(tiff_init),
    .capabilities     = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
};