/*
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * TIFF image decoder
 * @author Konstantin Shishkov
 */

#include "config.h"
#if CONFIG_ZLIB
#include <zlib.h>
#endif
#if CONFIG_LZMA
#include <lzma.h>
#endif

#include "libavutil/attributes.h"
#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "bytestream.h"
#include "faxcompr.h"
#include "internal.h"
#include "lzw.h"
#include "mathops.h"
#include "tiff.h"
#include "tiff_data.h"
#include "thread.h"
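
/* Decoder context: image geometry and sample format parsed from the IFD,
 * strip layout, compression parameters, and scratch buffers reused across
 * strips and frames. */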
typedef struct TiffContext {
    AVCodecContext *avctx;
    GetByteContext gb;

    int width, height;
    unsigned int bpp, bppcount;
    uint32_t palette[256];
    int palette_is_set;
    int le;
    enum TiffCompr compr;
    enum TiffPhotometric photometric;
    int planar;
    int subsampling[2];
    int fax_opts;
    int predictor;
    int fill_order;
    uint32_t res[4];

    int strips, rps, sstype;
    int sot;
    int stripsizesoff, stripsize, stripoff, strippos;
    LZWState *lzw;

    uint8_t *deinvert_buf;
    int deinvert_buf_size;
    uint8_t *yuv_line;
    unsigned int yuv_line_size;

    int geotag_count;
    TiffGeoTag *geotags;
} TiffContext;
static void free_geotags(TiffContext *const s)
{
    int i;
    for (i = 0; i < s->geotag_count; i++) {
        if (s->geotags[i].val)
            av_freep(&s->geotags[i].val);
    }
    av_freep(&s->geotags);
    s->geotag_count = 0;
}
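
/* Map a GeoTIFF key id to its name or expected type via the per-range
 * lookup tables declared in tiff_data.h; unknown keys yield NULL or
 * AVERROR_INVALIDDATA. */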
#define RET_GEOKEY(TYPE, array, element)\
    if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
        key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_name_type_map))\
        return ff_tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].element;

static const char *get_geokey_name(int key)
{
    RET_GEOKEY(VERT, vert, name);
    RET_GEOKEY(PROJ, proj, name);
    RET_GEOKEY(GEOG, geog, name);
    RET_GEOKEY(CONF, conf, name);

    return NULL;
}

static int get_geokey_type(int key)
{
    RET_GEOKEY(VERT, vert, type);
    RET_GEOKEY(PROJ, proj, type);
    RET_GEOKEY(GEOG, geog, type);
    RET_GEOKEY(CONF, conf, type);

    return AVERROR_INVALIDDATA;
}
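
/* Binary search over a key/name table sorted by key id. */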
static int cmp_id_key(const void *id, const void *k)
{
    return *(const int*)id - ((const TiffGeoTagKeyName*)k)->key;
}

static const char *search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
{
    TiffGeoTagKeyName *r = bsearch(&id, keys, n, sizeof(keys[0]), cmp_id_key);
    if (r)
        return r->name;
    return NULL;
}
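
/* Translate a GeoTIFF key value into a freshly allocated string;
 * values without a known mapping become "Unknown-<val>". */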
static char *get_geokey_val(int key, int val)
{
    char *ap;

    if (val == TIFF_GEO_KEY_UNDEFINED)
        return av_strdup("undefined");
    if (val == TIFF_GEO_KEY_USER_DEFINED)
        return av_strdup("User-Defined");

#define RET_GEOKEY_VAL(TYPE, array)\
    if (val >= TIFF_##TYPE##_OFFSET &&\
        val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_codes))\
        return av_strdup(ff_tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET]);

    switch (key) {
    case TIFF_GT_MODEL_TYPE_GEOKEY:
        RET_GEOKEY_VAL(GT_MODEL_TYPE, gt_model_type);
        break;
    case TIFF_GT_RASTER_TYPE_GEOKEY:
        RET_GEOKEY_VAL(GT_RASTER_TYPE, gt_raster_type);
        break;
    case TIFF_GEOG_LINEAR_UNITS_GEOKEY:
    case TIFF_PROJ_LINEAR_UNITS_GEOKEY:
    case TIFF_VERTICAL_UNITS_GEOKEY:
        RET_GEOKEY_VAL(LINEAR_UNIT, linear_unit);
        break;
    case TIFF_GEOG_ANGULAR_UNITS_GEOKEY:
    case TIFF_GEOG_AZIMUTH_UNITS_GEOKEY:
        RET_GEOKEY_VAL(ANGULAR_UNIT, angular_unit);
        break;
    case TIFF_GEOGRAPHIC_TYPE_GEOKEY:
        RET_GEOKEY_VAL(GCS_TYPE, gcs_type);
        RET_GEOKEY_VAL(GCSE_TYPE, gcse_type);
        break;
    case TIFF_GEOG_GEODETIC_DATUM_GEOKEY:
        RET_GEOKEY_VAL(GEODETIC_DATUM, geodetic_datum);
        RET_GEOKEY_VAL(GEODETIC_DATUM_E, geodetic_datum_e);
        break;
    case TIFF_GEOG_ELLIPSOID_GEOKEY:
        RET_GEOKEY_VAL(ELLIPSOID, ellipsoid);
        break;
    case TIFF_GEOG_PRIME_MERIDIAN_GEOKEY:
        RET_GEOKEY_VAL(PRIME_MERIDIAN, prime_meridian);
        break;
    case TIFF_PROJECTED_CS_TYPE_GEOKEY:
        ap = av_strdup(search_keyval(ff_tiff_proj_cs_type_codes, FF_ARRAY_ELEMS(ff_tiff_proj_cs_type_codes), val));
        if (ap) return ap;
        break;
    case TIFF_PROJECTION_GEOKEY:
        ap = av_strdup(search_keyval(ff_tiff_projection_codes, FF_ARRAY_ELEMS(ff_tiff_projection_codes), val));
        if (ap) return ap;
        break;
    case TIFF_PROJ_COORD_TRANS_GEOKEY:
        RET_GEOKEY_VAL(COORD_TRANS, coord_trans);
        break;
    case TIFF_VERTICAL_CS_TYPE_GEOKEY:
        RET_GEOKEY_VAL(VERT_CS, vert_cs);
        RET_GEOKEY_VAL(ORTHO_VERT_CS, ortho_vert_cs);
        break;
    }

    ap = av_malloc(14);
    if (ap)
        snprintf(ap, 14, "Unknown-%d", val);
    return ap;
}
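
/* Print an array of doubles into a single separator-joined string
 * (used for GeoTIFF double parameters). */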
static char *doubles2str(double *dp, int count, const char *sep)
{
    int i;
    char *ap, *ap0;
    uint64_t component_len;
    if (!sep) sep = ", ";
    component_len = 24LL + strlen(sep);
    if (count >= (INT_MAX - 1)/component_len)
        return NULL;
    ap = av_malloc(component_len * count + 1);
    if (!ap)
        return NULL;
    ap0   = ap;
    ap[0] = '\0';
    for (i = 0; i < count; i++) {
        unsigned l = snprintf(ap, component_len, "%.15g%s", dp[i], sep);
        if (l >= component_len) {
            av_free(ap0);
            return NULL;
        }
        ap += l;
    }
    ap0[strlen(ap0) - strlen(sep)] = '\0';
    return ap0;
}
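
/* Store a tag's payload as frame metadata, dispatching on the TIFF data type. */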
static int add_metadata(int count, int type,
                        const char *name, const char *sep, TiffContext *s, AVFrame *frame)
{
    switch(type) {
    case TIFF_DOUBLE: return ff_tadd_doubles_metadata(count, name, sep, &s->gb, s->le, avpriv_frame_get_metadatap(frame));
    case TIFF_SHORT : return ff_tadd_shorts_metadata(count, name, sep, &s->gb, s->le, 0, avpriv_frame_get_metadatap(frame));
    case TIFF_STRING: return ff_tadd_string_metadata(count, name, &s->gb, s->le, avpriv_frame_get_metadatap(frame));
    default         : return AVERROR_INVALIDDATA;
    };
}
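
/* Expand packed 1/2/4 bpp samples to one byte per sample; for 8 bpp and
 * wider samples this degenerates to a plain memcpy (or memset when a
 * constant value is written instead of a source pointer). */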
static void av_always_inline horizontal_fill(unsigned int bpp, uint8_t* dst,
                                             int usePtr, const uint8_t *src,
                                             uint8_t c, int width, int offset)
{
    switch (bpp) {
    case 1:
        while (--width >= 0) {
            dst[(width+offset)*8+7] = (usePtr ? src[width] : c)      & 0x1;
            dst[(width+offset)*8+6] = (usePtr ? src[width] : c) >> 1 & 0x1;
            dst[(width+offset)*8+5] = (usePtr ? src[width] : c) >> 2 & 0x1;
            dst[(width+offset)*8+4] = (usePtr ? src[width] : c) >> 3 & 0x1;
            dst[(width+offset)*8+3] = (usePtr ? src[width] : c) >> 4 & 0x1;
            dst[(width+offset)*8+2] = (usePtr ? src[width] : c) >> 5 & 0x1;
            dst[(width+offset)*8+1] = (usePtr ? src[width] : c) >> 6 & 0x1;
            dst[(width+offset)*8+0] = (usePtr ? src[width] : c) >> 7;
        }
        break;
    case 2:
        while (--width >= 0) {
            dst[(width+offset)*4+3] = (usePtr ? src[width] : c) & 0x3;
            dst[(width+offset)*4+2] = (usePtr ? src[width] : c) >> 2 & 0x3;
            dst[(width+offset)*4+1] = (usePtr ? src[width] : c) >> 4 & 0x3;
            dst[(width+offset)*4+0] = (usePtr ? src[width] : c) >> 6;
        }
        break;
    case 4:
        while (--width >= 0) {
            dst[(width+offset)*2+1] = (usePtr ? src[width] : c) & 0xF;
            dst[(width+offset)*2+0] = (usePtr ? src[width] : c) >> 4;
        }
        break;
    default:
        if (usePtr) {
            memcpy(dst + offset, src, width);
        } else {
            memset(dst + offset, c, width);
        }
    }
}
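
/* Bit-reverse every byte of the source into s->deinvert_buf
 * (handles FillOrder == 2 input). */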
static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
{
    int i;

    av_fast_padded_malloc(&s->deinvert_buf, &s->deinvert_buf_size, size);
    if (!s->deinvert_buf)
        return AVERROR(ENOMEM);
    for (i = 0; i < size; i++)
        s->deinvert_buf[i] = ff_reverse[src[i]];

    return 0;
}
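
/* Rearrange one line of interleaved YCbCr samples (from yuv_line) into the
 * planar picture, clamping against the picture edges when the image size is
 * not a multiple of the chroma subsampling factors. */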
static void unpack_yuv(TiffContext *s, AVFrame *p,
                       const uint8_t *src, int lnum)
{
    int i, j, k;
    int w       = (s->width - 1) / s->subsampling[0] + 1;
    uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
    uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];
    if (s->width % s->subsampling[0] || s->height % s->subsampling[1]) {
        for (i = 0; i < w; i++) {
            for (j = 0; j < s->subsampling[1]; j++)
                for (k = 0; k < s->subsampling[0]; k++)
                    p->data[0][FFMIN(lnum + j, s->height-1) * p->linesize[0] +
                               FFMIN(i * s->subsampling[0] + k, s->width-1)] = *src++;
            *pu++ = *src++;
            *pv++ = *src++;
        }
    } else {
        for (i = 0; i < w; i++) {
            for (j = 0; j < s->subsampling[1]; j++)
                for (k = 0; k < s->subsampling[0]; k++)
                    p->data[0][(lnum + j) * p->linesize[0] +
                               i * s->subsampling[0] + k] = *src++;
            *pu++ = *src++;
            *pv++ = *src++;
        }
    }
}
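
/* Deflate-compressed strips: inflate the whole strip into a temporary
 * buffer, then copy it into the picture line by line. */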
#if CONFIG_ZLIB
static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src,
                           int size)
{
    z_stream zstream = { 0 };
    int zret;

    zstream.next_in   = (uint8_t *)src;
    zstream.avail_in  = size;
    zstream.next_out  = dst;
    zstream.avail_out = *len;
    zret              = inflateInit(&zstream);
    if (zret != Z_OK) {
        av_log(NULL, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
        return zret;
    }
    zret = inflate(&zstream, Z_SYNC_FLUSH);
    inflateEnd(&zstream);
    *len = zstream.total_out;
    return zret == Z_STREAM_END ? Z_OK : zret;
}

static int tiff_unpack_zlib(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
                            const uint8_t *src, int size, int width, int lines,
                            int strip_start, int is_yuv)
{
    uint8_t *zbuf;
    unsigned long outlen;
    int ret, line;
    outlen = width * lines;
    zbuf   = av_malloc(outlen);
    if (!zbuf)
        return AVERROR(ENOMEM);
    if (s->fill_order) {
        if ((ret = deinvert_buffer(s, src, size)) < 0) {
            av_free(zbuf);
            return ret;
        }
        src = s->deinvert_buf;
    }
    ret = tiff_uncompress(zbuf, &outlen, src, size);
    if (ret != Z_OK) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Uncompressing failed (%lu of %lu) with error %d\n", outlen,
               (unsigned long)width * lines, ret);
        av_free(zbuf);
        return AVERROR_UNKNOWN;
    }
    src = zbuf;
    for (line = 0; line < lines; line++) {
        if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
            horizontal_fill(s->bpp, dst, 1, src, 0, width, 0);
        } else {
            memcpy(dst, src, width);
        }
        if (is_yuv) {
            unpack_yuv(s, p, dst, strip_start + line);
            line += s->subsampling[1] - 1;
        }
        dst += stride;
        src += width;
    }
    av_free(zbuf);
    return 0;
}
#endif
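
/* LZMA-compressed strips; same structure as the zlib path above. */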
#if CONFIG_LZMA
static int tiff_uncompress_lzma(uint8_t *dst, uint64_t *len, const uint8_t *src,
                                int size)
{
    lzma_stream stream = LZMA_STREAM_INIT;
    lzma_ret ret;

    stream.next_in   = (uint8_t *)src;
    stream.avail_in  = size;
    stream.next_out  = dst;
    stream.avail_out = *len;
    ret              = lzma_stream_decoder(&stream, UINT64_MAX, 0);
    if (ret != LZMA_OK) {
        av_log(NULL, AV_LOG_ERROR, "LZMA init error: %d\n", ret);
        return ret;
    }
    ret = lzma_code(&stream, LZMA_RUN);
    lzma_end(&stream);
    *len = stream.total_out;
    return ret == LZMA_STREAM_END ? LZMA_OK : ret;
}

static int tiff_unpack_lzma(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
                            const uint8_t *src, int size, int width, int lines,
                            int strip_start, int is_yuv)
{
    uint64_t outlen = width * lines;
    int ret, line;
    uint8_t *buf = av_malloc(outlen);
    if (!buf)
        return AVERROR(ENOMEM);
    if (s->fill_order) {
        if ((ret = deinvert_buffer(s, src, size)) < 0) {
            av_free(buf);
            return ret;
        }
        src = s->deinvert_buf;
    }
    ret = tiff_uncompress_lzma(buf, &outlen, src, size);
    if (ret != LZMA_OK) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Uncompressing failed (%"PRIu64" of %"PRIu64") with error %d\n", outlen,
               (uint64_t)width * lines, ret);
        av_free(buf);
        return AVERROR_UNKNOWN;
    }
    src = buf;
    for (line = 0; line < lines; line++) {
        if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
            horizontal_fill(s->bpp, dst, 1, src, 0, width, 0);
        } else {
            memcpy(dst, src, width);
        }
        if (is_yuv) {
            unpack_yuv(s, p, dst, strip_start + line);
            line += s->subsampling[1] - 1;
        }
        dst += stride;
        src += width;
    }
    av_free(buf);
    return 0;
}
#endif
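
/* CCITT RLE / Group 3 / Group 4 fax strips, decoded via ff_ccitt_unpack()
 * from a padded (and, if needed, bit-reversed) copy of the input. */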
static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride,
                           const uint8_t *src, int size, int width, int lines)
{
    int i, ret = 0;
    int line;
    uint8_t *src2 = av_malloc((unsigned)size +
                              FF_INPUT_BUFFER_PADDING_SIZE);

    if (!src2) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Error allocating temporary buffer\n");
        return AVERROR(ENOMEM);
    }

    if (s->fax_opts & 2) {
        avpriv_request_sample(s->avctx, "Uncompressed fax mode");
        av_free(src2);
        return AVERROR_PATCHWELCOME;
    }

    if (!s->fill_order) {
        memcpy(src2, src, size);
    } else {
        for (i = 0; i < size; i++)
            src2[i] = ff_reverse[src[i]];
    }
    memset(src2 + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    ret = ff_ccitt_unpack(s->avctx, src2, size, dst, lines, stride,
                          s->compr, s->fax_opts);
    if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
        for (line = 0; line < lines; line++) {
            horizontal_fill(s->bpp, dst, 1, dst, 0, width, 0);
            dst += stride;
        }
    av_free(src2);
    return ret;
}
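
/* Decode one strip into the picture (or into yuv_line for subsampled YCbCr
 * input), dispatching on the compression method. */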
static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
                             const uint8_t *src, int size, int strip_start, int lines)
{
    PutByteContext pb;
    int c, line, pixels, code, ret;
    const uint8_t *ssrc = src;
    int width = ((s->width * s->bpp) + 7) >> 3;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(p->format);
    int is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) && desc->nb_components >= 2;

    if (s->planar)
        width /= s->bppcount;

    if (size <= 0)
        return AVERROR_INVALIDDATA;

    if (is_yuv) {
        int bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
                             s->subsampling[0] * s->subsampling[1] + 7) >> 3;
        av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row);
        if (s->yuv_line == NULL) {
            av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
            return AVERROR(ENOMEM);
        }
        dst = s->yuv_line;
        stride = 0;

        width = s->width * s->subsampling[1] + 2*(s->width / s->subsampling[0]);
        av_assert0(width <= bytes_per_row);
        av_assert0(s->bpp == 24);
    }

    if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
#if CONFIG_ZLIB
        return tiff_unpack_zlib(s, p, dst, stride, src, size, width, lines,
                                strip_start, is_yuv);
#else
        av_log(s->avctx, AV_LOG_ERROR,
               "zlib support not enabled, "
               "deflate compression not supported\n");
        return AVERROR(ENOSYS);
#endif
    }
    if (s->compr == TIFF_LZMA) {
#if CONFIG_LZMA
        return tiff_unpack_lzma(s, p, dst, stride, src, size, width, lines,
                                strip_start, is_yuv);
#else
        av_log(s->avctx, AV_LOG_ERROR,
               "LZMA support not enabled\n");
        return AVERROR(ENOSYS);
#endif
    }
    if (s->compr == TIFF_LZW) {
        if (s->fill_order) {
            if ((ret = deinvert_buffer(s, src, size)) < 0)
                return ret;
            ssrc = src = s->deinvert_buf;
        }
        if (size > 1 && !src[0] && (src[1]&1)) {
            av_log(s->avctx, AV_LOG_ERROR, "Old style LZW is unsupported\n");
        }
        if ((ret = ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF)) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
            return ret;
        }
        for (line = 0; line < lines; line++) {
            pixels = ff_lzw_decode(s->lzw, dst, width);
            if (pixels < width) {
                av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
                       pixels, width);
                return AVERROR_INVALIDDATA;
            }
            if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
                horizontal_fill(s->bpp, dst, 1, dst, 0, width, 0);
            if (is_yuv) {
                unpack_yuv(s, p, dst, strip_start + line);
                line += s->subsampling[1] - 1;
            }
            dst += stride;
        }
        return 0;
    }
    if (s->compr == TIFF_CCITT_RLE ||
        s->compr == TIFF_G3 ||
        s->compr == TIFF_G4) {
        if (is_yuv)
            return AVERROR_INVALIDDATA;

        return tiff_unpack_fax(s, dst, stride, src, size, width, lines);
    }

    bytestream2_init(&s->gb, src, size);
    bytestream2_init_writer(&pb, dst, is_yuv ? s->yuv_line_size : (stride * lines));

    for (line = 0; line < lines; line++) {
        if (src - ssrc > size) {
            av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
            return AVERROR_INVALIDDATA;
        }

        if (bytestream2_get_bytes_left(&s->gb) == 0 || bytestream2_get_eof(&pb))
            break;
        bytestream2_seek_p(&pb, stride * line, SEEK_SET);
        switch (s->compr) {
        case TIFF_RAW:
            if (ssrc + size - src < width)
                return AVERROR_INVALIDDATA;

            if (!s->fill_order) {
                horizontal_fill(s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
                                dst, 1, src, 0, width, 0);
            } else {
                int i;
                for (i = 0; i < width; i++)
                    dst[i] = ff_reverse[src[i]];
            }
            src += width;
            break;
        case TIFF_PACKBITS:
            for (pixels = 0; pixels < width;) {
                if (ssrc + size - src < 2) {
                    av_log(s->avctx, AV_LOG_ERROR, "Read went out of bounds\n");
                    return AVERROR_INVALIDDATA;
                }
                code = s->fill_order ? (int8_t) ff_reverse[*src++]: (int8_t) *src++;
                if (code >= 0) {
                    code++;
                    if (pixels + code > width ||
                        ssrc + size - src < code) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "Copy went out of bounds\n");
                        return AVERROR_INVALIDDATA;
                    }
                    horizontal_fill(s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
                                    dst, 1, src, 0, code, pixels);
                    src    += code;
                    pixels += code;
                } else if (code != -128) { // -127..-1
                    code = (-code) + 1;
                    if (pixels + code > width) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "Run went out of bounds\n");
                        return AVERROR_INVALIDDATA;
                    }
                    c = *src++;
                    horizontal_fill(s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
                                    dst, 0, NULL, c, code, pixels);
                    pixels += code;
                }
            }
            if (s->fill_order) {
                int i;
                for (i = 0; i < width; i++)
                    dst[i] = ff_reverse[dst[i]];
            }
            break;
        }
        if (is_yuv) {
            unpack_yuv(s, p, dst, strip_start + line);
            line += s->subsampling[1] - 1;
        }
        dst += stride;
    }
    return 0;
}
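
/* Pick the output pixel format from the planar/bpp/bppcount combination,
 * set the frame dimensions and get a buffer; the palette is copied into
 * the frame for PAL8 output. */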
static int init_image(TiffContext *s, ThreadFrame *frame)
{
    int ret;

    switch (s->planar * 1000 + s->bpp * 10 + s->bppcount) {
    case 11:
        if (!s->palette_is_set) {
            s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
            break;
        }
    case 21:
    case 41:
    case 81:
        s->avctx->pix_fmt = s->palette_is_set ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
        break;
    case 243:
        if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
            if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV444P;
            } else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
            } else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV411P;
            } else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV440P;
            } else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV420P;
            } else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV410P;
            } else {
                av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
                return AVERROR_PATCHWELCOME;
            }
        } else
            s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
        break;
    case 161:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE;
        break;
    case 162:
        s->avctx->pix_fmt = AV_PIX_FMT_YA8;
        break;
    case 322:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_YA16LE : AV_PIX_FMT_YA16BE;
        break;
    case 324:
        s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
        break;
    case 483:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE;
        break;
    case 644:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE;
        break;
    case 1243:
        s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
        break;
    case 1324:
        s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
        break;
    case 1483:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE;
        break;
    case 1644:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE;
        break;
    default:
        av_log(s->avctx, AV_LOG_ERROR,
               "This format is not supported (bpp=%d, bppcount=%d)\n",
               s->bpp, s->bppcount);
        return AVERROR_INVALIDDATA;
    }

    if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
        if ((desc->flags & AV_PIX_FMT_FLAG_RGB) || desc->nb_components < 3) {
            av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr variant\n");
            return AVERROR_INVALIDDATA;
        }
    }

    if (s->width != s->avctx->width || s->height != s->avctx->height) {
        ret = ff_set_dimensions(s->avctx, s->width, s->height);
        if (ret < 0)
            return ret;
    }
    if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
        return ret;
    if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        memcpy(frame->f->data[1], s->palette, sizeof(s->palette));
    }
    return 0;
}
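
/* Store the XRes/YRes rational and derive the sample aspect ratio once all
 * four components are known. */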
static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
{
    int offset = tag == TIFF_YRES ? 2 : 0;
    s->res[offset++] = num;
    s->res[offset]   = den;
    if (s->res[0] && s->res[1] && s->res[2] && s->res[3])
        av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
                  s->res[2] * (uint64_t)s->res[1], s->res[0] * (uint64_t)s->res[3], INT32_MAX);
}
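
/* Parse a single IFD entry and update the decoder state (or frame metadata)
 * accordingly. */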
static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
{
    unsigned tag, type, count, off, value = 0, value2 = 0;
    int i, start;
    int pos;
    int ret;
    double *dp;

    ret = ff_tread_tag(&s->gb, s->le, &tag, &type, &count, &start);
    if (ret < 0) {
        goto end;
    }

    off = bytestream2_tell(&s->gb);
    if (count == 1) {
        switch (type) {
        case TIFF_BYTE:
        case TIFF_SHORT:
        case TIFF_LONG:
            value = ff_tget(&s->gb, type, s->le);
            break;
        case TIFF_RATIONAL:
            value  = ff_tget(&s->gb, TIFF_LONG, s->le);
            value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
            break;
        case TIFF_STRING:
            if (count <= 4) {
                break;
            }
        default:
            value = UINT_MAX;
        }
    }

    switch (tag) {
    case TIFF_WIDTH:
        s->width = value;
        break;
    case TIFF_HEIGHT:
        s->height = value;
        break;
    case TIFF_BPP:
        s->bppcount = count;
        if (count > 4) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "This format is not supported (bpp=%d, %d components)\n",
                   s->bpp, count);
            return AVERROR_INVALIDDATA;
        }
        if (count == 1)
            s->bpp = value;
        else {
            switch (type) {
            case TIFF_BYTE:
            case TIFF_SHORT:
            case TIFF_LONG:
                s->bpp = 0;
                if (bytestream2_get_bytes_left(&s->gb) < type_sizes[type] * count)
                    return AVERROR_INVALIDDATA;
                for (i = 0; i < count; i++)
                    s->bpp += ff_tget(&s->gb, type, s->le);
                break;
            default:
                s->bpp = -1;
            }
        }
        break;
    case TIFF_SAMPLES_PER_PIXEL:
        if (count != 1) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Samples per pixel requires a single value, many provided\n");
            return AVERROR_INVALIDDATA;
        }
        if (value > 4U) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Samples per pixel %d is too large\n", value);
            return AVERROR_INVALIDDATA;
        }
        if (s->bppcount == 1)
            s->bpp *= value;
        s->bppcount = value;
        break;
    case TIFF_COMPR:
        s->compr     = value;
        s->predictor = 0;
        switch (s->compr) {
        case TIFF_RAW:
        case TIFF_PACKBITS:
        case TIFF_LZW:
        case TIFF_CCITT_RLE:
            break;
        case TIFF_G3:
        case TIFF_G4:
            s->fax_opts = 0;
            break;
        case TIFF_DEFLATE:
        case TIFF_ADOBE_DEFLATE:
#if CONFIG_ZLIB
            break;
#else
            av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
            return AVERROR(ENOSYS);
#endif
        case TIFF_JPEG:
        case TIFF_NEWJPEG:
            avpriv_report_missing_feature(s->avctx, "JPEG compression");
            return AVERROR_PATCHWELCOME;
        case TIFF_LZMA:
#if CONFIG_LZMA
            break;
#else
            av_log(s->avctx, AV_LOG_ERROR, "LZMA not compiled in\n");
            return AVERROR(ENOSYS);
#endif
        default:
            av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n",
                   s->compr);
            return AVERROR_INVALIDDATA;
        }
        break;
    case TIFF_ROWSPERSTRIP:
        if (!value || (type == TIFF_LONG && value == UINT_MAX))
            value = s->height;
        s->rps = FFMIN(value, s->height);
        break;
    case TIFF_STRIP_OFFS:
        if (count == 1) {
            s->strippos = 0;
            s->stripoff = value;
        } else
            s->strippos = off;
        s->strips = count;
        if (s->strips == 1)
            s->rps = s->height;
        s->sot = type;
        break;
    case TIFF_STRIP_SIZE:
        if (count == 1) {
            s->stripsizesoff = 0;
            s->stripsize     = value;
            s->strips        = 1;
        } else {
            s->stripsizesoff = off;
        }
        s->strips = count;
        s->sstype = type;
        break;
    case TIFF_XRES:
    case TIFF_YRES:
        set_sar(s, tag, value, value2);
        break;
    case TIFF_TILE_BYTE_COUNTS:
    case TIFF_TILE_LENGTH:
    case TIFF_TILE_OFFSETS:
    case TIFF_TILE_WIDTH:
        av_log(s->avctx, AV_LOG_ERROR, "Tiled images are not supported\n");
        return AVERROR_PATCHWELCOME;
        break;
    case TIFF_PREDICTOR:
        s->predictor = value;
        break;
    case TIFF_PHOTOMETRIC:
        switch (value) {
        case TIFF_PHOTOMETRIC_WHITE_IS_ZERO:
        case TIFF_PHOTOMETRIC_BLACK_IS_ZERO:
        case TIFF_PHOTOMETRIC_RGB:
        case TIFF_PHOTOMETRIC_PALETTE:
        case TIFF_PHOTOMETRIC_YCBCR:
            s->photometric = value;
            break;
        case TIFF_PHOTOMETRIC_ALPHA_MASK:
        case TIFF_PHOTOMETRIC_SEPARATED:
        case TIFF_PHOTOMETRIC_CIE_LAB:
        case TIFF_PHOTOMETRIC_ICC_LAB:
        case TIFF_PHOTOMETRIC_ITU_LAB:
        case TIFF_PHOTOMETRIC_CFA:
        case TIFF_PHOTOMETRIC_LOG_L:
        case TIFF_PHOTOMETRIC_LOG_LUV:
        case TIFF_PHOTOMETRIC_LINEAR_RAW:
            avpriv_report_missing_feature(s->avctx,
                                          "PhotometricInterpretation 0x%04X",
                                          value);
            return AVERROR_PATCHWELCOME;
        default:
            av_log(s->avctx, AV_LOG_ERROR, "PhotometricInterpretation %u is "
                   "unknown\n", value);
            return AVERROR_INVALIDDATA;
        }
        break;
    case TIFF_FILL_ORDER:
        if (value < 1 || value > 2) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Unknown FillOrder value %d, trying default one\n", value);
            value = 1;
        }
        s->fill_order = value - 1;
        break;
    case TIFF_PAL: {
        GetByteContext pal_gb[3];
        off = type_sizes[type];
        if (count / 3 > 256 ||
            bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
            return AVERROR_INVALIDDATA;
        pal_gb[0] = pal_gb[1] = pal_gb[2] = s->gb;
        bytestream2_skip(&pal_gb[1], count / 3 * off);
        bytestream2_skip(&pal_gb[2], count / 3 * off * 2);
        off = (type_sizes[type] - 1) << 3;
        for (i = 0; i < count / 3; i++) {
            uint32_t p = 0xFF000000;
            p |= (ff_tget(&pal_gb[0], type, s->le) >> off) << 16;
            p |= (ff_tget(&pal_gb[1], type, s->le) >> off) << 8;
            p |=  ff_tget(&pal_gb[2], type, s->le) >> off;
            s->palette[i] = p;
        }
        s->palette_is_set = 1;
        break;
    }
    case TIFF_PLANAR:
        s->planar = value == 2;
        break;
    case TIFF_YCBCR_SUBSAMPLING:
        if (count != 2) {
            av_log(s->avctx, AV_LOG_ERROR, "subsample count invalid\n");
            return AVERROR_INVALIDDATA;
        }
        for (i = 0; i < count; i++)
            s->subsampling[i] = ff_tget(&s->gb, type, s->le);
        break;
    case TIFF_T4OPTIONS:
        if (s->compr == TIFF_G3)
            s->fax_opts = value;
        break;
    case TIFF_T6OPTIONS:
        if (s->compr == TIFF_G4)
            s->fax_opts = value;
        break;
#define ADD_METADATA(count, name, sep)\
    if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\
        av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\
        goto end;\
    }
    case TIFF_MODEL_PIXEL_SCALE:
        ADD_METADATA(count, "ModelPixelScaleTag", NULL);
        break;
    case TIFF_MODEL_TRANSFORMATION:
        ADD_METADATA(count, "ModelTransformationTag", NULL);
        break;
    case TIFF_MODEL_TIEPOINT:
        ADD_METADATA(count, "ModelTiepointTag", NULL);
        break;
    case TIFF_GEO_KEY_DIRECTORY:
        ADD_METADATA(1, "GeoTIFF_Version", NULL);
        ADD_METADATA(2, "GeoTIFF_Key_Revision", ".");
        s->geotag_count = ff_tget_short(&s->gb, s->le);
        if (s->geotag_count > count / 4 - 1) {
            s->geotag_count = count / 4 - 1;
            av_log(s->avctx, AV_LOG_WARNING, "GeoTIFF key directory buffer shorter than specified\n");
        }
        if (bytestream2_get_bytes_left(&s->gb) < s->geotag_count * sizeof(int16_t) * 4) {
            s->geotag_count = 0;
            return -1;
        }
        s->geotags = av_mallocz_array(s->geotag_count, sizeof(TiffGeoTag));
        if (!s->geotags) {
            av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
            s->geotag_count = 0;
            goto end;
        }
        for (i = 0; i < s->geotag_count; i++) {
            s->geotags[i].key   = ff_tget_short(&s->gb, s->le);
            s->geotags[i].type  = ff_tget_short(&s->gb, s->le);
            s->geotags[i].count = ff_tget_short(&s->gb, s->le);

            if (!s->geotags[i].type)
                s->geotags[i].val = get_geokey_val(s->geotags[i].key, ff_tget_short(&s->gb, s->le));
            else
                s->geotags[i].offset = ff_tget_short(&s->gb, s->le);
        }
        break;
    case TIFF_GEO_DOUBLE_PARAMS:
        if (count >= INT_MAX / sizeof(int64_t))
            return AVERROR_INVALIDDATA;
        if (bytestream2_get_bytes_left(&s->gb) < count * sizeof(int64_t))
            return AVERROR_INVALIDDATA;
        dp = av_malloc_array(count, sizeof(double));
        if (!dp) {
            av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
            goto end;
        }
        for (i = 0; i < count; i++)
            dp[i] = ff_tget_double(&s->gb, s->le);
        for (i = 0; i < s->geotag_count; i++) {
            if (s->geotags[i].type == TIFF_GEO_DOUBLE_PARAMS) {
                if (s->geotags[i].count == 0
                    || s->geotags[i].offset + s->geotags[i].count > count) {
                    av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
                } else {
                    char *ap = doubles2str(&dp[s->geotags[i].offset], s->geotags[i].count, ", ");
                    if (!ap) {
                        av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
                        av_freep(&dp);
                        return AVERROR(ENOMEM);
                    }
                    s->geotags[i].val = ap;
                }
            }
        }
        av_freep(&dp);
        break;
    case TIFF_GEO_ASCII_PARAMS:
        pos = bytestream2_tell(&s->gb);
        for (i = 0; i < s->geotag_count; i++) {
            if (s->geotags[i].type == TIFF_GEO_ASCII_PARAMS) {
                if (s->geotags[i].count == 0
                    || s->geotags[i].offset + s->geotags[i].count > count) {
                    av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
                } else {
                    char *ap;

                    bytestream2_seek(&s->gb, pos + s->geotags[i].offset, SEEK_SET);
                    if (bytestream2_get_bytes_left(&s->gb) < s->geotags[i].count)
                        return AVERROR_INVALIDDATA;
                    ap = av_malloc(s->geotags[i].count);
                    if (!ap) {
                        av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
                        return AVERROR(ENOMEM);
                    }
                    bytestream2_get_bufferu(&s->gb, ap, s->geotags[i].count);
                    ap[s->geotags[i].count - 1] = '\0'; //replace the "|" delimiter with a 0 byte
                    s->geotags[i].val = ap;
                }
            }
        }
        break;
    case TIFF_ARTIST:
        ADD_METADATA(count, "artist", NULL);
        break;
    case TIFF_COPYRIGHT:
        ADD_METADATA(count, "copyright", NULL);
        break;
    case TIFF_DATE:
        ADD_METADATA(count, "date", NULL);
        break;
    case TIFF_DOCUMENT_NAME:
        ADD_METADATA(count, "document_name", NULL);
        break;
    case TIFF_HOST_COMPUTER:
        ADD_METADATA(count, "computer", NULL);
        break;
    case TIFF_IMAGE_DESCRIPTION:
        ADD_METADATA(count, "description", NULL);
        break;
    case TIFF_MAKE:
        ADD_METADATA(count, "make", NULL);
        break;
    case TIFF_MODEL:
        ADD_METADATA(count, "model", NULL);
        break;
    case TIFF_PAGE_NAME:
        ADD_METADATA(count, "page_name", NULL);
        break;
    case TIFF_PAGE_NUMBER:
        ADD_METADATA(count, "page_number", " / ");
        break;
    case TIFF_SOFTWARE_NAME:
        ADD_METADATA(count, "software", NULL);
        break;
    default:
        if (s->avctx->err_recognition & AV_EF_EXPLODE) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Unknown or unsupported tag %d/0X%0X\n",
                   tag, tag);
            return AVERROR_INVALIDDATA;
        }
    }
end:
    bytestream2_seek(&s->gb, start, SEEK_SET);
    return 0;
}
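
/* Decode one TIFF picture: parse the header and IFD, allocate the frame,
 * unpack every strip of every plane, then apply the horizontal predictor
 * and the WhiteIsZero inversion if required. */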
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame, AVPacket *avpkt)
{
    TiffContext *const s = avctx->priv_data;
    AVFrame *const p = data;
    ThreadFrame frame = { .f = data };
    unsigned off;
    int le, ret, plane, planes;
    int i, j, entries, stride;
    unsigned soff, ssize;
    uint8_t *dst;
    GetByteContext stripsizes;
    GetByteContext stripdata;

    bytestream2_init(&s->gb, avpkt->data, avpkt->size);

    // parse image header
    if ((ret = ff_tdecode_header(&s->gb, &le, &off))) {
        av_log(avctx, AV_LOG_ERROR, "Invalid TIFF header\n");
        return ret;
    } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
        av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
        return AVERROR_INVALIDDATA;
    }
    s->le          = le;
    // TIFF_BPP is not a required tag and defaults to 1
    s->bppcount    = s->bpp = 1;
    s->photometric = TIFF_PHOTOMETRIC_NONE;
    s->compr       = TIFF_RAW;
    s->fill_order  = 0;
    free_geotags(s);

    // Reset these offsets so we can tell if they were set this frame
    s->stripsizesoff = s->strippos = 0;
    /* parse image file directory */
    bytestream2_seek(&s->gb, off, SEEK_SET);
    entries = ff_tget_short(&s->gb, le);
    if (bytestream2_get_bytes_left(&s->gb) < entries * 12)
        return AVERROR_INVALIDDATA;
    for (i = 0; i < entries; i++) {
        if ((ret = tiff_decode_tag(s, p)) < 0)
            return ret;
    }

    for (i = 0; i < s->geotag_count; i++) {
        const char *keyname = get_geokey_name(s->geotags[i].key);
        if (!keyname) {
            av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key);
            continue;
        }
        if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) {
            av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key);
            continue;
        }
        ret = av_dict_set(avpriv_frame_get_metadatap(p), keyname, s->geotags[i].val, 0);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname);
            return ret;
        }
    }

    if (!s->strippos && !s->stripoff) {
        av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
        return AVERROR_INVALIDDATA;
    }

    /* now we have the data and may start decoding */
    if ((ret = init_image(s, &frame)) < 0)
        return ret;

    if (s->strips == 1 && !s->stripsize) {
        av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
        s->stripsize = avpkt->size - s->stripoff;
    }

    if (s->stripsizesoff) {
        if (s->stripsizesoff >= (unsigned)avpkt->size)
            return AVERROR_INVALIDDATA;
        bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,
                         avpkt->size - s->stripsizesoff);
    }
    if (s->strippos) {
        if (s->strippos >= (unsigned)avpkt->size)
            return AVERROR_INVALIDDATA;
        bytestream2_init(&stripdata, avpkt->data + s->strippos,
                         avpkt->size - s->strippos);
    }

    if (s->rps <= 0) {
        av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps);
        return AVERROR_INVALIDDATA;
    }

    planes = s->planar ? s->bppcount : 1;
    for (plane = 0; plane < planes; plane++) {
        stride = p->linesize[plane];
        dst = p->data[plane];
        for (i = 0; i < s->height; i += s->rps) {
            if (s->stripsizesoff)
                ssize = ff_tget(&stripsizes, s->sstype, le);
            else
                ssize = s->stripsize;

            if (s->strippos)
                soff = ff_tget(&stripdata, s->sot, le);
            else
                soff = s->stripoff;

            if (soff > avpkt->size || ssize > avpkt->size - soff) {
                av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
                return AVERROR_INVALIDDATA;
            }
            if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i,
                                         FFMIN(s->rps, s->height - i))) < 0) {
                if (avctx->err_recognition & AV_EF_EXPLODE)
                    return ret;
                break;
            }
            dst += s->rps * stride;
        }
        if (s->predictor == 2) {
            if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
                av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported");
                return AVERROR_PATCHWELCOME;
            }
            dst  = p->data[plane];
            soff = s->bpp >> 3;
            if (s->planar)
                soff = FFMAX(soff / s->bppcount, 1);
            ssize = s->width * soff;
            if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE ||
                s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE ||
                s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE ||
                s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) {
                for (i = 0; i < s->height; i++) {
                    for (j = soff; j < ssize; j += 2)
                        AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));
                    dst += stride;
                }
            } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
                       s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE ||
                       s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE ||
                       s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) {
                for (i = 0; i < s->height; i++) {
                    for (j = soff; j < ssize; j += 2)
                        AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));
                    dst += stride;
                }
            } else {
                for (i = 0; i < s->height; i++) {
                    for (j = soff; j < ssize; j++)
                        dst[j] += dst[j - soff];
                    dst += stride;
                }
            }
        }

        if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) {
            dst = p->data[plane];
            for (i = 0; i < s->height; i++) {
                for (j = 0; j < p->linesize[plane]; j++)
                    dst[j] = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255) - dst[j];
                dst += stride;
            }
        }
    }

    if (s->planar && s->bppcount > 2) {
        FFSWAP(uint8_t*, p->data[0],     p->data[2]);
        FFSWAP(int,      p->linesize[0], p->linesize[2]);
        FFSWAP(uint8_t*, p->data[0],     p->data[1]);
        FFSWAP(int,      p->linesize[0], p->linesize[1]);
    }

    *got_frame = 1;

    return avpkt->size;
}
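
/* Codec lifecycle: set up the LZW decoder and fax tables at init,
 * free per-instance buffers at close. */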
static av_cold int tiff_init(AVCodecContext *avctx)
{
    TiffContext *s = avctx->priv_data;

    s->width  = 0;
    s->height = 0;
    s->subsampling[0] =
    s->subsampling[1] = 1;
    s->avctx  = avctx;
    ff_lzw_decode_open(&s->lzw);
    ff_ccitt_unpack_init();

    return 0;
}

static av_cold int tiff_end(AVCodecContext *avctx)
{
    TiffContext *const s = avctx->priv_data;

    free_geotags(s);

    ff_lzw_decode_close(&s->lzw);
    av_freep(&s->deinvert_buf);
    return 0;
}

AVCodec ff_tiff_decoder = {
    .name             = "tiff",
    .long_name        = NULL_IF_CONFIG_SMALL("TIFF image"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_TIFF,
    .priv_data_size   = sizeof(TiffContext),
    .init             = tiff_init,
    .close            = tiff_end,
    .decode           = decode_frame,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(tiff_init),
    .capabilities     = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
};