/*
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * TIFF image decoder
 * @author Konstantin Shishkov
 */

#include "config.h"
#if CONFIG_ZLIB
#include <zlib.h>
#endif

#include "libavutil/attributes.h"
#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "bytestream.h"
#include "faxcompr.h"
#include "internal.h"
#include "lzw.h"
#include "mathops.h"
#include "tiff.h"
#include "tiff_data.h"
#include "thread.h"
typedef struct TiffContext {
    AVCodecContext *avctx;
    GetByteContext gb;

    int width, height;
    unsigned int bpp, bppcount;
    uint32_t palette[256];
    int palette_is_set;
    int le;
    enum TiffCompr compr;
    int invert;
    int planar;
    int fax_opts;
    int predictor;
    int fill_order;
    uint32_t res[4];

    int strips, rps, sstype;
    int sot;
    int stripsizesoff, stripsize, stripoff, strippos;
    LZWState *lzw;

    uint8_t *deinvert_buf;
    int deinvert_buf_size;

    int geotag_count;
    TiffGeoTag *geotags;
} TiffContext;
static void free_geotags(TiffContext *const s)
{
    int i;
    for (i = 0; i < s->geotag_count; i++) {
        if (s->geotags[i].val)
            av_freep(&s->geotags[i].val);
    }
    av_freep(&s->geotags);
    s->geotag_count = 0;
}
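
/* Look up the human-readable name and expected data type of a GeoTIFF key.
 * Keys fall into configuration, geographic, projected and vertical ranges,
 * each with its own offset into a name/type table. */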
#define RET_GEOKEY(TYPE, array, element)\
    if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
        key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_name_type_map))\
        return ff_tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].element;

static const char *get_geokey_name(int key)
{
    RET_GEOKEY(VERT, vert, name);
    RET_GEOKEY(PROJ, proj, name);
    RET_GEOKEY(GEOG, geog, name);
    RET_GEOKEY(CONF, conf, name);

    return NULL;
}

static int get_geokey_type(int key)
{
    RET_GEOKEY(VERT, vert, type);
    RET_GEOKEY(PROJ, proj, type);
    RET_GEOKEY(GEOG, geog, type);
    RET_GEOKEY(CONF, conf, type);

    return AVERROR_INVALIDDATA;
}
static int cmp_id_key(const void *id, const void *k)
{
    return *(const int*)id - ((const TiffGeoTagKeyName*)k)->key;
}

static const char *search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
{
    const TiffGeoTagKeyName *r = bsearch(&id, keys, n, sizeof(keys[0]), cmp_id_key);
    if (r)
        return r->name;
    return NULL;
}
static char *get_geokey_val(int key, int val)
{
    char *ap;

    if (val == TIFF_GEO_KEY_UNDEFINED)
        return av_strdup("undefined");
    if (val == TIFF_GEO_KEY_USER_DEFINED)
        return av_strdup("User-Defined");

#define RET_GEOKEY_VAL(TYPE, array)\
    if (val >= TIFF_##TYPE##_OFFSET &&\
        val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_codes))\
        return av_strdup(ff_tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET]);

    switch (key) {
    case TIFF_GT_MODEL_TYPE_GEOKEY:
        RET_GEOKEY_VAL(GT_MODEL_TYPE, gt_model_type);
        break;
    case TIFF_GT_RASTER_TYPE_GEOKEY:
        RET_GEOKEY_VAL(GT_RASTER_TYPE, gt_raster_type);
        break;
    case TIFF_GEOG_LINEAR_UNITS_GEOKEY:
    case TIFF_PROJ_LINEAR_UNITS_GEOKEY:
    case TIFF_VERTICAL_UNITS_GEOKEY:
        RET_GEOKEY_VAL(LINEAR_UNIT, linear_unit);
        break;
    case TIFF_GEOG_ANGULAR_UNITS_GEOKEY:
    case TIFF_GEOG_AZIMUTH_UNITS_GEOKEY:
        RET_GEOKEY_VAL(ANGULAR_UNIT, angular_unit);
        break;
    case TIFF_GEOGRAPHIC_TYPE_GEOKEY:
        RET_GEOKEY_VAL(GCS_TYPE, gcs_type);
        RET_GEOKEY_VAL(GCSE_TYPE, gcse_type);
        break;
    case TIFF_GEOG_GEODETIC_DATUM_GEOKEY:
        RET_GEOKEY_VAL(GEODETIC_DATUM, geodetic_datum);
        RET_GEOKEY_VAL(GEODETIC_DATUM_E, geodetic_datum_e);
        break;
    case TIFF_GEOG_ELLIPSOID_GEOKEY:
        RET_GEOKEY_VAL(ELLIPSOID, ellipsoid);
        break;
    case TIFF_GEOG_PRIME_MERIDIAN_GEOKEY:
        RET_GEOKEY_VAL(PRIME_MERIDIAN, prime_meridian);
        break;
    case TIFF_PROJECTED_CS_TYPE_GEOKEY:
        ap = av_strdup(search_keyval(ff_tiff_proj_cs_type_codes, FF_ARRAY_ELEMS(ff_tiff_proj_cs_type_codes), val));
        if (ap)
            return ap;
        break;
    case TIFF_PROJECTION_GEOKEY:
        ap = av_strdup(search_keyval(ff_tiff_projection_codes, FF_ARRAY_ELEMS(ff_tiff_projection_codes), val));
        if (ap)
            return ap;
        break;
    case TIFF_PROJ_COORD_TRANS_GEOKEY:
        RET_GEOKEY_VAL(COORD_TRANS, coord_trans);
        break;
    case TIFF_VERTICAL_CS_TYPE_GEOKEY:
        RET_GEOKEY_VAL(VERT_CS, vert_cs);
        RET_GEOKEY_VAL(ORTHO_VERT_CS, ortho_vert_cs);
        break;
    }

    ap = av_malloc(14);
    if (ap)
        snprintf(ap, 14, "Unknown-%d", val);
    return ap;
}
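
/* Print an array of doubles as a single string, joining the elements with
 * sep (", " if sep is NULL). Returns a newly allocated string, or NULL on
 * overflow or allocation failure. */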
static char *doubles2str(double *dp, int count, const char *sep)
{
    int i;
    char *ap, *ap0;
    uint64_t component_len;
    if (!sep) sep = ", ";
    component_len = 24LL + strlen(sep);
    if (count >= (INT_MAX - 1)/component_len)
        return NULL;
    ap = av_malloc(component_len * count + 1);
    if (!ap)
        return NULL;
    ap0   = ap;
    ap[0] = '\0';
    for (i = 0; i < count; i++) {
        unsigned l = snprintf(ap, component_len, "%.15g%s", dp[i], sep);
        if (l >= component_len) {
            av_free(ap0);
            return NULL;
        }
        ap += l;
    }
    ap0[strlen(ap0) - strlen(sep)] = '\0';
    return ap0;
}
static int add_metadata(int count, int type,
                        const char *name, const char *sep, TiffContext *s, AVFrame *frame)
{
    switch(type) {
    case TIFF_DOUBLE: return ff_tadd_doubles_metadata(count, name, sep, &s->gb, s->le, avpriv_frame_get_metadatap(frame));
    case TIFF_SHORT : return ff_tadd_shorts_metadata(count, name, sep, &s->gb, s->le, avpriv_frame_get_metadatap(frame));
    case TIFF_STRING: return ff_tadd_string_metadata(count, name, &s->gb, s->le, avpriv_frame_get_metadatap(frame));
    default         : return AVERROR_INVALIDDATA;
    };
}
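
/* Expand packed 1-, 2- or 4-bit samples so that each sample occupies one
 * byte of dst. With usePtr set the samples are read from src, otherwise the
 * constant byte c is expanded; wider bit depths fall back to a plain
 * memcpy/memset. */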
static void av_always_inline horizontal_fill(unsigned int bpp, uint8_t* dst,
                                             int usePtr, const uint8_t *src,
                                             uint8_t c, int width, int offset)
{
    switch (bpp) {
    case 1:
        while (--width >= 0) {
            dst[(width+offset)*8+7] = (usePtr ? src[width] : c)      & 0x1;
            dst[(width+offset)*8+6] = (usePtr ? src[width] : c) >> 1 & 0x1;
            dst[(width+offset)*8+5] = (usePtr ? src[width] : c) >> 2 & 0x1;
            dst[(width+offset)*8+4] = (usePtr ? src[width] : c) >> 3 & 0x1;
            dst[(width+offset)*8+3] = (usePtr ? src[width] : c) >> 4 & 0x1;
            dst[(width+offset)*8+2] = (usePtr ? src[width] : c) >> 5 & 0x1;
            dst[(width+offset)*8+1] = (usePtr ? src[width] : c) >> 6 & 0x1;
            dst[(width+offset)*8+0] = (usePtr ? src[width] : c) >> 7;
        }
        break;
    case 2:
        while (--width >= 0) {
            dst[(width+offset)*4+3] = (usePtr ? src[width] : c)      & 0x3;
            dst[(width+offset)*4+2] = (usePtr ? src[width] : c) >> 2 & 0x3;
            dst[(width+offset)*4+1] = (usePtr ? src[width] : c) >> 4 & 0x3;
            dst[(width+offset)*4+0] = (usePtr ? src[width] : c) >> 6;
        }
        break;
    case 4:
        while (--width >= 0) {
            dst[(width+offset)*2+1] = (usePtr ? src[width] : c) & 0xF;
            dst[(width+offset)*2+0] = (usePtr ? src[width] : c) >> 4;
        }
        break;
    default:
        if (usePtr) {
            memcpy(dst + offset, src, width);
        } else {
            memset(dst + offset, c, width);
        }
    }
}
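
/* Bit-reverse every byte of src into the context's scratch buffer; used when
 * the FillOrder tag requests LSB-first bit order. */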
static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
{
    int i;

    av_fast_padded_malloc(&s->deinvert_buf, &s->deinvert_buf_size, size);
    if (!s->deinvert_buf)
        return AVERROR(ENOMEM);
    for (i = 0; i < size; i++)
        s->deinvert_buf[i] = ff_reverse[src[i]];

    return 0;
}
#if CONFIG_ZLIB
static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src,
                           int size)
{
    z_stream zstream = { 0 };
    int zret;

    zstream.next_in   = (uint8_t *)src;
    zstream.avail_in  = size;
    zstream.next_out  = dst;
    zstream.avail_out = *len;
    zret              = inflateInit(&zstream);
    if (zret != Z_OK) {
        av_log(NULL, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
        return zret;
    }
    zret = inflate(&zstream, Z_SYNC_FLUSH);
    inflateEnd(&zstream);
    *len = zstream.total_out;
    return zret == Z_STREAM_END ? Z_OK : zret;
}

static int tiff_unpack_zlib(TiffContext *s, uint8_t *dst, int stride,
                            const uint8_t *src, int size,
                            int width, int lines)
{
    uint8_t *zbuf;
    unsigned long outlen;
    int ret, line;
    outlen = width * lines;
    zbuf   = av_malloc(outlen);
    if (!zbuf)
        return AVERROR(ENOMEM);
    if (s->fill_order) {
        if ((ret = deinvert_buffer(s, src, size)) < 0) {
            av_free(zbuf);
            return ret;
        }
        src = s->deinvert_buf;
    }
    ret = tiff_uncompress(zbuf, &outlen, src, size);
    if (ret != Z_OK) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Uncompressing failed (%lu of %lu) with error %d\n", outlen,
               (unsigned long)width * lines, ret);
        av_free(zbuf);
        return AVERROR_UNKNOWN;
    }
    src = zbuf;
    for (line = 0; line < lines; line++) {
        if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
            horizontal_fill(s->bpp, dst, 1, src, 0, width, 0);
        } else {
            memcpy(dst, src, width);
        }
        dst += stride;
        src += width;
    }
    av_free(zbuf);
    return 0;
}
#endif
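
/* Decode a CCITT RLE / Group 3 / Group 4 fax-compressed strip. The input is
 * copied (bit-reversed if FillOrder requires it) into a padded temporary
 * buffer before being handed to the shared CCITT unpacker. */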
static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride,
                           const uint8_t *src, int size, int width, int lines)
{
    int i, ret = 0;
    int line;
    uint8_t *src2 = av_malloc((unsigned)size +
                              FF_INPUT_BUFFER_PADDING_SIZE);

    if (!src2) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Error allocating temporary buffer\n");
        return AVERROR(ENOMEM);
    }
    if (s->fax_opts & 2) {
        avpriv_request_sample(s->avctx, "Uncompressed fax mode");
        av_free(src2);
        return AVERROR_PATCHWELCOME;
    }
    if (!s->fill_order) {
        memcpy(src2, src, size);
    } else {
        for (i = 0; i < size; i++)
            src2[i] = ff_reverse[src[i]];
    }
    memset(src2 + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    ret = ff_ccitt_unpack(s->avctx, src2, size, dst, lines, stride,
                          s->compr, s->fax_opts);
    if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
        for (line = 0; line < lines; line++) {
            horizontal_fill(s->bpp, dst, 1, dst, 0, width, 0);
            dst += stride;
        }
    av_free(src2);
    return ret;
}
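
/* Decode one strip of image data into dst. Deflate, LZW and fax strips are
 * delegated to their own unpackers; raw and PackBits strips are handled
 * inline, line by line. */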
static int tiff_unpack_strip(TiffContext *s, uint8_t *dst, int stride,
                             const uint8_t *src, int size, int lines)
{
    PutByteContext pb;
    int c, line, pixels, code, ret;
    const uint8_t *ssrc = src;
    int width = ((s->width * s->bpp) + 7) >> 3;

    if (s->planar)
        width /= s->bppcount;

    if (size <= 0)
        return AVERROR_INVALIDDATA;

    if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
#if CONFIG_ZLIB
        return tiff_unpack_zlib(s, dst, stride, src, size, width, lines);
#else
        av_log(s->avctx, AV_LOG_ERROR,
               "zlib support not enabled, "
               "deflate compression not supported\n");
        return AVERROR(ENOSYS);
#endif
    }
    if (s->compr == TIFF_LZW) {
        if (s->fill_order) {
            if ((ret = deinvert_buffer(s, src, size)) < 0)
                return ret;
            ssrc = src = s->deinvert_buf;
        }
        if (size > 1 && !src[0] && (src[1]&1)) {
            av_log(s->avctx, AV_LOG_ERROR, "Old style LZW is unsupported\n");
        }
        if ((ret = ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF)) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
            return ret;
        }
        for (line = 0; line < lines; line++) {
            pixels = ff_lzw_decode(s->lzw, dst, width);
            if (pixels < width) {
                av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
                       pixels, width);
                return AVERROR_INVALIDDATA;
            }
            if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
                horizontal_fill(s->bpp, dst, 1, dst, 0, width, 0);
            dst += stride;
        }
        return 0;
    }
    if (s->compr == TIFF_CCITT_RLE ||
        s->compr == TIFF_G3        ||
        s->compr == TIFF_G4) {
        return tiff_unpack_fax(s, dst, stride, src, size, width, lines);
    }

    bytestream2_init(&s->gb, src, size);
    bytestream2_init_writer(&pb, dst, stride * lines);

    for (line = 0; line < lines; line++) {
        if (src - ssrc > size) {
            av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
            return AVERROR_INVALIDDATA;
        }

        if (bytestream2_get_bytes_left(&s->gb) == 0 || bytestream2_get_eof(&pb))
            break;
        bytestream2_seek_p(&pb, stride * line, SEEK_SET);
        switch (s->compr) {
        case TIFF_RAW:
            if (ssrc + size - src < width)
                return AVERROR_INVALIDDATA;

            if (!s->fill_order) {
                horizontal_fill(s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
                                dst, 1, src, 0, width, 0);
            } else {
                int i;
                for (i = 0; i < width; i++)
                    dst[i] = ff_reverse[src[i]];
            }
            src += width;
            break;
        case TIFF_PACKBITS:
            for (pixels = 0; pixels < width;) {
                if (ssrc + size - src < 2) {
                    av_log(s->avctx, AV_LOG_ERROR, "Read went out of bounds\n");
                    return AVERROR_INVALIDDATA;
                }
                code = s->fill_order ? (int8_t) ff_reverse[*src++]: (int8_t) *src++;
                if (code >= 0) {
                    code++;
                    if (pixels + code > width ||
                        ssrc + size - src < code) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "Copy went out of bounds\n");
                        return AVERROR_INVALIDDATA;
                    }
                    horizontal_fill(s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
                                    dst, 1, src, 0, code, pixels);
                    src    += code;
                    pixels += code;
                } else if (code != -128) { // -127..-1
                    code = (-code) + 1;
                    if (pixels + code > width) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "Run went out of bounds\n");
                        return AVERROR_INVALIDDATA;
                    }
                    c = *src++;
                    horizontal_fill(s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
                                    dst, 0, NULL, c, code, pixels);
                    pixels += code;
                }
            }
            if (s->fill_order) {
                int i;
                for (i = 0; i < width; i++)
                    dst[i] = ff_reverse[dst[i]];
            }
            break;
        }
        dst += stride;
    }
    return 0;
}
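
/* Pick the output pixel format from the planar flag, bit depth and component
 * count (encoded as planar * 1000 + bpp * 10 + bppcount), update the frame
 * dimensions, get a frame buffer and set up the palette for PAL8 output. */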
static int init_image(TiffContext *s, ThreadFrame *frame)
{
    int i, ret;
    uint32_t *pal;

    switch (s->planar * 1000 + s->bpp * 10 + s->bppcount) {
    case 11:
        if (!s->palette_is_set) {
            s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
            break;
        }
    case 21:
    case 41:
    case 81:
        s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
        break;
    case 243:
        s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
        break;
    case 161:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE;
        break;
    case 162:
        s->avctx->pix_fmt = AV_PIX_FMT_GRAY8A;
        break;
    case 324:
        s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
        break;
    case 483:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE;
        break;
    case 644:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE;
        break;
    case 1243:
        s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
        break;
    case 1324:
        s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
        break;
    case 1483:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE;
        break;
    case 1644:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE;
        break;
    default:
        av_log(s->avctx, AV_LOG_ERROR,
               "This format is not supported (bpp=%d, bppcount=%d)\n",
               s->bpp, s->bppcount);
        return AVERROR_INVALIDDATA;
    }
    if (s->width != s->avctx->width || s->height != s->avctx->height) {
        ret = ff_set_dimensions(s->avctx, s->width, s->height);
        if (ret < 0)
            return ret;
    }
    if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
        return ret;
    if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        if (s->palette_is_set) {
            memcpy(frame->f->data[1], s->palette, sizeof(s->palette));
        } else {
            /* make default grayscale pal */
            pal = (uint32_t *) frame->f->data[1];
            for (i = 0; i < 1<<s->bpp; i++)
                pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
        }
    }
    return 0;
}
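
/* Store the numerator/denominator of the XRes or YRes rational; once all
 * four values are known, derive the sample aspect ratio from the ratio of
 * the two resolutions. */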
static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
{
    int offset = tag == TIFF_YRES ? 2 : 0;
    s->res[offset++] = num;
    s->res[offset]   = den;
    if (s->res[0] && s->res[1] && s->res[2] && s->res[3])
        av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
                  s->res[2] * (uint64_t)s->res[1], s->res[0] * (uint64_t)s->res[3], INT32_MAX);
}
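
/* Parse a single IFD entry: read the tag header, fetch inline values where
 * possible and update the decoder state (dimensions, bit depth, compression,
 * strip layout, palette, GeoTIFF keys, frame metadata, ...). */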
static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
{
    unsigned tag, type, count, off, value = 0, value2 = 0;
    int i, start;
    int j, k, pos;
    int ret;
    uint32_t *pal;
    double *dp;

    ret = ff_tread_tag(&s->gb, s->le, &tag, &type, &count, &start);
    if (ret < 0) {
        goto end;
    }

    off = bytestream2_tell(&s->gb);
    if (count == 1) {
        switch (type) {
        case TIFF_BYTE:
        case TIFF_SHORT:
        case TIFF_LONG:
            value = ff_tget(&s->gb, type, s->le);
            break;
        case TIFF_RATIONAL:
            value  = ff_tget(&s->gb, TIFF_LONG, s->le);
            value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
            break;
        case TIFF_STRING:
            if (count <= 4) {
                break;
            }
        default:
            value = UINT_MAX;
        }
    }

    switch (tag) {
    case TIFF_WIDTH:
        s->width = value;
        break;
    case TIFF_HEIGHT:
        s->height = value;
        break;
    case TIFF_BPP:
        s->bppcount = count;
        if (count > 4) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "This format is not supported (bpp=%d, %d components)\n",
                   s->bpp, count);
            return AVERROR_INVALIDDATA;
        }
        if (count == 1)
            s->bpp = value;
        else {
            switch (type) {
            case TIFF_BYTE:
            case TIFF_SHORT:
            case TIFF_LONG:
                s->bpp = 0;
                if (bytestream2_get_bytes_left(&s->gb) < type_sizes[type] * count)
                    return AVERROR_INVALIDDATA;
                for (i = 0; i < count; i++)
                    s->bpp += ff_tget(&s->gb, type, s->le);
                break;
            default:
                s->bpp = -1;
            }
        }
        break;
    case TIFF_SAMPLES_PER_PIXEL:
        if (count != 1) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Samples per pixel requires a single value, many provided\n");
            return AVERROR_INVALIDDATA;
        }
        if (value > 4U) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Samples per pixel %d is too large\n", value);
            return AVERROR_INVALIDDATA;
        }
        if (s->bppcount == 1)
            s->bpp *= value;
        s->bppcount = value;
        break;
    case TIFF_COMPR:
        s->compr     = value;
        s->predictor = 0;
        switch (s->compr) {
        case TIFF_RAW:
        case TIFF_PACKBITS:
        case TIFF_LZW:
        case TIFF_CCITT_RLE:
            break;
        case TIFF_G3:
        case TIFF_G4:
            s->fax_opts = 0;
            break;
        case TIFF_DEFLATE:
        case TIFF_ADOBE_DEFLATE:
#if CONFIG_ZLIB
            break;
#else
            av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
            return AVERROR(ENOSYS);
#endif
        case TIFF_JPEG:
        case TIFF_NEWJPEG:
            avpriv_report_missing_feature(s->avctx, "JPEG compression");
            return AVERROR_PATCHWELCOME;
        default:
            av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n",
                   s->compr);
            return AVERROR_INVALIDDATA;
        }
        break;
    case TIFF_ROWSPERSTRIP:
        if (!value || (type == TIFF_LONG && value == UINT_MAX))
            value = s->height;
        s->rps = FFMIN(value, s->height);
        break;
    case TIFF_STRIP_OFFS:
        if (count == 1) {
            s->strippos = 0;
            s->stripoff = value;
        } else
            s->strippos = off;
        s->strips = count;
        if (s->strips == 1)
            s->rps = s->height;
        s->sot = type;
        break;
    case TIFF_STRIP_SIZE:
        if (count == 1) {
            s->stripsizesoff = 0;
            s->stripsize     = value;
            s->strips        = 1;
        } else {
            s->stripsizesoff = off;
        }
        s->strips = count;
        s->sstype = type;
        break;
    case TIFF_XRES:
    case TIFF_YRES:
        set_sar(s, tag, value, value2);
        break;
    case TIFF_TILE_BYTE_COUNTS:
    case TIFF_TILE_LENGTH:
    case TIFF_TILE_OFFSETS:
    case TIFF_TILE_WIDTH:
        av_log(s->avctx, AV_LOG_ERROR, "Tiled images are not supported\n");
        return AVERROR_PATCHWELCOME;
        break;
    case TIFF_PREDICTOR:
        s->predictor = value;
        break;
    case TIFF_INVERT:
        switch (value) {
        case 0:
            s->invert = 1;
            break;
        case 1:
            s->invert = 0;
            break;
        case 2:
        case 3:
            break;
        default:
            av_log(s->avctx, AV_LOG_ERROR, "Color mode %d is not supported\n",
                   value);
            return AVERROR_INVALIDDATA;
        }
        break;
    case TIFF_FILL_ORDER:
        if (value < 1 || value > 2) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Unknown FillOrder value %d, trying default one\n", value);
            value = 1;
        }
        s->fill_order = value - 1;
        break;
    case TIFF_PAL: {
        pal = (uint32_t *) s->palette;
        off = type_sizes[type];
        if (count / 3 > 256 ||
            bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
            return AVERROR_INVALIDDATA;
        off = (type_sizes[type] - 1) << 3;
        for (k = 2; k >= 0; k--) {
            for (i = 0; i < count / 3; i++) {
                if (k == 2)
                    pal[i] = 0xFFU << 24;
                j = (ff_tget(&s->gb, type, s->le) >> off) << (k * 8);
                pal[i] |= j;
            }
        }
        s->palette_is_set = 1;
        break;
    }
    case TIFF_PLANAR:
        s->planar = value == 2;
        break;
    case TIFF_T4OPTIONS:
        if (s->compr == TIFF_G3)
            s->fax_opts = value;
        break;
    case TIFF_T6OPTIONS:
        if (s->compr == TIFF_G4)
            s->fax_opts = value;
        break;
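/* Helper for the metadata tags below: convert the current tag's payload into
 * a frame metadata entry, jumping to the common exit path on failure. */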
#define ADD_METADATA(count, name, sep)\
    if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\
        av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\
        goto end;\
    }
    case TIFF_MODEL_PIXEL_SCALE:
        ADD_METADATA(count, "ModelPixelScaleTag", NULL);
        break;
    case TIFF_MODEL_TRANSFORMATION:
        ADD_METADATA(count, "ModelTransformationTag", NULL);
        break;
    case TIFF_MODEL_TIEPOINT:
        ADD_METADATA(count, "ModelTiepointTag", NULL);
        break;
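    /* GeoTIFF key directory: each entry is four 16-bit values
     * (key, type, count, value/offset); values stored by reference are
     * resolved later from the double/ASCII parameter tags below. */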
    case TIFF_GEO_KEY_DIRECTORY:
        ADD_METADATA(1, "GeoTIFF_Version", NULL);
        ADD_METADATA(2, "GeoTIFF_Key_Revision", ".");
        s->geotag_count = ff_tget_short(&s->gb, s->le);
        if (s->geotag_count > count / 4 - 1) {
            s->geotag_count = count / 4 - 1;
            av_log(s->avctx, AV_LOG_WARNING, "GeoTIFF key directory buffer shorter than specified\n");
        }
        if (bytestream2_get_bytes_left(&s->gb) < s->geotag_count * sizeof(int16_t) * 4) {
            s->geotag_count = 0;
            return -1;
        }
        s->geotags = av_mallocz(sizeof(TiffGeoTag) * s->geotag_count);
        if (!s->geotags) {
            av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
            s->geotag_count = 0;
            goto end;
        }
        for (i = 0; i < s->geotag_count; i++) {
            s->geotags[i].key   = ff_tget_short(&s->gb, s->le);
            s->geotags[i].type  = ff_tget_short(&s->gb, s->le);
            s->geotags[i].count = ff_tget_short(&s->gb, s->le);

            if (!s->geotags[i].type)
                s->geotags[i].val = get_geokey_val(s->geotags[i].key, ff_tget_short(&s->gb, s->le));
            else
                s->geotags[i].offset = ff_tget_short(&s->gb, s->le);
        }
        break;
    case TIFF_GEO_DOUBLE_PARAMS:
        if (count >= INT_MAX / sizeof(int64_t))
            return AVERROR_INVALIDDATA;
        if (bytestream2_get_bytes_left(&s->gb) < count * sizeof(int64_t))
            return AVERROR_INVALIDDATA;
        dp = av_malloc(count * sizeof(double));
        if (!dp) {
            av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
            goto end;
        }
        for (i = 0; i < count; i++)
            dp[i] = ff_tget_double(&s->gb, s->le);
        for (i = 0; i < s->geotag_count; i++) {
            if (s->geotags[i].type == TIFF_GEO_DOUBLE_PARAMS) {
                if (s->geotags[i].count == 0
                    || s->geotags[i].offset + s->geotags[i].count > count) {
                    av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
                } else {
                    char *ap = doubles2str(&dp[s->geotags[i].offset], s->geotags[i].count, ", ");
                    if (!ap) {
                        av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
                        av_freep(&dp);
                        return AVERROR(ENOMEM);
                    }
                    s->geotags[i].val = ap;
                }
            }
        }
        av_freep(&dp);
        break;
    case TIFF_GEO_ASCII_PARAMS:
        pos = bytestream2_tell(&s->gb);
        for (i = 0; i < s->geotag_count; i++) {
            if (s->geotags[i].type == TIFF_GEO_ASCII_PARAMS) {
                if (s->geotags[i].count == 0
                    || s->geotags[i].offset + s->geotags[i].count > count) {
                    av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
                } else {
                    char *ap;

                    bytestream2_seek(&s->gb, pos + s->geotags[i].offset, SEEK_SET);
                    if (bytestream2_get_bytes_left(&s->gb) < s->geotags[i].count)
                        return AVERROR_INVALIDDATA;
                    ap = av_malloc(s->geotags[i].count);
                    if (!ap) {
                        av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
                        return AVERROR(ENOMEM);
                    }
                    bytestream2_get_bufferu(&s->gb, ap, s->geotags[i].count);
                    ap[s->geotags[i].count - 1] = '\0'; //replace the "|" delimiter with a 0 byte
                    s->geotags[i].val = ap;
                }
            }
        }
        break;
    case TIFF_ARTIST:
        ADD_METADATA(count, "artist", NULL);
        break;
    case TIFF_COPYRIGHT:
        ADD_METADATA(count, "copyright", NULL);
        break;
    case TIFF_DATE:
        ADD_METADATA(count, "date", NULL);
        break;
    case TIFF_DOCUMENT_NAME:
        ADD_METADATA(count, "document_name", NULL);
        break;
    case TIFF_HOST_COMPUTER:
        ADD_METADATA(count, "computer", NULL);
        break;
    case TIFF_IMAGE_DESCRIPTION:
        ADD_METADATA(count, "description", NULL);
        break;
    case TIFF_MAKE:
        ADD_METADATA(count, "make", NULL);
        break;
    case TIFF_MODEL:
        ADD_METADATA(count, "model", NULL);
        break;
    case TIFF_PAGE_NAME:
        ADD_METADATA(count, "page_name", NULL);
        break;
    case TIFF_PAGE_NUMBER:
        ADD_METADATA(count, "page_number", " / ");
        break;
    case TIFF_SOFTWARE_NAME:
        ADD_METADATA(count, "software", NULL);
        break;
    default:
        if (s->avctx->err_recognition & AV_EF_EXPLODE) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Unknown or unsupported tag %d/0X%0X\n",
                   tag, tag);
            return AVERROR_INVALIDDATA;
        }
    }
end:
    bytestream2_seek(&s->gb, start, SEEK_SET);
    return 0;
}
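
/* Main decode entry point: parse the TIFF header and IFD, export GeoTIFF
 * keys as frame metadata, set up the output frame, then decode the strips
 * plane by plane and undo the horizontal predictor and photometric
 * inversion where requested. */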
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame, AVPacket *avpkt)
{
    TiffContext *const s = avctx->priv_data;
    AVFrame *const p = data;
    ThreadFrame frame = { .f = data };
    unsigned off;
    int le, ret, plane, planes;
    int i, j, entries, stride;
    unsigned soff, ssize;
    uint8_t *dst;
    GetByteContext stripsizes;
    GetByteContext stripdata;

    bytestream2_init(&s->gb, avpkt->data, avpkt->size);

    // parse image header
    if ((ret = ff_tdecode_header(&s->gb, &le, &off))) {
        av_log(avctx, AV_LOG_ERROR, "Invalid TIFF header\n");
        return ret;
    } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
        av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
        return AVERROR_INVALIDDATA;
    }
    s->le = le;
    // TIFF_BPP is not a required tag and defaults to 1
    s->bppcount   = s->bpp = 1;
    s->invert     = 0;
    s->compr      = TIFF_RAW;
    s->fill_order = 0;
    free_geotags(s);

    // Reset these offsets so we can tell if they were set this frame
    s->stripsizesoff = s->strippos = 0;
    /* parse image file directory */
    bytestream2_seek(&s->gb, off, SEEK_SET);
    entries = ff_tget_short(&s->gb, le);
    if (bytestream2_get_bytes_left(&s->gb) < entries * 12)
        return AVERROR_INVALIDDATA;
    for (i = 0; i < entries; i++) {
        if ((ret = tiff_decode_tag(s, p)) < 0)
            return ret;
    }

    for (i = 0; i < s->geotag_count; i++) {
        const char *keyname = get_geokey_name(s->geotags[i].key);
        if (!keyname) {
            av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key);
            continue;
        }
        if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) {
            av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key);
            continue;
        }
        ret = av_dict_set(avpriv_frame_get_metadatap(p), keyname, s->geotags[i].val, 0);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname);
            return ret;
        }
    }

    if (!s->strippos && !s->stripoff) {
        av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
        return AVERROR_INVALIDDATA;
    }

    /* now we have the data and may start decoding */
    if ((ret = init_image(s, &frame)) < 0)
        return ret;

    if (s->strips == 1 && !s->stripsize) {
        av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
        s->stripsize = avpkt->size - s->stripoff;
    }

    if (s->stripsizesoff) {
        if (s->stripsizesoff >= (unsigned)avpkt->size)
            return AVERROR_INVALIDDATA;
        bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,
                         avpkt->size - s->stripsizesoff);
    }
    if (s->strippos) {
        if (s->strippos >= (unsigned)avpkt->size)
            return AVERROR_INVALIDDATA;
        bytestream2_init(&stripdata, avpkt->data + s->strippos,
                         avpkt->size - s->strippos);
    }

    if (s->rps <= 0) {
        av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps);
        return AVERROR_INVALIDDATA;
    }

    planes = s->planar ? s->bppcount : 1;
    for (plane = 0; plane < planes; plane++) {
        stride = p->linesize[plane];
        dst    = p->data[plane];
        for (i = 0; i < s->height; i += s->rps) {
            if (s->stripsizesoff)
                ssize = ff_tget(&stripsizes, s->sstype, le);
            else
                ssize = s->stripsize;

            if (s->strippos)
                soff = ff_tget(&stripdata, s->sot, le);
            else
                soff = s->stripoff;

            if (soff > avpkt->size || ssize > avpkt->size - soff) {
                av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
                return AVERROR_INVALIDDATA;
            }
            if ((ret = tiff_unpack_strip(s, dst, stride, avpkt->data + soff, ssize,
                                         FFMIN(s->rps, s->height - i))) < 0) {
                if (avctx->err_recognition & AV_EF_EXPLODE)
                    return ret;
                break;
            }
            dst += s->rps * stride;
        }
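        /* TIFF predictor 2 stores each sample as the difference from the
         * sample to its left; undo it with a running horizontal sum
         * (16-bit formats are summed in their native byte order). */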
        if (s->predictor == 2) {
            dst  = p->data[plane];
            soff = s->bpp >> 3;
            if (s->planar)
                soff = FFMAX(soff / s->bppcount, 1);
            ssize = s->width * soff;
            if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE ||
                s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE ||
                s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE ||
                s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) {
                for (i = 0; i < s->height; i++) {
                    for (j = soff; j < ssize; j += 2)
                        AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));
                    dst += stride;
                }
            } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
                       s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE ||
                       s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE ||
                       s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) {
                for (i = 0; i < s->height; i++) {
                    for (j = soff; j < ssize; j += 2)
                        AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));
                    dst += stride;
                }
            } else {
                for (i = 0; i < s->height; i++) {
                    for (j = soff; j < ssize; j++)
                        dst[j] += dst[j - soff];
                    dst += stride;
                }
            }
        }

        if (s->invert) {
            dst = p->data[plane];
            for (i = 0; i < s->height; i++) {
                for (j = 0; j < p->linesize[plane]; j++)
                    dst[j] = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255) - dst[j];
                dst += stride;
            }
        }
    }

    if (s->planar && s->bppcount > 2) {
        FFSWAP(uint8_t*, p->data[0],     p->data[2]);
        FFSWAP(int,      p->linesize[0], p->linesize[2]);
        FFSWAP(uint8_t*, p->data[0],     p->data[1]);
        FFSWAP(int,      p->linesize[0], p->linesize[1]);
    }

    *got_frame = 1;

    return avpkt->size;
}
static av_cold int tiff_init(AVCodecContext *avctx)
{
    TiffContext *s = avctx->priv_data;

    s->width  = 0;
    s->height = 0;
    s->avctx  = avctx;
    ff_lzw_decode_open(&s->lzw);
    ff_ccitt_unpack_init();

    return 0;
}

static av_cold int tiff_end(AVCodecContext *avctx)
{
    TiffContext *const s = avctx->priv_data;

    free_geotags(s);

    ff_lzw_decode_close(&s->lzw);
    av_freep(&s->deinvert_buf);
    return 0;
}

AVCodec ff_tiff_decoder = {
    .name             = "tiff",
    .long_name        = NULL_IF_CONFIG_SMALL("TIFF image"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_TIFF,
    .priv_data_size   = sizeof(TiffContext),
    .init             = tiff_init,
    .close            = tiff_end,
    .decode           = decode_frame,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(tiff_init),
    .capabilities     = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
};