/*
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * TIFF image decoder
 * @author Konstantin Shishkov
 */

#include "config.h"
#if CONFIG_ZLIB
#include <zlib.h>
#endif
#if CONFIG_LZMA
#define LZMA_API_STATIC
#include <lzma.h>
#endif

#include "libavutil/attributes.h"
#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "bytestream.h"
#include "faxcompr.h"
#include "internal.h"
#include "lzw.h"
#include "mathops.h"
#include "tiff.h"
#include "tiff_data.h"
#include "thread.h"
#include "get_bits.h"

typedef struct TiffContext {
    AVClass *class;
    AVCodecContext *avctx;
    GetByteContext gb;
    int get_subimage;
    uint16_t get_page;
    int width, height;
    unsigned int bpp, bppcount;
    uint32_t palette[256];
    int palette_is_set;
    int le;
    enum TiffCompr compr;
    enum TiffPhotometric photometric;
    int planar;
    int subsampling[2];
    int fax_opts;
    int predictor;
    int fill_order;
    uint32_t res[4];
    int is_bayer;
    uint8_t pattern[4];
    unsigned white_level;
    uint32_t sub_ifd;
    uint16_t cur_page;
    int strips, rps, sstype;
    int sot;
    int stripsizesoff, stripsize, stripoff, strippos;
    LZWState *lzw;
    uint8_t *deinvert_buf;
    int deinvert_buf_size;
    uint8_t *yuv_line;
    unsigned int yuv_line_size;
    uint8_t *fax_buffer;
    unsigned int fax_buffer_size;
    int geotag_count;
    TiffGeoTag *geotags;
} TiffContext;

static void free_geotags(TiffContext *const s)
{
    int i;
    for (i = 0; i < s->geotag_count; i++) {
        if (s->geotags[i].val)
            av_freep(&s->geotags[i].val);
    }
    av_freep(&s->geotags);
    s->geotag_count = 0;
}

#define RET_GEOKEY(TYPE, array, element)\
    if (key >= TIFF_##TYPE##_KEY_ID_OFFSET &&\
        key - TIFF_##TYPE##_KEY_ID_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_name_type_map))\
        return ff_tiff_##array##_name_type_map[key - TIFF_##TYPE##_KEY_ID_OFFSET].element;

static const char *get_geokey_name(int key)
{
    RET_GEOKEY(VERT, vert, name);
    RET_GEOKEY(PROJ, proj, name);
    RET_GEOKEY(GEOG, geog, name);
    RET_GEOKEY(CONF, conf, name);
    return NULL;
}

static int get_geokey_type(int key)
{
    RET_GEOKEY(VERT, vert, type);
    RET_GEOKEY(PROJ, proj, type);
    RET_GEOKEY(GEOG, geog, type);
    RET_GEOKEY(CONF, conf, type);
    return AVERROR_INVALIDDATA;
}

static int cmp_id_key(const void *id, const void *k)
{
    return *(const int*)id - ((const TiffGeoTagKeyName*)k)->key;
}

static const char *search_keyval(const TiffGeoTagKeyName *keys, int n, int id)
{
    TiffGeoTagKeyName *r = bsearch(&id, keys, n, sizeof(keys[0]), cmp_id_key);
    if (r)
        return r->name;
    return NULL;
}

static char *get_geokey_val(int key, int val)
{
    char *ap;
    if (val == TIFF_GEO_KEY_UNDEFINED)
        return av_strdup("undefined");
    if (val == TIFF_GEO_KEY_USER_DEFINED)
        return av_strdup("User-Defined");

#define RET_GEOKEY_VAL(TYPE, array)\
    if (val >= TIFF_##TYPE##_OFFSET &&\
        val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_codes))\
        return av_strdup(ff_tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET]);

    switch (key) {
    case TIFF_GT_MODEL_TYPE_GEOKEY:
        RET_GEOKEY_VAL(GT_MODEL_TYPE, gt_model_type);
        break;
    case TIFF_GT_RASTER_TYPE_GEOKEY:
        RET_GEOKEY_VAL(GT_RASTER_TYPE, gt_raster_type);
        break;
    case TIFF_GEOG_LINEAR_UNITS_GEOKEY:
    case TIFF_PROJ_LINEAR_UNITS_GEOKEY:
    case TIFF_VERTICAL_UNITS_GEOKEY:
        RET_GEOKEY_VAL(LINEAR_UNIT, linear_unit);
        break;
    case TIFF_GEOG_ANGULAR_UNITS_GEOKEY:
    case TIFF_GEOG_AZIMUTH_UNITS_GEOKEY:
        RET_GEOKEY_VAL(ANGULAR_UNIT, angular_unit);
        break;
    case TIFF_GEOGRAPHIC_TYPE_GEOKEY:
        RET_GEOKEY_VAL(GCS_TYPE, gcs_type);
        RET_GEOKEY_VAL(GCSE_TYPE, gcse_type);
        break;
    case TIFF_GEOG_GEODETIC_DATUM_GEOKEY:
        RET_GEOKEY_VAL(GEODETIC_DATUM, geodetic_datum);
        RET_GEOKEY_VAL(GEODETIC_DATUM_E, geodetic_datum_e);
        break;
    case TIFF_GEOG_ELLIPSOID_GEOKEY:
        RET_GEOKEY_VAL(ELLIPSOID, ellipsoid);
        break;
    case TIFF_GEOG_PRIME_MERIDIAN_GEOKEY:
        RET_GEOKEY_VAL(PRIME_MERIDIAN, prime_meridian);
        break;
    case TIFF_PROJECTED_CS_TYPE_GEOKEY:
        ap = av_strdup(search_keyval(ff_tiff_proj_cs_type_codes, FF_ARRAY_ELEMS(ff_tiff_proj_cs_type_codes), val));
        if (ap) return ap;
        break;
    case TIFF_PROJECTION_GEOKEY:
        ap = av_strdup(search_keyval(ff_tiff_projection_codes, FF_ARRAY_ELEMS(ff_tiff_projection_codes), val));
        if (ap) return ap;
        break;
    case TIFF_PROJ_COORD_TRANS_GEOKEY:
        RET_GEOKEY_VAL(COORD_TRANS, coord_trans);
        break;
    case TIFF_VERTICAL_CS_TYPE_GEOKEY:
        RET_GEOKEY_VAL(VERT_CS, vert_cs);
        RET_GEOKEY_VAL(ORTHO_VERT_CS, ortho_vert_cs);
        break;
    }

    ap = av_malloc(14);
    if (ap)
        snprintf(ap, 14, "Unknown-%d", val);
    return ap;
}

static char *doubles2str(double *dp, int count, const char *sep)
{
    int i;
    char *ap, *ap0;
    uint64_t component_len;
    if (!sep) sep = ", ";
    component_len = 24LL + strlen(sep);
    if (count >= (INT_MAX - 1) / component_len)
        return NULL;
    ap = av_malloc(component_len * count + 1);
    if (!ap)
        return NULL;
    ap0 = ap;
    ap[0] = '\0';
    for (i = 0; i < count; i++) {
        unsigned l = snprintf(ap, component_len, "%.15g%s", dp[i], sep);
        if (l >= component_len) {
            av_free(ap0);
            return NULL;
        }
        ap += l;
    }
    ap0[strlen(ap0) - strlen(sep)] = '\0';
    return ap0;
}

static int add_metadata(int count, int type,
                        const char *name, const char *sep, TiffContext *s, AVFrame *frame)
{
    switch (type) {
    case TIFF_DOUBLE: return ff_tadd_doubles_metadata(count, name, sep, &s->gb, s->le, &frame->metadata);
    case TIFF_SHORT : return ff_tadd_shorts_metadata(count, name, sep, &s->gb, s->le, 0, &frame->metadata);
    case TIFF_STRING: return ff_tadd_string_metadata(count, name, &s->gb, s->le, &frame->metadata);
    default         : return AVERROR_INVALIDDATA;
    };
}

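/* Expand packed 1-, 2- or 4-bit samples to one byte per sample (used for
 * PAL8 output) and 12-bit samples to left-aligned 16-bit words; any other
 * depth is copied (or memset from the run value c) verbatim. */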
static void av_always_inline horizontal_fill(TiffContext *s,
                                             unsigned int bpp, uint8_t* dst,
                                             int usePtr, const uint8_t *src,
                                             uint8_t c, int width, int offset)
{
    switch (bpp) {
    case 1:
        while (--width >= 0) {
            dst[(width+offset)*8+7] = (usePtr ? src[width] : c) & 0x1;
            dst[(width+offset)*8+6] = (usePtr ? src[width] : c) >> 1 & 0x1;
            dst[(width+offset)*8+5] = (usePtr ? src[width] : c) >> 2 & 0x1;
            dst[(width+offset)*8+4] = (usePtr ? src[width] : c) >> 3 & 0x1;
            dst[(width+offset)*8+3] = (usePtr ? src[width] : c) >> 4 & 0x1;
            dst[(width+offset)*8+2] = (usePtr ? src[width] : c) >> 5 & 0x1;
            dst[(width+offset)*8+1] = (usePtr ? src[width] : c) >> 6 & 0x1;
            dst[(width+offset)*8+0] = (usePtr ? src[width] : c) >> 7;
        }
        break;
    case 2:
        while (--width >= 0) {
            dst[(width+offset)*4+3] = (usePtr ? src[width] : c) & 0x3;
            dst[(width+offset)*4+2] = (usePtr ? src[width] : c) >> 2 & 0x3;
            dst[(width+offset)*4+1] = (usePtr ? src[width] : c) >> 4 & 0x3;
            dst[(width+offset)*4+0] = (usePtr ? src[width] : c) >> 6;
        }
        break;
    case 4:
        while (--width >= 0) {
            dst[(width+offset)*2+1] = (usePtr ? src[width] : c) & 0xF;
            dst[(width+offset)*2+0] = (usePtr ? src[width] : c) >> 4;
        }
        break;
    case 12: {
        uint16_t *dst16 = (uint16_t *)dst;
        GetBitContext gb;
        init_get_bits8(&gb, src, width);
        for (int i = 0; i < s->width; i++) {
            dst16[i] = get_bits(&gb, 12) << 4;
        }
        break;
    }
    default:
        if (usePtr) {
            memcpy(dst + offset, src, width);
        } else {
            memset(dst + offset, c, width);
        }
    }
}

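/* Bit-reverse every byte of src into the persistent deinvert_buf; used when
 * FillOrder == 2 (least significant bit first). */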
static int deinvert_buffer(TiffContext *s, const uint8_t *src, int size)
{
    int i;
    av_fast_padded_malloc(&s->deinvert_buf, &s->deinvert_buf_size, size);
    if (!s->deinvert_buf)
        return AVERROR(ENOMEM);
    for (i = 0; i < size; i++)
        s->deinvert_buf[i] = ff_reverse[src[i]];
    return 0;
}

static void unpack_gray(TiffContext *s, AVFrame *p,
                        const uint8_t *src, int lnum, int width, int bpp)
{
    GetBitContext gb;
    uint16_t *dst = (uint16_t *)(p->data[0] + lnum * p->linesize[0]);
    init_get_bits8(&gb, src, width);
    for (int i = 0; i < s->width; i++) {
        dst[i] = get_bits(&gb, bpp);
    }
}

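/* Re-distribute one packed YCbCr macro-row into the planar luma and chroma
 * planes of the output frame, clamping against the right/bottom edge when
 * the image size is not a multiple of the subsampling factors. */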
static void unpack_yuv(TiffContext *s, AVFrame *p,
                       const uint8_t *src, int lnum)
{
    int i, j, k;
    int w = (s->width - 1) / s->subsampling[0] + 1;
    uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
    uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];
    if (s->width % s->subsampling[0] || s->height % s->subsampling[1]) {
        for (i = 0; i < w; i++) {
            for (j = 0; j < s->subsampling[1]; j++)
                for (k = 0; k < s->subsampling[0]; k++)
                    p->data[0][FFMIN(lnum + j, s->height-1) * p->linesize[0] +
                               FFMIN(i * s->subsampling[0] + k, s->width-1)] = *src++;
            *pu++ = *src++;
            *pv++ = *src++;
        }
    } else {
        for (i = 0; i < w; i++) {
            for (j = 0; j < s->subsampling[1]; j++)
                for (k = 0; k < s->subsampling[0]; k++)
                    p->data[0][(lnum + j) * p->linesize[0] +
                               i * s->subsampling[0] + k] = *src++;
            *pu++ = *src++;
            *pv++ = *src++;
        }
    }
}

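/* Deflate-compressed strips: inflate the whole strip into a temporary
 * buffer, then expand it line by line like an uncompressed strip. */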
#if CONFIG_ZLIB
static int tiff_uncompress(uint8_t *dst, unsigned long *len, const uint8_t *src,
                           int size)
{
    z_stream zstream = { 0 };
    int zret;
    zstream.next_in = (uint8_t *)src;
    zstream.avail_in = size;
    zstream.next_out = dst;
    zstream.avail_out = *len;
    zret = inflateInit(&zstream);
    if (zret != Z_OK) {
        av_log(NULL, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
        return zret;
    }
    zret = inflate(&zstream, Z_SYNC_FLUSH);
    inflateEnd(&zstream);
    *len = zstream.total_out;
    return zret == Z_STREAM_END ? Z_OK : zret;
}

static int tiff_unpack_zlib(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
                            const uint8_t *src, int size, int width, int lines,
                            int strip_start, int is_yuv)
{
    uint8_t *zbuf;
    unsigned long outlen;
    int ret, line;
    outlen = width * lines;
    zbuf = av_malloc(outlen);
    if (!zbuf)
        return AVERROR(ENOMEM);
    if (s->fill_order) {
        if ((ret = deinvert_buffer(s, src, size)) < 0) {
            av_free(zbuf);
            return ret;
        }
        src = s->deinvert_buf;
    }
    ret = tiff_uncompress(zbuf, &outlen, src, size);
    if (ret != Z_OK) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Uncompressing failed (%lu of %lu) with error %d\n", outlen,
               (unsigned long)width * lines, ret);
        av_free(zbuf);
        return AVERROR_UNKNOWN;
    }
    src = zbuf;
    for (line = 0; line < lines; line++) {
        if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
            horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
        } else {
            memcpy(dst, src, width);
        }
        if (is_yuv) {
            unpack_yuv(s, p, dst, strip_start + line);
            line += s->subsampling[1] - 1;
        }
        dst += stride;
        src += width;
    }
    av_free(zbuf);
    return 0;
}
#endif

#if CONFIG_LZMA
static int tiff_uncompress_lzma(uint8_t *dst, uint64_t *len, const uint8_t *src,
                                int size)
{
    lzma_stream stream = LZMA_STREAM_INIT;
    lzma_ret ret;
    stream.next_in = (uint8_t *)src;
    stream.avail_in = size;
    stream.next_out = dst;
    stream.avail_out = *len;
    ret = lzma_stream_decoder(&stream, UINT64_MAX, 0);
    if (ret != LZMA_OK) {
        av_log(NULL, AV_LOG_ERROR, "LZMA init error: %d\n", ret);
        return ret;
    }
    ret = lzma_code(&stream, LZMA_RUN);
    lzma_end(&stream);
    *len = stream.total_out;
    return ret == LZMA_STREAM_END ? LZMA_OK : ret;
}

static int tiff_unpack_lzma(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
                            const uint8_t *src, int size, int width, int lines,
                            int strip_start, int is_yuv)
{
    uint64_t outlen = width * (uint64_t)lines;
    int ret, line;
    uint8_t *buf = av_malloc(outlen);
    if (!buf)
        return AVERROR(ENOMEM);
    if (s->fill_order) {
        if ((ret = deinvert_buffer(s, src, size)) < 0) {
            av_free(buf);
            return ret;
        }
        src = s->deinvert_buf;
    }
    ret = tiff_uncompress_lzma(buf, &outlen, src, size);
    if (ret != LZMA_OK) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Uncompressing failed (%"PRIu64" of %"PRIu64") with error %d\n", outlen,
               (uint64_t)width * lines, ret);
        av_free(buf);
        return AVERROR_UNKNOWN;
    }
    src = buf;
    for (line = 0; line < lines; line++) {
        if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
            horizontal_fill(s, s->bpp, dst, 1, src, 0, width, 0);
        } else {
            memcpy(dst, src, width);
        }
        if (is_yuv) {
            unpack_yuv(s, p, dst, strip_start + line);
            line += s->subsampling[1] - 1;
        }
        dst += stride;
        src += width;
    }
    av_free(buf);
    return 0;
}
#endif

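/* CCITT Group 3/4 and RLE strips are decoded through the shared fax
 * decoder; FillOrder == 2 input is bit-reversed into a padded copy first. */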
static int tiff_unpack_fax(TiffContext *s, uint8_t *dst, int stride,
                           const uint8_t *src, int size, int width, int lines)
{
    int i, ret = 0;
    int line;
    uint8_t *src2;
    av_fast_padded_malloc(&s->fax_buffer, &s->fax_buffer_size, size);
    src2 = s->fax_buffer;
    if (!src2) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Error allocating temporary buffer\n");
        return AVERROR(ENOMEM);
    }
    if (!s->fill_order) {
        memcpy(src2, src, size);
    } else {
        for (i = 0; i < size; i++)
            src2[i] = ff_reverse[src[i]];
    }
    memset(src2 + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
    ret = ff_ccitt_unpack(s->avctx, src2, size, dst, lines, stride,
                          s->compr, s->fax_opts);
    if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
        for (line = 0; line < lines; line++) {
            horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
            dst += stride;
        }
    return ret;
}

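/* Decode one strip into dst, dispatching on the compression method.
 * For subsampled YCbCr and GRAY12 output the strip is first decoded into
 * the temporary yuv_line buffer and then unpacked into the frame. */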
static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride,
                             const uint8_t *src, int size, int strip_start, int lines)
{
    PutByteContext pb;
    int c, line, pixels, code, ret;
    const uint8_t *ssrc = src;
    int width = ((s->width * s->bpp) + 7) >> 3;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(p->format);
    int is_yuv = !(desc->flags & AV_PIX_FMT_FLAG_RGB) &&
                 (desc->flags & AV_PIX_FMT_FLAG_PLANAR) &&
                 desc->nb_components >= 3;
    if (s->planar)
        width /= s->bppcount;
    if (size <= 0)
        return AVERROR_INVALIDDATA;
    if (is_yuv) {
        int bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
                             s->subsampling[0] * s->subsampling[1] + 7) >> 3;
        av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row);
        if (s->yuv_line == NULL) {
            av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
            return AVERROR(ENOMEM);
        }
        dst = s->yuv_line;
        stride = 0;
        width = (s->width - 1) / s->subsampling[0] + 1;
        width = width * s->subsampling[0] * s->subsampling[1] + 2*width;
        av_assert0(width <= bytes_per_row);
        av_assert0(s->bpp == 24);
    }
    if (s->is_bayer) {
        width = (s->bpp * s->width + 7) >> 3;
    }
    if (p->format == AV_PIX_FMT_GRAY12) {
        av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, width);
        if (s->yuv_line == NULL) {
            av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
            return AVERROR(ENOMEM);
        }
        dst = s->yuv_line;
        stride = 0;
    }
    if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
#if CONFIG_ZLIB
        return tiff_unpack_zlib(s, p, dst, stride, src, size, width, lines,
                                strip_start, is_yuv);
#else
        av_log(s->avctx, AV_LOG_ERROR,
               "zlib support not enabled, "
               "deflate compression not supported\n");
        return AVERROR(ENOSYS);
#endif
    }
    if (s->compr == TIFF_LZMA) {
#if CONFIG_LZMA
        return tiff_unpack_lzma(s, p, dst, stride, src, size, width, lines,
                                strip_start, is_yuv);
#else
        av_log(s->avctx, AV_LOG_ERROR,
               "LZMA support not enabled\n");
        return AVERROR(ENOSYS);
#endif
    }
    if (s->compr == TIFF_LZW) {
        if (s->fill_order) {
            if ((ret = deinvert_buffer(s, src, size)) < 0)
                return ret;
            ssrc = src = s->deinvert_buf;
        }
        if (size > 1 && !src[0] && (src[1]&1)) {
            av_log(s->avctx, AV_LOG_ERROR, "Old style LZW is unsupported\n");
        }
        if ((ret = ff_lzw_decode_init(s->lzw, 8, src, size, FF_LZW_TIFF)) < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error initializing LZW decoder\n");
            return ret;
        }
        for (line = 0; line < lines; line++) {
            pixels = ff_lzw_decode(s->lzw, dst, width);
            if (pixels < width) {
                av_log(s->avctx, AV_LOG_ERROR, "Decoded only %i bytes of %i\n",
                       pixels, width);
                return AVERROR_INVALIDDATA;
            }
            if (s->bpp < 8 && s->avctx->pix_fmt == AV_PIX_FMT_PAL8)
                horizontal_fill(s, s->bpp, dst, 1, dst, 0, width, 0);
            if (is_yuv) {
                unpack_yuv(s, p, dst, strip_start + line);
                line += s->subsampling[1] - 1;
            } else if (p->format == AV_PIX_FMT_GRAY12) {
                unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
            }
            dst += stride;
        }
        return 0;
    }
    if (s->compr == TIFF_CCITT_RLE ||
        s->compr == TIFF_G3 ||
        s->compr == TIFF_G4) {
        if (is_yuv || p->format == AV_PIX_FMT_GRAY12)
            return AVERROR_INVALIDDATA;
        return tiff_unpack_fax(s, dst, stride, src, size, width, lines);
    }
    bytestream2_init(&s->gb, src, size);
    bytestream2_init_writer(&pb, dst, is_yuv ? s->yuv_line_size : (stride * lines));
    for (line = 0; line < lines; line++) {
        if (src - ssrc > size) {
            av_log(s->avctx, AV_LOG_ERROR, "Source data overread\n");
            return AVERROR_INVALIDDATA;
        }
        if (bytestream2_get_bytes_left(&s->gb) == 0 || bytestream2_get_eof(&pb))
            break;
        bytestream2_seek_p(&pb, stride * line, SEEK_SET);
        switch (s->compr) {
        case TIFF_RAW:
            if (ssrc + size - src < width)
                return AVERROR_INVALIDDATA;
            if (!s->fill_order) {
                horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 || s->is_bayer),
                                dst, 1, src, 0, width, 0);
            } else {
                int i;
                for (i = 0; i < width; i++)
                    dst[i] = ff_reverse[src[i]];
            }
            src += width;
            break;
        case TIFF_PACKBITS:
            for (pixels = 0; pixels < width;) {
                if (ssrc + size - src < 2) {
                    av_log(s->avctx, AV_LOG_ERROR, "Read went out of bounds\n");
                    return AVERROR_INVALIDDATA;
                }
                code = s->fill_order ? (int8_t) ff_reverse[*src++]: (int8_t) *src++;
                if (code >= 0) {
                    code++;
                    if (pixels + code > width ||
                        ssrc + size - src < code) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "Copy went out of bounds\n");
                        return AVERROR_INVALIDDATA;
                    }
                    horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
                                    dst, 1, src, 0, code, pixels);
                    src += code;
                    pixels += code;
                } else if (code != -128) { // -127..-1
                    code = (-code) + 1;
                    if (pixels + code > width) {
                        av_log(s->avctx, AV_LOG_ERROR,
                               "Run went out of bounds\n");
                        return AVERROR_INVALIDDATA;
                    }
                    c = *src++;
                    horizontal_fill(s, s->bpp * (s->avctx->pix_fmt == AV_PIX_FMT_PAL8),
                                    dst, 0, NULL, c, code, pixels);
                    pixels += code;
                }
            }
            if (s->fill_order) {
                int i;
                for (i = 0; i < width; i++)
                    dst[i] = ff_reverse[dst[i]];
            }
            break;
        }
        if (is_yuv) {
            unpack_yuv(s, p, dst, strip_start + line);
            line += s->subsampling[1] - 1;
        } else if (p->format == AV_PIX_FMT_GRAY12) {
            unpack_gray(s, p, dst, strip_start + line, width, s->bpp);
        }
        dst += stride;
    }
    return 0;
}

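/* Pick the output pixel format from bpp/bppcount/planar/is_bayer (combined
 * into a single switch key below), validate it against the photometric
 * interpretation, allocate the frame, and create a default grayscale
 * palette for low-bpp images that carry no palette of their own. */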
static int init_image(TiffContext *s, ThreadFrame *frame)
{
    int ret;
    int create_gray_palette = 0;
    // make sure there is no aliasing in the following switch
    if (s->bpp >= 100 || s->bppcount >= 10) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Unsupported image parameters: bpp=%d, bppcount=%d\n",
               s->bpp, s->bppcount);
        return AVERROR_INVALIDDATA;
    }
    switch (s->planar * 1000 + s->bpp * 10 + s->bppcount + s->is_bayer * 10000) {
    case 11:
        if (!s->palette_is_set) {
            s->avctx->pix_fmt = AV_PIX_FMT_MONOBLACK;
            break;
        }
    case 21:
    case 41:
        s->avctx->pix_fmt = AV_PIX_FMT_PAL8;
        if (!s->palette_is_set) {
            create_gray_palette = 1;
        }
        break;
    case 81:
        s->avctx->pix_fmt = s->palette_is_set ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
        break;
    case 121:
        s->avctx->pix_fmt = AV_PIX_FMT_GRAY12;
        break;
    case 10081:
        switch (AV_RL32(s->pattern)) {
        case 0x02010100:
            s->avctx->pix_fmt = AV_PIX_FMT_BAYER_RGGB8;
            break;
        case 0x00010102:
            s->avctx->pix_fmt = AV_PIX_FMT_BAYER_BGGR8;
            break;
        case 0x01000201:
            s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GBRG8;
            break;
        case 0x01020001:
            s->avctx->pix_fmt = AV_PIX_FMT_BAYER_GRBG8;
            break;
        default:
            av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
                   AV_RL32(s->pattern));
            return AVERROR_PATCHWELCOME;
        }
        break;
    case 10121:
        switch (AV_RL32(s->pattern)) {
        case 0x02010100:
            s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_RGGB16LE : AV_PIX_FMT_BAYER_RGGB16BE;
            break;
        case 0x00010102:
            s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_BGGR16LE : AV_PIX_FMT_BAYER_BGGR16BE;
            break;
        case 0x01000201:
            s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_GBRG16LE : AV_PIX_FMT_BAYER_GBRG16BE;
            break;
        case 0x01020001:
            s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_GRBG16LE : AV_PIX_FMT_BAYER_GRBG16BE;
            break;
        default:
            av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
                   AV_RL32(s->pattern));
            return AVERROR_PATCHWELCOME;
        }
        break;
    case 10161:
        switch (AV_RL32(s->pattern)) {
        case 0x02010100:
            s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_RGGB16LE : AV_PIX_FMT_BAYER_RGGB16BE;
            break;
        case 0x00010102:
            s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_BGGR16LE : AV_PIX_FMT_BAYER_BGGR16BE;
            break;
        case 0x01000201:
            s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_GBRG16LE : AV_PIX_FMT_BAYER_GBRG16BE;
            break;
        case 0x01020001:
            s->avctx->pix_fmt = s->le ? AV_PIX_FMT_BAYER_GRBG16LE : AV_PIX_FMT_BAYER_GRBG16BE;
            break;
        default:
            av_log(s->avctx, AV_LOG_ERROR, "Unsupported Bayer pattern: 0x%X\n",
                   AV_RL32(s->pattern));
            return AVERROR_PATCHWELCOME;
        }
        break;
    case 243:
        if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
            if (s->subsampling[0] == 1 && s->subsampling[1] == 1) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV444P;
            } else if (s->subsampling[0] == 2 && s->subsampling[1] == 1) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
            } else if (s->subsampling[0] == 4 && s->subsampling[1] == 1) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV411P;
            } else if (s->subsampling[0] == 1 && s->subsampling[1] == 2) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV440P;
            } else if (s->subsampling[0] == 2 && s->subsampling[1] == 2) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV420P;
            } else if (s->subsampling[0] == 4 && s->subsampling[1] == 4) {
                s->avctx->pix_fmt = AV_PIX_FMT_YUV410P;
            } else {
                av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr subsampling\n");
                return AVERROR_PATCHWELCOME;
            }
        } else
            s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
        break;
    case 161:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GRAY16LE : AV_PIX_FMT_GRAY16BE;
        break;
    case 162:
        s->avctx->pix_fmt = AV_PIX_FMT_YA8;
        break;
    case 322:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_YA16LE : AV_PIX_FMT_YA16BE;
        break;
    case 324:
        s->avctx->pix_fmt = s->photometric == TIFF_PHOTOMETRIC_SEPARATED ? AV_PIX_FMT_RGB0 : AV_PIX_FMT_RGBA;
        break;
    case 405:
        if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED)
            s->avctx->pix_fmt = AV_PIX_FMT_RGBA;
        else {
            av_log(s->avctx, AV_LOG_ERROR,
                   "bpp=40 without PHOTOMETRIC_SEPARATED is unsupported\n");
            return AVERROR_PATCHWELCOME;
        }
        break;
    case 483:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGB48LE : AV_PIX_FMT_RGB48BE;
        break;
    case 644:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_RGBA64LE : AV_PIX_FMT_RGBA64BE;
        break;
    case 1243:
        s->avctx->pix_fmt = AV_PIX_FMT_GBRP;
        break;
    case 1324:
        s->avctx->pix_fmt = AV_PIX_FMT_GBRAP;
        break;
    case 1483:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRP16LE : AV_PIX_FMT_GBRP16BE;
        break;
    case 1644:
        s->avctx->pix_fmt = s->le ? AV_PIX_FMT_GBRAP16LE : AV_PIX_FMT_GBRAP16BE;
        break;
    default:
        av_log(s->avctx, AV_LOG_ERROR,
               "This format is not supported (bpp=%d, bppcount=%d)\n",
               s->bpp, s->bppcount);
        return AVERROR_INVALIDDATA;
    }
    if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
        if ((desc->flags & AV_PIX_FMT_FLAG_RGB) ||
            !(desc->flags & AV_PIX_FMT_FLAG_PLANAR) ||
            desc->nb_components < 3) {
            av_log(s->avctx, AV_LOG_ERROR, "Unsupported YCbCr variant\n");
            return AVERROR_INVALIDDATA;
        }
    }
    if (s->width != s->avctx->width || s->height != s->avctx->height) {
        ret = ff_set_dimensions(s->avctx, s->width, s->height);
        if (ret < 0)
            return ret;
    }
    if ((ret = ff_thread_get_buffer(s->avctx, frame, 0)) < 0)
        return ret;
    if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        if (!create_gray_palette)
            memcpy(frame->f->data[1], s->palette, sizeof(s->palette));
        else {
            /* make default grayscale pal */
            int i;
            uint32_t *pal = (uint32_t *)frame->f->data[1];
            for (i = 0; i < 1<<s->bpp; i++)
                pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
        }
    }
    return 0;
}

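/* Combine the XRes/YRes rationals into the sample aspect ratio once all
 * four components have been seen. */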
static void set_sar(TiffContext *s, unsigned tag, unsigned num, unsigned den)
{
    int offset = tag == TIFF_YRES ? 2 : 0;
    s->res[offset++] = num;
    s->res[offset] = den;
    if (s->res[0] && s->res[1] && s->res[2] && s->res[3]) {
        uint64_t num = s->res[2] * (uint64_t)s->res[1];
        uint64_t den = s->res[0] * (uint64_t)s->res[3];
        if (num > INT64_MAX || den > INT64_MAX) {
            num = num >> 1;
            den = den >> 1;
        }
        av_reduce(&s->avctx->sample_aspect_ratio.num, &s->avctx->sample_aspect_ratio.den,
                  num, den, INT32_MAX);
        if (!s->avctx->sample_aspect_ratio.den)
            s->avctx->sample_aspect_ratio = (AVRational) {0, 1};
    }
}

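/* Parse a single IFD entry and update the decoder state accordingly
 * (dimensions, compression, strip layout, palette, GeoTIFF keys,
 * frame metadata, ...). */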
static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
{
    unsigned tag, type, count, off, value = 0, value2 = 0;
    int i, start;
    int pos;
    int ret;
    double *dp;
    ret = ff_tread_tag(&s->gb, s->le, &tag, &type, &count, &start);
    if (ret < 0) {
        goto end;
    }
    off = bytestream2_tell(&s->gb);
    if (count == 1) {
        switch (type) {
        case TIFF_BYTE:
        case TIFF_SHORT:
        case TIFF_LONG:
            value = ff_tget(&s->gb, type, s->le);
            break;
        case TIFF_RATIONAL:
            value  = ff_tget(&s->gb, TIFF_LONG, s->le);
            value2 = ff_tget(&s->gb, TIFF_LONG, s->le);
            break;
        case TIFF_STRING:
            if (count <= 4) {
                break;
            }
        default:
            value = UINT_MAX;
        }
    }
    switch (tag) {
    case TIFF_WIDTH:
        s->width = value;
        break;
    case TIFF_HEIGHT:
        s->height = value;
        break;
    case TIFF_BPP:
        if (count > 5U) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "This format is not supported (bpp=%d, %d components)\n",
                   value, count);
            return AVERROR_INVALIDDATA;
        }
        s->bppcount = count;
        if (count == 1)
            s->bpp = value;
        else {
            switch (type) {
            case TIFF_BYTE:
            case TIFF_SHORT:
            case TIFF_LONG:
                s->bpp = 0;
                if (bytestream2_get_bytes_left(&s->gb) < type_sizes[type] * count)
                    return AVERROR_INVALIDDATA;
                for (i = 0; i < count; i++)
                    s->bpp += ff_tget(&s->gb, type, s->le);
                break;
            default:
                s->bpp = -1;
            }
        }
        break;
    case TIFF_SAMPLES_PER_PIXEL:
        if (count != 1) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Samples per pixel requires a single value, many provided\n");
            return AVERROR_INVALIDDATA;
        }
        if (value > 5U) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Samples per pixel %d is too large\n", value);
            return AVERROR_INVALIDDATA;
        }
        if (s->bppcount == 1)
            s->bpp *= value;
        s->bppcount = value;
        break;
    case TIFF_COMPR:
        s->compr = value;
        av_log(s->avctx, AV_LOG_DEBUG, "compression: %d\n", s->compr);
        s->predictor = 0;
        switch (s->compr) {
        case TIFF_RAW:
        case TIFF_PACKBITS:
        case TIFF_LZW:
        case TIFF_CCITT_RLE:
            break;
        case TIFF_G3:
        case TIFF_G4:
            s->fax_opts = 0;
            break;
        case TIFF_DEFLATE:
        case TIFF_ADOBE_DEFLATE:
#if CONFIG_ZLIB
            break;
#else
            av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
            return AVERROR(ENOSYS);
#endif
        case TIFF_JPEG:
        case TIFF_NEWJPEG:
            avpriv_report_missing_feature(s->avctx, "JPEG compression");
            return AVERROR_PATCHWELCOME;
        case TIFF_LZMA:
#if CONFIG_LZMA
            break;
#else
            av_log(s->avctx, AV_LOG_ERROR, "LZMA not compiled in\n");
            return AVERROR(ENOSYS);
#endif
        default:
            av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n",
                   s->compr);
            return AVERROR_INVALIDDATA;
        }
        break;
    case TIFF_ROWSPERSTRIP:
        if (!value || (type == TIFF_LONG && value == UINT_MAX))
            value = s->height;
        s->rps = FFMIN(value, s->height);
        break;
    case TIFF_STRIP_OFFS:
        if (count == 1) {
            if (value > INT_MAX) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "strippos %u too large\n", value);
                return AVERROR_INVALIDDATA;
            }
            s->strippos = 0;
            s->stripoff = value;
        } else
            s->strippos = off;
        s->strips = count;
        if (s->strips == 1)
            s->rps = s->height;
        s->sot = type;
        break;
    case TIFF_STRIP_SIZE:
        if (count == 1) {
            if (value > INT_MAX) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "stripsize %u too large\n", value);
                return AVERROR_INVALIDDATA;
            }
            s->stripsizesoff = 0;
            s->stripsize = value;
            s->strips = 1;
        } else {
            s->stripsizesoff = off;
        }
        s->strips = count;
        s->sstype = type;
        break;
    case TIFF_XRES:
    case TIFF_YRES:
        set_sar(s, tag, value, value2);
        break;
    case TIFF_TILE_BYTE_COUNTS:
    case TIFF_TILE_LENGTH:
    case TIFF_TILE_OFFSETS:
    case TIFF_TILE_WIDTH:
        av_log(s->avctx, AV_LOG_ERROR, "Tiled images are not supported\n");
        return AVERROR_PATCHWELCOME;
        break;
    case TIFF_PREDICTOR:
        s->predictor = value;
        break;
    case TIFF_SUB_IFDS:
        s->sub_ifd = value;
        break;
    case TIFF_WHITE_LEVEL:
        s->white_level = value;
        break;
    case TIFF_CFA_PATTERN_DIM:
        if (count != 2 || (ff_tget(&s->gb, type, s->le) != 2 &&
                           ff_tget(&s->gb, type, s->le) != 2)) {
            av_log(s->avctx, AV_LOG_ERROR, "CFA Pattern dimensions are not 2x2\n");
            return AVERROR_INVALIDDATA;
        }
        break;
    case TIFF_CFA_PATTERN:
        s->is_bayer = 1;
        s->pattern[0] = ff_tget(&s->gb, type, s->le);
        s->pattern[1] = ff_tget(&s->gb, type, s->le);
        s->pattern[2] = ff_tget(&s->gb, type, s->le);
        s->pattern[3] = ff_tget(&s->gb, type, s->le);
        break;
    case TIFF_PHOTOMETRIC:
        switch (value) {
        case TIFF_PHOTOMETRIC_WHITE_IS_ZERO:
        case TIFF_PHOTOMETRIC_BLACK_IS_ZERO:
        case TIFF_PHOTOMETRIC_RGB:
        case TIFF_PHOTOMETRIC_PALETTE:
        case TIFF_PHOTOMETRIC_SEPARATED:
        case TIFF_PHOTOMETRIC_YCBCR:
        case TIFF_PHOTOMETRIC_CFA:
            s->photometric = value;
            break;
        case TIFF_PHOTOMETRIC_ALPHA_MASK:
        case TIFF_PHOTOMETRIC_CIE_LAB:
        case TIFF_PHOTOMETRIC_ICC_LAB:
        case TIFF_PHOTOMETRIC_ITU_LAB:
        case TIFF_PHOTOMETRIC_LOG_L:
        case TIFF_PHOTOMETRIC_LOG_LUV:
        case TIFF_PHOTOMETRIC_LINEAR_RAW:
            avpriv_report_missing_feature(s->avctx,
                                          "PhotometricInterpretation 0x%04X",
                                          value);
            return AVERROR_PATCHWELCOME;
        default:
            av_log(s->avctx, AV_LOG_ERROR, "PhotometricInterpretation %u is "
                   "unknown\n", value);
            return AVERROR_INVALIDDATA;
        }
        break;
    case TIFF_FILL_ORDER:
        if (value < 1 || value > 2) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Unknown FillOrder value %d, trying default one\n", value);
            value = 1;
        }
        s->fill_order = value - 1;
        break;
    case TIFF_PAL: {
        GetByteContext pal_gb[3];
        off = type_sizes[type];
        if (count / 3 > 256 ||
            bytestream2_get_bytes_left(&s->gb) < count / 3 * off * 3)
            return AVERROR_INVALIDDATA;
        pal_gb[0] = pal_gb[1] = pal_gb[2] = s->gb;
        bytestream2_skip(&pal_gb[1], count / 3 * off);
        bytestream2_skip(&pal_gb[2], count / 3 * off * 2);
        off = (type_sizes[type] - 1) << 3;
        if (off > 31U) {
            av_log(s->avctx, AV_LOG_ERROR, "palette shift %d is out of range\n", off);
            return AVERROR_INVALIDDATA;
        }
        for (i = 0; i < count / 3; i++) {
            uint32_t p = 0xFF000000;
            p |= (ff_tget(&pal_gb[0], type, s->le) >> off) << 16;
            p |= (ff_tget(&pal_gb[1], type, s->le) >> off) << 8;
            p |= ff_tget(&pal_gb[2], type, s->le) >> off;
            s->palette[i] = p;
        }
        s->palette_is_set = 1;
        break;
    }
    case TIFF_PLANAR:
        s->planar = value == 2;
        break;
    case TIFF_YCBCR_SUBSAMPLING:
        if (count != 2) {
            av_log(s->avctx, AV_LOG_ERROR, "subsample count invalid\n");
            return AVERROR_INVALIDDATA;
        }
        for (i = 0; i < count; i++) {
            s->subsampling[i] = ff_tget(&s->gb, type, s->le);
            if (s->subsampling[i] <= 0) {
                av_log(s->avctx, AV_LOG_ERROR, "subsampling %d is invalid\n", s->subsampling[i]);
                s->subsampling[i] = 1;
                return AVERROR_INVALIDDATA;
            }
        }
        break;
    case TIFF_T4OPTIONS:
        if (s->compr == TIFF_G3)
            s->fax_opts = value;
        break;
    case TIFF_T6OPTIONS:
        if (s->compr == TIFF_G4)
            s->fax_opts = value;
        break;
#define ADD_METADATA(count, name, sep)\
    if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\
        av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");\
        goto end;\
    }
    case TIFF_MODEL_PIXEL_SCALE:
        ADD_METADATA(count, "ModelPixelScaleTag", NULL);
        break;
    case TIFF_MODEL_TRANSFORMATION:
        ADD_METADATA(count, "ModelTransformationTag", NULL);
        break;
    case TIFF_MODEL_TIEPOINT:
        ADD_METADATA(count, "ModelTiepointTag", NULL);
        break;
    case TIFF_GEO_KEY_DIRECTORY:
        if (s->geotag_count) {
            avpriv_request_sample(s->avctx, "Multiple geo key directories\n");
            return AVERROR_INVALIDDATA;
        }
        ADD_METADATA(1, "GeoTIFF_Version", NULL);
        ADD_METADATA(2, "GeoTIFF_Key_Revision", ".");
        s->geotag_count = ff_tget_short(&s->gb, s->le);
        if (s->geotag_count > count / 4 - 1) {
            s->geotag_count = count / 4 - 1;
            av_log(s->avctx, AV_LOG_WARNING, "GeoTIFF key directory buffer shorter than specified\n");
        }
        if (bytestream2_get_bytes_left(&s->gb) < s->geotag_count * sizeof(int16_t) * 4 ||
            s->geotag_count == 0) {
            s->geotag_count = 0;
            return -1;
        }
        s->geotags = av_mallocz_array(s->geotag_count, sizeof(TiffGeoTag));
        if (!s->geotags) {
            av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
            s->geotag_count = 0;
            goto end;
        }
        for (i = 0; i < s->geotag_count; i++) {
            s->geotags[i].key   = ff_tget_short(&s->gb, s->le);
            s->geotags[i].type  = ff_tget_short(&s->gb, s->le);
            s->geotags[i].count = ff_tget_short(&s->gb, s->le);
            if (!s->geotags[i].type)
                s->geotags[i].val = get_geokey_val(s->geotags[i].key, ff_tget_short(&s->gb, s->le));
            else
                s->geotags[i].offset = ff_tget_short(&s->gb, s->le);
        }
        break;
    case TIFF_GEO_DOUBLE_PARAMS:
        if (count >= INT_MAX / sizeof(int64_t))
            return AVERROR_INVALIDDATA;
        if (bytestream2_get_bytes_left(&s->gb) < count * sizeof(int64_t))
            return AVERROR_INVALIDDATA;
        dp = av_malloc_array(count, sizeof(double));
        if (!dp) {
            av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
            goto end;
        }
        for (i = 0; i < count; i++)
            dp[i] = ff_tget_double(&s->gb, s->le);
        for (i = 0; i < s->geotag_count; i++) {
            if (s->geotags[i].type == TIFF_GEO_DOUBLE_PARAMS) {
                if (s->geotags[i].count == 0
                    || s->geotags[i].offset + s->geotags[i].count > count) {
                    av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
                } else if (s->geotags[i].val) {
                    av_log(s->avctx, AV_LOG_WARNING, "Duplicate GeoTIFF key %d\n", s->geotags[i].key);
                } else {
                    char *ap = doubles2str(&dp[s->geotags[i].offset], s->geotags[i].count, ", ");
                    if (!ap) {
                        av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
                        av_freep(&dp);
                        return AVERROR(ENOMEM);
                    }
                    s->geotags[i].val = ap;
                }
            }
        }
        av_freep(&dp);
        break;
    case TIFF_GEO_ASCII_PARAMS:
        pos = bytestream2_tell(&s->gb);
        for (i = 0; i < s->geotag_count; i++) {
            if (s->geotags[i].type == TIFF_GEO_ASCII_PARAMS) {
                if (s->geotags[i].count == 0
                    || s->geotags[i].offset + s->geotags[i].count > count) {
                    av_log(s->avctx, AV_LOG_WARNING, "Invalid GeoTIFF key %d\n", s->geotags[i].key);
                } else {
                    char *ap;
                    bytestream2_seek(&s->gb, pos + s->geotags[i].offset, SEEK_SET);
                    if (bytestream2_get_bytes_left(&s->gb) < s->geotags[i].count)
                        return AVERROR_INVALIDDATA;
                    if (s->geotags[i].val)
                        return AVERROR_INVALIDDATA;
                    ap = av_malloc(s->geotags[i].count);
                    if (!ap) {
                        av_log(s->avctx, AV_LOG_ERROR, "Error allocating temporary buffer\n");
                        return AVERROR(ENOMEM);
                    }
                    bytestream2_get_bufferu(&s->gb, ap, s->geotags[i].count);
                    ap[s->geotags[i].count - 1] = '\0'; //replace the "|" delimiter with a 0 byte
                    s->geotags[i].val = ap;
                }
            }
        }
        break;
    case TIFF_ARTIST:
        ADD_METADATA(count, "artist", NULL);
        break;
    case TIFF_COPYRIGHT:
        ADD_METADATA(count, "copyright", NULL);
        break;
    case TIFF_DATE:
        ADD_METADATA(count, "date", NULL);
        break;
    case TIFF_DOCUMENT_NAME:
        ADD_METADATA(count, "document_name", NULL);
        break;
    case TIFF_HOST_COMPUTER:
        ADD_METADATA(count, "computer", NULL);
        break;
    case TIFF_IMAGE_DESCRIPTION:
        ADD_METADATA(count, "description", NULL);
        break;
    case TIFF_MAKE:
        ADD_METADATA(count, "make", NULL);
        break;
    case TIFF_MODEL:
        ADD_METADATA(count, "model", NULL);
        break;
    case TIFF_PAGE_NAME:
        ADD_METADATA(count, "page_name", NULL);
        break;
    case TIFF_PAGE_NUMBER:
        ADD_METADATA(count, "page_number", " / ");
        // need to seek back to re-read the page number
        bytestream2_seek(&s->gb, -count * sizeof(uint16_t), SEEK_CUR);
        // read the page number
        s->cur_page = ff_tget(&s->gb, TIFF_SHORT, s->le);
        // get back to where we were before the previous seek
        bytestream2_seek(&s->gb, count * sizeof(uint16_t) - sizeof(uint16_t), SEEK_CUR);
        break;
    case TIFF_SOFTWARE_NAME:
        ADD_METADATA(count, "software", NULL);
        break;
    default:
        if (s->avctx->err_recognition & AV_EF_EXPLODE) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Unknown or unsupported tag %d/0x%0X\n",
                   tag, tag);
            return AVERROR_INVALIDDATA;
        }
    }
end:
    if (s->bpp > 64U) {
        av_log(s->avctx, AV_LOG_ERROR,
               "This format is not supported (bpp=%d, %d components)\n",
               s->bpp, count);
        s->bpp = 0;
        return AVERROR_INVALIDDATA;
    }
    bytestream2_seek(&s->gb, start, SEEK_SET);
    return 0;
}

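/* Top-level frame decoding: parse the header and the requested IFD
 * (optionally following a SubIFD or a multi-page chain), decode every strip
 * of every plane, then apply predictor, WhiteIsZero inversion and
 * CMYK-to-RGBA post-processing. */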
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame, AVPacket *avpkt)
{
    TiffContext *const s = avctx->priv_data;
    AVFrame *const p = data;
    ThreadFrame frame = { .f = data };
    unsigned off;
    int le, ret, plane, planes;
    int i, j, entries, stride;
    unsigned soff, ssize;
    uint8_t *dst;
    GetByteContext stripsizes;
    GetByteContext stripdata;
    int retry_for_subifd, retry_for_page;
    bytestream2_init(&s->gb, avpkt->data, avpkt->size);
    // parse image header
    if ((ret = ff_tdecode_header(&s->gb, &le, &off))) {
        av_log(avctx, AV_LOG_ERROR, "Invalid TIFF header\n");
        return ret;
    } else if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
        av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
        return AVERROR_INVALIDDATA;
    }
    s->le = le;
    // TIFF_BPP is not a required tag and defaults to 1
again:
    s->bppcount = s->bpp = 1;
    s->photometric = TIFF_PHOTOMETRIC_NONE;
    s->compr = TIFF_RAW;
    s->fill_order = 0;
    s->white_level = 0;
    s->is_bayer = 0;
    s->cur_page = 0;
    free_geotags(s);
    // Reset these offsets so we can tell if they were set this frame
    s->stripsizesoff = s->strippos = 0;
    /* parse image file directory */
    bytestream2_seek(&s->gb, off, SEEK_SET);
    entries = ff_tget_short(&s->gb, le);
    if (bytestream2_get_bytes_left(&s->gb) < entries * 12)
        return AVERROR_INVALIDDATA;
    for (i = 0; i < entries; i++) {
        if ((ret = tiff_decode_tag(s, p)) < 0)
            return ret;
    }
    /** whether we should look for this IFD's SubIFD */
    retry_for_subifd = s->sub_ifd && s->get_subimage;
    /** whether we should look for this multi-page IFD's next page */
    retry_for_page = s->get_page && s->cur_page + 1 < s->get_page; // get_page is 1-indexed
    if (retry_for_page) {
        // set offset to the next IFD
        off = ff_tget_long(&s->gb, le);
    } else if (retry_for_subifd) {
        // set offset to the SubIFD
        off = s->sub_ifd;
    }
    if (retry_for_subifd || retry_for_page) {
        if (off >= UINT_MAX - 14 || avpkt->size < off + 14) {
            av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
            return AVERROR_INVALIDDATA;
        }
        s->sub_ifd = 0;
        goto again;
    }
    for (i = 0; i < s->geotag_count; i++) {
        const char *keyname = get_geokey_name(s->geotags[i].key);
        if (!keyname) {
            av_log(avctx, AV_LOG_WARNING, "Unknown or unsupported GeoTIFF key %d\n", s->geotags[i].key);
            continue;
        }
        if (get_geokey_type(s->geotags[i].key) != s->geotags[i].type) {
            av_log(avctx, AV_LOG_WARNING, "Type of GeoTIFF key %d is wrong\n", s->geotags[i].key);
            continue;
        }
        ret = av_dict_set(&p->metadata, keyname, s->geotags[i].val, 0);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Writing metadata with key '%s' failed\n", keyname);
            return ret;
        }
    }
    if (!s->strippos && !s->stripoff) {
        av_log(avctx, AV_LOG_ERROR, "Image data is missing\n");
        return AVERROR_INVALIDDATA;
    }
    /* now we have the data and may start decoding */
    if ((ret = init_image(s, &frame)) < 0)
        return ret;
    if (s->strips == 1 && !s->stripsize) {
        av_log(avctx, AV_LOG_WARNING, "Image data size missing\n");
        s->stripsize = avpkt->size - s->stripoff;
    }
    if (s->stripsizesoff) {
        if (s->stripsizesoff >= (unsigned)avpkt->size)
            return AVERROR_INVALIDDATA;
        bytestream2_init(&stripsizes, avpkt->data + s->stripsizesoff,
                         avpkt->size - s->stripsizesoff);
    }
    if (s->strippos) {
        if (s->strippos >= (unsigned)avpkt->size)
            return AVERROR_INVALIDDATA;
        bytestream2_init(&stripdata, avpkt->data + s->strippos,
                         avpkt->size - s->strippos);
    }
    if (s->rps <= 0 || s->rps % s->subsampling[1]) {
        av_log(avctx, AV_LOG_ERROR, "rps %d invalid\n", s->rps);
        return AVERROR_INVALIDDATA;
    }
    planes = s->planar ? s->bppcount : 1;
    for (plane = 0; plane < planes; plane++) {
        uint8_t *five_planes = NULL;
        int remaining = avpkt->size;
        int decoded_height;
        stride = p->linesize[plane];
        dst = p->data[plane];
        if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
            s->avctx->pix_fmt == AV_PIX_FMT_RGBA) {
            stride = stride * 5 / 4;
            five_planes =
            dst = av_malloc(stride * s->height);
            if (!dst)
                return AVERROR(ENOMEM);
        }
        for (i = 0; i < s->height; i += s->rps) {
            if (i)
                dst += s->rps * stride;
            if (s->stripsizesoff)
                ssize = ff_tget(&stripsizes, s->sstype, le);
            else
                ssize = s->stripsize;
            if (s->strippos)
                soff = ff_tget(&stripdata, s->sot, le);
            else
                soff = s->stripoff;
            if (soff > avpkt->size || ssize > avpkt->size - soff || ssize > remaining) {
                av_log(avctx, AV_LOG_ERROR, "Invalid strip size/offset\n");
                av_freep(&five_planes);
                return AVERROR_INVALIDDATA;
            }
            remaining -= ssize;
            if ((ret = tiff_unpack_strip(s, p, dst, stride, avpkt->data + soff, ssize, i,
                                         FFMIN(s->rps, s->height - i))) < 0) {
                if (avctx->err_recognition & AV_EF_EXPLODE) {
                    av_freep(&five_planes);
                    return ret;
                }
                break;
            }
        }
        decoded_height = FFMIN(i, s->height);
        if (s->predictor == 2) {
            if (s->photometric == TIFF_PHOTOMETRIC_YCBCR) {
                av_log(s->avctx, AV_LOG_ERROR, "predictor == 2 with YUV is unsupported");
                return AVERROR_PATCHWELCOME;
            }
            dst = five_planes ? five_planes : p->data[plane];
            soff = s->bpp >> 3;
            if (s->planar)
                soff = FFMAX(soff / s->bppcount, 1);
            ssize = s->width * soff;
            if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48LE ||
                s->avctx->pix_fmt == AV_PIX_FMT_RGBA64LE ||
                s->avctx->pix_fmt == AV_PIX_FMT_GRAY16LE ||
                s->avctx->pix_fmt == AV_PIX_FMT_YA16LE ||
                s->avctx->pix_fmt == AV_PIX_FMT_GBRP16LE ||
                s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16LE) {
                for (i = 0; i < decoded_height; i++) {
                    for (j = soff; j < ssize; j += 2)
                        AV_WL16(dst + j, AV_RL16(dst + j) + AV_RL16(dst + j - soff));
                    dst += stride;
                }
            } else if (s->avctx->pix_fmt == AV_PIX_FMT_RGB48BE ||
                       s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE ||
                       s->avctx->pix_fmt == AV_PIX_FMT_GRAY16BE ||
                       s->avctx->pix_fmt == AV_PIX_FMT_YA16BE ||
                       s->avctx->pix_fmt == AV_PIX_FMT_GBRP16BE ||
                       s->avctx->pix_fmt == AV_PIX_FMT_GBRAP16BE) {
                for (i = 0; i < decoded_height; i++) {
                    for (j = soff; j < ssize; j += 2)
                        AV_WB16(dst + j, AV_RB16(dst + j) + AV_RB16(dst + j - soff));
                    dst += stride;
                }
            } else {
                for (i = 0; i < decoded_height; i++) {
                    for (j = soff; j < ssize; j++)
                        dst[j] += dst[j - soff];
                    dst += stride;
                }
            }
        }
        if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) {
            int c = (s->avctx->pix_fmt == AV_PIX_FMT_PAL8 ? (1<<s->bpp) - 1 : 255);
            dst = p->data[plane];
            for (i = 0; i < s->height; i++) {
                for (j = 0; j < stride; j++)
                    dst[j] = c - dst[j];
                dst += stride;
            }
        }
        if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
            (s->avctx->pix_fmt == AV_PIX_FMT_RGB0 || s->avctx->pix_fmt == AV_PIX_FMT_RGBA)) {
            int x = s->avctx->pix_fmt == AV_PIX_FMT_RGB0 ? 4 : 5;
            uint8_t *src = five_planes ? five_planes : p->data[plane];
            dst = p->data[plane];
            for (i = 0; i < s->height; i++) {
                for (j = 0; j < s->width; j++) {
                    int k = 255 - src[x * j + 3];
                    int r = (255 - src[x * j]) * k;
                    int g = (255 - src[x * j + 1]) * k;
                    int b = (255 - src[x * j + 2]) * k;
                    dst[4 * j] = r * 257 >> 16;
                    dst[4 * j + 1] = g * 257 >> 16;
                    dst[4 * j + 2] = b * 257 >> 16;
                    dst[4 * j + 3] = s->avctx->pix_fmt == AV_PIX_FMT_RGBA ? src[x * j + 4] : 255;
                }
                src += stride;
                dst += p->linesize[plane];
            }
            av_freep(&five_planes);
        } else if (s->photometric == TIFF_PHOTOMETRIC_SEPARATED &&
                   s->avctx->pix_fmt == AV_PIX_FMT_RGBA64BE) {
            dst = p->data[plane];
            for (i = 0; i < s->height; i++) {
                for (j = 0; j < s->width; j++) {
                    uint64_t k = 65535 - AV_RB16(dst + 8 * j + 6);
                    uint64_t r = (65535 - AV_RB16(dst + 8 * j)) * k;
                    uint64_t g = (65535 - AV_RB16(dst + 8 * j + 2)) * k;
                    uint64_t b = (65535 - AV_RB16(dst + 8 * j + 4)) * k;
                    AV_WB16(dst + 8 * j, r * 65537 >> 32);
                    AV_WB16(dst + 8 * j + 2, g * 65537 >> 32);
                    AV_WB16(dst + 8 * j + 4, b * 65537 >> 32);
                    AV_WB16(dst + 8 * j + 6, 65535);
                }
                dst += p->linesize[plane];
            }
        }
    }
    if (s->planar && s->bppcount > 2) {
        FFSWAP(uint8_t*, p->data[0], p->data[2]);
        FFSWAP(int, p->linesize[0], p->linesize[2]);
        FFSWAP(uint8_t*, p->data[0], p->data[1]);
        FFSWAP(int, p->linesize[0], p->linesize[1]);
    }
    if (s->is_bayer && s->white_level && s->bpp == 16) {
        uint16_t *dst = (uint16_t *)p->data[0];
        for (i = 0; i < s->height; i++) {
            for (j = 0; j < s->width; j++)
                dst[j] = FFMIN((dst[j] / (float)s->white_level) * 65535, 65535);
            dst += stride / 2;
        }
    }
    *got_frame = 1;
    return avpkt->size;
}

static av_cold int tiff_init(AVCodecContext *avctx)
{
    TiffContext *s = avctx->priv_data;
    s->width = 0;
    s->height = 0;
    s->subsampling[0] =
    s->subsampling[1] = 1;
    s->avctx = avctx;
    ff_lzw_decode_open(&s->lzw);
    if (!s->lzw)
        return AVERROR(ENOMEM);
    ff_ccitt_unpack_init();
    return 0;
}

static av_cold int tiff_end(AVCodecContext *avctx)
{
    TiffContext *const s = avctx->priv_data;
    free_geotags(s);
    ff_lzw_decode_close(&s->lzw);
    av_freep(&s->deinvert_buf);
    s->deinvert_buf_size = 0;
    av_freep(&s->yuv_line);
    s->yuv_line_size = 0;
    av_freep(&s->fax_buffer);
    s->fax_buffer_size = 0;
    return 0;
}

#define OFFSET(x) offsetof(TiffContext, x)
static const AVOption tiff_options[] = {
    { "subimage", "decode subimage instead if available", OFFSET(get_subimage), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
    { "page", "page number of multi-page image to decode (starting from 1)", OFFSET(get_page), AV_OPT_TYPE_INT, {.i64=0}, 0, UINT16_MAX, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
    { NULL },
};

static const AVClass tiff_decoder_class = {
    .class_name = "TIFF decoder",
    .item_name  = av_default_item_name,
    .option     = tiff_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_tiff_decoder = {
    .name             = "tiff",
    .long_name        = NULL_IF_CONFIG_SMALL("TIFF image"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_TIFF,
    .priv_data_size   = sizeof(TiffContext),
    .init             = tiff_init,
    .close            = tiff_end,
    .decode           = decode_frame,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(tiff_init),
    .capabilities     = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
    .priv_class       = &tiff_decoder_class,
};