/*
 * TIFF image encoder
 * Copyright (c) 2007 Bartlomiej Wolowiec
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * TIFF image encoder
 * @author Bartlomiej Wolowiec
 */
#include <assert.h>

#include "libavutil/log.h"
#include "libavutil/opt.h"

#include "avcodec.h"
#if CONFIG_ZLIB
#include <zlib.h>
#endif
#include "bytestream.h"
#include "tiff.h"
#include "rle.h"
#include "lzw.h"
#include "put_bits.h"

#define TIFF_MAX_ENTRY 32

/** sizes of various TIFF field types (string size = 1) */
static const uint8_t type_sizes2[6] = {
    0, 1, 1, 2, 4, 8
};

typedef struct TiffEncoderContext {
    AVClass *class;                         ///< for private options
    AVCodecContext *avctx;
    AVFrame picture;

    int width;                              ///< picture width
    int height;                             ///< picture height
    unsigned int bpp;                       ///< bits per pixel
    int compr;                              ///< compression level
    int bpp_tab_size;                       ///< bpp_tab size
    int photometric_interpretation;         ///< photometric interpretation
    int strips;                             ///< number of strips
    int rps;                                ///< rows per strip
    uint8_t entries[TIFF_MAX_ENTRY * 12];   ///< entries in header
    int num_entries;                        ///< number of entries
    uint8_t **buf;                          ///< actual position in buffer
    uint8_t *buf_start;                     ///< pointer to first byte in buffer
    int buf_size;                           ///< buffer size
    uint16_t subsampling[2];                ///< YUV subsampling factors
    struct LZWEncodeState *lzws;            ///< LZW encode state
    uint32_t dpi;                           ///< image resolution in DPI
} TiffEncoderContext;
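
/* "buf" holds a pointer to the caller's write pointer (ptr in encode_frame()),
 * so helpers such as add_entry() can append out-of-line values at the current
 * output position and advance it in place for the caller. */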

/**
 * Check free space in buffer.
 *
 * @param s Tiff context
 * @param need Needed bytes
 * @return 0 - ok, 1 - no free space
 */
inline static int check_size(TiffEncoderContext *s, uint64_t need)
{
    if (s->buf_size < *s->buf - s->buf_start + need) {
        *s->buf = s->buf_start + s->buf_size + 1;
        av_log(s->avctx, AV_LOG_ERROR, "Buffer is too small\n");
        return 1;
    }
    return 0;
}
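
/* On failure the shared write pointer is parked one byte past the end of the
 * output buffer, so every subsequent check_size() call for this frame fails
 * as well. */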

/**
 * Put n values to buffer.
 *
 * @param p Pointer to pointer to output buffer
 * @param n Number of values
 * @param val Pointer to values
 * @param type Type of values
 * @param flip =0 - normal copy, >0 - flip
 */
static void tnput(uint8_t **p, int n, const uint8_t *val, enum TiffTypes type,
                  int flip)
{
    int i;
#if HAVE_BIGENDIAN
    flip ^= ((int[]) { 0, 0, 0, 1, 3, 3 })[type];
#endif
    for (i = 0; i < n * type_sizes2[type]; i++)
        *(*p)++ = val[i ^ flip];
}
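
/* On big-endian hosts, XOR-ing the byte index with 1 (16-bit types) or 3
 * (32-bit types) byte-swaps each value, since the file is written in the
 * little-endian ("II") byte order declared in the header below. */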

/**
 * Add an entry to the directory in the TIFF header.
 *
 * @param s Tiff context
 * @param tag Tag that identifies the entry
 * @param type Entry type
 * @param count The number of values
 * @param ptr_val Pointer to values
 */
static void add_entry(TiffEncoderContext *s,
                      enum TiffTags tag, enum TiffTypes type, int count,
                      const void *ptr_val)
{
    uint8_t *entries_ptr = s->entries + 12 * s->num_entries;

    assert(s->num_entries < TIFF_MAX_ENTRY);

    bytestream_put_le16(&entries_ptr, tag);
    bytestream_put_le16(&entries_ptr, type);
    bytestream_put_le32(&entries_ptr, count);

    if (type_sizes[type] * count <= 4) {
        tnput(&entries_ptr, count, ptr_val, type, 0);
    } else {
        bytestream_put_le32(&entries_ptr, *s->buf - s->buf_start);
        check_size(s, count * type_sizes2[type]);
        tnput(s->buf, count, ptr_val, type, 0);
    }
    s->num_entries++;
}

static void add_entry1(TiffEncoderContext *s,
                       enum TiffTags tag, enum TiffTypes type, int val)
{
    uint16_t w  = val;
    uint32_t dw = val;
    add_entry(s, tag, type, 1, type == TIFF_SHORT ? (void *)&w : (void *)&dw);
}
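
/* Each directory entry uses the fixed 12-byte TIFF IFD layout: tag (2 bytes),
 * type (2), value count (4), then either the value itself if it fits into
 * 4 bytes or the file offset of the values written out-of-line by
 * add_entry() above. */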

/**
 * Encode one strip in the TIFF file.
 *
 * @param s Tiff context
 * @param src Input buffer
 * @param dst Output buffer
 * @param n Size of input buffer
 * @param compr Compression method
 * @return Number of output bytes, or -1 if an output error is encountered
 */
static int encode_strip(TiffEncoderContext *s, const int8_t *src,
                        uint8_t *dst, int n, int compr)
{
    switch (compr) {
#if CONFIG_ZLIB
    case TIFF_DEFLATE:
    case TIFF_ADOBE_DEFLATE:
    {
        unsigned long zlen = s->buf_size - (*s->buf - s->buf_start);
        if (compress(dst, &zlen, src, n) != Z_OK) {
            av_log(s->avctx, AV_LOG_ERROR, "Compressing failed\n");
            return -1;
        }
        return zlen;
    }
#endif
    case TIFF_RAW:
        if (check_size(s, n))
            return -1;
        memcpy(dst, src, n);
        return n;
    case TIFF_PACKBITS:
        return ff_rle_encode(dst, s->buf_size - (*s->buf - s->buf_start),
                             src, 1, n, 2, 0xff, -1, 0);
    case TIFF_LZW:
        return ff_lzw_encode(s->lzws, src, n);
    default:
        return -1;
    }
}

static void pack_yuv(TiffEncoderContext *s, uint8_t *dst, int lnum)
{
    AVFrame *p = &s->picture;
    int i, j, k;
    int w = (s->width - 1) / s->subsampling[0] + 1;
    uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
    uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];

    for (i = 0; i < w; i++) {
        for (j = 0; j < s->subsampling[1]; j++)
            for (k = 0; k < s->subsampling[0]; k++)
                *dst++ = p->data[0][(lnum + j) * p->linesize[0] +
                                    i * s->subsampling[0] + k];
        *dst++ = *pu++;
        *dst++ = *pv++;
    }
}
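
/* pack_yuv() interleaves the planar input into the packed YCbCr layout used
 * with the YCbCrSubSampling written below: for each chroma position, all
 * subsampling[0] * subsampling[1] covered luma samples, followed by one Cb
 * and one Cr sample. */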

static int encode_frame(AVCodecContext *avctx, unsigned char *buf,
                        int buf_size, void *data)
{
    TiffEncoderContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame *const p = (AVFrame *)&s->picture;
    int i;
    int n;
    uint8_t *ptr = buf;
    uint8_t *offset;
    uint32_t strips;
    uint32_t *strip_sizes = NULL;
    uint32_t *strip_offsets = NULL;
    int bytes_per_row;
    uint32_t res[2] = { s->dpi, 1 };        // image resolution (dpi/1)
    uint16_t bpp_tab[] = { 8, 8, 8, 8 };
    int ret = -1;
    int is_yuv = 0;
    uint8_t *yuv_line = NULL;
    int shift_h, shift_v;

    s->avctx = avctx;
    s->buf_start = buf;
    s->buf = &ptr;
    s->buf_size = buf_size;

    *p = *pict;
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;
    avctx->coded_frame = &s->picture;

#if FF_API_TIFFENC_COMPLEVEL
    if (avctx->compression_level != FF_COMPRESSION_DEFAULT)
        av_log(avctx, AV_LOG_WARNING, "Using compression_level to set compression "
               "algorithm is deprecated. Please use the compression_algo private "
               "option instead.\n");
    if (avctx->compression_level == 0) {
        s->compr = TIFF_RAW;
    } else if (avctx->compression_level == 2) {
        s->compr = TIFF_LZW;
#if CONFIG_ZLIB
    } else if (avctx->compression_level >= 3) {
        s->compr = TIFF_DEFLATE;
#endif
    }
#endif

    s->width = avctx->width;
    s->height = avctx->height;
    s->subsampling[0] = 1;
    s->subsampling[1] = 1;

    switch (avctx->pix_fmt) {
    case PIX_FMT_RGBA64LE:
        s->bpp = 64;
        s->photometric_interpretation = 2;
        bpp_tab[0] = 16;
        bpp_tab[1] = 16;
        bpp_tab[2] = 16;
        bpp_tab[3] = 16;
        break;
    case PIX_FMT_RGB48LE:
        s->bpp = 48;
        s->photometric_interpretation = 2;
        bpp_tab[0] = 16;
        bpp_tab[1] = 16;
        bpp_tab[2] = 16;
        bpp_tab[3] = 16;
        break;
    case PIX_FMT_RGBA:
        s->bpp = 32;
        s->photometric_interpretation = 2;
        break;
    case PIX_FMT_RGB24:
        s->bpp = 24;
        s->photometric_interpretation = 2;
        break;
    case PIX_FMT_GRAY8:
        s->bpp = 8;
        s->photometric_interpretation = 1;
        break;
    case PIX_FMT_PAL8:
        s->bpp = 8;
        s->photometric_interpretation = 3;
        break;
    case PIX_FMT_MONOBLACK:
    case PIX_FMT_MONOWHITE:
        s->bpp = 1;
        s->photometric_interpretation = avctx->pix_fmt == PIX_FMT_MONOBLACK;
        bpp_tab[0] = 1;
        break;
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
        s->photometric_interpretation = 6;
        avcodec_get_chroma_sub_sample(avctx->pix_fmt, &shift_h, &shift_v);
        s->bpp = 8 + (16 >> (shift_h + shift_v));
        s->subsampling[0] = 1 << shift_h;
        s->subsampling[1] = 1 << shift_v;
        s->bpp_tab_size = 3;
        is_yuv = 1;
        break;
    default:
        av_log(s->avctx, AV_LOG_ERROR,
               "This color format is not supported\n");
        return -1;
    }

    if (!is_yuv)
        s->bpp_tab_size = (s->bpp >= 48) ? ((s->bpp + 7) >> 4) : ((s->bpp + 7) >> 3);

    if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE || s->compr == TIFF_LZW)
        // best choice for DEFLATE
        s->rps = s->height;
    else
        s->rps = FFMAX(8192 / (((s->width * s->bpp) >> 3) + 1), 1);  // suggested strip size
    s->rps = ((s->rps - 1) / s->subsampling[1] + 1) * s->subsampling[1];  // round rps up

    strips = (s->height - 1) / s->rps + 1;

    if (check_size(s, 8))
        goto fail;

    // write header: "II" (little-endian) byte order mark, magic 42 and a
    // placeholder for the directory offset, filled in at the end
    bytestream_put_le16(&ptr, 0x4949);
    bytestream_put_le16(&ptr, 42);

    offset = ptr;
    bytestream_put_le32(&ptr, 0);

    strip_sizes = av_mallocz(sizeof(*strip_sizes) * strips);
    strip_offsets = av_mallocz(sizeof(*strip_offsets) * strips);

    bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
                     s->subsampling[0] * s->subsampling[1] + 7) >> 3;

    if (is_yuv) {
        yuv_line = av_malloc(bytes_per_row);
        if (yuv_line == NULL) {
            av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
            goto fail;
        }
    }

#if CONFIG_ZLIB
    if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
        uint8_t *zbuf;
        int zlen, zn;
        int j;

        zlen = bytes_per_row * s->rps;
        zbuf = av_malloc(zlen);
        strip_offsets[0] = ptr - buf;
        zn = 0;
        for (j = 0; j < s->rps; j++) {
            if (is_yuv) {
                pack_yuv(s, yuv_line, j);
                memcpy(zbuf + zn, yuv_line, bytes_per_row);
                j += s->subsampling[1] - 1;
            } else
                memcpy(zbuf + j * bytes_per_row,
                       p->data[0] + j * p->linesize[0], bytes_per_row);
            zn += bytes_per_row;
        }
        n = encode_strip(s, zbuf, ptr, zn, s->compr);
        av_free(zbuf);
        if (n < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Encode strip failed\n");
            goto fail;
        }
        ptr += n;
        strip_sizes[0] = ptr - buf - strip_offsets[0];
    } else
#endif
    {
        if (s->compr == TIFF_LZW)
            s->lzws = av_malloc(ff_lzw_encode_state_size);
        for (i = 0; i < s->height; i++) {
            if (strip_sizes[i / s->rps] == 0) {
                if (s->compr == TIFF_LZW) {
                    ff_lzw_encode_init(s->lzws, ptr,
                                       s->buf_size - (*s->buf - s->buf_start),
                                       12, FF_LZW_TIFF, put_bits);
                }
                strip_offsets[i / s->rps] = ptr - buf;
            }
            if (is_yuv) {
                pack_yuv(s, yuv_line, i);
                n = encode_strip(s, yuv_line, ptr, bytes_per_row, s->compr);
                i += s->subsampling[1] - 1;
            } else
                n = encode_strip(s, p->data[0] + i * p->linesize[0],
                                 ptr, bytes_per_row, s->compr);
            if (n < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "Encode strip failed\n");
                goto fail;
            }
            strip_sizes[i / s->rps] += n;
            ptr += n;
            if (s->compr == TIFF_LZW &&
                (i == s->height - 1 || i % s->rps == s->rps - 1)) {
                int ret;
                ret = ff_lzw_encode_flush(s->lzws, flush_put_bits);
                strip_sizes[i / s->rps] += ret;
                ptr += ret;
            }
        }
        if (s->compr == TIFF_LZW)
            av_free(s->lzws);
    }
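
    /* At this point every strip has been written; strip_offsets[] and
     * strip_sizes[] record where each one landed in the output, and the
     * directory written next references them through the TIFF_STRIP_OFFS
     * and TIFF_STRIP_SIZE entries. */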

    s->num_entries = 0;
    add_entry1(s, TIFF_SUBFILE, TIFF_LONG, 0);
    add_entry1(s, TIFF_WIDTH, TIFF_LONG, s->width);
    add_entry1(s, TIFF_HEIGHT, TIFF_LONG, s->height);

    if (s->bpp_tab_size)
        add_entry(s, TIFF_BPP, TIFF_SHORT, s->bpp_tab_size, bpp_tab);

    add_entry1(s, TIFF_COMPR, TIFF_SHORT, s->compr);
    add_entry1(s, TIFF_INVERT, TIFF_SHORT, s->photometric_interpretation);
    add_entry(s, TIFF_STRIP_OFFS, TIFF_LONG, strips, strip_offsets);

    if (s->bpp_tab_size)
        add_entry1(s, TIFF_SAMPLES_PER_PIXEL, TIFF_SHORT, s->bpp_tab_size);

    add_entry1(s, TIFF_ROWSPERSTRIP, TIFF_LONG, s->rps);
    add_entry(s, TIFF_STRIP_SIZE, TIFF_LONG, strips, strip_sizes);
    add_entry(s, TIFF_XRES, TIFF_RATIONAL, 1, res);
    add_entry(s, TIFF_YRES, TIFF_RATIONAL, 1, res);
    add_entry1(s, TIFF_RES_UNIT, TIFF_SHORT, 2);

    if (!(avctx->flags & CODEC_FLAG_BITEXACT))
        add_entry(s, TIFF_SOFTWARE_NAME, TIFF_STRING,
                  strlen(LIBAVCODEC_IDENT) + 1, LIBAVCODEC_IDENT);

    if (avctx->pix_fmt == PIX_FMT_PAL8) {
        uint16_t pal[256 * 3];
        for (i = 0; i < 256; i++) {
            uint32_t rgb = *(uint32_t *)(p->data[1] + i * 4);
            pal[i]       = ((rgb >> 16) & 0xff) * 257;
            pal[i + 256] = ((rgb >>  8) & 0xff) * 257;
            pal[i + 512] = ( rgb        & 0xff) * 257;
        }
        add_entry(s, TIFF_PAL, TIFF_SHORT, 256 * 3, pal);
    }
    if (is_yuv) {
        /** according to CCIR Recommendation 601.1 */
        uint32_t refbw[12] = { 15, 1, 235, 1, 128, 1, 240, 1, 128, 1, 240, 1 };
        add_entry(s, TIFF_YCBCR_SUBSAMPLING, TIFF_SHORT, 2, s->subsampling);
        add_entry(s, TIFF_REFERENCE_BW, TIFF_RATIONAL, 6, refbw);
    }

    bytestream_put_le32(&offset, ptr - buf);    // write offset to dir

    if (check_size(s, 6 + s->num_entries * 12))
        goto fail;
    bytestream_put_le16(&ptr, s->num_entries);  // write tag count
    bytestream_put_buffer(&ptr, s->entries, s->num_entries * 12);
    bytestream_put_le32(&ptr, 0);               // no next IFD

    ret = ptr - buf;

fail:
    av_free(strip_sizes);
    av_free(strip_offsets);
    av_free(yuv_line);
    return ret;
}

#define OFFSET(x) offsetof(TiffEncoderContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "dpi", "set the image resolution (in dpi)", OFFSET(dpi), AV_OPT_TYPE_INT, { .dbl = 72 }, 1, 0x10000, AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM },
    { "compression_algo", NULL, OFFSET(compr), AV_OPT_TYPE_INT, { TIFF_PACKBITS }, TIFF_RAW, TIFF_DEFLATE, VE, "compression_algo" },
    { "packbits", NULL, 0, AV_OPT_TYPE_CONST, { TIFF_PACKBITS }, 0, 0, VE, "compression_algo" },
    { "raw",      NULL, 0, AV_OPT_TYPE_CONST, { TIFF_RAW },      0, 0, VE, "compression_algo" },
    { "lzw",      NULL, 0, AV_OPT_TYPE_CONST, { TIFF_LZW },      0, 0, VE, "compression_algo" },
#if CONFIG_ZLIB
    { "deflate",  NULL, 0, AV_OPT_TYPE_CONST, { TIFF_DEFLATE },  0, 0, VE, "compression_algo" },
#endif
    { NULL },
};
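
/* The private options above are reachable through the normal AVOption
 * mechanism; for example (hypothetical command line, assuming an ffmpeg
 * build that includes this encoder):
 *   ffmpeg -i input.png -c:v tiff -compression_algo lzw -dpi 300 out.tif
 */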

static const AVClass tiffenc_class = {
    .class_name = "TIFF encoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_tiff_encoder = {
    .name           = "tiff",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_TIFF,
    .priv_data_size = sizeof(TiffEncoderContext),
    .encode         = encode_frame,
    .pix_fmts       = (const enum PixelFormat[]) {
        PIX_FMT_RGB24, PIX_FMT_PAL8, PIX_FMT_GRAY8,
        PIX_FMT_MONOBLACK, PIX_FMT_MONOWHITE,
        PIX_FMT_YUV420P, PIX_FMT_YUV422P,
        PIX_FMT_YUV444P, PIX_FMT_YUV410P,
        PIX_FMT_YUV411P, PIX_FMT_RGB48LE,
        PIX_FMT_RGBA, PIX_FMT_RGBA64LE, PIX_FMT_NONE
    },
    .long_name      = NULL_IF_CONFIG_SMALL("TIFF image"),
    .priv_class     = &tiffenc_class,
};