/*
 * TIFF image encoder
 * Copyright (c) 2007 Bartlomiej Wolowiec
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * TIFF image encoder
 * @author Bartlomiej Wolowiec
 */
#include "config.h"
#if CONFIG_ZLIB
#include <zlib.h>
#endif

#include "libavutil/imgutils.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"
#include "lzw.h"
#include "put_bits.h"
#include "rle.h"
#include "tiff.h"
#define TIFF_MAX_ENTRY 32

/** sizes of various TIFF field types (string size = 1) */
static const uint8_t type_sizes2[14] = {
    0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8, 4
};
typedef struct TiffEncoderContext {
    AVClass *class;                         ///< for private options
    AVCodecContext *avctx;

    int width;                              ///< picture width
    int height;                             ///< picture height
    unsigned int bpp;                       ///< bits per pixel
    int compr;                              ///< compression level
    int bpp_tab_size;                       ///< bpp_tab size
    enum TiffPhotometric photometric_interpretation;  ///< photometric interpretation
    int strips;                             ///< number of strips
    uint32_t *strip_sizes;
    unsigned int strip_sizes_size;
    uint32_t *strip_offsets;
    unsigned int strip_offsets_size;
    uint8_t *yuv_line;
    unsigned int yuv_line_size;
    int rps;                                ///< rows per strip
    uint8_t entries[TIFF_MAX_ENTRY * 12];   ///< entries in header
    int num_entries;                        ///< number of entries
    uint8_t **buf;                          ///< actual position in buffer
    uint8_t *buf_start;                     ///< pointer to first byte in buffer
    int buf_size;                           ///< buffer size
    uint16_t subsampling[2];                ///< YUV subsampling factors
    struct LZWEncodeState *lzws;            ///< LZW encode state
    uint32_t dpi;                           ///< image resolution in DPI
} TiffEncoderContext;
/**
 * Check free space in buffer.
 *
 * @param s Tiff context
 * @param need Needed bytes
 * @return 0 - ok, 1 - no free space
 */
static inline int check_size(TiffEncoderContext *s, uint64_t need)
{
    if (s->buf_size < *s->buf - s->buf_start + need) {
        *s->buf = s->buf_start + s->buf_size + 1;
        av_log(s->avctx, AV_LOG_ERROR, "Buffer is too small\n");
        return 1;
    }
    return 0;
}
/**
 * Put n values to buffer.
 *
 * @param p pointer to pointer to output buffer
 * @param n number of values
 * @param val pointer to values
 * @param type type of values
 * @param flip = 0 - normal copy, >0 - flip
 */
static void tnput(uint8_t **p, int n, const uint8_t *val, enum TiffTypes type,
                  int flip)
{
    int i;
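    /* On big-endian hosts, XOR the byte index so multi-byte values
     * (flip 1 for SHORT, flip 3 for LONG and for each half of a RATIONAL)
     * are written in the little-endian order declared in the file header. */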
#if HAVE_BIGENDIAN
    flip ^= ((int[]) { 0, 0, 0, 1, 3, 3 })[type];
#endif
    for (i = 0; i < n * type_sizes2[type]; i++)
        *(*p)++ = val[i ^ flip];
}
/**
 * Add entry to directory in tiff header.
 *
 * @param s Tiff context
 * @param tag tag that identifies the entry
 * @param type entry type
 * @param count the number of values
 * @param ptr_val pointer to values
 */
static void add_entry(TiffEncoderContext *s, enum TiffTags tag,
                      enum TiffTypes type, int count, const void *ptr_val)
{
    uint8_t *entries_ptr = s->entries + 12 * s->num_entries;

    av_assert0(s->num_entries < TIFF_MAX_ENTRY);

    bytestream_put_le16(&entries_ptr, tag);
    bytestream_put_le16(&entries_ptr, type);
    bytestream_put_le32(&entries_ptr, count);
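    /* Values that fit into the entry's 4-byte value field are stored inline;
     * larger values are written to the data area and the entry stores their
     * offset from the start of the file instead. */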
    if (type_sizes[type] * (int64_t)count <= 4) {
        tnput(&entries_ptr, count, ptr_val, type, 0);
    } else {
        bytestream_put_le32(&entries_ptr, *s->buf - s->buf_start);
        check_size(s, count * (int64_t)type_sizes2[type]);
        tnput(s->buf, count, ptr_val, type, 0);
    }

    s->num_entries++;
}
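/** Add a directory entry holding a single SHORT or LONG value. */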
static void add_entry1(TiffEncoderContext *s,
                       enum TiffTags tag, enum TiffTypes type, int val)
{
    uint16_t w  = val;
    uint32_t dw = val;
    add_entry(s, tag, type, 1, type == TIFF_SHORT ? (void *)&w : (void *)&dw);
}
/**
 * Encode one strip in tiff file.
 *
 * @param s Tiff context
 * @param src input buffer
 * @param dst output buffer
 * @param n size of input buffer
 * @param compr compression method
 * @return number of output bytes. If an output error is encountered, -1 is returned
 */
static int encode_strip(TiffEncoderContext *s, const int8_t *src,
                        uint8_t *dst, int n, int compr)
{
    switch (compr) {
#if CONFIG_ZLIB
    case TIFF_DEFLATE:
    case TIFF_ADOBE_DEFLATE:
    {
        unsigned long zlen = s->buf_size - (*s->buf - s->buf_start);
        if (compress(dst, &zlen, src, n) != Z_OK) {
            av_log(s->avctx, AV_LOG_ERROR, "Compressing failed\n");
            return -1;
        }
        return zlen;
    }
#endif
    case TIFF_RAW:
        if (check_size(s, n))
            return -1;
        memcpy(dst, src, n);
        return n;
    case TIFF_PACKBITS:
        return ff_rle_encode(dst, s->buf_size - (*s->buf - s->buf_start),
                             src, 1, n, 2, 0xff, -1, 0);
    case TIFF_LZW:
        return ff_lzw_encode(s->lzws, src, n);
    default:
        return -1;
    }
}
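/**
 * Pack one line of a subsampled YUV frame into the TIFF YCbCr layout:
 * each Cb/Cr sample pair is preceded by its subsampling[0] x subsampling[1]
 * block of luma samples. Edge samples are replicated when the picture size
 * is not a multiple of the subsampling factors.
 */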
static void pack_yuv(TiffEncoderContext *s, const AVFrame *p,
                     uint8_t *dst, int lnum)
{
    int i, j, k;
    int w       = (s->width - 1) / s->subsampling[0] + 1;
    uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
    uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];

    if (s->width % s->subsampling[0] || s->height % s->subsampling[1]) {
        for (i = 0; i < w; i++) {
            for (j = 0; j < s->subsampling[1]; j++)
                for (k = 0; k < s->subsampling[0]; k++)
                    *dst++ = p->data[0][FFMIN(lnum + j, s->height - 1) * p->linesize[0] +
                                        FFMIN(i * s->subsampling[0] + k, s->width - 1)];
            *dst++ = *pu++;
            *dst++ = *pv++;
        }
    } else {
        for (i = 0; i < w; i++) {
            for (j = 0; j < s->subsampling[1]; j++)
                for (k = 0; k < s->subsampling[0]; k++)
                    *dst++ = p->data[0][(lnum + j) * p->linesize[0] +
                                        i * s->subsampling[0] + k];
            *dst++ = *pu++;
            *dst++ = *pv++;
        }
    }
}
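/** Write the TIFF header, the compressed image strips and the IFD into one packet. */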
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    TiffEncoderContext *s = avctx->priv_data;
    const AVFrame *const p = pict;
    int i;
    uint8_t *ptr;
    uint8_t *offset;
    uint32_t strips;
    int bytes_per_row;
    uint32_t res[2] = { s->dpi, 1 };    // image resolution (72/1)
    uint16_t bpp_tab[4];
    int ret = -1;
    int is_yuv = 0, alpha = 0;
    int shift_h, shift_v;
    int packet_size;

    s->width          = avctx->width;
    s->height         = avctx->height;
    s->subsampling[0] = 1;
    s->subsampling[1] = 1;

    avctx->bits_per_coded_sample =
    s->bpp          = av_get_bits_per_pixel(desc);
    s->bpp_tab_size = desc->nb_components;
    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_RGBA64LE:
    case AV_PIX_FMT_RGBA:
        alpha = 1;
    case AV_PIX_FMT_RGB48LE:
    case AV_PIX_FMT_RGB24:
        s->photometric_interpretation = TIFF_PHOTOMETRIC_RGB;
        break;
    case AV_PIX_FMT_GRAY8:
        avctx->bits_per_coded_sample = 0x28;
    case AV_PIX_FMT_GRAY8A:
        alpha = avctx->pix_fmt == AV_PIX_FMT_GRAY8A;
    case AV_PIX_FMT_GRAY16LE:
    case AV_PIX_FMT_MONOBLACK:
        s->photometric_interpretation = TIFF_PHOTOMETRIC_BLACK_IS_ZERO;
        break;
    case AV_PIX_FMT_PAL8:
        s->photometric_interpretation = TIFF_PHOTOMETRIC_PALETTE;
        break;
    case AV_PIX_FMT_MONOWHITE:
        s->photometric_interpretation = TIFF_PHOTOMETRIC_WHITE_IS_ZERO;
        break;
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV440P:
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV411P:
        av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &shift_h, &shift_v);
        s->photometric_interpretation = TIFF_PHOTOMETRIC_YCBCR;
        s->subsampling[0]             = 1 << shift_h;
        s->subsampling[1]             = 1 << shift_v;
        is_yuv                        = 1;
        break;
    default:
        av_log(s->avctx, AV_LOG_ERROR,
               "This color format is not supported\n");
        return -1;
    }
    for (i = 0; i < s->bpp_tab_size; i++)
        bpp_tab[i] = desc->comp[i].depth_minus1 + 1;

    if (s->compr == TIFF_DEFLATE ||
        s->compr == TIFF_ADOBE_DEFLATE ||
        s->compr == TIFF_LZW)
        // best choice for DEFLATE
        s->rps = s->height;
    else
        // suggest size of strip
        s->rps = FFMAX(8192 / (((s->width * s->bpp) >> 3) + 1), 1);
    // round rps up
    s->rps = ((s->rps - 1) / s->subsampling[1] + 1) * s->subsampling[1];

    strips = (s->height - 1) / s->rps + 1;
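    /* bytes in one packed output row; for subsampled YUV this covers
     * subsampling[1] picture rows (one row of macro-pixels) */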
    bytes_per_row = (((s->width - 1) / s->subsampling[0] + 1) * s->bpp *
                     s->subsampling[0] * s->subsampling[1] + 7) >> 3;
    packet_size = avctx->height * bytes_per_row * 2 +
                  avctx->height * 4 + FF_MIN_BUFFER_SIZE;

    if ((ret = ff_alloc_packet2(avctx, pkt, packet_size)) < 0)
        return ret;
    ptr          = pkt->data;
    s->buf_start = pkt->data;
    s->buf       = &ptr;
    s->buf_size  = pkt->size;

    if (check_size(s, 8))
        goto fail;

    // write header
    bytestream_put_le16(&ptr, 0x4949);  // "II": little-endian byte order
    bytestream_put_le16(&ptr, 42);      // TIFF magic number
    offset = ptr;
    bytestream_put_le32(&ptr, 0);       // IFD offset, patched after the image data
    if (strips > INT_MAX / FFMAX(sizeof(s->strip_sizes[0]), sizeof(s->strip_offsets[0]))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    av_fast_padded_mallocz(&s->strip_sizes  , &s->strip_sizes_size  , sizeof(s->strip_sizes  [0]) * strips);
    av_fast_padded_mallocz(&s->strip_offsets, &s->strip_offsets_size, sizeof(s->strip_offsets[0]) * strips);

    if (!s->strip_sizes || !s->strip_offsets) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if (is_yuv) {
        av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, bytes_per_row);
        if (s->yuv_line == NULL) {
            av_log(s->avctx, AV_LOG_ERROR, "Not enough memory\n");
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }
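    /* For DEFLATE the whole image is gathered into a temporary buffer and
     * compressed as a single strip (rps was set to the full height above). */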
#if CONFIG_ZLIB
    if (s->compr == TIFF_DEFLATE || s->compr == TIFF_ADOBE_DEFLATE) {
        uint8_t *zbuf;
        int zlen, zn;
        int j;

        zlen = bytes_per_row * s->rps;
        zbuf = av_malloc(zlen);
        if (!zbuf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        s->strip_offsets[0] = ptr - pkt->data;
        zn                  = 0;
        for (j = 0; j < s->rps; j++) {
            if (is_yuv) {
                pack_yuv(s, p, s->yuv_line, j);
                memcpy(zbuf + zn, s->yuv_line, bytes_per_row);
                j += s->subsampling[1] - 1;
            } else
                memcpy(zbuf + j * bytes_per_row,
                       p->data[0] + j * p->linesize[0], bytes_per_row);
            zn += bytes_per_row;
        }
        ret = encode_strip(s, zbuf, ptr, zn, s->compr);
        av_free(zbuf);
        if (ret < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Encode strip failed\n");
            goto fail;
        }
        ptr += ret;
        s->strip_sizes[0] = ptr - pkt->data - s->strip_offsets[0];
    } else
#endif
    {
        if (s->compr == TIFF_LZW) {
            s->lzws = av_malloc(ff_lzw_encode_state_size);
            if (!s->lzws) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
        for (i = 0; i < s->height; i++) {
            if (s->strip_sizes[i / s->rps] == 0) {
                if (s->compr == TIFF_LZW) {
                    ff_lzw_encode_init(s->lzws, ptr,
                                       s->buf_size - (*s->buf - s->buf_start),
                                       12, FF_LZW_TIFF, put_bits);
                }
                s->strip_offsets[i / s->rps] = ptr - pkt->data;
            }
            if (is_yuv) {
                pack_yuv(s, p, s->yuv_line, i);
                ret = encode_strip(s, s->yuv_line, ptr, bytes_per_row, s->compr);
                i  += s->subsampling[1] - 1;
            } else
                ret = encode_strip(s, p->data[0] + i * p->linesize[0],
                                   ptr, bytes_per_row, s->compr);
            if (ret < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "Encode strip failed\n");
                goto fail;
            }
            s->strip_sizes[i / s->rps] += ret;
            ptr                        += ret;
            if (s->compr == TIFF_LZW &&
                (i == s->height - 1 || i % s->rps == s->rps - 1)) {
                ret = ff_lzw_encode_flush(s->lzws, flush_put_bits);
                s->strip_sizes[(i / s->rps)] += ret;
                ptr                          += ret;
            }
        }
        if (s->compr == TIFF_LZW)
            av_freep(&s->lzws);
    }
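    /* Build the IFD: entries are collected into s->entries via add_entry()
     * and written after the image data; the offset placeholder in the header
     * is patched below to point at them. */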
    s->num_entries = 0;
    add_entry1(s, TIFF_SUBFILE, TIFF_LONG, 0);
    add_entry1(s, TIFF_WIDTH,   TIFF_LONG, s->width);
    add_entry1(s, TIFF_HEIGHT,  TIFF_LONG, s->height);

    if (s->bpp_tab_size)
        add_entry(s, TIFF_BPP, TIFF_SHORT, s->bpp_tab_size, bpp_tab);

    add_entry1(s, TIFF_COMPR,       TIFF_SHORT, s->compr);
    add_entry1(s, TIFF_PHOTOMETRIC, TIFF_SHORT, s->photometric_interpretation);
    add_entry(s,  TIFF_STRIP_OFFS,  TIFF_LONG,  strips, s->strip_offsets);

    if (s->bpp_tab_size)
        add_entry1(s, TIFF_SAMPLES_PER_PIXEL, TIFF_SHORT, s->bpp_tab_size);

    add_entry1(s, TIFF_ROWSPERSTRIP, TIFF_LONG,     s->rps);
    add_entry(s,  TIFF_STRIP_SIZE,   TIFF_LONG,     strips, s->strip_sizes);
    add_entry(s,  TIFF_XRES,         TIFF_RATIONAL, 1,      res);
    if (avctx->sample_aspect_ratio.num > 0 &&
        avctx->sample_aspect_ratio.den > 0) {
        AVRational y = av_mul_q(av_make_q(s->dpi, 1),
                                avctx->sample_aspect_ratio);
        res[0] = y.num;
        res[1] = y.den;
    }
    add_entry(s, TIFF_YRES, TIFF_RATIONAL, 1, res);
    add_entry1(s, TIFF_RES_UNIT, TIFF_SHORT, 2);

    if (!(avctx->flags & CODEC_FLAG_BITEXACT))
        add_entry(s, TIFF_SOFTWARE_NAME, TIFF_STRING,
                  strlen(LIBAVCODEC_IDENT) + 1, LIBAVCODEC_IDENT);
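    /* For palettized input the palette is exported as a 16-bit TIFF colormap;
     * 8-bit components are scaled by 257 so that 255 maps to 65535. */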
    if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        uint16_t pal[256 * 3];
        for (i = 0; i < 256; i++) {
            uint32_t rgb = *(uint32_t *) (p->data[1] + i * 4);
            pal[i]       = ((rgb >> 16) & 0xff) * 257;
            pal[i + 256] = ((rgb >>  8) & 0xff) * 257;
            pal[i + 512] =  (rgb        & 0xff) * 257;
        }
        add_entry(s, TIFF_PAL, TIFF_SHORT, 256 * 3, pal);
    }
    if (alpha)
        add_entry1(s, TIFF_EXTRASAMPLES, TIFF_SHORT, 2);
    if (is_yuv) {
        /** according to CCIR Recommendation 601.1 */
        uint32_t refbw[12] = { 15, 1, 235, 1, 128, 1, 240, 1, 128, 1, 240, 1 };
        add_entry(s, TIFF_YCBCR_SUBSAMPLING, TIFF_SHORT, 2, s->subsampling);
        if (avctx->chroma_sample_location == AVCHROMA_LOC_TOPLEFT)
            add_entry1(s, TIFF_YCBCR_POSITIONING, TIFF_SHORT, 2);
        add_entry(s, TIFF_REFERENCE_BW, TIFF_RATIONAL, 6, refbw);
    }
    // write offset to dir
    bytestream_put_le32(&offset, ptr - pkt->data);

    if (check_size(s, 6 + s->num_entries * 12)) {
        ret = AVERROR(EINVAL);
        goto fail;
    }
    bytestream_put_le16(&ptr, s->num_entries); // write tag count
    bytestream_put_buffer(&ptr, s->entries, s->num_entries * 12);
    bytestream_put_le32(&ptr, 0);

    pkt->size   = ptr - pkt->data;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

fail:
    return ret < 0 ? ret : 0;
}
static av_cold int encode_init(AVCodecContext *avctx)
{
    TiffEncoderContext *s = avctx->priv_data;

    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame)
        return AVERROR(ENOMEM);

    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;

    s->avctx = avctx;

    return 0;
}
static av_cold int encode_close(AVCodecContext *avctx)
{
    TiffEncoderContext *s = avctx->priv_data;

    av_frame_free(&avctx->coded_frame);
    av_freep(&s->strip_sizes);
    av_freep(&s->strip_offsets);
    av_freep(&s->yuv_line);
    return 0;
}
#define OFFSET(x) offsetof(TiffEncoderContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "dpi", "set the image resolution (in dpi)", OFFSET(dpi), AV_OPT_TYPE_INT, { .i64 = 72 }, 1, 0x10000, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_ENCODING_PARAM },
    { "compression_algo", NULL, OFFSET(compr), AV_OPT_TYPE_INT,   { .i64 = TIFF_PACKBITS }, TIFF_RAW, TIFF_DEFLATE, VE, "compression_algo" },
    { "packbits",         NULL, 0,             AV_OPT_TYPE_CONST, { .i64 = TIFF_PACKBITS }, 0,        0,            VE, "compression_algo" },
    { "raw",              NULL, 0,             AV_OPT_TYPE_CONST, { .i64 = TIFF_RAW },      0,        0,            VE, "compression_algo" },
    { "lzw",              NULL, 0,             AV_OPT_TYPE_CONST, { .i64 = TIFF_LZW },      0,        0,            VE, "compression_algo" },
#if CONFIG_ZLIB
    { "deflate",          NULL, 0,             AV_OPT_TYPE_CONST, { .i64 = TIFF_DEFLATE },  0,        0,            VE, "compression_algo" },
#endif
    { NULL },
};
static const AVClass tiffenc_class = {
    .class_name = "TIFF encoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_tiff_encoder = {
    .name           = "tiff",
    .long_name      = NULL_IF_CONFIG_SMALL("TIFF image"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_TIFF,
    .priv_data_size = sizeof(TiffEncoderContext),
    .init           = encode_init,
    .close          = encode_close,
    .capabilities   = CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
    .encode2        = encode_frame,
    .pix_fmts       = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_RGB24, AV_PIX_FMT_PAL8, AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_GRAY8A, AV_PIX_FMT_GRAY16LE,
        AV_PIX_FMT_MONOBLACK, AV_PIX_FMT_MONOWHITE,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_RGB48LE,
        AV_PIX_FMT_RGBA, AV_PIX_FMT_RGBA64LE,
        AV_PIX_FMT_NONE
    },
    .priv_class     = &tiffenc_class,
};