You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

829 lines
33KB

  1. /*
  2. * IFF ACBM/DEEP/ILBM/PBM bitmap decoder
  3. * Copyright (c) 2010 Peter Ross <pross@xvid.org>
  4. * Copyright (c) 2010 Sebastian Vater <cdgs.basty@googlemail.com>
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file
  24. * IFF ACBM/DEEP/ILBM/PBM bitmap decoder
  25. */
  26. #include "libavutil/imgutils.h"
  27. #include "bytestream.h"
  28. #include "avcodec.h"
  29. #include "get_bits.h"
  30. #include "internal.h"
  31. // TODO: masking bits
typedef enum {
    MASK_NONE,                  ///< image has no masking
    MASK_HAS_MASK,              ///< an extra bitplane carries a per-pixel mask
    MASK_HAS_TRANSPARENT_COLOR, ///< one palette index is treated as transparent
    MASK_LASSO                  ///< lasso masking; rejected by extract_header()
} mask_type;
typedef struct {
    AVFrame frame;          ///< output frame, reused across calls via reget_buffer()
    int planesize;          ///< bytes per bitplane row (width aligned to 16 bits, see decode_init)
    uint8_t *planebuf;      ///< scratch buffer for one decompressed plane row
    uint8_t *ham_buf;       ///< temporary buffer for planar to chunky conversion
    uint32_t *ham_palbuf;   ///< HAM decode table
    uint32_t *mask_buf;     ///< temporary buffer for palette indices
    uint32_t *mask_palbuf;  ///< masking palette table
    unsigned compression;   ///< delta compression method used
    unsigned bpp;           ///< bits per plane to decode (differs from bits_per_coded_sample if HAM)
    unsigned ham;           ///< 0 if non-HAM or number of hold bits (6 for bpp > 6, 4 otherwise)
    unsigned flags;         ///< 1 for EHB, 0 is no extra half darkening
    unsigned transparency;  ///< TODO: transparency color index in palette
    unsigned masking;       ///< TODO: masking method used
    int init;               ///< 1 if buffer and palette data already initialized, 0 otherwise
    int16_t tvdc[16];       ///< TVDC lookup table
} IffContext;
/*
 * plane8_lut[plane][byte] is a 64-bit mask scattering the 8 bits of one
 * planar source byte into 8 destination bytes, each set bit contributing
 * 1 << plane after the endianness fixup done by AV_LE2NE64C.
 * decodeplane8() ORs these masks together to rebuild chunky pixels.
 * LUT8_PART enumerates the 16 combinations of the bits encoded in the
 * upper 32 bits for a fixed lower-half pattern v; LUT8 iterates the 16
 * lower-half patterns, giving the full 256-entry table per plane.
 */
#define LUT8_PART(plane, v) \
    AV_LE2NE64C(UINT64_C(0x0000000)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1000000)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0010000)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1010000)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0000100)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1000100)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0010100)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1010100)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0000001)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1000001)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0010001)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1010001)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0000101)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1000101)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0010101)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1010101)<<32 | v) << plane

#define LUT8(plane) { \
    LUT8_PART(plane, 0x0000000), \
    LUT8_PART(plane, 0x1000000), \
    LUT8_PART(plane, 0x0010000), \
    LUT8_PART(plane, 0x1010000), \
    LUT8_PART(plane, 0x0000100), \
    LUT8_PART(plane, 0x1000100), \
    LUT8_PART(plane, 0x0010100), \
    LUT8_PART(plane, 0x1010100), \
    LUT8_PART(plane, 0x0000001), \
    LUT8_PART(plane, 0x1000001), \
    LUT8_PART(plane, 0x0010001), \
    LUT8_PART(plane, 0x1010001), \
    LUT8_PART(plane, 0x0000101), \
    LUT8_PART(plane, 0x1000101), \
    LUT8_PART(plane, 0x0010101), \
    LUT8_PART(plane, 0x1010101), \
}

// 8 planes * 8-bit mask
static const uint64_t plane8_lut[8][256] = {
    LUT8(0), LUT8(1), LUT8(2), LUT8(3),
    LUT8(4), LUT8(5), LUT8(6), LUT8(7),
};
/*
 * plane32_lut[plane] is indexed in groups of four: a 4-bit source nibble
 * (scaled by 4 in decodeplane32) selects four consecutive 32-bit entries,
 * one per output pixel, each entry being 1 << plane when the matching
 * source bit is set — most significant bit maps to the leftmost pixel.
 */
#define LUT32(plane) { \
             0,          0,          0,          0, \
             0,          0,          0, 1 << plane, \
             0,          0, 1 << plane,          0, \
             0,          0, 1 << plane, 1 << plane, \
             0, 1 << plane,          0,          0, \
             0, 1 << plane,          0, 1 << plane, \
             0, 1 << plane, 1 << plane,          0, \
             0, 1 << plane, 1 << plane, 1 << plane, \
    1 << plane,          0,          0,          0, \
    1 << plane,          0,          0, 1 << plane, \
    1 << plane,          0, 1 << plane,          0, \
    1 << plane,          0, 1 << plane, 1 << plane, \
    1 << plane, 1 << plane,          0,          0, \
    1 << plane, 1 << plane,          0, 1 << plane, \
    1 << plane, 1 << plane, 1 << plane,          0, \
    1 << plane, 1 << plane, 1 << plane, 1 << plane, \
}

// 32 planes * 4-bit mask * 4 lookup tables each
static const uint32_t plane32_lut[32][16*4] = {
    LUT32( 0), LUT32( 1), LUT32( 2), LUT32( 3),
    LUT32( 4), LUT32( 5), LUT32( 6), LUT32( 7),
    LUT32( 8), LUT32( 9), LUT32(10), LUT32(11),
    LUT32(12), LUT32(13), LUT32(14), LUT32(15),
    LUT32(16), LUT32(17), LUT32(18), LUT32(19),
    LUT32(20), LUT32(21), LUT32(22), LUT32(23),
    LUT32(24), LUT32(25), LUT32(26), LUT32(27),
    LUT32(28), LUT32(29), LUT32(30), LUT32(31),
};
  124. // Gray to RGB, required for palette table of grayscale images with bpp < 8
  125. static av_always_inline uint32_t gray2rgb(const uint32_t x) {
  126. return x << 16 | x << 8 | x;
  127. }
/**
 * Convert CMAP buffer (stored in extradata) to lavc palette format
 *
 * extradata layout: [BE16 offset][header bytes][RGB triplets]; the offset
 * points at the first palette triplet.
 *
 * @param avctx codec context carrying the extradata
 * @param pal   output array of 0xAARRGGBB entries; with MASK_HAS_MASK the
 *              table is doubled (transparent lower half, opaque upper half)
 * @return 0 on success, a negative error code otherwise
 */
static int ff_cmap_read_palette(AVCodecContext *avctx, uint32_t *pal)
{
    IffContext *s = avctx->priv_data;
    int count, i;
    const uint8_t *const palette = avctx->extradata + AV_RB16(avctx->extradata);
    int palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);

    if (avctx->bits_per_coded_sample > 8) {
        av_log(avctx, AV_LOG_ERROR, "bits_per_coded_sample > 8 not supported\n");
        return AVERROR_INVALIDDATA;
    }

    count = 1 << avctx->bits_per_coded_sample;
    // If extradata is smaller than actually needed, fill the remaining with black.
    count = FFMIN(palette_size / 3, count);
    if (count) {
        for (i = 0; i < count; i++) {
            pal[i] = 0xFF000000 | AV_RB24(palette + i*3);
        }
        if (s->flags && count >= 32) { // EHB: entries 32..63 are entries 0..31 at half brightness
            for (i = 0; i < 32; i++)
                pal[i + 32] = 0xFF000000 | (AV_RB24(palette + i*3) & 0xFEFEFE) >> 1;
            count = FFMAX(count, 64);
        }
    } else { // Create gray-scale color palette for bps < 8
        count = 1 << avctx->bits_per_coded_sample;
        for (i = 0; i < count; i++) {
            pal[i] = 0xFF000000 | gray2rgb((i * 255) >> avctx->bits_per_coded_sample);
        }
    }
    if (s->masking == MASK_HAS_MASK) {
        /* duplicate the palette: upper copy stays opaque, the original
         * entries lose their alpha so masked-out pixels are transparent */
        memcpy(pal + (1 << avctx->bits_per_coded_sample), pal, count * 4);
        for (i = 0; i < count; i++)
            pal[i] &= 0xFFFFFF;
    } else if (s->masking == MASK_HAS_TRANSPARENT_COLOR &&
               s->transparency < 1 << avctx->bits_per_coded_sample)
        pal[s->transparency] &= 0xFFFFFF; /* clear alpha of the transparent index */
    return 0;
}
/**
 * Extracts the IFF extra context and updates internal
 * decoder structures.
 *
 * The context blob (start of a packet, or the extradata when avpkt is
 * NULL) is: BE16 data offset, then — if the offset is at least 41 —
 * compression, bpp, ham and flags bytes, a BE16 transparent color index,
 * a masking byte and 16 BE16 TVDC table entries.
 *
 * @param avctx the AVCodecContext where to extract extra context to
 * @param avpkt the AVPacket to extract extra context from or NULL to use avctx
 * @return 0 in case of success, a negative error code otherwise
 */
static int extract_header(AVCodecContext *const avctx,
                          const AVPacket *const avpkt) {
    const uint8_t *buf;
    unsigned buf_size;
    IffContext *s = avctx->priv_data;
    int i, palette_size;

    if (avctx->extradata_size < 2) {
        av_log(avctx, AV_LOG_ERROR, "not enough extradata\n");
        return AVERROR_INVALIDDATA;
    }
    palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);

    if (avpkt) {
        int image_size;
        if (avpkt->size < 2)
            return AVERROR_INVALIDDATA;
        image_size = avpkt->size - AV_RB16(avpkt->data);
        buf = avpkt->data;
        buf_size = bytestream_get_be16(&buf); /* header size == image data offset */
        if (buf_size <= 1 || image_size <= 1) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid image size received: %u -> image data offset: %d\n",
                   buf_size, image_size);
            return AVERROR_INVALIDDATA;
        }
    } else {
        buf = avctx->extradata;
        buf_size = bytestream_get_be16(&buf);
        if (buf_size <= 1 || palette_size < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid palette size received: %u -> palette data offset: %d\n",
                   buf_size, palette_size);
            return AVERROR_INVALIDDATA;
        }
    }

    if (buf_size >= 41) { /* 2-byte size field + 39 bytes of header fields */
        s->compression  = bytestream_get_byte(&buf);
        s->bpp          = bytestream_get_byte(&buf);
        s->ham          = bytestream_get_byte(&buf);
        s->flags        = bytestream_get_byte(&buf);
        s->transparency = bytestream_get_be16(&buf);
        s->masking      = bytestream_get_byte(&buf);
        for (i = 0; i < 16; i++)
            s->tvdc[i] = bytestream_get_be16(&buf);

        if (s->masking == MASK_HAS_MASK) {
            if (s->bpp >= 8 && !s->ham) {
                /* masked >=8bpp non-HAM images decode to RGB32 through an
                 * index buffer and a doubled palette (see ff_cmap_read_palette) */
                avctx->pix_fmt = AV_PIX_FMT_RGB32;
                av_freep(&s->mask_buf);
                av_freep(&s->mask_palbuf);
                s->mask_buf = av_malloc((s->planesize * 32) + FF_INPUT_BUFFER_PADDING_SIZE);
                if (!s->mask_buf)
                    return AVERROR(ENOMEM);
                if (s->bpp > 16) {
                    av_log(avctx, AV_LOG_ERROR, "bpp %d too large for palette\n", s->bpp);
                    av_freep(&s->mask_buf);
                    return AVERROR(ENOMEM);
                }
                /* 2 << bpp entries: normal palette plus the opaque copy */
                s->mask_palbuf = av_malloc((2 << s->bpp) * sizeof(uint32_t) + FF_INPUT_BUFFER_PADDING_SIZE);
                if (!s->mask_palbuf) {
                    av_freep(&s->mask_buf);
                    return AVERROR(ENOMEM);
                }
            }
            s->bpp++; /* the mask is stored as one extra bitplane */
        } else if (s->masking != MASK_NONE && s->masking != MASK_HAS_TRANSPARENT_COLOR) {
            av_log(avctx, AV_LOG_ERROR, "Masking not supported\n");
            return AVERROR_PATCHWELCOME;
        }
        if (!s->bpp || s->bpp > 32) {
            av_log(avctx, AV_LOG_ERROR, "Invalid number of bitplanes: %u\n", s->bpp);
            return AVERROR_INVALIDDATA;
        } else if (s->ham >= 8) {
            av_log(avctx, AV_LOG_ERROR, "Invalid number of hold bits for HAM: %u\n", s->ham);
            return AVERROR_INVALIDDATA;
        }

        av_freep(&s->ham_buf);
        av_freep(&s->ham_palbuf);

        if (s->ham) {
            /* Build the HAM decode table as (mask, value) pairs per index:
             * entries 0..count-1 load a palette color, the following three
             * groups of count entries modify only the blue, red or green
             * component of the previous pixel (hold-and-modify). */
            int i, count = FFMIN(palette_size / 3, 1 << s->ham);
            int ham_count;
            const uint8_t *const palette = avctx->extradata + AV_RB16(avctx->extradata);

            s->ham_buf = av_malloc((s->planesize * 8) + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!s->ham_buf)
                return AVERROR(ENOMEM);

            ham_count = 8 * (1 << s->ham);
            /* doubled when a mask plane is present, as with mask_palbuf */
            s->ham_palbuf = av_malloc((ham_count << !!(s->masking == MASK_HAS_MASK)) * sizeof (uint32_t) + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!s->ham_palbuf) {
                av_freep(&s->ham_buf);
                return AVERROR(ENOMEM);
            }

            if (count) { // HAM with color palette attached
                // prefill with black and palette and set HAM take direct value mask to zero
                memset(s->ham_palbuf, 0, (1 << s->ham) * 2 * sizeof (uint32_t));
                for (i = 0; i < count; i++) {
                    s->ham_palbuf[i*2+1] = 0xFF000000 | AV_RL24(palette + i*3);
                }
                count = 1 << s->ham;
            } else { // HAM with grayscale color palette
                count = 1 << s->ham;
                for (i = 0; i < count; i++) {
                    s->ham_palbuf[i*2]   = 0xFF000000; // take direct color value from palette
                    s->ham_palbuf[i*2+1] = 0xFF000000 | av_le2ne32(gray2rgb((i * 255) >> s->ham));
                }
            }
            for (i = 0; i < count; i++) {
                uint32_t tmp = i << (8 - s->ham);
                tmp |= tmp >> s->ham; /* replicate top bits into the low bits */
                s->ham_palbuf[(i+count)*2]     = 0xFF00FFFF; // just modify blue color component
                s->ham_palbuf[(i+count*2)*2]   = 0xFFFFFF00; // just modify red color component
                s->ham_palbuf[(i+count*3)*2]   = 0xFFFF00FF; // just modify green color component
                s->ham_palbuf[(i+count)*2+1]   = 0xFF000000 | tmp << 16;
                s->ham_palbuf[(i+count*2)*2+1] = 0xFF000000 | tmp;
                s->ham_palbuf[(i+count*3)*2+1] = 0xFF000000 | tmp << 8;
            }
            if (s->masking == MASK_HAS_MASK) {
                /* second half of the table: same entries with alpha forced on */
                for (i = 0; i < ham_count; i++)
                    s->ham_palbuf[(1 << s->bpp) + i] = s->ham_palbuf[i] | 0xFF000000;
            }
        }
    }
    return 0;
}
/**
 * Decoder initialization: choose the output pixel format from
 * bits_per_coded_sample and the codec tag, allocate the per-plane scratch
 * buffer and parse the extradata header via extract_header().
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    IffContext *s = avctx->priv_data;
    int err;

    if (avctx->bits_per_coded_sample <= 8) {
        /* paletted if a CMAP is present (or bpp < 8), grayscale otherwise */
        int palette_size;

        if (avctx->extradata_size >= 2)
            palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
        else
            palette_size = 0;
        avctx->pix_fmt = (avctx->bits_per_coded_sample < 8) ||
                         (avctx->extradata_size >= 2 && palette_size) ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
    } else if (avctx->bits_per_coded_sample <= 32) {
        /* DEEP keeps the format chosen by extract_header/defaults */
        if (avctx->codec_tag != MKTAG('D','E','E','P')) {
            if (avctx->bits_per_coded_sample == 24) {
                avctx->pix_fmt = AV_PIX_FMT_RGB0;
            } else if (avctx->bits_per_coded_sample == 32) {
                avctx->pix_fmt = AV_PIX_FMT_BGR32;
            } else {
                av_log_ask_for_sample(avctx, "unknown bits_per_coded_sample\n");
                return AVERROR_PATCHWELCOME;
            }
        }
    } else {
        return AVERROR_INVALIDDATA;
    }

    if ((err = av_image_check_size(avctx->width, avctx->height, 0, avctx)))
        return err;
    s->planesize = FFALIGN(avctx->width, 16) >> 3; // Align plane size in bits to word-boundary
    s->planebuf = av_malloc(s->planesize + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!s->planebuf)
        return AVERROR(ENOMEM);

    s->bpp = avctx->bits_per_coded_sample;
    avcodec_get_frame_defaults(&s->frame);

    if ((err = extract_header(avctx, NULL)) < 0)
        return err;
    s->frame.reference = 3;

    return 0;
}
  336. /**
  337. * Decode interleaved plane buffer up to 8bpp
  338. * @param dst Destination buffer
  339. * @param buf Source buffer
  340. * @param buf_size
  341. * @param plane plane number to decode as
  342. */
  343. static void decodeplane8(uint8_t *dst, const uint8_t *buf, int buf_size, int plane)
  344. {
  345. const uint64_t *lut = plane8_lut[plane];
  346. if (plane >= 8) {
  347. av_log(NULL, AV_LOG_WARNING, "Ignoring extra planes beyond 8\n");
  348. return;
  349. }
  350. do {
  351. uint64_t v = AV_RN64A(dst) | lut[*buf++];
  352. AV_WN64A(dst, v);
  353. dst += 8;
  354. } while (--buf_size);
  355. }
  356. /**
  357. * Decode interleaved plane buffer up to 24bpp
  358. * @param dst Destination buffer
  359. * @param buf Source buffer
  360. * @param buf_size
  361. * @param plane plane number to decode as
  362. */
  363. static void decodeplane32(uint32_t *dst, const uint8_t *buf, int buf_size, int plane)
  364. {
  365. const uint32_t *lut = plane32_lut[plane];
  366. do {
  367. unsigned mask = (*buf >> 2) & ~3;
  368. dst[0] |= lut[mask++];
  369. dst[1] |= lut[mask++];
  370. dst[2] |= lut[mask++];
  371. dst[3] |= lut[mask];
  372. mask = (*buf++ << 2) & 0x3F;
  373. dst[4] |= lut[mask++];
  374. dst[5] |= lut[mask++];
  375. dst[6] |= lut[mask++];
  376. dst[7] |= lut[mask];
  377. dst += 8;
  378. } while (--buf_size);
  379. }
  380. #define DECODE_HAM_PLANE32(x) \
  381. first = buf[x] << 1; \
  382. second = buf[(x)+1] << 1; \
  383. delta &= pal[first++]; \
  384. delta |= pal[first]; \
  385. dst[x] = delta; \
  386. delta &= pal[second++]; \
  387. delta |= pal[second]; \
  388. dst[(x)+1] = delta
  389. /**
  390. * Converts one line of HAM6/8-encoded chunky buffer to 24bpp.
  391. *
  392. * @param dst the destination 24bpp buffer
  393. * @param buf the source 8bpp chunky buffer
  394. * @param pal the HAM decode table
  395. * @param buf_size the plane size in bytes
  396. */
  397. static void decode_ham_plane32(uint32_t *dst, const uint8_t *buf,
  398. const uint32_t *const pal, unsigned buf_size)
  399. {
  400. uint32_t delta = pal[1]; /* first palette entry */
  401. do {
  402. uint32_t first, second;
  403. DECODE_HAM_PLANE32(0);
  404. DECODE_HAM_PLANE32(2);
  405. DECODE_HAM_PLANE32(4);
  406. DECODE_HAM_PLANE32(6);
  407. buf += 8;
  408. dst += 8;
  409. } while (--buf_size);
  410. }
  411. static void lookup_pal_indicies(uint32_t *dst, const uint32_t *buf,
  412. const uint32_t *const pal, unsigned width)
  413. {
  414. do {
  415. *dst++ = pal[*buf++];
  416. } while (--width);
  417. }
  418. /**
  419. * Decode one complete byterun1 encoded line.
  420. *
  421. * @param dst the destination buffer where to store decompressed bitstream
  422. * @param dst_size the destination plane size in bytes
  423. * @param buf the source byterun1 compressed bitstream
  424. * @param buf_end the EOF of source byterun1 compressed bitstream
  425. * @return number of consumed bytes in byterun1 compressed bitstream
  426. */
  427. static int decode_byterun(uint8_t *dst, int dst_size,
  428. const uint8_t *buf, const uint8_t *const buf_end) {
  429. const uint8_t *const buf_start = buf;
  430. unsigned x;
  431. for (x = 0; x < dst_size && buf < buf_end;) {
  432. unsigned length;
  433. const int8_t value = *buf++;
  434. if (value >= 0) {
  435. length = value + 1;
  436. memcpy(dst + x, buf, FFMIN3(length, dst_size - x, buf_end - buf));
  437. buf += length;
  438. } else if (value > -128) {
  439. length = -value + 1;
  440. memset(dst + x, *buf++, FFMIN(length, dst_size - x));
  441. } else { // noop
  442. continue;
  443. }
  444. x += length;
  445. }
  446. return buf - buf_start;
  447. }
/**
 * Decode DEEP RLE 32-bit buffer
 * @param[out] dst Destination buffer
 * @param[in] src Source buffer
 * @param src_size Source buffer size (bytes)
 * @param width Width of destination buffer (pixels)
 * @param height Height of destination buffer (pixels)
 * @param linesize Line size of destination buffer (bytes)
 */
static void decode_deep_rle32(uint8_t *dst, const uint8_t *src, int src_size, int width, int height, int linesize)
{
    const uint8_t *src_end = src + src_size;
    int x = 0, y = 0, i;

    /* each step needs at least one opcode byte plus one 32-bit pixel */
    while (src + 5 <= src_end) {
        int opcode;
        opcode = *(int8_t *)src++;
        if (opcode >= 0) {
            /* literal: copy opcode+1 pixels, wrapping at row boundaries */
            int size = opcode + 1;
            for (i = 0; i < size; i++) {
                int length = FFMIN(size - i, width);
                /* NOTE(review): three things look suspicious here for runs
                 * that straddle a row boundary: length is clamped to the
                 * full row width rather than width - x, i advances by
                 * length + 1 per pass (explicit i += length plus the loop's
                 * i++), and the memcpy is not bounded by src_end — verify
                 * against the DEEP format spec before changing. */
                memcpy(dst + y*linesize + x * 4, src, length * 4);
                src += length * 4;
                x += length;
                i += length;
                if (x >= width) {
                    x = 0;
                    y += 1;
                    if (y >= height)
                        return;
                }
            }
        } else {
            /* run: repeat one little-endian 32-bit pixel -opcode+1 times */
            int size = -opcode + 1;
            uint32_t pixel = AV_RL32(src);
            for (i = 0; i < size; i++) {
                *(uint32_t *)(dst + y*linesize + x * 4) = pixel;
                x += 1;
                if (x >= width) {
                    x = 0;
                    y += 1;
                    if (y >= height)
                        return;
                }
            }
            src += 4;
        }
    }
}
/**
 * Decode DEEP TVDC 32-bit buffer
 * @param[out] dst Destination buffer
 * @param[in] src Source buffer
 * @param src_size Source buffer size (bytes)
 * @param width Width of destination buffer (pixels)
 * @param height Height of destination buffer (pixels)
 * @param linesize Line size of destination buffer (bytes)
 * @param[in] tvdc TVDC lookup table of 16 delta values
 */
static void decode_deep_tvdc32(uint8_t *dst, const uint8_t *src, int src_size, int width, int height, int linesize, const int16_t *tvdc)
{
    int x = 0, y = 0, plane = 0;
    int8_t pixel = 0;   /* running sample value; int8_t so deltas wrap mod 256 */
    int i, j;           /* i counts nibbles (half-bytes), not bytes */

    for (i = 0; i < src_size * 2;) {
#define GETNIBBLE ((i & 1) ? (src[i>>1] & 0xF) : (src[i>>1] >> 4))
        int d = tvdc[GETNIBBLE];
        i++;
        if (d) {
            /* non-zero table delta: accumulate and emit one sample */
            pixel += d;
            dst[y * linesize + x*4 + plane] = pixel;
            x++;
        } else {
            /* zero delta: the next nibble encodes a repeat count - 1 */
            if (i >= src_size * 2)
                return;
            d = GETNIBBLE + 1;
            i++;
            d = FFMIN(d, width - x);
            for (j = 0; j < d; j++) {
                dst[y * linesize + x*4 + plane] = pixel;
                x++;
            }
        }
        if (x >= width) {
            /* finished one row of this plane: next plane, then next row;
             * samples of the 4 planes interleave as bytes of each pixel */
            plane++;
            if (plane >= 4) {
                y++;
                if (y >= height)
                    return;
                plane = 0;
            }
            x = 0;
            pixel = 0;
            i = (i + 1) & ~1;   /* round up: each plane row starts on a byte boundary */
        }
    }
}
  544. static int unsupported(AVCodecContext *avctx)
  545. {
  546. IffContext *s = avctx->priv_data;
  547. av_log_ask_for_sample(avctx, "unsupported bitmap (compression %i, bpp %i, ham %i)\n", s->compression, s->bpp, s->ham);
  548. return AVERROR_INVALIDDATA;
  549. }
/**
 * Decode one IFF bitmap frame.
 *
 * The packet starts with a BE16 offset to the image data; the header bytes
 * in between are parsed by extract_header(). Decoding then dispatches on
 * the compression method (0 = uncompressed, 1 = byterun1 RLE, 5 = TVDC)
 * and on the codec tag (ACBM/DEEP/ILBM/PBM).
 */
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    IffContext *s = avctx->priv_data;
    const uint8_t *buf = avpkt->size >= 2 ? avpkt->data + AV_RB16(avpkt->data) : NULL;
    const int buf_size = avpkt->size >= 2 ? avpkt->size - AV_RB16(avpkt->data) : 0;
    const uint8_t *buf_end = buf + buf_size;
    int y, plane, res;

    if ((res = extract_header(avctx, avpkt)) < 0)
        return res;

    if (s->init) {
        if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return res;
        }
    } else if ((res = ff_get_buffer(avctx, &s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return res;
    } else if (avctx->bits_per_coded_sample <= 8 && avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        /* first frame only: load the CMAP palette into the frame */
        if ((res = ff_cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0)
            return res;
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB32 && avctx->bits_per_coded_sample <= 8) {
        /* masked low-bpp image rendered as RGB32: palette goes to mask table */
        if ((res = ff_cmap_read_palette(avctx, s->mask_palbuf)) < 0)
            return res;
    }
    s->init = 1;

    switch (s->compression) {
    case 0: /* uncompressed */
        if (avctx->codec_tag == MKTAG('A','C','B','M')) {
            /* ACBM stores whole contiguous bitplanes, not interleaved rows */
            if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                memset(s->frame.data[0], 0, avctx->height * s->frame.linesize[0]);
                for (plane = 0; plane < s->bpp; plane++) {
                    for (y = 0; y < avctx->height && buf < buf_end; y++) {
                        uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                        decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), plane);
                        buf += s->planesize;
                    }
                }
            } else if (s->ham) { // HAM to AV_PIX_FMT_BGR32
                memset(s->frame.data[0], 0, avctx->height * s->frame.linesize[0]);
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                    memset(s->ham_buf, 0, s->planesize * 8);
                    for (plane = 0; plane < s->bpp; plane++) {
                        const uint8_t *start = buf + (plane * avctx->height + y) * s->planesize;
                        if (start >= buf_end)
                            break;
                        decodeplane8(s->ham_buf, start, FFMIN(s->planesize, buf_end - start), plane);
                    }
                    decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
                }
            } else
                return unsupported(avctx);
        } else if (avctx->codec_tag == MKTAG('D','E','E','P')) {
            /* DEEP raw: rows stored as packed RGB(A) directly */
            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
            int raw_width = avctx->width * (av_get_bits_per_pixel(desc) >> 3);
            int x;
            for (y = 0; y < avctx->height && buf < buf_end; y++) {
                uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                memcpy(row, buf, FFMIN(raw_width, buf_end - buf));
                buf += raw_width;
                if (avctx->pix_fmt == AV_PIX_FMT_BGR32) {
                    /* expand the stored 4-bit alpha to 8 bits */
                    for (x = 0; x < avctx->width; x++)
                        row[4 * x + 3] = row[4 * x + 3] & 0xF0 | (row[4 * x + 3] >> 4);
                }
            }
        } else if (avctx->codec_tag == MKTAG('I','L','B','M')) { // interleaved
            if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                    memset(row, 0, avctx->width);
                    for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                        decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), plane);
                        buf += s->planesize;
                    }
                }
            } else if (s->ham) { // HAM to AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                    memset(s->ham_buf, 0, s->planesize * 8);
                    for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                        decodeplane8(s->ham_buf, buf, FFMIN(s->planesize, buf_end - buf), plane);
                        buf += s->planesize;
                    }
                    decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
                }
            } else { // AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                    memset(row, 0, avctx->width << 2);
                    for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                        decodeplane32((uint32_t *)row, buf, FFMIN(s->planesize, buf_end - buf), plane);
                        buf += s->planesize;
                    }
                }
            }
        } else if (avctx->codec_tag == MKTAG('P','B','M',' ')) { // IFF-PBM
            /* PBM is already chunky; rows are word-padded */
            if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                for (y = 0; y < avctx->height && buf_end > buf; y++) {
                    uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                    memcpy(row, buf, FFMIN(avctx->width, buf_end - buf));
                    buf += avctx->width + (avctx->width % 2); // padding if odd
                }
            } else if (s->ham) { // IFF-PBM: HAM to AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height && buf_end > buf; y++) {
                    uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                    memcpy(s->ham_buf, buf, FFMIN(avctx->width, buf_end - buf));
                    buf += avctx->width + (avctx->width & 1); // padding if odd
                    decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
                }
            } else
                return unsupported(avctx);
        }
        break;
    case 1: /* byterun1 RLE */
        if (avctx->codec_tag == MKTAG('I','L','B','M')) { //interleaved
            if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                    memset(row, 0, avctx->width);
                    for (plane = 0; plane < s->bpp; plane++) {
                        buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                        decodeplane8(row, s->planebuf, s->planesize, plane);
                    }
                }
            } else if (avctx->bits_per_coded_sample <= 8) { //8-bit (+ mask) to AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                    memset(s->mask_buf, 0, avctx->width * sizeof(uint32_t));
                    for (plane = 0; plane < s->bpp; plane++) {
                        buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                        decodeplane32(s->mask_buf, s->planebuf, s->planesize, plane);
                    }
                    lookup_pal_indicies((uint32_t *)row, s->mask_buf, s->mask_palbuf, avctx->width);
                }
            } else if (s->ham) { // HAM to AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                    memset(s->ham_buf, 0, s->planesize * 8);
                    for (plane = 0; plane < s->bpp; plane++) {
                        buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                        decodeplane8(s->ham_buf, s->planebuf, s->planesize, plane);
                    }
                    decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
                }
            } else { //AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                    memset(row, 0, avctx->width << 2);
                    for (plane = 0; plane < s->bpp; plane++) {
                        buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                        decodeplane32((uint32_t *)row, s->planebuf, s->planesize, plane);
                    }
                }
            }
        } else if (avctx->codec_tag == MKTAG('P','B','M',' ')) { // IFF-PBM
            if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                    buf += decode_byterun(row, avctx->width, buf, buf_end);
                }
            } else if (s->ham) { // IFF-PBM: HAM to AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                    buf += decode_byterun(s->ham_buf, avctx->width, buf, buf_end);
                    decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
                }
            } else
                return unsupported(avctx);
        } else if (avctx->codec_tag == MKTAG('D','E','E','P')) { // IFF-DEEP
            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
            if (av_get_bits_per_pixel(desc) == 32)
                decode_deep_rle32(s->frame.data[0], buf, buf_size, avctx->width, avctx->height, s->frame.linesize[0]);
            else
                return unsupported(avctx);
        }
        break;
    case 5: /* DEEP TVDC delta coding */
        if (avctx->codec_tag == MKTAG('D','E','E','P')) {
            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
            if (av_get_bits_per_pixel(desc) == 32)
                decode_deep_tvdc32(s->frame.data[0], buf, buf_size, avctx->width, avctx->height, s->frame.linesize[0], s->tvdc);
            else
                return unsupported(avctx);
        } else
            return unsupported(avctx);
        break;
    default:
        return unsupported(avctx);
    }

    *got_frame = 1;
    *(AVFrame*)data = s->frame;
    return buf_size;
}
  745. static av_cold int decode_end(AVCodecContext *avctx)
  746. {
  747. IffContext *s = avctx->priv_data;
  748. if (s->frame.data[0])
  749. avctx->release_buffer(avctx, &s->frame);
  750. av_freep(&s->planebuf);
  751. av_freep(&s->ham_buf);
  752. av_freep(&s->ham_palbuf);
  753. return 0;
  754. }
#if CONFIG_IFF_ILBM_DECODER
/* decoder registration for the ILBM codec id */
AVCodec ff_iff_ilbm_decoder = {
    .name           = "iff",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_IFF_ILBM,
    .priv_data_size = sizeof(IffContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("IFF"),
};
#endif
#if CONFIG_IFF_BYTERUN1_DECODER
/* decoder registration for the byterun1 codec id; shares all callbacks */
AVCodec ff_iff_byterun1_decoder = {
    .name           = "iff",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_IFF_BYTERUN1,
    .priv_data_size = sizeof(IffContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("IFF"),
};
#endif