You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

922 lines
36KB

  1. /*
  2. * IFF ACBM/DEEP/ILBM/PBM bitmap decoder
  3. * Copyright (c) 2010 Peter Ross <pross@xvid.org>
  4. * Copyright (c) 2010 Sebastian Vater <cdgs.basty@googlemail.com>
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file
  24. * IFF ACBM/DEEP/ILBM/PBM bitmap decoder
  25. */
  26. #include <stdint.h>
  27. #include "libavutil/imgutils.h"
  28. #include "bytestream.h"
  29. #include "avcodec.h"
  30. #include "get_bits.h"
  31. #include "internal.h"
  32. // TODO: masking bits
/**
 * Masking methods, as read from the per-frame header byte in
 * extract_header(). Values mirror the ILBM BMHD masking field.
 */
typedef enum {
    MASK_NONE,                  ///< image is fully opaque
    MASK_HAS_MASK,              ///< an extra mask bitplane accompanies the color planes
    MASK_HAS_TRANSPARENT_COLOR, ///< one palette index (s->transparency) is transparent
    MASK_LASSO                  ///< lasso masking -- rejected as unsupported in extract_header()
} mask_type;
/** Private decoder state, persisted across decode_frame() calls. */
typedef struct IffContext {
    AVFrame *frame;        ///< reference frame, reused via ff_reget_buffer()
    int planesize;         ///< bytes per bitplane row (width aligned to 16 bits, see decode_init)
    uint8_t * planebuf;    ///< scratch buffer for one decompressed plane row
    uint8_t * ham_buf;     ///< temporary buffer for planar to chunky conversion
    uint32_t *ham_palbuf;  ///< HAM decode table
    uint32_t *mask_buf;    ///< temporary buffer for palette indices
    uint32_t *mask_palbuf; ///< masking palette table
    unsigned compression;  ///< delta compression method used
    unsigned bpp;          ///< bits per plane to decode (differs from bits_per_coded_sample if HAM)
    unsigned ham;          ///< 0 if non-HAM or number of hold bits (6 for bpp > 6, 4 otherwise)
    unsigned flags;        ///< 1 for EHB, 0 is no extra half darkening
    unsigned transparency; ///< TODO: transparency color index in palette
    unsigned masking;      ///< TODO: masking method used
    int init;              ///< 1 if buffer and palette data already initialized, 0 otherwise
    int16_t tvdc[16];      ///< TVDC lookup table
} IffContext;
/*
 * LUT8 builds, for one plane number, a 256-entry table that maps a byte of
 * bitplane data to a 64-bit word whose 8 bytes are each either 0 or
 * (1 << plane). decodeplane8() ORs such words onto 8 consecutive
 * palette-index bytes at once. AV_LE2NE64C fixes up the byte order for
 * big-endian hosts at compile time.
 */
#define LUT8_PART(plane, v) \
    AV_LE2NE64C(UINT64_C(0x0000000)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1000000)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0010000)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1010000)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0000100)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1000100)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0010100)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1010100)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0000001)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1000001)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0010001)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1010001)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0000101)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1000101)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0010101)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1010101)<<32 | v) << plane

/* One full 256-entry table: high nibble selects the LUT8_PART row, low
 * nibble selects the entry within it. */
#define LUT8(plane) { \
    LUT8_PART(plane, 0x0000000), \
    LUT8_PART(plane, 0x1000000), \
    LUT8_PART(plane, 0x0010000), \
    LUT8_PART(plane, 0x1010000), \
    LUT8_PART(plane, 0x0000100), \
    LUT8_PART(plane, 0x1000100), \
    LUT8_PART(plane, 0x0010100), \
    LUT8_PART(plane, 0x1010100), \
    LUT8_PART(plane, 0x0000001), \
    LUT8_PART(plane, 0x1000001), \
    LUT8_PART(plane, 0x0010001), \
    LUT8_PART(plane, 0x1010001), \
    LUT8_PART(plane, 0x0000101), \
    LUT8_PART(plane, 0x1000101), \
    LUT8_PART(plane, 0x0010101), \
    LUT8_PART(plane, 0x1010101), \
}

// 8 planes * 8-bit mask
static const uint64_t plane8_lut[8][256] = {
    LUT8(0), LUT8(1), LUT8(2), LUT8(3),
    LUT8(4), LUT8(5), LUT8(6), LUT8(7),
};
/*
 * LUT32 builds a table of 16 nibble values x 4 32-bit words; each word is
 * either 0 or (1U << plane). decodeplane32() indexes it with
 * (nibble * 4) and ORs the 4 words onto 4 consecutive output pixels.
 */
#define LUT32(plane) { \
    0, 0, 0, 0, \
    0, 0, 0, 1U << plane, \
    0, 0, 1U << plane, 0, \
    0, 0, 1U << plane, 1U << plane, \
    0, 1U << plane, 0, 0, \
    0, 1U << plane, 0, 1U << plane, \
    0, 1U << plane, 1U << plane, 0, \
    0, 1U << plane, 1U << plane, 1U << plane, \
    1U << plane, 0, 0, 0, \
    1U << plane, 0, 0, 1U << plane, \
    1U << plane, 0, 1U << plane, 0, \
    1U << plane, 0, 1U << plane, 1U << plane, \
    1U << plane, 1U << plane, 0, 0, \
    1U << plane, 1U << plane, 0, 1U << plane, \
    1U << plane, 1U << plane, 1U << plane, 0, \
    1U << plane, 1U << plane, 1U << plane, 1U << plane, \
}

// 32 planes * 4-bit mask * 4 lookup tables each
static const uint32_t plane32_lut[32][16*4] = {
    LUT32( 0), LUT32( 1), LUT32( 2), LUT32( 3),
    LUT32( 4), LUT32( 5), LUT32( 6), LUT32( 7),
    LUT32( 8), LUT32( 9), LUT32(10), LUT32(11),
    LUT32(12), LUT32(13), LUT32(14), LUT32(15),
    LUT32(16), LUT32(17), LUT32(18), LUT32(19),
    LUT32(20), LUT32(21), LUT32(22), LUT32(23),
    LUT32(24), LUT32(25), LUT32(26), LUT32(27),
    LUT32(28), LUT32(29), LUT32(30), LUT32(31),
};
  125. // Gray to RGB, required for palette table of grayscale images with bpp < 8
  126. static av_always_inline uint32_t gray2rgb(const uint32_t x) {
  127. return x << 16 | x << 8 | x;
  128. }
/**
 * Convert CMAP buffer (stored in extradata) to lavc palette format.
 *
 * @param avctx codec context; CMAP bytes live at extradata + AV_RB16(extradata)
 * @param pal   output palette, 0xAARRGGBB entries; must be large enough for
 *              2 << bits_per_coded_sample entries when a mask plane is present
 * @return 0 on success, a negative error code otherwise
 */
static int cmap_read_palette(AVCodecContext *avctx, uint32_t *pal)
{
    IffContext *s = avctx->priv_data;
    int count, i;
    // Palette data follows the 16-bit header-size field inside extradata.
    const uint8_t *const palette = avctx->extradata + AV_RB16(avctx->extradata);
    int palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
    if (avctx->bits_per_coded_sample > 8) {
        av_log(avctx, AV_LOG_ERROR, "bits_per_coded_sample > 8 not supported\n");
        return AVERROR_INVALIDDATA;
    }
    count = 1 << avctx->bits_per_coded_sample;
    // If extradata is smaller than actually needed, fill the remaining with black.
    count = FFMIN(palette_size / 3, count);
    if (count) {
        for (i = 0; i < count; i++)
            pal[i] = 0xFF000000 | AV_RB24(palette + i*3);
        if (s->flags && count >= 32) { // EHB: entries 32..63 are half-brightness copies of 0..31
            for (i = 0; i < 32; i++)
                pal[i + 32] = 0xFF000000 | (AV_RB24(palette + i*3) & 0xFEFEFE) >> 1;
            count = FFMAX(count, 64);
        }
    } else { // Create gray-scale color palette for bps < 8
        count = 1 << avctx->bits_per_coded_sample;
        for (i = 0; i < count; i++)
            pal[i] = 0xFF000000 | gray2rgb((i * 255) >> avctx->bits_per_coded_sample);
    }
    if (s->masking == MASK_HAS_MASK) {
        // Duplicate the palette: lower half transparent, upper half opaque.
        if ((1 << avctx->bits_per_coded_sample) < count) {
            avpriv_request_sample(avctx, "overlapping mask");
            return AVERROR_PATCHWELCOME;
        }
        memcpy(pal + (1 << avctx->bits_per_coded_sample), pal, count * 4);
        for (i = 0; i < count; i++)
            pal[i] &= 0xFFFFFF; // clear alpha on the maskable copies
    } else if (s->masking == MASK_HAS_TRANSPARENT_COLOR &&
               s->transparency < 1 << avctx->bits_per_coded_sample)
        pal[s->transparency] &= 0xFFFFFF; // make the transparent index fully transparent
    return 0;
}
/**
 * Extracts the IFF extra context and updates internal
 * decoder structures.
 *
 * The header blob (either at the start of the packet or in extradata) is:
 * 16-bit size, then at offset 2: compression, bpp, ham, flags,
 * 16-bit transparency, masking, and 16 big-endian TVDC table entries
 * (requiring size >= 41 for the full set to be present).
 *
 * @param avctx the AVCodecContext where to extract extra context to
 * @param avpkt the AVPacket to extract extra context from or NULL to use avctx
 * @return >= 0 in case of success, a negative error code otherwise
 */
static int extract_header(AVCodecContext *const avctx,
                          const AVPacket *const avpkt) {
    const uint8_t *buf;
    unsigned buf_size;
    IffContext *s = avctx->priv_data;
    int i, palette_size;

    if (avctx->extradata_size < 2) {
        av_log(avctx, AV_LOG_ERROR, "not enough extradata\n");
        return AVERROR_INVALIDDATA;
    }
    palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);

    // Locate the header: per-frame (packet) or global (extradata).
    if (avpkt) {
        int image_size;
        if (avpkt->size < 2)
            return AVERROR_INVALIDDATA;
        image_size = avpkt->size - AV_RB16(avpkt->data);
        buf = avpkt->data;
        buf_size = bytestream_get_be16(&buf);
        if (buf_size <= 1 || image_size <= 1) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid image size received: %u -> image data offset: %d\n",
                   buf_size, image_size);
            return AVERROR_INVALIDDATA;
        }
    } else {
        buf = avctx->extradata;
        buf_size = bytestream_get_be16(&buf);
        if (buf_size <= 1 || palette_size < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid palette size received: %u -> palette data offset: %d\n",
                   buf_size, palette_size);
            return AVERROR_INVALIDDATA;
        }
    }

    if (buf_size >= 41) { // full header present (7 + 2 + 32 TVDC bytes)
        s->compression  = bytestream_get_byte(&buf);
        s->bpp          = bytestream_get_byte(&buf);
        s->ham          = bytestream_get_byte(&buf);
        s->flags        = bytestream_get_byte(&buf);
        s->transparency = bytestream_get_be16(&buf);
        s->masking      = bytestream_get_byte(&buf);
        for (i = 0; i < 16; i++)
            s->tvdc[i] = bytestream_get_be16(&buf);

        // HAM sanity: hold bits must be 6 for bpp > 6, else 4.
        if (s->ham) {
            if (s->bpp > 8) {
                av_log(avctx, AV_LOG_ERROR, "Invalid number of hold bits for HAM: %u\n", s->ham);
                return AVERROR_INVALIDDATA;
            }
            if (s->ham != (s->bpp > 6 ? 6 : 4)) {
                av_log(avctx, AV_LOG_ERROR, "Invalid number of hold bits for HAM: %u, BPP: %u\n", s->ham, s->bpp);
                return AVERROR_INVALIDDATA;
            }
        }

        if (s->masking == MASK_HAS_MASK) {
            if (s->bpp >= 8 && !s->ham) {
                // Mask plane with >= 8 bpp: decode through a palette into RGB32.
                avctx->pix_fmt = AV_PIX_FMT_RGB32;
                av_freep(&s->mask_buf);
                av_freep(&s->mask_palbuf);
                s->mask_buf = av_malloc((s->planesize * 32) + AV_INPUT_BUFFER_PADDING_SIZE);
                if (!s->mask_buf)
                    return AVERROR(ENOMEM);
                if (s->bpp > 16) {
                    av_log(avctx, AV_LOG_ERROR, "bpp %d too large for palette\n", s->bpp);
                    av_freep(&s->mask_buf);
                    return AVERROR(ENOMEM);
                }
                // 2 << bpp entries: palette doubled for transparent/opaque halves.
                s->mask_palbuf = av_malloc((2 << s->bpp) * sizeof(uint32_t) + AV_INPUT_BUFFER_PADDING_SIZE);
                if (!s->mask_palbuf) {
                    av_freep(&s->mask_buf);
                    return AVERROR(ENOMEM);
                }
            }
            s->bpp++; // the mask is decoded as one extra plane
        } else if (s->masking != MASK_NONE && s->masking != MASK_HAS_TRANSPARENT_COLOR) {
            av_log(avctx, AV_LOG_ERROR, "Masking not supported\n");
            return AVERROR_PATCHWELCOME;
        }
        if (!s->bpp || s->bpp > 32) {
            av_log(avctx, AV_LOG_ERROR, "Invalid number of bitplanes: %u\n", s->bpp);
            return AVERROR_INVALIDDATA;
        }

        // (Re)build the HAM decode tables.
        av_freep(&s->ham_buf);
        av_freep(&s->ham_palbuf);
        if (s->ham) {
            int i, count = FFMIN(palette_size / 3, 1 << s->ham);
            int ham_count;
            const uint8_t *const palette = avctx->extradata + AV_RB16(avctx->extradata);
            int extra_space = 1;

            if (avctx->codec_tag == MKTAG('P', 'B', 'M', ' ') && s->ham == 4)
                extra_space = 4; // PBM HAM4 needs room for the widened index range

            s->ham_buf = av_malloc((s->planesize * 8) + AV_INPUT_BUFFER_PADDING_SIZE);
            if (!s->ham_buf)
                return AVERROR(ENOMEM);

            // Entries come in (mask, value) pairs; 4 groups (palette, R, G, B mods).
            ham_count = 8 * (1 << s->ham);
            s->ham_palbuf = av_malloc(extra_space * (ham_count << !!(s->masking == MASK_HAS_MASK)) * sizeof (uint32_t) + AV_INPUT_BUFFER_PADDING_SIZE);
            if (!s->ham_palbuf) {
                av_freep(&s->ham_buf);
                return AVERROR(ENOMEM);
            }

            if (count) { // HAM with color palette attached
                // prefill with black and palette and set HAM take direct value mask to zero
                memset(s->ham_palbuf, 0, (1 << s->ham) * 2 * sizeof (uint32_t));
                for (i=0; i < count; i++) {
                    s->ham_palbuf[i*2+1] = 0xFF000000 | AV_RL24(palette + i*3);
                }
                count = 1 << s->ham;
            } else { // HAM with grayscale color palette
                count = 1 << s->ham;
                for (i=0; i < count; i++) {
                    s->ham_palbuf[i*2]   = 0xFF000000; // take direct color value from palette
                    s->ham_palbuf[i*2+1] = 0xFF000000 | av_le2ne32(gray2rgb((i * 255) >> s->ham));
                }
            }
            // Modify-groups: AND-mask keeps two components, OR-value replaces the third.
            for (i=0; i < count; i++) {
                uint32_t tmp = i << (8 - s->ham);
                tmp |= tmp >> s->ham; // replicate bits so max index maps to 0xFF
                s->ham_palbuf[(i+count)*2]     = 0xFF00FFFF; // just modify blue color component
                s->ham_palbuf[(i+count*2)*2]   = 0xFFFFFF00; // just modify red color component
                s->ham_palbuf[(i+count*3)*2]   = 0xFFFF00FF; // just modify green color component
                s->ham_palbuf[(i+count)*2+1]   = 0xFF000000 | tmp << 16;
                s->ham_palbuf[(i+count*2)*2+1] = 0xFF000000 | tmp;
                s->ham_palbuf[(i+count*3)*2+1] = 0xFF000000 | tmp << 8;
            }
            if (s->masking == MASK_HAS_MASK) {
                // Opaque copies for the masked upper half of the table.
                for (i = 0; i < ham_count; i++)
                    s->ham_palbuf[(1 << s->bpp) + i] = s->ham_palbuf[i] | 0xFF000000;
            }
        }
    }
    return 0;
}
  309. static av_cold int decode_end(AVCodecContext *avctx)
  310. {
  311. IffContext *s = avctx->priv_data;
  312. av_frame_free(&s->frame);
  313. av_freep(&s->planebuf);
  314. av_freep(&s->ham_buf);
  315. av_freep(&s->ham_palbuf);
  316. return 0;
  317. }
/**
 * Initialize the decoder: pick an output pixel format from
 * bits_per_coded_sample and codec_tag, allocate the per-row plane buffer,
 * and parse the global header stored in extradata.
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    IffContext *s = avctx->priv_data;
    int err;

    if (avctx->bits_per_coded_sample <= 8) {
        // PAL8 when a palette is present (or bpp < 8), GRAY8 otherwise.
        int palette_size;
        if (avctx->extradata_size >= 2)
            palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
        else
            palette_size = 0;
        avctx->pix_fmt = (avctx->bits_per_coded_sample < 8) ||
                         (avctx->extradata_size >= 2 && palette_size) ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
    } else if (avctx->bits_per_coded_sample <= 32) {
        if (avctx->codec_tag == MKTAG('R', 'G', 'B', '8')) {
            avctx->pix_fmt = AV_PIX_FMT_RGB32;
        } else if (avctx->codec_tag == MKTAG('R', 'G', 'B', 'N')) {
            avctx->pix_fmt = AV_PIX_FMT_RGB444;
        } else if (avctx->codec_tag != MKTAG('D', 'E', 'E', 'P')) {
            // DEEP keeps the format decision for later; others by bpp here.
            if (avctx->bits_per_coded_sample == 24) {
                avctx->pix_fmt = AV_PIX_FMT_0BGR32;
            } else if (avctx->bits_per_coded_sample == 32) {
                avctx->pix_fmt = AV_PIX_FMT_BGR32;
            } else {
                avpriv_request_sample(avctx, "unknown bits_per_coded_sample");
                return AVERROR_PATCHWELCOME;
            }
        }
    } else {
        return AVERROR_INVALIDDATA;
    }

    if ((err = av_image_check_size(avctx->width, avctx->height, 0, avctx)))
        return err;
    s->planesize = FFALIGN(avctx->width, 16) >> 3; // Align plane size in bits to word-boundary
    s->planebuf = av_malloc(s->planesize + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!s->planebuf)
        return AVERROR(ENOMEM);

    s->bpp = avctx->bits_per_coded_sample;
    s->frame = av_frame_alloc();
    if (!s->frame) {
        decode_end(avctx);
        return AVERROR(ENOMEM);
    }

    // Parse the global header from extradata (avpkt == NULL).
    if ((err = extract_header(avctx, NULL)) < 0)
        return err;
    return 0;
}
  364. /**
  365. * Decode interleaved plane buffer up to 8bpp
  366. * @param dst Destination buffer
  367. * @param buf Source buffer
  368. * @param buf_size
  369. * @param plane plane number to decode as
  370. */
  371. static void decodeplane8(uint8_t *dst, const uint8_t *buf, int buf_size, int plane)
  372. {
  373. const uint64_t *lut;
  374. if (plane >= 8) {
  375. av_log(NULL, AV_LOG_WARNING, "Ignoring extra planes beyond 8\n");
  376. return;
  377. }
  378. lut = plane8_lut[plane];
  379. do {
  380. uint64_t v = AV_RN64A(dst) | lut[*buf++];
  381. AV_WN64A(dst, v);
  382. dst += 8;
  383. } while (--buf_size);
  384. }
  385. /**
  386. * Decode interleaved plane buffer up to 24bpp
  387. * @param dst Destination buffer
  388. * @param buf Source buffer
  389. * @param buf_size
  390. * @param plane plane number to decode as
  391. */
  392. static void decodeplane32(uint32_t *dst, const uint8_t *buf, int buf_size, int plane)
  393. {
  394. const uint32_t *lut = plane32_lut[plane];
  395. do {
  396. unsigned mask = (*buf >> 2) & ~3;
  397. dst[0] |= lut[mask++];
  398. dst[1] |= lut[mask++];
  399. dst[2] |= lut[mask++];
  400. dst[3] |= lut[mask];
  401. mask = (*buf++ << 2) & 0x3F;
  402. dst[4] |= lut[mask++];
  403. dst[5] |= lut[mask++];
  404. dst[6] |= lut[mask++];
  405. dst[7] |= lut[mask];
  406. dst += 8;
  407. } while (--buf_size);
  408. }
/*
 * Emit pixels x and x+1 of a HAM line. Each source byte indexes a
 * (mask, value) pair in pal: the AND-mask keeps the held components of
 * `delta`, the OR-value sets the modified (or direct) color. `delta`
 * carries the hold-and-modify accumulator from pixel to pixel; `first`,
 * `second`, `delta`, `buf`, `dst` and `pal` must be in scope at expansion.
 */
#define DECODE_HAM_PLANE32(x) \
    first  = buf[x] << 1; \
    second = buf[(x)+1] << 1; \
    delta &= pal[first++]; \
    delta |= pal[first]; \
    dst[x] = delta; \
    delta &= pal[second++]; \
    delta |= pal[second]; \
    dst[(x)+1] = delta
/**
 * Converts one line of HAM6/8-encoded chunky buffer to 24bpp.
 *
 * @param dst the destination 24bpp buffer
 * @param buf the source 8bpp chunky buffer
 * @param pal the HAM decode table
 * @param buf_size the plane size in bytes; 8 pixels are emitted per unit,
 *                 and it must be > 0 (the do/while tests after the body)
 */
static void decode_ham_plane32(uint32_t *dst, const uint8_t *buf,
                               const uint32_t *const pal, unsigned buf_size)
{
    uint32_t delta = pal[1]; /* first palette entry */
    do {
        uint32_t first, second;
        // Unrolled: each macro emits two pixels and threads `delta` through.
        DECODE_HAM_PLANE32(0);
        DECODE_HAM_PLANE32(2);
        DECODE_HAM_PLANE32(4);
        DECODE_HAM_PLANE32(6);
        buf += 8;
        dst += 8;
    } while (--buf_size);
}
  440. static void lookup_pal_indicies(uint32_t *dst, const uint32_t *buf,
  441. const uint32_t *const pal, unsigned width)
  442. {
  443. do {
  444. *dst++ = pal[*buf++];
  445. } while (--width);
  446. }
  447. /**
  448. * Decode one complete byterun1 encoded line.
  449. *
  450. * @param dst the destination buffer where to store decompressed bitstream
  451. * @param dst_size the destination plane size in bytes
  452. * @param buf the source byterun1 compressed bitstream
  453. * @param buf_end the EOF of source byterun1 compressed bitstream
  454. * @return number of consumed bytes in byterun1 compressed bitstream
  455. */
  456. static int decode_byterun(uint8_t *dst, int dst_size,
  457. const uint8_t *buf, const uint8_t *const buf_end)
  458. {
  459. const uint8_t *const buf_start = buf;
  460. unsigned x;
  461. for (x = 0; x < dst_size && buf < buf_end;) {
  462. unsigned length;
  463. const int8_t value = *buf++;
  464. if (value >= 0) {
  465. length = FFMIN3(value + 1, dst_size - x, buf_end - buf);
  466. memcpy(dst + x, buf, length);
  467. buf += length;
  468. } else if (value > -128) {
  469. length = FFMIN(-value + 1, dst_size - x);
  470. memset(dst + x, *buf++, length);
  471. } else { // noop
  472. continue;
  473. }
  474. x += length;
  475. }
  476. if (x < dst_size) {
  477. av_log(NULL, AV_LOG_WARNING, "decode_byterun ended before plane size\n");
  478. memset(dst+x, 0, dst_size - x);
  479. }
  480. return buf - buf_start;
  481. }
/*
 * Shared pixel-writing loop for decode_rgb8()/decode_rgbn(). A zero run
 * length escapes to an 8-bit count, and a zero there to a 16-bit count;
 * a count of zero terminates the whole function. Expects `gb`, `dst`,
 * `pixel`, `length`, `i`, `x`, `y`, `width`, `height`, `linesize` in scope.
 */
#define DECODE_RGBX_COMMON(type) \
    if (!length) { \
        length = bytestream2_get_byte(gb); \
        if (!length) { \
            length = bytestream2_get_be16(gb); \
            if (!length) \
                return; \
        } \
    } \
    for (i = 0; i < length; i++) { \
        *(type *)(dst + y*linesize + x * sizeof(type)) = pixel; \
        x += 1; \
        if (x >= width) { \
            y += 1; \
            if (y >= height) \
                return; \
            x = 0; \
        } \
    }
/**
 * Decode RGB8 buffer
 * Each 4-byte unit is a 24-bit BGR color plus a 7-bit run length
 * (the top bit is masked off; a zero length escapes via DECODE_RGBX_COMMON).
 * @param[out] dst Destination buffer
 * @param width Width of destination buffer (pixels)
 * @param height Height of destination buffer (pixels)
 * @param linesize Line size of destination buffer (bytes)
 */
static void decode_rgb8(GetByteContext *gb, uint8_t *dst, int width, int height, int linesize)
{
    int x = 0, y = 0, i, length;
    while (bytestream2_get_bytes_left(gb) >= 4) {
        uint32_t pixel = 0xFF000000 | bytestream2_get_be24(gb);
        length = bytestream2_get_byte(gb) & 0x7F;
        DECODE_RGBX_COMMON(uint32_t)
    }
}
/**
 * Decode RGBN buffer
 * Each 16-bit unit packs a 12-bit color in the high bits and a 3-bit run
 * length in the low bits (the 4th low bit -- presumably the genlock bit --
 * is discarded by the >> 4; a zero length escapes via DECODE_RGBX_COMMON).
 * @param[out] dst Destination buffer
 * @param width Width of destination buffer (pixels)
 * @param height Height of destination buffer (pixels)
 * @param linesize Line size of destination buffer (bytes)
 */
static void decode_rgbn(GetByteContext *gb, uint8_t *dst, int width, int height, int linesize)
{
    int x = 0, y = 0, i, length;
    while (bytestream2_get_bytes_left(gb) >= 2) {
        uint32_t pixel = bytestream2_get_be16u(gb);
        length = pixel & 0x7;
        pixel >>= 4;
        DECODE_RGBX_COMMON(uint16_t)
    }
}
  534. /**
  535. * Decode DEEP RLE 32-bit buffer
  536. * @param[out] dst Destination buffer
  537. * @param[in] src Source buffer
  538. * @param src_size Source buffer size (bytes)
  539. * @param width Width of destination buffer (pixels)
  540. * @param height Height of destination buffer (pixels)
  541. * @param linesize Line size of destination buffer (bytes)
  542. */
  543. static void decode_deep_rle32(uint8_t *dst, const uint8_t *src, int src_size, int width, int height, int linesize)
  544. {
  545. const uint8_t *src_end = src + src_size;
  546. int x = 0, y = 0, i;
  547. while (src_end - src >= 5) {
  548. int opcode;
  549. opcode = *(int8_t *)src++;
  550. if (opcode >= 0) {
  551. int size = opcode + 1;
  552. for (i = 0; i < size; i++) {
  553. int length = FFMIN(size - i, width);
  554. memcpy(dst + y*linesize + x * 4, src, length * 4);
  555. src += length * 4;
  556. x += length;
  557. i += length;
  558. if (x >= width) {
  559. x = 0;
  560. y += 1;
  561. if (y >= height)
  562. return;
  563. }
  564. }
  565. } else {
  566. int size = -opcode + 1;
  567. uint32_t pixel = AV_RN32(src);
  568. for (i = 0; i < size; i++) {
  569. *(uint32_t *)(dst + y*linesize + x * 4) = pixel;
  570. x += 1;
  571. if (x >= width) {
  572. x = 0;
  573. y += 1;
  574. if (y >= height)
  575. return;
  576. }
  577. }
  578. src += 4;
  579. }
  580. }
  581. }
/**
 * Decode DEEP TVDC 32-bit buffer
 * The source is a stream of nibbles; tvdc[] maps each nibble to a signed
 * delta. A non-zero delta advances the running pixel value and emits one
 * byte; a zero delta is followed by a nibble run-count that repeats the
 * current value. Planes of one row are decoded one after another.
 * @param[out] dst Destination buffer
 * @param[in] src Source buffer
 * @param src_size Source buffer size (bytes)
 * @param width Width of destination buffer (pixels)
 * @param height Height of destination buffer (pixels)
 * @param linesize Line size of destination buffer (bytes)
 * @param[in] tvdc TVDC lookup table
 */
static void decode_deep_tvdc32(uint8_t *dst, const uint8_t *src, int src_size, int width, int height, int linesize, const int16_t *tvdc)
{
    int x = 0, y = 0, plane = 0;
    int8_t pixel = 0;
    int i, j;
    for (i = 0; i < src_size * 2;) { // i counts nibbles, not bytes
#define GETNIBBLE ((i & 1) ? (src[i>>1] & 0xF) : (src[i>>1] >> 4))
        int d = tvdc[GETNIBBLE];
        i++;
        if (d) {
            // Delta-coded sample: accumulate and store one plane byte.
            pixel += d;
            dst[y * linesize + x*4 + plane] = pixel;
            x++;
        } else {
            // Zero delta: next nibble is a repeat count for the current value.
            if (i >= src_size * 2)
                return;
            d = GETNIBBLE + 1;
            i++;
            d = FFMIN(d, width - x); // never run past the row
            for (j = 0; j < d; j++) {
                dst[y * linesize + x*4 + plane] = pixel;
                x++;
            }
        }
        if (x >= width) {
            // Row of this plane finished: next plane, or next row after plane 3.
            plane++;
            if (plane >= 4) {
                y++;
                if (y >= height)
                    return;
                plane = 0;
            }
            x = 0;
            pixel = 0;
            i = (i + 1) & ~1; // rows start on a byte boundary
        }
    }
}
  630. static int unsupported(AVCodecContext *avctx)
  631. {
  632. IffContext *s = avctx->priv_data;
  633. avpriv_request_sample(avctx, "bitmap (compression %i, bpp %i, ham %i)", s->compression, s->bpp, s->ham);
  634. return AVERROR_INVALIDDATA;
  635. }
/**
 * Decode one IFF bitmap packet.
 *
 * The packet starts with a big-endian 16-bit header size; extract_header()
 * re-parses the per-frame parameters, then the image data that follows is
 * decoded according to s->compression:
 *   0 = uncompressed planes / raw rows,
 *   1 = byterun1 RLE (and DEEP RLE),
 *   4 = RGB8/RGBN run-length,
 *   5 = DEEP TVDC.
 *
 * @param avctx     codec context
 * @param data      output AVFrame, filled via av_frame_ref()
 * @param got_frame set to 1 when a frame was produced
 * @param avpkt     input packet
 * @return number of image data bytes, or a negative error code
 */
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    IffContext *s = avctx->priv_data;
    // Image data starts after the per-frame header (first 2 bytes = its size).
    const uint8_t *buf = avpkt->size >= 2 ? avpkt->data + AV_RB16(avpkt->data) : NULL;
    const int buf_size = avpkt->size >= 2 ? avpkt->size - AV_RB16(avpkt->data) : 0;
    const uint8_t *buf_end = buf + buf_size;
    int y, plane, res;
    GetByteContext gb;
    const AVPixFmtDescriptor *desc;

    if ((res = extract_header(avctx, avpkt)) < 0)
        return res;
    if ((res = ff_reget_buffer(avctx, s->frame)) < 0)
        return res;
    desc = av_pix_fmt_desc_get(avctx->pix_fmt);

    // One-time palette setup: into the frame's palette (PAL8) or into the
    // masking palette buffer (RGB32 with a mask plane).
    if (!s->init && avctx->bits_per_coded_sample <= 8 - (s->masking == MASK_HAS_MASK) &&
        avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        if ((res = cmap_read_palette(avctx, (uint32_t *)s->frame->data[1])) < 0)
            return res;
    } else if (!s->init && avctx->bits_per_coded_sample <= 8 &&
               avctx->pix_fmt == AV_PIX_FMT_RGB32) {
        if ((res = cmap_read_palette(avctx, s->mask_palbuf)) < 0)
            return res;
    }
    s->init = 1;

    switch (s->compression) {
    case 0: // uncompressed
        if (avctx->codec_tag == MKTAG('A', 'C', 'B', 'M')) {
            // ACBM: each plane is stored contiguously for the whole image.
            if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                memset(s->frame->data[0], 0, avctx->height * s->frame->linesize[0]);
                for (plane = 0; plane < s->bpp; plane++) {
                    for (y = 0; y < avctx->height && buf < buf_end; y++) {
                        uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                        decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), plane);
                        buf += s->planesize;
                    }
                }
            } else if (s->ham) { // HAM to AV_PIX_FMT_BGR32
                memset(s->frame->data[0], 0, avctx->height * s->frame->linesize[0]);
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    memset(s->ham_buf, 0, s->planesize * 8);
                    for (plane = 0; plane < s->bpp; plane++) {
                        // Planes are not interleaved: seek to this row's slice of each plane.
                        const uint8_t * start = buf + (plane * avctx->height + y) * s->planesize;
                        if (start >= buf_end)
                            break;
                        decodeplane8(s->ham_buf, start, FFMIN(s->planesize, buf_end - start), plane);
                    }
                    decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
                }
            } else
                return unsupported(avctx);
        } else if (avctx->codec_tag == MKTAG('D', 'E', 'E', 'P')) {
            // DEEP raw: rows already hold packed pixels; just copy them.
            int raw_width = avctx->width * (av_get_bits_per_pixel(desc) >> 3);
            int x;
            for (y = 0; y < avctx->height && buf < buf_end; y++) {
                uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                memcpy(row, buf, FFMIN(raw_width, buf_end - buf));
                buf += raw_width;
                if (avctx->pix_fmt == AV_PIX_FMT_BGR32) {
                    // Expand the stored 4-bit alpha nibble to 8 bits.
                    for (x = 0; x < avctx->width; x++)
                        row[4 * x + 3] = row[4 * x + 3] & 0xF0 | (row[4 * x + 3] >> 4);
                }
            }
        } else if (avctx->codec_tag == MKTAG('I', 'L', 'B', 'M')) { // interleaved
            if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    memset(row, 0, avctx->width);
                    for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                        decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), plane);
                        buf += s->planesize;
                    }
                }
            } else if (s->ham) { // HAM to AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    memset(s->ham_buf, 0, s->planesize * 8);
                    for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                        decodeplane8(s->ham_buf, buf, FFMIN(s->planesize, buf_end - buf), plane);
                        buf += s->planesize;
                    }
                    decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
                }
            } else { // AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    memset(row, 0, avctx->width << 2);
                    for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                        decodeplane32((uint32_t *)row, buf,
                                      FFMIN(s->planesize, buf_end - buf), plane);
                        buf += s->planesize;
                    }
                }
            }
        } else if (avctx->codec_tag == MKTAG('P', 'B', 'M', ' ')) { // IFF-PBM
            // PBM rows are chunky (one byte per pixel), padded to even width.
            if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                for (y = 0; y < avctx->height && buf_end > buf; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    memcpy(row, buf, FFMIN(avctx->width, buf_end - buf));
                    buf += avctx->width + (avctx->width % 2); // padding if odd
                }
            } else if (s->ham) { // IFF-PBM: HAM to AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height && buf_end > buf; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    memcpy(s->ham_buf, buf, FFMIN(avctx->width, buf_end - buf));
                    buf += avctx->width + (avctx->width & 1); // padding if odd
                    decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
                }
            } else
                return unsupported(avctx);
        }
        break;
    case 1: // byterun1 RLE
        if (avctx->codec_tag == MKTAG('I', 'L', 'B', 'M')) { // interleaved
            if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    memset(row, 0, avctx->width);
                    for (plane = 0; plane < s->bpp; plane++) {
                        buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                        decodeplane8(row, s->planebuf, s->planesize, plane);
                    }
                }
            } else if (avctx->bits_per_coded_sample <= 8) { //8-bit (+ mask) to AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    memset(s->mask_buf, 0, avctx->width * sizeof(uint32_t));
                    for (plane = 0; plane < s->bpp; plane++) {
                        buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                        decodeplane32(s->mask_buf, s->planebuf, s->planesize, plane);
                    }
                    // Map index+mask values through the doubled masking palette.
                    lookup_pal_indicies((uint32_t *)row, s->mask_buf, s->mask_palbuf, avctx->width);
                }
            } else if (s->ham) { // HAM to AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    memset(s->ham_buf, 0, s->planesize * 8);
                    for (plane = 0; plane < s->bpp; plane++) {
                        buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                        decodeplane8(s->ham_buf, s->planebuf, s->planesize, plane);
                    }
                    decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
                }
            } else { // AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    memset(row, 0, avctx->width << 2);
                    for (plane = 0; plane < s->bpp; plane++) {
                        buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                        decodeplane32((uint32_t *)row, s->planebuf, s->planesize, plane);
                    }
                }
            }
        } else if (avctx->codec_tag == MKTAG('P', 'B', 'M', ' ')) { // IFF-PBM
            if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    buf += decode_byterun(row, avctx->width, buf, buf_end);
                }
            } else if (s->ham) { // IFF-PBM: HAM to AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    buf += decode_byterun(s->ham_buf, avctx->width, buf, buf_end);
                    decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
                }
            } else
                return unsupported(avctx);
        } else if (avctx->codec_tag == MKTAG('D', 'E', 'E', 'P')) { // IFF-DEEP
            if (av_get_bits_per_pixel(desc) == 32)
                decode_deep_rle32(s->frame->data[0], buf, buf_size, avctx->width, avctx->height, s->frame->linesize[0]);
            else
                return unsupported(avctx);
        }
        break;
    case 4: // RGB8/RGBN run-length formats
        bytestream2_init(&gb, buf, buf_size);
        if (avctx->codec_tag == MKTAG('R', 'G', 'B', '8') && avctx->pix_fmt == AV_PIX_FMT_RGB32)
            decode_rgb8(&gb, s->frame->data[0], avctx->width, avctx->height, s->frame->linesize[0]);
        else if (avctx->codec_tag == MKTAG('R', 'G', 'B', 'N') && avctx->pix_fmt == AV_PIX_FMT_RGB444)
            decode_rgbn(&gb, s->frame->data[0], avctx->width, avctx->height, s->frame->linesize[0]);
        else
            return unsupported(avctx);
        break;
    case 5: // DEEP TVDC delta coding
        if (avctx->codec_tag == MKTAG('D', 'E', 'E', 'P')) {
            if (av_get_bits_per_pixel(desc) == 32)
                decode_deep_tvdc32(s->frame->data[0], buf, buf_size, avctx->width, avctx->height, s->frame->linesize[0], s->tvdc);
            else
                return unsupported(avctx);
        } else
            return unsupported(avctx);
        break;
    default:
        return unsupported(avctx);
    }

    if ((res = av_frame_ref(data, s->frame)) < 0)
        return res;
    *got_frame = 1;
    return buf_size;
}
#if CONFIG_IFF_ILBM_DECODER
// Registration for the ILBM codec id; shares all callbacks with the
// byterun1 variant below.
AVCodec ff_iff_ilbm_decoder = {
    .name           = "iff",
    .long_name      = NULL_IF_CONFIG_SMALL("IFF"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_IFF_ILBM,
    .priv_data_size = sizeof(IffContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
};
#endif
#if CONFIG_IFF_BYTERUN1_DECODER
// Registration for the byterun1 codec id; identical implementation,
// different AVCodecID.
AVCodec ff_iff_byterun1_decoder = {
    .name           = "iff",
    .long_name      = NULL_IF_CONFIG_SMALL("IFF"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_IFF_BYTERUN1,
    .priv_data_size = sizeof(IffContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
};
#endif