/*
 * IFF PBM/ILBM bitmap decoder
 * Copyright (c) 2010 Peter Ross <pross@xvid.org>
 * Copyright (c) 2010 Sebastian Vater <cdgs.basty@googlemail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * IFF PBM/ILBM bitmap decoder
 */

#include "libavutil/imgutils.h"
#include "bytestream.h"
#include "avcodec.h"
#include "get_bits.h"

// TODO: masking bits
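// Masking techniques, in the order used by the ILBM BMHD masking field:
// no mask, a separate mask plane, a transparent colour index, or a lasso
// (vector mask). MASK_LASSO is rejected as unsupported further below.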
typedef enum {
    MASK_NONE,
    MASK_HAS_MASK,
    MASK_HAS_TRANSPARENT_COLOR,
    MASK_LASSO
} mask_type;
typedef struct {
    AVFrame   frame;
    int       planesize;
    uint8_t  *planebuf;
    uint8_t  *ham_buf;      ///< temporary buffer for planar to chunky conversion
    uint32_t *ham_palbuf;   ///< HAM decode table
    uint32_t *mask_buf;     ///< temporary buffer for palette indices
    uint32_t *mask_palbuf;  ///< masking palette table
    unsigned  compression;  ///< delta compression method used
    unsigned  bpp;          ///< bits per plane to decode (differs from bits_per_coded_sample if HAM)
    unsigned  ham;          ///< 0 if non-HAM or number of hold bits (6 for bpp > 6, 4 otherwise)
    unsigned  flags;        ///< 1 for EHB (extra half-brite), 0 otherwise
    unsigned  transparency; ///< TODO: transparency color index in palette
    unsigned  masking;      ///< TODO: masking method used
    int       init;         ///< 1 if buffer and palette data already initialized, 0 otherwise
} IffContext;
#define LUT8_PART(plane, v)                         \
    AV_LE2NE64C(UINT64_C(0x0000000)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1000000)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0010000)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1010000)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0000100)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1000100)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0010100)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1010100)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0000001)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1000001)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0010001)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1010001)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0000101)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1000101)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0010101)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1010101)<<32 | v) << plane

#define LUT8(plane) {            \
    LUT8_PART(plane, 0x0000000), \
    LUT8_PART(plane, 0x1000000), \
    LUT8_PART(plane, 0x0010000), \
    LUT8_PART(plane, 0x1010000), \
    LUT8_PART(plane, 0x0000100), \
    LUT8_PART(plane, 0x1000100), \
    LUT8_PART(plane, 0x0010100), \
    LUT8_PART(plane, 0x1010100), \
    LUT8_PART(plane, 0x0000001), \
    LUT8_PART(plane, 0x1000001), \
    LUT8_PART(plane, 0x0010001), \
    LUT8_PART(plane, 0x1010001), \
    LUT8_PART(plane, 0x0000101), \
    LUT8_PART(plane, 0x1000101), \
    LUT8_PART(plane, 0x0010101), \
    LUT8_PART(plane, 0x1010101), \
}

// 8 planes * 8-bit mask
static const uint64_t plane8_lut[8][256] = {
    LUT8(0), LUT8(1), LUT8(2), LUT8(3),
    LUT8(4), LUT8(5), LUT8(6), LUT8(7),
};
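// plane8_lut[plane][byte] expands one byte of bitplane `plane` into eight
// packed 8-bit palette indices, each with bit `plane` set where the source
// bit was 1; decodeplane8() simply ORs these 64-bit values into the row.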
#define LUT32(plane) { \
    0, 0, 0, 0, \
    0, 0, 0, 1 << plane, \
    0, 0, 1 << plane, 0, \
    0, 0, 1 << plane, 1 << plane, \
    0, 1 << plane, 0, 0, \
    0, 1 << plane, 0, 1 << plane, \
    0, 1 << plane, 1 << plane, 0, \
    0, 1 << plane, 1 << plane, 1 << plane, \
    1 << plane, 0, 0, 0, \
    1 << plane, 0, 0, 1 << plane, \
    1 << plane, 0, 1 << plane, 0, \
    1 << plane, 0, 1 << plane, 1 << plane, \
    1 << plane, 1 << plane, 0, 0, \
    1 << plane, 1 << plane, 0, 1 << plane, \
    1 << plane, 1 << plane, 1 << plane, 0, \
    1 << plane, 1 << plane, 1 << plane, 1 << plane, \
}

// 32 planes * 4-bit mask * 4 lookup tables each
static const uint32_t plane32_lut[32][16*4] = {
    LUT32( 0), LUT32( 1), LUT32( 2), LUT32( 3),
    LUT32( 4), LUT32( 5), LUT32( 6), LUT32( 7),
    LUT32( 8), LUT32( 9), LUT32(10), LUT32(11),
    LUT32(12), LUT32(13), LUT32(14), LUT32(15),
    LUT32(16), LUT32(17), LUT32(18), LUT32(19),
    LUT32(20), LUT32(21), LUT32(22), LUT32(23),
    LUT32(24), LUT32(25), LUT32(26), LUT32(27),
    LUT32(28), LUT32(29), LUT32(30), LUT32(31),
};
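// plane32_lut[plane][nibble * 4 + n] gives the contribution of bitplane `plane`
// to the n-th of the four 32-bit pixels covered by a 4-bit group; decodeplane32()
// splits each source byte into two nibbles and ORs these values into the row.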
// Gray to RGB, required for palette table of grayscale images with bpp < 8
static av_always_inline uint32_t gray2rgb(const uint32_t x) {
    return x << 16 | x << 8 | x;
}
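// The palette entries built below are 32-bit 0xAARRGGBB values. For EHB images
// entries 32..63 repeat the first 32 at half brightness; with a mask plane the
// palette is duplicated and the lower copy made transparent (alpha 0), and with
// a transparent colour only that index has its alpha cleared.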
/**
 * Convert CMAP buffer (stored in extradata) to lavc palette format
 */
static int ff_cmap_read_palette(AVCodecContext *avctx, uint32_t *pal)
{
    IffContext *s = avctx->priv_data;
    int count, i;
    const uint8_t *const palette = avctx->extradata + AV_RB16(avctx->extradata);
    int palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);

    if (avctx->bits_per_coded_sample > 8) {
        av_log(avctx, AV_LOG_ERROR, "bits_per_coded_sample > 8 not supported\n");
        return AVERROR_INVALIDDATA;
    }

    count = 1 << avctx->bits_per_coded_sample;
    // If extradata is smaller than actually needed, fill the remaining with black.
    count = FFMIN(palette_size / 3, count);
    if (count) {
        for (i = 0; i < count; i++) {
            pal[i] = 0xFF000000 | AV_RB24(palette + i*3);
        }
        if (s->flags && count >= 32) { // EHB
            for (i = 0; i < 32; i++)
                pal[i + 32] = 0xFF000000 | (AV_RB24(palette + i*3) & 0xFEFEFE) >> 1;
            count = FFMAX(count, 64);
        }
    } else { // Create gray-scale color palette for bps < 8
        count = 1 << avctx->bits_per_coded_sample;
        for (i = 0; i < count; i++) {
            pal[i] = 0xFF000000 | gray2rgb((i * 255) >> avctx->bits_per_coded_sample);
        }
    }
    if (s->masking == MASK_HAS_MASK) {
        memcpy(pal + (1 << avctx->bits_per_coded_sample), pal, count * 4);
        for (i = 0; i < count; i++)
            pal[i] &= 0xFFFFFF;
    } else if (s->masking == MASK_HAS_TRANSPARENT_COLOR &&
               s->transparency < 1 << avctx->bits_per_coded_sample)
        pal[s->transparency] &= 0xFFFFFF;
    return 0;
}
/**
 * Extract the IFF extra context and update the internal decoder structures.
 *
 * @param avctx the AVCodecContext whose private context is updated
 * @param avpkt the AVPacket to extract the extra context from, or NULL to use
 *              the codec extradata instead
 * @return 0 in case of success, a negative error code otherwise
 */
static int extract_header(AVCodecContext *const avctx,
                          const AVPacket *const avpkt)
{
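    /* Both the codec extradata and each packet start with a big-endian 16-bit
     * value giving the offset of the palette/image data, i.e. the size of this
     * extra-context header. When that header is longer than 8 bytes it carries,
     * in order, the compression method, bpp, HAM hold bits, EHB flag, a 16-bit
     * transparent colour index and the masking method, as read below. */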
    const uint8_t *buf;
    unsigned buf_size;
    IffContext *s = avctx->priv_data;
    int palette_size;

    if (avctx->extradata_size < 2) {
        av_log(avctx, AV_LOG_ERROR, "not enough extradata\n");
        return AVERROR_INVALIDDATA;
    }
    palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);

    if (avpkt) {
        int image_size;
        if (avpkt->size < 2)
            return AVERROR_INVALIDDATA;
        image_size = avpkt->size - AV_RB16(avpkt->data);
        buf        = avpkt->data;
        buf_size   = bytestream_get_be16(&buf);
        if (buf_size <= 1 || image_size <= 1) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid image size received: %u -> image data offset: %d\n",
                   buf_size, image_size);
            return AVERROR_INVALIDDATA;
        }
    } else {
        buf      = avctx->extradata;
        buf_size = bytestream_get_be16(&buf);
        if (buf_size <= 1 || palette_size < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid palette size received: %u -> palette data offset: %d\n",
                   buf_size, palette_size);
            return AVERROR_INVALIDDATA;
        }
    }

    if (buf_size > 8) {
        s->compression  = bytestream_get_byte(&buf);
        s->bpp          = bytestream_get_byte(&buf);
        s->ham          = bytestream_get_byte(&buf);
        s->flags        = bytestream_get_byte(&buf);
        s->transparency = bytestream_get_be16(&buf);
        s->masking      = bytestream_get_byte(&buf);
        if (s->masking == MASK_HAS_MASK) {
            if (s->bpp >= 8) {
                avctx->pix_fmt = PIX_FMT_RGB32;
                av_freep(&s->mask_buf);
                av_freep(&s->mask_palbuf);
                s->mask_buf = av_malloc((s->planesize * 32) + FF_INPUT_BUFFER_PADDING_SIZE);
                if (!s->mask_buf)
                    return AVERROR(ENOMEM);
                if (s->bpp > 16) {
                    av_log(avctx, AV_LOG_ERROR, "bpp %d too large for palette\n", s->bpp);
                    av_freep(&s->mask_buf);
                    return AVERROR(ENOMEM);
                }
                s->mask_palbuf = av_malloc((2 << s->bpp) * sizeof(uint32_t) + FF_INPUT_BUFFER_PADDING_SIZE);
                if (!s->mask_palbuf) {
                    av_freep(&s->mask_buf);
                    return AVERROR(ENOMEM);
                }
            }
            s->bpp++;
        } else if (s->masking != MASK_NONE && s->masking != MASK_HAS_TRANSPARENT_COLOR) {
            av_log(avctx, AV_LOG_ERROR, "Masking not supported\n");
            return AVERROR_PATCHWELCOME;
        }
        if (!s->bpp || s->bpp > 32) {
            av_log(avctx, AV_LOG_ERROR, "Invalid number of bitplanes: %u\n", s->bpp);
            return AVERROR_INVALIDDATA;
        } else if (s->ham >= 8) {
            av_log(avctx, AV_LOG_ERROR, "Invalid number of hold bits for HAM: %u\n", s->ham);
            return AVERROR_INVALIDDATA;
        }

        av_freep(&s->ham_buf);
        av_freep(&s->ham_palbuf);

        if (s->ham) {
            int i, count = FFMIN(palette_size / 3, 1 << s->ham);
            int ham_count;
            const uint8_t *const palette = avctx->extradata + AV_RB16(avctx->extradata);

            s->ham_buf = av_malloc((s->planesize * 8) + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!s->ham_buf)
                return AVERROR(ENOMEM);

            ham_count = 8 * (1 << s->ham);
            s->ham_palbuf = av_malloc((ham_count << !!(s->masking == MASK_HAS_MASK)) * sizeof (uint32_t) + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!s->ham_palbuf) {
                av_freep(&s->ham_buf);
                return AVERROR(ENOMEM);
            }

            if (count) { // HAM with color palette attached
                // prefill with black and palette and set HAM take direct value mask to zero
                memset(s->ham_palbuf, 0, (1 << s->ham) * 2 * sizeof (uint32_t));
                for (i = 0; i < count; i++) {
                    s->ham_palbuf[i*2+1] = 0xFF000000 | AV_RL24(palette + i*3);
                }
                count = 1 << s->ham;
            } else { // HAM with grayscale color palette
                count = 1 << s->ham;
                for (i = 0; i < count; i++) {
                    s->ham_palbuf[i*2]   = 0xFF000000; // take direct color value from palette
                    s->ham_palbuf[i*2+1] = 0xFF000000 | av_le2ne32(gray2rgb((i * 255) >> s->ham));
                }
            }
            for (i = 0; i < count; i++) {
                uint32_t tmp = i << (8 - s->ham);
                tmp |= tmp >> s->ham;
                s->ham_palbuf[(i+count)*2]     = 0xFF00FFFF; // just modify blue color component
                s->ham_palbuf[(i+count*2)*2]   = 0xFFFFFF00; // just modify red color component
                s->ham_palbuf[(i+count*3)*2]   = 0xFFFF00FF; // just modify green color component
                s->ham_palbuf[(i+count)*2+1]   = 0xFF000000 | tmp << 16;
                s->ham_palbuf[(i+count*2)*2+1] = 0xFF000000 | tmp;
                s->ham_palbuf[(i+count*3)*2+1] = 0xFF000000 | tmp << 8;
            }
            if (s->masking == MASK_HAS_MASK) {
                for (i = 0; i < ham_count; i++)
                    s->ham_palbuf[(1 << s->bpp) + i] = s->ham_palbuf[i] | 0xFF000000;
            }
        }
    }
    return 0;
}
static av_cold int decode_init(AVCodecContext *avctx)
{
    IffContext *s = avctx->priv_data;
    int err;

    if (avctx->bits_per_coded_sample <= 8) {
        int palette_size;

        if (avctx->extradata_size >= 2)
            palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
        else
            palette_size = 0;
        avctx->pix_fmt = (avctx->bits_per_coded_sample < 8) ||
                         (avctx->extradata_size >= 2 && palette_size) ? PIX_FMT_PAL8 : PIX_FMT_GRAY8;
    } else if (avctx->bits_per_coded_sample <= 32) {
        if (avctx->codec_tag != MKTAG('D','E','E','P'))
            avctx->pix_fmt = PIX_FMT_BGR32;
    } else {
        return AVERROR_INVALIDDATA;
    }

    if ((err = av_image_check_size(avctx->width, avctx->height, 0, avctx)))
        return err;
    s->planesize = FFALIGN(avctx->width, 16) >> 3; // Align plane size in bits to word-boundary
    s->planebuf  = av_malloc(s->planesize + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!s->planebuf)
        return AVERROR(ENOMEM);

    s->bpp = avctx->bits_per_coded_sample;
    avcodec_get_frame_defaults(&s->frame);

    if ((err = extract_header(avctx, NULL)) < 0)
        return err;
    s->frame.reference = 3;

    return 0;
}
/**
 * Decode interleaved plane buffer up to 8bpp
 * @param dst Destination buffer
 * @param buf Source buffer
 * @param buf_size size of the source buffer in bytes
 * @param plane plane number to decode as
 */
static void decodeplane8(uint8_t *dst, const uint8_t *buf, int buf_size, int plane)
{
    const uint64_t *lut = plane8_lut[plane];
    do {
        uint64_t v = AV_RN64A(dst) | lut[*buf++];
        AV_WN64A(dst, v);
        dst += 8;
    } while (--buf_size);
}
/**
 * Decode interleaved plane buffer up to 24bpp
 * @param dst Destination buffer
 * @param buf Source buffer
 * @param buf_size size of the source buffer in bytes
 * @param plane plane number to decode as
 */
static void decodeplane32(uint32_t *dst, const uint8_t *buf, int buf_size, int plane)
{
    const uint32_t *lut = plane32_lut[plane];
    do {
        unsigned mask = (*buf >> 2) & ~3;
        dst[0] |= lut[mask++];
        dst[1] |= lut[mask++];
        dst[2] |= lut[mask++];
        dst[3] |= lut[mask];
        mask    = (*buf++ << 2) & 0x3F;
        dst[4] |= lut[mask++];
        dst[5] |= lut[mask++];
        dst[6] |= lut[mask++];
        dst[7] |= lut[mask];
        dst    += 8;
    } while (--buf_size);
}
#define DECODE_HAM_PLANE32(x)       \
    first       = buf[x] << 1;      \
    second      = buf[(x)+1] << 1;  \
    delta      &= pal[first++];     \
    delta      |= pal[first];       \
    dst[x]      = delta;            \
    delta      &= pal[second++];    \
    delta      |= pal[second];      \
    dst[(x)+1]  = delta
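/* The HAM decode table stores two 32-bit words per 8-bit index: an AND mask
 * that keeps the held colour components of the previous pixel, and an OR value
 * that supplies either a complete palette colour or the modified component.
 * Each pixel above is therefore produced as (previous & mask) | value. */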
/**
 * Converts one line of HAM6/8-encoded chunky buffer to 24bpp.
 *
 * @param dst the destination 24bpp buffer
 * @param buf the source 8bpp chunky buffer
 * @param pal the HAM decode table
 * @param buf_size the plane size in bytes
 */
static void decode_ham_plane32(uint32_t *dst, const uint8_t *buf,
                               const uint32_t *const pal, unsigned buf_size)
{
    uint32_t delta = 0;
    do {
        uint32_t first, second;
        DECODE_HAM_PLANE32(0);
        DECODE_HAM_PLANE32(2);
        DECODE_HAM_PLANE32(4);
        DECODE_HAM_PLANE32(6);
        buf += 8;
        dst += 8;
    } while (--buf_size);
}
static void lookup_pal_indicies(uint32_t *dst, const uint32_t *buf,
                                const uint32_t *const pal, unsigned width)
{
    do {
        *dst++ = pal[*buf++];
    } while (--width);
}
/**
 * Decode one complete byterun1 encoded line.
 *
 * @param dst the destination buffer where to store decompressed bitstream
 * @param dst_size the destination plane size in bytes
 * @param buf the source byterun1 compressed bitstream
 * @param buf_end the end of the source byterun1 compressed bitstream
 * @return number of consumed bytes in byterun1 compressed bitstream
 */
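/* ByteRun1 is the PackBits-style RLE used in ILBM BODY chunks: a control byte
 * n in 0..127 copies the next n+1 literal bytes, n in -127..-1 repeats the
 * next byte -n+1 times, and n == -128 is a no-op, which matches the memcpy,
 * memset and continue branches below. */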
static int decode_byterun(uint8_t *dst, int dst_size,
                          const uint8_t *buf, const uint8_t *const buf_end)
{
    const uint8_t *const buf_start = buf;
    unsigned x;
    for (x = 0; x < dst_size && buf < buf_end;) {
        unsigned length;
        const int8_t value = *buf++;
        if (value >= 0) {
            length = value + 1;
            memcpy(dst + x, buf, FFMIN3(length, dst_size - x, buf_end - buf));
            buf += length;
        } else if (value > -128) {
            length = -value + 1;
            memset(dst + x, *buf++, FFMIN(length, dst_size - x));
        } else { // noop
            continue;
        }
        x += length;
    }
    return buf - buf_start;
}
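/* The first two bytes of every packet are a big-endian offset to the image
 * data; everything before that offset is the per-frame extra context parsed
 * by extract_header() above. decode_frame_ilbm() handles uncompressed BODY
 * data for the ACBM, DEEP, ILBM and PBM variants. */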
static int decode_frame_ilbm(AVCodecContext *avctx,
                             void *data, int *data_size,
                             AVPacket *avpkt)
{
    IffContext *s = avctx->priv_data;
    const uint8_t *buf = avpkt->size >= 2 ? avpkt->data + AV_RB16(avpkt->data) : NULL;
    const int buf_size = avpkt->size >= 2 ? avpkt->size - AV_RB16(avpkt->data) : 0;
    const uint8_t *buf_end = buf + buf_size;
    int y, plane, res;

    if ((res = extract_header(avctx, avpkt)) < 0)
        return res;

    if (s->init) {
        if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return res;
        }
    } else if ((res = avctx->get_buffer(avctx, &s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return res;
    } else if (avctx->bits_per_coded_sample <= 8 && avctx->pix_fmt == PIX_FMT_PAL8) {
        if ((res = ff_cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0)
            return res;
    }
    s->init = 1;

    if (avctx->codec_tag == MKTAG('A','C','B','M')) {
        if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) {
            memset(s->frame.data[0], 0, avctx->height * s->frame.linesize[0]);
            for (plane = 0; plane < s->bpp; plane++) {
                for (y = 0; y < avctx->height && buf < buf_end; y++) {
                    uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                    decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), plane);
                    buf += s->planesize;
                }
            }
        } else if (s->ham) { // HAM to PIX_FMT_BGR32
            memset(s->frame.data[0], 0, avctx->height * s->frame.linesize[0]);
            for (y = 0; y < avctx->height; y++) {
                uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                memset(s->ham_buf, 0, s->planesize * 8);
                for (plane = 0; plane < s->bpp; plane++) {
                    const uint8_t * start = buf + (plane * avctx->height + y) * s->planesize;
                    if (start >= buf_end)
                        break;
                    decodeplane8(s->ham_buf, start, FFMIN(s->planesize, buf_end - start), plane);
                }
                decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, s->planesize);
            }
        }
    } else if (avctx->codec_tag == MKTAG('D','E','E','P')) {
        int raw_width = avctx->width * (av_get_bits_per_pixel(&av_pix_fmt_descriptors[avctx->pix_fmt]) >> 3);
        int x;
        for (y = 0; y < avctx->height && buf < buf_end; y++) {
            uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
            memcpy(row, buf, FFMIN(raw_width, buf_end - buf));
            buf += raw_width;
            if (avctx->pix_fmt == PIX_FMT_BGR32) {
                for (x = 0; x < avctx->width; x++)
                    row[4 * x + 3] = row[4 * x + 3] & 0xF0 | (row[4 * x + 3] >> 4);
            }
        }
    } else if (avctx->codec_tag == MKTAG('I','L','B','M')) { // interleaved
        if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) {
            for (y = 0; y < avctx->height; y++) {
                uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                memset(row, 0, avctx->width);
                for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                    decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), plane);
                    buf += s->planesize;
                }
            }
        } else if (s->ham) { // HAM to PIX_FMT_BGR32
            for (y = 0; y < avctx->height; y++) {
                uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                memset(s->ham_buf, 0, s->planesize * 8);
                for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                    decodeplane8(s->ham_buf, buf, FFMIN(s->planesize, buf_end - buf), plane);
                    buf += s->planesize;
                }
                decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, s->planesize);
            }
        } else { // PIX_FMT_BGR32
            for (y = 0; y < avctx->height; y++) {
                uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                memset(row, 0, avctx->width << 2);
                for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                    decodeplane32((uint32_t *) row, buf, FFMIN(s->planesize, buf_end - buf), plane);
                    buf += s->planesize;
                }
            }
        }
    } else if (avctx->codec_tag == MKTAG('P','B','M',' ')) { // IFF-PBM
        if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) {
            for (y = 0; y < avctx->height; y++) {
                uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                memcpy(row, buf, FFMIN(avctx->width, buf_end - buf));
                buf += avctx->width + (avctx->width % 2); // padding if odd
            }
        } else if (s->ham) { // IFF-PBM: HAM to PIX_FMT_BGR32
            for (y = 0; y < avctx->height; y++) {
                uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                memcpy(s->ham_buf, buf, FFMIN(avctx->width, buf_end - buf));
                buf += avctx->width + (avctx->width & 1); // padding if odd
                decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, s->planesize);
            }
        } else {
            av_log_ask_for_sample(avctx, "unsupported bpp\n");
            return AVERROR_INVALIDDATA;
        }
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;
    return buf_size;
}
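/* decode_frame_byterun1() is the ByteRun1-compressed counterpart of the
 * function above: each bitplane is first decompressed with decode_byterun()
 * into s->planebuf (or, for PBM, straight into the output row / s->ham_buf)
 * and then converted to chunky pixels as in the uncompressed path. */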
static int decode_frame_byterun1(AVCodecContext *avctx,
                                 void *data, int *data_size,
                                 AVPacket *avpkt)
{
    IffContext *s = avctx->priv_data;
    const uint8_t *buf = avpkt->size >= 2 ? avpkt->data + AV_RB16(avpkt->data) : NULL;
    const int buf_size = avpkt->size >= 2 ? avpkt->size - AV_RB16(avpkt->data) : 0;
    const uint8_t *buf_end = buf + buf_size;
    int y, plane, res;

    if ((res = extract_header(avctx, avpkt)) < 0)
        return res;
    if (s->init) {
        if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return res;
        }
    } else if ((res = avctx->get_buffer(avctx, &s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return res;
    } else if (avctx->pix_fmt == PIX_FMT_PAL8) {
        if ((res = ff_cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0)
            return res;
    } else if (avctx->pix_fmt == PIX_FMT_RGB32 && avctx->bits_per_coded_sample <= 8) {
        if ((res = ff_cmap_read_palette(avctx, s->mask_palbuf)) < 0)
            return res;
    }
    s->init = 1;

    if (avctx->codec_tag == MKTAG('I','L','B','M')) { // interleaved
        if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) {
            for (y = 0; y < avctx->height; y++) {
                uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                memset(row, 0, avctx->width);
                for (plane = 0; plane < s->bpp; plane++) {
                    buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                    decodeplane8(row, s->planebuf, s->planesize, plane);
                }
            }
        } else if (avctx->bits_per_coded_sample <= 8) { // 8-bit (+ mask) to PIX_FMT_BGR32
            for (y = 0; y < avctx->height; y++) {
                uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                memset(s->mask_buf, 0, avctx->width * sizeof(uint32_t));
                for (plane = 0; plane < s->bpp; plane++) {
                    buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                    decodeplane32(s->mask_buf, s->planebuf, s->planesize, plane);
                }
                lookup_pal_indicies((uint32_t *) row, s->mask_buf, s->mask_palbuf, avctx->width);
            }
        } else if (s->ham) { // HAM to PIX_FMT_BGR32
            for (y = 0; y < avctx->height; y++) {
                uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                memset(s->ham_buf, 0, s->planesize * 8);
                for (plane = 0; plane < s->bpp; plane++) {
                    buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                    decodeplane8(s->ham_buf, s->planebuf, s->planesize, plane);
                }
                decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, s->planesize);
            }
        } else { // PIX_FMT_BGR32
            for (y = 0; y < avctx->height; y++) {
                uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                memset(row, 0, avctx->width << 2);
                for (plane = 0; plane < s->bpp; plane++) {
                    buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                    decodeplane32((uint32_t *) row, s->planebuf, s->planesize, plane);
                }
            }
        }
    } else if (avctx->codec_tag == MKTAG('P','B','M',' ')) { // IFF-PBM
        if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) {
            for (y = 0; y < avctx->height; y++) {
                uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                buf += decode_byterun(row, avctx->width, buf, buf_end);
            }
        } else if (s->ham) { // IFF-PBM: HAM to PIX_FMT_BGR32
            for (y = 0; y < avctx->height; y++) {
                uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                buf += decode_byterun(s->ham_buf, avctx->width, buf, buf_end);
                decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, s->planesize);
            }
        } else {
            av_log_ask_for_sample(avctx, "unsupported bpp\n");
            return AVERROR_INVALIDDATA;
        }
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;
    return buf_size;
}
static av_cold int decode_end(AVCodecContext *avctx)
{
    IffContext *s = avctx->priv_data;
    if (s->frame.data[0])
        avctx->release_buffer(avctx, &s->frame);
    av_freep(&s->planebuf);
    av_freep(&s->ham_buf);
    av_freep(&s->ham_palbuf);
    // also release the masking buffers allocated in extract_header()
    av_freep(&s->mask_buf);
    av_freep(&s->mask_palbuf);
    return 0;
}
#if CONFIG_IFF_ILBM_DECODER
AVCodec ff_iff_ilbm_decoder = {
    .name           = "iff_ilbm",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_IFF_ILBM,
    .priv_data_size = sizeof(IffContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame_ilbm,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("IFF ILBM"),
};
#endif

#if CONFIG_IFF_BYTERUN1_DECODER
AVCodec ff_iff_byterun1_decoder = {
    .name           = "iff_byterun1",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_IFF_BYTERUN1,
    .priv_data_size = sizeof(IffContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame_byterun1,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("IFF ByteRun1"),
};
#endif