/*
 * IFF PBM/ILBM bitmap decoder
 * Copyright (c) 2010 Peter Ross <pross@xvid.org>
 * Copyright (c) 2010 Sebastian Vater <cdgs.basty@googlemail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * IFF PBM/ILBM bitmap decoder
 */
#include "libavutil/imgutils.h"
#include "bytestream.h"
#include "avcodec.h"
#include "get_bits.h"

// TODO: masking bits
typedef enum {
    MASK_NONE,
    MASK_HAS_MASK,
    MASK_HAS_TRANSPARENT_COLOR,
    MASK_LASSO
} mask_type;
typedef struct {
    AVFrame frame;
    int planesize;
    uint8_t  *planebuf;
    uint8_t  *ham_buf;      ///< temporary buffer for planar to chunky conversion
    uint32_t *ham_palbuf;   ///< HAM decode table
    unsigned  compression;  ///< delta compression method used
    unsigned  bpp;          ///< bits per plane to decode (differs from bits_per_coded_sample if HAM)
    unsigned  ham;          ///< 0 if non-HAM or number of hold bits (6 for bpp > 6, 4 otherwise)
    unsigned  flags;        ///< 1 for EHB, 0 if no extra half darkening
    unsigned  transparency; ///< TODO: transparency color index in palette
    unsigned  masking;      ///< TODO: masking method used
    int init;               ///< 1 if buffer and palette data already initialized, 0 otherwise
} IffContext;
#define LUT8_PART(plane, v)                             \
    AV_LE2NE64C(UINT64_C(0x0000000)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x1000000)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x0010000)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x1010000)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x0000100)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x1000100)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x0010100)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x1010100)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x0000001)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x1000001)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x0010001)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x1010001)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x0000101)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x1000101)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x0010101)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x1010101)<<32 | v) << plane

#define LUT8(plane) {                 \
    LUT8_PART(plane, 0x0000000),      \
    LUT8_PART(plane, 0x1000000),      \
    LUT8_PART(plane, 0x0010000),      \
    LUT8_PART(plane, 0x1010000),      \
    LUT8_PART(plane, 0x0000100),      \
    LUT8_PART(plane, 0x1000100),      \
    LUT8_PART(plane, 0x0010100),      \
    LUT8_PART(plane, 0x1010100),      \
    LUT8_PART(plane, 0x0000001),      \
    LUT8_PART(plane, 0x1000001),      \
    LUT8_PART(plane, 0x0010001),      \
    LUT8_PART(plane, 0x1010001),      \
    LUT8_PART(plane, 0x0000101),      \
    LUT8_PART(plane, 0x1000101),      \
    LUT8_PART(plane, 0x0010101),      \
    LUT8_PART(plane, 0x1010101),      \
}
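
// Note: each plane8_lut[plane] entry expands one bitplane byte into eight
// output bytes: output byte i has bit 'plane' set if bit (7 - i) of the
// source byte is set (MSB = leftmost pixel), so decodeplane8() needs only
// one OR per plane to rebuild eight chunky pixels.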
// 8 planes * 8-bit mask
static const uint64_t plane8_lut[8][256] = {
    LUT8(0), LUT8(1), LUT8(2), LUT8(3),
    LUT8(4), LUT8(5), LUT8(6), LUT8(7),
};
#define LUT32(plane) {                                \
             0,          0,          0,          0,   \
             0,          0,          0, 1 << plane,   \
             0,          0, 1 << plane,          0,   \
             0,          0, 1 << plane, 1 << plane,   \
             0, 1 << plane,          0,          0,   \
             0, 1 << plane,          0, 1 << plane,   \
             0, 1 << plane, 1 << plane,          0,   \
             0, 1 << plane, 1 << plane, 1 << plane,   \
    1 << plane,          0,          0,          0,   \
    1 << plane,          0,          0, 1 << plane,   \
    1 << plane,          0, 1 << plane,          0,   \
    1 << plane,          0, 1 << plane, 1 << plane,   \
    1 << plane, 1 << plane,          0,          0,   \
    1 << plane, 1 << plane,          0, 1 << plane,   \
    1 << plane, 1 << plane, 1 << plane,          0,   \
    1 << plane, 1 << plane, 1 << plane, 1 << plane,   \
}
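
// Note: each group of four plane32_lut[plane] entries is indexed by one
// 4-bit nibble of a bitplane byte: entry j of group k has bit 'plane' set
// if bit (3 - j) of k is set. decodeplane32() ORs the high-nibble group
// into dst[0..3] and the low-nibble group into dst[4..7].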
// 32 planes * 4-bit mask * 4 lookup tables each
static const uint32_t plane32_lut[32][16*4] = {
    LUT32( 0), LUT32( 1), LUT32( 2), LUT32( 3),
    LUT32( 4), LUT32( 5), LUT32( 6), LUT32( 7),
    LUT32( 8), LUT32( 9), LUT32(10), LUT32(11),
    LUT32(12), LUT32(13), LUT32(14), LUT32(15),
    LUT32(16), LUT32(17), LUT32(18), LUT32(19),
    LUT32(20), LUT32(21), LUT32(22), LUT32(23),
    LUT32(24), LUT32(25), LUT32(26), LUT32(27),
    LUT32(28), LUT32(29), LUT32(30), LUT32(31),
};
// Gray to RGB, required for palette table of grayscale images with bpp < 8
static av_always_inline uint32_t gray2rgb(const uint32_t x) {
    return x << 16 | x << 8 | x;
}
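
// The CMAP payload referenced by the extradata offset is a plain sequence
// of 3-byte RGB triples, one per palette entry, as read below with AV_RB24().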
/**
 * Convert CMAP buffer (stored in extradata) to lavc palette format
 */
static int ff_cmap_read_palette(AVCodecContext *avctx, uint32_t *pal)
{
    int count, i;
    const uint8_t *const palette = avctx->extradata + AV_RB16(avctx->extradata);
    int palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);

    if (avctx->bits_per_coded_sample > 8) {
        av_log(avctx, AV_LOG_ERROR, "bits_per_coded_sample > 8 not supported\n");
        return AVERROR_INVALIDDATA;
    }

    count = 1 << avctx->bits_per_coded_sample;
    // If extradata is smaller than actually needed, fill the remaining with black.
    count = FFMIN(palette_size / 3, count);
    if (count) {
        for (i=0; i < count; i++) {
            pal[i] = 0xFF000000 | AV_RB24(palette + i*3);
        }
    } else { // Create gray-scale color palette for bps < 8
        count = 1 << avctx->bits_per_coded_sample;

        for (i=0; i < count; i++) {
            pal[i] = 0xFF000000 | gray2rgb((i * 255) >> avctx->bits_per_coded_sample);
        }
    }
    return 0;
}
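
/* Both extradata and data packets begin with a big-endian 16-bit offset to
 * their payload (CMAP palette data in extradata, image data in packets); the
 * bytes between that offset field and the payload carry the optional extra
 * header parsed below (compression, bpp, ham, flags, transparency, masking). */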
/**
 * Extracts the IFF extra context and updates internal
 * decoder structures.
 *
 * @param avctx the AVCodecContext to store the extracted context in
 * @param avpkt the AVPacket to extract the extra context from, or NULL to use avctx
 * @return 0 in case of success, a negative error code otherwise
 */
static int extract_header(AVCodecContext *const avctx,
                          const AVPacket *const avpkt) {
    const uint8_t *buf;
    unsigned buf_size;
    IffContext *s = avctx->priv_data;
    int palette_size;

    if (avctx->extradata_size < 2) {
        av_log(avctx, AV_LOG_ERROR, "not enough extradata\n");
        return AVERROR_INVALIDDATA;
    }
    palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);

    if (avpkt) {
        int image_size;
        if (avpkt->size < 2)
            return AVERROR_INVALIDDATA;
        image_size = avpkt->size - AV_RB16(avpkt->data);
        buf = avpkt->data;
        buf_size = bytestream_get_be16(&buf);
        if (buf_size <= 1 || image_size <= 1) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid image size received: %u -> image data offset: %d\n",
                   buf_size, image_size);
            return AVERROR_INVALIDDATA;
        }
    } else {
        buf = avctx->extradata;
        buf_size = bytestream_get_be16(&buf);
        if (buf_size <= 1 || palette_size < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid palette size received: %u -> palette data offset: %d\n",
                   buf_size, palette_size);
            return AVERROR_INVALIDDATA;
        }
    }

    if (buf_size > 8) {
        s->compression  = bytestream_get_byte(&buf);
        s->bpp          = bytestream_get_byte(&buf);
        s->ham          = bytestream_get_byte(&buf);
        s->flags        = bytestream_get_byte(&buf);
        s->transparency = bytestream_get_be16(&buf);
        s->masking      = bytestream_get_byte(&buf);
        if (s->masking == MASK_HAS_TRANSPARENT_COLOR) {
            av_log(avctx, AV_LOG_ERROR, "Transparency not supported\n");
            return AVERROR_PATCHWELCOME;
        } else if (s->masking != MASK_NONE) {
            av_log(avctx, AV_LOG_ERROR, "Masking not supported\n");
            return AVERROR_PATCHWELCOME;
        }
        if (!s->bpp || s->bpp > 32) {
            av_log(avctx, AV_LOG_ERROR, "Invalid number of bitplanes: %u\n", s->bpp);
            return AVERROR_INVALIDDATA;
        } else if (s->ham >= 8) {
            av_log(avctx, AV_LOG_ERROR, "Invalid number of hold bits for HAM: %u\n", s->ham);
            return AVERROR_INVALIDDATA;
        }

        av_freep(&s->ham_buf);
        av_freep(&s->ham_palbuf);

        if (s->ham) {
            int i, count = FFMIN(palette_size / 3, 1 << s->ham);
            const uint8_t *const palette = avctx->extradata + AV_RB16(avctx->extradata);
            s->ham_buf = av_malloc((s->planesize * 8) + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!s->ham_buf)
                return AVERROR(ENOMEM);

            s->ham_palbuf = av_malloc((8 * (1 << s->ham) * sizeof (uint32_t)) + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!s->ham_palbuf) {
                av_freep(&s->ham_buf);
                return AVERROR(ENOMEM);
            }

            if (count) { // HAM with color palette attached
                // pre-fill with black and the palette, and set the HAM take-direct-value mask to zero
                memset(s->ham_palbuf, 0, (1 << s->ham) * 2 * sizeof (uint32_t));
                for (i=0; i < count; i++) {
                    s->ham_palbuf[i*2+1] = AV_RL24(palette + i*3);
                }
                count = 1 << s->ham;
            } else { // HAM with grayscale color palette
                count = 1 << s->ham;
                for (i=0; i < count; i++) {
                    s->ham_palbuf[i*2]   = 0; // take direct color value from palette
                    s->ham_palbuf[i*2+1] = av_le2ne32(gray2rgb((i * 255) >> s->ham));
                }
            }
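            /* Entries count..2*count-1, 2*count..3*count-1 and 3*count..4*count-1
             * are the HAM "modify" pairs for the blue, red and green components:
             * the even (mask) word keeps the two untouched components of the
             * previous pixel and the odd (value) word carries the replacement. */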
            for (i=0; i < count; i++) {
                uint32_t tmp = i << (8 - s->ham);
                tmp |= tmp >> s->ham;
                s->ham_palbuf[(i+count)*2]     = 0x00FFFF; // just modify blue color component
                s->ham_palbuf[(i+count*2)*2]   = 0xFFFF00; // just modify red color component
                s->ham_palbuf[(i+count*3)*2]   = 0xFF00FF; // just modify green color component
                s->ham_palbuf[(i+count)*2+1]   = tmp << 16;
                s->ham_palbuf[(i+count*2)*2+1] = tmp;
                s->ham_palbuf[(i+count*3)*2+1] = tmp << 8;
            }
        } else if (s->flags & 1) { // EHB (ExtraHalfBrite) color palette
            av_log(avctx, AV_LOG_ERROR, "ExtraHalfBrite (EHB) mode not supported\n");
            return AVERROR_PATCHWELCOME;
        }
    }

    return 0;
}
static av_cold int decode_init(AVCodecContext *avctx)
{
    IffContext *s = avctx->priv_data;
    int err;

    if (avctx->bits_per_coded_sample <= 8) {
        int palette_size;
        if (avctx->extradata_size >= 2)
            palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
        else
            palette_size = 0;
        avctx->pix_fmt = (avctx->bits_per_coded_sample < 8) ||
                         (avctx->extradata_size >= 2 && palette_size) ? PIX_FMT_PAL8 : PIX_FMT_GRAY8;
    } else if (avctx->bits_per_coded_sample <= 32) {
        avctx->pix_fmt = PIX_FMT_BGR32;
    } else {
        return AVERROR_INVALIDDATA;
    }

    if ((err = av_image_check_size(avctx->width, avctx->height, 0, avctx)))
        return err;
    s->planesize = FFALIGN(avctx->width, 16) >> 3; // Align plane size in bits to word-boundary
    s->planebuf = av_malloc(s->planesize + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!s->planebuf)
        return AVERROR(ENOMEM);

    s->bpp = avctx->bits_per_coded_sample;
    avcodec_get_frame_defaults(&s->frame);

    if ((err = extract_header(avctx, NULL)) < 0)
        return err;
    s->frame.reference = 1;

    return 0;
}
/**
 * Decode interleaved plane buffer up to 8bpp
 * @param dst Destination buffer
 * @param buf Source buffer
 * @param buf_size Source buffer size in bytes
 * @param plane plane number to decode as
 */
static void decodeplane8(uint8_t *dst, const uint8_t *buf, int buf_size, int plane)
{
    const uint64_t *lut = plane8_lut[plane];
    do {
        uint64_t v = AV_RN64A(dst) | lut[*buf++];
        AV_WN64A(dst, v);
        dst += 8;
    } while (--buf_size);
}
/**
 * Decode interleaved plane buffer up to 24bpp
 * @param dst Destination buffer
 * @param buf Source buffer
 * @param buf_size Source buffer size in bytes
 * @param plane plane number to decode as
 */
static void decodeplane32(uint32_t *dst, const uint8_t *buf, int buf_size, int plane)
{
    const uint32_t *lut = plane32_lut[plane];
    do {
        unsigned mask = (*buf >> 2) & ~3;
        dst[0] |= lut[mask++];
        dst[1] |= lut[mask++];
        dst[2] |= lut[mask++];
        dst[3] |= lut[mask];
        mask = (*buf++ << 2) & 0x3F;
        dst[4] |= lut[mask++];
        dst[5] |= lut[mask++];
        dst[6] |= lut[mask++];
        dst[7] |= lut[mask];
        dst += 8;
    } while (--buf_size);
}
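
/* Hold-And-Modify (HAM): each chunky pixel value indexes ham_palbuf, which
 * stores (mask, value) pairs as built in extract_header(). Indices below the
 * palette size take a colour directly (mask 0); the remaining indices keep
 * two components of the previous pixel via the mask and replace the third
 * component with the value. */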
#define DECODE_HAM_PLANE32(x)       \
    first       = buf[x] << 1;      \
    second      = buf[(x)+1] << 1;  \
    delta      &= pal[first++];     \
    delta      |= pal[first];       \
    dst[x]      = delta;            \
    delta      &= pal[second++];    \
    delta      |= pal[second];      \
    dst[(x)+1]  = delta
/**
 * Converts one line of HAM6/8-encoded chunky buffer to 24bpp.
 *
 * @param dst the destination 24bpp buffer
 * @param buf the source 8bpp chunky buffer
 * @param pal the HAM decode table
 * @param buf_size the plane size in bytes
 */
static void decode_ham_plane32(uint32_t *dst, const uint8_t *buf,
                               const uint32_t *const pal, unsigned buf_size)
{
    uint32_t delta = 0;
    do {
        uint32_t first, second;
        DECODE_HAM_PLANE32(0);
        DECODE_HAM_PLANE32(2);
        DECODE_HAM_PLANE32(4);
        DECODE_HAM_PLANE32(6);
        buf += 8;
        dst += 8;
    } while (--buf_size);
}
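
/* ByteRun1 control bytes: a value n in 0..127 copies the following n+1 bytes
 * literally, a value n in -127..-1 repeats the following byte -n+1 times,
 * and -128 is a no-op (matching the loop below). */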
/**
 * Decode one complete byterun1 encoded line.
 *
 * @param dst the destination buffer where to store decompressed bitstream
 * @param dst_size the destination plane size in bytes
 * @param buf the source byterun1 compressed bitstream
 * @param buf_end the end of the source byterun1 compressed bitstream
 * @return number of consumed bytes in byterun1 compressed bitstream
 */
static int decode_byterun(uint8_t *dst, int dst_size,
                          const uint8_t *buf, const uint8_t *const buf_end) {
    const uint8_t *const buf_start = buf;
    unsigned x;
    for (x = 0; x < dst_size && buf < buf_end;) {
        unsigned length;
        const int8_t value = *buf++;
        if (value >= 0) {
            length = value + 1;
            memcpy(dst + x, buf, FFMIN3(length, dst_size - x, buf_end - buf));
            buf += length;
        } else if (value > -128) {
            length = -value + 1;
            memset(dst + x, *buf++, FFMIN(length, dst_size - x));
        } else { // noop
            continue;
        }
        x += length;
    }
    return buf - buf_start;
}
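
/* ILBM stores each scanline as s->bpp consecutive bitplane rows that are
 * merged into chunky pixels; PBM stores already-chunky 8-bit rows padded to
 * an even number of bytes. */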
static int decode_frame_ilbm(AVCodecContext *avctx,
                             void *data, int *data_size,
                             AVPacket *avpkt)
{
    IffContext *s = avctx->priv_data;
    const uint8_t *buf = avpkt->size >= 2 ? avpkt->data + AV_RB16(avpkt->data) : NULL;
    const int buf_size = avpkt->size >= 2 ? avpkt->size - AV_RB16(avpkt->data) : 0;
    const uint8_t *buf_end = buf+buf_size;
    int y, plane, res;

    if ((res = extract_header(avctx, avpkt)) < 0)
        return res;

    if (s->init) {
        if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return res;
        }
    } else if ((res = avctx->get_buffer(avctx, &s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return res;
    } else if (avctx->bits_per_coded_sample <= 8 && avctx->pix_fmt != PIX_FMT_GRAY8) {
        if ((res = ff_cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0)
            return res;
    }
    s->init = 1;

    if (avctx->codec_tag == MKTAG('I','L','B','M')) { // interleaved
        if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) {
            for(y = 0; y < avctx->height; y++ ) {
                uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
                memset(row, 0, avctx->width);
                for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                    decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), plane);
                    buf += s->planesize;
                }
            }
        } else if (s->ham) { // HAM to PIX_FMT_BGR32
            for (y = 0; y < avctx->height; y++) {
                uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
                memset(s->ham_buf, 0, avctx->width);
                for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                    decodeplane8(s->ham_buf, buf, FFMIN(s->planesize, buf_end - buf), plane);
                    buf += s->planesize;
                }
                decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, s->planesize);
            }
        } else { // PIX_FMT_BGR32
            for(y = 0; y < avctx->height; y++ ) {
                uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
                memset(row, 0, avctx->width << 2);
                for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                    decodeplane32((uint32_t *) row, buf, FFMIN(s->planesize, buf_end - buf), plane);
                    buf += s->planesize;
                }
            }
        }
    } else if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) { // IFF-PBM
        for(y = 0; y < avctx->height; y++ ) {
            uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
            memcpy(row, buf, FFMIN(avctx->width, buf_end - buf));
            buf += avctx->width + (avctx->width % 2); // padding if odd
        }
    } else { // IFF-PBM: HAM to PIX_FMT_BGR32
        for (y = 0; y < avctx->height; y++) {
            uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
            memcpy(s->ham_buf, buf, FFMIN(avctx->width, buf_end - buf));
            buf += avctx->width + (avctx->width & 1); // padding if odd
            decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, avctx->width);
        }
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;
    return buf_size;
}
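
/* Same frame layout as decode_frame_ilbm(), but every bitplane row (ILBM) or
 * chunky row (PBM) is ByteRun1-compressed and is expanded with
 * decode_byterun() first. */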
static int decode_frame_byterun1(AVCodecContext *avctx,
                                 void *data, int *data_size,
                                 AVPacket *avpkt)
{
    IffContext *s = avctx->priv_data;
    const uint8_t *buf = avpkt->size >= 2 ? avpkt->data + AV_RB16(avpkt->data) : NULL;
    const int buf_size = avpkt->size >= 2 ? avpkt->size - AV_RB16(avpkt->data) : 0;
    const uint8_t *buf_end = buf+buf_size;
    int y, plane, res;

    if ((res = extract_header(avctx, avpkt)) < 0)
        return res;
    if (s->init) {
        if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return res;
        }
    } else if ((res = avctx->get_buffer(avctx, &s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return res;
    } else if (avctx->bits_per_coded_sample <= 8 && avctx->pix_fmt != PIX_FMT_GRAY8) {
        if ((res = ff_cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0)
            return res;
    }
    s->init = 1;

    if (avctx->codec_tag == MKTAG('I','L','B','M')) { //interleaved
        if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) {
            for(y = 0; y < avctx->height ; y++ ) {
                uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
                memset(row, 0, avctx->width);
                for (plane = 0; plane < s->bpp; plane++) {
                    buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                    decodeplane8(row, s->planebuf, s->planesize, plane);
                }
            }
        } else if (s->ham) { // HAM to PIX_FMT_BGR32
            for (y = 0; y < avctx->height ; y++) {
                uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
                memset(s->ham_buf, 0, avctx->width);
                for (plane = 0; plane < s->bpp; plane++) {
                    buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                    decodeplane8(s->ham_buf, s->planebuf, s->planesize, plane);
                }
                decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, s->planesize);
            }
        } else { //PIX_FMT_BGR32
            for(y = 0; y < avctx->height ; y++ ) {
                uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
                memset(row, 0, avctx->width << 2);
                for (plane = 0; plane < s->bpp; plane++) {
                    buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                    decodeplane32((uint32_t *) row, s->planebuf, s->planesize, plane);
                }
            }
        }
    } else if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) { // IFF-PBM
        for(y = 0; y < avctx->height ; y++ ) {
            uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
            buf += decode_byterun(row, avctx->width, buf, buf_end);
        }
    } else { // IFF-PBM: HAM to PIX_FMT_BGR32
        for (y = 0; y < avctx->height ; y++) {
            uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
            buf += decode_byterun(s->ham_buf, avctx->width, buf, buf_end);
            decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, avctx->width);
        }
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;
    return buf_size;
}
static av_cold int decode_end(AVCodecContext *avctx)
{
    IffContext *s = avctx->priv_data;
    if (s->frame.data[0])
        avctx->release_buffer(avctx, &s->frame);
    av_freep(&s->planebuf);
    av_freep(&s->ham_buf);
    av_freep(&s->ham_palbuf);
    return 0;
}
AVCodec ff_iff_ilbm_decoder = {
    "iff_ilbm",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_IFF_ILBM,
    sizeof(IffContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame_ilbm,
    CODEC_CAP_DR1,
    .long_name = NULL_IF_CONFIG_SMALL("IFF ILBM"),
};

AVCodec ff_iff_byterun1_decoder = {
    "iff_byterun1",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_IFF_BYTERUN1,
    sizeof(IffContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame_byterun1,
    CODEC_CAP_DR1,
    .long_name = NULL_IF_CONFIG_SMALL("IFF ByteRun1"),
};