You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

643 lines
24KB

  1. /*
  2. * IFF PBM/ILBM bitmap decoder
  3. * Copyright (c) 2010 Peter Ross <pross@xvid.org>
  4. * Copyright (c) 2010 Sebastian Vater <cdgs.basty@googlemail.com>
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file
  24. * IFF PBM/ILBM bitmap decoder
  25. */
  26. #include "libavutil/imgutils.h"
  27. #include "bytestream.h"
  28. #include "avcodec.h"
  29. #include "get_bits.h"
// TODO: masking bits
/**
 * ILBM BMHD masking technique. Only MASK_NONE is currently supported;
 * extract_header() rejects the other modes (see its masking checks).
 */
typedef enum {
    MASK_NONE,
    MASK_HAS_MASK,
    MASK_HAS_TRANSPARENT_COLOR,
    MASK_LASSO
} mask_type;
  37. /**
  38. * Gets the actual extra data after video preperties which contains
  39. * the raw CMAP palette data beyond the IFF extra context.
  40. *
  41. * @param avctx the AVCodecContext where to extract raw palette data from
  42. * @return pointer to raw CMAP palette data
  43. */
  44. static av_always_inline uint8_t *get_palette_data(const AVCodecContext *const avctx) {
  45. return avctx->extradata + AV_RB16(avctx->extradata);
  46. }
  47. /**
  48. * Gets the size of CMAP palette data beyond the IFF extra context.
  49. * Please note that any value < 2 of IFF extra context or
  50. * raw extradata < 0 is considered as illegal extradata.
  51. *
  52. * @param avctx the AVCodecContext where to extract palette data size from
  53. * @return size of raw palette data in bytes
  54. */
  55. static av_always_inline int get_palette_size(const AVCodecContext *const avctx) {
  56. return avctx->extradata_size - AV_RB16(avctx->extradata);
  57. }
  58. /**
  59. * Gets the actual raw image data after video properties which
  60. * contains the raw image data beyond the IFF extra context.
  61. *
  62. * @param avpkt the AVPacket where to extract raw image data from
  63. * @return pointer to raw image data
  64. */
  65. static av_always_inline uint8_t *get_image_data(const AVPacket *const avpkt) {
  66. return avpkt->data + AV_RB16(avpkt->data);
  67. }
  68. /**
  69. * Gets the size of raw image data beyond the IFF extra context.
  70. * Please note that any value < 2 of either IFF extra context
  71. * or raw image data is considered as an illegal packet.
  72. *
  73. * @param avpkt the AVPacket where to extract image data size from
  74. * @return size of raw image data in bytes
  75. */
  76. static av_always_inline int get_image_size(const AVPacket *const avpkt) {
  77. return avpkt->size - AV_RB16(avpkt->data);
  78. }
/** Private decoder state shared by the ILBM and ByteRun1 decoders. */
typedef struct {
    AVFrame frame;         ///< output frame, reused across calls via (re)get_buffer
    int planesize;         ///< bytes per bitplane row, 16-bit word aligned (see decode_init)
    uint8_t * planebuf;    ///< scratch buffer for one decompressed plane row
    uint8_t * ham_buf;     ///< temporary buffer for planar to chunky conversion
    uint32_t *ham_palbuf;  ///< HAM decode table
    unsigned compression;  ///< delta compression method used
    unsigned bpp;          ///< bits per plane to decode (differs from bits_per_coded_sample if HAM)
    unsigned ham;          ///< 0 if non-HAM or number of hold bits (6 for bpp > 6, 4 otherwise)
    unsigned flags;        ///< 1 for EHB, 0 is no extra half darkening
    unsigned transparency; ///< TODO: transparency color index in palette
    unsigned masking;      ///< TODO: masking method used
    int init;              ///< 1 if buffer and palette data already initialized, 0 otherwise
} IffContext;
/*
 * Compile-time lookup table used by decodeplane8(): plane8_lut[plane][byte]
 * spreads the 8 bits of one plane byte across the 8 bytes of a 64-bit word,
 * with each source bit landing at bit position 'plane' of its output byte.
 * This lets a whole group of 8 chunky pixels be accumulated with a single
 * 64-bit OR per plane byte. LUT8_PART enumerates the 16 low-nibble bit
 * patterns for one fixed high-nibble pattern v; LUT8 combines all 16 high
 * nibbles. AV_LE2NE64C fixes up byte order for the native endianness.
 */
#define LUT8_PART(plane, v) \
    AV_LE2NE64C(UINT64_C(0x0000000)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1000000)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0010000)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1010000)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0000100)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1000100)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0010100)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1010100)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0000001)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1000001)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0010001)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1010001)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0000101)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1000101)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x0010101)<<32 | v) << plane, \
    AV_LE2NE64C(UINT64_C(0x1010101)<<32 | v) << plane

#define LUT8(plane) { \
    LUT8_PART(plane, 0x0000000), \
    LUT8_PART(plane, 0x1000000), \
    LUT8_PART(plane, 0x0010000), \
    LUT8_PART(plane, 0x1010000), \
    LUT8_PART(plane, 0x0000100), \
    LUT8_PART(plane, 0x1000100), \
    LUT8_PART(plane, 0x0010100), \
    LUT8_PART(plane, 0x1010100), \
    LUT8_PART(plane, 0x0000001), \
    LUT8_PART(plane, 0x1000001), \
    LUT8_PART(plane, 0x0010001), \
    LUT8_PART(plane, 0x1010001), \
    LUT8_PART(plane, 0x0000101), \
    LUT8_PART(plane, 0x1000101), \
    LUT8_PART(plane, 0x0010101), \
    LUT8_PART(plane, 0x1010101), \
}

// 8 planes * 8-bit mask
static const uint64_t plane8_lut[8][256] = {
    LUT8(0), LUT8(1), LUT8(2), LUT8(3),
    LUT8(4), LUT8(5), LUT8(6), LUT8(7),
};
/*
 * Compile-time lookup table used by decodeplane32(): for each plane,
 * the table holds 16 groups (one per 4-bit nibble pattern) of 4 uint32
 * values, one per output pixel. Each value is either 0 or 1 << plane,
 * so decodeplane32() can OR four pixels per nibble straight into the
 * 32bpp destination.
 */
#define LUT32(plane) { \
    0, 0, 0, 0, \
    0, 0, 0, 1 << plane, \
    0, 0, 1 << plane, 0, \
    0, 0, 1 << plane, 1 << plane, \
    0, 1 << plane, 0, 0, \
    0, 1 << plane, 0, 1 << plane, \
    0, 1 << plane, 1 << plane, 0, \
    0, 1 << plane, 1 << plane, 1 << plane, \
    1 << plane, 0, 0, 0, \
    1 << plane, 0, 0, 1 << plane, \
    1 << plane, 0, 1 << plane, 0, \
    1 << plane, 0, 1 << plane, 1 << plane, \
    1 << plane, 1 << plane, 0, 0, \
    1 << plane, 1 << plane, 0, 1 << plane, \
    1 << plane, 1 << plane, 1 << plane, 0, \
    1 << plane, 1 << plane, 1 << plane, 1 << plane, \
}

// 32 planes * 4-bit mask * 4 lookup tables each
static const uint32_t plane32_lut[32][16*4] = {
    LUT32( 0), LUT32( 1), LUT32( 2), LUT32( 3),
    LUT32( 4), LUT32( 5), LUT32( 6), LUT32( 7),
    LUT32( 8), LUT32( 9), LUT32(10), LUT32(11),
    LUT32(12), LUT32(13), LUT32(14), LUT32(15),
    LUT32(16), LUT32(17), LUT32(18), LUT32(19),
    LUT32(20), LUT32(21), LUT32(22), LUT32(23),
    LUT32(24), LUT32(25), LUT32(26), LUT32(27),
    LUT32(28), LUT32(29), LUT32(30), LUT32(31),
};
  162. // Gray to RGB, required for palette table of grayscale images with bpp < 8
  163. static av_always_inline uint32_t gray2rgb(const uint32_t x) {
  164. return x << 16 | x << 8 | x;
  165. }
  166. /**
  167. * Convert CMAP buffer (stored in extradata) to lavc palette format
  168. */
  169. static int ff_cmap_read_palette(AVCodecContext *avctx, uint32_t *pal)
  170. {
  171. int count, i;
  172. const uint8_t *const extradata = get_palette_data(avctx);
  173. if (avctx->bits_per_coded_sample > 8) {
  174. av_log(avctx, AV_LOG_ERROR, "bit_per_coded_sample > 8 not supported\n");
  175. return AVERROR_INVALIDDATA;
  176. }
  177. count = 1 << avctx->bits_per_coded_sample;
  178. // If extradata is smaller than actually needed, fill the remaining with black.
  179. count = FFMIN(get_palette_size(avctx) / 3, count);
  180. if (count) {
  181. for (i=0; i < count; i++) {
  182. pal[i] = 0xFF000000 | AV_RB24(extradata + i*3);
  183. }
  184. } else { // Create gray-scale color palette for bps < 8
  185. count = 1 << avctx->bits_per_coded_sample;
  186. for (i=0; i < count; i++) {
  187. pal[i] = 0xFF000000 | gray2rgb((i * 255) >> avctx->bits_per_coded_sample);
  188. }
  189. }
  190. return 0;
  191. }
/**
 * Extracts the IFF extra context and updates internal
 * decoder structures.
 *
 * The extra context is a small header (compression, bpp, HAM hold bits,
 * flags, transparent color, masking) placed in front of either the CMAP
 * palette data (in extradata) or the raw image data (in each packet); its
 * size is stored as a leading big-endian 16-bit word.
 *
 * @param avctx the AVCodecContext where to extract extra context to
 * @param avpkt the AVPacket to extract extra context from or NULL to use avctx
 * @return 0 in case of success, a negative error code otherwise
 */
static int extract_header(AVCodecContext *const avctx,
                          const AVPacket *const avpkt) {
    const uint8_t *buf;
    unsigned buf_size;
    IffContext *s = avctx->priv_data;

    if (avpkt) {
        /* per-packet context: validate that both the context size and the
         * remaining image payload are plausible */
        if (avpkt->size < 2)
            return AVERROR_INVALIDDATA;
        buf = avpkt->data;
        buf_size = bytestream_get_be16(&buf);
        if (buf_size <= 1 || get_image_size(avpkt) <= 1) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid image size received: %u -> image data offset: %d\n",
                   buf_size, get_image_size(avpkt));
            return AVERROR_INVALIDDATA;
        }
    } else {
        /* global context lives in extradata, followed by the CMAP palette */
        if (avctx->extradata_size < 2)
            return AVERROR_INVALIDDATA;
        buf = avctx->extradata;
        buf_size = bytestream_get_be16(&buf);
        if (buf_size <= 1 || get_palette_size(avctx) < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid palette size received: %u -> palette data offset: %d\n",
                   buf_size, get_palette_size(avctx));
            return AVERROR_INVALIDDATA;
        }
    }

    if (buf_size > 8) {
        /* full extra context present: parse the remaining fields */
        s->compression  = bytestream_get_byte(&buf);
        s->bpp          = bytestream_get_byte(&buf);
        s->ham          = bytestream_get_byte(&buf);
        s->flags        = bytestream_get_byte(&buf);
        s->transparency = bytestream_get_be16(&buf);
        s->masking      = bytestream_get_byte(&buf);
        if (s->masking == MASK_HAS_TRANSPARENT_COLOR) {
            av_log(avctx, AV_LOG_ERROR, "Transparency not supported\n");
            return AVERROR_PATCHWELCOME;
        } else if (s->masking != MASK_NONE) {
            av_log(avctx, AV_LOG_ERROR, "Masking not supported\n");
            return AVERROR_PATCHWELCOME;
        }
        if (!s->bpp || s->bpp > 32) {
            av_log(avctx, AV_LOG_ERROR, "Invalid number of bitplanes: %u\n", s->bpp);
            return AVERROR_INVALIDDATA;
        } else if (s->ham >= 8) {
            av_log(avctx, AV_LOG_ERROR, "Invalid number of hold bits for HAM: %u\n", s->ham);
            return AVERROR_INVALIDDATA;
        }

        /* drop any HAM state from a previous header before rebuilding */
        av_freep(&s->ham_buf);
        av_freep(&s->ham_palbuf);

        if (s->ham) {
            /* Build the HAM decode table consumed by decode_ham_plane32():
             * each entry is a pair (AND mask at [i*2], OR value at [i*2+1]).
             * The first bank covers "set" opcodes (mask 0 -> take the palette
             * value directly); three further banks implement the modify-blue,
             * modify-red and modify-green opcodes by masking out one byte
             * lane and ORing the replicated hold bits back in. */
            int i, count = FFMIN(get_palette_size(avctx) / 3, 1 << s->ham);
            const uint8_t *const extradata = get_palette_data(avctx);
            s->ham_buf = av_malloc((s->planesize * 8) + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!s->ham_buf)
                return AVERROR(ENOMEM);

            s->ham_palbuf = av_malloc((8 * (1 << s->ham) * sizeof (uint32_t)) + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!s->ham_palbuf) {
                av_freep(&s->ham_buf);
                return AVERROR(ENOMEM);
            }

            if (count) { // HAM with color palette attached
                // prefill with black and palette and set HAM take direct value mask to zero
                memset(s->ham_palbuf, 0, (1 << s->ham) * 2 * sizeof (uint32_t));
                for (i=0; i < count; i++) {
                    s->ham_palbuf[i*2+1] = AV_RL24(extradata + i*3);
                }
                count = 1 << s->ham;
            } else { // HAM with grayscale color palette
                count = 1 << s->ham;
                for (i=0; i < count; i++) {
                    s->ham_palbuf[i*2]   = 0; // take direct color value from palette
                    s->ham_palbuf[i*2+1] = av_le2ne32(gray2rgb((i * 255) >> s->ham));
                }
            }
            for (i=0; i < count; i++) {
                uint32_t tmp = i << (8 - s->ham);
                tmp |= tmp >> s->ham; // replicate the hold bits into the low bits
                s->ham_palbuf[(i+count)*2]     = 0x00FFFF; // just modify blue color component
                s->ham_palbuf[(i+count*2)*2]   = 0xFFFF00; // just modify red color component
                s->ham_palbuf[(i+count*3)*2]   = 0xFF00FF; // just modify green color component
                s->ham_palbuf[(i+count)*2+1]   = tmp << 16;
                s->ham_palbuf[(i+count*2)*2+1] = tmp;
                s->ham_palbuf[(i+count*3)*2+1] = tmp << 8;
            }
        } else if (s->flags & 1) { // EHB (ExtraHalfBrite) color palette
            av_log(avctx, AV_LOG_ERROR, "ExtraHalfBrite (EHB) mode not supported\n");
            return AVERROR_PATCHWELCOME;
        }
    }
    return 0;
}
  293. static av_cold int decode_init(AVCodecContext *avctx)
  294. {
  295. IffContext *s = avctx->priv_data;
  296. int err;
  297. if (avctx->bits_per_coded_sample <= 8) {
  298. avctx->pix_fmt = (avctx->bits_per_coded_sample < 8) ||
  299. (avctx->extradata_size >= 2 && get_palette_size(avctx)) ? PIX_FMT_PAL8
  300. : PIX_FMT_GRAY8;
  301. } else if (avctx->bits_per_coded_sample <= 32) {
  302. avctx->pix_fmt = PIX_FMT_BGR32;
  303. } else {
  304. return AVERROR_INVALIDDATA;
  305. }
  306. if ((err = av_image_check_size(avctx->width, avctx->height, 0, avctx)))
  307. return err;
  308. s->planesize = FFALIGN(avctx->width, 16) >> 3; // Align plane size in bits to word-boundary
  309. s->planebuf = av_malloc(s->planesize + FF_INPUT_BUFFER_PADDING_SIZE);
  310. if (!s->planebuf)
  311. return AVERROR(ENOMEM);
  312. s->bpp = avctx->bits_per_coded_sample;
  313. if ((err = extract_header(avctx, NULL)) < 0)
  314. return err;
  315. s->frame.reference = 1;
  316. return 0;
  317. }
  318. /**
  319. * Decode interleaved plane buffer up to 8bpp
  320. * @param dst Destination buffer
  321. * @param buf Source buffer
  322. * @param buf_size
  323. * @param plane plane number to decode as
  324. */
  325. static void decodeplane8(uint8_t *dst, const uint8_t *buf, int buf_size, int plane)
  326. {
  327. const uint64_t *lut = plane8_lut[plane];
  328. do {
  329. uint64_t v = AV_RN64A(dst) | lut[*buf++];
  330. AV_WN64A(dst, v);
  331. dst += 8;
  332. } while (--buf_size);
  333. }
  334. /**
  335. * Decode interleaved plane buffer up to 24bpp
  336. * @param dst Destination buffer
  337. * @param buf Source buffer
  338. * @param buf_size
  339. * @param plane plane number to decode as
  340. */
  341. static void decodeplane32(uint32_t *dst, const uint8_t *buf, int buf_size, int plane)
  342. {
  343. const uint32_t *lut = plane32_lut[plane];
  344. do {
  345. unsigned mask = (*buf >> 2) & ~3;
  346. dst[0] |= lut[mask++];
  347. dst[1] |= lut[mask++];
  348. dst[2] |= lut[mask++];
  349. dst[3] |= lut[mask];
  350. mask = (*buf++ << 2) & 0x3F;
  351. dst[4] |= lut[mask++];
  352. dst[5] |= lut[mask++];
  353. dst[6] |= lut[mask++];
  354. dst[7] |= lut[mask];
  355. dst += 8;
  356. } while (--buf_size);
  357. }
/*
 * Expand two HAM pixels: each source byte indexes a pair of table entries,
 * (AND mask at pal[2*i], OR value at pal[2*i+1]), so that "hold" byte lanes
 * of the running color 'delta' survive the mask while "modify" lanes are
 * replaced by the OR value (see the table built in extract_header()).
 */
#define DECODE_HAM_PLANE32(x) \
    first = buf[x] << 1; \
    second = buf[(x)+1] << 1; \
    delta &= pal[first++]; \
    delta |= pal[first]; \
    dst[x] = delta; \
    delta &= pal[second++]; \
    delta |= pal[second]; \
    dst[(x)+1] = delta

/**
 * Converts one line of HAM6/8-encoded chunky buffer to 24bpp.
 *
 * @param dst the destination 24bpp buffer
 * @param buf the source 8bpp chunky buffer
 * @param pal the HAM decode table
 * @param buf_size the plane size in bytes; must be > 0 (do/while executes
 *        the body at least once) — callers pass planesize or width
 */
static void decode_ham_plane32(uint32_t *dst, const uint8_t *buf,
                               const uint32_t *const pal, unsigned buf_size)
{
    uint32_t delta = 0; // running HAM color, carried across the whole line
    do {
        uint32_t first, second;
        DECODE_HAM_PLANE32(0);
        DECODE_HAM_PLANE32(2);
        DECODE_HAM_PLANE32(4);
        DECODE_HAM_PLANE32(6);
        buf += 8;
        dst += 8;
    } while (--buf_size);
}
  389. /**
  390. * Decode one complete byterun1 encoded line.
  391. *
  392. * @param dst the destination buffer where to store decompressed bitstream
  393. * @param dst_size the destination plane size in bytes
  394. * @param buf the source byterun1 compressed bitstream
  395. * @param buf_end the EOF of source byterun1 compressed bitstream
  396. * @return number of consumed bytes in byterun1 compressed bitstream
  397. */
  398. static int decode_byterun(uint8_t *dst, int dst_size,
  399. const uint8_t *buf, const uint8_t *const buf_end) {
  400. const uint8_t *const buf_start = buf;
  401. unsigned x;
  402. for (x = 0; x < dst_size && buf < buf_end;) {
  403. unsigned length;
  404. const int8_t value = *buf++;
  405. if (value >= 0) {
  406. length = value + 1;
  407. memcpy(dst + x, buf, FFMIN3(length, dst_size - x, buf_end - buf));
  408. buf += length;
  409. } else if (value > -128) {
  410. length = -value + 1;
  411. memset(dst + x, *buf++, FFMIN(length, dst_size - x));
  412. } else { // noop
  413. continue;
  414. }
  415. x += length;
  416. }
  417. return buf - buf_start;
  418. }
/**
 * Decode one uncompressed IFF frame. The 'ILBM' codec tag selects
 * interleaved bitplane layout; anything else is treated as chunky IFF-PBM.
 *
 * @param avctx     decoder context
 * @param data      output AVFrame (copied from s->frame)
 * @param data_size set to sizeof(AVFrame) on success
 * @param avpkt     input packet: extra context followed by image data
 * @return number of consumed image bytes or a negative error code
 */
static int decode_frame_ilbm(AVCodecContext *avctx,
                             void *data, int *data_size,
                             AVPacket *avpkt)
{
    IffContext *s = avctx->priv_data;
    const uint8_t *buf = avpkt->size >= 2 ? get_image_data(avpkt) : NULL;
    const int buf_size = avpkt->size >= 2 ? get_image_size(avpkt) : 0;
    const uint8_t *buf_end = buf+buf_size;
    int y, plane, res;

    if ((res = extract_header(avctx, avpkt)) < 0)
        return res;

    if (s->init) {
        /* keep the previous frame contents for inter-frame reuse */
        if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return res;
        }
    } else if ((res = avctx->get_buffer(avctx, &s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return res;
    } else if (avctx->bits_per_coded_sample <= 8 && avctx->pix_fmt != PIX_FMT_GRAY8) {
        /* first frame of a palettized stream: load the CMAP palette */
        if ((res = ff_cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0)
            return res;
    }
    s->init = 1;

    if (avctx->codec_tag == MKTAG('I','L','B','M')) { // interleaved
        if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) {
            /* accumulate each bitplane of a row into 8bpp chunky pixels */
            for(y = 0; y < avctx->height; y++ ) {
                uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
                memset(row, 0, avctx->width);
                for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                    decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), plane);
                    buf += s->planesize;
                }
            }
        } else if (s->ham) { // HAM to PIX_FMT_BGR32
            /* first build the chunky 8bpp row, then expand it through the
             * HAM decode table */
            for (y = 0; y < avctx->height; y++) {
                uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
                memset(s->ham_buf, 0, avctx->width);
                for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                    decodeplane8(s->ham_buf, buf, FFMIN(s->planesize, buf_end - buf), plane);
                    buf += s->planesize;
                }
                decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, s->planesize);
            }
        } else { // PIX_FMT_BGR32
            for(y = 0; y < avctx->height; y++ ) {
                uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
                memset(row, 0, avctx->width << 2);
                for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                    decodeplane32((uint32_t *) row, buf, FFMIN(s->planesize, buf_end - buf), plane);
                    buf += s->planesize;
                }
            }
        }
    } else if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) { // IFF-PBM
        /* chunky layout: one byte per pixel, rows padded to even width */
        for(y = 0; y < avctx->height; y++ ) {
            uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
            memcpy(row, buf, FFMIN(avctx->width, buf_end - buf));
            buf += avctx->width + (avctx->width % 2); // padding if odd
        }
    } else { // IFF-PBM: HAM to PIX_FMT_BGR32
        for (y = 0; y < avctx->height; y++) {
            uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
            memcpy(s->ham_buf, buf, FFMIN(avctx->width, buf_end - buf));
            buf += avctx->width + (avctx->width & 1); // padding if odd
            decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, avctx->width);
        }
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;
    return buf_size;
}
/**
 * Decode one ByteRun1 (RLE) compressed IFF frame. Structure mirrors
 * decode_frame_ilbm(), but each plane row (or pixel row for PBM) is first
 * decompressed with decode_byterun().
 *
 * @param avctx     decoder context
 * @param data      output AVFrame (copied from s->frame)
 * @param data_size set to sizeof(AVFrame) on success
 * @param avpkt     input packet: extra context followed by compressed data
 * @return number of consumed image bytes or a negative error code
 */
static int decode_frame_byterun1(AVCodecContext *avctx,
                                 void *data, int *data_size,
                                 AVPacket *avpkt)
{
    IffContext *s = avctx->priv_data;
    const uint8_t *buf = avpkt->size >= 2 ? get_image_data(avpkt) : NULL;
    const int buf_size = avpkt->size >= 2 ? get_image_size(avpkt) : 0;
    const uint8_t *buf_end = buf+buf_size;
    int y, plane, res;

    if ((res = extract_header(avctx, avpkt)) < 0)
        return res;

    if (s->init) {
        /* keep the previous frame contents for inter-frame reuse */
        if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return res;
        }
    } else if ((res = avctx->get_buffer(avctx, &s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return res;
    } else if (avctx->bits_per_coded_sample <= 8 && avctx->pix_fmt != PIX_FMT_GRAY8) {
        /* first frame of a palettized stream: load the CMAP palette */
        if ((res = ff_cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0)
            return res;
    }
    s->init = 1;

    if (avctx->codec_tag == MKTAG('I','L','B','M')) { //interleaved
        if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) {
            /* decompress each plane row into planebuf, then accumulate
             * it into 8bpp chunky pixels */
            for(y = 0; y < avctx->height ; y++ ) {
                uint8_t *row = &s->frame.data[0][ y*s->frame.linesize[0] ];
                memset(row, 0, avctx->width);
                for (plane = 0; plane < s->bpp; plane++) {
                    buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                    decodeplane8(row, s->planebuf, s->planesize, plane);
                }
            }
        } else if (s->ham) { // HAM to PIX_FMT_BGR32
            for (y = 0; y < avctx->height ; y++) {
                uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
                memset(s->ham_buf, 0, avctx->width);
                for (plane = 0; plane < s->bpp; plane++) {
                    buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                    decodeplane8(s->ham_buf, s->planebuf, s->planesize, plane);
                }
                decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, s->planesize);
            }
        } else { //PIX_FMT_BGR32
            for(y = 0; y < avctx->height ; y++ ) {
                uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
                memset(row, 0, avctx->width << 2);
                for (plane = 0; plane < s->bpp; plane++) {
                    buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                    decodeplane32((uint32_t *) row, s->planebuf, s->planesize, plane);
                }
            }
        }
    } else if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) { // IFF-PBM
        /* chunky layout: decompress straight into the output row */
        for(y = 0; y < avctx->height ; y++ ) {
            uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
            buf += decode_byterun(row, avctx->width, buf, buf_end);
        }
    } else { // IFF-PBM: HAM to PIX_FMT_BGR32
        for (y = 0; y < avctx->height ; y++) {
            uint8_t *row = &s->frame.data[0][y*s->frame.linesize[0]];
            buf += decode_byterun(s->ham_buf, avctx->width, buf, buf_end);
            decode_ham_plane32((uint32_t *) row, s->ham_buf, s->ham_palbuf, avctx->width);
        }
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;
    return buf_size;
}
  561. static av_cold int decode_end(AVCodecContext *avctx)
  562. {
  563. IffContext *s = avctx->priv_data;
  564. if (s->frame.data[0])
  565. avctx->release_buffer(avctx, &s->frame);
  566. av_freep(&s->planebuf);
  567. av_freep(&s->ham_buf);
  568. av_freep(&s->ham_palbuf);
  569. return 0;
  570. }
  571. AVCodec ff_iff_ilbm_decoder = {
  572. "iff_ilbm",
  573. AVMEDIA_TYPE_VIDEO,
  574. CODEC_ID_IFF_ILBM,
  575. sizeof(IffContext),
  576. decode_init,
  577. NULL,
  578. decode_end,
  579. decode_frame_ilbm,
  580. CODEC_CAP_DR1,
  581. .long_name = NULL_IF_CONFIG_SMALL("IFF ILBM"),
  582. };
  583. AVCodec ff_iff_byterun1_decoder = {
  584. "iff_byterun1",
  585. AVMEDIA_TYPE_VIDEO,
  586. CODEC_ID_IFF_BYTERUN1,
  587. sizeof(IffContext),
  588. decode_init,
  589. NULL,
  590. decode_end,
  591. decode_frame_byterun1,
  592. CODEC_CAP_DR1,
  593. .long_name = NULL_IF_CONFIG_SMALL("IFF ByteRun1"),
  594. };