/*
 * IFF PBM/ILBM bitmap decoder
 * Copyright (c) 2010 Peter Ross <pross@xvid.org>
 * Copyright (c) 2010 Sebastian Vater <cdgs.basty@googlemail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * IFF PBM/ILBM bitmap decoder
 */

#include "bytestream.h"
#include "avcodec.h"
#include "get_bits.h"
#include "iff.h"
typedef struct {
    AVFrame frame;
    int planesize;
    uint8_t *planebuf;
    int init; // 1 if buffer and palette data already initialized, 0 otherwise
} IffContext;
#define LUT8_PART(plane, v)                             \
    AV_LE2ME64C(UINT64_C(0x0000000)<<32 | v) << plane,  \
    AV_LE2ME64C(UINT64_C(0x1000000)<<32 | v) << plane,  \
    AV_LE2ME64C(UINT64_C(0x0010000)<<32 | v) << plane,  \
    AV_LE2ME64C(UINT64_C(0x1010000)<<32 | v) << plane,  \
    AV_LE2ME64C(UINT64_C(0x0000100)<<32 | v) << plane,  \
    AV_LE2ME64C(UINT64_C(0x1000100)<<32 | v) << plane,  \
    AV_LE2ME64C(UINT64_C(0x0010100)<<32 | v) << plane,  \
    AV_LE2ME64C(UINT64_C(0x1010100)<<32 | v) << plane,  \
    AV_LE2ME64C(UINT64_C(0x0000001)<<32 | v) << plane,  \
    AV_LE2ME64C(UINT64_C(0x1000001)<<32 | v) << plane,  \
    AV_LE2ME64C(UINT64_C(0x0010001)<<32 | v) << plane,  \
    AV_LE2ME64C(UINT64_C(0x1010001)<<32 | v) << plane,  \
    AV_LE2ME64C(UINT64_C(0x0000101)<<32 | v) << plane,  \
    AV_LE2ME64C(UINT64_C(0x1000101)<<32 | v) << plane,  \
    AV_LE2ME64C(UINT64_C(0x0010101)<<32 | v) << plane,  \
    AV_LE2ME64C(UINT64_C(0x1010101)<<32 | v) << plane
#define LUT8(plane) {                \
    LUT8_PART(plane, 0x0000000),     \
    LUT8_PART(plane, 0x1000000),     \
    LUT8_PART(plane, 0x0010000),     \
    LUT8_PART(plane, 0x1010000),     \
    LUT8_PART(plane, 0x0000100),     \
    LUT8_PART(plane, 0x1000100),     \
    LUT8_PART(plane, 0x0010100),     \
    LUT8_PART(plane, 0x1010100),     \
    LUT8_PART(plane, 0x0000001),     \
    LUT8_PART(plane, 0x1000001),     \
    LUT8_PART(plane, 0x0010001),     \
    LUT8_PART(plane, 0x1010001),     \
    LUT8_PART(plane, 0x0000101),     \
    LUT8_PART(plane, 0x1000101),     \
    LUT8_PART(plane, 0x0010101),     \
    LUT8_PART(plane, 0x1010101),     \
}
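/* plane8_lut[plane][v] expands the bitplane byte v into eight chunky bytes,
 * MSB first (bit 7 of v maps to the leftmost pixel, bit 0 to the rightmost),
 * with each set bit contributing 1 << plane in its output byte, so
 * decodeplane8() can OR a single 64-bit value per source byte. */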
// 8 planes * 8-bit mask
static const uint64_t plane8_lut[8][256] = {
    LUT8(0), LUT8(1), LUT8(2), LUT8(3),
    LUT8(4), LUT8(5), LUT8(6), LUT8(7),
};
#define LUT32(plane) {                                \
              0,          0,          0,          0,  \
              0,          0,          0, 1 << plane,  \
              0,          0, 1 << plane,          0,  \
              0,          0, 1 << plane, 1 << plane,  \
              0, 1 << plane,          0,          0,  \
              0, 1 << plane,          0, 1 << plane,  \
              0, 1 << plane, 1 << plane,          0,  \
              0, 1 << plane, 1 << plane, 1 << plane,  \
     1 << plane,          0,          0,          0,  \
     1 << plane,          0,          0, 1 << plane,  \
     1 << plane,          0, 1 << plane,          0,  \
     1 << plane,          0, 1 << plane, 1 << plane,  \
     1 << plane, 1 << plane,          0,          0,  \
     1 << plane, 1 << plane,          0, 1 << plane,  \
     1 << plane, 1 << plane, 1 << plane,          0,  \
     1 << plane, 1 << plane, 1 << plane, 1 << plane,  \
}
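/* plane32_lut[plane] holds 16 groups of four 32-bit entries, one group per
 * 4-bit nibble value; entry k of a group is 1 << plane when bit (3 - k) of
 * the nibble is set, so decodeplane32() ORs four chunky pixels per nibble,
 * MSB first. */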
// 32 planes * 4-bit mask * 4 lookup tables each
static const uint32_t plane32_lut[32][16*4] = {
    LUT32( 0), LUT32( 1), LUT32( 2), LUT32( 3),
    LUT32( 4), LUT32( 5), LUT32( 6), LUT32( 7),
    LUT32( 8), LUT32( 9), LUT32(10), LUT32(11),
    LUT32(12), LUT32(13), LUT32(14), LUT32(15),
    LUT32(16), LUT32(17), LUT32(18), LUT32(19),
    LUT32(20), LUT32(21), LUT32(22), LUT32(23),
    LUT32(24), LUT32(25), LUT32(26), LUT32(27),
    LUT32(28), LUT32(29), LUT32(30), LUT32(31),
};
// Gray to RGB, required for palette table of grayscale images with bpp < 8
static av_always_inline uint32_t gray2rgb(const uint32_t x) {
    return x << 16 | x << 8 | x;
}
/**
 * Convert CMAP buffer (stored in extradata) to lavc palette format
 */
int ff_cmap_read_palette(AVCodecContext *avctx, uint32_t *pal)
{
    int count, i;

    if (avctx->bits_per_coded_sample > 8) {
        av_log(avctx, AV_LOG_ERROR, "bits_per_coded_sample > 8 not supported\n");
        return AVERROR_INVALIDDATA;
    }

    count = 1 << avctx->bits_per_coded_sample;
    // If extradata is smaller than actually needed, fill the remaining with black.
    count = FFMIN(avctx->extradata_size / 3, count);
    if (count) {
        for (i = 0; i < count; i++) {
            pal[i] = 0xFF000000 | AV_RB24(avctx->extradata + i*3);
        }
    } else { // Create gray-scale color palette for bps < 8
        count = 1 << avctx->bits_per_coded_sample;
        for (i = 0; i < count; i++) {
            pal[i] = 0xFF000000 | gray2rgb((i * 255) >> avctx->bits_per_coded_sample);
        }
    }
    return 0;
}
static av_cold int decode_init(AVCodecContext *avctx)
{
    IffContext *s = avctx->priv_data;
    int err;

    if (avctx->bits_per_coded_sample <= 8) {
        avctx->pix_fmt = (avctx->bits_per_coded_sample < 8 ||
                          avctx->extradata_size) ? PIX_FMT_PAL8
                                                 : PIX_FMT_GRAY8;
    } else if (avctx->bits_per_coded_sample <= 32) {
        avctx->pix_fmt = PIX_FMT_BGR32;
    } else {
        return AVERROR_INVALIDDATA;
    }

    if ((err = avcodec_check_dimensions(avctx, avctx->width, avctx->height)))
        return err;
    s->planesize = FFALIGN(avctx->width, 16) >> 3; // Align plane size in bits to word-boundary
    s->planebuf = av_malloc(s->planesize + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!s->planebuf)
        return AVERROR(ENOMEM);

    s->frame.reference = 1;

    return 0;
}
/**
 * Decode interleaved plane buffer up to 8bpp
 * @param dst Destination buffer
 * @param buf Source buffer
 * @param buf_size Size of the source buffer in bytes
 * @param plane plane number to decode as
 */
static void decodeplane8(uint8_t *dst, const uint8_t *buf, int buf_size, int plane)
{
    const uint64_t *lut = plane8_lut[plane];
    do {
        uint64_t v = AV_RN64A(dst) | lut[*buf++];
        AV_WN64A(dst, v);
        dst += 8;
    } while (--buf_size);
}
/**
 * Decode interleaved plane buffer up to 24bpp
 * @param dst Destination buffer
 * @param buf Source buffer
 * @param buf_size Size of the source buffer in bytes
 * @param plane plane number to decode as
 */
static void decodeplane32(uint32_t *dst, const uint8_t *buf, int buf_size, int plane)
{
    const uint32_t *lut = plane32_lut[plane];
    do {
        unsigned mask = (*buf >> 2) & ~3;
        dst[0] |= lut[mask++];
        dst[1] |= lut[mask++];
        dst[2] |= lut[mask++];
        dst[3] |= lut[mask];
        mask = (*buf++ << 2) & 0x3F;
        dst[4] |= lut[mask++];
        dst[5] |= lut[mask++];
        dst[6] |= lut[mask++];
        dst[7] |= lut[mask];
        dst += 8;
    } while (--buf_size);
}
/**
 * Decode one complete byterun1 encoded line.
 *
 * A control byte n in [0, 127] is followed by n + 1 literal bytes;
 * n in [-127, -1] repeats the next byte 1 - n times; -128 is a no-op.
 *
 * @param dst the destination buffer where the decompressed bitstream is stored
 * @param dst_size the destination plane size in bytes
 * @param buf the source byterun1 compressed bitstream
 * @param buf_end the end of the source byterun1 compressed bitstream
 * @return number of consumed bytes in byterun1 compressed bitstream
 */
static int decode_byterun(uint8_t *dst, int dst_size,
                          const uint8_t *buf, const uint8_t *const buf_end)
{
    const uint8_t *const buf_start = buf;
    unsigned x;
    for (x = 0; x < dst_size && buf < buf_end;) {
        unsigned length;
        const int8_t value = *buf++;
        if (value >= 0) {
            length = value + 1;
            memcpy(dst + x, buf, FFMIN3(length, dst_size - x, buf_end - buf));
            buf += length;
        } else if (value > -128) {
            length = -value + 1;
            memset(dst + x, *buf++, FFMIN(length, dst_size - x));
        } else { // noop
            continue;
        }
        x += length;
    }
    return buf - buf_start;
}
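/* ByteRun1 example (hypothetical input): the stream 0x02 'A' 'B' 'C' 0xFD 'x'
 * decodes to "ABCxxxx": 0x02 copies the next three literal bytes, and
 * 0xFD (-3) repeats the following byte four times. */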
static int decode_frame_ilbm(AVCodecContext *avctx,
                             void *data, int *data_size,
                             AVPacket *avpkt)
{
    IffContext *s = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    const uint8_t *buf_end = buf + buf_size;
    int y, plane, res;

    if (s->init) {
        if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return res;
        }
    } else if ((res = avctx->get_buffer(avctx, &s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return res;
    } else if (avctx->bits_per_coded_sample <= 8 && avctx->pix_fmt != PIX_FMT_GRAY8) {
        if ((res = ff_cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0)
            return res;
    }
    s->init = 1;

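    /* ILBM stores each scanline as bits_per_coded_sample consecutive bitplane
     * rows of planesize bytes each; PBM stores one chunky byte per pixel. */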
    if (avctx->codec_tag == MKTAG('I','L','B','M')) { // interleaved
        if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) {
            for (y = 0; y < avctx->height; y++) {
                uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                memset(row, 0, avctx->width);
                for (plane = 0; plane < avctx->bits_per_coded_sample && buf < buf_end; plane++) {
                    decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), plane);
                    buf += s->planesize;
                }
            }
        } else { // PIX_FMT_BGR32
            for (y = 0; y < avctx->height; y++) {
                uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                memset(row, 0, avctx->width << 2);
                for (plane = 0; plane < avctx->bits_per_coded_sample && buf < buf_end; plane++) {
                    decodeplane32((uint32_t *) row, buf, FFMIN(s->planesize, buf_end - buf), plane);
                    buf += s->planesize;
                }
            }
        }
    } else if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) { // IFF-PBM
        for (y = 0; y < avctx->height; y++) {
            uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
            memcpy(row, buf, FFMIN(avctx->width, buf_end - buf));
            buf += avctx->width + (avctx->width % 2); // padding if odd
        }
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;
    return buf_size;
}
static int decode_frame_byterun1(AVCodecContext *avctx,
                                 void *data, int *data_size,
                                 AVPacket *avpkt)
{
    IffContext *s = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    const uint8_t *buf_end = buf + buf_size;
    int y, plane, res;

    if (s->init) {
        if ((res = avctx->reget_buffer(avctx, &s->frame)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return res;
        }
    } else if ((res = avctx->get_buffer(avctx, &s->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return res;
    } else if (avctx->bits_per_coded_sample <= 8 && avctx->pix_fmt != PIX_FMT_GRAY8) {
        if ((res = ff_cmap_read_palette(avctx, (uint32_t*)s->frame.data[1])) < 0)
            return res;
    }
    s->init = 1;

    if (avctx->codec_tag == MKTAG('I','L','B','M')) { // interleaved
        if (avctx->pix_fmt == PIX_FMT_PAL8 || avctx->pix_fmt == PIX_FMT_GRAY8) {
            for (y = 0; y < avctx->height; y++) {
                uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                memset(row, 0, avctx->width);
                for (plane = 0; plane < avctx->bits_per_coded_sample; plane++) {
                    buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                    decodeplane8(row, s->planebuf, s->planesize, plane);
                }
            }
        } else { // PIX_FMT_BGR32
            for (y = 0; y < avctx->height; y++) {
                uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
                memset(row, 0, avctx->width << 2);
                for (plane = 0; plane < avctx->bits_per_coded_sample; plane++) {
                    buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                    decodeplane32((uint32_t *) row, s->planebuf, s->planesize, plane);
                }
            }
        }
    } else {
        for (y = 0; y < avctx->height; y++) {
            uint8_t *row = &s->frame.data[0][y * s->frame.linesize[0]];
            buf += decode_byterun(row, avctx->width, buf, buf_end);
        }
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;
    return buf_size;
}
static av_cold int decode_end(AVCodecContext *avctx)
{
    IffContext *s = avctx->priv_data;
    if (s->frame.data[0])
        avctx->release_buffer(avctx, &s->frame);
    av_freep(&s->planebuf);
    return 0;
}
AVCodec iff_ilbm_decoder = {
    "iff_ilbm",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_IFF_ILBM,
    sizeof(IffContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame_ilbm,
    CODEC_CAP_DR1,
    .long_name = NULL_IF_CONFIG_SMALL("IFF ILBM"),
};

AVCodec iff_byterun1_decoder = {
    "iff_byterun1",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_IFF_BYTERUN1,
    sizeof(IffContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame_byterun1,
    CODEC_CAP_DR1,
    .long_name = NULL_IF_CONFIG_SMALL("IFF ByteRun1"),
};