/*
 * Flash Screen Video decoder
 * Copyright (C) 2004 Alex Beregszaszi
 * Copyright (C) 2006 Benjamin Larsson
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Flash Screen Video decoder
 * @author Alex Beregszaszi
 * @author Benjamin Larsson
 * @author Daniel Verkamp
 * @author Konstantin Shishkov
 *
 * A description of the bitstream format for Flash Screen Video version 1/2
 * is part of the SWF File Format Specification (version 10), which can be
 * downloaded from http://www.adobe.com/devnet/swf.html.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <zlib.h>

#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "bytestream.h"
#include "get_bits.h"

typedef struct BlockInfo {
    uint8_t *pos;
    int size;
    int unp_size;
} BlockInfo;

typedef struct FlashSVContext {
    AVCodecContext *avctx;
    AVFrame frame;
    int image_width, image_height;
    int block_width, block_height;
    uint8_t *tmpblock;
    int block_size;
    z_stream zstream;
    int ver;
    const uint32_t *pal;
    int is_keyframe;
    uint8_t *keyframedata;
    uint8_t *keyframe;
    BlockInfo *blocks;
    uint8_t *deflate_block;
    int deflate_block_size;
    int color_depth;
    int zlibprime_curr, zlibprime_prev;
    int diff_start, diff_height;
} FlashSVContext;
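
/**
 * Convert one block of "hybrid" packed pixel data into 24-bit BGR samples
 * in the output picture. Each source pixel is either a big-endian 15-bit
 * RGB value (top bit of the first byte set) or an 8-bit palette index.
 *
 * @return number of bytes consumed from the source buffer
 */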
static int decode_hybrid(const uint8_t *sptr, uint8_t *dptr, int dx, int dy,
                         int h, int w, int stride, const uint32_t *pal)
{
    int x, y;
    const uint8_t *orig_src = sptr;

    for (y = dx + h; y > dx; y--) {
        uint8_t *dst = dptr + (y * stride) + dy * 3;
        for (x = 0; x < w; x++) {
            if (*sptr & 0x80) {
                /* 15-bit color */
                unsigned c = AV_RB16(sptr) & ~0x8000;
                unsigned b =  c        & 0x1F;
                unsigned g = (c >>  5) & 0x1F;
                unsigned r =  c >> 10;
                /* 000aaabb -> aaabbaaa */
                *dst++ = (b << 3) | (b >> 2);
                *dst++ = (g << 3) | (g >> 2);
                *dst++ = (r << 3) | (r >> 2);
                sptr += 2;
            } else {
                /* palette index */
                uint32_t c = pal[*sptr++];
                bytestream_put_le24(&dst, c);
            }
        }
    }
    return sptr - orig_src;
}
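
/**
 * Common initialization for both decoder versions: set up the shared zlib
 * inflate stream and select BGR24 output.
 */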
static av_cold int flashsv_decode_init(AVCodecContext *avctx)
{
    FlashSVContext *s = avctx->priv_data;
    int zret; // Zlib return code

    s->avctx          = avctx;
    s->zstream.zalloc = Z_NULL;
    s->zstream.zfree  = Z_NULL;
    s->zstream.opaque = Z_NULL;
    zret = inflateInit(&s->zstream);
    if (zret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
        return 1;
    }
    avctx->pix_fmt   = PIX_FMT_BGR24;
    s->frame.data[0] = NULL;

    return 0;
}
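
/**
 * Prime the shared inflate stream with the contents of a previously decoded
 * block: the reference block is inflated into tmpblock, re-deflated (at
 * compression level 0) into deflate_block, and fed back through the inflate
 * stream, so that subsequent inflate() calls see the reference block's data
 * in their history window.
 */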
static void flashsv2_prime(FlashSVContext *s, uint8_t *src,
                           int size, int unp_size)
{
    z_stream zs;

    zs.zalloc = NULL;
    zs.zfree  = NULL;
    zs.opaque = NULL;

    s->zstream.next_in   = src;
    s->zstream.avail_in  = size;
    s->zstream.next_out  = s->tmpblock;
    s->zstream.avail_out = s->block_size * 3;
    inflate(&s->zstream, Z_SYNC_FLUSH);

    deflateInit(&zs, 0);
    zs.next_in   = s->tmpblock;
    zs.avail_in  = s->block_size * 3 - s->zstream.avail_out;
    zs.next_out  = s->deflate_block;
    zs.avail_out = s->deflate_block_size;
    deflate(&zs, Z_SYNC_FLUSH);
    deflateEnd(&zs);

    inflateReset(&s->zstream);
    s->zstream.next_in   = s->deflate_block;
    s->zstream.avail_in  = s->deflate_block_size - zs.avail_out;
    s->zstream.next_out  = s->tmpblock;
    s->zstream.avail_out = s->block_size * 3;
    inflate(&s->zstream, Z_SYNC_FLUSH);
}
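
/**
 * Inflate one zlib-compressed block from the packet and copy it into the
 * current frame. Plain blocks are stored bottom-up as BGR24 rows; blocks with
 * a non-zero color depth (Flash Screen Video v2 hybrid mode) are converted
 * with decode_hybrid(). On keyframes, the position and size of the compressed
 * block are remembered in s->blocks so later frames can use them for priming.
 */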
static int flashsv_decode_block(AVCodecContext *avctx, AVPacket *avpkt,
                                GetBitContext *gb, int block_size,
                                int width, int height, int x_pos, int y_pos,
                                int blk_idx)
{
    struct FlashSVContext *s = avctx->priv_data;
    uint8_t *line = s->tmpblock;
    int k;
    int ret = inflateReset(&s->zstream);
    if (ret != Z_OK) {
        //return -1;
    }
    if (s->zlibprime_curr || s->zlibprime_prev) {
        flashsv2_prime(s, s->blocks[blk_idx].pos, s->blocks[blk_idx].size,
                       s->blocks[blk_idx].unp_size);
    }
    s->zstream.next_in   = avpkt->data + get_bits_count(gb) / 8;
    s->zstream.avail_in  = block_size;
    s->zstream.next_out  = s->tmpblock;
    s->zstream.avail_out = s->block_size * 3;
    ret = inflate(&s->zstream, Z_FINISH);
    if (ret == Z_DATA_ERROR) {
        av_log(avctx, AV_LOG_ERROR, "Zlib resync occurred\n");
        inflateSync(&s->zstream);
        ret = inflate(&s->zstream, Z_FINISH);
    }

    if (ret != Z_OK && ret != Z_STREAM_END) {
        //return -1;
    }

    if (s->is_keyframe) {
        s->blocks[blk_idx].pos      = s->keyframedata + (get_bits_count(gb) / 8);
        s->blocks[blk_idx].size     = block_size;
        s->blocks[blk_idx].unp_size = s->block_size * 3 - s->zstream.avail_out;
    }
    if (!s->color_depth) {
        /* Flash Screen Video stores the image upside down, so copy
         * lines to destination in reverse order. */
        for (k = 1; k <= s->diff_height; k++) {
            memcpy(s->frame.data[0] + x_pos * 3 +
                   (s->image_height - y_pos - s->diff_start - k) * s->frame.linesize[0],
                   line, width * 3);
            /* advance source pointer to next line */
            line += width * 3;
        }
    } else {
        /* hybrid 15-bit/palette mode */
        decode_hybrid(s->tmpblock, s->frame.data[0],
                      s->image_height - (y_pos + 1 + s->diff_start + s->diff_height),
                      x_pos, s->diff_height, width,
                      s->frame.linesize[0], s->pal);
    }
    skip_bits_long(gb, 8 * block_size); /* skip the consumed bits */
    return 0;
}
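
/**
 * Return an upper bound (via deflateBound) on the deflated size of a buffer
 * of tmpblock_size bytes, or -1 on error. Used to size the scratch buffer
 * needed by flashsv2_prime().
 */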
static int calc_deflate_block_size(int tmpblock_size)
{
    z_stream zstream;
    int size;

    zstream.zalloc = Z_NULL;
    zstream.zfree  = Z_NULL;
    zstream.opaque = Z_NULL;
    if (deflateInit(&zstream, 0) != Z_OK)
        return -1;

    size = deflateBound(&zstream, tmpblock_size);
    deflateEnd(&zstream);

    return size;
}
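
/**
 * Decode one Flash Screen Video frame: parse the frame header (block and
 * image dimensions, plus extra flags for version 2), grow the scratch
 * buffers if the block size changed, then decode every block in the order it
 * appears in the bitstream, skipping blocks whose compressed size is zero
 * (unchanged since the previous frame).
 */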
static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
                                int *data_size, AVPacket *avpkt)
{
    int buf_size = avpkt->size;
    FlashSVContext *s = avctx->priv_data;
    int h_blocks, v_blocks, h_part, v_part, i, j;
    GetBitContext gb;

    /* no supplementary picture */
    if (buf_size == 0)
        return 0;
    if (buf_size < 4)
        return -1;

    init_get_bits(&gb, avpkt->data, buf_size * 8);

    /* start to parse the bitstream */
    s->block_width  = 16 * (get_bits(&gb, 4) + 1);
    s->image_width  = get_bits(&gb, 12);
    s->block_height = 16 * (get_bits(&gb, 4) + 1);
    s->image_height = get_bits(&gb, 12);

    if (s->ver == 2) {
        skip_bits(&gb, 6);
        if (get_bits1(&gb)) {
            av_log_missing_feature(avctx, "iframe", 1);
            return AVERROR_PATCHWELCOME;
        }
        if (get_bits1(&gb)) {
            av_log_missing_feature(avctx, "custom palette", 1);
            return AVERROR_PATCHWELCOME;
        }
    }

    /* calculate number of blocks and size of border (partial) blocks */
    h_blocks = s->image_width  / s->block_width;
    h_part   = s->image_width  % s->block_width;
    v_blocks = s->image_height / s->block_height;
    v_part   = s->image_height % s->block_height;

    /* the block size could change between frames, make sure the buffer
     * is large enough, if not, get a larger one */
    if (s->block_size < s->block_width * s->block_height) {
        int tmpblock_size = 3 * s->block_width * s->block_height;

        s->tmpblock = av_realloc(s->tmpblock, tmpblock_size);
        if (!s->tmpblock) {
            av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
            return AVERROR(ENOMEM);
        }
        if (s->ver == 2) {
            s->deflate_block_size = calc_deflate_block_size(tmpblock_size);
            if (s->deflate_block_size <= 0) {
                av_log(avctx, AV_LOG_ERROR, "Can't determine deflate buffer size.\n");
                return -1;
            }
            s->deflate_block = av_realloc(s->deflate_block, s->deflate_block_size);
            if (!s->deflate_block) {
                av_log(avctx, AV_LOG_ERROR, "Can't allocate deflate buffer.\n");
                return AVERROR(ENOMEM);
            }
        }
    }
    s->block_size = s->block_width * s->block_height;

    /* initialize the image size once */
    if (avctx->width == 0 && avctx->height == 0) {
        avctx->width  = s->image_width;
        avctx->height = s->image_height;
    }

    /* check for changes of image width and image height */
    if (avctx->width != s->image_width || avctx->height != s->image_height) {
        av_log(avctx, AV_LOG_ERROR,
               "Frame width or height differs from first frame!\n");
        av_log(avctx, AV_LOG_ERROR, "fh = %d, fv %d vs ch = %d, cv = %d\n",
               avctx->height, avctx->width, s->image_height, s->image_width);
        return AVERROR_INVALIDDATA;
    }

    /* we care for keyframes only in Screen Video v2 */
    s->is_keyframe = (avpkt->flags & AV_PKT_FLAG_KEY) && (s->ver == 2);
    if (s->is_keyframe) {
        s->keyframedata = av_realloc(s->keyframedata, avpkt->size);
        memcpy(s->keyframedata, avpkt->data, avpkt->size);
        s->blocks = av_realloc(s->blocks,
                               (v_blocks + !!v_part) * (h_blocks + !!h_part) *
                               sizeof(s->blocks[0]));
    }

    av_dlog(avctx, "image: %dx%d block: %dx%d num: %dx%d part: %dx%d\n",
            s->image_width, s->image_height, s->block_width, s->block_height,
            h_blocks, v_blocks, h_part, v_part);

    s->frame.reference    = 3;
    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID |
                            FF_BUFFER_HINTS_PRESERVE |
                            FF_BUFFER_HINTS_REUSABLE;
    if (avctx->reget_buffer(avctx, &s->frame) < 0) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    /* loop over all block rows */
    for (j = 0; j < v_blocks + (v_part ? 1 : 0); j++) {
        int y_pos = j * s->block_height; // vertical position in frame
        int cur_blk_height = (j < v_blocks) ? s->block_height : v_part;

        /* loop over all block columns */
        for (i = 0; i < h_blocks + (h_part ? 1 : 0); i++) {
            int x_pos = i * s->block_width; // horizontal position in frame
            int cur_blk_width = (i < h_blocks) ? s->block_width : h_part;
            int has_diff = 0;

            /* get the size of the compressed zlib chunk */
            int size = get_bits(&gb, 16);

            s->color_depth    = 0;
            s->zlibprime_curr = 0;
            s->zlibprime_prev = 0;
            s->diff_start     = 0;
            s->diff_height    = cur_blk_height;

            if (8 * size > get_bits_left(&gb)) {
                avctx->release_buffer(avctx, &s->frame);
                s->frame.data[0] = NULL;
                return AVERROR_INVALIDDATA;
            }

            if (s->ver == 2 && size) {
                skip_bits(&gb, 3);
                s->color_depth    = get_bits(&gb, 2);
                has_diff          = get_bits1(&gb);
                s->zlibprime_curr = get_bits1(&gb);
                s->zlibprime_prev = get_bits1(&gb);

                if (s->color_depth != 0 && s->color_depth != 2) {
                    av_log(avctx, AV_LOG_ERROR,
                           "%dx%d invalid color depth %d\n",
                           i, j, s->color_depth);
                    return AVERROR_INVALIDDATA;
                }

                if (has_diff) {
                    s->diff_start  = get_bits(&gb, 8);
                    s->diff_height = get_bits(&gb, 8);
                    av_log(avctx, AV_LOG_DEBUG,
                           "%dx%d diff start %d height %d\n",
                           i, j, s->diff_start, s->diff_height);
                    size -= 2;
                }

                if (s->zlibprime_prev)
                    av_log(avctx, AV_LOG_DEBUG, "%dx%d zlibprime_prev\n", i, j);

                if (s->zlibprime_curr) {
                    int col = get_bits(&gb, 8);
                    int row = get_bits(&gb, 8);
                    av_log(avctx, AV_LOG_DEBUG, "%dx%d zlibprime_curr %dx%d\n",
                           i, j, col, row);
                    size -= 2;
                    av_log_missing_feature(avctx, "zlibprime_curr", 1);
                    return AVERROR_PATCHWELCOME;
                }
                size--; // account for flags byte
            }

            if (has_diff) {
                int k;
                int off = (s->image_height - y_pos - 1) * s->frame.linesize[0];

                for (k = 0; k < cur_blk_height; k++)
                    memcpy(s->frame.data[0] + off - k * s->frame.linesize[0] + x_pos * 3,
                           s->keyframe + off - k * s->frame.linesize[0] + x_pos * 3,
                           cur_blk_width * 3);
            }

            /* skip unchanged blocks, which have size 0 */
            if (size) {
                if (flashsv_decode_block(avctx, avpkt, &gb, size,
                                         cur_blk_width, cur_blk_height,
                                         x_pos, y_pos,
                                         i + j * (h_blocks + !!h_part)))
                    av_log(avctx, AV_LOG_ERROR,
                           "error in decompression of block %dx%d\n", i, j);
            }
        }
    }

    if (s->is_keyframe && s->ver == 2) {
        if (!s->keyframe) {
            s->keyframe = av_malloc(s->frame.linesize[0] * avctx->height);
            if (!s->keyframe) {
                av_log(avctx, AV_LOG_ERROR, "Cannot allocate image data\n");
                return AVERROR(ENOMEM);
            }
        }
        memcpy(s->keyframe, s->frame.data[0], s->frame.linesize[0] * avctx->height);
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame *)data = s->frame;

    if ((get_bits_count(&gb) / 8) != buf_size)
        av_log(avctx, AV_LOG_ERROR, "buffer not fully consumed (%d != %d)\n",
               buf_size, (get_bits_count(&gb) / 8));

    /* report that the buffer was completely consumed */
    return buf_size;
}

static av_cold int flashsv_decode_end(AVCodecContext *avctx)
{
    FlashSVContext *s = avctx->priv_data;

    inflateEnd(&s->zstream);
    /* release the frame if needed */
    if (s->frame.data[0])
        avctx->release_buffer(avctx, &s->frame);

    /* free the tmpblock */
    av_free(s->tmpblock);

    return 0;
}

#if CONFIG_FLASHSV_DECODER
AVCodec ff_flashsv_decoder = {
    .name           = "flashsv",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_FLASHSV,
    .priv_data_size = sizeof(FlashSVContext),
    .init           = flashsv_decode_init,
    .close          = flashsv_decode_end,
    .decode         = flashsv_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .pix_fmts       = (const enum PixelFormat[]) { PIX_FMT_BGR24, PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("Flash Screen Video v1"),
};
#endif /* CONFIG_FLASHSV_DECODER */

#if CONFIG_FLASHSV2_DECODER
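/* Default palette used for the 8-bit palette indices of Flash Screen Video v2
 * hybrid blocks, apparently matching the default palette given in the SWF
 * File Format Specification. Entries are stored as 0xRRGGBB. */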
static const uint32_t ff_flashsv2_default_palette[128] = {
    0x000000, 0x333333, 0x666666, 0x999999, 0xCCCCCC, 0xFFFFFF,
    0x330000, 0x660000, 0x990000, 0xCC0000, 0xFF0000, 0x003300,
    0x006600, 0x009900, 0x00CC00, 0x00FF00, 0x000033, 0x000066,
    0x000099, 0x0000CC, 0x0000FF, 0x333300, 0x666600, 0x999900,
    0xCCCC00, 0xFFFF00, 0x003333, 0x006666, 0x009999, 0x00CCCC,
    0x00FFFF, 0x330033, 0x660066, 0x990099, 0xCC00CC, 0xFF00FF,
    0xFFFF33, 0xFFFF66, 0xFFFF99, 0xFFFFCC, 0xFF33FF, 0xFF66FF,
    0xFF99FF, 0xFFCCFF, 0x33FFFF, 0x66FFFF, 0x99FFFF, 0xCCFFFF,
    0xCCCC33, 0xCCCC66, 0xCCCC99, 0xCCCCFF, 0xCC33CC, 0xCC66CC,
    0xCC99CC, 0xCCFFCC, 0x33CCCC, 0x66CCCC, 0x99CCCC, 0xFFCCCC,
    0x999933, 0x999966, 0x9999CC, 0x9999FF, 0x993399, 0x996699,
    0x99CC99, 0x99FF99, 0x339999, 0x669999, 0xCC9999, 0xFF9999,
    0x666633, 0x666699, 0x6666CC, 0x6666FF, 0x663366, 0x669966,
    0x66CC66, 0x66FF66, 0x336666, 0x996666, 0xCC6666, 0xFF6666,
    0x333366, 0x333399, 0x3333CC, 0x3333FF, 0x336633, 0x339933,
    0x33CC33, 0x33FF33, 0x663333, 0x993333, 0xCC3333, 0xFF3333,
    0x003366, 0x336600, 0x660033, 0x006633, 0x330066, 0x663300,
    0x336699, 0x669933, 0x993366, 0x339966, 0x663399, 0x996633,
    0x6699CC, 0x99CC66, 0xCC6699, 0x66CC99, 0x9966CC, 0xCC9966,
    0x99CCFF, 0xCCFF99, 0xFF99CC, 0x99FFCC, 0xCC99FF, 0xFFCC99,
    0x111111, 0x222222, 0x444444, 0x555555, 0xAAAAAA, 0xBBBBBB,
    0xDDDDDD, 0xEEEEEE
};

static av_cold int flashsv2_decode_init(AVCodecContext *avctx)
{
    FlashSVContext *s = avctx->priv_data;

    flashsv_decode_init(avctx);
    s->pal = ff_flashsv2_default_palette;
    s->ver = 2;

    return 0;
}

static av_cold int flashsv2_decode_end(AVCodecContext *avctx)
{
    FlashSVContext *s = avctx->priv_data;

    av_freep(&s->keyframedata);
    av_freep(&s->blocks);
    av_freep(&s->keyframe);
    av_freep(&s->deflate_block);
    flashsv_decode_end(avctx);

    return 0;
}

AVCodec ff_flashsv2_decoder = {
    .name           = "flashsv2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_FLASHSV2,
    .priv_data_size = sizeof(FlashSVContext),
    .init           = flashsv2_decode_init,
    .close          = flashsv2_decode_end,
    .decode         = flashsv_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .pix_fmts       = (const enum PixelFormat[]) { PIX_FMT_BGR24, PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("Flash Screen Video v2"),
};
#endif /* CONFIG_FLASHSV2_DECODER */