/*
 * Flash Screen Video decoder
 * Copyright (C) 2004 Alex Beregszaszi
 * Copyright (C) 2006 Benjamin Larsson
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Flash Screen Video decoder
 * @author Alex Beregszaszi
 * @author Benjamin Larsson
 * @author Daniel Verkamp
 * @author Konstantin Shishkov
 *
 * A description of the bitstream format for Flash Screen Video version 1/2
 * is part of the SWF File Format Specification (version 10), which can be
 * downloaded from http://www.adobe.com/devnet/swf.html.
 */

#include <stdio.h>
#include <stdlib.h>
#include <zlib.h>

#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "bytestream.h"
#include "get_bits.h"
typedef struct BlockInfo {
    uint8_t *pos;
    int      size;
    int      unp_size;
} BlockInfo;

typedef struct FlashSVContext {
    AVCodecContext *avctx;
    AVFrame         frame;
    int             image_width, image_height;
    int             block_width, block_height;
    uint8_t        *tmpblock;
    int             block_size;
    z_stream        zstream;
    int             ver;
    const uint32_t *pal;
    int             is_keyframe;
    uint8_t        *keyframedata;
    uint8_t        *keyframe;
    BlockInfo      *blocks;
    uint8_t        *deflate_block;
    int             deflate_block_size;
    int             color_depth;
    int             zlibprime_curr, zlibprime_prev;
    int             diff_start, diff_height;
} FlashSVContext;
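
/* Decode a block in FSV2 "hybrid" color mode: each pixel is either a
 * 15-bit RGB value (high bit of the first byte set) or a 7-bit palette
 * index, written out as BGR24. Returns the number of source bytes
 * consumed. */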
static int decode_hybrid(const uint8_t *sptr, uint8_t *dptr, int dx, int dy,
                         int h, int w, int stride, const uint32_t *pal)
{
    int x, y;
    const uint8_t *orig_src = sptr;

    for (y = dx + h; y > dx; y--) {
        uint8_t *dst = dptr + (y * stride) + dy * 3;

        for (x = 0; x < w; x++) {
            if (*sptr & 0x80) {
                /* 15-bit color */
                unsigned c = AV_RB16(sptr) & ~0x8000;
                unsigned b =  c        & 0x1F;
                unsigned g = (c >>  5) & 0x1F;
                unsigned r =  c >> 10;
                /* 000aaabb -> aaabbaaa  */
                *dst++ = (b << 3) | (b >> 2);
                *dst++ = (g << 3) | (g >> 2);
                *dst++ = (r << 3) | (r >> 2);
                sptr += 2;
            } else {
                /* palette index */
                uint32_t c = pal[*sptr++];
                bytestream_put_le24(&dst, c);
            }
        }
    }
    return sptr - orig_src;
}

static av_cold int flashsv_decode_init(AVCodecContext *avctx)
{
    FlashSVContext *s = avctx->priv_data;
    int zret; // Zlib return code

    s->avctx          = avctx;
    s->zstream.zalloc = Z_NULL;
    s->zstream.zfree  = Z_NULL;
    s->zstream.opaque = Z_NULL;
    zret = inflateInit(&s->zstream);
    if (zret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
        return 1;
    }
    avctx->pix_fmt = PIX_FMT_BGR24;
    avcodec_get_frame_defaults(&s->frame);
    s->frame.data[0] = NULL;

    return 0;
}
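
/* Prime the shared inflate state with previously decoded block data: the
 * stored compressed block is inflated into tmpblock, re-deflated at level 0
 * into deflate_block, and that stream is fed back through the inflate
 * context so its history window holds the old block's pixels before the
 * new block is decompressed. */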
static void flashsv2_prime(FlashSVContext *s, uint8_t *src,
                           int size, int unp_size)
{
    z_stream zs;

    zs.zalloc = NULL;
    zs.zfree  = NULL;
    zs.opaque = NULL;

    s->zstream.next_in   = src;
    s->zstream.avail_in  = size;
    s->zstream.next_out  = s->tmpblock;
    s->zstream.avail_out = s->block_size * 3;
    inflate(&s->zstream, Z_SYNC_FLUSH);

    deflateInit(&zs, 0);
    zs.next_in   = s->tmpblock;
    zs.avail_in  = s->block_size * 3 - s->zstream.avail_out;
    zs.next_out  = s->deflate_block;
    zs.avail_out = s->deflate_block_size;
    deflate(&zs, Z_SYNC_FLUSH);
    deflateEnd(&zs);

    inflateReset(&s->zstream);

    s->zstream.next_in   = s->deflate_block;
    s->zstream.avail_in  = s->deflate_block_size - zs.avail_out;
    s->zstream.next_out  = s->tmpblock;
    s->zstream.avail_out = s->block_size * 3;
    inflate(&s->zstream, Z_SYNC_FLUSH);
}
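
/* Inflate one block from the packet and copy it into the output frame,
 * either as raw bottom-up BGR24 lines or via decode_hybrid() when a
 * nonzero color depth is signalled. */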
static int flashsv_decode_block(AVCodecContext *avctx, AVPacket *avpkt,
                                GetBitContext *gb, int block_size,
                                int width, int height, int x_pos, int y_pos,
                                int blk_idx)
{
    struct FlashSVContext *s = avctx->priv_data;
    uint8_t *line = s->tmpblock;
    int k;
    int ret = inflateReset(&s->zstream);
    if (ret != Z_OK) {
        //return -1;
    }
    if (s->zlibprime_curr || s->zlibprime_prev) {
        flashsv2_prime(s, s->blocks[blk_idx].pos, s->blocks[blk_idx].size,
                       s->blocks[blk_idx].unp_size);
    }
    s->zstream.next_in   = avpkt->data + get_bits_count(gb) / 8;
    s->zstream.avail_in  = block_size;
    s->zstream.next_out  = s->tmpblock;
    s->zstream.avail_out = s->block_size * 3;
    ret = inflate(&s->zstream, Z_FINISH);
    if (ret == Z_DATA_ERROR) {
        av_log(avctx, AV_LOG_ERROR, "Zlib resync occurred\n");
        inflateSync(&s->zstream);
        ret = inflate(&s->zstream, Z_FINISH);
    }

    if (ret != Z_OK && ret != Z_STREAM_END) {
        //return -1;
    }

    if (s->is_keyframe) {
        s->blocks[blk_idx].pos      = s->keyframedata + (get_bits_count(gb) / 8);
        s->blocks[blk_idx].size     = block_size;
        s->blocks[blk_idx].unp_size = s->block_size * 3 - s->zstream.avail_out;
    }
    if (!s->color_depth) {
        /* Flash Screen Video stores the image upside down, so copy
         * lines to destination in reverse order. */
        for (k = 1; k <= s->diff_height; k++) {
            memcpy(s->frame.data[0] + x_pos * 3 +
                   (s->image_height - y_pos - s->diff_start - k) * s->frame.linesize[0],
                   line, width * 3);
            /* advance source pointer to next line */
            line += width * 3;
        }
    } else {
        /* hybrid 15-bit/palette mode */
        decode_hybrid(s->tmpblock, s->frame.data[0],
                      s->image_height - (y_pos + 1 + s->diff_start + s->diff_height),
                      x_pos, s->diff_height, width,
                      s->frame.linesize[0], s->pal);
    }
    skip_bits_long(gb, 8 * block_size); /* skip the consumed bits */
    return 0;
}
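
/* Upper bound (via deflateBound()) on the size of a level-0 deflate of
 * tmpblock_size bytes; used to size the buffer flashsv2_prime() deflates
 * into. Returns a negative value if deflateInit() fails. */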
static int calc_deflate_block_size(int tmpblock_size)
{
    z_stream zstream;
    int size;

    zstream.zalloc = Z_NULL;
    zstream.zfree  = Z_NULL;
    zstream.opaque = Z_NULL;
    if (deflateInit(&zstream, 0) != Z_OK)
        return -1;

    size = deflateBound(&zstream, tmpblock_size);
    deflateEnd(&zstream);

    return size;
}
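
/* Decode one packet: parse the frame header (block/image dimensions and,
 * for FSV2, the extra flag bits), grow the scratch buffers if needed, then
 * walk the block grid and pass every non-empty block to
 * flashsv_decode_block(). */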
static int flashsv_decode_frame(AVCodecContext *avctx, void *data,
                                int *data_size, AVPacket *avpkt)
{
    int buf_size = avpkt->size;
    FlashSVContext *s = avctx->priv_data;
    int h_blocks, v_blocks, h_part, v_part, i, j;
    GetBitContext gb;

    /* no supplementary picture */
    if (buf_size == 0)
        return 0;
    if (buf_size < 4)
        return -1;

    init_get_bits(&gb, avpkt->data, buf_size * 8);

    /* start to parse the bitstream */
    s->block_width  = 16 * (get_bits(&gb, 4) + 1);
    s->image_width  = get_bits(&gb, 12);
    s->block_height = 16 * (get_bits(&gb, 4) + 1);
    s->image_height = get_bits(&gb, 12);

    if (s->ver == 2) {
        skip_bits(&gb, 6);
        if (get_bits1(&gb)) {
            av_log_missing_feature(avctx, "iframe", 1);
            return AVERROR_PATCHWELCOME;
        }
        if (get_bits1(&gb)) {
            av_log_missing_feature(avctx, "custom palette", 1);
            return AVERROR_PATCHWELCOME;
        }
    }

    /* calculate number of blocks and size of border (partial) blocks */
    h_blocks = s->image_width  / s->block_width;
    h_part   = s->image_width  % s->block_width;
    v_blocks = s->image_height / s->block_height;
    v_part   = s->image_height % s->block_height;

    /* the block size could change between frames, make sure the buffer
     * is large enough, if not, get a larger one */
    if (s->block_size < s->block_width * s->block_height) {
        int tmpblock_size = 3 * s->block_width * s->block_height;

        s->tmpblock = av_realloc(s->tmpblock, tmpblock_size);
        if (!s->tmpblock) {
            av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
            return AVERROR(ENOMEM);
        }
        if (s->ver == 2) {
            s->deflate_block_size = calc_deflate_block_size(tmpblock_size);
            if (s->deflate_block_size <= 0) {
                av_log(avctx, AV_LOG_ERROR, "Can't determine deflate buffer size.\n");
                return -1;
            }
            s->deflate_block = av_realloc(s->deflate_block, s->deflate_block_size);
            if (!s->deflate_block) {
                av_log(avctx, AV_LOG_ERROR, "Can't allocate deflate buffer.\n");
                return AVERROR(ENOMEM);
            }
        }
    }
    s->block_size = s->block_width * s->block_height;

    /* initialize the image size once */
    if (avctx->width == 0 && avctx->height == 0) {
        avctx->width  = s->image_width;
        avctx->height = s->image_height;
    }

    /* check for changes of image width and image height */
    if (avctx->width != s->image_width || avctx->height != s->image_height) {
        av_log(avctx, AV_LOG_ERROR,
               "Frame width or height differs from first frame!\n");
        av_log(avctx, AV_LOG_ERROR, "fh = %d, fv %d vs ch = %d, cv = %d\n",
               avctx->height, avctx->width, s->image_height, s->image_width);
        return AVERROR_INVALIDDATA;
    }

    /* we care for keyframes only in Screen Video v2 */
    s->is_keyframe = (avpkt->flags & AV_PKT_FLAG_KEY) && (s->ver == 2);
    if (s->is_keyframe) {
        s->keyframedata = av_realloc(s->keyframedata, avpkt->size);
        memcpy(s->keyframedata, avpkt->data, avpkt->size);
        s->blocks = av_realloc(s->blocks,
                               (v_blocks + !!v_part) * (h_blocks + !!h_part) *
                               sizeof(s->blocks[0]));
    }

    av_dlog(avctx, "image: %dx%d block: %dx%d num: %dx%d part: %dx%d\n",
            s->image_width, s->image_height, s->block_width, s->block_height,
            h_blocks, v_blocks, h_part, v_part);

    s->frame.reference    = 3;
    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID |
                            FF_BUFFER_HINTS_PRESERVE |
                            FF_BUFFER_HINTS_REUSABLE;
    if (avctx->reget_buffer(avctx, &s->frame) < 0) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    /* loop over all block rows */
    for (j = 0; j < v_blocks + (v_part ? 1 : 0); j++) {
        int y_pos = j * s->block_height; // vertical position in frame
        int cur_blk_height = (j < v_blocks) ? s->block_height : v_part;

        /* loop over all block columns */
        for (i = 0; i < h_blocks + (h_part ? 1 : 0); i++) {
            int x_pos = i * s->block_width; // horizontal position in frame
            int cur_blk_width = (i < h_blocks) ? s->block_width : h_part;
            int has_diff = 0;

            /* get the size of the compressed zlib chunk */
            int size = get_bits(&gb, 16);

            s->color_depth    = 0;
            s->zlibprime_curr = 0;
            s->zlibprime_prev = 0;
            s->diff_start     = 0;
            s->diff_height    = cur_blk_height;

            if (8 * size > get_bits_left(&gb)) {
                avctx->release_buffer(avctx, &s->frame);
                s->frame.data[0] = NULL;
                return AVERROR_INVALIDDATA;
            }

            if (s->ver == 2 && size) {
                skip_bits(&gb, 3);
                s->color_depth    = get_bits(&gb, 2);
                has_diff          = get_bits1(&gb);
                s->zlibprime_curr = get_bits1(&gb);
                s->zlibprime_prev = get_bits1(&gb);

                if (s->color_depth != 0 && s->color_depth != 2) {
                    av_log(avctx, AV_LOG_ERROR,
                           "%dx%d invalid color depth %d\n",
                           i, j, s->color_depth);
                    return AVERROR_INVALIDDATA;
                }

                if (has_diff) {
                    s->diff_start  = get_bits(&gb, 8);
                    s->diff_height = get_bits(&gb, 8);
                    av_log(avctx, AV_LOG_DEBUG,
                           "%dx%d diff start %d height %d\n",
                           i, j, s->diff_start, s->diff_height);
                    size -= 2;
                }

                if (s->zlibprime_prev)
                    av_log(avctx, AV_LOG_DEBUG, "%dx%d zlibprime_prev\n", i, j);

                if (s->zlibprime_curr) {
                    int col = get_bits(&gb, 8);
                    int row = get_bits(&gb, 8);
                    av_log(avctx, AV_LOG_DEBUG, "%dx%d zlibprime_curr %dx%d\n",
                           i, j, col, row);
                    size -= 2;
                    av_log_missing_feature(avctx, "zlibprime_curr", 1);
                    return AVERROR_PATCHWELCOME;
                }
                size--; // account for flags byte
            }

            if (has_diff) {
                int k;
                int off = (s->image_height - y_pos - 1) * s->frame.linesize[0];

                for (k = 0; k < cur_blk_height; k++)
                    memcpy(s->frame.data[0] + off - k * s->frame.linesize[0] + x_pos * 3,
                           s->keyframe + off - k * s->frame.linesize[0] + x_pos * 3,
                           cur_blk_width * 3);
            }

            /* skip unchanged blocks, which have size 0 */
            if (size) {
                if (flashsv_decode_block(avctx, avpkt, &gb, size,
                                         cur_blk_width, cur_blk_height,
                                         x_pos, y_pos,
                                         i + j * (h_blocks + !!h_part)))
                    av_log(avctx, AV_LOG_ERROR,
                           "error in decompression of block %dx%d\n", i, j);
            }
        }
    }

    if (s->is_keyframe && s->ver == 2) {
        if (!s->keyframe) {
            s->keyframe = av_malloc(s->frame.linesize[0] * avctx->height);
            if (!s->keyframe) {
                av_log(avctx, AV_LOG_ERROR, "Cannot allocate image data\n");
                return AVERROR(ENOMEM);
            }
        }
        memcpy(s->keyframe, s->frame.data[0], s->frame.linesize[0] * avctx->height);
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame *)data = s->frame;

    if ((get_bits_count(&gb) / 8) != buf_size)
        av_log(avctx, AV_LOG_ERROR, "buffer not fully consumed (%d != %d)\n",
               buf_size, (get_bits_count(&gb) / 8));

    /* report that the buffer was completely consumed */
    return buf_size;
}

static av_cold int flashsv_decode_end(AVCodecContext *avctx)
{
    FlashSVContext *s = avctx->priv_data;
    inflateEnd(&s->zstream);
    /* release the frame if needed */
    if (s->frame.data[0])
        avctx->release_buffer(avctx, &s->frame);

    /* free the tmpblock */
    av_free(s->tmpblock);

    return 0;
}

#if CONFIG_FLASHSV_DECODER
AVCodec ff_flashsv_decoder = {
    .name           = "flashsv",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FLASHSV,
    .priv_data_size = sizeof(FlashSVContext),
    .init           = flashsv_decode_init,
    .close          = flashsv_decode_end,
    .decode         = flashsv_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .pix_fmts       = (const enum PixelFormat[]){ PIX_FMT_BGR24, PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("Flash Screen Video v1"),
};
#endif /* CONFIG_FLASHSV_DECODER */

#if CONFIG_FLASHSV2_DECODER
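/* Default 128-entry palette; assigned in flashsv2_decode_init() and
 * indexed by decode_hybrid() with the 7-bit palette pixels. */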
static const uint32_t ff_flashsv2_default_palette[128] = {
    0x000000, 0x333333, 0x666666, 0x999999, 0xCCCCCC, 0xFFFFFF,
    0x330000, 0x660000, 0x990000, 0xCC0000, 0xFF0000, 0x003300,
    0x006600, 0x009900, 0x00CC00, 0x00FF00, 0x000033, 0x000066,
    0x000099, 0x0000CC, 0x0000FF, 0x333300, 0x666600, 0x999900,
    0xCCCC00, 0xFFFF00, 0x003333, 0x006666, 0x009999, 0x00CCCC,
    0x00FFFF, 0x330033, 0x660066, 0x990099, 0xCC00CC, 0xFF00FF,
    0xFFFF33, 0xFFFF66, 0xFFFF99, 0xFFFFCC, 0xFF33FF, 0xFF66FF,
    0xFF99FF, 0xFFCCFF, 0x33FFFF, 0x66FFFF, 0x99FFFF, 0xCCFFFF,
    0xCCCC33, 0xCCCC66, 0xCCCC99, 0xCCCCFF, 0xCC33CC, 0xCC66CC,
    0xCC99CC, 0xCCFFCC, 0x33CCCC, 0x66CCCC, 0x99CCCC, 0xFFCCCC,
    0x999933, 0x999966, 0x9999CC, 0x9999FF, 0x993399, 0x996699,
    0x99CC99, 0x99FF99, 0x339999, 0x669999, 0xCC9999, 0xFF9999,
    0x666633, 0x666699, 0x6666CC, 0x6666FF, 0x663366, 0x669966,
    0x66CC66, 0x66FF66, 0x336666, 0x996666, 0xCC6666, 0xFF6666,
    0x333366, 0x333399, 0x3333CC, 0x3333FF, 0x336633, 0x339933,
    0x33CC33, 0x33FF33, 0x663333, 0x993333, 0xCC3333, 0xFF3333,
    0x003366, 0x336600, 0x660033, 0x006633, 0x330066, 0x663300,
    0x336699, 0x669933, 0x993366, 0x339966, 0x663399, 0x996633,
    0x6699CC, 0x99CC66, 0xCC6699, 0x66CC99, 0x9966CC, 0xCC9966,
    0x99CCFF, 0xCCFF99, 0xFF99CC, 0x99FFCC, 0xCC99FF, 0xFFCC99,
    0x111111, 0x222222, 0x444444, 0x555555, 0xAAAAAA, 0xBBBBBB,
    0xDDDDDD, 0xEEEEEE
};

static av_cold int flashsv2_decode_init(AVCodecContext *avctx)
{
    FlashSVContext *s = avctx->priv_data;
    flashsv_decode_init(avctx);
    s->pal = ff_flashsv2_default_palette;
    s->ver = 2;

    return 0;
}

static av_cold int flashsv2_decode_end(AVCodecContext *avctx)
{
    FlashSVContext *s = avctx->priv_data;

    av_freep(&s->keyframedata);
    av_freep(&s->blocks);
    av_freep(&s->keyframe);
    av_freep(&s->deflate_block);
    flashsv_decode_end(avctx);

    return 0;
}

AVCodec ff_flashsv2_decoder = {
    .name           = "flashsv2",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FLASHSV2,
    .priv_data_size = sizeof(FlashSVContext),
    .init           = flashsv2_decode_init,
    .close          = flashsv2_decode_end,
    .decode         = flashsv_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .pix_fmts       = (const enum PixelFormat[]){ PIX_FMT_BGR24, PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("Flash Screen Video v2"),
};
#endif /* CONFIG_FLASHSV2_DECODER */