You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

587 lines
17KB

  1. /*
  2. * Sierra VMD Audio & Video Decoders
  3. * Copyright (C) 2004 the ffmpeg project
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * Sierra VMD audio & video decoders
  24. * by Vladimir "VAG" Gneushev (vagsoft at mail.ru)
  25. * for more information on the Sierra VMD format, visit:
  26. * http://www.pcisys.net/~melanson/codecs/
  27. *
  28. * The video decoder outputs PAL8 colorspace data. The decoder expects
  29. * a 0x330-byte VMD file header to be transmitted via extradata during
  30. * codec initialization. Each encoded frame that is sent to this decoder
  31. * is expected to be prepended with the appropriate 16-byte frame
  32. * information record from the VMD file.
  33. *
  34. * The audio decoder, like the video decoder, expects each encoded data
  35. * chunk to be prepended with the appropriate 16-byte frame information
  36. * record from the VMD file. It does not require the 0x330-byte VMD file
  37. * header, but it does need the audio setup parameters passed in through
  38. * normal libavcodec API means.
  39. */
  40. #include <stdio.h>
  41. #include <stdlib.h>
  42. #include <string.h>
  43. #include "libavutil/intreadwrite.h"
  44. #include "avcodec.h"
  45. #define VMD_HEADER_SIZE 0x330
  46. #define PALETTE_COUNT 256
  47. /*
  48. * Video Decoder
  49. */
/* Persistent state for the VMD video decoder. */
typedef struct VmdVideoContext {
    AVCodecContext *avctx;
    AVFrame frame;        /* frame currently being decoded into */
    AVFrame prev_frame;   /* previously decoded frame, source for interframe copies */
    const unsigned char *buf;  /* current input packet (not owned by this struct) */
    int size;                  /* size of the current packet in bytes */
    unsigned char palette[PALETTE_COUNT * 4];  /* current palette, 4 bytes/entry */
    unsigned char *unpack_buffer;  /* scratch output buffer for lz_unpack() */
    int unpack_buffer_size;        /* capacity of unpack_buffer, from the VMD header */
    int x_off, y_off;   /* global offset applied to per-frame coordinates (see vmd_decode) */
} VmdVideoContext;
  61. #define QUEUE_SIZE 0x1000
  62. #define QUEUE_MASK 0x0FFF
/**
 * Decompress an LZ-packed frame into dest.
 *
 * This is an LZSS-style scheme: a tag byte supplies 8 flag bits; a set bit
 * means "copy one literal byte", a clear bit means "copy a back-reference
 * chain out of a 0x1000-byte ring buffer (queue)".  The special tag 0xFF
 * with enough data left copies 8 raw literals at once.
 *
 * @param src      packed input; starts with a little-endian 32-bit count of
 *                 output bytes, optionally followed by the 0x56781234
 *                 signature that selects alternate start position/escape
 *                 length parameters
 * @param dest     output buffer
 * @param dest_len capacity of dest in bytes; writes are clamped to this
 *
 * NOTE(review): there is no bound on reads from src — a truncated or
 * malicious input can be overread.  Fixing that needs a src_len parameter
 * (an interface change), so it is only flagged here; verify callers always
 * pass trustworthy sizes.
 */
static void lz_unpack(const unsigned char *src, unsigned char *dest, int dest_len)
{
    const unsigned char *s;
    unsigned char *d;
    unsigned char *d_end;
    unsigned char queue[QUEUE_SIZE];   /* ring buffer of recent output */
    unsigned int qpos;                 /* current write position in queue */
    unsigned int dataleft;             /* output bytes still to produce */
    unsigned int chainofs;             /* back-reference start within queue */
    unsigned int chainlen;             /* back-reference length */
    unsigned int speclen;              /* escape length triggering extended chains */
    unsigned char tag;
    unsigned int i, j;

    s = src;
    d = dest;
    d_end = d + dest_len;
    dataleft = AV_RL32(s);   /* declared size of the unpacked data */
    s += 4;
    memset(queue, 0x20, QUEUE_SIZE);   /* history initialized to spaces */
    if (AV_RL32(s) == 0x56781234) {
        /* signature present: alternate queue origin and an escape length */
        s += 4;
        qpos = 0x111;
        speclen = 0xF + 3;
    } else {
        qpos = 0xFEE;
        speclen = 100; /* no speclen */
    }

    while (dataleft > 0) {
        tag = *s++;
        if ((tag == 0xFF) && (dataleft > 8)) {
            /* shortcut: 8 literal bytes, no per-bit flags */
            if (d + 8 > d_end)
                return;
            for (i = 0; i < 8; i++) {
                queue[qpos++] = *d++ = *s++;
                qpos &= QUEUE_MASK;
            }
            dataleft -= 8;
        } else {
            /* consume the 8 flag bits, LSB first */
            for (i = 0; i < 8; i++) {
                if (dataleft == 0)
                    break;
                if (tag & 0x01) {
                    /* literal byte, also recorded in the history queue */
                    if (d + 1 > d_end)
                        return;
                    queue[qpos++] = *d++ = *s++;
                    qpos &= QUEUE_MASK;
                    dataleft--;
                } else {
                    /* back-reference: 12-bit offset, 4-bit length (+3) */
                    chainofs = *s++;
                    chainofs |= ((*s & 0xF0) << 4);
                    chainlen = (*s++ & 0x0F) + 3;
                    if (chainlen == speclen)
                        /* escape: extended length in the next byte */
                        chainlen = *s++ + 0xF + 3;
                    if (d + chainlen > d_end)
                        return;
                    for (j = 0; j < chainlen; j++) {
                        /* copy from history; copied bytes re-enter the queue */
                        *d = queue[chainofs++ & QUEUE_MASK];
                        queue[qpos++] = *d++;
                        qpos &= QUEUE_MASK;
                    }
                    dataleft -= chainlen;
                }
                tag >>= 1;
            }
        }
    }
}
  130. static int rle_unpack(const unsigned char *src, unsigned char *dest,
  131. int src_len, int dest_len)
  132. {
  133. const unsigned char *ps;
  134. unsigned char *pd;
  135. int i, l;
  136. unsigned char *dest_end = dest + dest_len;
  137. ps = src;
  138. pd = dest;
  139. if (src_len & 1)
  140. *pd++ = *ps++;
  141. src_len >>= 1;
  142. i = 0;
  143. do {
  144. l = *ps++;
  145. if (l & 0x80) {
  146. l = (l & 0x7F) * 2;
  147. if (pd + l > dest_end)
  148. return ps - src;
  149. memcpy(pd, ps, l);
  150. ps += l;
  151. pd += l;
  152. } else {
  153. if (pd + i > dest_end)
  154. return ps - src;
  155. for (i = 0; i < l; i++) {
  156. *pd++ = ps[0];
  157. *pd++ = ps[1];
  158. }
  159. ps += 2;
  160. }
  161. i += l;
  162. } while (i < src_len);
  163. return ps - src;
  164. }
/**
 * Decode one VMD video frame from s->buf into s->frame.
 *
 * The 16-byte frame information record prepended to the payload supplies
 * the updated sub-rectangle (left/top/right/bottom at byte offsets 6..13)
 * and a flags byte at offset 15.  Pixels outside the rectangle are copied
 * from the previous frame.  Three update methods exist:
 *   1 - mix of raw byte runs and interframe (previous-frame) copies
 *   2 - plain raw copy of the whole rectangle
 *   3 - like 1 but raw runs may additionally be RLE-packed (escape 0xFF)
 * A method with bit 7 set means the payload is LZ-packed first.
 */
static void vmd_decode(VmdVideoContext *s)
{
    int i;
    unsigned int *palette32;
    unsigned char r, g, b;
    /* point to the start of the encoded data */
    const unsigned char *p = s->buf + 16;
    const unsigned char *pb;
    unsigned char meth;
    unsigned char *dp; /* pointer to current frame */
    unsigned char *pp; /* pointer to previous frame */
    unsigned char len;
    int ofs;
    int frame_x, frame_y;
    int frame_width, frame_height;

    /* update rectangle, inclusive coordinates -> width/height need +1 */
    frame_x = AV_RL16(&s->buf[6]);
    frame_y = AV_RL16(&s->buf[8]);
    frame_width = AV_RL16(&s->buf[10]) - frame_x + 1;
    frame_height = AV_RL16(&s->buf[12]) - frame_y + 1;
    /* reject rectangles that do not fit inside the coded picture */
    if (frame_x < 0 || frame_width < 0 ||
        frame_x >= s->avctx->width ||
        frame_width > s->avctx->width ||
        frame_x + frame_width > s->avctx->width)
        return;
    if (frame_y < 0 || frame_height < 0 ||
        frame_y >= s->avctx->height ||
        frame_height > s->avctx->height ||
        frame_y + frame_height > s->avctx->height)
        return;

    /* a full-size rectangle at a nonzero position establishes a global
     * offset that is subtracted from all subsequent frame positions */
    if ((frame_width == s->avctx->width && frame_height == s->avctx->height) &&
        (frame_x || frame_y)) {
        s->x_off = frame_x;
        s->y_off = frame_y;
    }
    frame_x -= s->x_off;
    frame_y -= s->y_off;

    /* if only a certain region will be updated, copy the entire previous
     * frame before the decode */
    if (s->prev_frame.data[0] &&
        (frame_x || frame_y || (frame_width != s->avctx->width) ||
        (frame_height != s->avctx->height))) {
        memcpy(s->frame.data[0], s->prev_frame.data[0],
            s->avctx->height * s->frame.linesize[0]);
    }

    /* check if there is a new palette */
    if (s->buf[15] & 0x02) {
        p += 2;
        /* NOTE(review): reinterpreting the byte array as unsigned int
         * assumes a little-endian-friendly layout and bends strict
         * aliasing; kept as-is to preserve behavior */
        palette32 = (unsigned int *)s->palette;
        for (i = 0; i < PALETTE_COUNT; i++) {
            /* *4 scales what appear to be 6-bit VGA components to 8 bits */
            r = *p++ * 4;
            g = *p++ * 4;
            b = *p++ * 4;
            palette32[i] = (r << 16) | (g << 8) | (b);
        }
        s->size -= (256 * 3 + 2);
    }
    if (s->size >= 0) {
        /* originally UnpackFrame in VAG's code */
        pb = p;
        meth = *pb++;
        if (meth & 0x80) {
            /* bit 7: payload is LZ-packed; decompress to the scratch buffer */
            lz_unpack(pb, s->unpack_buffer, s->unpack_buffer_size);
            meth &= 0x7F;
            pb = s->unpack_buffer;
        }

        /* dp/pp walk the update rectangle in the current/previous frame */
        dp = &s->frame.data[0][frame_y * s->frame.linesize[0] + frame_x];
        pp = &s->prev_frame.data[0][frame_y * s->prev_frame.linesize[0] + frame_x];
        switch (meth) {
        case 1:
            for (i = 0; i < frame_height; i++) {
                ofs = 0;
                do {
                    len = *pb++;
                    if (len & 0x80) {
                        /* raw run of (len & 0x7F) + 1 bytes */
                        len = (len & 0x7F) + 1;
                        if (ofs + len > frame_width)
                            return;
                        memcpy(&dp[ofs], pb, len);
                        pb += len;
                        ofs += len;
                    } else {
                        /* interframe pixel copy */
                        if (ofs + len + 1 > frame_width || !s->prev_frame.data[0])
                            return;
                        memcpy(&dp[ofs], &pp[ofs], len + 1);
                        ofs += len + 1;
                    }
                } while (ofs < frame_width);
                if (ofs > frame_width) {
                    av_log(s->avctx, AV_LOG_ERROR, "VMD video: offset > width (%d > %d)\n",
                        ofs, frame_width);
                    break;
                }
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;

        case 2:
            /* raw copy of every row in the rectangle
             * NOTE(review): pb is not bounds-checked against the payload
             * here — confirm upstream input validation before relying on it */
            for (i = 0; i < frame_height; i++) {
                memcpy(dp, pb, frame_width);
                pb += frame_width;
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;

        case 3:
            for (i = 0; i < frame_height; i++) {
                ofs = 0;
                do {
                    len = *pb++;
                    if (len & 0x80) {
                        len = (len & 0x7F) + 1;
                        if (*pb++ == 0xFF)
                            /* 0xFF escape: run is RLE-packed; len becomes
                             * the number of input bytes consumed */
                            len = rle_unpack(pb, &dp[ofs], len, frame_width - ofs);
                        else
                            memcpy(&dp[ofs], pb, len);
                        pb += len;
                        ofs += len;
                    } else {
                        /* interframe pixel copy */
                        if (ofs + len + 1 > frame_width || !s->prev_frame.data[0])
                            return;
                        memcpy(&dp[ofs], &pp[ofs], len + 1);
                        ofs += len + 1;
                    }
                } while (ofs < frame_width);
                if (ofs > frame_width) {
                    av_log(s->avctx, AV_LOG_ERROR, "VMD video: offset > width (%d > %d)\n",
                        ofs, frame_width);
                }
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;
        }
    }
}
  302. static av_cold int vmdvideo_decode_init(AVCodecContext *avctx)
  303. {
  304. VmdVideoContext *s = avctx->priv_data;
  305. int i;
  306. unsigned int *palette32;
  307. int palette_index = 0;
  308. unsigned char r, g, b;
  309. unsigned char *vmd_header;
  310. unsigned char *raw_palette;
  311. s->avctx = avctx;
  312. avctx->pix_fmt = PIX_FMT_PAL8;
  313. /* make sure the VMD header made it */
  314. if (s->avctx->extradata_size != VMD_HEADER_SIZE) {
  315. av_log(s->avctx, AV_LOG_ERROR, "VMD video: expected extradata size of %d\n",
  316. VMD_HEADER_SIZE);
  317. return -1;
  318. }
  319. vmd_header = (unsigned char *)avctx->extradata;
  320. s->unpack_buffer_size = AV_RL32(&vmd_header[800]);
  321. s->unpack_buffer = av_malloc(s->unpack_buffer_size);
  322. if (!s->unpack_buffer)
  323. return -1;
  324. /* load up the initial palette */
  325. raw_palette = &vmd_header[28];
  326. palette32 = (unsigned int *)s->palette;
  327. for (i = 0; i < PALETTE_COUNT; i++) {
  328. r = raw_palette[palette_index++] * 4;
  329. g = raw_palette[palette_index++] * 4;
  330. b = raw_palette[palette_index++] * 4;
  331. palette32[i] = (r << 16) | (g << 8) | (b);
  332. }
  333. avcodec_get_frame_defaults(&s->frame);
  334. avcodec_get_frame_defaults(&s->prev_frame);
  335. return 0;
  336. }
  337. static int vmdvideo_decode_frame(AVCodecContext *avctx,
  338. void *data, int *data_size,
  339. AVPacket *avpkt)
  340. {
  341. const uint8_t *buf = avpkt->data;
  342. int buf_size = avpkt->size;
  343. VmdVideoContext *s = avctx->priv_data;
  344. s->buf = buf;
  345. s->size = buf_size;
  346. if (buf_size < 16)
  347. return buf_size;
  348. s->frame.reference = 1;
  349. if (avctx->get_buffer(avctx, &s->frame)) {
  350. av_log(s->avctx, AV_LOG_ERROR, "VMD Video: get_buffer() failed\n");
  351. return -1;
  352. }
  353. vmd_decode(s);
  354. /* make the palette available on the way out */
  355. memcpy(s->frame.data[1], s->palette, PALETTE_COUNT * 4);
  356. /* shuffle frames */
  357. FFSWAP(AVFrame, s->frame, s->prev_frame);
  358. if (s->frame.data[0])
  359. avctx->release_buffer(avctx, &s->frame);
  360. *data_size = sizeof(AVFrame);
  361. *(AVFrame*)data = s->prev_frame;
  362. /* report that the buffer was completely consumed */
  363. return buf_size;
  364. }
  365. static av_cold int vmdvideo_decode_end(AVCodecContext *avctx)
  366. {
  367. VmdVideoContext *s = avctx->priv_data;
  368. if (s->prev_frame.data[0])
  369. avctx->release_buffer(avctx, &s->prev_frame);
  370. av_free(s->unpack_buffer);
  371. return 0;
  372. }
  373. /*
  374. * Audio Decoder
  375. */
  376. #define BLOCK_TYPE_AUDIO 1
  377. #define BLOCK_TYPE_INITIAL 2
  378. #define BLOCK_TYPE_SILENCE 3
/* Persistent state for the VMD audio decoder. */
typedef struct VmdAudioContext {
    AVCodecContext *avctx;
    int out_bps;        /* bytes per output sample: 1 for u8, 2 for s16 */
    int predictors[2];  /* running delta-PCM predictor, one per channel */
} VmdAudioContext;
/* Delta magnitudes for the 16-bit audio mode: each coded byte's low 7 bits
 * index this table and the high bit selects the sign of the step applied
 * to the channel predictor (see vmdaudio_decode_audio). */
static const uint16_t vmdaudio_table[128] = {
    0x000, 0x008, 0x010, 0x020, 0x030, 0x040, 0x050, 0x060, 0x070, 0x080,
    0x090, 0x0A0, 0x0B0, 0x0C0, 0x0D0, 0x0E0, 0x0F0, 0x100, 0x110, 0x120,
    0x130, 0x140, 0x150, 0x160, 0x170, 0x180, 0x190, 0x1A0, 0x1B0, 0x1C0,
    0x1D0, 0x1E0, 0x1F0, 0x200, 0x208, 0x210, 0x218, 0x220, 0x228, 0x230,
    0x238, 0x240, 0x248, 0x250, 0x258, 0x260, 0x268, 0x270, 0x278, 0x280,
    0x288, 0x290, 0x298, 0x2A0, 0x2A8, 0x2B0, 0x2B8, 0x2C0, 0x2C8, 0x2D0,
    0x2D8, 0x2E0, 0x2E8, 0x2F0, 0x2F8, 0x300, 0x308, 0x310, 0x318, 0x320,
    0x328, 0x330, 0x338, 0x340, 0x348, 0x350, 0x358, 0x360, 0x368, 0x370,
    0x378, 0x380, 0x388, 0x390, 0x398, 0x3A0, 0x3A8, 0x3B0, 0x3B8, 0x3C0,
    0x3C8, 0x3D0, 0x3D8, 0x3E0, 0x3E8, 0x3F0, 0x3F8, 0x400, 0x440, 0x480,
    0x4C0, 0x500, 0x540, 0x580, 0x5C0, 0x600, 0x640, 0x680, 0x6C0, 0x700,
    0x740, 0x780, 0x7C0, 0x800, 0x900, 0xA00, 0xB00, 0xC00, 0xD00, 0xE00,
    0xF00, 0x1000, 0x1400, 0x1800, 0x1C00, 0x2000, 0x3000, 0x4000
};
  399. static av_cold int vmdaudio_decode_init(AVCodecContext *avctx)
  400. {
  401. VmdAudioContext *s = avctx->priv_data;
  402. s->avctx = avctx;
  403. if (avctx->bits_per_coded_sample == 16)
  404. avctx->sample_fmt = AV_SAMPLE_FMT_S16;
  405. else
  406. avctx->sample_fmt = AV_SAMPLE_FMT_U8;
  407. s->out_bps = av_get_bytes_per_sample(avctx->sample_fmt);
  408. av_log(avctx, AV_LOG_DEBUG, "%d channels, %d bits/sample, "
  409. "block align = %d, sample rate = %d\n",
  410. avctx->channels, avctx->bits_per_coded_sample, avctx->block_align,
  411. avctx->sample_rate);
  412. return 0;
  413. }
  414. static void vmdaudio_decode_audio(VmdAudioContext *s, unsigned char *data,
  415. const uint8_t *buf, int buf_size, int stereo)
  416. {
  417. int i;
  418. int chan = 0;
  419. int16_t *out = (int16_t*)data;
  420. for(i = 0; i < buf_size; i++) {
  421. if(buf[i] & 0x80)
  422. s->predictors[chan] -= vmdaudio_table[buf[i] & 0x7F];
  423. else
  424. s->predictors[chan] += vmdaudio_table[buf[i]];
  425. s->predictors[chan] = av_clip_int16(s->predictors[chan]);
  426. out[i] = s->predictors[chan];
  427. chan ^= stereo;
  428. }
  429. }
  430. static int vmdaudio_loadsound(VmdAudioContext *s, unsigned char *data,
  431. const uint8_t *buf, int silent_chunks, int data_size)
  432. {
  433. int silent_size = s->avctx->block_align * silent_chunks * s->out_bps;
  434. if (silent_chunks) {
  435. memset(data, s->out_bps == 2 ? 0x00 : 0x80, silent_size);
  436. data += silent_size;
  437. }
  438. if (s->avctx->bits_per_coded_sample == 16)
  439. vmdaudio_decode_audio(s, data, buf, data_size, s->avctx->channels == 2);
  440. else {
  441. /* just copy the data */
  442. memcpy(data, buf, data_size);
  443. }
  444. return silent_size + data_size * s->out_bps;
  445. }
  446. static int vmdaudio_decode_frame(AVCodecContext *avctx,
  447. void *data, int *data_size,
  448. AVPacket *avpkt)
  449. {
  450. const uint8_t *buf = avpkt->data;
  451. int buf_size = avpkt->size;
  452. VmdAudioContext *s = avctx->priv_data;
  453. int block_type, silent_chunks;
  454. unsigned char *output_samples = (unsigned char *)data;
  455. if (buf_size < 16) {
  456. av_log(avctx, AV_LOG_WARNING, "skipping small junk packet\n");
  457. *data_size = 0;
  458. return buf_size;
  459. }
  460. block_type = buf[6];
  461. if (block_type < BLOCK_TYPE_AUDIO || block_type > BLOCK_TYPE_SILENCE) {
  462. av_log(avctx, AV_LOG_ERROR, "unknown block type: %d\n", block_type);
  463. return AVERROR(EINVAL);
  464. }
  465. buf += 16;
  466. buf_size -= 16;
  467. silent_chunks = 0;
  468. if (block_type == BLOCK_TYPE_INITIAL) {
  469. uint32_t flags;
  470. if (buf_size < 4)
  471. return -1;
  472. flags = AV_RB32(buf);
  473. silent_chunks = av_popcount(flags);
  474. buf += 4;
  475. buf_size -= 4;
  476. } else if (block_type == BLOCK_TYPE_SILENCE) {
  477. silent_chunks = 1;
  478. buf_size = 0; // should already be zero but set it just to be sure
  479. }
  480. /* ensure output buffer is large enough */
  481. if (*data_size < (avctx->block_align*silent_chunks + buf_size) * s->out_bps)
  482. return -1;
  483. *data_size = vmdaudio_loadsound(s, output_samples, buf, silent_chunks, buf_size);
  484. return avpkt->size;
  485. }
  486. /*
  487. * Public Data Structures
  488. */
  489. AVCodec ff_vmdvideo_decoder = {
  490. "vmdvideo",
  491. AVMEDIA_TYPE_VIDEO,
  492. CODEC_ID_VMDVIDEO,
  493. sizeof(VmdVideoContext),
  494. vmdvideo_decode_init,
  495. NULL,
  496. vmdvideo_decode_end,
  497. vmdvideo_decode_frame,
  498. CODEC_CAP_DR1,
  499. .long_name = NULL_IF_CONFIG_SMALL("Sierra VMD video"),
  500. };
  501. AVCodec ff_vmdaudio_decoder = {
  502. "vmdaudio",
  503. AVMEDIA_TYPE_AUDIO,
  504. CODEC_ID_VMDAUDIO,
  505. sizeof(VmdAudioContext),
  506. vmdaudio_decode_init,
  507. NULL,
  508. NULL,
  509. vmdaudio_decode_frame,
  510. .long_name = NULL_IF_CONFIG_SMALL("Sierra VMD audio"),
  511. };