/*
 * Sierra VMD Audio & Video Decoders
 * Copyright (C) 2004 the ffmpeg project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file vmdvideo.c
 * Sierra VMD audio & video decoders
 * by Vladimir "VAG" Gneushev (vagsoft at mail.ru)
 * for more information on the Sierra VMD format, visit:
 *   http://www.pcisys.net/~melanson/codecs/
 *
 * The video decoder outputs PAL8 colorspace data. The decoder expects
 * a 0x330-byte VMD file header to be transmitted via extradata during
 * codec initialization. Each encoded frame that is sent to this decoder
 * is expected to be prepended with the appropriate 16-byte frame
 * information record from the VMD file.
 *
 * The audio decoder, like the video decoder, expects each encoded data
 * chunk to be prepended with the appropriate 16-byte frame information
 * record from the VMD file. It does not require the 0x330-byte VMD file
 * header, but it does need the audio setup parameters passed in through
 * normal libavcodec API means.
 */
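
/*
 * Illustrative only -- not part of the original file.  A minimal sketch,
 * assuming the libavcodec API of this era, of how a caller might drive the
 * video decoder.  vmd_header, chunk and chunk_size are placeholder names:
 * the 0x330-byte VMD file header and each 16-byte-record-plus-payload
 * frame buffer are expected to come from the demuxer, as described above.
 *
 *     AVCodec *codec = avcodec_find_decoder(CODEC_ID_VMDVIDEO);
 *     AVCodecContext *ctx = avcodec_alloc_context();
 *     ctx->extradata      = vmd_header;      // the 0x330-byte file header
 *     ctx->extradata_size = 0x330;
 *     avcodec_open(ctx, codec);
 *
 *     AVFrame frame;
 *     int got_picture;
 *     avcodec_decode_video(ctx, &frame, &got_picture, chunk, chunk_size);
 */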
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "avcodec.h"
#include "dsputil.h"

#define VMD_HEADER_SIZE 0x330
#define PALETTE_COUNT 256

/*
 * Video Decoder
 */

typedef struct VmdVideoContext {

    AVCodecContext *avctx;
    DSPContext dsp;

    AVFrame frame;
    AVFrame prev_frame;

    unsigned char *buf;
    int size;

    unsigned char palette[PALETTE_COUNT * 4];
    unsigned char *unpack_buffer;
    int unpack_buffer_size;

} VmdVideoContext;

#define QUEUE_SIZE 0x1000
#define QUEUE_MASK 0x0FFF
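
/* lz_unpack() below implements the LZSS-style scheme used for whole-frame
 * compression: a QUEUE_SIZE-byte ring buffer initialized to spaces, a 32-bit
 * little-endian unpacked-size field at the start of the stream, and tag bytes
 * whose bits (LSB first) select either a literal byte or a back-reference
 * made of a 12-bit ring-buffer offset and a 4-bit length plus 3.  A tag of
 * 0xFF is a shortcut that copies 8 literal bytes.  If the stream begins with
 * the magic value 0x56781234, a nibble length of 0xF (chain length 18)
 * signals that an extra length byte follows. */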
static void lz_unpack(unsigned char *src, unsigned char *dest, int dest_len)
{
    unsigned char *s;
    unsigned char *d;
    unsigned char *d_end;
    unsigned char queue[QUEUE_SIZE];
    unsigned int qpos;
    unsigned int dataleft;
    unsigned int chainofs;
    unsigned int chainlen;
    unsigned int speclen;
    unsigned char tag;
    unsigned int i, j;

    s = src;
    d = dest;
    d_end = d + dest_len;
    dataleft = AV_RL32(s);
    s += 4;
    memset(queue, 0x20, QUEUE_SIZE);
    if (AV_RL32(s) == 0x56781234) {
        s += 4;
        qpos = 0x111;
        speclen = 0xF + 3;
    } else {
        qpos = 0xFEE;
        speclen = 100;  /* no speclen */
    }
    while (dataleft > 0) {
        tag = *s++;
        if ((tag == 0xFF) && (dataleft > 8)) {
            if (d + 8 > d_end)
                return;
            for (i = 0; i < 8; i++) {
                queue[qpos++] = *d++ = *s++;
                qpos &= QUEUE_MASK;
            }
            dataleft -= 8;
        } else {
            for (i = 0; i < 8; i++) {
                if (dataleft == 0)
                    break;
                if (tag & 0x01) {
                    if (d + 1 > d_end)
                        return;
                    queue[qpos++] = *d++ = *s++;
                    qpos &= QUEUE_MASK;
                    dataleft--;
                } else {
                    chainofs = *s++;
                    chainofs |= ((*s & 0xF0) << 4);
                    chainlen = (*s++ & 0x0F) + 3;
                    if (chainlen == speclen)
                        chainlen = *s++ + 0xF + 3;
                    if (d + chainlen > d_end)
                        return;
                    for (j = 0; j < chainlen; j++) {
                        *d = queue[chainofs++ & QUEUE_MASK];
                        queue[qpos++] = *d++;
                        qpos &= QUEUE_MASK;
                    }
                    dataleft -= chainlen;
                }
                tag >>= 1;
            }
        }
    }
}
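
/* rle_unpack() expands the byte-pair RLE used inside method-3 video runs:
 * a leading odd byte (when src_len is odd) is copied verbatim, then each
 * control byte either copies (l & 0x7F) * 2 literal bytes when its high bit
 * is set, or repeats the following two-byte pair l times.  The return value
 * is the number of source bytes consumed. */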
static int rle_unpack(unsigned char *src, unsigned char *dest,
    int src_len, int dest_len)
{
    unsigned char *ps;
    unsigned char *pd;
    int i, l;
    unsigned char *dest_end = dest + dest_len;

    ps = src;
    pd = dest;
    if (src_len & 1)
        *pd++ = *ps++;

    src_len >>= 1;
    i = 0;
    do {
        l = *ps++;
        if (l & 0x80) {
            l = (l & 0x7F) * 2;
            if (pd + l > dest_end)
                return (ps - src);
            memcpy(pd, ps, l);
            ps += l;
            pd += l;
        } else {
            if (pd + i > dest_end)
                return (ps - src);
            for (i = 0; i < l; i++) {
                *pd++ = ps[0];
                *pd++ = ps[1];
            }
            ps += 2;
        }
        i += l;
    } while (i < src_len);

    return (ps - src);
}
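
/* vmd_decode() consumes one encoded frame: the 16-byte frame record (update
 * rectangle at byte offsets 6/8/10/12, flags at offset 15), an optional
 * palette update (two bytes are skipped, then 256 3-byte VGA entries) when
 * bit 0x02 of the flags byte is set, and finally the pixel data.  The first
 * payload byte selects the coding method; bit 7 means the payload was
 * additionally LZ-packed and must go through lz_unpack() first.
 *   method 1: per-line runs of new pixels or pixels copied from the
 *             previous frame (interframe)
 *   method 2: raw copy, frame_width bytes per line
 *   method 3: like method 1, but a literal run may itself be RLE-packed
 *             (signalled by a 0xFF byte) and expanded with rle_unpack()
 */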
static void vmd_decode(VmdVideoContext *s)
{
    int i;
    unsigned int *palette32;
    unsigned char r, g, b;

    /* point to the start of the encoded data */
    unsigned char *p = s->buf + 16;

    unsigned char *pb;
    unsigned char meth;
    unsigned char *dp;   /* pointer to current frame */
    unsigned char *pp;   /* pointer to previous frame */
    unsigned char len;
    int ofs;

    int frame_x, frame_y;
    int frame_width, frame_height;
    int dp_size;

    frame_x = AV_RL16(&s->buf[6]);
    frame_y = AV_RL16(&s->buf[8]);
    frame_width = AV_RL16(&s->buf[10]) - frame_x + 1;
    frame_height = AV_RL16(&s->buf[12]) - frame_y + 1;

    /* if only a certain region will be updated, copy the entire previous
     * frame before the decode */
    if (frame_x || frame_y || (frame_width != s->avctx->width) ||
        (frame_height != s->avctx->height)) {

        memcpy(s->frame.data[0], s->prev_frame.data[0],
            s->avctx->height * s->frame.linesize[0]);
    }

    /* check if there is a new palette */
    if (s->buf[15] & 0x02) {
        p += 2;
        palette32 = (unsigned int *)s->palette;
        for (i = 0; i < PALETTE_COUNT; i++) {
            r = *p++ * 4;
            g = *p++ * 4;
            b = *p++ * 4;
            palette32[i] = (r << 16) | (g << 8) | (b);
        }
        s->size -= (256 * 3 + 2);
    }
    if (s->size >= 0) {
        /* originally UnpackFrame in VAG's code */
        pb = p;
        meth = *pb++;
        if (meth & 0x80) {
            lz_unpack(pb, s->unpack_buffer, s->unpack_buffer_size);
            meth &= 0x7F;
            pb = s->unpack_buffer;
        }

        dp = &s->frame.data[0][frame_y * s->frame.linesize[0] + frame_x];
        dp_size = s->frame.linesize[0] * s->avctx->height;
        pp = &s->prev_frame.data[0][frame_y * s->prev_frame.linesize[0] + frame_x];
        switch (meth) {
        case 1:
            for (i = 0; i < frame_height; i++) {
                ofs = 0;
                do {
                    len = *pb++;
                    if (len & 0x80) {
                        len = (len & 0x7F) + 1;
                        if (ofs + len > frame_width)
                            return;
                        memcpy(&dp[ofs], pb, len);
                        pb += len;
                        ofs += len;
                    } else {
                        /* interframe pixel copy */
                        if (ofs + len + 1 > frame_width)
                            return;
                        memcpy(&dp[ofs], &pp[ofs], len + 1);
                        ofs += len + 1;
                    }
                } while (ofs < frame_width);
                if (ofs > frame_width) {
                    av_log(s->avctx, AV_LOG_ERROR, "VMD video: offset > width (%d > %d)\n",
                        ofs, frame_width);
                    break;
                }
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;

        case 2:
            for (i = 0; i < frame_height; i++) {
                memcpy(dp, pb, frame_width);
                pb += frame_width;
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;

        case 3:
            for (i = 0; i < frame_height; i++) {
                ofs = 0;
                do {
                    len = *pb++;
                    if (len & 0x80) {
                        len = (len & 0x7F) + 1;
                        if (*pb++ == 0xFF)
                            len = rle_unpack(pb, &dp[ofs], len, frame_width - ofs);
                        else
                            memcpy(&dp[ofs], pb, len);
                        pb += len;
                        ofs += len;
                    } else {
                        /* interframe pixel copy */
                        if (ofs + len + 1 > frame_width)
                            return;
                        memcpy(&dp[ofs], &pp[ofs], len + 1);
                        ofs += len + 1;
                    }
                } while (ofs < frame_width);
                if (ofs > frame_width) {
                    av_log(s->avctx, AV_LOG_ERROR, "VMD video: offset > width (%d > %d)\n",
                        ofs, frame_width);
                }
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;
        }
    }
}
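
/* vmdvideo_decode_init() pulls what it needs out of the 0x330-byte file
 * header passed in as extradata: the size of the LZ unpack buffer is a
 * 32-bit LE value at byte offset 800, and the initial palette is 256
 * VGA-style 6-bit RGB triplets starting at byte offset 28 (scaled by 4 to
 * 8 bits per component below). */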
static int vmdvideo_decode_init(AVCodecContext *avctx)
{
    VmdVideoContext *s = avctx->priv_data;
    int i;
    unsigned int *palette32;
    int palette_index = 0;
    unsigned char r, g, b;
    unsigned char *vmd_header;
    unsigned char *raw_palette;

    s->avctx = avctx;
    avctx->pix_fmt = PIX_FMT_PAL8;
    dsputil_init(&s->dsp, avctx);

    /* make sure the VMD header made it */
    if (s->avctx->extradata_size != VMD_HEADER_SIZE) {
        av_log(s->avctx, AV_LOG_ERROR, "VMD video: expected extradata size of %d\n",
            VMD_HEADER_SIZE);
        return -1;
    }
    vmd_header = (unsigned char *)avctx->extradata;

    s->unpack_buffer_size = AV_RL32(&vmd_header[800]);
    s->unpack_buffer = av_malloc(s->unpack_buffer_size);
    if (!s->unpack_buffer)
        return -1;

    /* load up the initial palette */
    raw_palette = &vmd_header[28];
    palette32 = (unsigned int *)s->palette;
    for (i = 0; i < PALETTE_COUNT; i++) {
        r = raw_palette[palette_index++] * 4;
        g = raw_palette[palette_index++] * 4;
        b = raw_palette[palette_index++] * 4;
        palette32[i] = (r << 16) | (g << 8) | (b);
    }

    s->frame.data[0] = s->prev_frame.data[0] = NULL;

    return 0;
}

static int vmdvideo_decode_frame(AVCodecContext *avctx,
                                 void *data, int *data_size,
                                 uint8_t *buf, int buf_size)
{
    VmdVideoContext *s = avctx->priv_data;

    s->buf = buf;
    s->size = buf_size;

    if (buf_size < 16)
        return buf_size;

    s->frame.reference = 1;
    if (avctx->get_buffer(avctx, &s->frame)) {
        av_log(s->avctx, AV_LOG_ERROR, "VMD Video: get_buffer() failed\n");
        return -1;
    }

    vmd_decode(s);

    /* make the palette available on the way out */
    memcpy(s->frame.data[1], s->palette, PALETTE_COUNT * 4);

    /* shuffle frames */
    FFSWAP(AVFrame, s->frame, s->prev_frame);
    if (s->frame.data[0])
        avctx->release_buffer(avctx, &s->frame);

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->prev_frame;

    /* report that the buffer was completely consumed */
    return buf_size;
}

static int vmdvideo_decode_end(AVCodecContext *avctx)
{
    VmdVideoContext *s = avctx->priv_data;

    if (s->prev_frame.data[0])
        avctx->release_buffer(avctx, &s->prev_frame);
    av_free(s->unpack_buffer);

    return 0;
}


/*
 * Audio Decoder
 */

typedef struct VmdAudioContext {
    AVCodecContext *avctx;
    int channels;
    int bits;
    int block_align;
    int predictors[2];
} VmdAudioContext;
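
/* Delta table for the 16-bit audio path: each encoded byte is treated as
 * sign/magnitude, i.e. the low 7 bits index this table and bit 7 selects
 * whether the delta is added to or subtracted from the running predictor
 * (one predictor per channel, clipped to the int16 range). */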
static uint16_t vmdaudio_table[128] = {
    0x000, 0x008, 0x010, 0x020, 0x030, 0x040, 0x050, 0x060, 0x070, 0x080,
    0x090, 0x0A0, 0x0B0, 0x0C0, 0x0D0, 0x0E0, 0x0F0, 0x100, 0x110, 0x120,
    0x130, 0x140, 0x150, 0x160, 0x170, 0x180, 0x190, 0x1A0, 0x1B0, 0x1C0,
    0x1D0, 0x1E0, 0x1F0, 0x200, 0x208, 0x210, 0x218, 0x220, 0x228, 0x230,
    0x238, 0x240, 0x248, 0x250, 0x258, 0x260, 0x268, 0x270, 0x278, 0x280,
    0x288, 0x290, 0x298, 0x2A0, 0x2A8, 0x2B0, 0x2B8, 0x2C0, 0x2C8, 0x2D0,
    0x2D8, 0x2E0, 0x2E8, 0x2F0, 0x2F8, 0x300, 0x308, 0x310, 0x318, 0x320,
    0x328, 0x330, 0x338, 0x340, 0x348, 0x350, 0x358, 0x360, 0x368, 0x370,
    0x378, 0x380, 0x388, 0x390, 0x398, 0x3A0, 0x3A8, 0x3B0, 0x3B8, 0x3C0,
    0x3C8, 0x3D0, 0x3D8, 0x3E0, 0x3E8, 0x3F0, 0x3F8, 0x400, 0x440, 0x480,
    0x4C0, 0x500, 0x540, 0x580, 0x5C0, 0x600, 0x640, 0x680, 0x6C0, 0x700,
    0x740, 0x780, 0x7C0, 0x800, 0x900, 0xA00, 0xB00, 0xC00, 0xD00, 0xE00,
    0xF00, 0x1000, 0x1400, 0x1800, 0x1C00, 0x2000, 0x3000, 0x4000
};
static int vmdaudio_decode_init(AVCodecContext *avctx)
{
    VmdAudioContext *s = avctx->priv_data;

    s->avctx = avctx;
    s->channels = avctx->channels;
    s->bits = avctx->bits_per_sample;
    s->block_align = avctx->block_align;
    av_log(s->avctx, AV_LOG_DEBUG, "%d channels, %d bits/sample, block align = %d, sample rate = %d\n",
        s->channels, s->bits, s->block_align, avctx->sample_rate);

    return 0;
}

static void vmdaudio_decode_audio(VmdAudioContext *s, unsigned char *data,
    uint8_t *buf, int stereo)
{
    int i;
    int chan = 0;
    int16_t *out = (int16_t*)data;

    for (i = 0; i < s->block_align; i++) {
        if (buf[i] & 0x80)
            s->predictors[chan] -= vmdaudio_table[buf[i] & 0x7F];
        else
            s->predictors[chan] += vmdaudio_table[buf[i]];
        s->predictors[chan] = av_clip_int16(s->predictors[chan]);
        out[i] = s->predictors[chan];
        chan ^= stereo;
    }
}

static int vmdaudio_loadsound(VmdAudioContext *s, unsigned char *data,
    uint8_t *buf, int silence)
{
    int bytes_decoded = 0;
    int i;

//    if (silence)
//        av_log(s->avctx, AV_LOG_INFO, "silent block!\n");
    if (s->channels == 2) {

        /* stereo handling */
        if (silence) {
            memset(data, 0, s->block_align * 2);
        } else {
            if (s->bits == 16)
                vmdaudio_decode_audio(s, data, buf, 1);
            else {
                /* copy the data but convert it to signed */
                for (i = 0; i < s->block_align; i++) {
                    *data++ = buf[i] + 0x80;
                    *data++ = buf[i] + 0x80;
                }
            }
        }
    } else {
        bytes_decoded = s->block_align * 2;

        /* mono handling */
        if (silence) {
            memset(data, 0, s->block_align * 2);
        } else {
            if (s->bits == 16) {
                vmdaudio_decode_audio(s, data, buf, 0);
            } else {
                /* copy the data but convert it to signed */
                for (i = 0; i < s->block_align; i++) {
                    *data++ = buf[i] + 0x80;
                    *data++ = buf[i] + 0x80;
                }
            }
        }
    }

    return s->block_align * 2;
}
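
/* Each audio packet is prefixed with the 16-byte frame record.  Byte 6 of
 * that record identifies the chunk type handled below: 1 = audio data,
 * 2 = audio data with a 4-byte prefix (treated as silence when the packet
 * is only the 16-byte record), 3 = a silent chunk. */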
static int vmdaudio_decode_frame(AVCodecContext *avctx,
                                 void *data, int *data_size,
                                 uint8_t *buf, int buf_size)
{
    VmdAudioContext *s = avctx->priv_data;
    unsigned char *output_samples = (unsigned char *)data;

    /* point to the start of the encoded data */
    unsigned char *p = buf + 16;

    if (buf_size < 16)
        return buf_size;

    if (buf[6] == 1) {
        /* the chunk contains audio */
        *data_size = vmdaudio_loadsound(s, output_samples, p, 0);
    } else if (buf[6] == 2) {
        /* the chunk may contain audio */
        p += 4;
        *data_size = vmdaudio_loadsound(s, output_samples, p, (buf_size == 16));
        output_samples += (s->block_align * s->bits / 8);
    } else if (buf[6] == 3) {
        /* silent chunk */
        *data_size = vmdaudio_loadsound(s, output_samples, p, 1);
    }

    return buf_size;
}


/*
 * Public Data Structures
 */
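
/* Both registrations below use positional initialization of AVCodec; the
 * fields, in order, are: name, type, id, priv_data_size, init, encode
 * (NULL for a pure decoder), close, decode, and capabilities. */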
AVCodec vmdvideo_decoder = {
    "vmdvideo",
    CODEC_TYPE_VIDEO,
    CODEC_ID_VMDVIDEO,
    sizeof(VmdVideoContext),
    vmdvideo_decode_init,
    NULL,
    vmdvideo_decode_end,
    vmdvideo_decode_frame,
    CODEC_CAP_DR1,
};

AVCodec vmdaudio_decoder = {
    "vmdaudio",
    CODEC_TYPE_AUDIO,
    CODEC_ID_VMDAUDIO,
    sizeof(VmdAudioContext),
    vmdaudio_decode_init,
    NULL,
    NULL,
    vmdaudio_decode_frame,
};