/*
 * Sierra VMD Audio & Video Decoders
 * Copyright (C) 2004 the ffmpeg project
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
/**
 * @file vmdvideo.c
 * Sierra VMD audio & video decoders
 * by Vladimir "VAG" Gneushev (vagsoft at mail.ru)
 * for more information on the Sierra VMD format, visit:
 *   http://www.pcisys.net/~melanson/codecs/
 *
 * The video decoder outputs PAL8 colorspace data. The decoder expects
 * a 0x330-byte VMD file header to be transmitted via extradata during
 * codec initialization. Each encoded frame that is sent to this decoder
 * is expected to be prepended with the appropriate 16-byte frame
 * information record from the VMD file.
 *
 * The audio decoder, like the video decoder, expects each encoded data
 * chunk to be prepended with the appropriate 16-byte frame information
 * record from the VMD file. It does not require the 0x330-byte VMD file
 * header, but it does need the audio setup parameters passed in through
 * normal libavcodec API means.
 */
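
/*
 * Illustrative sketch only (not part of this decoder): the contract described
 * above means a caller would hand the 0x330-byte VMD file header to
 * libavcodec as extradata and submit each 16-byte frame record together with
 * its payload as a single buffer, roughly:
 *
 *     avctx->extradata      = vmd_file_header;           // 0x330 bytes
 *     avctx->extradata_size = 0x330;
 *     ...
 *     memcpy(chunk, frame_record, 16);                   // 16-byte record first
 *     memcpy(chunk + 16, frame_payload, payload_size);
 *     avcodec_decode_video(avctx, &picture, &got_picture,
 *                          chunk, 16 + payload_size);
 *
 * vmd_file_header, frame_record, frame_payload, chunk and payload_size are
 * hypothetical demuxer-side buffers and sizes, not symbols defined here.
 */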
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "common.h"
#include "avcodec.h"
#include "dsputil.h"

#define VMD_HEADER_SIZE 0x330
#define PALETTE_COUNT 256
/*
 * Video Decoder
 */

typedef struct VmdVideoContext {

    AVCodecContext *avctx;
    DSPContext dsp;
    AVFrame frame;
    AVFrame prev_frame;

    unsigned char *buf;
    int size;

    unsigned char palette[PALETTE_COUNT * 4];
    unsigned char *unpack_buffer;

} VmdVideoContext;

#define QUEUE_SIZE 0x1000
#define QUEUE_MASK 0x0FFF
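
/* lz_unpack() below implements the LZSS-style scheme evident from the code:
 * a QUEUE_SIZE-byte sliding window pre-filled with 0x20, tag bytes whose
 * eight bits each select either a literal byte (bit set) or a back-reference
 * (bit clear) encoded as a 12-bit window offset plus a 4-bit length with a
 * +3 bias; a tag of 0xFF shortcuts to eight literals, and when the stream
 * begins with the 0x56781234 marker a length equal to speclen escapes to an
 * extended, byte-sized length. */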
static void lz_unpack(unsigned char *src, unsigned char *dest)
{
    unsigned char *s;
    unsigned char *d;
    unsigned char queue[QUEUE_SIZE];
    unsigned int qpos;
    unsigned int dataleft;
    unsigned int chainofs;
    unsigned int chainlen;
    unsigned int speclen;
    unsigned char tag;
    unsigned int i, j;

    s = src;
    d = dest;

    dataleft = LE_32(s);
    s += 4;
    /* pre-fill the entire sliding window with spaces */
    memset(queue, 0x20, QUEUE_SIZE);
    if (LE_32(s) == 0x56781234) {
        s += 4;
        qpos = 0x111;
        speclen = 0xF + 3;
    } else {
        qpos = 0xFEE;
        speclen = 100;  /* no speclen */
    }

    while (dataleft > 0) {
        tag = *s++;
        if ((tag == 0xFF) && (dataleft > 8)) {
            for (i = 0; i < 8; i++) {
                queue[qpos++] = *d++ = *s++;
                qpos &= QUEUE_MASK;
            }
            dataleft -= 8;
        } else {
            for (i = 0; i < 8; i++) {
                if (dataleft == 0)
                    break;
                if (tag & 0x01) {
                    queue[qpos++] = *d++ = *s++;
                    qpos &= QUEUE_MASK;
                    dataleft--;
                } else {
                    chainofs = *s++;
                    chainofs |= ((*s & 0xF0) << 4);
                    chainlen = (*s++ & 0x0F) + 3;
                    if (chainlen == speclen)
                        chainlen = *s++ + 0xF + 3;
                    for (j = 0; j < chainlen; j++) {
                        *d = queue[chainofs++ & QUEUE_MASK];
                        queue[qpos++] = *d++;
                        qpos &= QUEUE_MASK;
                    }
                    dataleft -= chainlen;
                }
                tag >>= 1;
            }
        }
    }
}
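
/* rle_unpack() below expands runs of 2-byte pixel pairs: an odd len copies
 * one leading byte verbatim, then a control byte with the high bit set copies
 * (n & 0x7F) * 2 literal bytes, otherwise the next pair is repeated n times;
 * the return value is the number of bytes consumed from src. */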
static int rle_unpack(unsigned char *src, unsigned char *dest, int len)
{
    unsigned char *ps;
    unsigned char *pd;
    int i, l;

    ps = src;
    pd = dest;
    if (len & 1)
        *pd++ = *ps++;

    len >>= 1;
    i = 0;
    do {
        l = *ps++;
        if (l & 0x80) {
            l = (l & 0x7F) * 2;
            memcpy(pd, ps, l);
            ps += l;
            pd += l;
        } else {
            for (i = 0; i < l; i++) {
                *pd++ = ps[0];
                *pd++ = ps[1];
            }
            ps += 2;
        }
        i += l;
    } while (i < len);

    return (ps - src);
}
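
/* vmd_decode() reads the 16-byte frame record prepended to s->buf: bytes
 * 6..13 hold the little-endian left/top/right/bottom of the update rectangle,
 * and bit 1 of byte 15 signals that a new 256-entry palette precedes the
 * image data.  The first payload byte selects the coding method (1 = per-line
 * runs of new pixels and interframe copies, 2 = raw plane, 3 = like 1 with an
 * extra RLE escape); its high bit means the payload is LZ-packed first. */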
static void vmd_decode(VmdVideoContext *s)
{
    int i;
    unsigned int *palette32;
    unsigned char r, g, b;

    /* point to the start of the encoded data */
    unsigned char *p = s->buf + 16;

    unsigned char *pb;
    unsigned char meth;
    unsigned char *dp;   /* pointer to current frame */
    unsigned char *pp;   /* pointer to previous frame */
    unsigned char len;
    int ofs;

    int frame_x, frame_y;
    int frame_width, frame_height;

    frame_x = LE_16(&s->buf[6]);
    frame_y = LE_16(&s->buf[8]);
    frame_width = LE_16(&s->buf[10]) - frame_x + 1;
    frame_height = LE_16(&s->buf[12]) - frame_y + 1;

    /* if only a certain region will be updated, copy the entire previous
     * frame before the decode */
    if (frame_x || frame_y || (frame_width != s->avctx->width) ||
        (frame_height != s->avctx->height)) {

        memcpy(s->frame.data[0], s->prev_frame.data[0],
            s->avctx->height * s->frame.linesize[0]);
    }

    /* check if there is a new palette */
    if (s->buf[15] & 0x02) {
        p += 2;
        palette32 = (unsigned int *)s->palette;
        for (i = 0; i < PALETTE_COUNT; i++) {
            r = *p++ * 4;
            g = *p++ * 4;
            b = *p++ * 4;
            palette32[i] = (r << 16) | (g << 8) | (b);
        }
        s->size -= (256 * 3 + 2);
    }

    if (s->size >= 0) {
        /* originally UnpackFrame in VAG's code */
        pb = p;
        meth = *pb++;
        if (meth & 0x80) {
            lz_unpack(pb, s->unpack_buffer);
            meth &= 0x7F;
            pb = s->unpack_buffer;
        }

        dp = &s->frame.data[0][frame_y * s->frame.linesize[0] + frame_x];
        pp = &s->prev_frame.data[0][frame_y * s->prev_frame.linesize[0] + frame_x];
        switch (meth) {
        case 1:
            for (i = 0; i < frame_height; i++) {
                ofs = 0;
                do {
                    len = *pb++;
                    if (len & 0x80) {
                        len = (len & 0x7F) + 1;
                        memcpy(&dp[ofs], pb, len);
                        pb += len;
                        ofs += len;
                    } else {
                        /* interframe pixel copy */
                        memcpy(&dp[ofs], &pp[ofs], len + 1);
                        ofs += len + 1;
                    }
                } while (ofs < frame_width);
                if (ofs > frame_width) {
                    av_log(s->avctx, AV_LOG_ERROR, "VMD video: offset > width (%d > %d)\n",
                        ofs, frame_width);
                    break;
                }
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;

        case 2:
            for (i = 0; i < frame_height; i++) {
                memcpy(dp, pb, frame_width);
                pb += frame_width;
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;

        case 3:
            for (i = 0; i < frame_height; i++) {
                ofs = 0;
                do {
                    len = *pb++;
                    if (len & 0x80) {
                        len = (len & 0x7F) + 1;
                        if (*pb++ == 0xFF)
                            len = rle_unpack(pb, &dp[ofs], len);
                        else
                            memcpy(&dp[ofs], pb, len);
                        pb += len;
                        ofs += len;
                    } else {
                        /* interframe pixel copy */
                        memcpy(&dp[ofs], &pp[ofs], len + 1);
                        ofs += len + 1;
                    }
                } while (ofs < frame_width);
                if (ofs > frame_width) {
                    av_log(s->avctx, AV_LOG_ERROR, "VMD video: offset > width (%d > %d)\n",
                        ofs, frame_width);
                }
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;
        }
    }
}
static int vmdvideo_decode_init(AVCodecContext *avctx)
{
    VmdVideoContext *s = (VmdVideoContext *)avctx->priv_data;
    int i;
    unsigned int *palette32;
    int palette_index = 0;
    unsigned char r, g, b;
    unsigned char *vmd_header;
    unsigned char *raw_palette;

    s->avctx = avctx;
    avctx->pix_fmt = PIX_FMT_PAL8;
    avctx->has_b_frames = 0;
    dsputil_init(&s->dsp, avctx);

    /* make sure the VMD header made it */
    if (s->avctx->extradata_size != VMD_HEADER_SIZE) {
        av_log(s->avctx, AV_LOG_ERROR, "VMD video: expected extradata size of %d\n",
            VMD_HEADER_SIZE);
        return -1;
    }
    vmd_header = (unsigned char *)avctx->extradata;

    s->unpack_buffer = av_malloc(LE_32(&vmd_header[800]));
    if (!s->unpack_buffer)
        return -1;

    /* load up the initial palette; the components are scaled up by 4,
     * consistent with 6-bit VGA DAC values being expanded to 8 bits */
    raw_palette = &vmd_header[28];
    palette32 = (unsigned int *)s->palette;
    for (i = 0; i < PALETTE_COUNT; i++) {
        r = raw_palette[palette_index++] * 4;
        g = raw_palette[palette_index++] * 4;
        b = raw_palette[palette_index++] * 4;
        palette32[i] = (r << 16) | (g << 8) | (b);
    }

    s->frame.data[0] = s->prev_frame.data[0] = NULL;

    return 0;
}
static int vmdvideo_decode_frame(AVCodecContext *avctx,
                                 void *data, int *data_size,
                                 uint8_t *buf, int buf_size)
{
    VmdVideoContext *s = (VmdVideoContext *)avctx->priv_data;

    s->buf = buf;
    s->size = buf_size;

    if (buf_size < 16) {
        /* not even a complete frame information record; report no output */
        *data_size = 0;
        return buf_size;
    }

    s->frame.reference = 1;
    if (avctx->get_buffer(avctx, &s->frame)) {
        av_log(s->avctx, AV_LOG_ERROR, "VMD Video: get_buffer() failed\n");
        return -1;
    }

    vmd_decode(s);

    /* make the palette available on the way out */
    memcpy(s->frame.data[1], s->palette, PALETTE_COUNT * 4);

    if (s->prev_frame.data[0])
        avctx->release_buffer(avctx, &s->prev_frame);

    /* shuffle frames */
    s->prev_frame = s->frame;

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;

    /* report that the buffer was completely consumed */
    return buf_size;
}
static int vmdvideo_decode_end(AVCodecContext *avctx)
{
    VmdVideoContext *s = (VmdVideoContext *)avctx->priv_data;

    if (s->prev_frame.data[0])
        avctx->release_buffer(avctx, &s->prev_frame);
    av_free(s->unpack_buffer);

    return 0;
}
/*
 * Audio Decoder
 */

typedef struct VmdAudioContext {
    AVCodecContext *avctx;
    int channels;
    int bits;
    int block_align;
    unsigned char steps8[16];
    unsigned short steps16[16];
    unsigned short steps128[256];
    short predictors[2];
} VmdAudioContext;
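
/* steps8, steps16 and steps128 are signed-delta lookup tables built in
 * vmdaudio_decode_init(); the upper half of each table mirrors the lower half
 * negated.  predictors[] presumably holds one running predictor per channel,
 * though nothing in this revision updates it yet. */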
static int vmdaudio_decode_init(AVCodecContext *avctx)
{
    VmdAudioContext *s = (VmdAudioContext *)avctx->priv_data;
    int i;

    s->avctx = avctx;
    s->channels = avctx->channels;
    s->bits = avctx->bits_per_sample;
    s->block_align = avctx->block_align;

    av_log(s->avctx, AV_LOG_DEBUG, "%d channels, %d bits/sample, block align = %d, sample rate = %d\n",
        s->channels, s->bits, s->block_align, avctx->sample_rate);

    /* set up the steps8 and steps16 tables */
    for (i = 0; i < 8; i++) {
        if (i < 4)
            s->steps8[i] = i;
        else
            s->steps8[i] = s->steps8[i - 1] + i - 1;

        if (i == 0)
            s->steps16[i] = 0;
        else if (i == 1)
            s->steps16[i] = 4;
        else if (i == 2)
            s->steps16[i] = 16;
        else
            s->steps16[i] = 1 << (i + 4);
    }

    /* set up the steps128 table */
    s->steps128[0] = 0;
    s->steps128[1] = 8;
    for (i = 0x02; i <= 0x20; i++)
        s->steps128[i] = (i - 1) << 4;
    for (i = 0x21; i <= 0x60; i++)
        s->steps128[i] = (i + 0x1F) << 3;
    for (i = 0x61; i <= 0x70; i++)
        s->steps128[i] = (i - 0x51) << 6;
    for (i = 0x71; i <= 0x78; i++)
        s->steps128[i] = (i - 0x69) << 8;
    for (i = 0x79; i <= 0x7D; i++)
        s->steps128[i] = (i - 0x75) << 10;
    s->steps128[0x7E] = 0x3000;
    s->steps128[0x7F] = 0x4000;

    /* set up the negative half of each table */
    for (i = 0; i < 8; i++) {
        s->steps8[i + 8] = -s->steps8[i];
        s->steps16[i + 8] = -s->steps16[i];
    }
    for (i = 0; i < 128; i++)
        s->steps128[i + 128] = -s->steps128[i];

    return 0;
}
static void vmdaudio_decode_audio(VmdAudioContext *s, unsigned char *data,
    uint8_t *buf, int ratio)
{
    /* stub: no decoding is performed here in this revision */
}
static int vmdaudio_loadsound(VmdAudioContext *s, unsigned char *data,
    uint8_t *buf, int silence)
{
    int bytes_decoded = 0;
    int i;

    if (silence)
        av_log(s->avctx, AV_LOG_INFO, "silent block!\n");
    if (s->channels == 2) {

        /* stereo handling; the even and odd block_align cases are currently
         * handled identically */
        if ((s->block_align & 0x01) == 0) {
            if (silence)
                memset(data, 0, s->block_align * 2);
            else
                vmdaudio_decode_audio(s, data, buf, 1);
        } else {
            if (silence)
                memset(data, 0, s->block_align * 2);
            else
                vmdaudio_decode_audio(s, data, buf, 1);
        }
    } else {

        /* mono handling */
        if (silence) {
            if (s->bits == 16) {
                memset(data, 0, s->block_align * 2);
                bytes_decoded = s->block_align * 2;
            } else {
//                memset(data, 0x00, s->block_align);
//                bytes_decoded = s->block_align;
                memset(data, 0x00, s->block_align * 2);
                bytes_decoded = s->block_align * 2;
            }
        } else {
            /* copy the data but convert it to signed */
            for (i = 0; i < s->block_align; i++)
                data[i * 2 + 1] = buf[i] + 0x80;
            bytes_decoded = s->block_align * 2;
        }
    }

    return bytes_decoded;
}
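
/* Each audio chunk also carries the 16-byte frame record; byte 6 identifies
 * the block type handled by vmdaudio_decode_frame() below: 1 = plain audio,
 * 2 = audio and silence interleaved according to a 32-bit little-endian flag
 * word (one bit per block, LSB first), 3 = an entirely silent chunk. */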
static int vmdaudio_decode_frame(AVCodecContext *avctx,
                                 void *data, int *data_size,
                                 uint8_t *buf, int buf_size)
{
    VmdAudioContext *s = (VmdAudioContext *)avctx->priv_data;
    unsigned int sound_flags;
    unsigned char *output_samples = (unsigned char *)data;

    /* point to the start of the encoded data */
    unsigned char *p = buf + 16;
    unsigned char *p_end = buf + buf_size;

    /* no samples are reported until a recognized chunk type is decoded */
    *data_size = 0;

    if (buf_size < 16)
        return buf_size;

    if (buf[6] == 1) {
        /* the chunk contains audio */
        *data_size = vmdaudio_loadsound(s, output_samples, p, 0);
    } else if (buf[6] == 2) {
        /* the chunk contains audio and silence mixed together */
        sound_flags = LE_32(p);
        p += 4;

        /* do something with extrabufs here? */

        while (p < p_end) {
            if (sound_flags & 0x01)
                /* silence */
                *data_size += vmdaudio_loadsound(s, output_samples, p, 1);
            else {
                /* audio */
                *data_size += vmdaudio_loadsound(s, output_samples, p, 0);
                p += s->block_align;
            }
            output_samples += (s->block_align * s->bits / 8);
            sound_flags >>= 1;
        }
    } else if (buf[6] == 3) {
        /* silent chunk */
        *data_size = vmdaudio_loadsound(s, output_samples, p, 1);
    }

    return buf_size;
}
/*
 * Public Data Structures
 */

AVCodec vmdvideo_decoder = {
    "vmdvideo",
    CODEC_TYPE_VIDEO,
    CODEC_ID_VMDVIDEO,
    sizeof(VmdVideoContext),
    vmdvideo_decode_init,
    NULL,
    vmdvideo_decode_end,
    vmdvideo_decode_frame,
    CODEC_CAP_DR1,
};

AVCodec vmdaudio_decoder = {
    "vmdaudio",
    CODEC_TYPE_AUDIO,
    CODEC_ID_VMDAUDIO,
    sizeof(VmdAudioContext),
    vmdaudio_decode_init,
    NULL,
    NULL,
    vmdaudio_decode_frame,
};