/*
 * Sierra VMD Audio & Video Decoders
 * Copyright (C) 2004 the ffmpeg project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Sierra VMD audio & video decoders
 * by Vladimir "VAG" Gneushev (vagsoft at mail.ru)
 * for more information on the Sierra VMD format, visit:
 *   http://www.pcisys.net/~melanson/codecs/
 *
 * The video decoder outputs PAL8 colorspace data. The decoder expects
 * a 0x330-byte VMD file header to be transmitted via extradata during
 * codec initialization. Each encoded frame that is sent to this decoder
 * is expected to be prepended with the appropriate 16-byte frame
 * information record from the VMD file.
 *
 * The audio decoder, like the video decoder, expects each encoded data
 * chunk to be prepended with the appropriate 16-byte frame information
 * record from the VMD file. It does not require the 0x330-byte VMD file
 * header, but it does need the audio setup parameters passed in through
 * normal libavcodec API means.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "libavutil/intreadwrite.h"
#include "avcodec.h"

#define VMD_HEADER_SIZE 0x330
#define PALETTE_COUNT 256
/*
 * Video Decoder
 */

typedef struct VmdVideoContext {
    AVCodecContext *avctx;
    AVFrame frame;
    AVFrame prev_frame;

    const unsigned char *buf;
    int size;

    unsigned char palette[PALETTE_COUNT * 4];
    unsigned char *unpack_buffer;
    int unpack_buffer_size;

    int x_off, y_off;
} VmdVideoContext;

#define QUEUE_SIZE 0x1000
#define QUEUE_MASK 0x0FFF
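
/**
 * LZ-style unpacker used by the video decoder. The first 32 bits of the
 * input give the number of bytes to produce. Output is mirrored into a
 * 0x1000-byte ring buffer (pre-filled with 0x20) that serves as the match
 * window; each tag byte controls up to eight items, a set bit meaning one
 * literal byte and a clear bit a back-reference made of a 12-bit window
 * offset and a 4-bit length (+3). A leading 0x56781234 marker switches the
 * window start position and enables an escape code that extends a match
 * length by an extra byte.
 */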
static void lz_unpack(const unsigned char *src, int src_len,
                      unsigned char *dest, int dest_len)
{
    const unsigned char *s;
    unsigned int s_len;
    unsigned char *d;
    unsigned char *d_end;
    unsigned char queue[QUEUE_SIZE];
    unsigned int qpos;
    unsigned int dataleft;
    unsigned int chainofs;
    unsigned int chainlen;
    unsigned int speclen;
    unsigned char tag;
    unsigned int i, j;

    s = src;
    s_len = src_len;
    d = dest;
    d_end = d + dest_len;
    dataleft = AV_RL32(s);
    s += 4; s_len -= 4;
    memset(queue, 0x20, QUEUE_SIZE);
    if (s_len < 4)
        return;
    if (AV_RL32(s) == 0x56781234) {
        s += 4; s_len -= 4;
        qpos = 0x111;
        speclen = 0xF + 3;
    } else {
        qpos = 0xFEE;
        speclen = 100;  /* no speclen */
    }

    while (dataleft > 0 && s_len > 0) {
        tag = *s++; s_len--;
        if ((tag == 0xFF) && (dataleft > 8)) {
            if (d + 8 > d_end || s_len < 8)
                return;
            for (i = 0; i < 8; i++) {
                queue[qpos++] = *d++ = *s++;
                qpos &= QUEUE_MASK;
            }
            s_len -= 8;
            dataleft -= 8;
        } else {
            for (i = 0; i < 8; i++) {
                if (dataleft == 0)
                    break;
                if (tag & 0x01) {
                    if (d + 1 > d_end || s_len < 1)
                        return;
                    queue[qpos++] = *d++ = *s++;
                    qpos &= QUEUE_MASK;
                    dataleft--;
                    s_len--;
                } else {
                    if (s_len < 2)
                        return;
                    chainofs = *s++;
                    chainofs |= ((*s & 0xF0) << 4);
                    chainlen = (*s++ & 0x0F) + 3;
                    s_len -= 2;
                    if (chainlen == speclen) {
                        if (s_len < 1)
                            return;
                        chainlen = *s++ + 0xF + 3;
                        s_len--;
                    }
                    if (d + chainlen > d_end)
                        return;
                    for (j = 0; j < chainlen; j++) {
                        *d = queue[chainofs++ & QUEUE_MASK];
                        queue[qpos++] = *d++;
                        qpos &= QUEUE_MASK;
                    }
                    dataleft -= chainlen;
                }
                tag >>= 1;
            }
        }
    }
}
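
/**
 * RLE unpacker used by video coding method 3. A run byte with the high bit
 * set copies (run & 0x7F) * 2 literal bytes; otherwise the next two source
 * bytes are repeated as a pair for the given count. An odd sample count is
 * handled by copying the first byte straight through. Returns the number of
 * source bytes consumed.
 */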
static int rle_unpack(const unsigned char *src, unsigned char *dest,
                      int src_count, int src_size, int dest_len)
{
    const unsigned char *ps;
    unsigned char *pd;
    int i, l;
    unsigned char *dest_end = dest + dest_len;

    ps = src;
    pd = dest;
    if (src_count & 1) {
        if (src_size < 1)
            return 0;
        *pd++ = *ps++;
        src_size--;
    }

    src_count >>= 1;
    i = 0;
    do {
        if (src_size < 1)
            break;
        l = *ps++;
        src_size--;
        if (l & 0x80) {
            l = (l & 0x7F) * 2;
            if (pd + l > dest_end || src_size < l)
                return ps - src;
            memcpy(pd, ps, l);
            ps += l;
            src_size -= l;
            pd += l;
        } else {
            if (pd + i > dest_end || src_size < 2)
                return ps - src;
            for (i = 0; i < l; i++) {
                *pd++ = ps[0];
                *pd++ = ps[1];
            }
            ps += 2;
            src_size -= 2;
        }
        i += l;
    } while (i < src_count);

    return ps - src;
}
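
/**
 * Decode one video frame into s->frame. The 16-byte frame record at the
 * start of s->buf carries the update rectangle and flags; the payload may
 * begin with a replacement palette (flag 0x02 in byte 15) and is LZ-packed
 * when the method byte has bit 0x80 set. Methods 1 and 3 mix literal pixel
 * runs with copies from the previous frame (method 3 runs may additionally
 * be RLE-packed); method 2 is a raw copy of the rectangle.
 */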
static void vmd_decode(VmdVideoContext *s)
{
    int i;
    unsigned int *palette32;
    unsigned char r, g, b;

    /* point to the start of the encoded data */
    const unsigned char *p = s->buf + 16;

    const unsigned char *pb;
    unsigned int pb_size;
    unsigned char meth;
    unsigned char *dp;   /* pointer to current frame */
    unsigned char *pp;   /* pointer to previous frame */
    unsigned char len;
    int ofs;

    int frame_x, frame_y;
    int frame_width, frame_height;
    int dp_size;

    frame_x = AV_RL16(&s->buf[6]);
    frame_y = AV_RL16(&s->buf[8]);
    frame_width = AV_RL16(&s->buf[10]) - frame_x + 1;
    frame_height = AV_RL16(&s->buf[12]) - frame_y + 1;
    if (frame_x < 0 || frame_width < 0 ||
        frame_x >= s->avctx->width ||
        frame_width > s->avctx->width ||
        frame_x + frame_width > s->avctx->width)
        return;
    if (frame_y < 0 || frame_height < 0 ||
        frame_y >= s->avctx->height ||
        frame_height > s->avctx->height ||
        frame_y + frame_height > s->avctx->height)
        return;

    if ((frame_width == s->avctx->width && frame_height == s->avctx->height) &&
        (frame_x || frame_y)) {
        s->x_off = frame_x;
        s->y_off = frame_y;
    }
    frame_x -= s->x_off;
    frame_y -= s->y_off;

    /* if only a certain region will be updated, copy the entire previous
     * frame before the decode */
    if (s->prev_frame.data[0] &&
        (frame_x || frame_y || (frame_width != s->avctx->width) ||
        (frame_height != s->avctx->height))) {

        memcpy(s->frame.data[0], s->prev_frame.data[0],
            s->avctx->height * s->frame.linesize[0]);
    }

    /* check if there is a new palette */
    if (s->buf[15] & 0x02) {
        p += 2;
        palette32 = (unsigned int *)s->palette;
        for (i = 0; i < PALETTE_COUNT; i++) {
            r = *p++ * 4;
            g = *p++ * 4;
            b = *p++ * 4;
            palette32[i] = (r << 16) | (g << 8) | (b);
        }
        s->size -= (256 * 3 + 2);
    }
    if (s->size > 0) {
        /* originally UnpackFrame in VAG's code */
        pb = p;
        pb_size = s->buf + s->size - pb;
        if (pb_size < 1)
            return;
        meth = *pb++; pb_size--;
        if (meth & 0x80) {
            lz_unpack(pb, pb_size,
                      s->unpack_buffer, s->unpack_buffer_size);
            meth &= 0x7F;
            pb = s->unpack_buffer;
            pb_size = s->unpack_buffer_size;
        }

        dp = &s->frame.data[0][frame_y * s->frame.linesize[0] + frame_x];
        dp_size = s->frame.linesize[0] * s->avctx->height;
        pp = &s->prev_frame.data[0][frame_y * s->prev_frame.linesize[0] + frame_x];
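
        /* methods 1 and 3: per-row run coding against the previous frame;
         * method 2: raw copy of the update rectangle, one row at a time */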
        switch (meth) {
        case 1:
            for (i = 0; i < frame_height; i++) {
                ofs = 0;
                do {
                    if (pb_size < 1)
                        return;
                    len = *pb++;
                    pb_size--;
                    if (len & 0x80) {
                        len = (len & 0x7F) + 1;
                        if (ofs + len > frame_width || pb_size < len)
                            return;
                        memcpy(&dp[ofs], pb, len);
                        pb += len;
                        pb_size -= len;
                        ofs += len;
                    } else {
                        /* interframe pixel copy */
                        if (ofs + len + 1 > frame_width || !s->prev_frame.data[0])
                            return;
                        memcpy(&dp[ofs], &pp[ofs], len + 1);
                        ofs += len + 1;
                    }
                } while (ofs < frame_width);
                if (ofs > frame_width) {
                    av_log(s->avctx, AV_LOG_ERROR, "VMD video: offset > width (%d > %d)\n",
                        ofs, frame_width);
                    break;
                }
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;

        case 2:
            for (i = 0; i < frame_height; i++) {
                if (pb_size < frame_width)
                    return;
                memcpy(dp, pb, frame_width);
                pb += frame_width;
                pb_size -= frame_width;
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;

        case 3:
            for (i = 0; i < frame_height; i++) {
                ofs = 0;
                do {
                    if (pb_size < 1)
                        return;
                    len = *pb++;
                    pb_size--;
                    if (len & 0x80) {
                        len = (len & 0x7F) + 1;
                        if (pb_size < 1)
                            return;
                        if (*pb++ == 0xFF)
                            len = rle_unpack(pb, &dp[ofs], len, pb_size, frame_width - ofs);
                        else {
                            if (pb_size < len)
                                return;
                            memcpy(&dp[ofs], pb, len);
                        }
                        pb += len;
                        pb_size -= 1 + len;
                        ofs += len;
                    } else {
                        /* interframe pixel copy */
                        if (ofs + len + 1 > frame_width || !s->prev_frame.data[0])
                            return;
                        memcpy(&dp[ofs], &pp[ofs], len + 1);
                        ofs += len + 1;
                    }
                } while (ofs < frame_width);
                if (ofs > frame_width) {
                    av_log(s->avctx, AV_LOG_ERROR, "VMD video: offset > width (%d > %d)\n",
                        ofs, frame_width);
                }
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;
        }
    }
}
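
/**
 * Validate the 0x330-byte VMD header passed via extradata, allocate the LZ
 * unpack buffer whose size is stored at header offset 800, and load the
 * initial palette from offset 28 (components are scaled by 4, apparently
 * from 6-bit VGA values).
 */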
static av_cold int vmdvideo_decode_init(AVCodecContext *avctx)
{
    VmdVideoContext *s = avctx->priv_data;
    int i;
    unsigned int *palette32;
    int palette_index = 0;
    unsigned char r, g, b;
    unsigned char *vmd_header;
    unsigned char *raw_palette;

    s->avctx = avctx;
    avctx->pix_fmt = PIX_FMT_PAL8;

    /* make sure the VMD header made it */
    if (s->avctx->extradata_size != VMD_HEADER_SIZE) {
        av_log(s->avctx, AV_LOG_ERROR, "VMD video: expected extradata size of %d\n",
            VMD_HEADER_SIZE);
        return -1;
    }
    vmd_header = (unsigned char *)avctx->extradata;

    s->unpack_buffer_size = AV_RL32(&vmd_header[800]);
    s->unpack_buffer = av_malloc(s->unpack_buffer_size);
    if (!s->unpack_buffer)
        return -1;

    /* load up the initial palette */
    raw_palette = &vmd_header[28];
    palette32 = (unsigned int *)s->palette;
    for (i = 0; i < PALETTE_COUNT; i++) {
        r = raw_palette[palette_index++] * 4;
        g = raw_palette[palette_index++] * 4;
        b = raw_palette[palette_index++] * 4;
        palette32[i] = (r << 16) | (g << 8) | (b);
    }

    return 0;
}
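
/**
 * Decode one packet: the first 16 bytes are the VMD frame record, the rest
 * is the frame payload. The current palette is copied into data[1] on the
 * way out, and the frame pair is swapped so the newly decoded picture also
 * serves as the reference for the next call.
 */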
static int vmdvideo_decode_frame(AVCodecContext *avctx,
                                 void *data, int *data_size,
                                 AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    VmdVideoContext *s = avctx->priv_data;

    s->buf = buf;
    s->size = buf_size;

    if (buf_size < 16)
        return buf_size;

    s->frame.reference = 1;
    if (avctx->get_buffer(avctx, &s->frame)) {
        av_log(s->avctx, AV_LOG_ERROR, "VMD Video: get_buffer() failed\n");
        return -1;
    }

    vmd_decode(s);

    /* make the palette available on the way out */
    memcpy(s->frame.data[1], s->palette, PALETTE_COUNT * 4);

    /* shuffle frames */
    FFSWAP(AVFrame, s->frame, s->prev_frame);
    if (s->frame.data[0])
        avctx->release_buffer(avctx, &s->frame);

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->prev_frame;

    /* report that the buffer was completely consumed */
    return buf_size;
}

static av_cold int vmdvideo_decode_end(AVCodecContext *avctx)
{
    VmdVideoContext *s = avctx->priv_data;

    if (s->prev_frame.data[0])
        avctx->release_buffer(avctx, &s->prev_frame);
    av_free(s->unpack_buffer);

    return 0;
}


/*
 * Audio Decoder
 */

typedef struct VmdAudioContext {
    AVCodecContext *avctx;
    int channels;
    int bits;
    int block_align;
    int predictors[2];
} VmdAudioContext;
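
/* step table for the delta-coded 16-bit audio: each input byte indexes this
 * table with its low 7 bits, and the high bit selects whether the step is
 * added to or subtracted from the running per-channel predictor */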
static const uint16_t vmdaudio_table[128] = {
    0x000, 0x008, 0x010, 0x020, 0x030, 0x040, 0x050, 0x060, 0x070, 0x080,
    0x090, 0x0A0, 0x0B0, 0x0C0, 0x0D0, 0x0E0, 0x0F0, 0x100, 0x110, 0x120,
    0x130, 0x140, 0x150, 0x160, 0x170, 0x180, 0x190, 0x1A0, 0x1B0, 0x1C0,
    0x1D0, 0x1E0, 0x1F0, 0x200, 0x208, 0x210, 0x218, 0x220, 0x228, 0x230,
    0x238, 0x240, 0x248, 0x250, 0x258, 0x260, 0x268, 0x270, 0x278, 0x280,
    0x288, 0x290, 0x298, 0x2A0, 0x2A8, 0x2B0, 0x2B8, 0x2C0, 0x2C8, 0x2D0,
    0x2D8, 0x2E0, 0x2E8, 0x2F0, 0x2F8, 0x300, 0x308, 0x310, 0x318, 0x320,
    0x328, 0x330, 0x338, 0x340, 0x348, 0x350, 0x358, 0x360, 0x368, 0x370,
    0x378, 0x380, 0x388, 0x390, 0x398, 0x3A0, 0x3A8, 0x3B0, 0x3B8, 0x3C0,
    0x3C8, 0x3D0, 0x3D8, 0x3E0, 0x3E8, 0x3F0, 0x3F8, 0x400, 0x440, 0x480,
    0x4C0, 0x500, 0x540, 0x580, 0x5C0, 0x600, 0x640, 0x680, 0x6C0, 0x700,
    0x740, 0x780, 0x7C0, 0x800, 0x900, 0xA00, 0xB00, 0xC00, 0xD00, 0xE00,
    0xF00, 0x1000, 0x1400, 0x1800, 0x1C00, 0x2000, 0x3000, 0x4000
};

static av_cold int vmdaudio_decode_init(AVCodecContext *avctx)
{
    VmdAudioContext *s = avctx->priv_data;

    s->avctx = avctx;
    s->channels = avctx->channels;
    s->bits = avctx->bits_per_coded_sample;
    s->block_align = avctx->block_align;
    avctx->sample_fmt = SAMPLE_FMT_S16;

    av_log(s->avctx, AV_LOG_DEBUG, "%d channels, %d bits/sample, block align = %d, sample rate = %d\n",
           s->channels, s->bits, s->block_align, avctx->sample_rate);

    return 0;
}
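
/**
 * Expand delta-coded 16-bit audio: each input byte adjusts the per-channel
 * predictor by a step from vmdaudio_table (sign in the high bit), and the
 * clipped predictor is emitted as one int16 sample. With stereo input the
 * channel toggles after every sample.
 */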
static void vmdaudio_decode_audio(VmdAudioContext *s, unsigned char *data,
    const uint8_t *buf, int buf_size, int stereo)
{
    int i;
    int chan = 0;
    int16_t *out = (int16_t*)data;

    for(i = 0; i < buf_size; i++) {
        if(buf[i] & 0x80)
            s->predictors[chan] -= vmdaudio_table[buf[i] & 0x7F];
        else
            s->predictors[chan] += vmdaudio_table[buf[i]];
        s->predictors[chan] = av_clip_int16(s->predictors[chan]);
        out[i] = s->predictors[chan];
        chan ^= stereo;
    }
}
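
/**
 * Decode one block of audio into data: silent blocks are zeroed, 16-bit
 * blocks go through the delta decoder above, and 8-bit blocks are copied
 * with an unsigned-to-signed offset. Returns the number of bytes written,
 * two per input byte.
 */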
static int vmdaudio_loadsound(VmdAudioContext *s, unsigned char *data,
    const uint8_t *buf, int silence, int data_size)
{
    int bytes_decoded = 0;
    int i;

//    if (silence)
//        av_log(s->avctx, AV_LOG_INFO, "silent block!\n");
    if (s->channels == 2) {

        /* stereo handling */
        if (silence) {
            memset(data, 0, data_size * 2);
        } else {
            if (s->bits == 16)
                vmdaudio_decode_audio(s, data, buf, data_size, 1);
            else {
                /* copy the data but convert it to signed */
                for (i = 0; i < data_size; i++){
                    *data++ = buf[i] + 0x80;
                    *data++ = buf[i] + 0x80;
                }
            }
        }
    } else {
        bytes_decoded = data_size * 2;

        /* mono handling */
        if (silence) {
            memset(data, 0, data_size * 2);
        } else {
            if (s->bits == 16) {
                vmdaudio_decode_audio(s, data, buf, data_size, 0);
            } else {
                /* copy the data but convert it to signed */
                for (i = 0; i < data_size; i++){
                    *data++ = buf[i] + 0x80;
                    *data++ = buf[i] + 0x80;
                }
            }
        }
    }

    return data_size * 2;
}
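
/**
 * Decode one audio packet. Byte 6 of the 16-byte frame record selects the
 * chunk type: 1 is a plain audio chunk, 2 is the initial chunk whose first
 * 32 bits flag the number of leading silent blocks, and 3 is an entirely
 * silent chunk.
 */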
static int vmdaudio_decode_frame(AVCodecContext *avctx,
                                 void *data, int *data_size,
                                 AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    VmdAudioContext *s = avctx->priv_data;
    unsigned char *output_samples = (unsigned char *)data;

    /* point to the start of the encoded data */
    const unsigned char *p = buf + 16;

    if (buf_size < 16)
        return buf_size;

    if (buf[6] == 1) {
        /* the chunk contains audio */
        *data_size = vmdaudio_loadsound(s, output_samples, p, 0, buf_size - 16);
    } else if (buf[6] == 2) {
        /* initial chunk, may contain audio and silence */
        uint32_t flags = AV_RB32(p);
        int raw_block_size = s->block_align * s->bits / 8;
        int silent_chunks;
        if(flags == 0xFFFFFFFF)
            silent_chunks = 32;
        else
            silent_chunks = av_log2(flags + 1);
        if(*data_size < (s->block_align*silent_chunks + buf_size - 20) * 2)
            return -1;
        *data_size = 0;
        memset(output_samples, 0, raw_block_size * silent_chunks);
        output_samples += raw_block_size * silent_chunks;
        *data_size = raw_block_size * silent_chunks;
        *data_size += vmdaudio_loadsound(s, output_samples, p + 4, 0, buf_size - 20);
    } else if (buf[6] == 3) {
        /* silent chunk */
        *data_size = vmdaudio_loadsound(s, output_samples, p, 1, 0);
    }

    return buf_size;
}


/*
 * Public Data Structures
 */

AVCodec vmdvideo_decoder = {
    "vmdvideo",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_VMDVIDEO,
    sizeof(VmdVideoContext),
    vmdvideo_decode_init,
    NULL,
    vmdvideo_decode_end,
    vmdvideo_decode_frame,
    CODEC_CAP_DR1,
    .long_name = NULL_IF_CONFIG_SMALL("Sierra VMD video"),
};

AVCodec vmdaudio_decoder = {
    "vmdaudio",
    AVMEDIA_TYPE_AUDIO,
    CODEC_ID_VMDAUDIO,
    sizeof(VmdAudioContext),
    vmdaudio_decode_init,
    NULL,
    NULL,
    vmdaudio_decode_frame,
    .long_name = NULL_IF_CONFIG_SMALL("Sierra VMD audio"),
};