You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

655 lines
20KB

  1. /*
  2. * Sierra VMD Audio & Video Decoders
  3. * Copyright (C) 2004 the ffmpeg project
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * Sierra VMD audio & video decoders
  24. * by Vladimir "VAG" Gneushev (vagsoft at mail.ru)
  25. * for more information on the Sierra VMD format, visit:
  26. * http://www.pcisys.net/~melanson/codecs/
  27. *
  28. * The video decoder outputs PAL8 colorspace data. The decoder expects
  29. * a 0x330-byte VMD file header to be transmitted via extradata during
  30. * codec initialization. Each encoded frame that is sent to this decoder
  31. * is expected to be prepended with the appropriate 16-byte frame
  32. * information record from the VMD file.
  33. *
  34. * The audio decoder, like the video decoder, expects each encoded data
  35. * chunk to be prepended with the appropriate 16-byte frame information
  36. * record from the VMD file. It does not require the 0x330-byte VMD file
  37. * header, but it does need the audio setup parameters passed in through
  38. * normal libavcodec API means.
  39. */
  40. #include <stdio.h>
  41. #include <stdlib.h>
  42. #include <string.h>
  43. #include "libavutil/intreadwrite.h"
  44. #include "avcodec.h"
  45. #define VMD_HEADER_SIZE 0x330
  46. #define PALETTE_COUNT 256
  47. /*
  48. * Video Decoder
  49. */
typedef struct VmdVideoContext {

    AVCodecContext *avctx;
    AVFrame frame;               /* frame currently being decoded */
    AVFrame prev_frame;          /* previous output frame, source for interframe copies */

    const unsigned char *buf;    /* current packet: 16-byte frame record + encoded data */
    int size;                    /* size of buf in bytes */

    /* current palette in 32-bit 0RGB form, exported via frame.data[1] */
    unsigned char palette[PALETTE_COUNT * 4];
    unsigned char *unpack_buffer;    /* scratch buffer for LZ-decompressed frame data */
    int unpack_buffer_size;          /* scratch size, read from the VMD file header */

    int x_off, y_off;            /* global offset applied to update rectangles */
} VmdVideoContext;
  61. #define QUEUE_SIZE 0x1000
  62. #define QUEUE_MASK 0x0FFF
/**
 * Decompress an LZSS-style stream with a 4 KB ring-buffer history.
 *
 * Stream layout: a 32-bit LE count of bytes to produce, then an optional
 * 0x56781234 LE signature that selects an alternate initial queue position
 * and enables extended back-reference lengths, then tag bytes where each
 * bit selects a literal (1) or a 12-bit-offset/4-bit-length back-reference
 * (0). Writes at most dest_len bytes to dest; returns silently (leaving
 * the output partially filled) on truncated input or exhausted output.
 */
static void lz_unpack(const unsigned char *src, int src_len,
                      unsigned char *dest, int dest_len)
{
    const unsigned char *s;
    const unsigned char *s_end;
    unsigned char *d;
    unsigned char *d_end;
    unsigned char queue[QUEUE_SIZE];
    unsigned int qpos;
    unsigned int dataleft;
    unsigned int chainofs;
    unsigned int chainlen;
    unsigned int speclen;
    unsigned char tag;
    unsigned int i, j;

    s = src;
    s_end = src + src_len;
    d = dest;
    d_end = d + dest_len;

    if (s_end - s < 8)
        return;
    dataleft = AV_RL32(s);    /* total bytes the stream claims to produce */
    s += 4;
    memset(queue, 0x20, QUEUE_SIZE);    /* history window starts as spaces */
    if (AV_RL32(s) == 0x56781234) {
        s += 4;
        qpos = 0x111;
        speclen = 0xF + 3;    /* length 18 signals an extended-length byte */
    } else {
        qpos = 0xFEE;
        speclen = 100; /* no speclen */
    }

    while (s_end - s > 0 && dataleft > 0) {
        tag = *s++;
        if ((tag == 0xFF) && (dataleft > 8)) {
            /* all-ones tag with more than 8 bytes left: 8 straight literals */
            if (d_end - d < 8 || s_end - s < 8)
                return;
            for (i = 0; i < 8; i++) {
                queue[qpos++] = *d++ = *s++;
                qpos &= QUEUE_MASK;
            }
            dataleft -= 8;
        } else {
            /* process the 8 tag bits, LSB first */
            for (i = 0; i < 8; i++) {
                if (dataleft == 0)
                    break;
                if (tag & 0x01) {
                    /* literal byte, also recorded in the history queue */
                    if (d_end - d < 1 || s_end - s < 1)
                        return;
                    queue[qpos++] = *d++ = *s++;
                    qpos &= QUEUE_MASK;
                    dataleft--;
                } else {
                    if (s_end - s < 2)
                        return;
                    /* 12-bit window offset, 4-bit length biased by 3 */
                    chainofs = *s++;
                    chainofs |= ((*s & 0xF0) << 4);
                    chainlen = (*s++ & 0x0F) + 3;
                    if (chainlen == speclen) {
                        if (s_end - s < 1)
                            return;
                        chainlen = *s++ + 0xF + 3;    /* extended length */
                    }
                    if (d_end - d < chainlen)
                        return;
                    /* copy byte-by-byte so the reference may overlap qpos */
                    for (j = 0; j < chainlen; j++) {
                        *d = queue[chainofs++ & QUEUE_MASK];
                        queue[qpos++] = *d++;
                        qpos &= QUEUE_MASK;
                    }
                    dataleft -= chainlen;
                }
                tag >>= 1;
            }
        }
    }
}
  140. static int rle_unpack(const unsigned char *src, int src_len, int src_count,
  141. unsigned char *dest, int dest_len)
  142. {
  143. const unsigned char *ps;
  144. const unsigned char *ps_end;
  145. unsigned char *pd;
  146. int i, l;
  147. unsigned char *dest_end = dest + dest_len;
  148. ps = src;
  149. ps_end = src + src_len;
  150. pd = dest;
  151. if (src_count & 1) {
  152. if (ps_end - ps < 1)
  153. return 0;
  154. *pd++ = *ps++;
  155. }
  156. src_count >>= 1;
  157. i = 0;
  158. do {
  159. if (ps_end - ps < 1)
  160. break;
  161. l = *ps++;
  162. if (l & 0x80) {
  163. l = (l & 0x7F) * 2;
  164. if (dest_end - pd < l || ps_end - ps < l)
  165. return ps - src;
  166. memcpy(pd, ps, l);
  167. ps += l;
  168. pd += l;
  169. } else {
  170. if (dest_end - pd < i || ps_end - ps < 2)
  171. return ps - src;
  172. for (i = 0; i < l; i++) {
  173. *pd++ = ps[0];
  174. *pd++ = ps[1];
  175. }
  176. ps += 2;
  177. }
  178. i += l;
  179. } while (i < src_count);
  180. return ps - src;
  181. }
/**
 * Decode one video frame from s->buf into s->frame.
 *
 * The first 16 bytes of the packet are the VMD frame record: 16-bit LE
 * fields at offsets 6/8/10/12 give the update rectangle (left, top,
 * right, bottom, inclusive), and bit 1 of byte 15 signals an embedded
 * palette. On malformed data the function simply returns, leaving the
 * frame partially updated.
 */
static void vmd_decode(VmdVideoContext *s)
{
    int i;
    unsigned int *palette32;
    unsigned char r, g, b;

    /* point to the start of the encoded data */
    const unsigned char *p = s->buf + 16;
    const unsigned char *p_end = s->buf + s->size;

    const unsigned char *pb;
    const unsigned char *pb_end;
    unsigned char meth;
    unsigned char *dp;   /* pointer to current frame */
    unsigned char *pp;   /* pointer to previous frame */
    unsigned char len;
    int ofs;

    int frame_x, frame_y;
    int frame_width, frame_height;

    frame_x = AV_RL16(&s->buf[6]);
    frame_y = AV_RL16(&s->buf[8]);
    frame_width = AV_RL16(&s->buf[10]) - frame_x + 1;
    frame_height = AV_RL16(&s->buf[12]) - frame_y + 1;

    /* reject rectangles that fall outside the coded picture */
    if (frame_x < 0 || frame_width < 0 ||
        frame_x >= s->avctx->width ||
        frame_width > s->avctx->width ||
        frame_x + frame_width > s->avctx->width)
        return;
    if (frame_y < 0 || frame_height < 0 ||
        frame_y >= s->avctx->height ||
        frame_height > s->avctx->height ||
        frame_y + frame_height > s->avctx->height)
        return;

    /* a full-size rectangle at a non-zero position establishes a global
     * offset that later partial updates are relative to */
    if ((frame_width == s->avctx->width && frame_height == s->avctx->height) &&
        (frame_x || frame_y)) {

        s->x_off = frame_x;
        s->y_off = frame_y;
    }
    /* NOTE(review): frame_x/frame_y are not re-validated after this
     * subtraction — confirm a stored offset cannot push them negative */
    frame_x -= s->x_off;
    frame_y -= s->y_off;

    /* if only a certain region will be updated, copy the entire previous
     * frame before the decode */
    if (s->prev_frame.data[0] &&
        (frame_x || frame_y || (frame_width != s->avctx->width) ||
        (frame_height != s->avctx->height))) {

        memcpy(s->frame.data[0], s->prev_frame.data[0],
            s->avctx->height * s->frame.linesize[0]);
    }

    /* check if there is a new palette */
    if (s->buf[15] & 0x02) {
        if (p_end - p < 2 + 3 * PALETTE_COUNT)
            return;
        p += 2;
        palette32 = (unsigned int *)s->palette;
        for (i = 0; i < PALETTE_COUNT; i++) {
            /* 6-bit VGA palette components, scaled to 8 bits */
            r = *p++ * 4;
            g = *p++ * 4;
            b = *p++ * 4;
            palette32[i] = (r << 16) | (g << 8) | (b);
        }
    }

    if (p < p_end) {
        /* originally UnpackFrame in VAG's code */
        pb = p;
        pb_end = p_end;
        meth = *pb++;
        if (meth & 0x80) {
            /* high bit of the method byte: payload is LZ-compressed */
            lz_unpack(pb, p_end - pb, s->unpack_buffer, s->unpack_buffer_size);
            meth &= 0x7F;
            pb = s->unpack_buffer;
            pb_end = s->unpack_buffer + s->unpack_buffer_size;
        }

        dp = &s->frame.data[0][frame_y * s->frame.linesize[0] + frame_x];
        pp = &s->prev_frame.data[0][frame_y * s->prev_frame.linesize[0] + frame_x];
        switch (meth) {
        case 1:
            /* per-row runs: literal pixels (high bit set) or copies from
             * the previous frame (high bit clear) */
            for (i = 0; i < frame_height; i++) {
                ofs = 0;
                do {
                    if (pb_end - pb < 1)
                        return;
                    len = *pb++;
                    if (len & 0x80) {
                        len = (len & 0x7F) + 1;
                        if (ofs + len > frame_width || pb_end - pb < len)
                            return;
                        memcpy(&dp[ofs], pb, len);
                        pb += len;
                        ofs += len;
                    } else {
                        /* interframe pixel copy */
                        if (ofs + len + 1 > frame_width || !s->prev_frame.data[0])
                            return;
                        memcpy(&dp[ofs], &pp[ofs], len + 1);
                        ofs += len + 1;
                    }
                } while (ofs < frame_width);
                if (ofs > frame_width) {
                    av_log(s->avctx, AV_LOG_ERROR, "VMD video: offset > width (%d > %d)\n",
                        ofs, frame_width);
                    break;
                }
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;

        case 2:
            /* raw image: one full row of pixels per line */
            for (i = 0; i < frame_height; i++) {
                if (pb_end - pb < frame_width)
                    return;
                memcpy(dp, pb, frame_width);
                pb += frame_width;
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;

        case 3:
            /* like method 1, but a literal run prefixed with 0xFF is
             * itself RLE-compressed */
            for (i = 0; i < frame_height; i++) {
                ofs = 0;
                do {
                    if (pb_end - pb < 1)
                        return;
                    len = *pb++;
                    if (len & 0x80) {
                        len = (len & 0x7F) + 1;
                        if (pb_end - pb < 1)
                            return;
                        if (*pb++ == 0xFF)
                            /* NOTE(review): rle_unpack returns the number of
                             * *source* bytes consumed, and that value is
                             * added to both pb and ofs below; ofs arguably
                             * should advance by the decoded pixel count —
                             * confirm against known-good samples */
                            len = rle_unpack(pb, pb_end - pb, len, &dp[ofs], frame_width - ofs);
                        else {
                            if (pb_end - pb < len)
                                return;
                            memcpy(&dp[ofs], pb, len);
                        }
                        pb += len;
                        ofs += len;
                    } else {
                        /* interframe pixel copy */
                        if (ofs + len + 1 > frame_width || !s->prev_frame.data[0])
                            return;
                        memcpy(&dp[ofs], &pp[ofs], len + 1);
                        ofs += len + 1;
                    }
                } while (ofs < frame_width);
                if (ofs > frame_width) {
                    av_log(s->avctx, AV_LOG_ERROR, "VMD video: offset > width (%d > %d)\n",
                        ofs, frame_width);
                }
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;
        }
    }
}
  335. static av_cold int vmdvideo_decode_init(AVCodecContext *avctx)
  336. {
  337. VmdVideoContext *s = avctx->priv_data;
  338. int i;
  339. unsigned int *palette32;
  340. int palette_index = 0;
  341. unsigned char r, g, b;
  342. unsigned char *vmd_header;
  343. unsigned char *raw_palette;
  344. s->avctx = avctx;
  345. avctx->pix_fmt = PIX_FMT_PAL8;
  346. /* make sure the VMD header made it */
  347. if (s->avctx->extradata_size != VMD_HEADER_SIZE) {
  348. av_log(s->avctx, AV_LOG_ERROR, "VMD video: expected extradata size of %d\n",
  349. VMD_HEADER_SIZE);
  350. return -1;
  351. }
  352. vmd_header = (unsigned char *)avctx->extradata;
  353. s->unpack_buffer_size = AV_RL32(&vmd_header[800]);
  354. s->unpack_buffer = av_malloc(s->unpack_buffer_size);
  355. if (!s->unpack_buffer)
  356. return -1;
  357. /* load up the initial palette */
  358. raw_palette = &vmd_header[28];
  359. palette32 = (unsigned int *)s->palette;
  360. for (i = 0; i < PALETTE_COUNT; i++) {
  361. r = raw_palette[palette_index++] * 4;
  362. g = raw_palette[palette_index++] * 4;
  363. b = raw_palette[palette_index++] * 4;
  364. palette32[i] = (r << 16) | (g << 8) | (b);
  365. }
  366. avcodec_get_frame_defaults(&s->frame);
  367. avcodec_get_frame_defaults(&s->prev_frame);
  368. return 0;
  369. }
  370. static int vmdvideo_decode_frame(AVCodecContext *avctx,
  371. void *data, int *data_size,
  372. AVPacket *avpkt)
  373. {
  374. const uint8_t *buf = avpkt->data;
  375. int buf_size = avpkt->size;
  376. VmdVideoContext *s = avctx->priv_data;
  377. s->buf = buf;
  378. s->size = buf_size;
  379. if (buf_size < 16)
  380. return buf_size;
  381. s->frame.reference = 1;
  382. if (avctx->get_buffer(avctx, &s->frame)) {
  383. av_log(s->avctx, AV_LOG_ERROR, "VMD Video: get_buffer() failed\n");
  384. return -1;
  385. }
  386. vmd_decode(s);
  387. /* make the palette available on the way out */
  388. memcpy(s->frame.data[1], s->palette, PALETTE_COUNT * 4);
  389. /* shuffle frames */
  390. FFSWAP(AVFrame, s->frame, s->prev_frame);
  391. if (s->frame.data[0])
  392. avctx->release_buffer(avctx, &s->frame);
  393. *data_size = sizeof(AVFrame);
  394. *(AVFrame*)data = s->prev_frame;
  395. /* report that the buffer was completely consumed */
  396. return buf_size;
  397. }
  398. static av_cold int vmdvideo_decode_end(AVCodecContext *avctx)
  399. {
  400. VmdVideoContext *s = avctx->priv_data;
  401. if (s->prev_frame.data[0])
  402. avctx->release_buffer(avctx, &s->prev_frame);
  403. av_free(s->unpack_buffer);
  404. return 0;
  405. }
  406. /*
  407. * Audio Decoder
  408. */
#define BLOCK_TYPE_AUDIO   1   /* chunk carries encoded audio samples */
#define BLOCK_TYPE_INITIAL 2   /* first chunk: silence bitmask, then audio */
#define BLOCK_TYPE_SILENCE 3   /* chunk is pure silence, no payload */

typedef struct VmdAudioContext {
    int out_bps;      /* bytes per output sample: 1 (u8) or 2 (s16) */
    int chunk_size;   /* encoded size of one audio chunk in the packet */
} VmdAudioContext;
/* DPCM step table for the 16-bit audio mode: a coded byte's low 7 bits
 * index this table for the delta magnitude, its high bit gives the sign */
static const uint16_t vmdaudio_table[128] = {
    0x000, 0x008, 0x010, 0x020, 0x030, 0x040, 0x050, 0x060, 0x070, 0x080,
    0x090, 0x0A0, 0x0B0, 0x0C0, 0x0D0, 0x0E0, 0x0F0, 0x100, 0x110, 0x120,
    0x130, 0x140, 0x150, 0x160, 0x170, 0x180, 0x190, 0x1A0, 0x1B0, 0x1C0,
    0x1D0, 0x1E0, 0x1F0, 0x200, 0x208, 0x210, 0x218, 0x220, 0x228, 0x230,
    0x238, 0x240, 0x248, 0x250, 0x258, 0x260, 0x268, 0x270, 0x278, 0x280,
    0x288, 0x290, 0x298, 0x2A0, 0x2A8, 0x2B0, 0x2B8, 0x2C0, 0x2C8, 0x2D0,
    0x2D8, 0x2E0, 0x2E8, 0x2F0, 0x2F8, 0x300, 0x308, 0x310, 0x318, 0x320,
    0x328, 0x330, 0x338, 0x340, 0x348, 0x350, 0x358, 0x360, 0x368, 0x370,
    0x378, 0x380, 0x388, 0x390, 0x398, 0x3A0, 0x3A8, 0x3B0, 0x3B8, 0x3C0,
    0x3C8, 0x3D0, 0x3D8, 0x3E0, 0x3E8, 0x3F0, 0x3F8, 0x400, 0x440, 0x480,
    0x4C0, 0x500, 0x540, 0x580, 0x5C0, 0x600, 0x640, 0x680, 0x6C0, 0x700,
    0x740, 0x780, 0x7C0, 0x800, 0x900, 0xA00, 0xB00, 0xC00, 0xD00, 0xE00,
    0xF00, 0x1000, 0x1400, 0x1800, 0x1C00, 0x2000, 0x3000, 0x4000
};
  431. static av_cold int vmdaudio_decode_init(AVCodecContext *avctx)
  432. {
  433. VmdAudioContext *s = avctx->priv_data;
  434. if (avctx->channels < 1 || avctx->channels > 2) {
  435. av_log(avctx, AV_LOG_ERROR, "invalid number of channels\n");
  436. return AVERROR(EINVAL);
  437. }
  438. if (avctx->block_align < 1) {
  439. av_log(avctx, AV_LOG_ERROR, "invalid block align\n");
  440. return AVERROR(EINVAL);
  441. }
  442. if (avctx->bits_per_coded_sample == 16)
  443. avctx->sample_fmt = AV_SAMPLE_FMT_S16;
  444. else
  445. avctx->sample_fmt = AV_SAMPLE_FMT_U8;
  446. s->out_bps = av_get_bytes_per_sample(avctx->sample_fmt);
  447. s->chunk_size = avctx->block_align + avctx->channels * (s->out_bps == 2);
  448. av_log(avctx, AV_LOG_DEBUG, "%d channels, %d bits/sample, "
  449. "block align = %d, sample rate = %d\n",
  450. avctx->channels, avctx->bits_per_coded_sample, avctx->block_align,
  451. avctx->sample_rate);
  452. return 0;
  453. }
  454. static void decode_audio_s16(int16_t *out, const uint8_t *buf, int buf_size,
  455. int channels)
  456. {
  457. int ch;
  458. const uint8_t *buf_end = buf + buf_size;
  459. int predictor[2];
  460. int st = channels - 1;
  461. /* decode initial raw sample */
  462. for (ch = 0; ch < channels; ch++) {
  463. predictor[ch] = (int16_t)AV_RL16(buf);
  464. buf += 2;
  465. *out++ = predictor[ch];
  466. }
  467. /* decode DPCM samples */
  468. ch = 0;
  469. while (buf < buf_end) {
  470. uint8_t b = *buf++;
  471. if (b & 0x80)
  472. predictor[ch] -= vmdaudio_table[b & 0x7F];
  473. else
  474. predictor[ch] += vmdaudio_table[b];
  475. predictor[ch] = av_clip_int16(predictor[ch]);
  476. *out++ = predictor[ch];
  477. ch ^= st;
  478. }
  479. }
  480. static int vmdaudio_decode_frame(AVCodecContext *avctx,
  481. void *data, int *data_size,
  482. AVPacket *avpkt)
  483. {
  484. const uint8_t *buf = avpkt->data;
  485. const uint8_t *buf_end;
  486. int buf_size = avpkt->size;
  487. VmdAudioContext *s = avctx->priv_data;
  488. int block_type, silent_chunks, audio_chunks;
  489. int nb_samples, out_size;
  490. uint8_t *output_samples_u8 = data;
  491. int16_t *output_samples_s16 = data;
  492. if (buf_size < 16) {
  493. av_log(avctx, AV_LOG_WARNING, "skipping small junk packet\n");
  494. *data_size = 0;
  495. return buf_size;
  496. }
  497. block_type = buf[6];
  498. if (block_type < BLOCK_TYPE_AUDIO || block_type > BLOCK_TYPE_SILENCE) {
  499. av_log(avctx, AV_LOG_ERROR, "unknown block type: %d\n", block_type);
  500. return AVERROR(EINVAL);
  501. }
  502. buf += 16;
  503. buf_size -= 16;
  504. /* get number of silent chunks */
  505. silent_chunks = 0;
  506. if (block_type == BLOCK_TYPE_INITIAL) {
  507. uint32_t flags;
  508. if (buf_size < 4) {
  509. av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
  510. return AVERROR(EINVAL);
  511. }
  512. flags = AV_RB32(buf);
  513. silent_chunks = av_popcount(flags);
  514. buf += 4;
  515. buf_size -= 4;
  516. } else if (block_type == BLOCK_TYPE_SILENCE) {
  517. silent_chunks = 1;
  518. buf_size = 0; // should already be zero but set it just to be sure
  519. }
  520. /* ensure output buffer is large enough */
  521. audio_chunks = buf_size / s->chunk_size;
  522. nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) / avctx->channels;
  523. out_size = nb_samples * avctx->channels * s->out_bps;
  524. if (*data_size < out_size)
  525. return -1;
  526. /* decode silent chunks */
  527. if (silent_chunks > 0) {
  528. int silent_size = avctx->block_align * silent_chunks;
  529. if (s->out_bps == 2) {
  530. memset(output_samples_s16, 0x00, silent_size * 2);
  531. output_samples_s16 += silent_size;
  532. } else {
  533. memset(output_samples_u8, 0x80, silent_size);
  534. output_samples_u8 += silent_size;
  535. }
  536. }
  537. /* decode audio chunks */
  538. if (audio_chunks > 0) {
  539. buf_end = buf + buf_size;
  540. while (buf < buf_end) {
  541. if (s->out_bps == 2) {
  542. decode_audio_s16(output_samples_s16, buf, s->chunk_size,
  543. avctx->channels);
  544. output_samples_s16 += avctx->block_align;
  545. } else {
  546. memcpy(output_samples_u8, buf, s->chunk_size);
  547. output_samples_u8 += avctx->block_align;
  548. }
  549. buf += s->chunk_size;
  550. }
  551. }
  552. *data_size = out_size;
  553. return avpkt->size;
  554. }
  555. /*
  556. * Public Data Structures
  557. */
/* registration entry for the Sierra VMD video decoder */
AVCodec ff_vmdvideo_decoder = {
    .name           = "vmdvideo",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_VMDVIDEO,
    .priv_data_size = sizeof(VmdVideoContext),
    .init           = vmdvideo_decode_init,
    .close          = vmdvideo_decode_end,
    .decode         = vmdvideo_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Sierra VMD video"),
};
/* registration entry for the Sierra VMD audio decoder (no .close:
 * the context holds no allocations) */
AVCodec ff_vmdaudio_decoder = {
    .name           = "vmdaudio",
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = CODEC_ID_VMDAUDIO,
    .priv_data_size = sizeof(VmdAudioContext),
    .init           = vmdaudio_decode_init,
    .decode         = vmdaudio_decode_frame,
    .long_name      = NULL_IF_CONFIG_SMALL("Sierra VMD audio"),
};