You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

646 lines
21KB

  1. /*
  2. * Sierra VMD Audio & Video Decoders
  3. * Copyright (C) 2004 the ffmpeg project
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * Sierra VMD audio & video decoders
  24. * by Vladimir "VAG" Gneushev (vagsoft at mail.ru)
  25. * for more information on the Sierra VMD format, visit:
  26. * http://www.pcisys.net/~melanson/codecs/
  27. *
  28. * The video decoder outputs PAL8 colorspace data. The decoder expects
  29. * a 0x330-byte VMD file header to be transmitted via extradata during
  30. * codec initialization. Each encoded frame that is sent to this decoder
  31. * is expected to be prepended with the appropriate 16-byte frame
  32. * information record from the VMD file.
  33. *
  34. * The audio decoder, like the video decoder, expects each encoded data
  35. * chunk to be prepended with the appropriate 16-byte frame information
  36. * record from the VMD file. It does not require the 0x330-byte VMD file
  37. * header, but it does need the audio setup parameters passed in through
  38. * normal libavcodec API means.
  39. */
  40. #include <stdio.h>
  41. #include <stdlib.h>
  42. #include <string.h>
  43. #include "libavutil/channel_layout.h"
  44. #include "libavutil/common.h"
  45. #include "libavutil/intreadwrite.h"
  46. #include "avcodec.h"
  47. #include "internal.h"
  48. #include "bytestream.h"
#define VMD_HEADER_SIZE 0x330   /* size of the VMD file header expected in extradata */
#define PALETTE_COUNT 256

/*
 * Video Decoder
 */

typedef struct VmdVideoContext {

    AVCodecContext *avctx;
    AVFrame prev_frame;          /* copy of the previous output frame, used for
                                  * interframe pixel copies */

    const unsigned char *buf;    /* current packet data (borrowed, set per frame) */
    int size;                    /* current packet size */

    /* current palette as PALETTE_COUNT 32-bit ARGB entries */
    unsigned char palette[PALETTE_COUNT * 4];
    unsigned char *unpack_buffer;       /* scratch space for lz_unpack() output */
    int unpack_buffer_size;             /* taken from the VMD header at init time */

    int x_off, y_off;            /* global offset latched from the first frame whose
                                  * rect is full-size but positioned off-origin */
} VmdVideoContext;

#define QUEUE_SIZE 0x1000   /* size of the LZ history ring buffer */
#define QUEUE_MASK 0x0FFF   /* wrap mask for ring-buffer positions */
/**
 * Decompress an LZSS-style stream into dest.
 *
 * The input starts with a LE32 count of output bytes, optionally followed by
 * the marker 0x56781234 which selects an alternate variant (different initial
 * ring-buffer position and a "special length" escape for long back-references).
 * Then come groups of up to 8 items announced by a tag byte: each tag bit set
 * means one literal byte, clear means a 3-byte-coded back-reference into a
 * 4 KB ring buffer pre-filled with spaces. Stops silently on truncated input
 * or a full destination; dest may be left partially written.
 */
static void lz_unpack(const unsigned char *src, int src_len,
                      unsigned char *dest, int dest_len)
{
    unsigned char *d;
    unsigned char *d_end;
    unsigned char queue[QUEUE_SIZE];
    unsigned int qpos;
    unsigned int dataleft;          /* output bytes still owed, per the header */
    unsigned int chainofs;
    unsigned int chainlen;
    unsigned int speclen;           /* length value that triggers the long-run escape */
    unsigned char tag;
    unsigned int i, j;
    GetByteContext gb;

    bytestream2_init(&gb, src, src_len);
    d = dest;
    d_end = d + dest_len;
    dataleft = bytestream2_get_le32(&gb);
    memset(queue, 0x20, QUEUE_SIZE);   /* history window starts as all spaces */
    if (bytestream2_get_bytes_left(&gb) < 4)
        return;
    if (bytestream2_peek_le32(&gb) == 0x56781234) {
        bytestream2_skipu(&gb, 4);
        qpos = 0x111;
        speclen = 0xF + 3;
    } else {
        qpos = 0xFEE;
        speclen = 100; /* no speclen */
    }

    while (dataleft > 0 && bytestream2_get_bytes_left(&gb) > 0) {
        tag = bytestream2_get_byteu(&gb);
        if ((tag == 0xFF) && (dataleft > 8)) {
            /* all-literal tag: fast path copying 8 raw bytes */
            if (d_end - d < 8 || bytestream2_get_bytes_left(&gb) < 8)
                return;
            for (i = 0; i < 8; i++) {
                queue[qpos++] = *d++ = bytestream2_get_byteu(&gb);
                qpos &= QUEUE_MASK;
            }
            dataleft -= 8;
        } else {
            /* one tag bit per item, LSB first */
            for (i = 0; i < 8; i++) {
                if (dataleft == 0)
                    break;
                if (tag & 0x01) {
                    /* literal byte, also appended to the history window */
                    if (d_end - d < 1 || bytestream2_get_bytes_left(&gb) < 1)
                        return;
                    queue[qpos++] = *d++ = bytestream2_get_byteu(&gb);
                    qpos &= QUEUE_MASK;
                    dataleft--;
                } else {
                    /* back-reference: 12-bit offset + 4-bit length (+3) */
                    chainofs = bytestream2_get_byte(&gb);
                    chainofs |= ((bytestream2_peek_byte(&gb) & 0xF0) << 4);
                    chainlen = (bytestream2_get_byte(&gb) & 0x0F) + 3;
                    if (chainlen == speclen) {
                        /* escape: extra byte extends the run length */
                        chainlen = bytestream2_get_byte(&gb) + 0xF + 3;
                    }
                    if (d_end - d < chainlen)
                        return;
                    for (j = 0; j < chainlen; j++) {
                        *d = queue[chainofs++ & QUEUE_MASK];
                        queue[qpos++] = *d++;
                        qpos &= QUEUE_MASK;
                    }
                    dataleft -= chainlen;
                }
                tag >>= 1;
            }
        }
    }
}
/**
 * Decode one RLE sub-packet into dest.
 *
 * Data is organized in byte pairs: a control byte with the high bit set
 * announces a raw run of (l & 0x7F) * 2 bytes; with the high bit clear it
 * announces l repetitions of the following 2-byte pattern. An odd src_count
 * is handled by copying one leading raw byte.
 *
 * @return number of input bytes consumed (0 on immediate truncation);
 *         the caller uses this to advance its own byte stream
 */
static int rle_unpack(const unsigned char *src, unsigned char *dest,
                      int src_count, int src_size, int dest_len)
{
    unsigned char *pd;
    int i, j, l;
    unsigned char *dest_end = dest + dest_len;
    GetByteContext gb;

    bytestream2_init(&gb, src, src_size);
    pd = dest;
    if (src_count & 1) {
        /* odd pixel count: one unpaired leading byte */
        if (bytestream2_get_bytes_left(&gb) < 1)
            return 0;
        *pd++ = bytestream2_get_byteu(&gb);
    }

    src_count >>= 1;   /* remaining work is counted in pairs */
    i = 0;
    do {
        if (bytestream2_get_bytes_left(&gb) < 1)
            break;
        l = bytestream2_get_byteu(&gb);
        if (l & 0x80) {
            /* raw run of (l & 0x7F) pairs */
            l = (l & 0x7F) * 2;
            if (dest_end - pd < l || bytestream2_get_bytes_left(&gb) < l)
                return bytestream2_tell(&gb);
            bytestream2_get_bufferu(&gb, pd, l);
            pd += l;
        } else {
            /* repeat the next 2-byte pattern l times */
            int ps[2];
            if (dest_end - pd < 2*l || bytestream2_get_bytes_left(&gb) < 2)
                return bytestream2_tell(&gb);
            ps[0] = bytestream2_get_byteu(&gb);
            ps[1] = bytestream2_get_byteu(&gb);
            for (j = 0; j < l; j++) {
                *pd++ = ps[0];
                *pd++ = ps[1];
            }
        }
        i += l;
    } while (i < src_count);

    return bytestream2_tell(&gb);
}
  177. static int vmd_decode(VmdVideoContext *s, AVFrame *frame)
  178. {
  179. int i;
  180. unsigned int *palette32;
  181. unsigned char r, g, b;
  182. GetByteContext gb;
  183. unsigned char meth;
  184. unsigned char *dp; /* pointer to current frame */
  185. unsigned char *pp; /* pointer to previous frame */
  186. unsigned char len;
  187. int ofs;
  188. int frame_x, frame_y;
  189. int frame_width, frame_height;
  190. frame_x = AV_RL16(&s->buf[6]);
  191. frame_y = AV_RL16(&s->buf[8]);
  192. frame_width = AV_RL16(&s->buf[10]) - frame_x + 1;
  193. frame_height = AV_RL16(&s->buf[12]) - frame_y + 1;
  194. if ((frame_width == s->avctx->width && frame_height == s->avctx->height) &&
  195. (frame_x || frame_y)) {
  196. s->x_off = frame_x;
  197. s->y_off = frame_y;
  198. }
  199. frame_x -= s->x_off;
  200. frame_y -= s->y_off;
  201. if (frame_x < 0 || frame_width < 0 ||
  202. frame_x >= s->avctx->width ||
  203. frame_width > s->avctx->width ||
  204. frame_x + frame_width > s->avctx->width) {
  205. return AVERROR_INVALIDDATA;
  206. }
  207. if (frame_y < 0 || frame_height < 0 ||
  208. frame_y >= s->avctx->height ||
  209. frame_height > s->avctx->height ||
  210. frame_y + frame_height > s->avctx->height) {
  211. return AVERROR_INVALIDDATA;
  212. }
  213. /* if only a certain region will be updated, copy the entire previous
  214. * frame before the decode */
  215. if (s->prev_frame.data[0] &&
  216. (frame_x || frame_y || (frame_width != s->avctx->width) ||
  217. (frame_height != s->avctx->height))) {
  218. memcpy(frame->data[0], s->prev_frame.data[0],
  219. s->avctx->height * frame->linesize[0]);
  220. }
  221. /* check if there is a new palette */
  222. bytestream2_init(&gb, s->buf + 16, s->size - 16);
  223. if (s->buf[15] & 0x02) {
  224. bytestream2_skip(&gb, 2);
  225. palette32 = (unsigned int *)s->palette;
  226. if (bytestream2_get_bytes_left(&gb) >= PALETTE_COUNT * 3) {
  227. for (i = 0; i < PALETTE_COUNT; i++) {
  228. r = bytestream2_get_byteu(&gb) * 4;
  229. g = bytestream2_get_byteu(&gb) * 4;
  230. b = bytestream2_get_byteu(&gb) * 4;
  231. palette32[i] = 0xFFU << 24 | (r << 16) | (g << 8) | (b);
  232. palette32[i] |= palette32[i] >> 6 & 0x30303;
  233. }
  234. }
  235. }
  236. if (s->size > 0) {
  237. /* originally UnpackFrame in VAG's code */
  238. bytestream2_init(&gb, gb.buffer, s->buf + s->size - gb.buffer);
  239. if (bytestream2_get_bytes_left(&gb) < 1)
  240. return AVERROR_INVALIDDATA;
  241. meth = bytestream2_get_byteu(&gb);
  242. if (meth & 0x80) {
  243. lz_unpack(gb.buffer, bytestream2_get_bytes_left(&gb),
  244. s->unpack_buffer, s->unpack_buffer_size);
  245. meth &= 0x7F;
  246. bytestream2_init(&gb, s->unpack_buffer, s->unpack_buffer_size);
  247. }
  248. dp = &frame->data[0][frame_y * frame->linesize[0] + frame_x];
  249. pp = &s->prev_frame.data[0][frame_y * s->prev_frame.linesize[0] + frame_x];
  250. switch (meth) {
  251. case 1:
  252. for (i = 0; i < frame_height; i++) {
  253. ofs = 0;
  254. do {
  255. len = bytestream2_get_byte(&gb);
  256. if (len & 0x80) {
  257. len = (len & 0x7F) + 1;
  258. if (ofs + len > frame_width || bytestream2_get_bytes_left(&gb) < len)
  259. return AVERROR_INVALIDDATA;
  260. bytestream2_get_bufferu(&gb, &dp[ofs], len);
  261. ofs += len;
  262. } else {
  263. /* interframe pixel copy */
  264. if (ofs + len + 1 > frame_width || !s->prev_frame.data[0])
  265. return AVERROR_INVALIDDATA;
  266. memcpy(&dp[ofs], &pp[ofs], len + 1);
  267. ofs += len + 1;
  268. }
  269. } while (ofs < frame_width);
  270. if (ofs > frame_width) {
  271. av_log(s->avctx, AV_LOG_ERROR, "offset > width (%d > %d)\n",
  272. ofs, frame_width);
  273. break;
  274. }
  275. dp += frame->linesize[0];
  276. pp += s->prev_frame.linesize[0];
  277. }
  278. break;
  279. case 2:
  280. for (i = 0; i < frame_height; i++) {
  281. bytestream2_get_buffer(&gb, dp, frame_width);
  282. dp += frame->linesize[0];
  283. pp += s->prev_frame.linesize[0];
  284. }
  285. break;
  286. case 3:
  287. for (i = 0; i < frame_height; i++) {
  288. ofs = 0;
  289. do {
  290. len = bytestream2_get_byte(&gb);
  291. if (len & 0x80) {
  292. len = (len & 0x7F) + 1;
  293. if (bytestream2_get_byte(&gb) == 0xFF)
  294. len = rle_unpack(gb.buffer, &dp[ofs],
  295. len, bytestream2_get_bytes_left(&gb),
  296. frame_width - ofs);
  297. else
  298. bytestream2_get_buffer(&gb, &dp[ofs], len);
  299. bytestream2_skip(&gb, len);
  300. } else {
  301. /* interframe pixel copy */
  302. if (ofs + len + 1 > frame_width || !s->prev_frame.data[0])
  303. return AVERROR_INVALIDDATA;
  304. memcpy(&dp[ofs], &pp[ofs], len + 1);
  305. ofs += len + 1;
  306. }
  307. } while (ofs < frame_width);
  308. if (ofs > frame_width) {
  309. av_log(s->avctx, AV_LOG_ERROR, "offset > width (%d > %d)\n",
  310. ofs, frame_width);
  311. }
  312. dp += frame->linesize[0];
  313. pp += s->prev_frame.linesize[0];
  314. }
  315. break;
  316. }
  317. }
  318. return 0;
  319. }
  320. static av_cold int vmdvideo_decode_init(AVCodecContext *avctx)
  321. {
  322. VmdVideoContext *s = avctx->priv_data;
  323. int i;
  324. unsigned int *palette32;
  325. int palette_index = 0;
  326. unsigned char r, g, b;
  327. unsigned char *vmd_header;
  328. unsigned char *raw_palette;
  329. s->avctx = avctx;
  330. avctx->pix_fmt = AV_PIX_FMT_PAL8;
  331. /* make sure the VMD header made it */
  332. if (s->avctx->extradata_size != VMD_HEADER_SIZE) {
  333. av_log(s->avctx, AV_LOG_ERROR, "expected extradata size of %d\n",
  334. VMD_HEADER_SIZE);
  335. return AVERROR_INVALIDDATA;
  336. }
  337. vmd_header = (unsigned char *)avctx->extradata;
  338. s->unpack_buffer_size = AV_RL32(&vmd_header[800]);
  339. s->unpack_buffer = av_malloc(s->unpack_buffer_size);
  340. if (!s->unpack_buffer)
  341. return AVERROR(ENOMEM);
  342. /* load up the initial palette */
  343. raw_palette = &vmd_header[28];
  344. palette32 = (unsigned int *)s->palette;
  345. for (i = 0; i < PALETTE_COUNT; i++) {
  346. r = raw_palette[palette_index++] * 4;
  347. g = raw_palette[palette_index++] * 4;
  348. b = raw_palette[palette_index++] * 4;
  349. palette32[i] = (r << 16) | (g << 8) | (b);
  350. }
  351. avcodec_get_frame_defaults(&s->prev_frame);
  352. return 0;
  353. }
  354. static int vmdvideo_decode_frame(AVCodecContext *avctx,
  355. void *data, int *got_frame,
  356. AVPacket *avpkt)
  357. {
  358. const uint8_t *buf = avpkt->data;
  359. int buf_size = avpkt->size;
  360. VmdVideoContext *s = avctx->priv_data;
  361. AVFrame *frame = data;
  362. int ret;
  363. s->buf = buf;
  364. s->size = buf_size;
  365. if (buf_size < 16)
  366. return buf_size;
  367. if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
  368. return ret;
  369. if (vmd_decode(s, frame) < 0)
  370. av_log(avctx, AV_LOG_WARNING, "decode error\n");
  371. /* make the palette available on the way out */
  372. memcpy(frame->data[1], s->palette, PALETTE_COUNT * 4);
  373. /* shuffle frames */
  374. av_frame_unref(&s->prev_frame);
  375. if ((ret = av_frame_ref(&s->prev_frame, frame)) < 0)
  376. return ret;
  377. *got_frame = 1;
  378. /* report that the buffer was completely consumed */
  379. return buf_size;
  380. }
  381. static av_cold int vmdvideo_decode_end(AVCodecContext *avctx)
  382. {
  383. VmdVideoContext *s = avctx->priv_data;
  384. av_frame_unref(&s->prev_frame);
  385. av_free(s->unpack_buffer);
  386. return 0;
  387. }
/*
 * Audio Decoder
 */

#define BLOCK_TYPE_AUDIO   1   /* regular audio chunk */
#define BLOCK_TYPE_INITIAL 2   /* first chunk; carries a silent-chunk bitmask */
#define BLOCK_TYPE_SILENCE 3   /* chunk consisting entirely of silence */

typedef struct VmdAudioContext {
    int out_bps;      /* bytes per output sample: 1 (u8) or 2 (s16) */
    int chunk_size;   /* input bytes per audio chunk (block_align plus the
                       * per-channel raw predictor samples in 16-bit mode) */
} VmdAudioContext;

/* DPCM step table: magnitude added to / subtracted from the predictor for
 * each 7-bit delta code (high bit of the input byte selects the sign) */
static const uint16_t vmdaudio_table[128] = {
    0x000, 0x008, 0x010, 0x020, 0x030, 0x040, 0x050, 0x060, 0x070, 0x080,
    0x090, 0x0A0, 0x0B0, 0x0C0, 0x0D0, 0x0E0, 0x0F0, 0x100, 0x110, 0x120,
    0x130, 0x140, 0x150, 0x160, 0x170, 0x180, 0x190, 0x1A0, 0x1B0, 0x1C0,
    0x1D0, 0x1E0, 0x1F0, 0x200, 0x208, 0x210, 0x218, 0x220, 0x228, 0x230,
    0x238, 0x240, 0x248, 0x250, 0x258, 0x260, 0x268, 0x270, 0x278, 0x280,
    0x288, 0x290, 0x298, 0x2A0, 0x2A8, 0x2B0, 0x2B8, 0x2C0, 0x2C8, 0x2D0,
    0x2D8, 0x2E0, 0x2E8, 0x2F0, 0x2F8, 0x300, 0x308, 0x310, 0x318, 0x320,
    0x328, 0x330, 0x338, 0x340, 0x348, 0x350, 0x358, 0x360, 0x368, 0x370,
    0x378, 0x380, 0x388, 0x390, 0x398, 0x3A0, 0x3A8, 0x3B0, 0x3B8, 0x3C0,
    0x3C8, 0x3D0, 0x3D8, 0x3E0, 0x3E8, 0x3F0, 0x3F8, 0x400, 0x440, 0x480,
    0x4C0, 0x500, 0x540, 0x580, 0x5C0, 0x600, 0x640, 0x680, 0x6C0, 0x700,
    0x740, 0x780, 0x7C0, 0x800, 0x900, 0xA00, 0xB00, 0xC00, 0xD00, 0xE00,
    0xF00, 0x1000, 0x1400, 0x1800, 0x1C00, 0x2000, 0x3000, 0x4000
};
  413. static av_cold int vmdaudio_decode_init(AVCodecContext *avctx)
  414. {
  415. VmdAudioContext *s = avctx->priv_data;
  416. if (avctx->channels < 1 || avctx->channels > 2) {
  417. av_log(avctx, AV_LOG_ERROR, "invalid number of channels\n");
  418. return AVERROR(EINVAL);
  419. }
  420. if (avctx->block_align < 1 || avctx->block_align % avctx->channels) {
  421. av_log(avctx, AV_LOG_ERROR, "invalid block align\n");
  422. return AVERROR(EINVAL);
  423. }
  424. avctx->channel_layout = avctx->channels == 1 ? AV_CH_LAYOUT_MONO :
  425. AV_CH_LAYOUT_STEREO;
  426. if (avctx->bits_per_coded_sample == 16)
  427. avctx->sample_fmt = AV_SAMPLE_FMT_S16;
  428. else
  429. avctx->sample_fmt = AV_SAMPLE_FMT_U8;
  430. s->out_bps = av_get_bytes_per_sample(avctx->sample_fmt);
  431. s->chunk_size = avctx->block_align + avctx->channels * (s->out_bps == 2);
  432. av_log(avctx, AV_LOG_DEBUG, "%d channels, %d bits/sample, "
  433. "block align = %d, sample rate = %d\n",
  434. avctx->channels, avctx->bits_per_coded_sample, avctx->block_align,
  435. avctx->sample_rate);
  436. return 0;
  437. }
  438. static void decode_audio_s16(int16_t *out, const uint8_t *buf, int buf_size,
  439. int channels)
  440. {
  441. int ch;
  442. const uint8_t *buf_end = buf + buf_size;
  443. int predictor[2];
  444. int st = channels - 1;
  445. /* decode initial raw sample */
  446. for (ch = 0; ch < channels; ch++) {
  447. predictor[ch] = (int16_t)AV_RL16(buf);
  448. buf += 2;
  449. *out++ = predictor[ch];
  450. }
  451. /* decode DPCM samples */
  452. ch = 0;
  453. while (buf < buf_end) {
  454. uint8_t b = *buf++;
  455. if (b & 0x80)
  456. predictor[ch] -= vmdaudio_table[b & 0x7F];
  457. else
  458. predictor[ch] += vmdaudio_table[b];
  459. predictor[ch] = av_clip_int16(predictor[ch]);
  460. *out++ = predictor[ch];
  461. ch ^= st;
  462. }
  463. }
/**
 * Decode one audio packet.
 *
 * Each packet starts with a 16-byte frame record whose byte 6 is the block
 * type. BLOCK_TYPE_INITIAL packets additionally carry a 32-bit bitmask whose
 * popcount gives the number of leading silent chunks; BLOCK_TYPE_SILENCE
 * packets are pure silence. The remaining payload is split into chunk_size
 * pieces, each decoded as DPCM (s16) or copied raw (u8).
 *
 * @return number of input bytes consumed (the whole packet) or a negative
 *         error code
 */
static int vmdaudio_decode_frame(AVCodecContext *avctx, void *data,
                                 int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame = data;
    const uint8_t *buf = avpkt->data;
    const uint8_t *buf_end;
    int buf_size = avpkt->size;
    VmdAudioContext *s = avctx->priv_data;
    int block_type, silent_chunks, audio_chunks;
    int ret;
    uint8_t *output_samples_u8;
    int16_t *output_samples_s16;

    /* packets without a complete 16-byte frame record carry no audio */
    if (buf_size < 16) {
        av_log(avctx, AV_LOG_WARNING, "skipping small junk packet\n");
        *got_frame_ptr = 0;
        return buf_size;
    }

    block_type = buf[6];
    if (block_type < BLOCK_TYPE_AUDIO || block_type > BLOCK_TYPE_SILENCE) {
        av_log(avctx, AV_LOG_ERROR, "unknown block type: %d\n", block_type);
        return AVERROR(EINVAL);
    }
    buf      += 16;
    buf_size -= 16;

    /* get number of silent chunks */
    silent_chunks = 0;
    if (block_type == BLOCK_TYPE_INITIAL) {
        uint32_t flags;
        if (buf_size < 4) {
            av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
            return AVERROR(EINVAL);
        }
        /* each set bit marks one silent chunk preceding the audio data */
        flags         = AV_RB32(buf);
        silent_chunks = av_popcount(flags);
        buf      += 4;
        buf_size -= 4;
    } else if (block_type == BLOCK_TYPE_SILENCE) {
        silent_chunks = 1;
        buf_size = 0; // should already be zero but set it just to be sure
    }

    /* ensure output buffer is large enough */
    audio_chunks = buf_size / s->chunk_size;

    /* get output buffer */
    frame->nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) /
                        avctx->channels;
    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;
    output_samples_u8  = frame->data[0];
    output_samples_s16 = (int16_t *)frame->data[0];

    /* decode silent chunks: zero for s16, unsigned midpoint 0x80 for u8 */
    if (silent_chunks > 0) {
        int silent_size = avctx->block_align * silent_chunks;
        if (s->out_bps == 2) {
            memset(output_samples_s16, 0x00, silent_size * 2);
            output_samples_s16 += silent_size;
        } else {
            memset(output_samples_u8, 0x80, silent_size);
            output_samples_u8 += silent_size;
        }
    }

    /* decode audio chunks */
    if (audio_chunks > 0) {
        buf_end = buf + buf_size;
        while (buf_end - buf >= s->chunk_size) {
            if (s->out_bps == 2) {
                /* DPCM-coded 16-bit chunk */
                decode_audio_s16(output_samples_s16, buf, s->chunk_size,
                                 avctx->channels);
                output_samples_s16 += avctx->block_align;
            } else {
                /* raw unsigned 8-bit chunk */
                memcpy(output_samples_u8, buf, s->chunk_size);
                output_samples_u8 += avctx->block_align;
            }
            buf += s->chunk_size;
        }
    }

    *got_frame_ptr = 1;

    return avpkt->size;
}
/*
 * Public Data Structures
 */

/* Sierra VMD video decoder registration */
AVCodec ff_vmdvideo_decoder = {
    .name           = "vmdvideo",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VMDVIDEO,
    .priv_data_size = sizeof(VmdVideoContext),
    .init           = vmdvideo_decode_init,
    .close          = vmdvideo_decode_end,
    .decode         = vmdvideo_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Sierra VMD video"),
};
/* Sierra VMD audio decoder registration (no close callback: the audio
 * context owns no heap allocations) */
AVCodec ff_vmdaudio_decoder = {
    .name           = "vmdaudio",
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_VMDAUDIO,
    .priv_data_size = sizeof(VmdAudioContext),
    .init           = vmdaudio_decode_init,
    .decode         = vmdaudio_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Sierra VMD audio"),
};