You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

680 lines
21KB

  1. /*
  2. * Sierra VMD Audio & Video Decoders
  3. * Copyright (C) 2004 the ffmpeg project
  4. *
  5. * This file is part of Libav.
  6. *
  7. * Libav is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * Libav is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with Libav; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * Sierra VMD audio & video decoders
  24. * by Vladimir "VAG" Gneushev (vagsoft at mail.ru)
  25. * for more information on the Sierra VMD format, visit:
  26. * http://www.pcisys.net/~melanson/codecs/
  27. *
  28. * The video decoder outputs PAL8 colorspace data. The decoder expects
  29. * a 0x330-byte VMD file header to be transmitted via extradata during
  30. * codec initialization. Each encoded frame that is sent to this decoder
  31. * is expected to be prepended with the appropriate 16-byte frame
  32. * information record from the VMD file.
  33. *
  34. * The audio decoder, like the video decoder, expects each encoded data
  35. * chunk to be prepended with the appropriate 16-byte frame information
  36. * record from the VMD file. It does not require the 0x330-byte VMD file
  37. * header, but it does need the audio setup parameters passed in through
  38. * normal libavcodec API means.
  39. */
  40. #include <stdio.h>
  41. #include <stdlib.h>
  42. #include <string.h>
  43. #include "libavutil/channel_layout.h"
  44. #include "libavutil/common.h"
  45. #include "libavutil/intreadwrite.h"
  46. #include "avcodec.h"
  47. #include "internal.h"
  48. #define VMD_HEADER_SIZE 0x330
  49. #define PALETTE_COUNT 256
  50. /*
  51. * Video Decoder
  52. */
typedef struct VmdVideoContext {

    AVCodecContext *avctx;
    AVFrame frame;               /* frame currently being decoded */
    AVFrame prev_frame;          /* last decoded frame, reference for inter-frame copies */

    const unsigned char *buf;    /* current packet: 16-byte frame record + payload */
    int size;                    /* byte count associated with buf (see vmd_decode) */

    unsigned char palette[PALETTE_COUNT * 4];  /* current palette as packed 0RGB32 */
    unsigned char *unpack_buffer;      /* scratch space for LZ-compressed frames */
    int unpack_buffer_size;            /* its size, taken from the VMD file header */

    int x_off, y_off;            /* latched global offset subtracted from frame rects */
} VmdVideoContext;
  64. #define QUEUE_SIZE 0x1000
  65. #define QUEUE_MASK 0x0FFF
  66. static void lz_unpack(const unsigned char *src, int src_len,
  67. unsigned char *dest, int dest_len)
  68. {
  69. const unsigned char *s;
  70. unsigned int s_len;
  71. unsigned char *d;
  72. unsigned char *d_end;
  73. unsigned char queue[QUEUE_SIZE];
  74. unsigned int qpos;
  75. unsigned int dataleft;
  76. unsigned int chainofs;
  77. unsigned int chainlen;
  78. unsigned int speclen;
  79. unsigned char tag;
  80. unsigned int i, j;
  81. s = src;
  82. s_len = src_len;
  83. d = dest;
  84. d_end = d + dest_len;
  85. dataleft = AV_RL32(s);
  86. s += 4; s_len -= 4;
  87. memset(queue, 0x20, QUEUE_SIZE);
  88. if (s_len < 4)
  89. return;
  90. if (AV_RL32(s) == 0x56781234) {
  91. s += 4; s_len -= 4;
  92. qpos = 0x111;
  93. speclen = 0xF + 3;
  94. } else {
  95. qpos = 0xFEE;
  96. speclen = 100; /* no speclen */
  97. }
  98. while (dataleft > 0 && s_len > 0) {
  99. tag = *s++; s_len--;
  100. if ((tag == 0xFF) && (dataleft > 8)) {
  101. if (d + 8 > d_end || s_len < 8)
  102. return;
  103. for (i = 0; i < 8; i++) {
  104. queue[qpos++] = *d++ = *s++;
  105. qpos &= QUEUE_MASK;
  106. }
  107. s_len -= 8;
  108. dataleft -= 8;
  109. } else {
  110. for (i = 0; i < 8; i++) {
  111. if (dataleft == 0)
  112. break;
  113. if (tag & 0x01) {
  114. if (d + 1 > d_end || s_len < 1)
  115. return;
  116. queue[qpos++] = *d++ = *s++;
  117. qpos &= QUEUE_MASK;
  118. dataleft--;
  119. s_len--;
  120. } else {
  121. if (s_len < 2)
  122. return;
  123. chainofs = *s++;
  124. chainofs |= ((*s & 0xF0) << 4);
  125. chainlen = (*s++ & 0x0F) + 3;
  126. s_len -= 2;
  127. if (chainlen == speclen) {
  128. if (s_len < 1)
  129. return;
  130. chainlen = *s++ + 0xF + 3;
  131. s_len--;
  132. }
  133. if (d + chainlen > d_end)
  134. return;
  135. for (j = 0; j < chainlen; j++) {
  136. *d = queue[chainofs++ & QUEUE_MASK];
  137. queue[qpos++] = *d++;
  138. qpos &= QUEUE_MASK;
  139. }
  140. dataleft -= chainlen;
  141. }
  142. tag >>= 1;
  143. }
  144. }
  145. }
  146. }
  147. static int rle_unpack(const unsigned char *src, unsigned char *dest,
  148. int src_count, int src_size, int dest_len)
  149. {
  150. const unsigned char *ps;
  151. unsigned char *pd;
  152. int i, l;
  153. unsigned char *dest_end = dest + dest_len;
  154. ps = src;
  155. pd = dest;
  156. if (src_count & 1) {
  157. if (src_size < 1)
  158. return 0;
  159. *pd++ = *ps++;
  160. src_size--;
  161. }
  162. src_count >>= 1;
  163. i = 0;
  164. do {
  165. if (src_size < 1)
  166. break;
  167. l = *ps++;
  168. src_size--;
  169. if (l & 0x80) {
  170. l = (l & 0x7F) * 2;
  171. if (pd + l > dest_end || src_size < l)
  172. return ps - src;
  173. memcpy(pd, ps, l);
  174. ps += l;
  175. src_size -= l;
  176. pd += l;
  177. } else {
  178. if (pd + i > dest_end || src_size < 2)
  179. return ps - src;
  180. for (i = 0; i < l; i++) {
  181. *pd++ = ps[0];
  182. *pd++ = ps[1];
  183. }
  184. ps += 2;
  185. src_size -= 2;
  186. }
  187. i += l;
  188. } while (i < src_count);
  189. return ps - src;
  190. }
/**
 * Decode one video frame into s->frame.
 *
 * s->buf points at the 16-byte frame record followed by the payload:
 * bytes 6-13 hold the update rectangle (left, top, right, bottom,
 * little-endian) and bit 1 of byte 15 flags an in-band palette. The
 * payload method byte (bit 7 = LZ-compressed, see lz_unpack) selects:
 * 1 = literal runs mixed with skips copying from the previous frame,
 * 2 = raw pixels, 3 = like 1 but literal runs may be RLE-packed
 * (see rle_unpack). Decoding stops silently on malformed input.
 */
static void vmd_decode(VmdVideoContext *s)
{
    int i;
    unsigned int *palette32;
    unsigned char r, g, b;

    /* point to the start of the encoded data */
    const unsigned char *p = s->buf + 16;
    const unsigned char *pb;
    unsigned int pb_size;
    unsigned char meth;
    unsigned char *dp; /* pointer to current frame */
    unsigned char *pp; /* pointer to previous frame */
    unsigned char len;
    int ofs;

    int frame_x, frame_y;
    int frame_width, frame_height;

    frame_x = AV_RL16(&s->buf[6]);
    frame_y = AV_RL16(&s->buf[8]);
    frame_width = AV_RL16(&s->buf[10]) - frame_x + 1;
    frame_height = AV_RL16(&s->buf[12]) - frame_y + 1;

    /* reject update rectangles that fall outside the coded frame */
    if (frame_x < 0 || frame_width < 0 ||
        frame_x >= s->avctx->width ||
        frame_width > s->avctx->width ||
        frame_x + frame_width > s->avctx->width)
        return;
    if (frame_y < 0 || frame_height < 0 ||
        frame_y >= s->avctx->height ||
        frame_height > s->avctx->height ||
        frame_y + frame_height > s->avctx->height)
        return;

    /* some files position a full-size frame at a non-zero offset: latch
     * that offset and subtract it from every subsequent rectangle */
    if ((frame_width == s->avctx->width && frame_height == s->avctx->height) &&
        (frame_x || frame_y)) {
        s->x_off = frame_x;
        s->y_off = frame_y;
    }
    frame_x -= s->x_off;
    frame_y -= s->y_off;

    /* if only a certain region will be updated, copy the entire previous
     * frame before the decode */
    if (s->prev_frame.data[0] &&
        (frame_x || frame_y || (frame_width != s->avctx->width) ||
        (frame_height != s->avctx->height))) {
        memcpy(s->frame.data[0], s->prev_frame.data[0],
            s->avctx->height * s->frame.linesize[0]);
    }

    /* check if there is a new palette */
    if (s->buf[15] & 0x02) {
        p += 2;
        palette32 = (unsigned int *)s->palette;
        for (i = 0; i < PALETTE_COUNT; i++) {
            /* components scaled *4 — presumably 6-bit VGA values widened
             * to 8 bits; same scaling as vmdvideo_decode_init */
            r = *p++ * 4;
            g = *p++ * 4;
            b = *p++ * 4;
            palette32[i] = (r << 16) | (g << 8) | (b);
        }
        s->size -= (256 * 3 + 2);
    }
    if (s->size > 0) {
        /* originally UnpackFrame in VAG's code */
        pb = p;
        /* NOTE(review): when a palette was present, s->size has already
         * been reduced by the palette byte count while pb was advanced
         * past those bytes too, so this looks like it undercounts the
         * remaining payload; conservative, but verify against the
         * demuxer's packet layout. */
        pb_size = s->buf + s->size - pb;
        if (pb_size < 1)
            return;
        meth = *pb++; pb_size--;
        if (meth & 0x80) {
            /* bit 7: payload is LZ-compressed; unpack into scratch first */
            lz_unpack(pb, pb_size,
                      s->unpack_buffer, s->unpack_buffer_size);
            meth &= 0x7F;
            pb = s->unpack_buffer;
            pb_size = s->unpack_buffer_size;
        }

        dp = &s->frame.data[0][frame_y * s->frame.linesize[0] + frame_x];
        pp = &s->prev_frame.data[0][frame_y * s->prev_frame.linesize[0] + frame_x];
        switch (meth) {
        case 1:
            /* per line: high-bit codes are literal runs of (len&0x7F)+1
             * pixels, others copy len+1 pixels from the previous frame */
            for (i = 0; i < frame_height; i++) {
                ofs = 0;
                do {
                    if (pb_size < 1)
                        return;
                    len = *pb++;
                    pb_size--;
                    if (len & 0x80) {
                        len = (len & 0x7F) + 1;
                        if (ofs + len > frame_width || pb_size < len)
                            return;
                        memcpy(&dp[ofs], pb, len);
                        pb += len;
                        pb_size -= len;
                        ofs += len;
                    } else {
                        /* interframe pixel copy */
                        if (ofs + len + 1 > frame_width || !s->prev_frame.data[0])
                            return;
                        memcpy(&dp[ofs], &pp[ofs], len + 1);
                        ofs += len + 1;
                    }
                } while (ofs < frame_width);
                if (ofs > frame_width) {
                    av_log(s->avctx, AV_LOG_ERROR, "VMD video: offset > width (%d > %d)\n",
                        ofs, frame_width);
                    break;
                }
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;

        case 2:
            /* raw upload: frame_width literal bytes per line */
            for (i = 0; i < frame_height; i++) {
                if (pb_size < frame_width)
                    return;
                memcpy(dp, pb, frame_width);
                pb += frame_width;
                pb_size -= frame_width;
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;

        case 3:
            /* like method 1, but a literal run whose next byte is 0xFF is
             * further RLE-compressed (handled by rle_unpack) */
            for (i = 0; i < frame_height; i++) {
                ofs = 0;
                do {
                    if (pb_size < 1)
                        return;
                    len = *pb++;
                    pb_size--;
                    if (len & 0x80) {
                        len = (len & 0x7F) + 1;
                        if (pb_size < 1)
                            return;
                        if (*pb++ == 0xFF)
                            len = rle_unpack(pb, &dp[ofs], len, pb_size, frame_width - ofs);
                        else {
                            if (pb_size < len)
                                return;
                            memcpy(&dp[ofs], pb, len);
                        }
                        pb += len;
                        pb_size -= 1 + len;
                        ofs += len;
                    } else {
                        /* interframe pixel copy */
                        if (ofs + len + 1 > frame_width || !s->prev_frame.data[0])
                            return;
                        memcpy(&dp[ofs], &pp[ofs], len + 1);
                        ofs += len + 1;
                    }
                } while (ofs < frame_width);
                if (ofs > frame_width) {
                    av_log(s->avctx, AV_LOG_ERROR, "VMD video: offset > width (%d > %d)\n",
                        ofs, frame_width);
                }
                dp += s->frame.linesize[0];
                pp += s->prev_frame.linesize[0];
            }
            break;
        }
    }
}
  350. static av_cold int vmdvideo_decode_init(AVCodecContext *avctx)
  351. {
  352. VmdVideoContext *s = avctx->priv_data;
  353. int i;
  354. unsigned int *palette32;
  355. int palette_index = 0;
  356. unsigned char r, g, b;
  357. unsigned char *vmd_header;
  358. unsigned char *raw_palette;
  359. s->avctx = avctx;
  360. avctx->pix_fmt = AV_PIX_FMT_PAL8;
  361. /* make sure the VMD header made it */
  362. if (s->avctx->extradata_size != VMD_HEADER_SIZE) {
  363. av_log(s->avctx, AV_LOG_ERROR, "VMD video: expected extradata size of %d\n",
  364. VMD_HEADER_SIZE);
  365. return -1;
  366. }
  367. vmd_header = (unsigned char *)avctx->extradata;
  368. s->unpack_buffer_size = AV_RL32(&vmd_header[800]);
  369. s->unpack_buffer = av_malloc(s->unpack_buffer_size);
  370. if (!s->unpack_buffer)
  371. return -1;
  372. /* load up the initial palette */
  373. raw_palette = &vmd_header[28];
  374. palette32 = (unsigned int *)s->palette;
  375. for (i = 0; i < PALETTE_COUNT; i++) {
  376. r = raw_palette[palette_index++] * 4;
  377. g = raw_palette[palette_index++] * 4;
  378. b = raw_palette[palette_index++] * 4;
  379. palette32[i] = (r << 16) | (g << 8) | (b);
  380. }
  381. return 0;
  382. }
/**
 * Decode one video packet.
 *
 * Packets shorter than the 16-byte frame record are consumed without
 * producing a frame (*got_frame is left at 0 by the caller). Otherwise a
 * buffer is obtained, vmd_decode() fills it, the palette is attached,
 * and the new frame becomes prev_frame (the inter-frame reference) while
 * the old reference is released — the swap order below is what keeps the
 * reference alive across calls.
 */
static int vmdvideo_decode_frame(AVCodecContext *avctx,
                                 void *data, int *got_frame,
                                 AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    VmdVideoContext *s = avctx->priv_data;

    s->buf = buf;
    s->size = buf_size;

    /* too small to even hold the 16-byte frame record */
    if (buf_size < 16)
        return buf_size;

    s->frame.reference = 1;
    if (ff_get_buffer(avctx, &s->frame)) {
        av_log(s->avctx, AV_LOG_ERROR, "VMD Video: get_buffer() failed\n");
        return -1;
    }

    vmd_decode(s);

    /* make the palette available on the way out */
    memcpy(s->frame.data[1], s->palette, PALETTE_COUNT * 4);

    /* shuffle frames: the frame just decoded becomes the reference for
     * the next packet; release the previous reference (now in s->frame) */
    FFSWAP(AVFrame, s->frame, s->prev_frame);
    if (s->frame.data[0])
        avctx->release_buffer(avctx, &s->frame);

    *got_frame = 1;
    *(AVFrame*)data = s->prev_frame;

    /* report that the buffer was completely consumed */
    return buf_size;
}
  411. static av_cold int vmdvideo_decode_end(AVCodecContext *avctx)
  412. {
  413. VmdVideoContext *s = avctx->priv_data;
  414. if (s->prev_frame.data[0])
  415. avctx->release_buffer(avctx, &s->prev_frame);
  416. av_free(s->unpack_buffer);
  417. return 0;
  418. }
  419. /*
  420. * Audio Decoder
  421. */
  422. #define BLOCK_TYPE_AUDIO 1
  423. #define BLOCK_TYPE_INITIAL 2
  424. #define BLOCK_TYPE_SILENCE 3
typedef struct VmdAudioContext {
    AVFrame frame;      /* frame handed back to the caller in decode_frame */
    int out_bps;        /* bytes per output sample: 1 (u8) or 2 (s16) */
    int chunk_size;     /* input bytes per audio chunk: block_align plus one
                         * extra raw sample per channel in 16-bit mode */
} VmdAudioContext;
/* Step table for the 16-bit DPCM mode: bits 0-6 of an encoded byte index
 * this table and bit 7 selects whether the step is added to or subtracted
 * from the predictor (see decode_audio_s16). */
static const uint16_t vmdaudio_table[128] = {
    0x000, 0x008, 0x010, 0x020, 0x030, 0x040, 0x050, 0x060, 0x070, 0x080,
    0x090, 0x0A0, 0x0B0, 0x0C0, 0x0D0, 0x0E0, 0x0F0, 0x100, 0x110, 0x120,
    0x130, 0x140, 0x150, 0x160, 0x170, 0x180, 0x190, 0x1A0, 0x1B0, 0x1C0,
    0x1D0, 0x1E0, 0x1F0, 0x200, 0x208, 0x210, 0x218, 0x220, 0x228, 0x230,
    0x238, 0x240, 0x248, 0x250, 0x258, 0x260, 0x268, 0x270, 0x278, 0x280,
    0x288, 0x290, 0x298, 0x2A0, 0x2A8, 0x2B0, 0x2B8, 0x2C0, 0x2C8, 0x2D0,
    0x2D8, 0x2E0, 0x2E8, 0x2F0, 0x2F8, 0x300, 0x308, 0x310, 0x318, 0x320,
    0x328, 0x330, 0x338, 0x340, 0x348, 0x350, 0x358, 0x360, 0x368, 0x370,
    0x378, 0x380, 0x388, 0x390, 0x398, 0x3A0, 0x3A8, 0x3B0, 0x3B8, 0x3C0,
    0x3C8, 0x3D0, 0x3D8, 0x3E0, 0x3E8, 0x3F0, 0x3F8, 0x400, 0x440, 0x480,
    0x4C0, 0x500, 0x540, 0x580, 0x5C0, 0x600, 0x640, 0x680, 0x6C0, 0x700,
    0x740, 0x780, 0x7C0, 0x800, 0x900, 0xA00, 0xB00, 0xC00, 0xD00, 0xE00,
    0xF00, 0x1000, 0x1400, 0x1800, 0x1C00, 0x2000, 0x3000, 0x4000
};
  445. static av_cold int vmdaudio_decode_init(AVCodecContext *avctx)
  446. {
  447. VmdAudioContext *s = avctx->priv_data;
  448. if (avctx->channels < 1 || avctx->channels > 2) {
  449. av_log(avctx, AV_LOG_ERROR, "invalid number of channels\n");
  450. return AVERROR(EINVAL);
  451. }
  452. if (avctx->block_align < 1) {
  453. av_log(avctx, AV_LOG_ERROR, "invalid block align\n");
  454. return AVERROR(EINVAL);
  455. }
  456. avctx->channel_layout = avctx->channels == 1 ? AV_CH_LAYOUT_MONO :
  457. AV_CH_LAYOUT_STEREO;
  458. if (avctx->bits_per_coded_sample == 16)
  459. avctx->sample_fmt = AV_SAMPLE_FMT_S16;
  460. else
  461. avctx->sample_fmt = AV_SAMPLE_FMT_U8;
  462. s->out_bps = av_get_bytes_per_sample(avctx->sample_fmt);
  463. s->chunk_size = avctx->block_align + avctx->channels * (s->out_bps == 2);
  464. avcodec_get_frame_defaults(&s->frame);
  465. avctx->coded_frame = &s->frame;
  466. av_log(avctx, AV_LOG_DEBUG, "%d channels, %d bits/sample, "
  467. "block align = %d, sample rate = %d\n",
  468. avctx->channels, avctx->bits_per_coded_sample, avctx->block_align,
  469. avctx->sample_rate);
  470. return 0;
  471. }
  472. static void decode_audio_s16(int16_t *out, const uint8_t *buf, int buf_size,
  473. int channels)
  474. {
  475. int ch;
  476. const uint8_t *buf_end = buf + buf_size;
  477. int predictor[2];
  478. int st = channels - 1;
  479. /* decode initial raw sample */
  480. for (ch = 0; ch < channels; ch++) {
  481. predictor[ch] = (int16_t)AV_RL16(buf);
  482. buf += 2;
  483. *out++ = predictor[ch];
  484. }
  485. /* decode DPCM samples */
  486. ch = 0;
  487. while (buf < buf_end) {
  488. uint8_t b = *buf++;
  489. if (b & 0x80)
  490. predictor[ch] -= vmdaudio_table[b & 0x7F];
  491. else
  492. predictor[ch] += vmdaudio_table[b];
  493. predictor[ch] = av_clip_int16(predictor[ch]);
  494. *out++ = predictor[ch];
  495. ch ^= st;
  496. }
  497. }
  498. static int vmdaudio_decode_frame(AVCodecContext *avctx, void *data,
  499. int *got_frame_ptr, AVPacket *avpkt)
  500. {
  501. const uint8_t *buf = avpkt->data;
  502. const uint8_t *buf_end;
  503. int buf_size = avpkt->size;
  504. VmdAudioContext *s = avctx->priv_data;
  505. int block_type, silent_chunks, audio_chunks;
  506. int ret;
  507. uint8_t *output_samples_u8;
  508. int16_t *output_samples_s16;
  509. if (buf_size < 16) {
  510. av_log(avctx, AV_LOG_WARNING, "skipping small junk packet\n");
  511. *got_frame_ptr = 0;
  512. return buf_size;
  513. }
  514. block_type = buf[6];
  515. if (block_type < BLOCK_TYPE_AUDIO || block_type > BLOCK_TYPE_SILENCE) {
  516. av_log(avctx, AV_LOG_ERROR, "unknown block type: %d\n", block_type);
  517. return AVERROR(EINVAL);
  518. }
  519. buf += 16;
  520. buf_size -= 16;
  521. /* get number of silent chunks */
  522. silent_chunks = 0;
  523. if (block_type == BLOCK_TYPE_INITIAL) {
  524. uint32_t flags;
  525. if (buf_size < 4) {
  526. av_log(avctx, AV_LOG_ERROR, "packet is too small\n");
  527. return AVERROR(EINVAL);
  528. }
  529. flags = AV_RB32(buf);
  530. silent_chunks = av_popcount(flags);
  531. buf += 4;
  532. buf_size -= 4;
  533. } else if (block_type == BLOCK_TYPE_SILENCE) {
  534. silent_chunks = 1;
  535. buf_size = 0; // should already be zero but set it just to be sure
  536. }
  537. /* ensure output buffer is large enough */
  538. audio_chunks = buf_size / s->chunk_size;
  539. /* get output buffer */
  540. s->frame.nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) / avctx->channels;
  541. if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
  542. av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
  543. return ret;
  544. }
  545. output_samples_u8 = s->frame.data[0];
  546. output_samples_s16 = (int16_t *)s->frame.data[0];
  547. /* decode silent chunks */
  548. if (silent_chunks > 0) {
  549. int silent_size = avctx->block_align * silent_chunks;
  550. if (s->out_bps == 2) {
  551. memset(output_samples_s16, 0x00, silent_size * 2);
  552. output_samples_s16 += silent_size;
  553. } else {
  554. memset(output_samples_u8, 0x80, silent_size);
  555. output_samples_u8 += silent_size;
  556. }
  557. }
  558. /* decode audio chunks */
  559. if (audio_chunks > 0) {
  560. buf_end = buf + buf_size;
  561. while (buf < buf_end) {
  562. if (s->out_bps == 2) {
  563. decode_audio_s16(output_samples_s16, buf, s->chunk_size,
  564. avctx->channels);
  565. output_samples_s16 += avctx->block_align;
  566. } else {
  567. memcpy(output_samples_u8, buf, s->chunk_size);
  568. output_samples_u8 += avctx->block_align;
  569. }
  570. buf += s->chunk_size;
  571. }
  572. }
  573. *got_frame_ptr = 1;
  574. *(AVFrame *)data = s->frame;
  575. return avpkt->size;
  576. }
  577. /*
  578. * Public Data Structures
  579. */
/* exported video decoder description, registered via allcodecs */
AVCodec ff_vmdvideo_decoder = {
    .name           = "vmdvideo",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VMDVIDEO,
    .priv_data_size = sizeof(VmdVideoContext),
    .init           = vmdvideo_decode_init,
    .close          = vmdvideo_decode_end,
    .decode         = vmdvideo_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Sierra VMD video"),
};
/* exported audio decoder description; note: no .close — the audio context
 * owns no heap allocations */
AVCodec ff_vmdaudio_decoder = {
    .name           = "vmdaudio",
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_VMDAUDIO,
    .priv_data_size = sizeof(VmdAudioContext),
    .init           = vmdaudio_decode_init,
    .decode         = vmdaudio_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Sierra VMD audio"),
};