/*
 * Opus decoder
 * Copyright (c) 2012 Andrew D'Addesio
 * Copyright (c) 2013-2014 Mozilla Corporation
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Opus decoder
 * @author Andrew D'Addesio, Anton Khirnov
 *
 * Codec homepage: http://opus-codec.org/
 * Specification: http://tools.ietf.org/html/rfc6716
 * Ogg Opus specification: https://tools.ietf.org/html/draft-ietf-codec-oggopus-03
 *
 * Ogg-contained .opus files can be produced with opus-tools:
 * http://git.xiph.org/?p=opus-tools.git
 */

#include <stdint.h>

#include "libavutil/attributes.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"

#include "libavresample/avresample.h"

#include "avcodec.h"
#include "celp_filters.h"
#include "fft.h"
#include "get_bits.h"
#include "internal.h"
#include "mathops.h"
#include "opus.h"

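/* duration in ms of a SILK or hybrid frame for each TOC configuration
 * number (configs 0-11 are SILK-only, 12-15 are hybrid; see RFC 6716,
 * section 3.1); indexed by OpusPacket.config */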
static const uint16_t silk_frame_duration_ms[16] = {
    10, 20, 40, 60,
    10, 20, 40, 60,
    10, 20, 40, 60,
    10, 20,
    10, 20,
};

/* number of samples of silence to feed to the resampler
 * at the beginning */
static const int silk_resample_delay[] = {
    4, 8, 11, 11, 11
};

static const uint8_t celt_band_end[] = { 13, 17, 17, 19, 21 };

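/* map a TOC configuration number to the internal SILK sample rate:
 * narrowband (configs 0-3) runs at 8 kHz, mediumband (4-7) at 12 kHz,
 * wideband and hybrid (8 and up) at 16 kHz */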
static int get_silk_samplerate(int config)
{
    if (config < 4)
        return 8000;
    else if (config < 8)
        return 12000;
    return 16000;
}

/**
 * Range decoder
 */
static int opus_rc_init(OpusRangeCoder *rc, const uint8_t *data, int size)
{
    int ret = init_get_bits8(&rc->gb, data, size);
    if (ret < 0)
        return ret;

    rc->range = 128;
    rc->value = 127 - get_bits(&rc->gb, 7);
    rc->total_read_bits = 9;
    opus_rc_normalize(rc);

    return 0;
}

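/* initialize the raw bit reader, which consumes "raw" bits from the end
 * of the frame backwards while the range coder consumes entropy-coded
 * data from the front (see RFC 6716, section 4.1) */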
static void opus_raw_init(OpusRangeCoder *rc, const uint8_t *rightend,
                          unsigned int bytes)
{
    rc->rb.position = rightend;
    rc->rb.bytes    = bytes;
    rc->rb.cachelen = 0;
    rc->rb.cacheval = 0;
}

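/* linear crossfade over len samples: the output starts as in1 where the
 * window is 0 and approaches in2 as the window approaches 1 */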
static void opus_fade(float *out,
                      const float *in1, const float *in2,
                      const float *window, int len)
{
    int i;
    for (i = 0; i < len; i++)
        out[i] = in2[i] * window[i] + in1[i] * (1.0 - window[i]);
}

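/* drain the remaining nb_samples delayed samples out of the resampler,
 * mix in any buffered CELT delay samples, finish a pending redundancy
 * crossfade and advance the output pointers */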
static int opus_flush_resample(OpusStreamContext *s, int nb_samples)
{
    int celt_size = av_audio_fifo_size(s->celt_delay);
    int ret, i;

    ret = avresample_convert(s->avr, (uint8_t**)s->out, s->out_size, nb_samples,
                             NULL, 0, 0);
    if (ret < 0)
        return ret;
    else if (ret != nb_samples) {
        av_log(s->avctx, AV_LOG_ERROR, "Wrong number of flushed samples: %d\n",
               ret);
        return AVERROR_BUG;
    }

    if (celt_size) {
        if (celt_size != nb_samples) {
            av_log(s->avctx, AV_LOG_ERROR, "Wrong number of CELT delay samples.\n");
            return AVERROR_BUG;
        }
        av_audio_fifo_read(s->celt_delay, (void**)s->celt_output, nb_samples);
        for (i = 0; i < s->output_channels; i++) {
            s->fdsp->vector_fmac_scalar(s->out[i],
                                        s->celt_output[i], 1.0,
                                        nb_samples);
        }
    }

    if (s->redundancy_idx) {
        for (i = 0; i < s->output_channels; i++)
            opus_fade(s->out[i], s->out[i],
                      s->redundancy_output[i] + 120 + s->redundancy_idx,
                      ff_celt_window2 + s->redundancy_idx, 120 - s->redundancy_idx);
        s->redundancy_idx = 0;
    }

    s->out[0]   += nb_samples;
    s->out[1]   += nb_samples;
    s->out_size -= nb_samples * sizeof(float);

    return 0;
}

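/* open the resampler for the current SILK sample rate and prime it with
 * silence so that its startup delay does not shift the output */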
static int opus_init_resample(OpusStreamContext *s)
{
    float delay[16] = { 0.0 };
    uint8_t *delayptr[2] = { (uint8_t*)delay, (uint8_t*)delay };
    int ret;

    av_opt_set_int(s->avr, "in_sample_rate", s->silk_samplerate, 0);
    ret = avresample_open(s->avr);
    if (ret < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Error opening the resampler.\n");
        return ret;
    }

    ret = avresample_convert(s->avr, NULL, 0, 0, delayptr, sizeof(delay),
                             silk_resample_delay[s->packet.bandwidth]);
    if (ret < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Error feeding initial silence to the resampler.\n");
        return ret;
    }

    return 0;
}

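/* decode a redundancy frame: a short CELT frame (240 samples, i.e. 5 ms
 * at 48 kHz) carried around mode transitions, using its own range coder */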
static int opus_decode_redundancy(OpusStreamContext *s, const uint8_t *data, int size)
{
    int ret;
    enum OpusBandwidth bw = s->packet.bandwidth;

    /* CELT has no mediumband layer, so mediumband SILK redundancy
     * is decoded as wideband */
    if (s->packet.mode == OPUS_MODE_SILK &&
        bw == OPUS_BANDWIDTH_MEDIUMBAND)
        bw = OPUS_BANDWIDTH_WIDEBAND;

    ret = opus_rc_init(&s->redundancy_rc, data, size);
    if (ret < 0)
        goto fail;
    opus_raw_init(&s->redundancy_rc, data + size, size);

    ret = ff_celt_decode_frame(s->celt, &s->redundancy_rc,
                               s->redundancy_output,
                               s->packet.stereo + 1, 240,
                               0, celt_band_end[bw]);
    if (ret < 0)
        goto fail;

    return 0;
fail:
    av_log(s->avctx, AV_LOG_ERROR, "Error decoding the redundancy frame.\n");
    return ret;
}

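/* decode a single Opus frame into s->out: the SILK layer (resampled to
 * 48 kHz) and/or the CELT layer, plus an optional redundancy frame that
 * is crossfaded with the main output; returns the number of decoded
 * samples or a negative error code */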
static int opus_decode_frame(OpusStreamContext *s, const uint8_t *data, int size)
{
    int samples    = s->packet.frame_duration;
    int redundancy = 0;
    int redundancy_size, redundancy_pos;
    int ret, i, consumed;
    int delayed_samples = s->delayed_samples;

    ret = opus_rc_init(&s->rc, data, size);
    if (ret < 0)
        return ret;

    /* decode the silk frame */
    if (s->packet.mode == OPUS_MODE_SILK || s->packet.mode == OPUS_MODE_HYBRID) {
        if (!avresample_is_open(s->avr)) {
            ret = opus_init_resample(s);
            if (ret < 0)
                return ret;
        }

        samples = ff_silk_decode_superframe(s->silk, &s->rc, s->silk_output,
                                            FFMIN(s->packet.bandwidth, OPUS_BANDWIDTH_WIDEBAND),
                                            s->packet.stereo + 1,
                                            silk_frame_duration_ms[s->packet.config]);
        if (samples < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error decoding a SILK frame.\n");
            return samples;
        }
        samples = avresample_convert(s->avr, (uint8_t**)s->out, s->out_size,
                                     s->packet.frame_duration,
                                     (uint8_t**)s->silk_output,
                                     sizeof(s->silk_buf[0]),
                                     samples);
        if (samples < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error resampling SILK data.\n");
            return samples;
        }
        s->delayed_samples += s->packet.frame_duration - samples;
    } else
        ff_silk_flush(s->silk);

    // decode redundancy information
    consumed = opus_rc_tell(&s->rc);
    if (s->packet.mode == OPUS_MODE_HYBRID && consumed + 37 <= size * 8)
        redundancy = opus_rc_p2model(&s->rc, 12);
    else if (s->packet.mode == OPUS_MODE_SILK && consumed + 17 <= size * 8)
        redundancy = 1;

    if (redundancy) {
        redundancy_pos = opus_rc_p2model(&s->rc, 1);

        if (s->packet.mode == OPUS_MODE_HYBRID)
            redundancy_size = opus_rc_unimodel(&s->rc, 256) + 2;
        else
            redundancy_size = size - (consumed + 7) / 8;
        size -= redundancy_size;
        if (size < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid redundancy frame size.\n");
            return AVERROR_INVALIDDATA;
        }

        if (redundancy_pos) {
            ret = opus_decode_redundancy(s, data + size, redundancy_size);
            if (ret < 0)
                return ret;
            ff_celt_flush(s->celt);
        }
    }

    /* decode the CELT frame */
    if (s->packet.mode == OPUS_MODE_CELT || s->packet.mode == OPUS_MODE_HYBRID) {
        float *out_tmp[2] = { s->out[0], s->out[1] };
        float **dst = (s->packet.mode == OPUS_MODE_CELT) ?
                      out_tmp : s->celt_output;
        int celt_output_samples = samples;
        int delay_samples = av_audio_fifo_size(s->celt_delay);

        if (delay_samples) {
            if (s->packet.mode == OPUS_MODE_HYBRID) {
                av_audio_fifo_read(s->celt_delay, (void**)s->celt_output, delay_samples);

                for (i = 0; i < s->output_channels; i++) {
                    s->fdsp->vector_fmac_scalar(out_tmp[i], s->celt_output[i], 1.0,
                                                delay_samples);
                    out_tmp[i] += delay_samples;
                }
                celt_output_samples -= delay_samples;
            } else {
                av_log(s->avctx, AV_LOG_WARNING,
                       "Spurious CELT delay samples present.\n");
                av_audio_fifo_drain(s->celt_delay, delay_samples);
                if (s->avctx->err_recognition & AV_EF_EXPLODE)
                    return AVERROR_BUG;
            }
        }

        opus_raw_init(&s->rc, data + size, size);

        ret = ff_celt_decode_frame(s->celt, &s->rc, dst,
                                   s->packet.stereo + 1,
                                   s->packet.frame_duration,
                                   (s->packet.mode == OPUS_MODE_HYBRID) ? 17 : 0,
                                   celt_band_end[s->packet.bandwidth]);
        if (ret < 0)
            return ret;

        if (s->packet.mode == OPUS_MODE_HYBRID) {
            int celt_delay = s->packet.frame_duration - celt_output_samples;
            void *delaybuf[2] = { s->celt_output[0] + celt_output_samples,
                                  s->celt_output[1] + celt_output_samples };

            for (i = 0; i < s->output_channels; i++) {
                s->fdsp->vector_fmac_scalar(out_tmp[i],
                                            s->celt_output[i], 1.0,
                                            celt_output_samples);
            }

            ret = av_audio_fifo_write(s->celt_delay, delaybuf, celt_delay);
            if (ret < 0)
                return ret;
        }
    } else
        ff_celt_flush(s->celt);

    if (s->redundancy_idx) {
        for (i = 0; i < s->output_channels; i++)
            opus_fade(s->out[i], s->out[i],
                      s->redundancy_output[i] + 120 + s->redundancy_idx,
                      ff_celt_window2 + s->redundancy_idx, 120 - s->redundancy_idx);
        s->redundancy_idx = 0;
    }
    if (redundancy) {
        if (!redundancy_pos) {
            ff_celt_flush(s->celt);
            ret = opus_decode_redundancy(s, data + size, redundancy_size);
            if (ret < 0)
                return ret;

            for (i = 0; i < s->output_channels; i++) {
                opus_fade(s->out[i] + samples - 120 + delayed_samples,
                          s->out[i] + samples - 120 + delayed_samples,
                          s->redundancy_output[i] + 120,
                          ff_celt_window2, 120 - delayed_samples);
                if (delayed_samples)
                    s->redundancy_idx = 120 - delayed_samples;
            }
        } else {
            for (i = 0; i < s->output_channels; i++) {
                memcpy(s->out[i] + delayed_samples, s->redundancy_output[i], 120 * sizeof(float));
                opus_fade(s->out[i] + 120 + delayed_samples,
                          s->redundancy_output[i] + 120,
                          s->out[i] + 120 + delayed_samples,
                          ff_celt_window2, 120);
            }
        }
    }

    return samples;
}

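/* decode all the frames of one stream's portion of the packet; the
 * resampler is flushed first when the new packet no longer feeds it
 * (CELT-only mode or a different SILK sample rate), or on a final flush
 * when only delayed samples remain; returns the total number of samples
 * written to s->out */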
static int opus_decode_subpacket(OpusStreamContext *s,
                                 const uint8_t *buf, int buf_size,
                                 int nb_samples)
{
    int output_samples = 0;
    int flush_needed   = 0;
    int i, j, ret;

    /* check if we need to flush the resampler */
    if (avresample_is_open(s->avr)) {
        if (buf) {
            int64_t cur_samplerate;
            av_opt_get_int(s->avr, "in_sample_rate", 0, &cur_samplerate);
            flush_needed = (s->packet.mode == OPUS_MODE_CELT) || (cur_samplerate != s->silk_samplerate);
        } else {
            flush_needed = !!s->delayed_samples;
        }
    }

    if (!buf && !flush_needed)
        return 0;

    /* use dummy output buffers if the channel is not mapped to anything */
    if (!s->out[0] ||
        (s->output_channels == 2 && !s->out[1])) {
        av_fast_malloc(&s->out_dummy, &s->out_dummy_allocated_size, s->out_size);
        if (!s->out_dummy)
            return AVERROR(ENOMEM);
        if (!s->out[0])
            s->out[0] = s->out_dummy;
        if (!s->out[1])
            s->out[1] = s->out_dummy;
    }

    /* flush the resampler if necessary */
    if (flush_needed) {
        ret = opus_flush_resample(s, s->delayed_samples);
        if (ret < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error flushing the resampler.\n");
            return ret;
        }
        avresample_close(s->avr);
        output_samples += s->delayed_samples;
        s->delayed_samples = 0;

        if (!buf)
            goto finish;
    }

    /* decode all the frames in the packet */
    for (i = 0; i < s->packet.frame_count; i++) {
        int size = s->packet.frame_size[i];
        int samples = opus_decode_frame(s, buf + s->packet.frame_offset[i], size);

        if (samples < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error decoding an Opus frame.\n");
            if (s->avctx->err_recognition & AV_EF_EXPLODE)
                return samples;

            for (j = 0; j < s->output_channels; j++)
                memset(s->out[j], 0, s->packet.frame_duration * sizeof(float));
            samples = s->packet.frame_duration;
        }
        output_samples += samples;

        for (j = 0; j < s->output_channels; j++)
            s->out[j] += samples;
        s->out_size -= samples * sizeof(float);
    }

finish:
    s->out[0] = s->out[1] = NULL;
    s->out_size = 0;

    return output_samples;
}

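/* decode one (possibly multistream) Opus packet: parse the header of each
 * substream, decode the substreams into the mapped output channels, then
 * handle copied/silent channels and apply the output gain signalled in
 * the header */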
static int opus_decode_packet(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
{
    OpusContext *c      = avctx->priv_data;
    AVFrame *frame      = data;
    const uint8_t *buf  = avpkt->data;
    int buf_size        = avpkt->size;
    int coded_samples   = 0;
    int decoded_samples = 0;
    int i, ret;

    /* decode the header of the first sub-packet to find out the sample count */
    if (buf) {
        OpusPacket *pkt = &c->streams[0].packet;
        ret = ff_opus_parse_packet(pkt, buf, buf_size, c->nb_streams > 1);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
            return ret;
        }
        coded_samples += pkt->frame_count * pkt->frame_duration;
        c->streams[0].silk_samplerate = get_silk_samplerate(pkt->config);
    }

    frame->nb_samples = coded_samples + c->streams[0].delayed_samples;

    /* no input or buffered data => nothing to do */
    if (!frame->nb_samples) {
        *got_frame_ptr = 0;
        return 0;
    }

    /* setup the data buffers */
    ret = ff_get_buffer(avctx, frame, 0);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    frame->nb_samples = 0;

    for (i = 0; i < avctx->channels; i++) {
        ChannelMap *map = &c->channel_maps[i];
        if (!map->copy)
            c->streams[map->stream_idx].out[map->channel_idx] = (float*)frame->extended_data[i];
    }

    for (i = 0; i < c->nb_streams; i++)
        c->streams[i].out_size = frame->linesize[0];

    /* decode each sub-packet */
    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];

        if (i && buf) {
            ret = ff_opus_parse_packet(&s->packet, buf, buf_size, i != c->nb_streams - 1);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
                return ret;
            }
            if (coded_samples != s->packet.frame_count * s->packet.frame_duration) {
                av_log(avctx, AV_LOG_ERROR,
                       "Mismatching coded sample count in substream %d.\n", i);
                return AVERROR_INVALIDDATA;
            }
            s->silk_samplerate = get_silk_samplerate(s->packet.config);
        }

        ret = opus_decode_subpacket(&c->streams[i], buf,
                                    s->packet.data_size, coded_samples);
        if (ret < 0)
            return ret;
        if (decoded_samples && ret != decoded_samples) {
            av_log(avctx, AV_LOG_ERROR, "Different numbers of decoded samples "
                   "in a multi-channel stream\n");
            return AVERROR_INVALIDDATA;
        }
        decoded_samples = ret;

        buf      += s->packet.packet_size;
        buf_size -= s->packet.packet_size;
    }

    for (i = 0; i < avctx->channels; i++) {
        ChannelMap *map = &c->channel_maps[i];

        /* handle copied channels */
        if (map->copy) {
            memcpy(frame->extended_data[i],
                   frame->extended_data[map->copy_idx],
                   frame->linesize[0]);
        } else if (map->silence) {
            memset(frame->extended_data[i], 0, frame->linesize[0]);
        }

        if (c->gain_i) {
            c->fdsp.vector_fmul_scalar((float*)frame->extended_data[i],
                                       (float*)frame->extended_data[i],
                                       c->gain, FFALIGN(decoded_samples, 8));
        }
    }

    frame->nb_samples = decoded_samples;
    *got_frame_ptr    = !!decoded_samples;

    return avpkt->size;
}

static av_cold void opus_decode_flush(AVCodecContext *ctx)
{
    OpusContext *c = ctx->priv_data;
    int i;

    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];

        memset(&s->packet, 0, sizeof(s->packet));
        s->delayed_samples = 0;

        if (s->celt_delay)
            av_audio_fifo_drain(s->celt_delay, av_audio_fifo_size(s->celt_delay));
        avresample_close(s->avr);

        ff_silk_flush(s->silk);
        ff_celt_flush(s->celt);
    }
}

static av_cold int opus_decode_close(AVCodecContext *avctx)
{
    OpusContext *c = avctx->priv_data;
    int i;

    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];

        ff_silk_free(&s->silk);
        ff_celt_free(&s->celt);

        av_freep(&s->out_dummy);
        s->out_dummy_allocated_size = 0;

        av_audio_fifo_free(s->celt_delay);
        avresample_free(&s->avr);
    }

    av_freep(&c->streams);
    c->nb_streams = 0;

    av_freep(&c->channel_maps);

    return 0;
}

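/* parse the extradata to get the channel mapping, then set up one
 * independent SILK/CELT decoder, resampler and CELT delay FIFO per
 * coded stream */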
static av_cold int opus_decode_init(AVCodecContext *avctx)
{
    OpusContext *c = avctx->priv_data;
    int ret, i, j;

    avctx->sample_fmt  = AV_SAMPLE_FMT_FLTP;
    avctx->sample_rate = 48000;

    avpriv_float_dsp_init(&c->fdsp, 0);

    /* find out the channel configuration */
    ret = ff_opus_parse_extradata(avctx, c);
    if (ret < 0)
        return ret;

    /* allocate and init each independent decoder */
    c->streams = av_mallocz_array(c->nb_streams, sizeof(*c->streams));
    if (!c->streams) {
        c->nb_streams = 0;
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];
        uint64_t layout;

        s->output_channels = (i < c->nb_stereo_streams) ? 2 : 1;

        s->avctx = avctx;

        for (j = 0; j < s->output_channels; j++) {
            s->silk_output[j]       = s->silk_buf[j];
            s->celt_output[j]       = s->celt_buf[j];
            s->redundancy_output[j] = s->redundancy_buf[j];
        }

        s->fdsp = &c->fdsp;

        s->avr = avresample_alloc_context();
        if (!s->avr) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        layout = (s->output_channels == 1) ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
        av_opt_set_int(s->avr, "in_sample_fmt",      avctx->sample_fmt,  0);
        av_opt_set_int(s->avr, "out_sample_fmt",     avctx->sample_fmt,  0);
        av_opt_set_int(s->avr, "in_channel_layout",  layout,             0);
        av_opt_set_int(s->avr, "out_channel_layout", layout,             0);
        av_opt_set_int(s->avr, "out_sample_rate",    avctx->sample_rate, 0);

        ret = ff_silk_init(avctx, &s->silk, s->output_channels);
        if (ret < 0)
            goto fail;

        ret = ff_celt_init(avctx, &s->celt, s->output_channels);
        if (ret < 0)
            goto fail;

        s->celt_delay = av_audio_fifo_alloc(avctx->sample_fmt,
                                            s->output_channels, 1024);
        if (!s->celt_delay) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    return 0;
fail:
    opus_decode_close(avctx);
    return ret;
}

AVCodec ff_opus_decoder = {
    .name            = "opus",
    .long_name       = NULL_IF_CONFIG_SMALL("Opus"),
    .type            = AVMEDIA_TYPE_AUDIO,
    .id              = AV_CODEC_ID_OPUS,
    .priv_data_size  = sizeof(OpusContext),
    .init            = opus_decode_init,
    .close           = opus_decode_close,
    .decode          = opus_decode_packet,
    .flush           = opus_decode_flush,
    .capabilities    = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
};