You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

688 lines
22KB

  1. /*
  2. * Opus decoder
  3. * Copyright (c) 2012 Andrew D'Addesio
  4. * Copyright (c) 2013-2014 Mozilla Corporation
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file
  24. * Opus decoder
  25. * @author Andrew D'Addesio, Anton Khirnov
  26. *
  27. * Codec homepage: http://opus-codec.org/
  28. * Specification: http://tools.ietf.org/html/rfc6716
  29. * Ogg Opus specification: https://tools.ietf.org/html/draft-ietf-codec-oggopus-03
  30. *
  31. * Ogg-contained .opus files can be produced with opus-tools:
  32. * http://git.xiph.org/?p=opus-tools.git
  33. */
  34. #include <stdint.h>
  35. #include "libavutil/attributes.h"
  36. #include "libavutil/audio_fifo.h"
  37. #include "libavutil/channel_layout.h"
  38. #include "libavutil/opt.h"
  39. #include "libswresample/swresample.h"
  40. #include "avcodec.h"
  41. #include "get_bits.h"
  42. #include "internal.h"
  43. #include "mathops.h"
  44. #include "opus.h"
  45. static const uint16_t silk_frame_duration_ms[16] = {
  46. 10, 20, 40, 60,
  47. 10, 20, 40, 60,
  48. 10, 20, 40, 60,
  49. 10, 20,
  50. 10, 20,
  51. };
  52. /* number of samples of silence to feed to the resampler
  53. * at the beginning */
  54. static const int silk_resample_delay[] = {
  55. 4, 8, 11, 11, 11
  56. };
  57. static const uint8_t celt_band_end[] = { 13, 17, 17, 19, 21 };
  58. static int get_silk_samplerate(int config)
  59. {
  60. if (config < 4)
  61. return 8000;
  62. else if (config < 8)
  63. return 12000;
  64. return 16000;
  65. }
  66. /**
  67. * Range decoder
  68. */
  69. static int opus_rc_init(OpusRangeCoder *rc, const uint8_t *data, int size)
  70. {
  71. int ret = init_get_bits8(&rc->gb, data, size);
  72. if (ret < 0)
  73. return ret;
  74. rc->range = 128;
  75. rc->value = 127 - get_bits(&rc->gb, 7);
  76. rc->total_read_bits = 9;
  77. opus_rc_normalize(rc);
  78. return 0;
  79. }
  80. static void opus_raw_init(OpusRangeCoder *rc, const uint8_t *rightend,
  81. unsigned int bytes)
  82. {
  83. rc->rb.position = rightend;
  84. rc->rb.bytes = bytes;
  85. rc->rb.cachelen = 0;
  86. rc->rb.cacheval = 0;
  87. }
  88. static void opus_fade(float *out,
  89. const float *in1, const float *in2,
  90. const float *window, int len)
  91. {
  92. int i;
  93. for (i = 0; i < len; i++)
  94. out[i] = in2[i] * window[i] + in1[i] * (1.0 - window[i]);
  95. }
  96. static int opus_flush_resample(OpusStreamContext *s, int nb_samples)
  97. {
  98. int celt_size = av_audio_fifo_size(s->celt_delay);
  99. int ret, i;
  100. ret = swr_convert(s->swr,
  101. (uint8_t**)s->out, nb_samples,
  102. NULL, 0);
  103. if (ret < 0)
  104. return ret;
  105. else if (ret != nb_samples) {
  106. av_log(s->avctx, AV_LOG_ERROR, "Wrong number of flushed samples: %d\n",
  107. ret);
  108. return AVERROR_BUG;
  109. }
  110. if (celt_size) {
  111. if (celt_size != nb_samples) {
  112. av_log(s->avctx, AV_LOG_ERROR, "Wrong number of CELT delay samples.\n");
  113. return AVERROR_BUG;
  114. }
  115. av_audio_fifo_read(s->celt_delay, (void**)s->celt_output, nb_samples);
  116. for (i = 0; i < s->output_channels; i++) {
  117. s->fdsp->vector_fmac_scalar(s->out[i],
  118. s->celt_output[i], 1.0,
  119. nb_samples);
  120. }
  121. }
  122. if (s->redundancy_idx) {
  123. for (i = 0; i < s->output_channels; i++)
  124. opus_fade(s->out[i], s->out[i],
  125. s->redundancy_output[i] + 120 + s->redundancy_idx,
  126. ff_celt_window2 + s->redundancy_idx, 120 - s->redundancy_idx);
  127. s->redundancy_idx = 0;
  128. }
  129. s->out[0] += nb_samples;
  130. s->out[1] += nb_samples;
  131. s->out_size -= nb_samples * sizeof(float);
  132. return 0;
  133. }
  134. static int opus_init_resample(OpusStreamContext *s)
  135. {
  136. static const float delay[16] = { 0.0 };
  137. const uint8_t *delayptr[2] = { (uint8_t*)delay, (uint8_t*)delay };
  138. int ret;
  139. av_opt_set_int(s->swr, "in_sample_rate", s->silk_samplerate, 0);
  140. ret = swr_init(s->swr);
  141. if (ret < 0) {
  142. av_log(s->avctx, AV_LOG_ERROR, "Error opening the resampler.\n");
  143. return ret;
  144. }
  145. ret = swr_convert(s->swr,
  146. NULL, 0,
  147. delayptr, silk_resample_delay[s->packet.bandwidth]);
  148. if (ret < 0) {
  149. av_log(s->avctx, AV_LOG_ERROR,
  150. "Error feeding initial silence to the resampler.\n");
  151. return ret;
  152. }
  153. return 0;
  154. }
  155. static int opus_decode_redundancy(OpusStreamContext *s, const uint8_t *data, int size)
  156. {
  157. int ret;
  158. enum OpusBandwidth bw = s->packet.bandwidth;
  159. if (s->packet.mode == OPUS_MODE_SILK &&
  160. bw == OPUS_BANDWIDTH_MEDIUMBAND)
  161. bw = OPUS_BANDWIDTH_WIDEBAND;
  162. ret = opus_rc_init(&s->redundancy_rc, data, size);
  163. if (ret < 0)
  164. goto fail;
  165. opus_raw_init(&s->redundancy_rc, data + size, size);
  166. ret = ff_celt_decode_frame(s->celt, &s->redundancy_rc,
  167. s->redundancy_output,
  168. s->packet.stereo + 1, 240,
  169. 0, celt_band_end[s->packet.bandwidth]);
  170. if (ret < 0)
  171. goto fail;
  172. return 0;
  173. fail:
  174. av_log(s->avctx, AV_LOG_ERROR, "Error decoding the redundancy frame.\n");
  175. return ret;
  176. }
  177. static int opus_decode_frame(OpusStreamContext *s, const uint8_t *data, int size)
  178. {
  179. int samples = s->packet.frame_duration;
  180. int redundancy = 0;
  181. int redundancy_size, redundancy_pos;
  182. int ret, i, consumed;
  183. int delayed_samples = s->delayed_samples;
  184. ret = opus_rc_init(&s->rc, data, size);
  185. if (ret < 0)
  186. return ret;
  187. /* decode the silk frame */
  188. if (s->packet.mode == OPUS_MODE_SILK || s->packet.mode == OPUS_MODE_HYBRID) {
  189. if (!swr_is_initialized(s->swr)) {
  190. ret = opus_init_resample(s);
  191. if (ret < 0)
  192. return ret;
  193. }
  194. samples = ff_silk_decode_superframe(s->silk, &s->rc, s->silk_output,
  195. FFMIN(s->packet.bandwidth, OPUS_BANDWIDTH_WIDEBAND),
  196. s->packet.stereo + 1,
  197. silk_frame_duration_ms[s->packet.config]);
  198. if (samples < 0) {
  199. av_log(s->avctx, AV_LOG_ERROR, "Error decoding a SILK frame.\n");
  200. return samples;
  201. }
  202. samples = swr_convert(s->swr,
  203. (uint8_t**)s->out, s->packet.frame_duration,
  204. (const uint8_t**)s->silk_output, samples);
  205. if (samples < 0) {
  206. av_log(s->avctx, AV_LOG_ERROR, "Error resampling SILK data.\n");
  207. return samples;
  208. }
  209. av_assert2((samples & 7) == 0);
  210. s->delayed_samples += s->packet.frame_duration - samples;
  211. } else
  212. ff_silk_flush(s->silk);
  213. // decode redundancy information
  214. consumed = opus_rc_tell(&s->rc);
  215. if (s->packet.mode == OPUS_MODE_HYBRID && consumed + 37 <= size * 8)
  216. redundancy = opus_rc_p2model(&s->rc, 12);
  217. else if (s->packet.mode == OPUS_MODE_SILK && consumed + 17 <= size * 8)
  218. redundancy = 1;
  219. if (redundancy) {
  220. redundancy_pos = opus_rc_p2model(&s->rc, 1);
  221. if (s->packet.mode == OPUS_MODE_HYBRID)
  222. redundancy_size = opus_rc_unimodel(&s->rc, 256) + 2;
  223. else
  224. redundancy_size = size - (consumed + 7) / 8;
  225. size -= redundancy_size;
  226. if (size < 0) {
  227. av_log(s->avctx, AV_LOG_ERROR, "Invalid redundancy frame size.\n");
  228. return AVERROR_INVALIDDATA;
  229. }
  230. if (redundancy_pos) {
  231. ret = opus_decode_redundancy(s, data + size, redundancy_size);
  232. if (ret < 0)
  233. return ret;
  234. ff_celt_flush(s->celt);
  235. }
  236. }
  237. /* decode the CELT frame */
  238. if (s->packet.mode == OPUS_MODE_CELT || s->packet.mode == OPUS_MODE_HYBRID) {
  239. float *out_tmp[2] = { s->out[0], s->out[1] };
  240. float **dst = (s->packet.mode == OPUS_MODE_CELT) ?
  241. out_tmp : s->celt_output;
  242. int celt_output_samples = samples;
  243. int delay_samples = av_audio_fifo_size(s->celt_delay);
  244. if (delay_samples) {
  245. if (s->packet.mode == OPUS_MODE_HYBRID) {
  246. av_audio_fifo_read(s->celt_delay, (void**)s->celt_output, delay_samples);
  247. for (i = 0; i < s->output_channels; i++) {
  248. s->fdsp->vector_fmac_scalar(out_tmp[i], s->celt_output[i], 1.0,
  249. delay_samples);
  250. out_tmp[i] += delay_samples;
  251. }
  252. celt_output_samples -= delay_samples;
  253. } else {
  254. av_log(s->avctx, AV_LOG_WARNING,
  255. "Spurious CELT delay samples present.\n");
  256. av_audio_fifo_drain(s->celt_delay, delay_samples);
  257. if (s->avctx->err_recognition & AV_EF_EXPLODE)
  258. return AVERROR_BUG;
  259. }
  260. }
  261. opus_raw_init(&s->rc, data + size, size);
  262. ret = ff_celt_decode_frame(s->celt, &s->rc, dst,
  263. s->packet.stereo + 1,
  264. s->packet.frame_duration,
  265. (s->packet.mode == OPUS_MODE_HYBRID) ? 17 : 0,
  266. celt_band_end[s->packet.bandwidth]);
  267. if (ret < 0)
  268. return ret;
  269. if (s->packet.mode == OPUS_MODE_HYBRID) {
  270. int celt_delay = s->packet.frame_duration - celt_output_samples;
  271. void *delaybuf[2] = { s->celt_output[0] + celt_output_samples,
  272. s->celt_output[1] + celt_output_samples };
  273. for (i = 0; i < s->output_channels; i++) {
  274. s->fdsp->vector_fmac_scalar(out_tmp[i],
  275. s->celt_output[i], 1.0,
  276. celt_output_samples);
  277. }
  278. ret = av_audio_fifo_write(s->celt_delay, delaybuf, celt_delay);
  279. if (ret < 0)
  280. return ret;
  281. }
  282. } else
  283. ff_celt_flush(s->celt);
  284. if (s->redundancy_idx) {
  285. for (i = 0; i < s->output_channels; i++)
  286. opus_fade(s->out[i], s->out[i],
  287. s->redundancy_output[i] + 120 + s->redundancy_idx,
  288. ff_celt_window2 + s->redundancy_idx, 120 - s->redundancy_idx);
  289. s->redundancy_idx = 0;
  290. }
  291. if (redundancy) {
  292. if (!redundancy_pos) {
  293. ff_celt_flush(s->celt);
  294. ret = opus_decode_redundancy(s, data + size, redundancy_size);
  295. if (ret < 0)
  296. return ret;
  297. for (i = 0; i < s->output_channels; i++) {
  298. opus_fade(s->out[i] + samples - 120 + delayed_samples,
  299. s->out[i] + samples - 120 + delayed_samples,
  300. s->redundancy_output[i] + 120,
  301. ff_celt_window2, 120 - delayed_samples);
  302. if (delayed_samples)
  303. s->redundancy_idx = 120 - delayed_samples;
  304. }
  305. } else {
  306. for (i = 0; i < s->output_channels; i++) {
  307. memcpy(s->out[i] + delayed_samples, s->redundancy_output[i], 120 * sizeof(float));
  308. opus_fade(s->out[i] + 120 + delayed_samples,
  309. s->redundancy_output[i] + 120,
  310. s->out[i] + 120 + delayed_samples,
  311. ff_celt_window2, 120);
  312. }
  313. }
  314. }
  315. return samples;
  316. }
  317. static int opus_decode_subpacket(OpusStreamContext *s,
  318. const uint8_t *buf, int buf_size,
  319. int nb_samples)
  320. {
  321. int output_samples = 0;
  322. int flush_needed = 0;
  323. int i, j, ret;
  324. /* check if we need to flush the resampler */
  325. if (swr_is_initialized(s->swr)) {
  326. if (buf) {
  327. int64_t cur_samplerate;
  328. av_opt_get_int(s->swr, "in_sample_rate", 0, &cur_samplerate);
  329. flush_needed = (s->packet.mode == OPUS_MODE_CELT) || (cur_samplerate != s->silk_samplerate);
  330. } else {
  331. flush_needed = !!s->delayed_samples;
  332. }
  333. }
  334. if (!buf && !flush_needed)
  335. return 0;
  336. /* use dummy output buffers if the channel is not mapped to anything */
  337. if (!s->out[0] ||
  338. (s->output_channels == 2 && !s->out[1])) {
  339. av_fast_malloc(&s->out_dummy, &s->out_dummy_allocated_size, s->out_size);
  340. if (!s->out_dummy)
  341. return AVERROR(ENOMEM);
  342. if (!s->out[0])
  343. s->out[0] = s->out_dummy;
  344. if (!s->out[1])
  345. s->out[1] = s->out_dummy;
  346. }
  347. /* flush the resampler if necessary */
  348. if (flush_needed) {
  349. ret = opus_flush_resample(s, s->delayed_samples);
  350. if (ret < 0) {
  351. av_log(s->avctx, AV_LOG_ERROR, "Error flushing the resampler.\n");
  352. return ret;
  353. }
  354. swr_close(s->swr);
  355. output_samples += s->delayed_samples;
  356. s->delayed_samples = 0;
  357. if (!buf)
  358. goto finish;
  359. }
  360. /* decode all the frames in the packet */
  361. for (i = 0; i < s->packet.frame_count; i++) {
  362. int size = s->packet.frame_size[i];
  363. int samples = opus_decode_frame(s, buf + s->packet.frame_offset[i], size);
  364. if (samples < 0) {
  365. av_log(s->avctx, AV_LOG_ERROR, "Error decoding an Opus frame.\n");
  366. if (s->avctx->err_recognition & AV_EF_EXPLODE)
  367. return samples;
  368. for (j = 0; j < s->output_channels; j++)
  369. memset(s->out[j], 0, s->packet.frame_duration * sizeof(float));
  370. samples = s->packet.frame_duration;
  371. }
  372. output_samples += samples;
  373. for (j = 0; j < s->output_channels; j++)
  374. s->out[j] += samples;
  375. s->out_size -= samples * sizeof(float);
  376. }
  377. finish:
  378. s->out[0] = s->out[1] = NULL;
  379. s->out_size = 0;
  380. return output_samples;
  381. }
  382. static int opus_decode_packet(AVCodecContext *avctx, void *data,
  383. int *got_frame_ptr, AVPacket *avpkt)
  384. {
  385. OpusContext *c = avctx->priv_data;
  386. AVFrame *frame = data;
  387. const uint8_t *buf = avpkt->data;
  388. int buf_size = avpkt->size;
  389. int coded_samples = 0;
  390. int decoded_samples = 0;
  391. int i, ret;
  392. int delayed_samples = 0;
  393. for (i = 0; i < c->nb_streams; i++) {
  394. OpusStreamContext *s = &c->streams[i];
  395. s->out[0] =
  396. s->out[1] = NULL;
  397. delayed_samples = FFMAX(delayed_samples, s->delayed_samples);
  398. }
  399. /* decode the header of the first sub-packet to find out the sample count */
  400. if (buf) {
  401. OpusPacket *pkt = &c->streams[0].packet;
  402. ret = ff_opus_parse_packet(pkt, buf, buf_size, c->nb_streams > 1);
  403. if (ret < 0) {
  404. av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
  405. return ret;
  406. }
  407. coded_samples += pkt->frame_count * pkt->frame_duration;
  408. c->streams[0].silk_samplerate = get_silk_samplerate(pkt->config);
  409. }
  410. frame->nb_samples = coded_samples + delayed_samples;
  411. /* no input or buffered data => nothing to do */
  412. if (!frame->nb_samples) {
  413. *got_frame_ptr = 0;
  414. return 0;
  415. }
  416. /* setup the data buffers */
  417. ret = ff_get_buffer(avctx, frame, 0);
  418. if (ret < 0)
  419. return ret;
  420. frame->nb_samples = 0;
  421. for (i = 0; i < avctx->channels; i++) {
  422. ChannelMap *map = &c->channel_maps[i];
  423. if (!map->copy)
  424. c->streams[map->stream_idx].out[map->channel_idx] = (float*)frame->extended_data[i];
  425. }
  426. for (i = 0; i < c->nb_streams; i++)
  427. c->streams[i].out_size = frame->linesize[0];
  428. /* decode each sub-packet */
  429. for (i = 0; i < c->nb_streams; i++) {
  430. OpusStreamContext *s = &c->streams[i];
  431. if (i && buf) {
  432. ret = ff_opus_parse_packet(&s->packet, buf, buf_size, i != c->nb_streams - 1);
  433. if (ret < 0) {
  434. av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
  435. return ret;
  436. }
  437. if (coded_samples != s->packet.frame_count * s->packet.frame_duration) {
  438. av_log(avctx, AV_LOG_ERROR,
  439. "Mismatching coded sample count in substream %d.\n", i);
  440. return AVERROR_INVALIDDATA;
  441. }
  442. s->silk_samplerate = get_silk_samplerate(s->packet.config);
  443. }
  444. ret = opus_decode_subpacket(&c->streams[i], buf,
  445. s->packet.data_size, coded_samples);
  446. if (ret < 0)
  447. return ret;
  448. if (decoded_samples && ret != decoded_samples) {
  449. av_log(avctx, AV_LOG_ERROR, "Different numbers of decoded samples "
  450. "in a multi-channel stream\n");
  451. return AVERROR_INVALIDDATA;
  452. }
  453. decoded_samples = ret;
  454. buf += s->packet.packet_size;
  455. buf_size -= s->packet.packet_size;
  456. }
  457. for (i = 0; i < avctx->channels; i++) {
  458. ChannelMap *map = &c->channel_maps[i];
  459. /* handle copied channels */
  460. if (map->copy) {
  461. memcpy(frame->extended_data[i],
  462. frame->extended_data[map->copy_idx],
  463. frame->linesize[0]);
  464. } else if (map->silence) {
  465. memset(frame->extended_data[i], 0, frame->linesize[0]);
  466. }
  467. if (c->gain_i) {
  468. c->fdsp->vector_fmul_scalar((float*)frame->extended_data[i],
  469. (float*)frame->extended_data[i],
  470. c->gain, FFALIGN(decoded_samples, 8));
  471. }
  472. }
  473. frame->nb_samples = decoded_samples;
  474. *got_frame_ptr = !!decoded_samples;
  475. return avpkt->size;
  476. }
  477. static av_cold void opus_decode_flush(AVCodecContext *ctx)
  478. {
  479. OpusContext *c = ctx->priv_data;
  480. int i;
  481. for (i = 0; i < c->nb_streams; i++) {
  482. OpusStreamContext *s = &c->streams[i];
  483. memset(&s->packet, 0, sizeof(s->packet));
  484. s->delayed_samples = 0;
  485. if (s->celt_delay)
  486. av_audio_fifo_drain(s->celt_delay, av_audio_fifo_size(s->celt_delay));
  487. swr_close(s->swr);
  488. ff_silk_flush(s->silk);
  489. ff_celt_flush(s->celt);
  490. }
  491. }
  492. static av_cold int opus_decode_close(AVCodecContext *avctx)
  493. {
  494. OpusContext *c = avctx->priv_data;
  495. int i;
  496. for (i = 0; i < c->nb_streams; i++) {
  497. OpusStreamContext *s = &c->streams[i];
  498. ff_silk_free(&s->silk);
  499. ff_celt_free(&s->celt);
  500. av_freep(&s->out_dummy);
  501. s->out_dummy_allocated_size = 0;
  502. av_audio_fifo_free(s->celt_delay);
  503. swr_free(&s->swr);
  504. }
  505. av_freep(&c->streams);
  506. c->nb_streams = 0;
  507. av_freep(&c->channel_maps);
  508. av_freep(&c->fdsp);
  509. return 0;
  510. }
  511. static av_cold int opus_decode_init(AVCodecContext *avctx)
  512. {
  513. OpusContext *c = avctx->priv_data;
  514. int ret, i, j;
  515. avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
  516. avctx->sample_rate = 48000;
  517. c->fdsp = avpriv_float_dsp_alloc(0);
  518. if (!c->fdsp)
  519. return AVERROR(ENOMEM);
  520. /* find out the channel configuration */
  521. ret = ff_opus_parse_extradata(avctx, c);
  522. if (ret < 0)
  523. return ret;
  524. /* allocate and init each independent decoder */
  525. c->streams = av_mallocz_array(c->nb_streams, sizeof(*c->streams));
  526. if (!c->streams) {
  527. c->nb_streams = 0;
  528. ret = AVERROR(ENOMEM);
  529. goto fail;
  530. }
  531. for (i = 0; i < c->nb_streams; i++) {
  532. OpusStreamContext *s = &c->streams[i];
  533. uint64_t layout;
  534. s->output_channels = (i < c->nb_stereo_streams) ? 2 : 1;
  535. s->avctx = avctx;
  536. for (j = 0; j < s->output_channels; j++) {
  537. s->silk_output[j] = s->silk_buf[j];
  538. s->celt_output[j] = s->celt_buf[j];
  539. s->redundancy_output[j] = s->redundancy_buf[j];
  540. }
  541. s->fdsp = c->fdsp;
  542. s->swr =swr_alloc();
  543. if (!s->swr)
  544. goto fail;
  545. layout = (s->output_channels == 1) ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
  546. av_opt_set_int(s->swr, "in_sample_fmt", avctx->sample_fmt, 0);
  547. av_opt_set_int(s->swr, "out_sample_fmt", avctx->sample_fmt, 0);
  548. av_opt_set_int(s->swr, "in_channel_layout", layout, 0);
  549. av_opt_set_int(s->swr, "out_channel_layout", layout, 0);
  550. av_opt_set_int(s->swr, "out_sample_rate", avctx->sample_rate, 0);
  551. av_opt_set_int(s->swr, "filter_size", 16, 0);
  552. ret = ff_silk_init(avctx, &s->silk, s->output_channels);
  553. if (ret < 0)
  554. goto fail;
  555. ret = ff_celt_init(avctx, &s->celt, s->output_channels);
  556. if (ret < 0)
  557. goto fail;
  558. s->celt_delay = av_audio_fifo_alloc(avctx->sample_fmt,
  559. s->output_channels, 1024);
  560. if (!s->celt_delay) {
  561. ret = AVERROR(ENOMEM);
  562. goto fail;
  563. }
  564. }
  565. return 0;
  566. fail:
  567. opus_decode_close(avctx);
  568. return ret;
  569. }
  570. AVCodec ff_opus_decoder = {
  571. .name = "opus",
  572. .long_name = NULL_IF_CONFIG_SMALL("Opus"),
  573. .type = AVMEDIA_TYPE_AUDIO,
  574. .id = AV_CODEC_ID_OPUS,
  575. .priv_data_size = sizeof(OpusContext),
  576. .init = opus_decode_init,
  577. .close = opus_decode_close,
  578. .decode = opus_decode_packet,
  579. .flush = opus_decode_flush,
  580. .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
  581. };