You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

732 lines
24KB

  1. /*
  2. * Opus decoder
  3. * Copyright (c) 2012 Andrew D'Addesio
  4. * Copyright (c) 2013-2014 Mozilla Corporation
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file
  24. * Opus decoder
  25. * @author Andrew D'Addesio, Anton Khirnov
  26. *
  27. * Codec homepage: http://opus-codec.org/
  28. * Specification: http://tools.ietf.org/html/rfc6716
  29. * Ogg Opus specification: https://tools.ietf.org/html/draft-ietf-codec-oggopus-03
  30. *
  31. * Ogg-contained .opus files can be produced with opus-tools:
  32. * http://git.xiph.org/?p=opus-tools.git
  33. */
  34. #include <stdint.h>
  35. #include "libavutil/attributes.h"
  36. #include "libavutil/audio_fifo.h"
  37. #include "libavutil/channel_layout.h"
  38. #include "libavutil/opt.h"
  39. #include "libswresample/swresample.h"
  40. #include "avcodec.h"
  41. #include "get_bits.h"
  42. #include "internal.h"
  43. #include "mathops.h"
  44. #include "opus.h"
  45. #include "opustab.h"
  46. #include "opus_celt.h"
/* SILK frame duration in milliseconds, indexed by the 4-bit TOC config
 * value of SILK and Hybrid packets (used as
 * silk_frame_duration_ms[s->packet.config] below) */
static const uint16_t silk_frame_duration_ms[16] = {
    10, 20, 40, 60,
    10, 20, 40, 60,
    10, 20, 40, 60,
    10, 20,
    10, 20,
};
/* number of samples of silence to feed to the resampler
 * at the beginning, indexed by s->packet.bandwidth */
static const int silk_resample_delay[] = {
    4, 8, 11, 11, 11
};
  59. static int get_silk_samplerate(int config)
  60. {
  61. if (config < 4)
  62. return 8000;
  63. else if (config < 8)
  64. return 12000;
  65. return 16000;
  66. }
  67. static void opus_fade(float *out,
  68. const float *in1, const float *in2,
  69. const float *window, int len)
  70. {
  71. int i;
  72. for (i = 0; i < len; i++)
  73. out[i] = in2[i] * window[i] + in1[i] * (1.0 - window[i]);
  74. }
/**
 * Drain the SILK resampler into the current output buffers.
 *
 * Pulls exactly nb_samples delayed samples out of the resampler, mixes in
 * any CELT samples still buffered from a previous Hybrid frame, finishes a
 * pending redundancy crossfade, and advances the output pointers.
 *
 * @param nb_samples number of samples expected from the resampler flush
 * @return 0 on success, a negative error code on failure
 */
static int opus_flush_resample(OpusStreamContext *s, int nb_samples)
{
    int celt_size = av_audio_fifo_size(s->celt_delay);
    int ret, i;

    /* flush the resampler: no new input, only buffered state */
    ret = swr_convert(s->swr,
                      (uint8_t**)s->cur_out, nb_samples,
                      NULL, 0);
    if (ret < 0)
        return ret;
    else if (ret != nb_samples) {
        av_log(s->avctx, AV_LOG_ERROR, "Wrong number of flushed samples: %d\n",
               ret);
        return AVERROR_BUG;
    }

    /* add the CELT samples delayed from the previous Hybrid frame on top
     * of the flushed SILK output */
    if (celt_size) {
        if (celt_size != nb_samples) {
            av_log(s->avctx, AV_LOG_ERROR, "Wrong number of CELT delay samples.\n");
            return AVERROR_BUG;
        }
        av_audio_fifo_read(s->celt_delay, (void**)s->celt_output, nb_samples);
        for (i = 0; i < s->output_channels; i++) {
            s->fdsp->vector_fmac_scalar(s->cur_out[i],
                                        s->celt_output[i], 1.0,
                                        nb_samples);
        }
    }

    /* finish a redundancy crossfade started on a previous frame */
    if (s->redundancy_idx) {
        for (i = 0; i < s->output_channels; i++)
            opus_fade(s->cur_out[i], s->cur_out[i],
                      s->redundancy_output[i] + 120 + s->redundancy_idx,
                      ff_celt_window2 + s->redundancy_idx, 120 - s->redundancy_idx);
        s->redundancy_idx = 0;
    }

    /* advance the output pointers past the flushed samples */
    s->cur_out[0] += nb_samples;
    s->cur_out[1] += nb_samples;
    s->remaining_out_size -= nb_samples * sizeof(float);

    return 0;
}
/**
 * (Re)open the SILK resampler for the current SILK sample rate and prime
 * it with a bandwidth-dependent number of silent samples (see
 * silk_resample_delay) so its delay line is pre-filled.
 *
 * @return 0 on success, a negative error code on failure
 */
static int opus_init_resample(OpusStreamContext *s)
{
    static const float delay[16] = { 0.0 };
    const uint8_t *delayptr[2] = { (uint8_t*)delay, (uint8_t*)delay };
    int ret;

    /* only the input rate changes between packets; the other swr options
     * are set once at init time */
    av_opt_set_int(s->swr, "in_sample_rate", s->silk_samplerate, 0);
    ret = swr_init(s->swr);
    if (ret < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Error opening the resampler.\n");
        return ret;
    }

    /* feed initial silence to account for the resampler delay */
    ret = swr_convert(s->swr,
                      NULL, 0,
                      delayptr, silk_resample_delay[s->packet.bandwidth]);
    if (ret < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Error feeding initial silence to the resampler.\n");
        return ret;
    }

    return 0;
}
/**
 * Decode an embedded CELT redundancy frame into s->redundancy_output.
 *
 * The redundancy data gets its own range coder and always decodes
 * 240 samples (5 ms at 48 kHz), starting from band 0.
 *
 * @param data start of the redundancy payload
 * @param size payload size in bytes
 * @return 0 on success, a negative error code on failure
 */
static int opus_decode_redundancy(OpusStreamContext *s, const uint8_t *data, int size)
{
    int ret = ff_opus_rc_dec_init(&s->redundancy_rc, data, size);
    if (ret < 0)
        goto fail;
    /* raw bits are read backwards from the end of the payload */
    ff_opus_rc_dec_raw_init(&s->redundancy_rc, data + size, size);

    ret = ff_celt_decode_frame(s->celt, &s->redundancy_rc,
                               s->redundancy_output,
                               s->packet.stereo + 1, 240,
                               0, ff_celt_band_end[s->packet.bandwidth]);
    if (ret < 0)
        goto fail;

    return 0;
fail:
    av_log(s->avctx, AV_LOG_ERROR, "Error decoding the redundancy frame.\n");
    return ret;
}
/**
 * Decode one Opus frame (SILK, CELT or Hybrid) into s->cur_out.
 *
 * In Hybrid mode the SILK output is resampled to 48 kHz and the CELT
 * output is added on top; CELT samples with no matching SILK samples yet
 * (because of the resampler delay) are buffered in s->celt_delay for the
 * next frame. In-band redundancy, when present, is crossfaded in over
 * 120 samples.
 *
 * @param data frame payload
 * @param size payload size in bytes
 * @return number of decoded samples at 48 kHz, or a negative error code
 */
static int opus_decode_frame(OpusStreamContext *s, const uint8_t *data, int size)
{
    int samples = s->packet.frame_duration;
    int redundancy = 0;
    int redundancy_size, redundancy_pos;
    int ret, i, consumed;
    int delayed_samples = s->delayed_samples;

    ret = ff_opus_rc_dec_init(&s->rc, data, size);
    if (ret < 0)
        return ret;

    /* decode the silk frame */
    if (s->packet.mode == OPUS_MODE_SILK || s->packet.mode == OPUS_MODE_HYBRID) {
        if (!swr_is_initialized(s->swr)) {
            ret = opus_init_resample(s);
            if (ret < 0)
                return ret;
        }

        /* in Hybrid mode the SILK layer is capped at wideband */
        samples = ff_silk_decode_superframe(s->silk, &s->rc, s->silk_output,
                                            FFMIN(s->packet.bandwidth, OPUS_BANDWIDTH_WIDEBAND),
                                            s->packet.stereo + 1,
                                            silk_frame_duration_ms[s->packet.config]);
        if (samples < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error decoding a SILK frame.\n");
            return samples;
        }

        /* resample to 48 kHz; the resampler delay means we may get fewer
         * samples than a full frame, the shortfall is tracked in
         * delayed_samples */
        samples = swr_convert(s->swr,
                              (uint8_t**)s->cur_out, s->packet.frame_duration,
                              (const uint8_t**)s->silk_output, samples);
        if (samples < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error resampling SILK data.\n");
            return samples;
        }
        av_assert2((samples & 7) == 0);
        s->delayed_samples += s->packet.frame_duration - samples;
    } else
        ff_silk_flush(s->silk);

    // decode redundancy information
    consumed = opus_rc_tell(&s->rc);
    if (s->packet.mode == OPUS_MODE_HYBRID && consumed + 37 <= size * 8)
        redundancy = ff_opus_rc_dec_log(&s->rc, 12);
    else if (s->packet.mode == OPUS_MODE_SILK && consumed + 17 <= size * 8)
        redundancy = 1;

    if (redundancy) {
        /* redundancy_pos != 0: redundancy precedes this frame's data;
         * otherwise it is appended after it */
        redundancy_pos = ff_opus_rc_dec_log(&s->rc, 1);

        if (s->packet.mode == OPUS_MODE_HYBRID)
            redundancy_size = ff_opus_rc_dec_uint(&s->rc, 256) + 2;
        else
            redundancy_size = size - (consumed + 7) / 8;
        size -= redundancy_size;
        if (size < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid redundancy frame size.\n");
            return AVERROR_INVALIDDATA;
        }

        if (redundancy_pos) {
            ret = opus_decode_redundancy(s, data + size, redundancy_size);
            if (ret < 0)
                return ret;
            /* the CELT state must not carry over into the main frame */
            ff_celt_flush(s->celt);
        }
    }

    /* decode the CELT frame */
    if (s->packet.mode == OPUS_MODE_CELT || s->packet.mode == OPUS_MODE_HYBRID) {
        float *out_tmp[2] = { s->cur_out[0], s->cur_out[1] };
        /* in Hybrid mode CELT decodes into a scratch buffer so it can be
         * mixed on top of the SILK output */
        float **dst = (s->packet.mode == OPUS_MODE_CELT) ?
                      out_tmp : s->celt_output;
        int celt_output_samples = samples;
        int delay_samples = av_audio_fifo_size(s->celt_delay);

        if (delay_samples) {
            if (s->packet.mode == OPUS_MODE_HYBRID) {
                /* mix in CELT samples buffered from the previous Hybrid
                 * frame before decoding new ones */
                av_audio_fifo_read(s->celt_delay, (void**)s->celt_output, delay_samples);

                for (i = 0; i < s->output_channels; i++) {
                    s->fdsp->vector_fmac_scalar(out_tmp[i], s->celt_output[i], 1.0,
                                                delay_samples);
                    out_tmp[i] += delay_samples;
                }
                celt_output_samples -= delay_samples;
            } else {
                av_log(s->avctx, AV_LOG_WARNING,
                       "Spurious CELT delay samples present.\n");
                av_audio_fifo_drain(s->celt_delay, delay_samples);
                if (s->avctx->err_recognition & AV_EF_EXPLODE)
                    return AVERROR_BUG;
            }
        }

        /* CELT reads raw bits backwards from the end of the frame */
        ff_opus_rc_dec_raw_init(&s->rc, data + size, size);

        /* in Hybrid mode, CELT starts at band 17 (lower bands come from SILK) */
        ret = ff_celt_decode_frame(s->celt, &s->rc, dst,
                                   s->packet.stereo + 1,
                                   s->packet.frame_duration,
                                   (s->packet.mode == OPUS_MODE_HYBRID) ? 17 : 0,
                                   ff_celt_band_end[s->packet.bandwidth]);
        if (ret < 0)
            return ret;

        if (s->packet.mode == OPUS_MODE_HYBRID) {
            int celt_delay = s->packet.frame_duration - celt_output_samples;
            void *delaybuf[2] = { s->celt_output[0] + celt_output_samples,
                                  s->celt_output[1] + celt_output_samples };

            /* add the CELT output on top of the resampled SILK output */
            for (i = 0; i < s->output_channels; i++) {
                s->fdsp->vector_fmac_scalar(out_tmp[i],
                                            s->celt_output[i], 1.0,
                                            celt_output_samples);
            }

            /* buffer the CELT tail that has no matching SILK samples yet */
            ret = av_audio_fifo_write(s->celt_delay, delaybuf, celt_delay);
            if (ret < 0)
                return ret;
        }
    } else
        ff_celt_flush(s->celt);

    /* finish a redundancy crossfade carried over from the previous frame */
    if (s->redundancy_idx) {
        for (i = 0; i < s->output_channels; i++)
            opus_fade(s->cur_out[i], s->cur_out[i],
                      s->redundancy_output[i] + 120 + s->redundancy_idx,
                      ff_celt_window2 + s->redundancy_idx, 120 - s->redundancy_idx);
        s->redundancy_idx = 0;
    }

    if (redundancy) {
        if (!redundancy_pos) {
            /* redundancy at the end: crossfade the last 120 samples of
             * this frame into the redundancy output; any part that did
             * not fit (delayed_samples) is continued next frame via
             * s->redundancy_idx */
            ff_celt_flush(s->celt);
            ret = opus_decode_redundancy(s, data + size, redundancy_size);
            if (ret < 0)
                return ret;

            for (i = 0; i < s->output_channels; i++) {
                opus_fade(s->cur_out[i] + samples - 120 + delayed_samples,
                          s->cur_out[i] + samples - 120 + delayed_samples,
                          s->redundancy_output[i] + 120,
                          ff_celt_window2, 120 - delayed_samples);
                if (delayed_samples)
                    s->redundancy_idx = 120 - delayed_samples;
            }
        } else {
            /* redundancy at the start: overwrite the first 120 samples,
             * then crossfade from redundancy back into the frame data */
            for (i = 0; i < s->output_channels; i++) {
                memcpy(s->cur_out[i] + delayed_samples, s->redundancy_output[i], 120 * sizeof(float));
                opus_fade(s->cur_out[i] + 120 + delayed_samples,
                          s->redundancy_output[i] + 120,
                          s->cur_out[i] + 120 + delayed_samples,
                          ff_celt_window2, 120);
            }
        }
    }

    return samples;
}
/**
 * Decode all frames of a single stream's sub-packet into s->out,
 * flushing the SILK resampler first when its configuration became stale.
 *
 * A NULL buf means "final flush": only the delayed samples are drained.
 * (buf_size and nb_samples are currently unused by the body.)
 *
 * @return total number of samples written to the output, or a negative
 *         error code
 */
static int opus_decode_subpacket(OpusStreamContext *s,
                                 const uint8_t *buf, int buf_size,
                                 int nb_samples)
{
    int output_samples = 0;
    int flush_needed   = 0;
    int i, j, ret;

    s->cur_out[0] = s->out[0];
    s->cur_out[1] = s->out[1];
    s->remaining_out_size = s->out_size;

    /* check if we need to flush the resampler */
    if (swr_is_initialized(s->swr)) {
        if (buf) {
            int64_t cur_samplerate;

            av_opt_get_int(s->swr, "in_sample_rate", 0, &cur_samplerate);

            /* a switch to CELT mode or a SILK sample-rate change makes
             * the current resampler state invalid */
            flush_needed = (s->packet.mode == OPUS_MODE_CELT) || (cur_samplerate != s->silk_samplerate);
        } else {
            flush_needed = !!s->delayed_samples;
        }
    }

    if (!buf && !flush_needed)
        return 0;

    /* use dummy output buffers if the channel is not mapped to anything */
    if (!s->cur_out[0] ||
        (s->output_channels == 2 && !s->cur_out[1])) {
        av_fast_malloc(&s->out_dummy, &s->out_dummy_allocated_size,
                       s->remaining_out_size);
        if (!s->out_dummy)
            return AVERROR(ENOMEM);
        if (!s->cur_out[0])
            s->cur_out[0] = s->out_dummy;
        if (!s->cur_out[1])
            s->cur_out[1] = s->out_dummy;
    }

    /* flush the resampler if necessary */
    if (flush_needed) {
        ret = opus_flush_resample(s, s->delayed_samples);
        if (ret < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error flushing the resampler.\n");
            return ret;
        }
        swr_close(s->swr);
        output_samples += s->delayed_samples;
        s->delayed_samples = 0;

        if (!buf)
            goto finish;
    }

    /* decode all the frames in the packet */
    for (i = 0; i < s->packet.frame_count; i++) {
        int size = s->packet.frame_size[i];
        int samples = opus_decode_frame(s, buf + s->packet.frame_offset[i], size);

        if (samples < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error decoding an Opus frame.\n");
            if (s->avctx->err_recognition & AV_EF_EXPLODE)
                return samples;

            /* conceal the broken frame with silence of the nominal
             * frame duration */
            for (j = 0; j < s->output_channels; j++)
                memset(s->cur_out[j], 0, s->packet.frame_duration * sizeof(float));
            samples = s->packet.frame_duration;
        }
        output_samples += samples;

        for (j = 0; j < s->output_channels; j++)
            s->cur_out[j] += samples;
        s->remaining_out_size -= samples * sizeof(float);
    }

finish:
    s->cur_out[0] = s->cur_out[1] = NULL;
    s->remaining_out_size = 0;

    return output_samples;
}
/**
 * Decode one (possibly multistream) Opus packet into a planar-float
 * AVFrame.
 *
 * Every substream is decoded independently; streams that produced more
 * samples than the shortest one get the excess buffered in their
 * sync_buffer for the next call. An empty packet drains the remaining
 * delayed samples.
 *
 * @return avpkt->size on success, or a negative error code
 */
static int opus_decode_packet(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
{
    OpusContext *c = avctx->priv_data;
    AVFrame *frame = data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int coded_samples = 0;
    int decoded_samples = INT_MAX;
    int delayed_samples = 0;
    int i, ret;

    /* calculate the number of delayed samples */
    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];
        s->out[0] =
        s->out[1] = NULL;
        delayed_samples = FFMAX(delayed_samples,
                                s->delayed_samples + av_audio_fifo_size(s->sync_buffer));
    }

    /* decode the header of the first sub-packet to find out the sample count */
    if (buf) {
        OpusPacket *pkt = &c->streams[0].packet;
        ret = ff_opus_parse_packet(pkt, buf, buf_size, c->nb_streams > 1);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
            return ret;
        }
        coded_samples += pkt->frame_count * pkt->frame_duration;
        c->streams[0].silk_samplerate = get_silk_samplerate(pkt->config);
    }

    frame->nb_samples = coded_samples + delayed_samples;

    /* no input or buffered data => nothing to do */
    if (!frame->nb_samples) {
        *got_frame_ptr = 0;
        return 0;
    }

    /* setup the data buffers */
    ret = ff_get_buffer(avctx, frame, 0);
    if (ret < 0)
        return ret;
    frame->nb_samples = 0;

    /* point each stream at the frame channels it is mapped to */
    for (i = 0; i < avctx->channels; i++) {
        ChannelMap *map = &c->channel_maps[i];
        if (!map->copy)
            c->streams[map->stream_idx].out[map->channel_idx] = (float*)frame->extended_data[i];
    }

    /* read the data from the sync buffers */
    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];
        float **out = s->out;
        int sync_size = av_audio_fifo_size(s->sync_buffer);

        float sync_dummy[32];
        /* bit 0/1 set => channel 0/1 is unmapped and drains into the
         * scratch buffer */
        int out_dummy = (!out[0]) | ((!out[1]) << 1);

        if (!out[0])
            out[0] = sync_dummy;
        if (!out[1])
            out[1] = sync_dummy;
        if (out_dummy && sync_size > FF_ARRAY_ELEMS(sync_dummy))
            return AVERROR_BUG;

        ret = av_audio_fifo_read(s->sync_buffer, (void**)out, sync_size);
        if (ret < 0)
            return ret;

        if (out_dummy & 1)
            out[0] = NULL;
        else
            out[0] += ret;
        if (out_dummy & 2)
            out[1] = NULL;
        else
            out[1] += ret;

        /* remaining per-channel space in the frame, in bytes */
        s->out_size = frame->linesize[0] - ret * sizeof(float);
    }

    /* decode each sub-packet */
    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];

        if (i && buf) {
            ret = ff_opus_parse_packet(&s->packet, buf, buf_size, i != c->nb_streams - 1);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
                return ret;
            }
            /* all substreams must code the same number of samples */
            if (coded_samples != s->packet.frame_count * s->packet.frame_duration) {
                av_log(avctx, AV_LOG_ERROR,
                       "Mismatching coded sample count in substream %d.\n", i);
                return AVERROR_INVALIDDATA;
            }

            s->silk_samplerate = get_silk_samplerate(s->packet.config);
        }

        ret = opus_decode_subpacket(&c->streams[i], buf, s->packet.data_size,
                                    coded_samples);
        if (ret < 0)
            return ret;
        s->decoded_samples = ret;
        decoded_samples = FFMIN(decoded_samples, ret);

        buf += s->packet.packet_size;
        buf_size -= s->packet.packet_size;
    }

    /* buffer the extra samples */
    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];
        int buffer_samples = s->decoded_samples - decoded_samples;

        if (buffer_samples) {
            /* unmapped channels fall back to reading from the first
             * frame channel */
            float *buf[2] = { s->out[0] ? s->out[0] : (float*)frame->extended_data[0],
                              s->out[1] ? s->out[1] : (float*)frame->extended_data[0] };
            buf[0] += decoded_samples;
            buf[1] += decoded_samples;
            ret = av_audio_fifo_write(s->sync_buffer, (void**)buf, buffer_samples);
            if (ret < 0)
                return ret;
        }
    }

    for (i = 0; i < avctx->channels; i++) {
        ChannelMap *map = &c->channel_maps[i];

        /* handle copied channels */
        if (map->copy) {
            memcpy(frame->extended_data[i],
                   frame->extended_data[map->copy_idx],
                   frame->linesize[0]);
        } else if (map->silence) {
            memset(frame->extended_data[i], 0, frame->linesize[0]);
        }

        /* apply the output gain, if any */
        if (c->gain_i && decoded_samples > 0) {
            c->fdsp->vector_fmul_scalar((float*)frame->extended_data[i],
                                        (float*)frame->extended_data[i],
                                        c->gain, FFALIGN(decoded_samples, 8));
        }
    }

    frame->nb_samples = decoded_samples;
    *got_frame_ptr = !!decoded_samples;

    return avpkt->size;
}
  491. static av_cold void opus_decode_flush(AVCodecContext *ctx)
  492. {
  493. OpusContext *c = ctx->priv_data;
  494. int i;
  495. for (i = 0; i < c->nb_streams; i++) {
  496. OpusStreamContext *s = &c->streams[i];
  497. memset(&s->packet, 0, sizeof(s->packet));
  498. s->delayed_samples = 0;
  499. av_audio_fifo_drain(s->celt_delay, av_audio_fifo_size(s->celt_delay));
  500. swr_close(s->swr);
  501. av_audio_fifo_drain(s->sync_buffer, av_audio_fifo_size(s->sync_buffer));
  502. ff_silk_flush(s->silk);
  503. ff_celt_flush(s->celt);
  504. }
  505. }
  506. static av_cold int opus_decode_close(AVCodecContext *avctx)
  507. {
  508. OpusContext *c = avctx->priv_data;
  509. int i;
  510. for (i = 0; i < c->nb_streams; i++) {
  511. OpusStreamContext *s = &c->streams[i];
  512. ff_silk_free(&s->silk);
  513. ff_celt_free(&s->celt);
  514. av_freep(&s->out_dummy);
  515. s->out_dummy_allocated_size = 0;
  516. av_audio_fifo_free(s->sync_buffer);
  517. av_audio_fifo_free(s->celt_delay);
  518. swr_free(&s->swr);
  519. }
  520. av_freep(&c->streams);
  521. c->nb_streams = 0;
  522. av_freep(&c->channel_maps);
  523. av_freep(&c->fdsp);
  524. return 0;
  525. }
/**
 * Initialize the decoder: parse the channel configuration from the
 * extradata, then set up one independent SILK+CELT decoder (plus
 * resampler and FIFOs) per coded stream. Output is always planar float
 * at 48 kHz.
 */
static av_cold int opus_decode_init(AVCodecContext *avctx)
{
    OpusContext *c = avctx->priv_data;
    int ret, i, j;

    avctx->sample_fmt  = AV_SAMPLE_FMT_FLTP;
    avctx->sample_rate = 48000;

    c->fdsp = avpriv_float_dsp_alloc(0);
    if (!c->fdsp)
        return AVERROR(ENOMEM);

    /* find out the channel configuration */
    ret = ff_opus_parse_extradata(avctx, c);
    if (ret < 0) {
        av_freep(&c->fdsp);
        return ret;
    }

    /* allocate and init each independent decoder */
    c->streams = av_mallocz_array(c->nb_streams, sizeof(*c->streams));
    if (!c->streams) {
        c->nb_streams = 0;
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];
        uint64_t layout;

        /* the first nb_stereo_streams streams are stereo, the rest mono */
        s->output_channels = (i < c->nb_stereo_streams) ? 2 : 1;

        s->avctx = avctx;

        /* point the output pointers at the per-stream static buffers */
        for (j = 0; j < s->output_channels; j++) {
            s->silk_output[j]       = s->silk_buf[j];
            s->celt_output[j]       = s->celt_buf[j];
            s->redundancy_output[j] = s->redundancy_buf[j];
        }

        s->fdsp = c->fdsp;

        s->swr =swr_alloc();
        if (!s->swr) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        layout = (s->output_channels == 1) ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
        /* in_sample_rate is set later, per packet, in opus_init_resample() */
        av_opt_set_int(s->swr, "in_sample_fmt",      avctx->sample_fmt,  0);
        av_opt_set_int(s->swr, "out_sample_fmt",     avctx->sample_fmt,  0);
        av_opt_set_int(s->swr, "in_channel_layout",  layout,             0);
        av_opt_set_int(s->swr, "out_channel_layout", layout,             0);
        av_opt_set_int(s->swr, "out_sample_rate",    avctx->sample_rate, 0);
        av_opt_set_int(s->swr, "filter_size",        16,                 0);

        ret = ff_silk_init(avctx, &s->silk, s->output_channels);
        if (ret < 0)
            goto fail;

        ret = ff_celt_init(avctx, &s->celt, s->output_channels, c->apply_phase_inv);
        if (ret < 0)
            goto fail;

        s->celt_delay = av_audio_fifo_alloc(avctx->sample_fmt,
                                            s->output_channels, 1024);
        if (!s->celt_delay) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        s->sync_buffer = av_audio_fifo_alloc(avctx->sample_fmt,
                                             s->output_channels, 32);
        if (!s->sync_buffer) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    return 0;
fail:
    opus_decode_close(avctx);
    return ret;
}
#define OFFSET(x) offsetof(OpusContext, x)
#define AD AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM

/* user-settable decoder options */
static const AVOption opus_options[] = {
    { "apply_phase_inv", "Apply intensity stereo phase inversion", OFFSET(apply_phase_inv), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, AD },
    { NULL },
};

static const AVClass opus_class = {
    .class_name = "Opus Decoder",
    .item_name  = av_default_item_name,
    .option     = opus_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* public decoder definition registered with libavcodec */
AVCodec ff_opus_decoder = {
    .name            = "opus",
    .long_name       = NULL_IF_CONFIG_SMALL("Opus"),
    .priv_class      = &opus_class,
    .type            = AVMEDIA_TYPE_AUDIO,
    .id              = AV_CODEC_ID_OPUS,
    .priv_data_size  = sizeof(OpusContext),
    .init            = opus_decode_init,
    .close           = opus_decode_close,
    .decode          = opus_decode_packet,
    .flush           = opus_decode_flush,
    .capabilities    = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_CHANNEL_CONF,
};