You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

759 lines
24KB

  1. /*
  2. * Opus decoder
  3. * Copyright (c) 2012 Andrew D'Addesio
  4. * Copyright (c) 2013-2014 Mozilla Corporation
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file
  24. * Opus decoder
  25. * @author Andrew D'Addesio, Anton Khirnov
  26. *
  27. * Codec homepage: http://opus-codec.org/
  28. * Specification: http://tools.ietf.org/html/rfc6716
  29. * Ogg Opus specification: https://tools.ietf.org/html/draft-ietf-codec-oggopus-03
  30. *
  31. * Ogg-contained .opus files can be produced with opus-tools:
  32. * http://git.xiph.org/?p=opus-tools.git
  33. */
  34. #include <stdint.h>
  35. #include "libavutil/attributes.h"
  36. #include "libavutil/audio_fifo.h"
  37. #include "libavutil/channel_layout.h"
  38. #include "libavutil/opt.h"
  39. #include "libswresample/swresample.h"
  40. #include "avcodec.h"
  41. #include "celp_filters.h"
  42. #include "fft.h"
  43. #include "get_bits.h"
  44. #include "internal.h"
  45. #include "mathops.h"
  46. #include "opus.h"
/* Duration in ms of the SILK superframe for each of the 16 SILK/Hybrid
 * configuration values of the TOC byte (indexed by s->packet.config;
 * rows correspond to the NB/MB/WB SILK configs followed by the Hybrid
 * configs, which only allow 10 and 20 ms). */
static const uint16_t silk_frame_duration_ms[16] = {
    10, 20, 40, 60,
    10, 20, 40, 60,
    10, 20, 40, 60,
    10, 20,
    10, 20,
};
/* number of samples of silence to feed to the resampler
 * at the beginning, indexed by the packet bandwidth
 * (see opus_init_resample) */
static const int silk_resample_delay[] = {
    4, 8, 11, 11, 11
};
/* index of the first CELT band NOT coded, per bandwidth (indexed by enum
 * OpusBandwidth); passed as the end band to ff_celt_decode_frame() */
static const uint8_t celt_band_end[] = { 13, 17, 17, 19, 21 };
  60. static int get_silk_samplerate(int config)
  61. {
  62. if (config < 4)
  63. return 8000;
  64. else if (config < 8)
  65. return 12000;
  66. return 16000;
  67. }
  68. /**
  69. * Range decoder
  70. */
  71. static int opus_rc_init(OpusRangeCoder *rc, const uint8_t *data, int size)
  72. {
  73. int ret = init_get_bits8(&rc->gb, data, size);
  74. if (ret < 0)
  75. return ret;
  76. rc->range = 128;
  77. rc->value = 127 - get_bits(&rc->gb, 7);
  78. rc->total_read_bits = 9;
  79. opus_rc_normalize(rc);
  80. return 0;
  81. }
  82. static void opus_raw_init(OpusRangeCoder *rc, const uint8_t *rightend,
  83. unsigned int bytes)
  84. {
  85. rc->rb.position = rightend;
  86. rc->rb.bytes = bytes;
  87. rc->rb.cachelen = 0;
  88. rc->rb.cacheval = 0;
  89. }
  90. static void opus_fade(float *out,
  91. const float *in1, const float *in2,
  92. const float *window, int len)
  93. {
  94. int i;
  95. for (i = 0; i < len; i++)
  96. out[i] = in2[i] * window[i] + in1[i] * (1.0 - window[i]);
  97. }
/* Drain the SILK resampler's delayed samples into s->out, mixing in any
 * buffered CELT delay samples and finishing a pending redundancy fade.
 * nb_samples is expected to match both the resampler delay and the CELT
 * delay FIFO contents; a mismatch is treated as an internal error. */
static int opus_flush_resample(OpusStreamContext *s, int nb_samples)
{
    int celt_size = av_audio_fifo_size(s->celt_delay);
    int ret, i;

    /* pull the resampler's internal delay with no fresh input */
    ret = swr_convert(s->swr,
                      (uint8_t**)s->out, nb_samples,
                      NULL, 0);
    if (ret < 0)
        return ret;
    else if (ret != nb_samples) {
        av_log(s->avctx, AV_LOG_ERROR, "Wrong number of flushed samples: %d\n",
               ret);
        return AVERROR_BUG;
    }

    if (celt_size) {
        /* any stored CELT delay must cover exactly the flushed span */
        if (celt_size != nb_samples) {
            av_log(s->avctx, AV_LOG_ERROR, "Wrong number of CELT delay samples.\n");
            return AVERROR_BUG;
        }
        av_audio_fifo_read(s->celt_delay, (void**)s->celt_output, nb_samples);
        /* add (not overwrite) the CELT contribution onto the SILK output */
        for (i = 0; i < s->output_channels; i++) {
            s->fdsp->vector_fmac_scalar(s->out[i],
                                        s->celt_output[i], 1.0,
                                        nb_samples);
        }
    }

    if (s->redundancy_idx) {
        /* finish the crossfade with the redundancy frame left over from
         * the previous packet (120-sample window, partially consumed) */
        for (i = 0; i < s->output_channels; i++)
            opus_fade(s->out[i], s->out[i],
                      s->redundancy_output[i] + 120 + s->redundancy_idx,
                      ff_celt_window2 + s->redundancy_idx, 120 - s->redundancy_idx);
        s->redundancy_idx = 0;
    }

    /* advance the output pointers past what was just written */
    s->out[0] += nb_samples;
    s->out[1] += nb_samples;
    s->out_size -= nb_samples * sizeof(float);

    return 0;
}
/* (Re)open the SILK->48kHz resampler at the current SILK sample rate and
 * prime it with a bandwidth-dependent amount of silence so its delay
 * lines are in a known state (see silk_resample_delay). */
static int opus_init_resample(OpusStreamContext *s)
{
    /* 16 zero floats: enough for the largest silk_resample_delay entry */
    static const float delay[16] = { 0.0 };
    const uint8_t *delayptr[2] = { (uint8_t*)delay, (uint8_t*)delay };
    int ret;

    av_opt_set_int(s->swr, "in_sample_rate", s->silk_samplerate, 0);
    ret = swr_init(s->swr);
    if (ret < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Error opening the resampler.\n");
        return ret;
    }

    /* feed the silence without requesting output, so it only fills the
     * resampler's internal buffer */
    ret = swr_convert(s->swr,
                      NULL, 0,
                      delayptr, silk_resample_delay[s->packet.bandwidth]);
    if (ret < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Error feeding initial silence to the resampler.\n");
        return ret;
    }

    return 0;
}
  157. static int opus_decode_redundancy(OpusStreamContext *s, const uint8_t *data, int size)
  158. {
  159. int ret;
  160. enum OpusBandwidth bw = s->packet.bandwidth;
  161. if (s->packet.mode == OPUS_MODE_SILK &&
  162. bw == OPUS_BANDWIDTH_MEDIUMBAND)
  163. bw = OPUS_BANDWIDTH_WIDEBAND;
  164. ret = opus_rc_init(&s->redundancy_rc, data, size);
  165. if (ret < 0)
  166. goto fail;
  167. opus_raw_init(&s->redundancy_rc, data + size, size);
  168. ret = ff_celt_decode_frame(s->celt, &s->redundancy_rc,
  169. s->redundancy_output,
  170. s->packet.stereo + 1, 240,
  171. 0, celt_band_end[s->packet.bandwidth]);
  172. if (ret < 0)
  173. goto fail;
  174. return 0;
  175. fail:
  176. av_log(s->avctx, AV_LOG_ERROR, "Error decoding the redundancy frame.\n");
  177. return ret;
  178. }
/* Decode one Opus frame (SILK layer, optional redundancy, CELT layer)
 * into s->out.  Returns the number of decoded samples (which may be less
 * than the nominal frame duration while the resampler builds up delay)
 * or a negative AVERROR code. */
static int opus_decode_frame(OpusStreamContext *s, const uint8_t *data, int size)
{
    int samples = s->packet.frame_duration;
    int redundancy = 0;
    int redundancy_size, redundancy_pos;
    int ret, i, consumed;
    /* delay as of the previous frame; used below to align the
     * redundancy crossfade */
    int delayed_samples = s->delayed_samples;

    ret = opus_rc_init(&s->rc, data, size);
    if (ret < 0)
        return ret;

    /* decode the silk frame */
    if (s->packet.mode == OPUS_MODE_SILK || s->packet.mode == OPUS_MODE_HYBRID) {
        if (!swr_is_initialized(s->swr)) {
            ret = opus_init_resample(s);
            if (ret < 0)
                return ret;
        }

        /* in Hybrid mode, SILK is capped at wideband; CELT covers the rest */
        samples = ff_silk_decode_superframe(s->silk, &s->rc, s->silk_output,
                                            FFMIN(s->packet.bandwidth, OPUS_BANDWIDTH_WIDEBAND),
                                            s->packet.stereo + 1,
                                            silk_frame_duration_ms[s->packet.config]);
        if (samples < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error decoding a SILK frame.\n");
            return samples;
        }
        /* resample SILK output to 48 kHz; the resampler may retain some
         * samples as delay, so fewer than frame_duration can come out */
        samples = swr_convert(s->swr,
                              (uint8_t**)s->out, s->packet.frame_duration,
                              (const uint8_t**)s->silk_output, samples);
        if (samples < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error resampling SILK data.\n");
            return samples;
        }
        av_assert2((samples & 7) == 0);
        s->delayed_samples += s->packet.frame_duration - samples;
    } else
        ff_silk_flush(s->silk);

    // decode redundancy information
    consumed = opus_rc_tell(&s->rc);
    /* redundancy is only signalled if enough bits remain for it
     * (RFC 6716 §4.3: flag+pos+size in Hybrid; implicit in SILK) */
    if (s->packet.mode == OPUS_MODE_HYBRID && consumed + 37 <= size * 8)
        redundancy = opus_rc_p2model(&s->rc, 12);
    else if (s->packet.mode == OPUS_MODE_SILK && consumed + 17 <= size * 8)
        redundancy = 1;

    if (redundancy) {
        /* redundancy_pos: nonzero = redundancy precedes the main frame */
        redundancy_pos = opus_rc_p2model(&s->rc, 1);

        if (s->packet.mode == OPUS_MODE_HYBRID)
            redundancy_size = opus_rc_unimodel(&s->rc, 256) + 2;
        else
            redundancy_size = size - (consumed + 7) / 8;
        size -= redundancy_size;
        if (size < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid redundancy frame size.\n");
            return AVERROR_INVALIDDATA;
        }

        if (redundancy_pos) {
            /* redundancy sits at the end of the payload (data + size
             * after the subtraction above) but is played first */
            ret = opus_decode_redundancy(s, data + size, redundancy_size);
            if (ret < 0)
                return ret;
            ff_celt_flush(s->celt);
        }
    }

    /* decode the CELT frame */
    if (s->packet.mode == OPUS_MODE_CELT || s->packet.mode == OPUS_MODE_HYBRID) {
        float *out_tmp[2] = { s->out[0], s->out[1] };
        /* CELT-only writes straight to the output; Hybrid goes through
         * celt_output so it can be mixed with the SILK samples */
        float **dst = (s->packet.mode == OPUS_MODE_CELT) ?
                      out_tmp : s->celt_output;
        int celt_output_samples = samples;
        int delay_samples = av_audio_fifo_size(s->celt_delay);

        if (delay_samples) {
            if (s->packet.mode == OPUS_MODE_HYBRID) {
                /* mix in CELT samples held over from the previous frame
                 * (they lag behind SILK because of the resampler delay) */
                av_audio_fifo_read(s->celt_delay, (void**)s->celt_output, delay_samples);

                for (i = 0; i < s->output_channels; i++) {
                    s->fdsp->vector_fmac_scalar(out_tmp[i], s->celt_output[i], 1.0,
                                                delay_samples);
                    out_tmp[i] += delay_samples;
                }
                celt_output_samples -= delay_samples;
            } else {
                av_log(s->avctx, AV_LOG_WARNING,
                       "Spurious CELT delay samples present.\n");
                av_audio_fifo_drain(s->celt_delay, delay_samples);
                if (s->avctx->err_recognition & AV_EF_EXPLODE)
                    return AVERROR_BUG;
            }
        }

        /* raw bits are read backwards from the end of the (possibly
         * shrunk) main payload */
        opus_raw_init(&s->rc, data + size, size);

        /* in Hybrid mode the first 17 bands are skipped — SILK covers them */
        ret = ff_celt_decode_frame(s->celt, &s->rc, dst,
                                   s->packet.stereo + 1,
                                   s->packet.frame_duration,
                                   (s->packet.mode == OPUS_MODE_HYBRID) ? 17 : 0,
                                   celt_band_end[s->packet.bandwidth]);
        if (ret < 0)
            return ret;

        if (s->packet.mode == OPUS_MODE_HYBRID) {
            int celt_delay = s->packet.frame_duration - celt_output_samples;
            void *delaybuf[2] = { s->celt_output[0] + celt_output_samples,
                                  s->celt_output[1] + celt_output_samples };

            /* mix the part of the CELT frame that lines up with the SILK
             * output; the trailing celt_delay samples are saved for the
             * next frame */
            for (i = 0; i < s->output_channels; i++) {
                s->fdsp->vector_fmac_scalar(out_tmp[i],
                                            s->celt_output[i], 1.0,
                                            celt_output_samples);
            }

            ret = av_audio_fifo_write(s->celt_delay, delaybuf, celt_delay);
            if (ret < 0)
                return ret;
        }
    } else
        ff_celt_flush(s->celt);

    /* finish a crossfade left over from the previous frame's redundancy */
    if (s->redundancy_idx) {
        for (i = 0; i < s->output_channels; i++)
            opus_fade(s->out[i], s->out[i],
                      s->redundancy_output[i] + 120 + s->redundancy_idx,
                      ff_celt_window2 + s->redundancy_idx, 120 - s->redundancy_idx);
        s->redundancy_idx = 0;
    }
    if (redundancy) {
        if (!redundancy_pos) {
            /* redundancy after the main frame: decode it now and fade
             * the tail of this frame into it; any part of the 120-sample
             * window that does not fit is carried over via redundancy_idx */
            ff_celt_flush(s->celt);
            ret = opus_decode_redundancy(s, data + size, redundancy_size);
            if (ret < 0)
                return ret;

            for (i = 0; i < s->output_channels; i++) {
                opus_fade(s->out[i] + samples - 120 + delayed_samples,
                          s->out[i] + samples - 120 + delayed_samples,
                          s->redundancy_output[i] + 120,
                          ff_celt_window2, 120 - delayed_samples);
                if (delayed_samples)
                    s->redundancy_idx = 120 - delayed_samples;
            }
        } else {
            /* redundancy before the main frame: copy its first half in,
             * then fade from it into the freshly decoded frame */
            for (i = 0; i < s->output_channels; i++) {
                memcpy(s->out[i] + delayed_samples, s->redundancy_output[i], 120 * sizeof(float));
                opus_fade(s->out[i] + 120 + delayed_samples,
                          s->redundancy_output[i] + 120,
                          s->out[i] + 120 + delayed_samples,
                          ff_celt_window2, 120);
            }
        }
    }

    return samples;
}
/* Decode all frames of one stream's sub-packet into out[0]/out[1]
 * (either may be NULL if the channel is unmapped).  A NULL buf means
 * "flush": only drain the resampler delay.  Returns the number of
 * samples written per channel, or a negative AVERROR code.
 * NOTE(review): buf_size and nb_samples are currently unused here —
 * sizes come from s->packet, filled in by the caller. */
static int opus_decode_subpacket(OpusStreamContext *s,
                                 const uint8_t *buf, int buf_size,
                                 float **out, int out_size,
                                 int nb_samples)
{
    int output_samples = 0;
    int flush_needed = 0;
    int i, j, ret;

    s->out[0] = out[0];
    s->out[1] = out[1];
    s->out_size = out_size;

    /* check if we need to flush the resampler */
    if (swr_is_initialized(s->swr)) {
        if (buf) {
            /* flush when switching to CELT-only mode or when the SILK
             * sample rate changed — the resampler must be reopened */
            int64_t cur_samplerate;
            av_opt_get_int(s->swr, "in_sample_rate", 0, &cur_samplerate);
            flush_needed = (s->packet.mode == OPUS_MODE_CELT) || (cur_samplerate != s->silk_samplerate);
        } else {
            flush_needed = !!s->delayed_samples;
        }
    }

    if (!buf && !flush_needed)
        return 0;

    /* use dummy output buffers if the channel is not mapped to anything */
    if (!s->out[0] ||
        (s->output_channels == 2 && !s->out[1])) {
        av_fast_malloc(&s->out_dummy, &s->out_dummy_allocated_size, s->out_size);
        if (!s->out_dummy)
            return AVERROR(ENOMEM);
        if (!s->out[0])
            s->out[0] = s->out_dummy;
        if (!s->out[1])
            s->out[1] = s->out_dummy;
    }

    /* flush the resampler if necessary */
    if (flush_needed) {
        ret = opus_flush_resample(s, s->delayed_samples);
        if (ret < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error flushing the resampler.\n");
            return ret;
        }
        swr_close(s->swr);
        output_samples += s->delayed_samples;
        s->delayed_samples = 0;

        if (!buf)
            goto finish;
    }

    /* decode all the frames in the packet */
    for (i = 0; i < s->packet.frame_count; i++) {
        int size = s->packet.frame_size[i];
        int samples = opus_decode_frame(s, buf + s->packet.frame_offset[i], size);

        if (samples < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "Error decoding an Opus frame.\n");
            if (s->avctx->err_recognition & AV_EF_EXPLODE)
                return samples;

            /* conceal the broken frame with silence and keep going */
            for (j = 0; j < s->output_channels; j++)
                memset(s->out[j], 0, s->packet.frame_duration * sizeof(float));
            samples = s->packet.frame_duration;
        }
        output_samples += samples;

        for (j = 0; j < s->output_channels; j++)
            s->out[j] += samples;
        s->out_size -= samples * sizeof(float);
    }

finish:
    s->out[0] = s->out[1] = NULL;
    s->out_size = 0;

    return output_samples;
}
/* Top-level decode callback: parse the multistream packet, decode each
 * stream's sub-packet, keep the streams sample-aligned via per-stream
 * sync FIFOs, then apply channel mapping and optional output gain. */
static int opus_decode_packet(AVCodecContext *avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
{
    OpusContext *c = avctx->priv_data;
    AVFrame *frame = data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int coded_samples = 0;
    /* minimum over all streams; that is what can be output in sync */
    int decoded_samples = INT_MAX;
    int delayed_samples = 0;
    int i, ret;

    /* calculate the number of delayed samples */
    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];
        s->out[0] =
        s->out[1] = NULL;
        delayed_samples = FFMAX(delayed_samples,
                                s->delayed_samples + av_audio_fifo_size(c->sync_buffers[i]));
    }

    /* decode the header of the first sub-packet to find out the sample count */
    if (buf) {
        OpusPacket *pkt = &c->streams[0].packet;
        ret = ff_opus_parse_packet(pkt, buf, buf_size, c->nb_streams > 1);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
            return ret;
        }
        coded_samples += pkt->frame_count * pkt->frame_duration;
        c->streams[0].silk_samplerate = get_silk_samplerate(pkt->config);
    }

    frame->nb_samples = coded_samples + delayed_samples;

    /* no input or buffered data => nothing to do */
    if (!frame->nb_samples) {
        *got_frame_ptr = 0;
        return 0;
    }

    /* setup the data buffers */
    ret = ff_get_buffer(avctx, frame, 0);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    frame->nb_samples = 0;

    /* point each stream's output channels into the frame according to
     * the channel map; unmapped entries stay NULL */
    memset(c->out, 0, c->nb_streams * 2 * sizeof(*c->out));
    for (i = 0; i < avctx->channels; i++) {
        ChannelMap *map = &c->channel_maps[i];
        if (!map->copy)
            c->out[2 * map->stream_idx + map->channel_idx] = (float*)frame->extended_data[i];
    }

    /* read the data from the sync buffers */
    for (i = 0; i < c->nb_streams; i++) {
        float **out = c->out + 2 * i;
        int sync_size = av_audio_fifo_size(c->sync_buffers[i]);

        float sync_dummy[32];
        int out_dummy = (!out[0]) | ((!out[1]) << 1);

        /* unmapped channels drain into a small scratch buffer */
        if (!out[0])
            out[0] = sync_dummy;
        if (!out[1])
            out[1] = sync_dummy;
        if (out_dummy && sync_size > FF_ARRAY_ELEMS(sync_dummy))
            return AVERROR_BUG;

        ret = av_audio_fifo_read(c->sync_buffers[i], (void**)out, sync_size);
        if (ret < 0)
            return ret;

        /* restore NULLs for dummy channels, advance real ones past the
         * samples just read */
        if (out_dummy & 1)
            out[0] = NULL;
        else
            out[0] += ret;
        if (out_dummy & 2)
            out[1] = NULL;
        else
            out[1] += ret;

        c->out_size[i] = frame->linesize[0] - ret * sizeof(float);
    }

    /* decode each sub-packet */
    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];

        if (i && buf) {
            ret = ff_opus_parse_packet(&s->packet, buf, buf_size, i != c->nb_streams - 1);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "Error parsing the packet header.\n");
                return ret;
            }
            /* all streams must code the same number of samples */
            if (coded_samples != s->packet.frame_count * s->packet.frame_duration) {
                av_log(avctx, AV_LOG_ERROR,
                       "Mismatching coded sample count in substream %d.\n", i);
                return AVERROR_INVALIDDATA;
            }

            s->silk_samplerate = get_silk_samplerate(s->packet.config);
        }

        ret = opus_decode_subpacket(&c->streams[i], buf, s->packet.data_size,
                                    c->out + 2 * i, c->out_size[i], coded_samples);
        if (ret < 0)
            return ret;
        c->decoded_samples[i] = ret;
        decoded_samples = FFMIN(decoded_samples, ret);

        buf += s->packet.packet_size;
        buf_size -= s->packet.packet_size;
    }

    /* buffer the extra samples */
    for (i = 0; i < c->nb_streams; i++) {
        int buffer_samples = c->decoded_samples[i] - decoded_samples;
        if (buffer_samples) {
            /* dummy channels have no destination pointer; point at the
             * frame data just to have a valid address to advance from */
            float *buf[2] = { c->out[2 * i + 0] ? c->out[2 * i + 0] : (float*)frame->extended_data[0],
                              c->out[2 * i + 1] ? c->out[2 * i + 1] : (float*)frame->extended_data[0] };
            buf[0] += buffer_samples;
            buf[1] += buffer_samples;
            ret = av_audio_fifo_write(c->sync_buffers[i], (void**)buf, buffer_samples);
            if (ret < 0)
                return ret;
        }
    }

    for (i = 0; i < avctx->channels; i++) {
        ChannelMap *map = &c->channel_maps[i];

        /* handle copied channels */
        if (map->copy) {
            memcpy(frame->extended_data[i],
                   frame->extended_data[map->copy_idx],
                   frame->linesize[0]);
        } else if (map->silence) {
            memset(frame->extended_data[i], 0, frame->linesize[0]);
        }

        /* apply the header-signalled output gain, if any */
        if (c->gain_i) {
            c->fdsp.vector_fmul_scalar((float*)frame->extended_data[i],
                                       (float*)frame->extended_data[i],
                                       c->gain, FFALIGN(decoded_samples, 8));
        }
    }

    frame->nb_samples = decoded_samples;
    *got_frame_ptr = !!decoded_samples;

    return avpkt->size;
}
/* Reset all per-stream decoder state (packet info, resampler, delay and
 * sync FIFOs, SILK/CELT internal state) for a seek/flush. */
static av_cold void opus_decode_flush(AVCodecContext *ctx)
{
    OpusContext *c = ctx->priv_data;
    int i;

    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];

        memset(&s->packet, 0, sizeof(s->packet));
        s->delayed_samples = 0;

        if (s->celt_delay)
            av_audio_fifo_drain(s->celt_delay, av_audio_fifo_size(s->celt_delay));
        swr_close(s->swr);

        av_audio_fifo_drain(c->sync_buffers[i], av_audio_fifo_size(c->sync_buffers[i]));

        ff_silk_flush(s->silk);
        ff_celt_flush(s->celt);
    }
}
/* Free all decoder resources; also used as the cleanup path when
 * opus_decode_init() fails part-way (hence the NULL checks on the
 * top-level arrays). */
static av_cold int opus_decode_close(AVCodecContext *avctx)
{
    OpusContext *c = avctx->priv_data;
    int i;

    for (i = 0; i < c->nb_streams; i++) {
        OpusStreamContext *s = &c->streams[i];

        ff_silk_free(&s->silk);
        ff_celt_free(&s->celt);

        av_freep(&s->out_dummy);
        s->out_dummy_allocated_size = 0;

        av_audio_fifo_free(s->celt_delay);
        swr_free(&s->swr);
    }

    av_freep(&c->streams);

    /* sync_buffers entries are freed before nb_streams is cleared so the
     * loop bound is still valid */
    if (c->sync_buffers) {
        for (i = 0; i < c->nb_streams; i++)
            av_audio_fifo_free(c->sync_buffers[i]);
    }
    av_freep(&c->sync_buffers);
    av_freep(&c->decoded_samples);
    av_freep(&c->out);
    av_freep(&c->out_size);

    c->nb_streams = 0;

    av_freep(&c->channel_maps);

    return 0;
}
  562. static av_cold int opus_decode_init(AVCodecContext *avctx)
  563. {
  564. OpusContext *c = avctx->priv_data;
  565. int ret, i, j;
  566. avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
  567. avctx->sample_rate = 48000;
  568. avpriv_float_dsp_init(&c->fdsp, 0);
  569. /* find out the channel configuration */
  570. ret = ff_opus_parse_extradata(avctx, c);
  571. if (ret < 0)
  572. return ret;
  573. /* allocate and init each independent decoder */
  574. c->streams = av_mallocz_array(c->nb_streams, sizeof(*c->streams));
  575. c->out = av_mallocz_array(c->nb_streams, 2 * sizeof(*c->out));
  576. c->out_size = av_mallocz_array(c->nb_streams, sizeof(*c->out_size));
  577. c->sync_buffers = av_mallocz_array(c->nb_streams, sizeof(*c->sync_buffers));
  578. c->decoded_samples = av_mallocz_array(c->nb_streams, sizeof(*c->decoded_samples));
  579. if (!c->streams || !c->sync_buffers || !c->decoded_samples || !c->out || !c->out_size) {
  580. c->nb_streams = 0;
  581. ret = AVERROR(ENOMEM);
  582. goto fail;
  583. }
  584. for (i = 0; i < c->nb_streams; i++) {
  585. OpusStreamContext *s = &c->streams[i];
  586. uint64_t layout;
  587. s->output_channels = (i < c->nb_stereo_streams) ? 2 : 1;
  588. s->avctx = avctx;
  589. for (j = 0; j < s->output_channels; j++) {
  590. s->silk_output[j] = s->silk_buf[j];
  591. s->celt_output[j] = s->celt_buf[j];
  592. s->redundancy_output[j] = s->redundancy_buf[j];
  593. }
  594. s->fdsp = &c->fdsp;
  595. s->swr =swr_alloc();
  596. if (!s->swr)
  597. goto fail;
  598. layout = (s->output_channels == 1) ? AV_CH_LAYOUT_MONO : AV_CH_LAYOUT_STEREO;
  599. av_opt_set_int(s->swr, "in_sample_fmt", avctx->sample_fmt, 0);
  600. av_opt_set_int(s->swr, "out_sample_fmt", avctx->sample_fmt, 0);
  601. av_opt_set_int(s->swr, "in_channel_layout", layout, 0);
  602. av_opt_set_int(s->swr, "out_channel_layout", layout, 0);
  603. av_opt_set_int(s->swr, "out_sample_rate", avctx->sample_rate, 0);
  604. av_opt_set_int(s->swr, "filter_size", 16, 0);
  605. ret = ff_silk_init(avctx, &s->silk, s->output_channels);
  606. if (ret < 0)
  607. goto fail;
  608. ret = ff_celt_init(avctx, &s->celt, s->output_channels);
  609. if (ret < 0)
  610. goto fail;
  611. s->celt_delay = av_audio_fifo_alloc(avctx->sample_fmt,
  612. s->output_channels, 1024);
  613. if (!s->celt_delay) {
  614. ret = AVERROR(ENOMEM);
  615. goto fail;
  616. }
  617. c->sync_buffers[i] = av_audio_fifo_alloc(avctx->sample_fmt,
  618. s->output_channels, 32);
  619. if (!c->sync_buffers[i]) {
  620. ret = AVERROR(ENOMEM);
  621. goto fail;
  622. }
  623. }
  624. return 0;
  625. fail:
  626. opus_decode_close(avctx);
  627. return ret;
  628. }
/* Decoder registration: DR1 (direct rendering into user buffers) and
 * DELAY (may buffer samples and needs a final NULL-packet flush). */
AVCodec ff_opus_decoder = {
    .name            = "opus",
    .long_name       = NULL_IF_CONFIG_SMALL("Opus"),
    .type            = AVMEDIA_TYPE_AUDIO,
    .id              = AV_CODEC_ID_OPUS,
    .priv_data_size  = sizeof(OpusContext),
    .init            = opus_decode_init,
    .close           = opus_decode_close,
    .decode          = opus_decode_packet,
    .flush           = opus_decode_flush,
    .capabilities    = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
};