/*
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * sample format and channel layout conversion audio filter
 */
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/dict.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"

#include "libavresample/avresample.h"

#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
typedef struct ResampleContext {
    const AVClass *class;
    AVAudioResampleContext *avr;  /* lavr context; stays NULL when no conversion is needed */
    AVDictionary *options;        /* lavr options collected from the filter options in init() */

    int resampling;               /* nonzero if sample rate conversion is active */
    int64_t next_pts;             /* expected pts of the next output frame (output time base) */
    int64_t next_in_pts;          /* expected pts of the next input frame (input time base) */

    /* set by filter_frame() to signal an output frame to request_frame() */
    int got_output;
} ResampleContext;
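
/* Pick out of the filter's option dictionary every option that libavresample
 * recognizes, consume it from *opts and keep it in s->options so it can be
 * applied to the AVAudioResampleContext in config_output(). */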
static av_cold int init(AVFilterContext *ctx, AVDictionary **opts)
{
    ResampleContext *s = ctx->priv;
    const AVClass *avr_class = avresample_get_class();
    AVDictionaryEntry *e = NULL;

    while ((e = av_dict_get(*opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
        if (av_opt_find(&avr_class, e->key, NULL, 0,
                        AV_OPT_SEARCH_FAKE_OBJ | AV_OPT_SEARCH_CHILDREN))
            av_dict_set(&s->options, e->key, e->value, 0);
    }

    e = NULL;
    while ((e = av_dict_get(s->options, "", e, AV_DICT_IGNORE_SUFFIX)))
        av_dict_set(opts, e->key, NULL, 0);

    /* do not allow the user to override basic format options */
    av_dict_set(&s->options,  "in_channel_layout", NULL, 0);
    av_dict_set(&s->options, "out_channel_layout", NULL, 0);
    av_dict_set(&s->options,  "in_sample_fmt",     NULL, 0);
    av_dict_set(&s->options, "out_sample_fmt",     NULL, 0);
    av_dict_set(&s->options,  "in_sample_rate",    NULL, 0);
    av_dict_set(&s->options, "out_sample_rate",    NULL, 0);

    return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
    ResampleContext *s = ctx->priv;

    if (s->avr) {
        avresample_close(s->avr);
        avresample_free(&s->avr);
    }
    av_dict_free(&s->options);
}
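
/* Advertise all audio sample formats, sample rates and channel layouts on
 * both links; libavresample can convert between any combination of them. */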
static int query_formats(AVFilterContext *ctx)
{
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];

    AVFilterFormats        *in_formats      = ff_all_formats(AVMEDIA_TYPE_AUDIO);
    AVFilterFormats        *out_formats     = ff_all_formats(AVMEDIA_TYPE_AUDIO);
    AVFilterFormats        *in_samplerates  = ff_all_samplerates();
    AVFilterFormats        *out_samplerates = ff_all_samplerates();
    AVFilterChannelLayouts *in_layouts      = ff_all_channel_layouts();
    AVFilterChannelLayouts *out_layouts     = ff_all_channel_layouts();

    ff_formats_ref(in_formats,  &inlink->out_formats);
    ff_formats_ref(out_formats, &outlink->in_formats);

    ff_formats_ref(in_samplerates,  &inlink->out_samplerates);
    ff_formats_ref(out_samplerates, &outlink->in_samplerates);

    ff_channel_layouts_ref(in_layouts,  &inlink->out_channel_layouts);
    ff_channel_layouts_ref(out_layouts, &outlink->in_channel_layouts);

    return 0;
}
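
/* (Re)create and open the AVAudioResampleContext once the output link
 * parameters are known. If the input and output are effectively identical,
 * no context is allocated and filter_frame() passes frames through as is. */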
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ResampleContext   *s = ctx->priv;
    char buf1[64], buf2[64];
    int ret;
    int64_t resampling_forced;

    if (s->avr) {
        avresample_close(s->avr);
        avresample_free(&s->avr);
    }

    if (inlink->channel_layout == outlink->channel_layout &&
        inlink->sample_rate    == outlink->sample_rate    &&
        (inlink->format == outlink->format ||
        (av_get_channel_layout_nb_channels(inlink->channel_layout)  == 1 &&
         av_get_channel_layout_nb_channels(outlink->channel_layout) == 1 &&
         av_get_planar_sample_fmt(inlink->format) ==
         av_get_planar_sample_fmt(outlink->format))))
        return 0;

    if (!(s->avr = avresample_alloc_context()))
        return AVERROR(ENOMEM);

    if (s->options) {
        int ret;
        AVDictionaryEntry *e = NULL;
        while ((e = av_dict_get(s->options, "", e, AV_DICT_IGNORE_SUFFIX)))
            av_log(ctx, AV_LOG_VERBOSE, "lavr option: %s=%s\n", e->key, e->value);

        ret = av_opt_set_dict(s->avr, &s->options);
        if (ret < 0)
            return ret;
    }

    av_opt_set_int(s->avr,  "in_channel_layout", inlink ->channel_layout, 0);
    av_opt_set_int(s->avr, "out_channel_layout", outlink->channel_layout, 0);
    av_opt_set_int(s->avr,  "in_sample_fmt",     inlink ->format,         0);
    av_opt_set_int(s->avr, "out_sample_fmt",     outlink->format,         0);
    av_opt_set_int(s->avr,  "in_sample_rate",    inlink ->sample_rate,    0);
    av_opt_set_int(s->avr, "out_sample_rate",    outlink->sample_rate,    0);

    if ((ret = avresample_open(s->avr)) < 0)
        return ret;

    av_opt_get_int(s->avr, "force_resampling", 0, &resampling_forced);
    s->resampling = resampling_forced || (inlink->sample_rate != outlink->sample_rate);

    if (s->resampling) {
        outlink->time_base = (AVRational){ 1, outlink->sample_rate };
        s->next_pts        = AV_NOPTS_VALUE;
        s->next_in_pts     = AV_NOPTS_VALUE;
    } else
        outlink->time_base = inlink->time_base;

    av_get_channel_layout_string(buf1, sizeof(buf1),
                                 -1, inlink ->channel_layout);
    av_get_channel_layout_string(buf2, sizeof(buf2),
                                 -1, outlink->channel_layout);
    av_log(ctx, AV_LOG_VERBOSE,
           "fmt:%s srate:%d cl:%s -> fmt:%s srate:%d cl:%s\n",
           av_get_sample_fmt_name(inlink ->format), inlink ->sample_rate, buf1,
           av_get_sample_fmt_name(outlink->format), outlink->sample_rate, buf2);

    return 0;
}
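
/* Pull frames from the input until one has been emitted downstream; on EOF,
 * drain whatever samples are still buffered inside libavresample. */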
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ResampleContext   *s = ctx->priv;
    int ret = 0;

    s->got_output = 0;
    while (ret >= 0 && !s->got_output)
        ret = ff_request_frame(ctx->inputs[0]);

    /* flush the lavr delay buffer */
    if (ret == AVERROR_EOF && s->avr) {
        AVFrame *frame;
        int nb_samples = avresample_get_out_samples(s->avr, 0);

        if (!nb_samples)
            return ret;

        frame = ff_get_audio_buffer(outlink, nb_samples);
        if (!frame)
            return AVERROR(ENOMEM);

        ret = avresample_convert(s->avr, frame->extended_data,
                                 frame->linesize[0], nb_samples,
                                 NULL, 0, 0);
        if (ret <= 0) {
            av_frame_free(&frame);
            return (ret == 0) ? AVERROR_EOF : ret;
        }

        frame->nb_samples = ret;
        frame->pts = s->next_pts;
        return ff_filter_frame(outlink, frame);
    }
    return ret;
}
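
/* Convert one input frame. When resampling, output timestamps are derived
 * from the number of samples actually produced so that rescaling errors do
 * not accumulate; in->pts is only rescaled directly across a timestamp
 * discontinuity. */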
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext  *ctx = inlink->dst;
    ResampleContext    *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int ret;

    if (s->avr) {
        AVFrame *out;
        int delay, nb_samples;

        /* maximum possible samples lavr can output */
        delay      = avresample_get_delay(s->avr);
        nb_samples = avresample_get_out_samples(s->avr, in->nb_samples);

        out = ff_get_audio_buffer(outlink, nb_samples);
        if (!out) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        ret = avresample_convert(s->avr, out->extended_data, out->linesize[0],
                                 nb_samples, in->extended_data, in->linesize[0],
                                 in->nb_samples);
        if (ret <= 0) {
            av_frame_free(&out);
            if (ret < 0)
                goto fail;
        }

        av_assert0(!avresample_available(s->avr));

        if (s->resampling && s->next_pts == AV_NOPTS_VALUE) {
            if (in->pts == AV_NOPTS_VALUE) {
                av_log(ctx, AV_LOG_WARNING, "First timestamp is missing, "
                       "assuming 0.\n");
                s->next_pts = 0;
            } else
                s->next_pts = av_rescale_q(in->pts, inlink->time_base,
                                           outlink->time_base);
        }

        if (ret > 0) {
            out->nb_samples = ret;

            ret = av_frame_copy_props(out, in);
            if (ret < 0) {
                av_frame_free(&out);
                goto fail;
            }

            if (s->resampling) {
                out->sample_rate = outlink->sample_rate;
                /* Only convert in->pts if there is a discontinuous jump.
                   This ensures that out->pts tracks the number of samples actually
                   output by the resampler in the absence of such a jump.
                   Otherwise, the rounding in av_rescale_q() and av_rescale()
                   causes off-by-1 errors. */
                if (in->pts != AV_NOPTS_VALUE && in->pts != s->next_in_pts) {
                    out->pts = av_rescale_q(in->pts, inlink->time_base,
                                            outlink->time_base) -
                               av_rescale(delay, outlink->sample_rate,
                                          inlink->sample_rate);
                } else
                    out->pts = s->next_pts;

                s->next_pts    = out->pts + out->nb_samples;
                s->next_in_pts = in->pts  + in->nb_samples;
            } else
                out->pts = in->pts;

            ret = ff_filter_frame(outlink, out);
            s->got_output = 1;
        }

fail:
        av_frame_free(&in);
    } else {
        in->format = outlink->format;

        ret = ff_filter_frame(outlink, in);
        s->got_output = 1;
    }

    return ret;
}
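
/* Expose the libavresample context as a child of the filter's AVClass so the
 * AVOption API can find and document the lavr options alongside the filter's
 * own. */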
static const AVClass *resample_child_class_next(const AVClass *prev)
{
    return prev ? NULL : avresample_get_class();
}

static void *resample_child_next(void *obj, void *prev)
{
    ResampleContext *s = obj;
    return prev ? NULL : s->avr;
}

static const AVClass resample_class = {
    .class_name       = "resample",
    .item_name        = av_default_item_name,
    .version          = LIBAVUTIL_VERSION_INT,
    .child_class_next = resample_child_class_next,
    .child_next       = resample_child_next,
};
static const AVFilterPad avfilter_af_resample_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_af_resample_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame
    },
    { NULL }
};

AVFilter ff_af_resample = {
    .name          = "resample",
    .description   = NULL_IF_CONFIG_SMALL("Audio resampling and conversion."),
    .priv_size     = sizeof(ResampleContext),
    .priv_class    = &resample_class,
    .init_dict     = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = avfilter_af_resample_inputs,
    .outputs       = avfilter_af_resample_outputs,
};