/*
 * Copyright (c) 2011 Stefano Sabatini
 * Copyright (c) 2011 Mina Nagy Zaki
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * resampling audio filter
 */
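
/*
 * Typical filtergraph usage (for illustration only):
 *   aresample=44100            resample the stream to 44100 Hz
 *   aresample=osr=48000        same, spelled with the libswresample option name
 * Options not handled by the filter itself are forwarded to the underlying
 * SwrContext (see init_dict() and resample_child_next() below).
 */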

#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "libavutil/avassert.h"
#include "libswresample/swresample.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"

typedef struct {
    const AVClass *class;
    int sample_rate_arg;        /* output rate requested via the "sample_rate" option, 0 if unset */
    double ratio;               /* output rate / input rate, used to size output buffers */
    struct SwrContext *swr;     /* libswresample context that performs the conversion */
    int64_t next_pts;           /* pts expected for the next output frame */
    int req_fullfilled;         /* set once filter_frame() has produced output for a request */
} AResampleContext;
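
/* Allocate the SwrContext and forward any leftover filter options to it;
 * the "sample_rate" filter option, when set, is mapped to swresample's "osr". */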
static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
{
    AResampleContext *aresample = ctx->priv;
    int ret = 0;

    aresample->next_pts = AV_NOPTS_VALUE;
    aresample->swr = swr_alloc();
    if (!aresample->swr) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if (opts) {
        AVDictionaryEntry *e = NULL;

        while ((e = av_dict_get(*opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
            const char *token = e->key;
            const char *value = e->value;
            if ((ret = av_opt_set(aresample->swr, token, value, 0)) < 0)
                goto end;
        }
        av_dict_free(opts);
    }

    if (aresample->sample_rate_arg > 0)
        av_opt_set_int(aresample->swr, "osr", aresample->sample_rate_arg, 0);
end:
    return ret;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    AResampleContext *aresample = ctx->priv;
    swr_free(&aresample->swr);
}
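
/* Advertise all formats/rates/layouts on the input; on the output side,
 * restrict to the rate, sample format and channel layout already configured
 * on the SwrContext, or allow everything when nothing explicit was requested. */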
static int query_formats(AVFilterContext *ctx)
{
    AResampleContext *aresample = ctx->priv;

    int out_rate                   = av_get_int(aresample->swr, "osr", NULL);
    uint64_t out_layout            = av_get_int(aresample->swr, "ocl", NULL);
    enum AVSampleFormat out_format = av_get_int(aresample->swr, "osf", NULL);

    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];

    AVFilterFormats        *in_formats      = ff_all_formats(AVMEDIA_TYPE_AUDIO);
    AVFilterFormats        *out_formats;
    AVFilterFormats        *in_samplerates  = ff_all_samplerates();
    AVFilterFormats        *out_samplerates;
    AVFilterChannelLayouts *in_layouts      = ff_all_channel_counts();
    AVFilterChannelLayouts *out_layouts;

    ff_formats_ref        (in_formats,      &inlink->out_formats);
    ff_formats_ref        (in_samplerates,  &inlink->out_samplerates);
    ff_channel_layouts_ref(in_layouts,      &inlink->out_channel_layouts);

    if (out_rate > 0) {
        out_samplerates = ff_make_format_list((int[]){ out_rate, -1 });
    } else {
        out_samplerates = ff_all_samplerates();
    }
    ff_formats_ref(out_samplerates, &outlink->in_samplerates);

    if (out_format != AV_SAMPLE_FMT_NONE) {
        out_formats = ff_make_format_list((int[]){ out_format, -1 });
    } else
        out_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO);
    ff_formats_ref(out_formats, &outlink->in_formats);

    if (out_layout) {
        out_layouts = avfilter_make_format64_list((int64_t[]){ out_layout, -1 });
    } else
        out_layouts = ff_all_channel_counts();
    ff_channel_layouts_ref(out_layouts, &outlink->in_channel_layouts);

    return 0;
}
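
/* Configure and initialize the SwrContext from the negotiated link parameters,
 * set the output time base to 1/out_rate and compute the resampling ratio
 * later used to size output buffers. */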
static int config_output(AVFilterLink *outlink)
{
    int ret;
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    AResampleContext *aresample = ctx->priv;
    int out_rate;
    uint64_t out_layout;
    enum AVSampleFormat out_format;
    char inchl_buf[128], outchl_buf[128];

    aresample->swr = swr_alloc_set_opts(aresample->swr,
                                        outlink->channel_layout, outlink->format, outlink->sample_rate,
                                        inlink->channel_layout,  inlink->format,  inlink->sample_rate,
                                        0, ctx);
    if (!aresample->swr)
        return AVERROR(ENOMEM);

    if (!inlink->channel_layout)
        av_opt_set_int(aresample->swr, "ich", inlink->channels, 0);
    if (!outlink->channel_layout)
        av_opt_set_int(aresample->swr, "och", outlink->channels, 0);

    ret = swr_init(aresample->swr);
    if (ret < 0)
        return ret;

    out_rate   = av_get_int(aresample->swr, "osr", NULL);
    out_layout = av_get_int(aresample->swr, "ocl", NULL);
    out_format = av_get_int(aresample->swr, "osf", NULL);
    outlink->time_base = (AVRational) {1, out_rate};

    av_assert0(outlink->sample_rate == out_rate);
    av_assert0(outlink->channel_layout == out_layout || !outlink->channel_layout);
    av_assert0(outlink->format == out_format);

    aresample->ratio = (double)outlink->sample_rate / inlink->sample_rate;

    av_get_channel_layout_string(inchl_buf,  sizeof(inchl_buf),  inlink ->channels, inlink ->channel_layout);
    av_get_channel_layout_string(outchl_buf, sizeof(outchl_buf), outlink->channels, outlink->channel_layout);
    av_log(ctx, AV_LOG_VERBOSE, "ch:%d chl:%s fmt:%s r:%dHz -> ch:%d chl:%s fmt:%s r:%dHz\n",
           inlink ->channels, inchl_buf,  av_get_sample_fmt_name(inlink->format),  inlink->sample_rate,
           outlink->channels, outchl_buf, av_get_sample_fmt_name(outlink->format), outlink->sample_rate);
    return 0;
}
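
/* Convert one input frame: allocate a generously sized output buffer
 * (n_in * ratio * 2 + 256 samples), derive the output pts via swr_next_pts(),
 * and push the converted samples downstream. */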
static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref)
{
    AResampleContext *aresample = inlink->dst->priv;
    const int n_in  = insamplesref->nb_samples;
    int n_out       = n_in * aresample->ratio * 2 + 256;
    AVFilterLink *const outlink = inlink->dst->outputs[0];
    AVFrame *outsamplesref = ff_get_audio_buffer(outlink, n_out);
    int ret;

    if (!outsamplesref)
        return AVERROR(ENOMEM);

    av_frame_copy_props(outsamplesref, insamplesref);
    outsamplesref->format         = outlink->format;
    av_frame_set_channels(outsamplesref, outlink->channels);
    outsamplesref->channel_layout = outlink->channel_layout;
    outsamplesref->sample_rate    = outlink->sample_rate;

    if (insamplesref->pts != AV_NOPTS_VALUE) {
        int64_t inpts = av_rescale(insamplesref->pts, inlink->time_base.num * (int64_t)outlink->sample_rate * inlink->sample_rate, inlink->time_base.den);
        int64_t outpts = swr_next_pts(aresample->swr, inpts);
        aresample->next_pts =
        outsamplesref->pts  = ROUNDED_DIV(outpts, inlink->sample_rate);
    } else {
        outsamplesref->pts  = AV_NOPTS_VALUE;
    }

    n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out,
                        (void *)insamplesref->extended_data, n_in);
    if (n_out <= 0) {
        av_frame_free(&outsamplesref);
        av_frame_free(&insamplesref);
        return 0;
    }

    outsamplesref->nb_samples = n_out;

    ret = ff_filter_frame(outlink, outsamplesref);
    aresample->req_fullfilled = 1;
    av_frame_free(&insamplesref);
    return ret;
}
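
/* Pull frames from the input until one has been output; on EOF, flush the
 * samples still buffered inside the SwrContext as a final frame. */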
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AResampleContext *aresample = ctx->priv;
    AVFilterLink *const inlink = outlink->src->inputs[0];
    int ret;

    aresample->req_fullfilled = 0;
    do {
        ret = ff_request_frame(ctx->inputs[0]);
    } while (!aresample->req_fullfilled && ret >= 0);

    if (ret == AVERROR_EOF) {
        AVFrame *outsamplesref;
        int n_out = 4096;

        outsamplesref = ff_get_audio_buffer(outlink, n_out);
        if (!outsamplesref)
            return AVERROR(ENOMEM);

        n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out, 0, 0);
        if (n_out <= 0) {
            av_frame_free(&outsamplesref);
            return (n_out == 0) ? AVERROR_EOF : n_out;
        }

        outsamplesref->sample_rate = outlink->sample_rate;
        outsamplesref->nb_samples  = n_out;
#if 0
        outsamplesref->pts = aresample->next_pts;
        if (aresample->next_pts != AV_NOPTS_VALUE)
            aresample->next_pts += av_rescale_q(n_out, (AVRational){1, outlink->sample_rate}, outlink->time_base);
#else
        outsamplesref->pts = swr_next_pts(aresample->swr, INT64_MIN);
        outsamplesref->pts = ROUNDED_DIV(outsamplesref->pts, inlink->sample_rate);
#endif
        return ff_filter_frame(outlink, outsamplesref);
    }
    return ret;
}

static const AVClass *resample_child_class_next(const AVClass *prev)
{
    return prev ? NULL : swr_get_class();
}

static void *resample_child_next(void *obj, void *prev)
{
    AResampleContext *s = obj;
    return prev ? NULL : s->swr;
}

#define OFFSET(x) offsetof(AResampleContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption options[] = {
    {"sample_rate", NULL, OFFSET(sample_rate_arg), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
    {NULL}
};

static const AVClass aresample_class = {
    .class_name       = "aresample",
    .item_name        = av_default_item_name,
    .option           = options,
    .version          = LIBAVUTIL_VERSION_INT,
    .child_class_next = resample_child_class_next,
    .child_next       = resample_child_next,
};

static const AVFilterPad aresample_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .filter_frame  = filter_frame,
    },
    { NULL },
};

static const AVFilterPad aresample_outputs[] = {
    {
        .name          = "default",
        .config_props  = config_output,
        .request_frame = request_frame,
        .type          = AVMEDIA_TYPE_AUDIO,
    },
    { NULL },
};

AVFilter avfilter_af_aresample = {
    .name          = "aresample",
    .description   = NULL_IF_CONFIG_SMALL("Resample audio data."),
    .init_dict     = init_dict,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(AResampleContext),
    .priv_class    = &aresample_class,
    .inputs        = aresample_inputs,
    .outputs       = aresample_outputs,
};