You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

362 lines
11KB

  1. /*
  2. * Copyright (c) 2011 Stefano Sabatini
  3. * Copyright (c) 2011 Mina Nagy Zaki
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * resampling audio filter
  24. */
  25. #include "libavutil/avstring.h"
  26. #include "libavutil/channel_layout.h"
  27. #include "libavutil/opt.h"
  28. #include "libavutil/samplefmt.h"
  29. #include "libavutil/avassert.h"
  30. #include "libswresample/swresample.h"
  31. #include "avfilter.h"
  32. #include "audio.h"
  33. #include "internal.h"
typedef struct {
    const AVClass *class;
    int sample_rate_arg;     // output sample rate requested via the "sample_rate" option (0 = unset)
    double ratio;            // out_rate / in_rate, used to size output buffers
    struct SwrContext *swr;  // libswresample context performing the actual conversion
    int64_t next_pts;        // pts computed for the frame following the last one output
    int req_fullfilled;      // set once filter_frame() pushed a frame during a request_frame() loop
    int more_data;           // set when swr probably still buffers samples worth draining
} AResampleContext;
  43. static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
  44. {
  45. AResampleContext *aresample = ctx->priv;
  46. int ret = 0;
  47. aresample->next_pts = AV_NOPTS_VALUE;
  48. aresample->swr = swr_alloc();
  49. if (!aresample->swr) {
  50. ret = AVERROR(ENOMEM);
  51. goto end;
  52. }
  53. if (opts) {
  54. AVDictionaryEntry *e = NULL;
  55. while ((e = av_dict_get(*opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
  56. if ((ret = av_opt_set(aresample->swr, e->key, e->value, 0)) < 0)
  57. goto end;
  58. }
  59. av_dict_free(opts);
  60. }
  61. if (aresample->sample_rate_arg > 0)
  62. av_opt_set_int(aresample->swr, "osr", aresample->sample_rate_arg, 0);
  63. end:
  64. return ret;
  65. }
  66. static av_cold void uninit(AVFilterContext *ctx)
  67. {
  68. AResampleContext *aresample = ctx->priv;
  69. swr_free(&aresample->swr);
  70. }
/**
 * Advertise supported formats: the input side accepts any audio format,
 * sample rate and channel layout (swr can convert anything); the output
 * side is restricted to whatever the user pinned down via swr options.
 */
static int query_formats(AVFilterContext *ctx)
{
    AResampleContext *aresample = ctx->priv;
    enum AVSampleFormat out_format;
    int64_t out_rate, out_layout;

    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];

    AVFilterFormats *in_formats, *out_formats;
    AVFilterFormats *in_samplerates, *out_samplerates;
    AVFilterChannelLayouts *in_layouts, *out_layouts;

    /* 0 / AV_SAMPLE_FMT_NONE read back here mean "not requested by user" */
    av_opt_get_sample_fmt(aresample->swr, "osf", 0, &out_format);
    av_opt_get_int(aresample->swr, "osr", 0, &out_rate);
    av_opt_get_int(aresample->swr, "ocl", 0, &out_layout);

    /* NOTE(review): the ff_formats_ref()/ff_channel_layouts_ref() results are
     * ignored throughout this function; newer FFmpeg trees check them — confirm
     * their return type in this tree before tightening. */
    in_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO);
    if (!in_formats)
        return AVERROR(ENOMEM);
    ff_formats_ref(in_formats, &inlink->out_formats);

    in_samplerates = ff_all_samplerates();
    if (!in_samplerates)
        return AVERROR(ENOMEM);
    ff_formats_ref(in_samplerates, &inlink->out_samplerates);

    in_layouts = ff_all_channel_counts();
    if (!in_layouts)
        return AVERROR(ENOMEM);
    ff_channel_layouts_ref(in_layouts, &inlink->out_channel_layouts);

    if (out_rate > 0) {
        /* user forced a rate: offer exactly that one.
         * NOTE(review): int64->int narrowing; presumably out_rate is bounded
         * by the swr option range — confirm. */
        int ratelist[] = { out_rate, -1 };
        out_samplerates = ff_make_format_list(ratelist);
    } else {
        out_samplerates = ff_all_samplerates();
    }
    if (!out_samplerates) {
        av_log(ctx, AV_LOG_ERROR, "Cannot allocate output samplerates.\n");
        return AVERROR(ENOMEM);
    }
    ff_formats_ref(out_samplerates, &outlink->in_samplerates);

    if (out_format != AV_SAMPLE_FMT_NONE) {
        int formatlist[] = { out_format, -1 };
        out_formats = ff_make_format_list(formatlist);
    } else
        out_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO);
    ff_formats_ref(out_formats, &outlink->in_formats);

    if (out_layout) {
        int64_t layout_list[] = { out_layout, -1 };
        out_layouts = avfilter_make_format64_list(layout_list);
    } else
        out_layouts = ff_all_channel_counts();
    ff_channel_layouts_ref(out_layouts, &outlink->in_channel_layouts);

    return 0;
}
/**
 * Configure the output link: (re)program the swr context with the
 * negotiated input/output parameters, initialize it, and sanity-check
 * that swr agreed with what the link negotiation produced.
 */
static int config_output(AVFilterLink *outlink)
{
    int ret;
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    AResampleContext *aresample = ctx->priv;
    int64_t out_rate, out_layout;
    enum AVSampleFormat out_format;
    char inchl_buf[128], outchl_buf[128];

    /* reuse the context allocated in init_dict(); user options set there survive */
    aresample->swr = swr_alloc_set_opts(aresample->swr,
                                        outlink->channel_layout, outlink->format, outlink->sample_rate,
                                        inlink->channel_layout, inlink->format, inlink->sample_rate,
                                        0, ctx);
    if (!aresample->swr)
        return AVERROR(ENOMEM);

    /* links negotiated by channel count only (no layout) need the count set explicitly */
    if (!inlink->channel_layout)
        av_opt_set_int(aresample->swr, "ich", inlink->channels, 0);
    if (!outlink->channel_layout)
        av_opt_set_int(aresample->swr, "och", outlink->channels, 0);

    ret = swr_init(aresample->swr);
    if (ret < 0)
        return ret;

    /* read back what swr actually settled on and assert it matches the link */
    av_opt_get_int(aresample->swr, "osr", 0, &out_rate);
    av_opt_get_int(aresample->swr, "ocl", 0, &out_layout);
    av_opt_get_sample_fmt(aresample->swr, "osf", 0, &out_format);
    outlink->time_base = (AVRational) {1, out_rate};

    av_assert0(outlink->sample_rate == out_rate);
    av_assert0(outlink->channel_layout == out_layout || !outlink->channel_layout);
    av_assert0(outlink->format == out_format);

    /* rate ratio used by filter_frame() to size output buffers */
    aresample->ratio = (double)outlink->sample_rate / inlink->sample_rate;

    av_get_channel_layout_string(inchl_buf, sizeof(inchl_buf), inlink->channels, inlink->channel_layout);
    av_get_channel_layout_string(outchl_buf, sizeof(outchl_buf), outlink->channels, outlink->channel_layout);
    av_log(ctx, AV_LOG_VERBOSE, "ch:%d chl:%s fmt:%s r:%dHz -> ch:%d chl:%s fmt:%s r:%dHz\n",
           inlink->channels, inchl_buf, av_get_sample_fmt_name(inlink->format), inlink->sample_rate,
           outlink->channels, outchl_buf, av_get_sample_fmt_name(outlink->format), outlink->sample_rate);
    return 0;
}
  158. static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref)
  159. {
  160. AResampleContext *aresample = inlink->dst->priv;
  161. const int n_in = insamplesref->nb_samples;
  162. int64_t delay;
  163. int n_out = n_in * aresample->ratio + 32;
  164. AVFilterLink *const outlink = inlink->dst->outputs[0];
  165. AVFrame *outsamplesref;
  166. int ret;
  167. delay = swr_get_delay(aresample->swr, outlink->sample_rate);
  168. if (delay > 0)
  169. n_out += FFMIN(delay, FFMAX(4096, n_out));
  170. outsamplesref = ff_get_audio_buffer(outlink, n_out);
  171. if(!outsamplesref)
  172. return AVERROR(ENOMEM);
  173. av_frame_copy_props(outsamplesref, insamplesref);
  174. outsamplesref->format = outlink->format;
  175. av_frame_set_channels(outsamplesref, outlink->channels);
  176. outsamplesref->channel_layout = outlink->channel_layout;
  177. outsamplesref->sample_rate = outlink->sample_rate;
  178. if(insamplesref->pts != AV_NOPTS_VALUE) {
  179. int64_t inpts = av_rescale(insamplesref->pts, inlink->time_base.num * (int64_t)outlink->sample_rate * inlink->sample_rate, inlink->time_base.den);
  180. int64_t outpts= swr_next_pts(aresample->swr, inpts);
  181. aresample->next_pts =
  182. outsamplesref->pts = ROUNDED_DIV(outpts, inlink->sample_rate);
  183. } else {
  184. outsamplesref->pts = AV_NOPTS_VALUE;
  185. }
  186. n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out,
  187. (void *)insamplesref->extended_data, n_in);
  188. if (n_out <= 0) {
  189. av_frame_free(&outsamplesref);
  190. av_frame_free(&insamplesref);
  191. return 0;
  192. }
  193. aresample->more_data = outsamplesref->nb_samples == n_out; // Indicate that there is probably more data in our buffers
  194. outsamplesref->nb_samples = n_out;
  195. ret = ff_filter_frame(outlink, outsamplesref);
  196. aresample->req_fullfilled= 1;
  197. av_frame_free(&insamplesref);
  198. return ret;
  199. }
/**
 * Pull data buffered inside swr into a freshly allocated frame.
 *
 * @param final  non-zero to flush swr at end of stream, zero to only
 *               drain already-converted samples
 * @param outsamplesref_ret receives the frame on success (caller owns it);
 *               set to NULL on allocation failure
 * @return 0 on success, AVERROR_EOF if nothing was buffered, or another
 *         negative AVERROR
 */
static int flush_frame(AVFilterLink *outlink, int final, AVFrame **outsamplesref_ret)
{
    AVFilterContext *ctx = outlink->src;
    AResampleContext *aresample = ctx->priv;
    AVFilterLink *const inlink = outlink->src->inputs[0];
    AVFrame *outsamplesref;
    int n_out = 4096;
    int64_t pts;

    outsamplesref = ff_get_audio_buffer(outlink, n_out);
    *outsamplesref_ret = outsamplesref;
    if (!outsamplesref)
        return AVERROR(ENOMEM);

    /* INT64_MIN asks swr_next_pts for the pts of the currently buffered data */
    pts = swr_next_pts(aresample->swr, INT64_MIN);
    pts = ROUNDED_DIV(pts, inlink->sample_rate);

    /* NULL input puts swr into end-of-stream flushing; the non-final path
     * passes a dummy non-NULL pointer with 0 input samples so that only
     * already-buffered output is returned without flushing */
    n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out, final ? NULL : (void*)outsamplesref->extended_data, 0);
    if (n_out <= 0) {
        av_frame_free(&outsamplesref);
        return (n_out == 0) ? AVERROR_EOF : n_out;
    }

    outsamplesref->sample_rate = outlink->sample_rate;
    outsamplesref->nb_samples  = n_out;

    outsamplesref->pts = pts;

    return 0;
}
/**
 * Produce one output frame, in three stages:
 *   1. drain data already buffered inside swr (if filter_frame hinted there is some),
 *   2. otherwise pull input frames until one of them yields output,
 *   3. on input EOF, flush swr for the final partial frame.
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AResampleContext *aresample = ctx->priv;
    int ret;

    // First try to get data from the internal buffers
    if (aresample->more_data) {
        AVFrame *outsamplesref;

        if (flush_frame(outlink, 0, &outsamplesref) >= 0) {
            return ff_filter_frame(outlink, outsamplesref);
        }
    }
    aresample->more_data = 0;

    // Second request more data from the input
    aresample->req_fullfilled = 0;
    do {
        ret = ff_request_frame(ctx->inputs[0]);
    } while (!aresample->req_fullfilled && ret >= 0);   // loop until filter_frame actually output something

    // Third if we hit the end flush
    if (ret == AVERROR_EOF) {
        AVFrame *outsamplesref;

        if ((ret = flush_frame(outlink, 1, &outsamplesref)) < 0)
            return ret;   // includes AVERROR_EOF when nothing was left to flush

        return ff_filter_frame(outlink, outsamplesref);
    }
    return ret;
}
  251. static const AVClass *resample_child_class_next(const AVClass *prev)
  252. {
  253. return prev ? NULL : swr_get_class();
  254. }
  255. static void *resample_child_next(void *obj, void *prev)
  256. {
  257. AResampleContext *s = obj;
  258. return prev ? NULL : s->swr;
  259. }
#define OFFSET(x) offsetof(AResampleContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* Only the output sample rate is a native option; all other settings are
 * forwarded to libswresample through the child-object mechanism. */
static const AVOption options[] = {
    {"sample_rate", NULL, OFFSET(sample_rate_arg), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
    {NULL}
};
static const AVClass aresample_class = {
    .class_name       = "aresample",
    .item_name        = av_default_item_name,
    .option           = options,
    .version          = LIBAVUTIL_VERSION_INT,
    /* expose the swr context's options as if they were our own */
    .child_class_next = resample_child_class_next,
    .child_next       = resample_child_next,
};
/* Single audio input; frames are converted as they arrive. */
static const AVFilterPad aresample_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single audio output; request_frame drives draining/flushing of swr. */
static const AVFilterPad aresample_outputs[] = {
    {
        .name          = "default",
        .config_props  = config_output,
        .request_frame = request_frame,
        .type          = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
AVFilter ff_af_aresample = {
    .name          = "aresample",
    .description   = NULL_IF_CONFIG_SMALL("Resample audio data."),
    .init_dict     = init_dict,   // init_dict so unknown options reach swr
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(AResampleContext),
    .priv_class    = &aresample_class,
    .inputs        = aresample_inputs,
    .outputs       = aresample_outputs,
};