/*
 * Copyright (c) 2016 The FFmpeg Project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"

typedef struct CrystalizerContext {
    const AVClass *class;
    float mult;     // intensity of the effect ("i" option)
    int clip;       // clamp output samples to [-1, 1] ("c" option)
    AVFrame *prev;  // one-sample-per-channel history buffer
    int (*filter)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} CrystalizerContext;

#define OFFSET(x) offsetof(CrystalizerContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption crystalizer_options[] = {
    { "i", "set intensity",   OFFSET(mult), AV_OPT_TYPE_FLOAT, {.dbl=2.0}, 0, 10, A },
    { "c", "enable clipping", OFFSET(clip), AV_OPT_TYPE_BOOL,  {.i64=1},   0,  1, A },
    { NULL }
};

AVFILTER_DEFINE_CLASS(crystalizer);

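/*
 * Illustrative usage of the options above (example command, not taken from
 * this file): the filter can be inserted in a filtergraph as, e.g.,
 *     -af crystalizer=i=3:c=1
 * where "i" sets the intensity (0-10, default 2) and "c" toggles clipping.
 */
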
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    return ff_set_common_samplerates(ctx, formats);
}

typedef struct ThreadData {
    void **d;        // destination plane pointers
    void **p;        // previous-sample (history) plane pointers
    const void **s;  // source plane pointers
    int nb_samples;
    int channels;
    float mult;
    int clip;
} ThreadData;

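/*
 * The four filter_* workers below all apply the same per-sample operation,
 *     dst = cur + (cur - prev) * mult,
 * i.e. the input plus a scaled first-order difference, one worker per
 * supported sample format (packed/planar float and double). Each slice job
 * processes a contiguous range of channels.
 */
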
static int filter_flt(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ThreadData *td = arg;
    void **d = td->d;
    void **p = td->p;
    const void **s = td->s;
    const int nb_samples = td->nb_samples;
    const int channels = td->channels;
    float mult = td->mult;
    const int clip = td->clip;
    const int start = (channels * jobnr) / nb_jobs;
    const int end = (channels * (jobnr+1)) / nb_jobs;
    float *prv = p[0];
    int n, c;

    for (c = start; c < end; c++) {
        const float *src = s[0];
        float *dst = d[0];

        for (n = 0; n < nb_samples; n++) {
            float current = src[c];

            dst[c] = current + (current - prv[c]) * mult;
            prv[c] = current;
            if (clip) {
                dst[c] = av_clipf(dst[c], -1, 1);
            }

            dst += channels;
            src += channels;
        }
    }

    return 0;
}

static int filter_dbl(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ThreadData *td = arg;
    void **d = td->d;
    void **p = td->p;
    const void **s = td->s;
    const int nb_samples = td->nb_samples;
    const int channels = td->channels;
    float mult = td->mult;
    const int clip = td->clip;
    const int start = (channels * jobnr) / nb_jobs;
    const int end = (channels * (jobnr+1)) / nb_jobs;
    double *prv = p[0];
    int n, c;

    for (c = start; c < end; c++) {
        const double *src = s[0];
        double *dst = d[0];

        for (n = 0; n < nb_samples; n++) {
            double current = src[c];

            dst[c] = current + (current - prv[c]) * mult;
            prv[c] = current;
            if (clip) {
                dst[c] = av_clipd(dst[c], -1, 1);
            }

            dst += channels;
            src += channels;
        }
    }

    return 0;
}

static int filter_fltp(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ThreadData *td = arg;
    void **d = td->d;
    void **p = td->p;
    const void **s = td->s;
    const int nb_samples = td->nb_samples;
    const int channels = td->channels;
    float mult = td->mult;
    const int clip = td->clip;
    const int start = (channels * jobnr) / nb_jobs;
    const int end = (channels * (jobnr+1)) / nb_jobs;
    int n, c;

    for (c = start; c < end; c++) {
        const float *src = s[c];
        float *dst = d[c];
        float *prv = p[c];

        for (n = 0; n < nb_samples; n++) {
            float current = src[n];

            dst[n] = current + (current - prv[0]) * mult;
            prv[0] = current;
            if (clip) {
                dst[n] = av_clipf(dst[n], -1, 1);
            }
        }
    }

    return 0;
}

static int filter_dblp(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ThreadData *td = arg;
    void **d = td->d;
    void **p = td->p;
    const void **s = td->s;
    const int nb_samples = td->nb_samples;
    const int channels = td->channels;
    float mult = td->mult;
    const int clip = td->clip;
    const int start = (channels * jobnr) / nb_jobs;
    const int end = (channels * (jobnr+1)) / nb_jobs;
    int n, c;

    for (c = start; c < end; c++) {
        const double *src = s[c];
        double *dst = d[c];
        double *prv = p[c];

        for (n = 0; n < nb_samples; n++) {
            double current = src[n];

            dst[n] = current + (current - prv[0]) * mult;
            prv[0] = current;
            if (clip) {
                dst[n] = av_clipd(dst[n], -1, 1);
            }
        }
    }

    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    CrystalizerContext *s = ctx->priv;

    switch (inlink->format) {
    case AV_SAMPLE_FMT_FLT:  s->filter = filter_flt;  break;
    case AV_SAMPLE_FMT_DBL:  s->filter = filter_dbl;  break;
    case AV_SAMPLE_FMT_FLTP: s->filter = filter_fltp; break;
    case AV_SAMPLE_FMT_DBLP: s->filter = filter_dblp; break;
    }

    return 0;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    CrystalizerContext *s = ctx->priv;
    AVFrame *out;
    ThreadData td;

    /* allocate the one-sample-per-channel history buffer on the first frame */
    if (!s->prev) {
        s->prev = ff_get_audio_buffer(inlink, 1);
        if (!s->prev) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
    }

    /* process in place when possible, otherwise allocate an output frame */
    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    td.d = (void **)out->extended_data;
    td.s = (const void **)in->extended_data;
    td.p = (void **)s->prev->extended_data;
    td.nb_samples = in->nb_samples;
    td.channels = in->channels;
    td.mult = ctx->is_disabled ? 0.f : s->mult;
    td.clip = s->clip;
    ctx->internal->execute(ctx, s->filter, &td, NULL, FFMIN(inlink->channels,
                                                            ff_filter_get_nb_threads(ctx)));

    if (out != in)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    CrystalizerContext *s = ctx->priv;

    av_frame_free(&s->prev);
}

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_crystalizer = {
    .name            = "crystalizer",
    .description     = NULL_IF_CONFIG_SMALL("Simple expand audio dynamic range filter."),
    .query_formats   = query_formats,
    .priv_size       = sizeof(CrystalizerContext),
    .priv_class      = &crystalizer_class,
    .uninit          = uninit,
    .inputs          = inputs,
    .outputs         = outputs,
    .process_command = ff_filter_process_command,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
                       AVFILTER_FLAG_SLICE_THREADS,
};