/*
 * Copyright (c) 2001 Heikki Leinonen
 * Copyright (c) 2001 Chris Bagwell
 * Copyright (c) 2003 Donnie Smith
 * Copyright (c) 2014 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <float.h> /* DBL_MAX */

#include "libavutil/opt.h"
#include "libavutil/timestamp.h"
#include "audio.h"
#include "formats.h"
#include "avfilter.h"
#include "internal.h"
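
/*
 * Processing proceeds as a small state machine: SILENCE_TRIM drops leading
 * silence until enough non-silent periods have been seen, SILENCE_TRIM_FLUSH
 * emits the samples buffered while deciding, SILENCE_COPY passes audio
 * through while watching for trailing silence, SILENCE_COPY_FLUSH emits the
 * buffered tail, and SILENCE_STOP discards everything that follows.
 */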
enum SilenceMode {
    SILENCE_TRIM,
    SILENCE_TRIM_FLUSH,
    SILENCE_COPY,
    SILENCE_COPY_FLUSH,
    SILENCE_STOP
};

typedef struct SilenceRemoveContext {
    const AVClass *class;

    enum SilenceMode mode;

    int start_periods;
    int64_t start_duration;
    double start_threshold;

    int stop_periods;
    int64_t stop_duration;
    double stop_threshold;

    double *start_holdoff;
    size_t start_holdoff_offset;
    size_t start_holdoff_end;
    int    start_found_periods;

    double *stop_holdoff;
    size_t stop_holdoff_offset;
    size_t stop_holdoff_end;
    int    stop_found_periods;

    double window_ratio;
    double *window;
    double *window_current;
    double *window_end;
    int window_size;
    double sum;

    int leave_silence;
    int restart;
    int64_t next_pts;

    int detection;
    void   (*update)(struct SilenceRemoveContext *s, double sample);
    double (*compute)(struct SilenceRemoveContext *s, double sample);
} SilenceRemoveContext;

#define OFFSET(x) offsetof(SilenceRemoveContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM

static const AVOption silenceremove_options[] = {
    { "start_periods",   NULL, OFFSET(start_periods),   AV_OPT_TYPE_INT,      {.i64=0},        0,    9000, FLAGS },
    { "start_duration",  NULL, OFFSET(start_duration),  AV_OPT_TYPE_DURATION, {.i64=0},        0,    9000, FLAGS },
    { "start_threshold", NULL, OFFSET(start_threshold), AV_OPT_TYPE_DOUBLE,   {.dbl=0},        0, DBL_MAX, FLAGS },
    { "stop_periods",    NULL, OFFSET(stop_periods),    AV_OPT_TYPE_INT,      {.i64=0},    -9000,    9000, FLAGS },
    { "stop_duration",   NULL, OFFSET(stop_duration),   AV_OPT_TYPE_DURATION, {.i64=0},        0,    9000, FLAGS },
    { "stop_threshold",  NULL, OFFSET(stop_threshold),  AV_OPT_TYPE_DOUBLE,   {.dbl=0},        0, DBL_MAX, FLAGS },
    { "leave_silence",   NULL, OFFSET(leave_silence),   AV_OPT_TYPE_BOOL,     {.i64=0},        0,       1, FLAGS },
    { "detection",       NULL, OFFSET(detection),       AV_OPT_TYPE_INT,      {.i64=1},        0,       1, FLAGS, "detection" },
    {   "peak",          0,    0,                       AV_OPT_TYPE_CONST,    {.i64=0},        0,       0, FLAGS, "detection" },
    {   "rms",           0,    0,                       AV_OPT_TYPE_CONST,    {.i64=1},        0,       0, FLAGS, "detection" },
    { "window",          NULL, OFFSET(window_ratio),    AV_OPT_TYPE_DOUBLE,   {.dbl=0.02},     0,      10, FLAGS },
    { NULL }
};
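
/*
 * Illustrative command-line use of these options (not part of this file):
 * trim leading silence below an amplitude of 0.02 using peak detection:
 *
 *   ffmpeg -i in.wav -af silenceremove=start_periods=1:start_threshold=0.02:detection=peak out.wav
 */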

AVFILTER_DEFINE_CLASS(silenceremove);
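
/*
 * Detection works over a sliding window of the window_size most recent
 * values: compute_*() returns the windowed peak/RMS measure that would
 * result if the new sample replaced the oldest one, without touching the
 * window, while update_*() actually stores the sample into the circular
 * buffer and advances the write position.
 */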
static double compute_peak(SilenceRemoveContext *s, double sample)
{
    double new_sum;

    new_sum  = s->sum;
    new_sum -= *s->window_current;
    new_sum += fabs(sample);

    return new_sum / s->window_size;
}

static void update_peak(SilenceRemoveContext *s, double sample)
{
    s->sum -= *s->window_current;
    *s->window_current = fabs(sample);
    s->sum += *s->window_current;

    s->window_current++;
    if (s->window_current >= s->window_end)
        s->window_current = s->window;
}

static double compute_rms(SilenceRemoveContext *s, double sample)
{
    double new_sum;

    new_sum  = s->sum;
    new_sum -= *s->window_current;
    new_sum += sample * sample;

    return sqrt(new_sum / s->window_size);
}

static void update_rms(SilenceRemoveContext *s, double sample)
{
    s->sum -= *s->window_current;
    *s->window_current = sample * sample;
    s->sum += *s->window_current;

    s->window_current++;
    if (s->window_current >= s->window_end)
        s->window_current = s->window;
}

static av_cold int init(AVFilterContext *ctx)
{
    SilenceRemoveContext *s = ctx->priv;

    if (s->stop_periods < 0) {
        s->stop_periods = -s->stop_periods;
        s->restart = 1;
    }

    switch (s->detection) {
    case 0:
        s->update  = update_peak;
        s->compute = compute_peak;
        break;
    case 1:
        s->update  = update_rms;
        s->compute = compute_rms;
        break;
    }

    return 0;
}

static void clear_window(SilenceRemoveContext *s)
{
    memset(s->window, 0, s->window_size * sizeof(*s->window));

    s->window_current = s->window;
    s->window_end = s->window + s->window_size;
    s->sum = 0;
}
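
/*
 * config_input() converts the start/stop durations from AV_TIME_BASE units
 * into a number of samples at the input sample rate, and allocates the
 * holdoff buffers that hold audio while the filter decides whether a
 * stretch of samples counts as silence.
 */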
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SilenceRemoveContext *s = ctx->priv;

    s->window_size = FFMAX((inlink->sample_rate * s->window_ratio), 1) * inlink->channels;
    s->window = av_malloc_array(s->window_size, sizeof(*s->window));
    if (!s->window)
        return AVERROR(ENOMEM);

    clear_window(s);

    s->start_duration = av_rescale(s->start_duration, inlink->sample_rate,
                                   AV_TIME_BASE);
    s->stop_duration  = av_rescale(s->stop_duration, inlink->sample_rate,
                                   AV_TIME_BASE);

    s->start_holdoff = av_malloc_array(FFMAX(s->start_duration, 1),
                                       sizeof(*s->start_holdoff) *
                                       inlink->channels);
    if (!s->start_holdoff)
        return AVERROR(ENOMEM);

    s->start_holdoff_offset = 0;
    s->start_holdoff_end    = 0;
    s->start_found_periods  = 0;

    s->stop_holdoff = av_malloc_array(FFMAX(s->stop_duration, 1),
                                      sizeof(*s->stop_holdoff) *
                                      inlink->channels);
    if (!s->stop_holdoff)
        return AVERROR(ENOMEM);

    s->stop_holdoff_offset = 0;
    s->stop_holdoff_end    = 0;
    s->stop_found_periods  = 0;

    if (s->start_periods)
        s->mode = SILENCE_TRIM;
    else
        s->mode = SILENCE_COPY;

    return 0;
}

static void flush(AVFrame *out, AVFilterLink *outlink,
                  int *nb_samples_written, int *ret)
{
    if (*nb_samples_written) {
        out->nb_samples = *nb_samples_written / outlink->channels;
        *ret = ff_filter_frame(outlink, out);
        *nb_samples_written = 0;
    } else {
        av_frame_free(&out);
    }
}
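
/*
 * filter_frame() consumes interleaved double samples (AV_SAMPLE_FMT_DBL);
 * nb_samples_read and nb_samples_written count individual per-channel
 * samples, so they are divided by the channel count when sizing frames.
 */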
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    SilenceRemoveContext *s = ctx->priv;
    int i, j, threshold, ret = 0;
    int nbs, nb_samples_read, nb_samples_written;
    double *obuf, *ibuf = (double *)in->data[0];
    AVFrame *out;

    nb_samples_read = nb_samples_written = 0;

    switch (s->mode) {
    case SILENCE_TRIM:
silence_trim:
        nbs = in->nb_samples - nb_samples_read / inlink->channels;
        if (!nbs)
            break;

        for (i = 0; i < nbs; i++) {
            threshold = 0;
            for (j = 0; j < inlink->channels; j++) {
                threshold |= s->compute(s, ibuf[j]) > s->start_threshold;
            }

            if (threshold) {
                for (j = 0; j < inlink->channels; j++) {
                    s->update(s, *ibuf);
                    s->start_holdoff[s->start_holdoff_end++] = *ibuf++;
                }
                nb_samples_read += inlink->channels;

                if (s->start_holdoff_end >= s->start_duration * inlink->channels) {
                    if (++s->start_found_periods >= s->start_periods) {
                        s->mode = SILENCE_TRIM_FLUSH;
                        goto silence_trim_flush;
                    }

                    s->start_holdoff_offset = 0;
                    s->start_holdoff_end = 0;
                }
            } else {
                s->start_holdoff_end = 0;

                for (j = 0; j < inlink->channels; j++)
                    s->update(s, ibuf[j]);

                ibuf += inlink->channels;
                nb_samples_read += inlink->channels;
            }
        }
        break;

    case SILENCE_TRIM_FLUSH:
silence_trim_flush:
        nbs  = s->start_holdoff_end - s->start_holdoff_offset;
        nbs -= nbs % inlink->channels;
        if (!nbs)
            break;

        out = ff_get_audio_buffer(inlink, nbs / inlink->channels);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        memcpy(out->data[0], &s->start_holdoff[s->start_holdoff_offset],
               nbs * sizeof(double));
        s->start_holdoff_offset += nbs;

        ret = ff_filter_frame(outlink, out);

        if (s->start_holdoff_offset == s->start_holdoff_end) {
            s->start_holdoff_offset = 0;
            s->start_holdoff_end = 0;
            s->mode = SILENCE_COPY;
            goto silence_copy;
        }
        break;

    case SILENCE_COPY:
silence_copy:
        nbs = in->nb_samples - nb_samples_read / inlink->channels;
        if (!nbs)
            break;

        out = ff_get_audio_buffer(inlink, nbs);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        obuf = (double *)out->data[0];

        if (s->stop_periods) {
            for (i = 0; i < nbs; i++) {
                threshold = 1;
                for (j = 0; j < inlink->channels; j++)
                    threshold &= s->compute(s, ibuf[j]) > s->stop_threshold;

                if (threshold && s->stop_holdoff_end && !s->leave_silence) {
                    s->mode = SILENCE_COPY_FLUSH;
                    flush(out, outlink, &nb_samples_written, &ret);
                    goto silence_copy_flush;
                } else if (threshold) {
                    for (j = 0; j < inlink->channels; j++) {
                        s->update(s, *ibuf);
                        *obuf++ = *ibuf++;
                    }
                    nb_samples_read    += inlink->channels;
                    nb_samples_written += inlink->channels;
                } else if (!threshold) {
                    for (j = 0; j < inlink->channels; j++) {
                        s->update(s, *ibuf);
                        if (s->leave_silence) {
                            *obuf++ = *ibuf;
                            nb_samples_written++;
                        }

                        s->stop_holdoff[s->stop_holdoff_end++] = *ibuf++;
                    }
                    nb_samples_read += inlink->channels;

                    if (s->stop_holdoff_end >= s->stop_duration * inlink->channels) {
                        if (++s->stop_found_periods >= s->stop_periods) {
                            s->stop_holdoff_offset = 0;
                            s->stop_holdoff_end = 0;

                            if (!s->restart) {
                                s->mode = SILENCE_STOP;
                                flush(out, outlink, &nb_samples_written, &ret);
                                goto silence_stop;
                            } else {
                                s->stop_found_periods = 0;
                                s->start_found_periods = 0;
                                s->start_holdoff_offset = 0;
                                s->start_holdoff_end = 0;
                                clear_window(s);
                                s->mode = SILENCE_TRIM;
                                flush(out, outlink, &nb_samples_written, &ret);
                                goto silence_trim;
                            }
                        }
                        s->mode = SILENCE_COPY_FLUSH;
                        flush(out, outlink, &nb_samples_written, &ret);
                        goto silence_copy_flush;
                    }
                }
            }
            flush(out, outlink, &nb_samples_written, &ret);
        } else {
            memcpy(obuf, ibuf, sizeof(double) * nbs * inlink->channels);
            ret = ff_filter_frame(outlink, out);
        }
        break;

    case SILENCE_COPY_FLUSH:
silence_copy_flush:
        nbs  = s->stop_holdoff_end - s->stop_holdoff_offset;
        nbs -= nbs % inlink->channels;
        if (!nbs)
            break;

        out = ff_get_audio_buffer(inlink, nbs / inlink->channels);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        memcpy(out->data[0], &s->stop_holdoff[s->stop_holdoff_offset],
               nbs * sizeof(double));
        s->stop_holdoff_offset += nbs;

        ret = ff_filter_frame(outlink, out);

        if (s->stop_holdoff_offset == s->stop_holdoff_end) {
            s->stop_holdoff_offset = 0;
            s->stop_holdoff_end = 0;
            s->mode = SILENCE_COPY;
            goto silence_copy;
        }
        break;

    case SILENCE_STOP:
silence_stop:
        break;
    }

    av_frame_free(&in);

    return ret;
}
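
/*
 * At EOF, request_frame() flushes any samples still buffered in
 * stop_holdoff, so trailing audio that never completed a full silence
 * period is not lost.
 */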
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SilenceRemoveContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);
    if (ret == AVERROR_EOF && (s->mode == SILENCE_COPY_FLUSH ||
                               s->mode == SILENCE_COPY)) {
        int nbs = s->stop_holdoff_end - s->stop_holdoff_offset;
        if (nbs) {
            AVFrame *frame;

            frame = ff_get_audio_buffer(outlink, nbs / outlink->channels);
            if (!frame)
                return AVERROR(ENOMEM);

            memcpy(frame->data[0], &s->stop_holdoff[s->stop_holdoff_offset],
                   nbs * sizeof(double));
            ret = ff_filter_frame(ctx->inputs[0], frame);
        }
        s->mode = SILENCE_STOP;
    }
    return ret;
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    SilenceRemoveContext *s = ctx->priv;

    av_freep(&s->start_holdoff);
    av_freep(&s->stop_holdoff);
    av_freep(&s->window);
}

static const AVFilterPad silenceremove_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad silenceremove_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_af_silenceremove = {
    .name          = "silenceremove",
    .description   = NULL_IF_CONFIG_SMALL("Remove silence."),
    .priv_size     = sizeof(SilenceRemoveContext),
    .priv_class    = &silenceremove_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = silenceremove_inputs,
    .outputs       = silenceremove_outputs,
};