You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

547 lines
17KB

  1. /*
  2. * Copyright (c) 2001 Heikki Leinonen
  3. * Copyright (c) 2001 Chris Bagwell
  4. * Copyright (c) 2003 Donnie Smith
  5. * Copyright (c) 2014 Paul B Mahol
  6. *
  7. * This file is part of FFmpeg.
  8. *
  9. * FFmpeg is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * FFmpeg is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with FFmpeg; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. */
  23. #include <float.h> /* DBL_MAX */
  24. #include "libavutil/opt.h"
  25. #include "libavutil/timestamp.h"
  26. #include "audio.h"
  27. #include "formats.h"
  28. #include "avfilter.h"
  29. #include "internal.h"
/*
 * States of the silence-removal state machine (see filter_frame()).
 * The flow is: TRIM -> TRIM_FLUSH -> COPY <-> COPY_FLUSH -> STOP,
 * with COPY looping back to TRIM when the "restart" option is active.
 */
enum SilenceMode {
    SILENCE_TRIM,        /* skipping leading silence, buffering candidate audio */
    SILENCE_TRIM_FLUSH,  /* replaying the buffered start-holdoff audio */
    SILENCE_COPY,        /* passing audio through while scanning for silence */
    SILENCE_COPY_FLUSH,  /* replaying the buffered stop-holdoff audio */
    SILENCE_STOP         /* discarding all remaining input */
};
typedef struct SilenceRemoveContext {
    const AVClass *class;

    enum SilenceMode mode;        /* current state of the trim state machine */

    /* "start" side: trimming leading silence */
    int start_periods;            /* non-silence periods required before output starts */
    int64_t start_duration;       /* option is in usecs; rescaled to samples in config_input() */
    double start_threshold;       /* detector level above which a sample is non-silence */

    /* "stop" side: trimming silence in/at the end of the stream */
    int stop_periods;             /* silence periods to remove; negative input sets 'restart' */
    int64_t stop_duration;        /* option is in usecs; rescaled to samples in config_input() */
    double stop_threshold;        /* detector level at/below which a sample is silence */

    /* holdoff buffer for candidate non-silence at the start (interleaved doubles) */
    double *start_holdoff;
    size_t start_holdoff_offset;  /* read position while flushing */
    size_t start_holdoff_end;     /* write position / fill level */
    int start_found_periods;

    /* holdoff buffer for candidate silence while copying (interleaved doubles) */
    double *stop_holdoff;
    size_t stop_holdoff_offset;   /* read position while flushing */
    size_t stop_holdoff_end;      /* write position / fill level */
    int stop_found_periods;

    /* sliding analysis window used by the peak/RMS detectors (ring buffer) */
    double window_ratio;          /* window length as a fraction of one second */
    double *window;               /* ring buffer storage */
    double *window_current;       /* ring buffer cursor */
    double *window_end;           /* one past the last slot */
    int window_size;              /* slots = samples-per-window * channels */
    double sum;                   /* running sum over the window contents */

    int leave_silence;            /* keep the detected silence in the output */
    int restart;                  /* re-enter TRIM after removing a silence period */
    int64_t next_pts;             /* pts to stamp on the next output frame */

    int detection;                /* 0 = peak detector, 1 = RMS detector */
    void (*update)(struct SilenceRemoveContext *s, double sample);
    double (*compute)(struct SilenceRemoveContext *s, double sample);
} SilenceRemoveContext;
#define OFFSET(x) offsetof(SilenceRemoveContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM

/*
 * User options.  Durations are AV_OPT_TYPE_DURATION and are rescaled from
 * microseconds to sample counts in config_input().
 * NOTE(review): the max of 9000 on the two duration options looks suspicious —
 * AV_OPT_TYPE_DURATION values are in microseconds, so this would cap the
 * durations at 9 ms; confirm the intended upper bound.
 */
static const AVOption silenceremove_options[] = {
    { "start_periods",   NULL, OFFSET(start_periods),   AV_OPT_TYPE_INT,      {.i64=0},     0,      9000, FLAGS },
    { "start_duration",  NULL, OFFSET(start_duration),  AV_OPT_TYPE_DURATION, {.i64=0},     0,      9000, FLAGS },
    { "start_threshold", NULL, OFFSET(start_threshold), AV_OPT_TYPE_DOUBLE,   {.dbl=0},     0,   DBL_MAX, FLAGS },
    { "stop_periods",    NULL, OFFSET(stop_periods),    AV_OPT_TYPE_INT,      {.i64=0}, -9000,      9000, FLAGS },
    { "stop_duration",   NULL, OFFSET(stop_duration),   AV_OPT_TYPE_DURATION, {.i64=0},     0,      9000, FLAGS },
    { "stop_threshold",  NULL, OFFSET(stop_threshold),  AV_OPT_TYPE_DOUBLE,   {.dbl=0},     0,   DBL_MAX, FLAGS },
    { "leave_silence",   NULL, OFFSET(leave_silence),   AV_OPT_TYPE_BOOL,     {.i64=0},     0,         1, FLAGS },
    { "detection",       NULL, OFFSET(detection),       AV_OPT_TYPE_INT,      {.i64=1},     0,         1, FLAGS, "detection" },
    {   "peak",          0,    0,                       AV_OPT_TYPE_CONST,    {.i64=0},     0,         0, FLAGS, "detection" },
    {   "rms",           0,    0,                       AV_OPT_TYPE_CONST,    {.i64=1},     0,         0, FLAGS, "detection" },
    { "window",          NULL, OFFSET(window_ratio),    AV_OPT_TYPE_DOUBLE,   {.dbl=0.02},  0,        10, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(silenceremove);
  84. static double compute_peak(SilenceRemoveContext *s, double sample)
  85. {
  86. double new_sum;
  87. new_sum = s->sum;
  88. new_sum -= *s->window_current;
  89. new_sum += fabs(sample);
  90. return new_sum / s->window_size;
  91. }
  92. static void update_peak(SilenceRemoveContext *s, double sample)
  93. {
  94. s->sum -= *s->window_current;
  95. *s->window_current = fabs(sample);
  96. s->sum += *s->window_current;
  97. s->window_current++;
  98. if (s->window_current >= s->window_end)
  99. s->window_current = s->window;
  100. }
  101. static double compute_rms(SilenceRemoveContext *s, double sample)
  102. {
  103. double new_sum;
  104. new_sum = s->sum;
  105. new_sum -= *s->window_current;
  106. new_sum += sample * sample;
  107. return sqrt(new_sum / s->window_size);
  108. }
  109. static void update_rms(SilenceRemoveContext *s, double sample)
  110. {
  111. s->sum -= *s->window_current;
  112. *s->window_current = sample * sample;
  113. s->sum += *s->window_current;
  114. s->window_current++;
  115. if (s->window_current >= s->window_end)
  116. s->window_current = s->window;
  117. }
  118. static av_cold int init(AVFilterContext *ctx)
  119. {
  120. SilenceRemoveContext *s = ctx->priv;
  121. if (s->stop_periods < 0) {
  122. s->stop_periods = -s->stop_periods;
  123. s->restart = 1;
  124. }
  125. switch (s->detection) {
  126. case 0:
  127. s->update = update_peak;
  128. s->compute = compute_peak;
  129. break;
  130. case 1:
  131. s->update = update_rms;
  132. s->compute = compute_rms;
  133. break;
  134. };
  135. return 0;
  136. }
  137. static void clear_window(SilenceRemoveContext *s)
  138. {
  139. memset(s->window, 0, s->window_size * sizeof(*s->window));
  140. s->window_current = s->window;
  141. s->window_end = s->window + s->window_size;
  142. s->sum = 0;
  143. }
/*
 * Input link configuration: size and allocate the analysis window and the
 * two holdoff buffers, convert the duration options from microseconds to
 * per-channel sample counts, and pick the initial state.  All buffers are
 * released in uninit(), including on the early-error paths here.
 */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SilenceRemoveContext *s = ctx->priv;

    /* window_ratio is a fraction of one second; keep at least one sample
     * per channel so the detectors never divide by zero. */
    s->window_size = FFMAX((inlink->sample_rate * s->window_ratio), 1) * inlink->channels;

    s->window = av_malloc_array(s->window_size, sizeof(*s->window));
    if (!s->window)
        return AVERROR(ENOMEM);

    clear_window(s);

    /* AV_OPT_TYPE_DURATION options are in AV_TIME_BASE (microsecond) units;
     * rescale them to sample counts at the input rate. */
    s->start_duration = av_rescale(s->start_duration, inlink->sample_rate,
                                   AV_TIME_BASE);
    s->stop_duration  = av_rescale(s->stop_duration, inlink->sample_rate,
                                   AV_TIME_BASE);

    /* Holdoff buffers hold interleaved samples: duration (in samples, at
     * least 1) times the channel count.  The channel factor is folded into
     * the element size passed to av_malloc_array(). */
    s->start_holdoff = av_malloc_array(FFMAX(s->start_duration, 1),
                                       sizeof(*s->start_holdoff) *
                                       inlink->channels);
    if (!s->start_holdoff)
        return AVERROR(ENOMEM);

    s->start_holdoff_offset = 0;
    s->start_holdoff_end    = 0;
    s->start_found_periods  = 0;

    s->stop_holdoff = av_malloc_array(FFMAX(s->stop_duration, 1),
                                      sizeof(*s->stop_holdoff) *
                                      inlink->channels);
    if (!s->stop_holdoff)
        return AVERROR(ENOMEM);

    s->stop_holdoff_offset = 0;
    s->stop_holdoff_end    = 0;
    s->stop_found_periods  = 0;

    /* With no start trimming requested, go straight to pass-through. */
    if (s->start_periods)
        s->mode = SILENCE_TRIM;
    else
        s->mode = SILENCE_COPY;

    return 0;
}
  179. static void flush(SilenceRemoveContext *s,
  180. AVFrame *out, AVFilterLink *outlink,
  181. int *nb_samples_written, int *ret)
  182. {
  183. if (*nb_samples_written) {
  184. out->nb_samples = *nb_samples_written / outlink->channels;
  185. out->pts = s->next_pts;
  186. s->next_pts += av_rescale_q(out->nb_samples,
  187. (AVRational){1, outlink->sample_rate},
  188. outlink->time_base);
  189. *ret = ff_filter_frame(outlink, out);
  190. *nb_samples_written = 0;
  191. } else {
  192. av_frame_free(&out);
  193. }
  194. }
/*
 * Consume one input frame and emit zero or more output frames according to
 * the current state.  The machine mirrors SoX's silence effect:
 *
 *   TRIM       - skip leading silence; buffer candidate audio in
 *                start_holdoff until start_periods loud periods of
 *                start_duration samples have been seen.
 *   TRIM_FLUSH - replay the buffered start_holdoff audio, then fall
 *                through to COPY.
 *   COPY       - pass audio through; when stop_periods is set, buffer
 *                quiet samples in stop_holdoff and decide whether they
 *                are a silence period to drop.
 *   COPY_FLUSH - audio resumed before stop_duration elapsed: replay the
 *                buffered stop_holdoff audio, then return to COPY.
 *   STOP       - enough silence periods removed; discard all input.
 *
 * All counters (nbs, nb_samples_read, nb_samples_written, holdoff indices)
 * are in interleaved per-channel samples, i.e. frame samples * channels.
 * The gotos let one input frame be processed by several states in a row.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    SilenceRemoveContext *s = ctx->priv;
    int i, j, threshold, ret = 0;
    int nbs, nb_samples_read, nb_samples_written;
    double *obuf, *ibuf = (double *)in->data[0];
    AVFrame *out;

    nb_samples_read = nb_samples_written = 0;

    switch (s->mode) {
    case SILENCE_TRIM:
silence_trim:
        nbs = in->nb_samples - nb_samples_read / inlink->channels;
        if (!nbs)
            break;

        for (i = 0; i < nbs; i++) {
            /* Loud if ANY channel exceeds the start threshold. */
            threshold = 0;
            for (j = 0; j < inlink->channels; j++) {
                threshold |= s->compute(s, ibuf[j]) > s->start_threshold;
            }

            if (threshold) {
                /* Buffer the loud sample; it only becomes output once a
                 * full start period has accumulated. */
                for (j = 0; j < inlink->channels; j++) {
                    s->update(s, *ibuf);
                    s->start_holdoff[s->start_holdoff_end++] = *ibuf++;
                }
                nb_samples_read += inlink->channels;

                if (s->start_holdoff_end >= s->start_duration * inlink->channels) {
                    if (++s->start_found_periods >= s->start_periods) {
                        s->mode = SILENCE_TRIM_FLUSH;
                        goto silence_trim_flush;
                    }
                    /* Period counted but more are required: restart the
                     * holdoff accumulation. */
                    s->start_holdoff_offset = 0;
                    s->start_holdoff_end = 0;
                }
            } else {
                /* Silence resets the candidate period; the sample is
                 * consumed (trimmed) but still feeds the detector window. */
                s->start_holdoff_end = 0;

                for (j = 0; j < inlink->channels; j++)
                    s->update(s, ibuf[j]);

                ibuf += inlink->channels;
                nb_samples_read += inlink->channels;
            }
        }
        break;

    case SILENCE_TRIM_FLUSH:
silence_trim_flush:
        /* Emit whole interleaved samples from the start holdoff buffer. */
        nbs = s->start_holdoff_end - s->start_holdoff_offset;
        nbs -= nbs % inlink->channels;
        if (!nbs)
            break;

        out = ff_get_audio_buffer(inlink, nbs / inlink->channels);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        memcpy(out->data[0], &s->start_holdoff[s->start_holdoff_offset],
               nbs * sizeof(double));

        out->pts = s->next_pts;
        s->next_pts += av_rescale_q(out->nb_samples,
                                    (AVRational){1, outlink->sample_rate},
                                    outlink->time_base);

        s->start_holdoff_offset += nbs;

        ret = ff_filter_frame(outlink, out);

        if (s->start_holdoff_offset == s->start_holdoff_end) {
            s->start_holdoff_offset = 0;
            s->start_holdoff_end = 0;
            s->mode = SILENCE_COPY;
            goto silence_copy;
        }
        break;

    case SILENCE_COPY:
silence_copy:
        nbs = in->nb_samples - nb_samples_read / inlink->channels;
        if (!nbs)
            break;

        out = ff_get_audio_buffer(inlink, nbs);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        obuf = (double *)out->data[0];

        if (s->stop_periods) {
            for (i = 0; i < nbs; i++) {
                /* Silent only if ALL channels are at/below the stop
                 * threshold (note: &= of the "loud" predicate). */
                threshold = 1;
                for (j = 0; j < inlink->channels; j++)
                    threshold &= s->compute(s, ibuf[j]) > s->stop_threshold;

                if (threshold && s->stop_holdoff_end && !s->leave_silence) {
                    /* Audio resumed before the silence qualified as a full
                     * period: replay the buffered silence first. */
                    s->mode = SILENCE_COPY_FLUSH;
                    flush(s, out, outlink, &nb_samples_written, &ret);
                    goto silence_copy_flush;
                } else if (threshold) {
                    /* Loud sample with no pending silence: copy through. */
                    for (j = 0; j < inlink->channels; j++) {
                        s->update(s, *ibuf);
                        *obuf++ = *ibuf++;
                    }
                    nb_samples_read    += inlink->channels;
                    nb_samples_written += inlink->channels;
                } else if (!threshold) {
                    /* Quiet sample: buffer it (and optionally also copy it
                     * through when leave_silence is set). */
                    for (j = 0; j < inlink->channels; j++) {
                        s->update(s, *ibuf);
                        if (s->leave_silence) {
                            *obuf++ = *ibuf;
                            nb_samples_written++;
                        }

                        s->stop_holdoff[s->stop_holdoff_end++] = *ibuf++;
                    }
                    nb_samples_read += inlink->channels;

                    if (s->stop_holdoff_end >= s->stop_duration * inlink->channels) {
                        /* A full silence period accumulated. */
                        if (++s->stop_found_periods >= s->stop_periods) {
                            /* Drop the buffered silence. */
                            s->stop_holdoff_offset = 0;
                            s->stop_holdoff_end = 0;

                            if (!s->restart) {
                                s->mode = SILENCE_STOP;
                                flush(s, out, outlink, &nb_samples_written, &ret);
                                goto silence_stop;
                            } else {
                                /* Restart mode: reset everything and go
                                 * back to trimming leading silence. */
                                s->stop_found_periods = 0;
                                s->start_found_periods = 0;
                                s->start_holdoff_offset = 0;
                                s->start_holdoff_end = 0;
                                clear_window(s);
                                s->mode = SILENCE_TRIM;
                                flush(s, out, outlink, &nb_samples_written, &ret);
                                goto silence_trim;
                            }
                        }
                        s->mode = SILENCE_COPY_FLUSH;
                        flush(s, out, outlink, &nb_samples_written, &ret);
                        goto silence_copy_flush;
                    }
                }
            }
            flush(s, out, outlink, &nb_samples_written, &ret);
        } else {
            /* No stop trimming requested: straight pass-through copy. */
            memcpy(obuf, ibuf, sizeof(double) * nbs * inlink->channels);

            out->pts = s->next_pts;
            s->next_pts += av_rescale_q(out->nb_samples,
                                        (AVRational){1, outlink->sample_rate},
                                        outlink->time_base);

            ret = ff_filter_frame(outlink, out);
        }
        break;

    case SILENCE_COPY_FLUSH:
silence_copy_flush:
        /* Emit whole interleaved samples from the stop holdoff buffer. */
        nbs = s->stop_holdoff_end - s->stop_holdoff_offset;
        nbs -= nbs % inlink->channels;
        if (!nbs)
            break;

        out = ff_get_audio_buffer(inlink, nbs / inlink->channels);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        memcpy(out->data[0], &s->stop_holdoff[s->stop_holdoff_offset],
               nbs * sizeof(double));
        s->stop_holdoff_offset += nbs;

        out->pts = s->next_pts;
        s->next_pts += av_rescale_q(out->nb_samples,
                                    (AVRational){1, outlink->sample_rate},
                                    outlink->time_base);

        ret = ff_filter_frame(outlink, out);

        if (s->stop_holdoff_offset == s->stop_holdoff_end) {
            s->stop_holdoff_offset = 0;
            s->stop_holdoff_end = 0;
            s->mode = SILENCE_COPY;
            goto silence_copy;
        }
        break;

    case SILENCE_STOP:
silence_stop:
        /* Everything past the stop point is discarded. */
        break;
    }

    av_frame_free(&in);

    return ret;
}
  370. static int request_frame(AVFilterLink *outlink)
  371. {
  372. AVFilterContext *ctx = outlink->src;
  373. SilenceRemoveContext *s = ctx->priv;
  374. int ret;
  375. ret = ff_request_frame(ctx->inputs[0]);
  376. if (ret == AVERROR_EOF && (s->mode == SILENCE_COPY_FLUSH ||
  377. s->mode == SILENCE_COPY)) {
  378. int nbs = s->stop_holdoff_end - s->stop_holdoff_offset;
  379. if (nbs) {
  380. AVFrame *frame;
  381. frame = ff_get_audio_buffer(outlink, nbs / outlink->channels);
  382. if (!frame)
  383. return AVERROR(ENOMEM);
  384. memcpy(frame->data[0], &s->stop_holdoff[s->stop_holdoff_offset],
  385. nbs * sizeof(double));
  386. frame->pts = s->next_pts;
  387. s->next_pts += av_rescale_q(frame->nb_samples,
  388. (AVRational){1, outlink->sample_rate},
  389. outlink->time_base);
  390. ret = ff_filter_frame(ctx->inputs[0], frame);
  391. }
  392. s->mode = SILENCE_STOP;
  393. }
  394. return ret;
  395. }
  396. static int query_formats(AVFilterContext *ctx)
  397. {
  398. AVFilterFormats *formats = NULL;
  399. AVFilterChannelLayouts *layouts = NULL;
  400. static const enum AVSampleFormat sample_fmts[] = {
  401. AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_NONE
  402. };
  403. int ret;
  404. layouts = ff_all_channel_counts();
  405. if (!layouts)
  406. return AVERROR(ENOMEM);
  407. ret = ff_set_common_channel_layouts(ctx, layouts);
  408. if (ret < 0)
  409. return ret;
  410. formats = ff_make_format_list(sample_fmts);
  411. if (!formats)
  412. return AVERROR(ENOMEM);
  413. ret = ff_set_common_formats(ctx, formats);
  414. if (ret < 0)
  415. return ret;
  416. formats = ff_all_samplerates();
  417. if (!formats)
  418. return AVERROR(ENOMEM);
  419. return ff_set_common_samplerates(ctx, formats);
  420. }
  421. static av_cold void uninit(AVFilterContext *ctx)
  422. {
  423. SilenceRemoveContext *s = ctx->priv;
  424. av_freep(&s->start_holdoff);
  425. av_freep(&s->stop_holdoff);
  426. av_freep(&s->window);
  427. }
/* Single audio input; buffers are sized in config_input(). */
static const AVFilterPad silenceremove_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single audio output; request_frame also flushes pending holdoff samples at EOF. */
static const AVFilterPad silenceremove_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
    },
    { NULL }
};
/* Filter registration entry for "silenceremove". */
AVFilter ff_af_silenceremove = {
    .name          = "silenceremove",
    .description   = NULL_IF_CONFIG_SMALL("Remove silence."),
    .priv_size     = sizeof(SilenceRemoveContext),
    .priv_class    = &silenceremove_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = silenceremove_inputs,
    .outputs       = silenceremove_outputs,
};