/*
 * Copyright (c) 2009 Rob Sykes <robs@users.sourceforge.net>
 * Copyright (c) 2013 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <float.h>

#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"

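/* Per-channel running statistics, accumulated one sample at a time in
 * update_stat(). */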
typedef struct ChannelStats {
    double last;
    double sigma_x, sigma_x2;
    double avg_sigma_x2, min_sigma_x2, max_sigma_x2;
    double min, max;
    double nmin, nmax;
    double min_run, max_run;
    double min_runs, max_runs;
    double min_diff, max_diff;
    double diff1_sum;
    uint64_t mask, imask;
    uint64_t min_count, max_count;
    uint64_t nb_samples;
} ChannelStats;

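/* Filter private context: the per-channel state plus the user options
 * (length, metadata, reset) and the values derived from them at
 * configuration time. */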
typedef struct {
    const AVClass *class;
    ChannelStats *chstats;
    int nb_channels;
    uint64_t tc_samples;
    double time_constant;
    double mult;
    int metadata;
    int reset_count;
    int nb_frames;
    int maxbitdepth;
} AudioStatsContext;

#define OFFSET(x) offsetof(AudioStatsContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption astats_options[] = {
    { "length", "set the window length", OFFSET(time_constant), AV_OPT_TYPE_DOUBLE, {.dbl=.05}, .01, 10, FLAGS },
    { "metadata", "inject metadata in the filtergraph", OFFSET(metadata), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    { "reset", "recalculate stats after this many frames", OFFSET(reset_count), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(astats);

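/* Accept any channel count and sample rate; restrict sample formats to the
 * integer and floating-point formats handled in filter_frame() below. */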
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P,
        AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P,
        AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}

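/* Re-initialize all per-channel accumulators; called from config_output()
 * and again every reset_count frames when the reset option is set. */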
static void reset_stats(AudioStatsContext *s)
{
    int c;

    for (c = 0; c < s->nb_channels; c++) {
        ChannelStats *p = &s->chstats[c];

        p->min = p->nmin = p->min_sigma_x2 = DBL_MAX;
        p->max = p->nmax = p->max_sigma_x2 = DBL_MIN;
        p->min_diff = DBL_MAX;
        p->max_diff = DBL_MIN;
        p->sigma_x = 0;
        p->sigma_x2 = 0;
        p->avg_sigma_x2 = 0;
        p->min_run = 0;
        p->max_run = 0;
        p->min_runs = 0;
        p->max_runs = 0;
        p->diff1_sum = 0;
        p->mask = 0;
        p->imask = 0xFFFFFFFFFFFFFFFF;
        p->min_count = 0;
        p->max_count = 0;
        p->nb_samples = 0;
    }
}

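/* Allocate per-channel state and derive the smoothing factor (mult) and the
 * measurement window in samples (tc_samples) from the "length" option and
 * the output sample rate. */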
static int config_output(AVFilterLink *outlink)
{
    AudioStatsContext *s = outlink->src->priv;

    s->chstats = av_calloc(sizeof(*s->chstats), outlink->channels);
    if (!s->chstats)
        return AVERROR(ENOMEM);
    s->nb_channels = outlink->channels;
    s->mult = exp((-1 / s->time_constant / outlink->sample_rate));
    s->tc_samples = 5 * s->time_constant * outlink->sample_rate + .5;
    s->nb_frames = 0;
    s->maxbitdepth = av_get_bytes_per_sample(outlink->format) * 8;

    reset_stats(s);

    return 0;
}

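/* Derive the effective bit depth from the bits that actually toggled:
 * "den" counts bit positions from the lowest toggling bit up to the MSB of
 * the sample container, "num" counts how many of those bits toggle. */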
static void bit_depth(AudioStatsContext *s, uint64_t mask, uint64_t imask, AVRational *depth)
{
    unsigned result = s->maxbitdepth;

    mask = mask & (~imask);

    for (; result && !(mask & 1); --result, mask >>= 1);

    depth->den = result;
    depth->num = 0;
    for (; result; --result, mask >>= 1)
        if (mask & 1)
            depth->num++;
}

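/* Fold one sample into the per-channel accumulators: d is the raw sample,
 * nd the sample scaled to the nominal [-1, 1] range, and i an integer view
 * of the sample used only for the bit-depth masks. */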
static inline void update_stat(AudioStatsContext *s, ChannelStats *p, double d, double nd, int64_t i)
{
    if (d < p->min) {
        p->min = d;
        p->nmin = nd;
        p->min_run = 1;
        p->min_runs = 0;
        p->min_count = 1;
    } else if (d == p->min) {
        p->min_count++;
        p->min_run = d == p->last ? p->min_run + 1 : 1;
    } else if (p->last == p->min) {
        p->min_runs += p->min_run * p->min_run;
    }

    if (d > p->max) {
        p->max = d;
        p->nmax = nd;
        p->max_run = 1;
        p->max_runs = 0;
        p->max_count = 1;
    } else if (d == p->max) {
        p->max_count++;
        p->max_run = d == p->last ? p->max_run + 1 : 1;
    } else if (p->last == p->max) {
        p->max_runs += p->max_run * p->max_run;
    }

    p->sigma_x += nd;
    p->sigma_x2 += nd * nd;
    p->avg_sigma_x2 = p->avg_sigma_x2 * s->mult + (1.0 - s->mult) * nd * nd;
    p->min_diff = FFMIN(p->min_diff, fabs(d - p->last));
    p->max_diff = FFMAX(p->max_diff, fabs(d - p->last));
    p->diff1_sum += fabs(d - p->last);
    p->last = d;
    p->mask |= i;
    p->imask &= i;

    if (p->nb_samples >= s->tc_samples) {
        p->max_sigma_x2 = FFMAX(p->max_sigma_x2, p->avg_sigma_x2);
        p->min_sigma_x2 = FFMIN(p->min_sigma_x2, p->avg_sigma_x2);
    }
    p->nb_samples++;
}

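/* Store one statistic in the frame metadata dictionary, keyed as
 * lavfi.astats.<channel>.<key> for per-channel values or lavfi.astats.<key>
 * when chan is 0. */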
static void set_meta(AVDictionary **metadata, int chan, const char *key,
                     const char *fmt, double val)
{
    uint8_t value[128];
    uint8_t key2[128];

    snprintf(value, sizeof(value), fmt, val);
    if (chan)
        snprintf(key2, sizeof(key2), "lavfi.astats.%d.%s", chan, key);
    else
        snprintf(key2, sizeof(key2), "lavfi.astats.%s", key);
    av_dict_set(metadata, key2, value, 0);
}

#define LINEAR_TO_DB(x) (log10(x) * 20)

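/* Export the per-channel and overall statistics as frame metadata; only
 * called when the metadata option is enabled. */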
static void set_metadata(AudioStatsContext *s, AVDictionary **metadata)
{
    uint64_t mask = 0, imask = 0xFFFFFFFFFFFFFFFF, min_count = 0, max_count = 0, nb_samples = 0;
    double min_runs = 0, max_runs = 0,
           min = DBL_MAX, max = DBL_MIN, min_diff = DBL_MAX, max_diff = 0,
           nmin = DBL_MAX, nmax = DBL_MIN,
           max_sigma_x = 0,
           diff1_sum = 0,
           sigma_x = 0,
           sigma_x2 = 0,
           min_sigma_x2 = DBL_MAX,
           max_sigma_x2 = DBL_MIN;
    AVRational depth;
    int c;

    for (c = 0; c < s->nb_channels; c++) {
        ChannelStats *p = &s->chstats[c];

        if (p->nb_samples < s->tc_samples)
            p->min_sigma_x2 = p->max_sigma_x2 = p->sigma_x2 / p->nb_samples;

        min = FFMIN(min, p->min);
        max = FFMAX(max, p->max);
        nmin = FFMIN(nmin, p->nmin);
        nmax = FFMAX(nmax, p->nmax);
        min_diff = FFMIN(min_diff, p->min_diff);
        max_diff = FFMAX(max_diff, p->max_diff);
        diff1_sum += p->diff1_sum;
        min_sigma_x2 = FFMIN(min_sigma_x2, p->min_sigma_x2);
        max_sigma_x2 = FFMAX(max_sigma_x2, p->max_sigma_x2);
        sigma_x += p->sigma_x;
        sigma_x2 += p->sigma_x2;
        min_count += p->min_count;
        max_count += p->max_count;
        min_runs += p->min_runs;
        max_runs += p->max_runs;
        mask |= p->mask;
        imask &= p->imask;
        nb_samples += p->nb_samples;
        if (fabs(p->sigma_x) > fabs(max_sigma_x))
            max_sigma_x = p->sigma_x;

        set_meta(metadata, c + 1, "DC_offset", "%f", p->sigma_x / p->nb_samples);
        set_meta(metadata, c + 1, "Min_level", "%f", p->min);
        set_meta(metadata, c + 1, "Max_level", "%f", p->max);
        set_meta(metadata, c + 1, "Min_difference", "%f", p->min_diff);
        set_meta(metadata, c + 1, "Max_difference", "%f", p->max_diff);
        set_meta(metadata, c + 1, "Mean_difference", "%f", p->diff1_sum / (p->nb_samples - 1));
        set_meta(metadata, c + 1, "Peak_level", "%f", LINEAR_TO_DB(FFMAX(-p->nmin, p->nmax)));
        set_meta(metadata, c + 1, "RMS_level", "%f", LINEAR_TO_DB(sqrt(p->sigma_x2 / p->nb_samples)));
        set_meta(metadata, c + 1, "RMS_peak", "%f", LINEAR_TO_DB(sqrt(p->max_sigma_x2)));
        set_meta(metadata, c + 1, "RMS_trough", "%f", LINEAR_TO_DB(sqrt(p->min_sigma_x2)));
        set_meta(metadata, c + 1, "Crest_factor", "%f", p->sigma_x2 ? FFMAX(-p->nmin, p->nmax) / sqrt(p->sigma_x2 / p->nb_samples) : 1);
        set_meta(metadata, c + 1, "Flat_factor", "%f", LINEAR_TO_DB((p->min_runs + p->max_runs) / (p->min_count + p->max_count)));
        set_meta(metadata, c + 1, "Peak_count", "%f", (float)(p->min_count + p->max_count));
        bit_depth(s, p->mask, p->imask, &depth);
        set_meta(metadata, c + 1, "Bit_depth", "%f", depth.num);
        set_meta(metadata, c + 1, "Bit_depth2", "%f", depth.den);
    }

    set_meta(metadata, 0, "Overall.DC_offset", "%f", max_sigma_x / (nb_samples / s->nb_channels));
    set_meta(metadata, 0, "Overall.Min_level", "%f", min);
    set_meta(metadata, 0, "Overall.Max_level", "%f", max);
    set_meta(metadata, 0, "Overall.Min_difference", "%f", min_diff);
    set_meta(metadata, 0, "Overall.Max_difference", "%f", max_diff);
    set_meta(metadata, 0, "Overall.Mean_difference", "%f", diff1_sum / (nb_samples - s->nb_channels));
    set_meta(metadata, 0, "Overall.Peak_level", "%f", LINEAR_TO_DB(FFMAX(-nmin, nmax)));
    set_meta(metadata, 0, "Overall.RMS_level", "%f", LINEAR_TO_DB(sqrt(sigma_x2 / nb_samples)));
    set_meta(metadata, 0, "Overall.RMS_peak", "%f", LINEAR_TO_DB(sqrt(max_sigma_x2)));
    set_meta(metadata, 0, "Overall.RMS_trough", "%f", LINEAR_TO_DB(sqrt(min_sigma_x2)));
    set_meta(metadata, 0, "Overall.Flat_factor", "%f", LINEAR_TO_DB((min_runs + max_runs) / (min_count + max_count)));
    set_meta(metadata, 0, "Overall.Peak_count", "%f", (float)(min_count + max_count) / (double)s->nb_channels);
    bit_depth(s, mask, imask, &depth);
    set_meta(metadata, 0, "Overall.Bit_depth", "%f", depth.num);
    set_meta(metadata, 0, "Overall.Bit_depth2", "%f", depth.den);
    set_meta(metadata, 0, "Overall.Number_of_samples", "%f", nb_samples / s->nb_channels);
}

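/* Per-frame entry point: handle the periodic reset, feed every sample to
 * update_stat() with format-specific normalization, optionally attach
 * metadata, then pass the frame through unchanged. */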
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AudioStatsContext *s = inlink->dst->priv;
    AVDictionary **metadata = avpriv_frame_get_metadatap(buf);
    const int channels = s->nb_channels;
    int i, c;

    if (s->reset_count > 0) {
        if (s->nb_frames >= s->reset_count) {
            reset_stats(s);
            s->nb_frames = 0;
        }
        s->nb_frames++;
    }

    switch (inlink->format) {
    case AV_SAMPLE_FMT_DBLP:
        for (c = 0; c < channels; c++) {
            ChannelStats *p = &s->chstats[c];
            const double *src = (const double *)buf->extended_data[c];

            for (i = 0; i < buf->nb_samples; i++, src++)
                update_stat(s, p, *src, *src, llrint(*src * (UINT64_C(1) << 63)));
        }
        break;
    case AV_SAMPLE_FMT_DBL: {
        const double *src = (const double *)buf->extended_data[0];

        for (i = 0; i < buf->nb_samples; i++) {
            for (c = 0; c < channels; c++, src++)
                update_stat(s, &s->chstats[c], *src, *src, llrint(*src * (UINT64_C(1) << 63)));
        }}
        break;
    case AV_SAMPLE_FMT_FLTP:
        for (c = 0; c < channels; c++) {
            ChannelStats *p = &s->chstats[c];
            const float *src = (const float *)buf->extended_data[c];

            for (i = 0; i < buf->nb_samples; i++, src++)
                update_stat(s, p, *src, *src, llrint(*src * (UINT64_C(1) << 63)));
        }
        break;
    case AV_SAMPLE_FMT_FLT: {
        const float *src = (const float *)buf->extended_data[0];

        for (i = 0; i < buf->nb_samples; i++) {
            for (c = 0; c < channels; c++, src++)
                update_stat(s, &s->chstats[c], *src, *src, llrint(*src * (UINT64_C(1) << 63)));
        }}
        break;
    case AV_SAMPLE_FMT_S32P:
        for (c = 0; c < channels; c++) {
            ChannelStats *p = &s->chstats[c];
            const int32_t *src = (const int32_t *)buf->extended_data[c];

            for (i = 0; i < buf->nb_samples; i++, src++)
                update_stat(s, p, *src, *src / (double)INT32_MAX, *src);
        }
        break;
    case AV_SAMPLE_FMT_S32: {
        const int32_t *src = (const int32_t *)buf->extended_data[0];

        for (i = 0; i < buf->nb_samples; i++) {
            for (c = 0; c < channels; c++, src++)
                update_stat(s, &s->chstats[c], *src, *src / (double)INT32_MAX, *src);
        }}
        break;
    case AV_SAMPLE_FMT_S16P:
        for (c = 0; c < channels; c++) {
            ChannelStats *p = &s->chstats[c];
            const int16_t *src = (const int16_t *)buf->extended_data[c];

            for (i = 0; i < buf->nb_samples; i++, src++)
                update_stat(s, p, *src, *src / (double)INT16_MAX, *src);
        }
        break;
    case AV_SAMPLE_FMT_S16: {
        const int16_t *src = (const int16_t *)buf->extended_data[0];

        for (i = 0; i < buf->nb_samples; i++) {
            for (c = 0; c < channels; c++, src++)
                update_stat(s, &s->chstats[c], *src, *src / (double)INT16_MAX, *src);
        }}
        break;
    }

    if (s->metadata)
        set_metadata(s, metadata);

    return ff_filter_frame(inlink->dst->outputs[0], buf);
}

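/* Log the accumulated per-channel and overall statistics; called once from
 * uninit() after the whole stream has been processed. */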
static void print_stats(AVFilterContext *ctx)
{
    AudioStatsContext *s = ctx->priv;
    uint64_t mask = 0, imask = 0xFFFFFFFFFFFFFFFF, min_count = 0, max_count = 0, nb_samples = 0;
    double min_runs = 0, max_runs = 0,
           min = DBL_MAX, max = DBL_MIN, min_diff = DBL_MAX, max_diff = 0,
           nmin = DBL_MAX, nmax = DBL_MIN,
           max_sigma_x = 0,
           diff1_sum = 0,
           sigma_x = 0,
           sigma_x2 = 0,
           min_sigma_x2 = DBL_MAX,
           max_sigma_x2 = DBL_MIN;
    AVRational depth;
    int c;

    for (c = 0; c < s->nb_channels; c++) {
        ChannelStats *p = &s->chstats[c];

        if (p->nb_samples < s->tc_samples)
            p->min_sigma_x2 = p->max_sigma_x2 = p->sigma_x2 / p->nb_samples;

        min = FFMIN(min, p->min);
        max = FFMAX(max, p->max);
        nmin = FFMIN(nmin, p->nmin);
        nmax = FFMAX(nmax, p->nmax);
        min_diff = FFMIN(min_diff, p->min_diff);
        max_diff = FFMAX(max_diff, p->max_diff);
        diff1_sum += p->diff1_sum;
        min_sigma_x2 = FFMIN(min_sigma_x2, p->min_sigma_x2);
        max_sigma_x2 = FFMAX(max_sigma_x2, p->max_sigma_x2);
        sigma_x += p->sigma_x;
        sigma_x2 += p->sigma_x2;
        min_count += p->min_count;
        max_count += p->max_count;
        min_runs += p->min_runs;
        max_runs += p->max_runs;
        mask |= p->mask;
        imask &= p->imask;
        nb_samples += p->nb_samples;
        if (fabs(p->sigma_x) > fabs(max_sigma_x))
            max_sigma_x = p->sigma_x;

        av_log(ctx, AV_LOG_INFO, "Channel: %d\n", c + 1);
        av_log(ctx, AV_LOG_INFO, "DC offset: %f\n", p->sigma_x / p->nb_samples);
        av_log(ctx, AV_LOG_INFO, "Min level: %f\n", p->min);
        av_log(ctx, AV_LOG_INFO, "Max level: %f\n", p->max);
        av_log(ctx, AV_LOG_INFO, "Min difference: %f\n", p->min_diff);
        av_log(ctx, AV_LOG_INFO, "Max difference: %f\n", p->max_diff);
        av_log(ctx, AV_LOG_INFO, "Mean difference: %f\n", p->diff1_sum / (p->nb_samples - 1));
        av_log(ctx, AV_LOG_INFO, "Peak level dB: %f\n", LINEAR_TO_DB(FFMAX(-p->nmin, p->nmax)));
        av_log(ctx, AV_LOG_INFO, "RMS level dB: %f\n", LINEAR_TO_DB(sqrt(p->sigma_x2 / p->nb_samples)));
        av_log(ctx, AV_LOG_INFO, "RMS peak dB: %f\n", LINEAR_TO_DB(sqrt(p->max_sigma_x2)));
        if (p->min_sigma_x2 != 1)
            av_log(ctx, AV_LOG_INFO, "RMS trough dB: %f\n", LINEAR_TO_DB(sqrt(p->min_sigma_x2)));
        av_log(ctx, AV_LOG_INFO, "Crest factor: %f\n", p->sigma_x2 ? FFMAX(-p->nmin, p->nmax) / sqrt(p->sigma_x2 / p->nb_samples) : 1);
        av_log(ctx, AV_LOG_INFO, "Flat factor: %f\n", LINEAR_TO_DB((p->min_runs + p->max_runs) / (p->min_count + p->max_count)));
        av_log(ctx, AV_LOG_INFO, "Peak count: %"PRId64"\n", p->min_count + p->max_count);
        bit_depth(s, p->mask, p->imask, &depth);
        av_log(ctx, AV_LOG_INFO, "Bit depth: %u/%u\n", depth.num, depth.den);
    }

    av_log(ctx, AV_LOG_INFO, "Overall\n");
    av_log(ctx, AV_LOG_INFO, "DC offset: %f\n", max_sigma_x / (nb_samples / s->nb_channels));
    av_log(ctx, AV_LOG_INFO, "Min level: %f\n", min);
    av_log(ctx, AV_LOG_INFO, "Max level: %f\n", max);
    av_log(ctx, AV_LOG_INFO, "Min difference: %f\n", min_diff);
    av_log(ctx, AV_LOG_INFO, "Max difference: %f\n", max_diff);
    av_log(ctx, AV_LOG_INFO, "Mean difference: %f\n", diff1_sum / (nb_samples - s->nb_channels));
    av_log(ctx, AV_LOG_INFO, "Peak level dB: %f\n", LINEAR_TO_DB(FFMAX(-nmin, nmax)));
    av_log(ctx, AV_LOG_INFO, "RMS level dB: %f\n", LINEAR_TO_DB(sqrt(sigma_x2 / nb_samples)));
    av_log(ctx, AV_LOG_INFO, "RMS peak dB: %f\n", LINEAR_TO_DB(sqrt(max_sigma_x2)));
    if (min_sigma_x2 != 1)
        av_log(ctx, AV_LOG_INFO, "RMS trough dB: %f\n", LINEAR_TO_DB(sqrt(min_sigma_x2)));
    av_log(ctx, AV_LOG_INFO, "Flat factor: %f\n", LINEAR_TO_DB((min_runs + max_runs) / (min_count + max_count)));
    av_log(ctx, AV_LOG_INFO, "Peak count: %f\n", (min_count + max_count) / (double)s->nb_channels);
    bit_depth(s, mask, imask, &depth);
    av_log(ctx, AV_LOG_INFO, "Bit depth: %u/%u\n", depth.num, depth.den);
    av_log(ctx, AV_LOG_INFO, "Number of samples: %"PRId64"\n", nb_samples / s->nb_channels);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    AudioStatsContext *s = ctx->priv;

    if (s->nb_channels)
        print_stats(ctx);
    av_freep(&s->chstats);
}

static const AVFilterPad astats_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad astats_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_af_astats = {
    .name          = "astats",
    .description   = NULL_IF_CONFIG_SMALL("Show time domain statistics about audio frames."),
    .query_formats = query_formats,
    .priv_size     = sizeof(AudioStatsContext),
    .priv_class    = &astats_class,
    .uninit        = uninit,
    .inputs        = astats_inputs,
    .outputs       = astats_outputs,
};
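
/* Illustrative command lines (not part of this file, input names are
 * placeholders):
 *   ffmpeg -i input.wav -af astats -f null -
 *       prints the per-channel and overall summary above when the stream ends;
 *   ffmpeg -i input.wav -af astats=metadata=1:reset=1,ametadata=print -f null -
 *       injects per-frame statistics as lavfi.astats.* frame metadata. */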