You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

244 lines
7.1KB

  1. /*
  2. * Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. /**
  21. * @file
  22. * Stream (de)synchronization filter
  23. */
  24. #include "libavutil/eval.h"
  25. #include "libavutil/opt.h"
  26. #include "avfilter.h"
  27. #include "audio.h"
  28. #include "internal.h"
/* Maximum number of buffered frames per input stream. */
#define QUEUE_SIZE 16

/* Variables available in the stream-selection expression:
   b1/b2: number of buffers sent so far on stream 1/2,
   s1/s2: number of samples sent so far on stream 1/2,
   t1/t2: timestamp (seconds) of the end of the last buffer sent
          on stream 1/2 (see send_out()). */
static const char * const var_names[] = {
    "b1", "b2",
    "s1", "s2",
    "t1", "t2",
    NULL
};

/* Indices into var_values[]; must stay in sync with var_names[]. */
enum var_name {
    VAR_B1, VAR_B2,
    VAR_S1, VAR_S2,
    VAR_T1, VAR_T2,
    VAR_NB  /* number of variables; sizes the var_values array */
};
typedef struct {
    const AVClass *class;
    AVExpr *expr;               /* parsed stream-selection expression */
    char *expr_str;             /* expression text, set via the "expr"/"e" option */
    double var_values[VAR_NB];  /* current b1..t2 values fed to the expression */
    struct buf_queue {
        AVFrame *buf[QUEUE_SIZE];
        unsigned tail, nb;
        /* buf[tail] is the oldest,
           buf[(tail + nb) % QUEUE_SIZE] is where the next is added */
    } queue[2];                 /* one FIFO of pending frames per input */
    int req[2];                 /* outstanding frame requests per output */
    int next_out;               /* index (0/1) of the stream to send from next */
    int eof; /* bitmask, one bit for each stream */
} AStreamSyncContext;
#define OFFSET(x) offsetof(AStreamSyncContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* "expr"/"e": expression deciding which stream to send next
   (>= 0 selects stream 2, < 0 selects stream 1 — see send_next()).
   The default "t1-t2" keeps the streams in timestamp order. */
static const AVOption astreamsync_options[] = {
    { "expr", "set stream selection expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "t1-t2" }, .flags = FLAGS },
    { "e",    "set stream selection expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "t1-t2" }, .flags = FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(astreamsync);
  65. static av_cold int init(AVFilterContext *ctx)
  66. {
  67. AStreamSyncContext *as = ctx->priv;
  68. int r, i;
  69. r = av_expr_parse(&as->expr, as->expr_str, var_names,
  70. NULL, NULL, NULL, NULL, 0, ctx);
  71. if (r < 0) {
  72. av_log(ctx, AV_LOG_ERROR, "Error in expression \"%s\"\n", as->expr_str);
  73. return r;
  74. }
  75. for (i = 0; i < 42; i++)
  76. av_expr_eval(as->expr, as->var_values, NULL); /* exercize prng */
  77. return 0;
  78. }
/* Negotiate formats: the filter never converts audio, it only reorders
 * frames, so each output must accept exactly what the same-numbered
 * input provides. */
static int query_formats(AVFilterContext *ctx)
{
    int i, ret;
    AVFilterFormats *formats, *rates;
    AVFilterChannelLayouts *layouts;

    for (i = 0; i < 2; i++) {
        /* Propagate the sample formats offered upstream to both sides
           of this stream's links. */
        formats = ctx->inputs[i]->in_formats;
        if ((ret = ff_formats_ref(formats, &ctx->inputs[i]->out_formats)) < 0 ||
            (ret = ff_formats_ref(formats, &ctx->outputs[i]->in_formats)) < 0)
            return ret;
        /* Any sample rate is acceptable; input and output share the list. */
        rates = ff_all_samplerates();
        if ((ret = ff_formats_ref(rates, &ctx->inputs[i]->out_samplerates)) < 0 ||
            (ret = ff_formats_ref(rates, &ctx->outputs[i]->in_samplerates)) < 0)
            return ret;
        /* Channel layouts also pass straight through. */
        layouts = ctx->inputs[i]->in_channel_layouts;
        if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts)) < 0 ||
            (ret = ff_channel_layouts_ref(layouts, &ctx->outputs[i]->in_channel_layouts)) < 0)
            return ret;
    }
    return 0;
}
  100. static int config_output(AVFilterLink *outlink)
  101. {
  102. AVFilterContext *ctx = outlink->src;
  103. int id = outlink == ctx->outputs[1];
  104. outlink->sample_rate = ctx->inputs[id]->sample_rate;
  105. outlink->time_base = ctx->inputs[id]->time_base;
  106. return 0;
  107. }
/* Pop the oldest frame from queue[out_id], update the expression
 * variables (buffer count, sample count, end timestamp) and forward
 * the frame to the matching output.
 * Returns the ff_filter_frame() result. */
static int send_out(AVFilterContext *ctx, int out_id)
{
    AStreamSyncContext *as = ctx->priv;
    struct buf_queue *queue = &as->queue[out_id];
    AVFrame *buf = queue->buf[queue->tail];
    int ret;

    queue->buf[queue->tail] = NULL;  /* ownership moves to ff_filter_frame() */
    as->var_values[VAR_B1 + out_id]++;
    as->var_values[VAR_S1 + out_id] += buf->nb_samples;
    /* t1/t2 track the END of the last buffer sent: resync to pts when
       one is available, then advance by the buffer's duration. */
    if (buf->pts != AV_NOPTS_VALUE)
        as->var_values[VAR_T1 + out_id] =
            av_q2d(ctx->outputs[out_id]->time_base) * buf->pts;
    as->var_values[VAR_T1 + out_id] += buf->nb_samples /
                                       (double)ctx->inputs[out_id]->sample_rate;
    ret = ff_filter_frame(ctx->outputs[out_id], buf);
    queue->nb--;
    queue->tail = (queue->tail + 1) % QUEUE_SIZE;
    /* One pending request on this output is now satisfied. */
    if (as->req[out_id])
        as->req[out_id]--;
    return ret;
}
/* Send queued frames in expression order: after each frame, re-evaluate
 * the expression to choose the next stream (>= 0 selects stream 1,
 * < 0 selects stream 0), until the chosen queue runs dry.  Finally,
 * force a frame out of any queue that has filled up so buffering stays
 * bounded at QUEUE_SIZE. */
static void send_next(AVFilterContext *ctx)
{
    AStreamSyncContext *as = ctx->priv;
    int i;

    while (1) {
        if (!as->queue[as->next_out].nb)
            break;
        send_out(ctx, as->next_out);
        /* Once any stream hit EOF, next_out is driven by
           request_frame() instead of the expression. */
        if (!as->eof)
            as->next_out = av_expr_eval(as->expr, as->var_values, NULL) >= 0;
    }
    for (i = 0; i < 2; i++)
        if (as->queue[i].nb == QUEUE_SIZE)
            send_out(ctx, i);
}
/* Handle a frame request on one output: keep pumping data until the
 * request is satisfied or the corresponding input is known to be at
 * EOF. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AStreamSyncContext *as = ctx->priv;
    int id = outlink == ctx->outputs[1];

    as->req[id]++;
    while (as->req[id] && !(as->eof & (1 << id))) {
        if (as->queue[as->next_out].nb) {
            send_next(ctx);
        } else {
            /* Speculative-EOF trick: set the bit, then request a frame
               upstream.  If the input delivers, filter_frame() clears
               the bit; if it is still set afterwards the input is
               exhausted and we switch to the other stream. */
            as->eof |= 1 << as->next_out;
            /* NOTE(review): ff_request_frame() errors are ignored here;
               failures are treated the same as EOF — confirm intended. */
            ff_request_frame(ctx->inputs[as->next_out]);
            if (as->eof & (1 << as->next_out))
                as->next_out = !as->next_out;
        }
    }
    return 0;
}
  162. static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
  163. {
  164. AVFilterContext *ctx = inlink->dst;
  165. AStreamSyncContext *as = ctx->priv;
  166. int id = inlink == ctx->inputs[1];
  167. as->queue[id].buf[(as->queue[id].tail + as->queue[id].nb++) % QUEUE_SIZE] =
  168. insamples;
  169. as->eof &= ~(1 << id);
  170. send_next(ctx);
  171. return 0;
  172. }
  173. static av_cold void uninit(AVFilterContext *ctx)
  174. {
  175. AStreamSyncContext *as = ctx->priv;
  176. av_expr_free(as->expr);
  177. as->expr = NULL;
  178. }
/* Two audio inputs sharing one callback; the pad is identified inside
 * filter_frame() by comparing link pointers. */
static const AVFilterPad astreamsync_inputs[] = {
    {
        .name         = "in1",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },{
        .name         = "in2",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Two audio outputs mirroring the inputs; each is configured from and
 * serves frames of the same-numbered input. */
static const AVFilterPad astreamsync_outputs[] = {
    {
        .name          = "out1",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },{
        .name          = "out2",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
/* Filter registration: 2 audio inputs, 2 audio outputs; frames pass
 * through unmodified, in the order chosen by the "expr" option. */
AVFilter ff_af_astreamsync = {
    .name          = "astreamsync",
    .description   = NULL_IF_CONFIG_SMALL("Copy two streams of audio data "
                                          "in a configurable order."),
    .priv_size     = sizeof(AStreamSyncContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = astreamsync_inputs,
    .outputs       = astreamsync_outputs,
    .priv_class    = &astreamsync_class,
};