You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

469 lines
13KB

  1. /*
  2. * Copyright (c) 2016 Paul B Mahol
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include "libavutil/audio_fifo.h"
  21. #include "libavutil/avassert.h"
  22. #include "libavutil/fifo.h"
  23. #include "libavutil/internal.h"
  24. #include "libavutil/opt.h"
  25. #include "avfilter.h"
  26. #include "audio.h"
  27. #include "filters.h"
  28. #include "formats.h"
  29. #include "internal.h"
  30. #include "video.h"
/* Shared private context for the aloop (audio) and loop (video) filters.
 * Audio-path and video-path fields are disjoint; both share the option
 * fields (loop, size, start) and the eof flag. */
typedef struct LoopContext {
    const AVClass *class;

    AVAudioFifo *fifo;       // audio: cached samples forming the loop region
    AVAudioFifo *left;       // audio: spill fifo for samples arriving past the loop end
    AVFrame **frames;        // video: array (size entries) of cached cloned frames
    int nb_frames;           // video: number of frames currently cached
    int current_frame;       // video: index of next cached frame to push

    int64_t start_pts;       // video: pts of the first cached frame
    int64_t duration;        // pts reached at the end of the last completed pass;
                             // used to offset subsequent output pts
    int64_t current_sample;  // audio: read offset into fifo while looping
    int64_t nb_samples;      // audio: number of samples currently cached
    int64_t ignored_samples; // audio: samples seen before the loop start point

    int loop;                // option: remaining loop count (-1 = loop forever)
    int eof;                 // set once input EOF has been acknowledged
    int64_t size;            // option: max number of frames/samples to cache
    int64_t start;           // option: first frame/sample of the loop
    int64_t pts;             // audio: next output pts (in outlink time base)
} LoopContext;
/* Option flags and offset helper for the shared LoopContext options. */
#define AFLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define VFLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define OFFSET(x) offsetof(LoopContext, x)
  52. static void check_size(AVFilterContext *ctx)
  53. {
  54. LoopContext *s = ctx->priv;
  55. if (!s->size)
  56. av_log(ctx, AV_LOG_WARNING, "Number of %s to loop is not set!\n",
  57. ctx->input_pads[0].type == AVMEDIA_TYPE_VIDEO ? "frames" : "samples");
  58. }
  59. #if CONFIG_ALOOP_FILTER
  60. static int aconfig_input(AVFilterLink *inlink)
  61. {
  62. AVFilterContext *ctx = inlink->dst;
  63. LoopContext *s = ctx->priv;
  64. s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, 8192);
  65. s->left = av_audio_fifo_alloc(inlink->format, inlink->channels, 8192);
  66. if (!s->fifo || !s->left)
  67. return AVERROR(ENOMEM);
  68. check_size(ctx);
  69. return 0;
  70. }
  71. static av_cold void auninit(AVFilterContext *ctx)
  72. {
  73. LoopContext *s = ctx->priv;
  74. av_audio_fifo_free(s->fifo);
  75. av_audio_fifo_free(s->left);
  76. }
  77. static int push_samples(AVFilterContext *ctx, int nb_samples)
  78. {
  79. AVFilterLink *outlink = ctx->outputs[0];
  80. LoopContext *s = ctx->priv;
  81. AVFrame *out;
  82. int ret, i = 0;
  83. while (s->loop != 0 && i < nb_samples) {
  84. out = ff_get_audio_buffer(outlink, FFMIN(nb_samples, s->nb_samples - s->current_sample));
  85. if (!out)
  86. return AVERROR(ENOMEM);
  87. ret = av_audio_fifo_peek_at(s->fifo, (void **)out->extended_data, out->nb_samples, s->current_sample);
  88. if (ret < 0) {
  89. av_frame_free(&out);
  90. return ret;
  91. }
  92. out->pts = s->pts;
  93. out->nb_samples = ret;
  94. s->pts += av_rescale_q(out->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
  95. i += out->nb_samples;
  96. s->current_sample += out->nb_samples;
  97. ret = ff_filter_frame(outlink, out);
  98. if (ret < 0)
  99. return ret;
  100. if (s->current_sample >= s->nb_samples) {
  101. s->duration = s->pts;
  102. s->current_sample = 0;
  103. if (s->loop > 0)
  104. s->loop--;
  105. }
  106. }
  107. return ret;
  108. }
/* Per-frame input handler for aloop.
 *
 * Three regimes:
 *   1. before the start sample (or looping disabled): pass through,
 *      counting ignored samples;
 *   2. loop fifo not yet full: copy samples into s->fifo (dropping the
 *      pre-start head of the first cached frame) and pass the frame on;
 *   3. loop fifo full: drop the input frame and emit looped samples.
 */
static int afilter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    int ret = 0;

    if (s->ignored_samples + frame->nb_samples > s->start && s->size > 0 && s->loop != 0) {
        if (s->nb_samples < s->size) {
            /* still filling the loop fifo */
            int written = FFMIN(frame->nb_samples, s->size - s->nb_samples);
            int drain = 0;

            ret = av_audio_fifo_write(s->fifo, (void **)frame->extended_data, written);
            if (ret < 0)
                return ret;
            if (!s->nb_samples) {
                /* first cached frame: discard the part before the requested
                 * start sample and derive the output pts from this frame */
                drain = FFMAX(0, s->start - s->ignored_samples);
                s->pts = frame->pts;
                av_audio_fifo_drain(s->fifo, drain);
                s->pts += av_rescale_q(s->start - s->ignored_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
            }
            s->nb_samples += ret - drain;
            drain = frame->nb_samples - written;
            if (s->nb_samples == s->size && drain > 0) {
                /* fifo just became full: stash the overflow into s->left so
                 * it can be emitted after the loops finish.
                 * NOTE(review): av_audio_fifo_drain removes samples from the
                 * FRONT, so draining `drain` here keeps the first `written`
                 * samples (already in s->fifo) rather than the overflow tail;
                 * draining `written` would keep the tail — confirm intent. */
                int ret2;

                ret2 = av_audio_fifo_write(s->left, (void **)frame->extended_data, frame->nb_samples);
                if (ret2 < 0)
                    return ret2;
                av_audio_fifo_drain(s->left, drain);
            }
            frame->nb_samples = ret;
            s->pts += av_rescale_q(ret, (AVRational){1, outlink->sample_rate}, outlink->time_base);
            ret = ff_filter_frame(outlink, frame);
        } else {
            /* fifo full: drop the input and output looped samples instead */
            int nb_samples = frame->nb_samples;

            av_frame_free(&frame);
            ret = push_samples(ctx, nb_samples);
        }
    } else {
        /* before the loop start (or looping disabled): pass through */
        s->ignored_samples += frame->nb_samples;
        frame->pts = s->pts;
        s->pts += av_rescale_q(frame->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
        ret = ff_filter_frame(outlink, frame);
    }

    return ret;
}
/* Output-side request handler for aloop (invoked from aactivate()).
 *
 * While the loop fifo is not yet full (or looping is disabled) it forwards
 * the request upstream, first flushing any spill samples kept in s->left
 * once the loop count has reached zero; otherwise it emits looped samples
 * in chunks of up to 1024. */
static int arequest_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LoopContext *s = ctx->priv;
    int ret = 0;

    if ((!s->size) ||
        (s->nb_samples < s->size) ||
        (s->nb_samples >= s->size && s->loop == 0)) {
        int nb_samples = av_audio_fifo_size(s->left);

        if (s->loop == 0 && nb_samples > 0) {
            /* loops exhausted: flush the leftover (post-loop) samples */
            AVFrame *out;

            out = ff_get_audio_buffer(outlink, nb_samples);
            if (!out)
                return AVERROR(ENOMEM);
            av_audio_fifo_read(s->left, (void **)out->extended_data, nb_samples);
            out->pts = s->pts;
            s->pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
            ret = ff_filter_frame(outlink, out);
            if (ret < 0)
                return ret;
        }
        ret = ff_request_frame(ctx->inputs[0]);
    } else {
        ret = push_samples(ctx, 1024);
    }

    if (s->eof && s->nb_samples > 0 && s->loop != 0) {
        /* input ended but loops remain: keep emitting from the fifo */
        ret = push_samples(ctx, 1024);
    }

    return ret;
}
/* activate() callback for aloop, implementing the filter's state machine:
 * consume input while the fifo is filling (or looping is off), latch EOF
 * (freezing the loop size to what was actually cached), signal downstream
 * EOF once no loops remain, otherwise request more input or emit loops. */
static int aactivate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    AVFrame *frame = NULL;
    int ret, status;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    if (!s->eof && (s->nb_samples < s->size || !s->loop || !s->size)) {
        ret = ff_inlink_consume_frame(inlink, &frame);
        if (ret < 0)
            return ret;
        if (ret > 0)
            return afilter_frame(inlink, frame);
    }

    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF) {
            /* input ended: loop over exactly what was cached */
            s->size = s->nb_samples;
            s->eof = 1;
        }
    }

    if (s->eof && (!s->loop || !s->size)) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->duration);
        return 0;
    }

    if (!s->eof && (!s->size ||
        (s->nb_samples < s->size) ||
        (s->nb_samples >= s->size && s->loop == 0))) {
        FF_FILTER_FORWARD_WANTED(outlink, inlink);
    } else if (s->loop && s->nb_samples == s->size) {
        return arequest_frame(outlink);
    }

    return FFERROR_NOT_READY;
}
static const AVOption aloop_options[] = {
    /* loop == -1 means loop forever; 0 (default) disables looping */
    { "loop",  "number of loops",               OFFSET(loop),  AV_OPT_TYPE_INT,   {.i64 = 0 }, -1, INT_MAX,   AFLAGS },
    { "size",  "max number of samples to loop", OFFSET(size),  AV_OPT_TYPE_INT64, {.i64 = 0 },  0, INT32_MAX, AFLAGS },
    { "start", "set the loop start sample",     OFFSET(start), AV_OPT_TYPE_INT64, {.i64 = 0 },  0, INT64_MAX, AFLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(aloop);

static const AVFilterPad ainputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = aconfig_input,
    },
    { NULL }
};

static const AVFilterPad aoutputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

/* Audio loop filter definition. */
AVFilter ff_af_aloop = {
    .name        = "aloop",
    .description = NULL_IF_CONFIG_SMALL("Loop audio samples."),
    .priv_size   = sizeof(LoopContext),
    .priv_class  = &aloop_class,
    .activate    = aactivate,
    .uninit      = auninit,
    .inputs      = ainputs,
    .outputs     = aoutputs,
};
  250. #endif /* CONFIG_ALOOP_FILTER */
  251. #if CONFIG_LOOP_FILTER
  252. static av_cold int init(AVFilterContext *ctx)
  253. {
  254. LoopContext *s = ctx->priv;
  255. s->frames = av_calloc(s->size, sizeof(*s->frames));
  256. if (!s->frames)
  257. return AVERROR(ENOMEM);
  258. check_size(ctx);
  259. return 0;
  260. }
  261. static av_cold void uninit(AVFilterContext *ctx)
  262. {
  263. LoopContext *s = ctx->priv;
  264. int i;
  265. for (i = 0; i < s->nb_frames; i++)
  266. av_frame_free(&s->frames[i]);
  267. av_freep(&s->frames);
  268. s->nb_frames = 0;
  269. }
/* Emit one cached frame (a clone), rebasing its pts into the current
 * output timeline; when the last cached frame has been sent, record the
 * end pts in s->duration and consume one loop pass (unless loop < 0). */
static int push_frame(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    int64_t pts, duration;
    int ret;

    AVFrame *out = av_frame_clone(s->frames[s->current_frame]);

    if (!out)
        return AVERROR(ENOMEM);
    /* shift cached pts by how much output time has already elapsed */
    out->pts += s->duration - s->start_pts;
    if (out->pkt_duration)
        duration = out->pkt_duration;
    else
        /* no stored duration: assume one frame interval at the link rate */
        duration = av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base);
    pts = out->pts + duration;
    ret = ff_filter_frame(outlink, out);
    s->current_frame++;

    if (s->current_frame >= s->nb_frames) {
        /* finished one pass over the cached frames */
        s->duration = pts;
        s->current_frame = 0;

        if (s->loop > 0)
            s->loop--;
    }

    return ret;
}
/* Per-frame input handler for the video loop filter.
 *
 * Before the start frame (or with looping disabled) frames pass through
 * with their pts offset by s->duration; once inside the loop window,
 * frames are cloned into the cache until it is full, after which input
 * frames are dropped and cached frames are pushed instead. */
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    int64_t duration;
    int ret = 0;

    if (inlink->frame_count_out >= s->start && s->size > 0 && s->loop != 0) {
        if (s->nb_frames < s->size) {
            if (!s->nb_frames)
                /* first cached frame anchors the loop's pts origin */
                s->start_pts = frame->pts;
            s->frames[s->nb_frames] = av_frame_clone(frame);
            if (!s->frames[s->nb_frames]) {
                av_frame_free(&frame);
                return AVERROR(ENOMEM);
            }
            s->nb_frames++;
            if (frame->pkt_duration)
                duration = frame->pkt_duration;
            else
                duration = av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base);
            /* track where the cached segment ends */
            s->duration = frame->pts + duration;
            ret = ff_filter_frame(outlink, frame);
        } else {
            /* cache full: drop the input frame, output a looped frame */
            av_frame_free(&frame);
            ret = push_frame(ctx);
        }
    } else {
        /* pass through, shifted by the time consumed by earlier loops */
        frame->pts += s->duration;
        ret = ff_filter_frame(outlink, frame);
    }

    return ret;
}
/* activate() callback for the video loop filter; mirrors aactivate():
 * consume input while the cache is filling (or looping is off), latch EOF
 * (freezing the loop size to the cached frame count), signal downstream
 * EOF once no loops remain, otherwise request input or push cached frames. */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    AVFrame *frame = NULL;
    int ret, status;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    if (!s->eof && (s->nb_frames < s->size || !s->loop || !s->size)) {
        ret = ff_inlink_consume_frame(inlink, &frame);
        if (ret < 0)
            return ret;
        if (ret > 0)
            return filter_frame(inlink, frame);
    }

    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF) {
            /* input ended: loop over exactly what was cached */
            s->size = s->nb_frames;
            s->eof = 1;
        }
    }

    if (s->eof && (!s->loop || !s->size)) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->duration);
        return 0;
    }

    if (!s->eof && (!s->size ||
        (s->nb_frames < s->size) ||
        (s->nb_frames >= s->size && s->loop == 0))) {
        FF_FILTER_FORWARD_WANTED(outlink, inlink);
    } else if (s->loop && s->nb_frames == s->size) {
        return push_frame(ctx);
    }

    return FFERROR_NOT_READY;
}
static const AVOption loop_options[] = {
    /* loop == -1 means loop forever; 0 (default) disables looping */
    { "loop",  "number of loops",              OFFSET(loop),  AV_OPT_TYPE_INT,   {.i64 = 0 }, -1, INT_MAX,   VFLAGS },
    { "size",  "max number of frames to loop", OFFSET(size),  AV_OPT_TYPE_INT64, {.i64 = 0 },  0, INT16_MAX, VFLAGS },
    { "start", "set the loop start frame",     OFFSET(start), AV_OPT_TYPE_INT64, {.i64 = 0 },  0, INT64_MAX, VFLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(loop);

static const AVFilterPad inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

/* Video loop filter definition. */
AVFilter ff_vf_loop = {
    .name        = "loop",
    .description = NULL_IF_CONFIG_SMALL("Loop video frames."),
    .priv_size   = sizeof(LoopContext),
    .priv_class  = &loop_class,
    .init        = init,
    .uninit      = uninit,
    .activate    = activate,
    .inputs      = inputs,
    .outputs     = outputs,
};
  395. #endif /* CONFIG_LOOP_FILTER */