/*
 * Copyright (c) 2015 Derek Buitenhuis
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

/* Initial capacity, in entries, of the frame and pts buffers. */
#define DEFAULT_LENGTH 300

typedef struct ReverseContext {
    int nb_frames;            /* number of frames currently buffered        */
    AVFrame **frames;         /* buffered input frames, in input order      */
    unsigned int frames_size; /* allocated size of the frames array (bytes) */
    unsigned int pts_size;    /* allocated size of the pts array (bytes)    */
    int64_t *pts;             /* buffered input timestamps, in input order  */
    int flush_idx;            /* next buffered pts to reuse while flushing  */
} ReverseContext;

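/* Preallocate the pts and frame buffers; they are grown on demand in
 * filter_frame(). */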
static av_cold int init(AVFilterContext *ctx)
{
    ReverseContext *s = ctx->priv;

    s->pts = av_fast_realloc(NULL, &s->pts_size,
                             DEFAULT_LENGTH * sizeof(*(s->pts)));
    if (!s->pts)
        return AVERROR(ENOMEM);

    s->frames = av_fast_realloc(NULL, &s->frames_size,
                                DEFAULT_LENGTH * sizeof(*(s->frames)));
    if (!s->frames) {
        av_freep(&s->pts);
        return AVERROR(ENOMEM);
    }

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    ReverseContext *s = ctx->priv;

    av_freep(&s->pts);
    av_freep(&s->frames);
}

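/* Frame requests on the output link may need to loop: request_frame() can
 * return 0 without producing a frame while input is still being buffered. */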
static int config_output(AVFilterLink *outlink)
{
    outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
    return 0;
}

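/* Buffer every input frame and its pts; nothing is output until EOF. */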
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    ReverseContext *s = ctx->priv;
    void *ptr;

    if (s->nb_frames + 1 > s->pts_size / sizeof(*(s->pts))) {
        ptr = av_fast_realloc(s->pts, &s->pts_size, s->pts_size * 2);
        if (!ptr)
            return AVERROR(ENOMEM);
        s->pts = ptr;
    }

    if (s->nb_frames + 1 > s->frames_size / sizeof(*(s->frames))) {
        ptr = av_fast_realloc(s->frames, &s->frames_size, s->frames_size * 2);
        if (!ptr)
            return AVERROR(ENOMEM);
        s->frames = ptr;
    }

    s->frames[s->nb_frames] = in;
    s->pts[s->nb_frames]    = in->pts;
    s->nb_frames++;

    return 0;
}

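/* At EOF, emit the buffered frames from last to first, reassigning the
 * buffered timestamps in their original (increasing) order so that the
 * output pts stay monotonic. */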
#if CONFIG_REVERSE_FILTER

static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ReverseContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && s->nb_frames > 0) {
        AVFrame *out = s->frames[s->nb_frames - 1];
        out->pts     = s->pts[s->flush_idx++];
        ret          = ff_filter_frame(outlink, out);
        s->nb_frames--;
    }

    return ret;
}

static const AVFilterPad reverse_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad reverse_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .config_props  = config_output,
    },
    { NULL }
};

AVFilter ff_vf_reverse = {
    .name        = "reverse",
    .description = NULL_IF_CONFIG_SMALL("Reverse a clip."),
    .priv_size   = sizeof(ReverseContext),
    .init        = init,
    .uninit      = uninit,
    .inputs      = reverse_inputs,
    .outputs     = reverse_outputs,
};

#endif /* CONFIG_REVERSE_FILTER */

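/* The audio variant restricts input to planar sample formats and, in
 * addition to reversing the frame order, reverses the samples within each
 * frame in place, per channel (hence .needs_writable on the input pad). */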
#if CONFIG_AREVERSE_FILTER

static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    int ret;

    layouts = ff_all_channel_layouts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    ret = ff_set_common_formats(ctx, ff_planar_sample_fmts());
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}

static int areverse_request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ReverseContext *s = ctx->priv;
    int ret, p, i, j;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && s->nb_frames > 0) {
        AVFrame *out = s->frames[s->nb_frames - 1];
        out->pts     = s->pts[s->flush_idx++];

        for (p = 0; p < outlink->channels; p++) {
            switch (outlink->format) {
            case AV_SAMPLE_FMT_U8P: {
                uint8_t *dst = (uint8_t *)out->extended_data[p];
                for (i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                    FFSWAP(uint8_t, dst[i], dst[j]);
            }
                break;
            case AV_SAMPLE_FMT_S16P: {
                int16_t *dst = (int16_t *)out->extended_data[p];
                for (i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                    FFSWAP(int16_t, dst[i], dst[j]);
            }
                break;
            case AV_SAMPLE_FMT_S32P: {
                int32_t *dst = (int32_t *)out->extended_data[p];
                for (i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                    FFSWAP(int32_t, dst[i], dst[j]);
            }
                break;
            case AV_SAMPLE_FMT_FLTP: {
                float *dst = (float *)out->extended_data[p];
                for (i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                    FFSWAP(float, dst[i], dst[j]);
            }
                break;
            case AV_SAMPLE_FMT_DBLP: {
                double *dst = (double *)out->extended_data[p];
                for (i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                    FFSWAP(double, dst[i], dst[j]);
            }
                break;
            }
        }

        ret = ff_filter_frame(outlink, out);
        s->nb_frames--;
    }

    return ret;
}

static const AVFilterPad areverse_inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_AUDIO,
        .filter_frame   = filter_frame,
        .needs_writable = 1,
    },
    { NULL }
};

static const AVFilterPad areverse_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = areverse_request_frame,
        .config_props  = config_output,
    },
    { NULL }
};

AVFilter ff_af_areverse = {
    .name          = "areverse",
    .description   = NULL_IF_CONFIG_SMALL("Reverse an audio clip."),
    .query_formats = query_formats,
    .priv_size     = sizeof(ReverseContext),
    .init          = init,
    .uninit        = uninit,
    .inputs        = areverse_inputs,
    .outputs       = areverse_outputs,
};

#endif /* CONFIG_AREVERSE_FILTER */