/*
 * Copyright (c) 2015 Derek Buitenhuis
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

#define DEFAULT_LENGTH 300
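
/* Private context shared by the reverse and areverse filters: the buffered
 * input frames, their original timestamps, and the flush position. */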
typedef struct ReverseContext {
    int nb_frames;
    AVFrame **frames;
    unsigned int frames_size;
    unsigned int pts_size;
    int64_t *pts;
    int flush_idx;
} ReverseContext;
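
/* Pre-allocate room for DEFAULT_LENGTH buffered frames and timestamps;
 * filter_frame() grows both arrays with av_fast_realloc() as needed. */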
static av_cold int init(AVFilterContext *ctx)
{
    ReverseContext *s = ctx->priv;

    s->pts = av_fast_realloc(NULL, &s->pts_size,
                             DEFAULT_LENGTH * sizeof(*(s->pts)));
    if (!s->pts)
        return AVERROR(ENOMEM);

    s->frames = av_fast_realloc(NULL, &s->frames_size,
                                DEFAULT_LENGTH * sizeof(*(s->frames)));
    if (!s->frames) {
        av_freep(&s->pts);
        return AVERROR(ENOMEM);
    }

    return 0;
}
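
/* Free any frames still buffered (e.g. if the graph is torn down before the
 * output was fully flushed) along with the pts and frame arrays themselves. */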
static av_cold void uninit(AVFilterContext *ctx)
{
    ReverseContext *s = ctx->priv;

    while (s->nb_frames > 0) {
        av_frame_free(&s->frames[s->nb_frames - 1]);
        s->nb_frames--;
    }

    av_freep(&s->pts);
    av_freep(&s->frames);
}
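
/* Input pass: store every incoming frame and its pts instead of forwarding
 * it, doubling the backing arrays whenever they run out of space. */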
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    ReverseContext *s = ctx->priv;
    void *ptr;

    if (s->nb_frames + 1 > s->pts_size / sizeof(*(s->pts))) {
        ptr = av_fast_realloc(s->pts, &s->pts_size, s->pts_size * 2);
        if (!ptr)
            return AVERROR(ENOMEM);
        s->pts = ptr;
    }

    if (s->nb_frames + 1 > s->frames_size / sizeof(*(s->frames))) {
        ptr = av_fast_realloc(s->frames, &s->frames_size, s->frames_size * 2);
        if (!ptr)
            return AVERROR(ENOMEM);
        s->frames = ptr;
    }

    s->frames[s->nb_frames] = in;
    s->pts[s->nb_frames]    = in->pts;
    s->nb_frames++;

    return 0;
}

#if CONFIG_REVERSE_FILTER
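/* Output pass: once the input signals EOF, pop the buffered frames from the
 * end of the array while reassigning the stored timestamps in their original
 * forward order, so the reversed stream keeps an increasing pts sequence. */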
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ReverseContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && s->nb_frames > 0) {
        AVFrame *out = s->frames[s->nb_frames - 1];
        out->pts     = s->pts[s->flush_idx++];
        ret          = ff_filter_frame(outlink, out);
        s->frames[s->nb_frames - 1] = NULL;
        s->nb_frames--;
    }

    return ret;
}

static const AVFilterPad reverse_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad reverse_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_vf_reverse = {
    .name        = "reverse",
    .description = NULL_IF_CONFIG_SMALL("Reverse a clip."),
    .priv_size   = sizeof(ReverseContext),
    .init        = init,
    .uninit      = uninit,
    .inputs      = reverse_inputs,
    .outputs     = reverse_outputs,
};
#endif /* CONFIG_REVERSE_FILTER */

#if CONFIG_AREVERSE_FILTER
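/* Accept any channel count, any audio sample format and any sample rate;
 * the in-place reversal below handles all of them. */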
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    ret = ff_set_common_formats(ctx, ff_all_formats(AVMEDIA_TYPE_AUDIO));
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
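
/* Reverse the samples of one frame in place, planar layout: swap samples
 * from both ends of each channel plane until the indices meet. */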
static void reverse_samples_planar(AVFrame *out)
{
    for (int p = 0; p < out->channels; p++) {
        switch (out->format) {
        case AV_SAMPLE_FMT_U8P: {
            uint8_t *dst = (uint8_t *)out->extended_data[p];
            for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                FFSWAP(uint8_t, dst[i], dst[j]);
        }
            break;
        case AV_SAMPLE_FMT_S16P: {
            int16_t *dst = (int16_t *)out->extended_data[p];
            for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                FFSWAP(int16_t, dst[i], dst[j]);
        }
            break;
        case AV_SAMPLE_FMT_S32P: {
            int32_t *dst = (int32_t *)out->extended_data[p];
            for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                FFSWAP(int32_t, dst[i], dst[j]);
        }
            break;
        case AV_SAMPLE_FMT_FLTP: {
            float *dst = (float *)out->extended_data[p];
            for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                FFSWAP(float, dst[i], dst[j]);
        }
            break;
        case AV_SAMPLE_FMT_DBLP: {
            double *dst = (double *)out->extended_data[p];
            for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
                FFSWAP(double, dst[i], dst[j]);
        }
            break;
        }
    }
}
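
/* Same in-place reversal for packed (interleaved) layouts: swap whole sample
 * groups, i.e. all channels of sample i with all channels of sample j. */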
static void reverse_samples_packed(AVFrame *out)
{
    const int channels = out->channels;

    switch (out->format) {
    case AV_SAMPLE_FMT_U8: {
        uint8_t *dst = (uint8_t *)out->extended_data[0];
        for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
            for (int p = 0; p < channels; p++)
                FFSWAP(uint8_t, dst[i * channels + p], dst[j * channels + p]);
    }
        break;
    case AV_SAMPLE_FMT_S16: {
        int16_t *dst = (int16_t *)out->extended_data[0];
        for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
            for (int p = 0; p < channels; p++)
                FFSWAP(int16_t, dst[i * channels + p], dst[j * channels + p]);
    }
        break;
    case AV_SAMPLE_FMT_S32: {
        int32_t *dst = (int32_t *)out->extended_data[0];
        for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
            for (int p = 0; p < channels; p++)
                FFSWAP(int32_t, dst[i * channels + p], dst[j * channels + p]);
    }
        break;
    case AV_SAMPLE_FMT_FLT: {
        float *dst = (float *)out->extended_data[0];
        for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
            for (int p = 0; p < channels; p++)
                FFSWAP(float, dst[i * channels + p], dst[j * channels + p]);
    }
        break;
    case AV_SAMPLE_FMT_DBL: {
        double *dst = (double *)out->extended_data[0];
        for (int i = 0, j = out->nb_samples - 1; i < j; i++, j--)
            for (int p = 0; p < channels; p++)
                FFSWAP(double, dst[i * channels + p], dst[j * channels + p]);
    }
        break;
    }
}
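
/* Audio variant of request_frame(): on EOF, emit buffered frames from last
 * to first and also reverse the samples inside each frame. The input pad
 * requests writable frames, so the in-place swaps are safe. */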
static int areverse_request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ReverseContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && s->nb_frames > 0) {
        AVFrame *out = s->frames[s->nb_frames - 1];
        out->pts     = s->pts[s->flush_idx++];

        if (av_sample_fmt_is_planar(out->format))
            reverse_samples_planar(out);
        else
            reverse_samples_packed(out);

        ret = ff_filter_frame(outlink, out);
        s->frames[s->nb_frames - 1] = NULL;
        s->nb_frames--;
    }

    return ret;
}

static const AVFilterPad areverse_inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_AUDIO,
        .filter_frame   = filter_frame,
        .needs_writable = 1,
    },
    { NULL }
};

static const AVFilterPad areverse_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = areverse_request_frame,
    },
    { NULL }
};

AVFilter ff_af_areverse = {
    .name          = "areverse",
    .description   = NULL_IF_CONFIG_SMALL("Reverse an audio clip."),
    .query_formats = query_formats,
    .priv_size     = sizeof(ReverseContext),
    .init          = init,
    .uninit        = uninit,
    .inputs        = areverse_inputs,
    .outputs       = areverse_outputs,
};
#endif /* CONFIG_AREVERSE_FILTER */