/*
 * Copyright (c) 2018 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
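
/**
 * @file
 * Temporally pad a video stream: insert extra frames before the first input
 * frame and/or after the last one, either as solid-color frames or as clones
 * of the first/last frame.
 *
 * Example (assuming the usual FFmpeg CLI filter option syntax):
 *   ffmpeg -i in.mp4 -vf "tpad=start_duration=1:start_mode=clone:stop=30:color=black" out.mp4
 * clones the first frame for one second before the input and appends 30
 * black frames after it.
 */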

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "filters.h"
#include "internal.h"
#include "formats.h"
#include "drawutils.h"

typedef struct TPadContext {
    const AVClass *class;

    int pad_start;           ///< number of frames to add before the input
    int pad_stop;            ///< number of frames to add after the input (-1 for infinite)
    int start_mode;          ///< start padding mode: 0 = solid color, 1 = clone first frame
    int stop_mode;           ///< stop padding mode: 0 = solid color, 1 = clone last frame
    int64_t start_duration;  ///< duration to pad at the start, converted to pad_start frames
    int64_t stop_duration;   ///< duration to pad at the end, converted to pad_stop frames
    uint8_t rgba_color[4];   ///< color for the padding area

    FFDrawContext draw;
    FFDrawColor color;
    int64_t pts;             ///< pts for the next generated frame, also used as offset for input pts
    int eof;                 ///< input reached EOF
    AVFrame *cache_start;    ///< first input frame (peeked, not owned), cloned for start padding
    AVFrame *cache_stop;     ///< owned copy of the last input frame, cloned for stop padding
} TPadContext;

#define OFFSET(x) offsetof(TPadContext, x)
#define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption tpad_options[] = {
    { "start", "set the number of frames to delay input", OFFSET(pad_start), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, VF },
    { "stop", "set the number of frames to add after input finished", OFFSET(pad_stop), AV_OPT_TYPE_INT, {.i64=0}, -1, INT_MAX, VF },
    { "start_mode", "set the mode of added frames to start", OFFSET(start_mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, VF, "mode" },
    { "add", "add solid-color frames", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, VF, "mode" },
    { "clone", "clone first/last frame", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, VF, "mode" },
    { "stop_mode", "set the mode of added frames to end", OFFSET(stop_mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, VF, "mode" },
    { "start_duration", "set the duration to delay input", OFFSET(start_duration), AV_OPT_TYPE_DURATION, {.i64=0}, 0, INT64_MAX, VF },
    { "stop_duration", "set the duration to pad input", OFFSET(stop_duration), AV_OPT_TYPE_DURATION, {.i64=0}, 0, INT64_MAX, VF },
    { "color", "set the color of the added frames", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str="black"}, 0, 0, VF },
    { NULL }
};

AVFILTER_DEFINE_CLASS(tpad);
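
/*
 * The filter has to be able to draw the padding color into frames it
 * allocates itself, so it only accepts the pixel formats supported by the
 * ff_draw helpers.
 */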
static int query_formats(AVFilterContext *ctx)
{
    return ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0));
}
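
/*
 * Per-request processing:
 *   1. While pad_start frames remain and start_mode is "add", output a
 *      solid-color frame filled with the configured color.
 *   2. While pad_start frames remain and start_mode is "clone", peek the
 *      first queued input frame and output clones of it.
 *   3. Otherwise pass input frames through, shifting their pts by the time
 *      already emitted and caching a copy of the most recent frame for
 *      "clone" stop padding.
 *   4. After input EOF, output pad_stop more frames (solid color or clones
 *      of the last frame); stop == -1 pads indefinitely.
 */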
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    TPadContext *s = ctx->priv;
    AVFrame *frame = NULL;
    int ret, status;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    if (s->start_mode == 0 && s->pad_start > 0 && ff_outlink_frame_wanted(outlink)) {
        frame = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!frame)
            return AVERROR(ENOMEM);
        ff_fill_rectangle(&s->draw, &s->color,
                          frame->data, frame->linesize,
                          0, 0, frame->width, frame->height);
        frame->pts = s->pts;
        s->pts += av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base);
        s->pad_start--;
        return ff_filter_frame(outlink, frame);
    }

    if (s->start_mode == 1 && s->pad_start > 0) {
        if (!s->cache_start && ff_inlink_queued_frames(inlink)) {
            s->cache_start = ff_inlink_peek_frame(inlink, 0);
        } else if (!s->cache_start) {
            FF_FILTER_FORWARD_WANTED(outlink, inlink);
        }
        frame = av_frame_clone(s->cache_start);
        if (!frame)
            return AVERROR(ENOMEM);
        frame->pts = s->pts;
        s->pts += av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base);
        s->pad_start--;
        if (s->pad_start == 0)
            s->cache_start = NULL;
        return ff_filter_frame(outlink, frame);
    }

    if (!s->eof && !s->pad_start) {
        ret = ff_inlink_consume_frame(inlink, &frame);
        if (ret < 0)
            return ret;
        if (ret > 0) {
            if (s->stop_mode == 1 && s->pad_stop != 0) {
                av_frame_free(&s->cache_stop);
                s->cache_stop = av_frame_clone(frame);
            }
            frame->pts += s->pts;
            return ff_filter_frame(outlink, frame);
        }
    }

    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF) {
            if (!s->pad_stop) {
                ff_outlink_set_status(outlink, status, pts);
                return 0;
            }
            s->eof = 1;
            s->pts += pts;
        }
    }

    if (s->eof) {
        if (!s->pad_stop) {
            ff_outlink_set_status(outlink, AVERROR_EOF, s->pts);
            return 0;
        }
        if (s->stop_mode == 0) {
            frame = ff_get_video_buffer(outlink, outlink->w, outlink->h);
            if (!frame)
                return AVERROR(ENOMEM);
            ff_fill_rectangle(&s->draw, &s->color,
                              frame->data, frame->linesize,
                              0, 0, frame->width, frame->height);
        } else if (s->stop_mode == 1) {
            frame = av_frame_clone(s->cache_stop);
            if (!frame)
                return AVERROR(ENOMEM);
        }
        frame->pts = s->pts;
        s->pts += av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base);
        if (s->pad_stop > 0)
            s->pad_stop--;
        return ff_filter_frame(outlink, frame);
    }

    if (!s->pad_start)
        FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}
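
/*
 * Set up the drawing context for the padding color and convert the
 * start/stop durations (expressed in AV_TIME_BASE units) into frame counts
 * using the input frame rate.
 */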
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    TPadContext *s = ctx->priv;

    ff_draw_init(&s->draw, inlink->format, 0);
    ff_draw_color(&s->draw, &s->color, s->rgba_color);

    if (s->start_duration)
        s->pad_start = av_rescale_q(s->start_duration, inlink->frame_rate, av_inv_q(AV_TIME_BASE_Q));
    if (s->stop_duration)
        s->pad_stop = av_rescale_q(s->stop_duration, inlink->frame_rate, av_inv_q(AV_TIME_BASE_Q));

    return 0;
}
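
/*
 * Only cache_stop is owned by the filter; cache_start points at a frame that
 * is still queued on the input link (ff_inlink_peek_frame() does not hand
 * over ownership), so it must not be freed here.
 */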
static av_cold void uninit(AVFilterContext *ctx)
{
    TPadContext *s = ctx->priv;

    av_frame_free(&s->cache_stop);
}

static const AVFilterPad tpad_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad tpad_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_tpad = {
    .name          = "tpad",
    .description   = NULL_IF_CONFIG_SMALL("Temporally pad video frames."),
    .priv_size     = sizeof(TPadContext),
    .priv_class    = &tpad_class,
    .query_formats = query_formats,
    .activate      = activate,
    .uninit        = uninit,
    .inputs        = tpad_inputs,
    .outputs       = tpad_outputs,
};