You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

218 lines
7.3KB

  1. /*
  2. * Copyright (c) 2008 Vitor Sessak
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. /**
  21. * @file
  22. * memory buffer source filter
  23. */
  24. #include "avfilter.h"
  25. #include "avcodec.h"
  26. #include "vsrc_buffer.h"
  27. #include "libavutil/imgutils.h"
/** Private state of the buffer video source filter. */
typedef struct {
    AVFrame frame;                  ///< shallow copy of the last frame handed in by the user
    int has_frame;                  ///< non-zero while a frame is buffered and not yet consumed
    int h, w;                       ///< currently configured frame dimensions
    enum PixelFormat pix_fmt;       ///< currently configured pixel format
    AVRational time_base;           ///< time_base to set in the output link
    AVRational sample_aspect_ratio; ///< sample aspect ratio to set in the output link
    char sws_param[256];            ///< swscale options for an auto-inserted scale filter
} BufferSourceContext;
  37. int av_vsrc_buffer_add_frame2(AVFilterContext *buffer_filter, AVFrame *frame,
  38. const char *sws_param)
  39. {
  40. BufferSourceContext *c = buffer_filter->priv;
  41. int ret;
  42. if (c->has_frame) {
  43. av_log(buffer_filter, AV_LOG_ERROR,
  44. "Buffering several frames is not supported. "
  45. "Please consume all available frames before adding a new one.\n"
  46. );
  47. //return -1;
  48. }
  49. if(!c->sws_param[0]){
  50. snprintf(c->sws_param, 255, "%d:%d:%s", c->w, c->h, sws_param);
  51. }
  52. if (frame->width != c->w || frame->height != c->h || frame->format != c->pix_fmt) {
  53. AVFilterContext *scale= buffer_filter->outputs[0]->dst;
  54. AVFilterLink *link;
  55. av_log(buffer_filter, AV_LOG_INFO,
  56. "Buffer video input changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
  57. c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
  58. frame->width, frame->height, av_pix_fmt_descriptors[frame->format].name);
  59. if(!scale || strcmp(scale->filter->name,"scale")){
  60. AVFilter *f= avfilter_get_by_name("scale");
  61. av_log(buffer_filter, AV_LOG_INFO, "Inserting scaler filter\n");
  62. if(avfilter_open(&scale, f, "Input equalizer") < 0)
  63. return -1;
  64. if((ret=avfilter_init_filter(scale, c->sws_param, NULL))<0){
  65. avfilter_free(scale);
  66. return ret;
  67. }
  68. if((ret=avfilter_insert_filter(buffer_filter->outputs[0], scale, 0, 0))<0){
  69. avfilter_free(scale);
  70. return ret;
  71. }
  72. scale->outputs[0]->time_base = scale->inputs[0]->time_base;
  73. scale->outputs[0]->format= c->pix_fmt;
  74. } else if(!strcmp(scale->filter->name, "scale")) {
  75. snprintf(c->sws_param, 255, "%d:%d:%s", scale->outputs[0]->w, scale->outputs[0]->h, sws_param);
  76. scale->filter->init(scale, c->sws_param, NULL);
  77. }
  78. c->pix_fmt = scale->inputs[0]->format = frame->format;
  79. c->w = scale->inputs[0]->w = frame->width;
  80. c->h = scale->inputs[0]->h = frame->height;
  81. link= scale->outputs[0];
  82. if ((ret = link->srcpad->config_props(link)) < 0)
  83. return ret;
  84. }
  85. c->frame = *frame;
  86. memcpy(c->frame.data , frame->data , sizeof(frame->data));
  87. memcpy(c->frame.linesize, frame->linesize, sizeof(frame->linesize));
  88. c->has_frame = 1;
  89. return 0;
  90. }
/**
 * Convenience wrapper around av_vsrc_buffer_add_frame2() passing no extra
 * swscale options.
 */
int av_vsrc_buffer_add_frame(AVFilterContext *buffer_filter, AVFrame *frame)
{
    return av_vsrc_buffer_add_frame2(buffer_filter, frame, "");
}
  95. static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
  96. {
  97. BufferSourceContext *c = ctx->priv;
  98. char pix_fmt_str[128];
  99. int n = 0;
  100. if (!args ||
  101. (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d", &c->w, &c->h, pix_fmt_str,
  102. &c->time_base.num, &c->time_base.den,
  103. &c->sample_aspect_ratio.num, &c->sample_aspect_ratio.den)) != 7) {
  104. av_log(ctx, AV_LOG_ERROR, "Expected 7 arguments, but only %d found in '%s'\n", n, args);
  105. return AVERROR(EINVAL);
  106. }
  107. if ((c->pix_fmt = av_get_pix_fmt(pix_fmt_str)) == PIX_FMT_NONE) {
  108. char *tail;
  109. c->pix_fmt = strtol(pix_fmt_str, &tail, 10);
  110. if (*tail || c->pix_fmt < 0 || c->pix_fmt >= PIX_FMT_NB) {
  111. av_log(ctx, AV_LOG_ERROR, "Invalid pixel format string '%s'\n", pix_fmt_str);
  112. return AVERROR(EINVAL);
  113. }
  114. }
  115. av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s tb:%d/%d sar:%d/%d\n",
  116. c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
  117. c->time_base.num, c->time_base.den,
  118. c->sample_aspect_ratio.num, c->sample_aspect_ratio.den);
  119. return 0;
  120. }
  121. static int query_formats(AVFilterContext *ctx)
  122. {
  123. BufferSourceContext *c = ctx->priv;
  124. enum PixelFormat pix_fmts[] = { c->pix_fmt, PIX_FMT_NONE };
  125. avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
  126. return 0;
  127. }
  128. static int config_props(AVFilterLink *link)
  129. {
  130. BufferSourceContext *c = link->src->priv;
  131. link->w = c->w;
  132. link->h = c->h;
  133. link->sample_aspect_ratio = c->sample_aspect_ratio;
  134. link->time_base = c->time_base;
  135. return 0;
  136. }
/**
 * Push the buffered frame downstream as a single full-height slice,
 * copying its pixel data into a freshly allocated buffer, then mark the
 * source as empty again.
 */
static int request_frame(AVFilterLink *link)
{
    BufferSourceContext *c = link->src->priv;
    AVFilterBufferRef *picref;

    if (!c->has_frame) {
        av_log(link->src, AV_LOG_ERROR,
               "request_frame() called with no available frame!\n");
        /* NOTE(review): the error return is deliberately disabled; execution
         * continues and pushes whatever is currently in c->frame. */
        //return -1;
    }

    /* This picture will be needed unmodified later for decoding the next
     * frame */
    picref = avfilter_get_video_buffer(link, AV_PERM_WRITE | AV_PERM_PRESERVE |
                                       AV_PERM_REUSE2,
                                       link->w, link->h);

    /* Deep-copy the user's frame: the source data may be reused/freed by
     * the caller after this point. */
    av_image_copy(picref->data, picref->linesize,
                  c->frame.data, c->frame.linesize,
                  picref->format, link->w, link->h);
    avfilter_copy_frame_props(picref, &c->frame);

    /* Emit the frame as start/one-slice/end, then drop our own reference. */
    avfilter_start_frame(link, avfilter_ref_buffer(picref, ~0));
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);
    avfilter_unref_buffer(picref);

    c->has_frame = 0;

    return 0;
}
  162. static int poll_frame(AVFilterLink *link)
  163. {
  164. BufferSourceContext *c = link->src->priv;
  165. return !!(c->has_frame);
  166. }
/** Filter descriptor: a source with no inputs and one video output pad. */
AVFilter avfilter_vsrc_buffer = {
    .name          = "buffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats,

    .init      = init,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name            = "default",
                                    .type            = AVMEDIA_TYPE_VIDEO,
                                    .request_frame   = request_frame,
                                    .poll_frame      = poll_frame,
                                    .config_props    = config_props, },
                                  { .name = NULL}},
};