You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

172 lines
5.6KB

  1. /*
  2. * Copyright (c) 2008 Vitor Sessak
  3. *
  4. * This file is part of Libav.
  5. *
  6. * Libav is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * Libav is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with Libav; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. /**
  21. * @file
  22. * memory buffer source filter
  23. */
  24. #include "avfilter.h"
  25. #include "vsrc_buffer.h"
  26. #include "libavutil/imgutils.h"
/**
 * Private state of the buffer video source filter: holds at most one
 * pending frame pushed in by av_vsrc_buffer_add_frame() until the
 * filterchain pulls it out via request_frame().
 */
typedef struct {
    int64_t pts;                ///< presentation timestamp of the pending frame
    AVFrame frame;              ///< shallow copy (data/linesize pointers) of the pending frame
    int has_frame;              ///< nonzero when a frame is buffered and not yet consumed
    int h, w;                   ///< frame dimensions, parsed from the init() args string
    enum PixelFormat pix_fmt;   ///< pixel format of incoming frames
    AVRational time_base;       ///< time_base to set in the output link
    AVRational pixel_aspect;    ///< sample aspect ratio to set in the output link
} BufferSourceContext;
  36. int av_vsrc_buffer_add_frame(AVFilterContext *buffer_filter, AVFrame *frame,
  37. int64_t pts, AVRational pixel_aspect)
  38. {
  39. BufferSourceContext *c = buffer_filter->priv;
  40. if (c->has_frame) {
  41. av_log(buffer_filter, AV_LOG_ERROR,
  42. "Buffering several frames is not supported. "
  43. "Please consume all available frames before adding a new one.\n"
  44. );
  45. //return -1;
  46. }
  47. memcpy(c->frame.data , frame->data , sizeof(frame->data));
  48. memcpy(c->frame.linesize, frame->linesize, sizeof(frame->linesize));
  49. c->frame.interlaced_frame= frame->interlaced_frame;
  50. c->frame.top_field_first = frame->top_field_first;
  51. c->frame.key_frame = frame->key_frame;
  52. c->frame.pict_type = frame->pict_type;
  53. c->pts = pts;
  54. c->pixel_aspect = pixel_aspect;
  55. c->has_frame = 1;
  56. return 0;
  57. }
  58. static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
  59. {
  60. BufferSourceContext *c = ctx->priv;
  61. char pix_fmt_str[128];
  62. int n = 0;
  63. if (!args ||
  64. (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d", &c->w, &c->h, pix_fmt_str,
  65. &c->time_base.num, &c->time_base.den,
  66. &c->pixel_aspect.num, &c->pixel_aspect.den)) != 7) {
  67. av_log(ctx, AV_LOG_ERROR, "Expected 7 arguments, but %d found in '%s'\n", n, args);
  68. return AVERROR(EINVAL);
  69. }
  70. if ((c->pix_fmt = av_get_pix_fmt(pix_fmt_str)) == PIX_FMT_NONE) {
  71. char *tail;
  72. c->pix_fmt = strtol(pix_fmt_str, &tail, 10);
  73. if (*tail || c->pix_fmt < 0 || c->pix_fmt >= PIX_FMT_NB) {
  74. av_log(ctx, AV_LOG_ERROR, "Invalid pixel format string '%s'\n", pix_fmt_str);
  75. return AVERROR(EINVAL);
  76. }
  77. }
  78. av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s\n", c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name);
  79. return 0;
  80. }
  81. static int query_formats(AVFilterContext *ctx)
  82. {
  83. BufferSourceContext *c = ctx->priv;
  84. enum PixelFormat pix_fmts[] = { c->pix_fmt, PIX_FMT_NONE };
  85. avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
  86. return 0;
  87. }
  88. static int config_props(AVFilterLink *link)
  89. {
  90. BufferSourceContext *c = link->src->priv;
  91. link->w = c->w;
  92. link->h = c->h;
  93. link->sample_aspect_ratio = c->pixel_aspect;
  94. link->time_base = c->time_base;
  95. return 0;
  96. }
  97. static int request_frame(AVFilterLink *link)
  98. {
  99. BufferSourceContext *c = link->src->priv;
  100. AVFilterBufferRef *picref;
  101. if (!c->has_frame) {
  102. av_log(link->src, AV_LOG_ERROR,
  103. "request_frame() called with no available frame!\n");
  104. //return -1;
  105. }
  106. /* This picture will be needed unmodified later for decoding the next
  107. * frame */
  108. picref = avfilter_get_video_buffer(link, AV_PERM_WRITE | AV_PERM_PRESERVE |
  109. AV_PERM_REUSE2,
  110. link->w, link->h);
  111. av_image_copy(picref->data, picref->linesize,
  112. c->frame.data, c->frame.linesize,
  113. picref->format, link->w, link->h);
  114. picref->pts = c->pts;
  115. picref->video->pixel_aspect = c->pixel_aspect;
  116. picref->video->interlaced = c->frame.interlaced_frame;
  117. picref->video->top_field_first = c->frame.top_field_first;
  118. picref->video->key_frame = c->frame.key_frame;
  119. picref->video->pict_type = c->frame.pict_type;
  120. avfilter_start_frame(link, avfilter_ref_buffer(picref, ~0));
  121. avfilter_draw_slice(link, 0, link->h, 1);
  122. avfilter_end_frame(link);
  123. avfilter_unref_buffer(picref);
  124. c->has_frame = 0;
  125. return 0;
  126. }
  127. static int poll_frame(AVFilterLink *link)
  128. {
  129. BufferSourceContext *c = link->src->priv;
  130. return !!(c->has_frame);
  131. }
/** Filter definition: a video source with no inputs and one video output. */
AVFilter avfilter_vsrc_buffer = {
    .name          = "buffer",
    .description   = NULL_IF_CONFIG_SMALL("Buffer video frames, and make them accessible to the filterchain."),
    .priv_size     = sizeof(BufferSourceContext),
    .query_formats = query_formats,

    .init      = init,

    /* Source filter: the input pad list is empty (NULL-name terminator only). */
    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name            = "default",
                                    .type            = AVMEDIA_TYPE_VIDEO,
                                    .request_frame   = request_frame,
                                    .poll_frame      = poll_frame,
                                    .config_props    = config_props, },
                                  { .name = NULL}},
};