/*
 * Copyright (c) 2007 Bobby Bingham
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * FIFO buffering filter
 */

#include "libavutil/avassert.h"
#include "libavutil/audioconvert.h"
#include "libavutil/mathematics.h"
#include "libavutil/samplefmt.h"

#include "audio.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
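
/* Frames are queued in a singly linked list: "root" is a dummy head node
 * embedded in the context and "last" points to the most recently added
 * entry, so appending is O(1). */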
typedef struct Buf {
    AVFilterBufferRef *buf;
    struct Buf        *next;
} Buf;

typedef struct {
    Buf  root;
    Buf *last;   ///< last buffered frame

    /**
     * When a specific number of output samples is requested, the partial
     * buffer is stored here
     */
    AVFilterBufferRef *buf_out;
    int allocated_samples;   ///< number of samples buf_out was allocated for
} FifoContext;

static av_cold int init(AVFilterContext *ctx, const char *args)
{
    FifoContext *fifo = ctx->priv;
    fifo->last = &fifo->root;

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    FifoContext *fifo = ctx->priv;
    Buf *buf, *tmp;

    for (buf = fifo->root.next; buf; buf = tmp) {
        tmp = buf->next;
        avfilter_unref_buffer(buf->buf);
        av_free(buf);
    }

    avfilter_unref_buffer(fifo->buf_out);
}
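
/* Append a new node holding buf to the tail of the queue; on allocation
 * failure the buffer reference is released and ENOMEM is returned. */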
static int add_to_queue(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
    FifoContext *fifo = inlink->dst->priv;

    fifo->last->next = av_mallocz(sizeof(Buf));
    if (!fifo->last->next) {
        avfilter_unref_buffer(buf);
        return AVERROR(ENOMEM);
    }

    fifo->last = fifo->last->next;
    fifo->last->buf = buf;

    return 0;
}

static void start_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
{
    add_to_queue(inlink, buf);
}
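
/* Remove the head node of the queue (the node after root), updating "last"
 * when the queue becomes empty. The buffer itself is not unreferenced; by
 * this point the caller owns that reference. */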
static void queue_pop(FifoContext *s)
{
    Buf *tmp = s->root.next->next;
    if (s->last == s->root.next)
        s->last = &s->root;
    av_freep(&s->root.next);
    s->root.next = tmp;
}
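
/* Video frames are queued whole in start_frame() and only forwarded from
 * request_frame(), so the slice and end-of-frame callbacks are no-ops. */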
static void end_frame(AVFilterLink *inlink) { }

static void draw_slice(AVFilterLink *inlink, int y, int h, int slice_dir) { }

/**
 * Move the buffer's data pointers and pts forward by offset samples.
 */
static void buffer_offset(AVFilterLink *link, AVFilterBufferRef *buf,
                          int offset)
{
    int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
    int planar = av_sample_fmt_is_planar(link->format);
    int planes = planar ? nb_channels : 1;
    int block_align = av_get_bytes_per_sample(link->format) * (planar ? 1 : nb_channels);
    int i;

    av_assert0(buf->audio->nb_samples > offset);

    for (i = 0; i < planes; i++)
        buf->extended_data[i] += block_align * offset;
    if (buf->data != buf->extended_data)
        memcpy(buf->data, buf->extended_data,
               FFMIN(planes, FF_ARRAY_ELEMS(buf->data)) * sizeof(*buf->data));
    buf->linesize[0] -= block_align * offset;
    buf->audio->nb_samples -= offset;

    if (buf->pts != AV_NOPTS_VALUE) {
        buf->pts += av_rescale_q(offset, (AVRational){1, link->sample_rate},
                                 link->time_base);
    }
}
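
/* Return the largest power-of-two alignment (capped at 128 bytes) shared by
 * all plane pointers of the buffer. */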
static int calc_ptr_alignment(AVFilterBufferRef *buf)
{
    int planes = av_sample_fmt_is_planar(buf->format) ?
                 av_get_channel_layout_nb_channels(buf->audio->channel_layout) : 1;
    int min_align = 128;
    int p;

    for (p = 0; p < planes; p++) {
        int cur_align = 128;
        while ((intptr_t)buf->extended_data[p] % cur_align)
            cur_align >>= 1;
        if (cur_align < min_align)
            min_align = cur_align;
    }

    return min_align;
}
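
/* Output exactly request_samples samples on the audio link. If the queued
 * head buffer has enough samples and is sufficiently aligned, it is passed
 * on (or split) directly; otherwise samples are accumulated into buf_out,
 * padding with silence if the input reaches EOF before the buffer is full. */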
static int return_audio_frame(AVFilterContext *ctx)
{
    AVFilterLink *link = ctx->outputs[0];
    FifoContext *s = ctx->priv;
    AVFilterBufferRef *head = s->root.next->buf;
    AVFilterBufferRef *buf_out;
    int ret;

    if (!s->buf_out &&
        head->audio->nb_samples >= link->request_samples &&
        calc_ptr_alignment(head) >= 32) {
        if (head->audio->nb_samples == link->request_samples) {
            buf_out = head;
            queue_pop(s);
        } else {
            buf_out = avfilter_ref_buffer(head, AV_PERM_READ);
            buf_out->audio->nb_samples = link->request_samples;
            buffer_offset(link, head, link->request_samples);
        }
    } else {
        int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);

        if (!s->buf_out) {
            s->buf_out = ff_get_audio_buffer(link, AV_PERM_WRITE,
                                             link->request_samples);
            if (!s->buf_out)
                return AVERROR(ENOMEM);

            s->buf_out->audio->nb_samples = 0;
            s->buf_out->pts               = head->pts;
            s->allocated_samples          = link->request_samples;
        } else if (link->request_samples != s->allocated_samples) {
            av_log(ctx, AV_LOG_ERROR, "request_samples changed before the "
                   "buffer was returned.\n");
            return AVERROR(EINVAL);
        }

        while (s->buf_out->audio->nb_samples < s->allocated_samples) {
            int len = FFMIN(s->allocated_samples - s->buf_out->audio->nb_samples,
                            head->audio->nb_samples);

            av_samples_copy(s->buf_out->extended_data, head->extended_data,
                            s->buf_out->audio->nb_samples, 0, len, nb_channels,
                            link->format);
            s->buf_out->audio->nb_samples += len;

            if (len == head->audio->nb_samples) {
                avfilter_unref_buffer(head);
                queue_pop(s);

                if (!s->root.next &&
                    (ret = ff_request_frame(ctx->inputs[0])) < 0) {
                    if (ret == AVERROR_EOF) {
                        av_samples_set_silence(s->buf_out->extended_data,
                                               s->buf_out->audio->nb_samples,
                                               s->allocated_samples -
                                               s->buf_out->audio->nb_samples,
                                               nb_channels, link->format);
                        s->buf_out->audio->nb_samples = s->allocated_samples;
                        break;
                    }
                    return ret;
                }
                head = s->root.next->buf;
            } else {
                buffer_offset(link, head, len);
            }
        }

        buf_out = s->buf_out;
        s->buf_out = NULL;
    }
    return ff_filter_samples(link, buf_out);
}
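
/* Pull one queued frame (requesting more input if the queue is empty) and
 * push it to the output link. */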
static int request_frame(AVFilterLink *outlink)
{
    FifoContext *fifo = outlink->src->priv;
    int ret = 0;

    if (!fifo->root.next) {
        if ((ret = ff_request_frame(outlink->src->inputs[0])) < 0)
            return ret;
    }

    /* by doing this, we give ownership of the reference to the next filter,
     * so we don't have to worry about dereferencing it ourselves. */
    switch (outlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        ff_start_frame(outlink, fifo->root.next->buf);
        ff_draw_slice (outlink, 0, outlink->h, 1);
        ff_end_frame  (outlink);
        queue_pop(fifo);
        break;
    case AVMEDIA_TYPE_AUDIO:
        if (outlink->request_samples) {
            return return_audio_frame(outlink->src);
        } else {
            ret = ff_filter_samples(outlink, fifo->root.next->buf);
            queue_pop(fifo);
        }
        break;
    default:
        return AVERROR(EINVAL);
    }

    return ret;
}

AVFilter avfilter_vf_fifo = {
    .name        = "fifo",
    .description = NULL_IF_CONFIG_SMALL("Buffer input images and send them when they are requested."),

    .init      = init,
    .uninit    = uninit,

    .priv_size = sizeof(FifoContext),

    .inputs    = (const AVFilterPad[]) {{ .name             = "default",
                                          .type             = AVMEDIA_TYPE_VIDEO,
                                          .get_video_buffer = ff_null_get_video_buffer,
                                          .start_frame      = start_frame,
                                          .draw_slice       = draw_slice,
                                          .end_frame        = end_frame,
                                          .rej_perms        = AV_PERM_REUSE2, },
                                        { .name = NULL}},
    .outputs   = (const AVFilterPad[]) {{ .name             = "default",
                                          .type             = AVMEDIA_TYPE_VIDEO,
                                          .request_frame    = request_frame, },
                                        { .name = NULL}},
};

AVFilter avfilter_af_afifo = {
    .name        = "afifo",
    .description = NULL_IF_CONFIG_SMALL("Buffer input frames and send them when they are requested."),

    .init      = init,
    .uninit    = uninit,

    .priv_size = sizeof(FifoContext),

    .inputs    = (AVFilterPad[]) {{ .name             = "default",
                                    .type             = AVMEDIA_TYPE_AUDIO,
                                    .get_audio_buffer = ff_null_get_audio_buffer,
                                    .filter_samples   = add_to_queue,
                                    .rej_perms        = AV_PERM_REUSE2, },
                                  { .name = NULL}},
    .outputs   = (AVFilterPad[]) {{ .name             = "default",
                                    .type             = AVMEDIA_TYPE_AUDIO,
                                    .request_frame    = request_frame, },
                                  { .name = NULL}},
};