/*
 * Copyright (c) 2012 Rudolf Polzer
 * Copyright (c) 2013 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * telecine filter, heavily based on mpv-player:TOOLS/vf_dlopen/telecine.c
 * by Rudolf Polzer.
 */
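
/*
 * Illustrative usage (not part of the original source): apply the default
 * 2:3 pulldown pattern to film-rate material, e.g.
 *
 *     ffmpeg -i in.mkv -vf telecine=first_field=top:pattern=23 out.mkv
 *
 * which turns 24000/1001 fps input into 30000/1001 fps output.
 */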

#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

typedef struct {
    const AVClass *class;
    int first_field;
    char *pattern;
    unsigned int pattern_pos;

    int64_t start_time;
    AVRational pts;
    AVRational ts_unit;
    int out_cnt;
    int occupied;

    int nb_planes;
    int planeheight[4];
    int stride[4];

    AVFrame *frame[5];
    AVFrame *temp;
} TelecineContext;

#define OFFSET(x) offsetof(TelecineContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption telecine_options[] = {
    {"first_field", "select first field", OFFSET(first_field), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "field"},
    {"top", "select top field first", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
    {"t", "select top field first", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
    {"bottom", "select bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
    {"b", "select bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
    {"pattern", "pattern that describes for how many fields each frame is to be displayed", OFFSET(pattern), AV_OPT_TYPE_STRING, {.str="23"}, 0, 0, FLAGS},
    {NULL}
};

AVFILTER_DEFINE_CLASS(telecine);

static av_cold int init(AVFilterContext *ctx)
{
    TelecineContext *s = ctx->priv;
    const char *p;
    int max = 0;

    if (!strlen(s->pattern)) {
        av_log(ctx, AV_LOG_ERROR, "No pattern provided.\n");
        return AVERROR_INVALIDDATA;
    }
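
    /*
     * Each pattern digit tells over how many output fields the corresponding
     * input frame is spread; every input frame always supplies two fields.
     * The accumulated ratio pts = 2 * strlen(pattern) / sum(digits) is the
     * factor by which the frame duration is scaled: e.g. the default "23"
     * gives 4/5, so 24000/1001 fps input becomes 30000/1001 fps output
     * (see config_output).
     */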
    for (p = s->pattern; *p; p++) {
        if (!av_isdigit(*p)) {
            av_log(ctx, AV_LOG_ERROR, "Provided pattern includes non-numeric characters.\n");
            return AVERROR_INVALIDDATA;
        }

        max = FFMAX(*p - '0', max);
        s->pts.num += 2;
        s->pts.den += *p - '0';
    }

    s->start_time = AV_NOPTS_VALUE;

    s->out_cnt = (max + 1) / 2;

    av_log(ctx, AV_LOG_INFO, "Telecine pattern %s yields up to %d frames per frame, pts advance factor: %d/%d\n",
           s->pattern, s->out_cnt, s->pts.num, s->pts.den);

    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *pix_fmts = NULL;
    int fmt;
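
    /*
     * Build the list of supported formats: every pixel format except
     * hardware-accelerated, paletted and bitstream ones, since the filter
     * weaves fields by copying plane lines with av_image_copy_plane().
     */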
    for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
              desc->flags & AV_PIX_FMT_FLAG_PAL     ||
              desc->flags & AV_PIX_FMT_FLAG_BITSTREAM))
            ff_add_format(&pix_fmts, fmt);
    }

    return ff_set_common_formats(ctx, pix_fmts);
}

static int config_input(AVFilterLink *inlink)
{
    TelecineContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int i, ret;

    s->temp = ff_get_video_buffer(inlink, inlink->w, inlink->h);
    if (!s->temp)
        return AVERROR(ENOMEM);
    for (i = 0; i < s->out_cnt; i++) {
        s->frame[i] = ff_get_video_buffer(inlink, inlink->w, inlink->h);
        if (!s->frame[i])
            return AVERROR(ENOMEM);
    }

    if ((ret = av_image_fill_linesizes(s->stride, inlink->format, inlink->w)) < 0)
        return ret;

    s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    TelecineContext *s = ctx->priv;
    const AVFilterLink *inlink = ctx->inputs[0];
    AVRational fps = inlink->frame_rate;

    if (!fps.num || !fps.den) {
        av_log(ctx, AV_LOG_ERROR, "The input needs a constant frame rate; "
               "current rate of %d/%d is invalid\n", fps.num, fps.den);
        return AVERROR(EINVAL);
    }
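
    /*
     * The output rate is the input rate divided by the accumulated pattern
     * ratio, and the output time base is the input time base multiplied by
     * the same ratio. With the default "23" pattern (pts = 4/5), a
     * 24000/1001 fps input with time base 1001/24000 becomes 30000/1001 fps
     * with time base 1001/30000.
     */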
    fps = av_mul_q(fps, av_inv_q(s->pts));
    av_log(ctx, AV_LOG_VERBOSE, "FPS: %d/%d -> %d/%d\n",
           inlink->frame_rate.num, inlink->frame_rate.den, fps.num, fps.den);
    outlink->frame_rate = fps;

    outlink->time_base = av_mul_q(inlink->time_base, s->pts);
    av_log(ctx, AV_LOG_VERBOSE, "TB: %d/%d -> %d/%d\n",
           inlink->time_base.num, inlink->time_base.den, outlink->time_base.num, outlink->time_base.den);

    s->ts_unit = av_inv_q(av_mul_q(fps, outlink->time_base));

    return 0;
}
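
/*
 * Per-frame telecine step: each input frame supplies two fields and the
 * current pattern digit says how many output fields they must cover.
 * A single field left over from the previous frame is first woven with this
 * frame's earlier field; remaining field pairs are emitted as full copies of
 * the current frame; an odd trailing field is stashed in s->temp for the
 * next call.
 */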
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    TelecineContext *s = ctx->priv;
    int i, len, ret = 0, nout = 0;

    if (s->start_time == AV_NOPTS_VALUE)
        s->start_time = inpicref->pts;

    len = s->pattern[s->pattern_pos] - '0';

    s->pattern_pos++;
    if (!s->pattern[s->pattern_pos])
        s->pattern_pos = 0;

    if (!len) { // do not output any field from this frame
        av_frame_free(&inpicref);
        return 0;
    }

    if (s->occupied) {
        for (i = 0; i < s->nb_planes; i++) {
            // fill in the EARLIER field from the buffered pic
            av_image_copy_plane(s->frame[nout]->data[i] + s->frame[nout]->linesize[i] * s->first_field,
                                s->frame[nout]->linesize[i] * 2,
                                s->temp->data[i] + s->temp->linesize[i] * s->first_field,
                                s->temp->linesize[i] * 2,
                                s->stride[i],
                                (s->planeheight[i] - s->first_field + 1) / 2);
            // fill in the LATER field from the new pic
            av_image_copy_plane(s->frame[nout]->data[i] + s->frame[nout]->linesize[i] * !s->first_field,
                                s->frame[nout]->linesize[i] * 2,
                                inpicref->data[i] + inpicref->linesize[i] * !s->first_field,
                                inpicref->linesize[i] * 2,
                                s->stride[i],
                                (s->planeheight[i] - !s->first_field + 1) / 2);
        }
        nout++;
        len--;
        s->occupied = 0;
    }

    while (len >= 2) {
        // output THIS image as-is
        for (i = 0; i < s->nb_planes; i++)
            av_image_copy_plane(s->frame[nout]->data[i], s->frame[nout]->linesize[i],
                                inpicref->data[i], inpicref->linesize[i],
                                s->stride[i],
                                s->planeheight[i]);
        nout++;
        len -= 2;
    }

    if (len >= 1) {
        // copy THIS image to the buffer, we need it later
        for (i = 0; i < s->nb_planes; i++)
            av_image_copy_plane(s->temp->data[i], s->temp->linesize[i],
                                inpicref->data[i], inpicref->linesize[i],
                                s->stride[i],
                                s->planeheight[i]);
        s->occupied = 1;
    }
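
    // Emit the assembled frames; timestamps are regenerated from the first
    // input pts plus the output frame count scaled by one output frame
    // duration (s->ts_unit) in the output time base.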
    for (i = 0; i < nout; i++) {
        AVFrame *frame = av_frame_clone(s->frame[i]);

        if (!frame) {
            av_frame_free(&inpicref);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(frame, inpicref);
        frame->pts = ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time) +
                     av_rescale(outlink->frame_count, s->ts_unit.num,
                                s->ts_unit.den);
        ret = ff_filter_frame(outlink, frame);
    }
    av_frame_free(&inpicref);

    return ret;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    TelecineContext *s = ctx->priv;
    int i;

    av_frame_free(&s->temp);
    for (i = 0; i < s->out_cnt; i++)
        av_frame_free(&s->frame[i]);
}

static const AVFilterPad telecine_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad telecine_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_telecine = {
    .name          = "telecine",
    .description   = NULL_IF_CONFIG_SMALL("Apply a telecine pattern."),
    .priv_size     = sizeof(TelecineContext),
    .priv_class    = &telecine_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = telecine_inputs,
    .outputs       = telecine_outputs,
};