/*
 * Copyright (c) 2012 Rudolf Polzer
 * Copyright (c) 2013 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * telecine filter, heavily based on mpv-player:TOOLS/vf_dlopen/telecine.c by
 * Rudolf Polzer.
 */

#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
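
/* Filter state: the parsed pulldown pattern and the current position in
 * it, the frame rate scaling ratio, the reusable output frames and the
 * temporary frame holding a pending single field. */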
typedef struct {
    const AVClass *class;
    int first_field;
    char *pattern;
    unsigned int pattern_pos;

    int64_t start_time;
    AVRational pts;
    AVRational ts_unit;
    int out_cnt;
    int occupied;

    int nb_planes;
    int planeheight[4];
    int stride[4];

    AVFrame *frame[5];
    AVFrame *temp;
} TelecineContext;

#define OFFSET(x) offsetof(TelecineContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption telecine_options[] = {
    {"first_field", "select first field", OFFSET(first_field), AV_OPT_TYPE_INT,   {.i64=0}, 0, 1, FLAGS, "field"},
    {"top",    "select top field first",    0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
    {"t",      "select top field first",    0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
    {"bottom", "select bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
    {"b",      "select bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
    {"pattern", "pattern that describes for how many fields a frame is to be displayed", OFFSET(pattern), AV_OPT_TYPE_STRING, {.str="23"}, 0, 0, FLAGS},
    {NULL}
};

AVFILTER_DEFINE_CLASS(telecine);
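
/* Parse the telecine pattern: each digit is the number of fields the
 * corresponding input frame is displayed for.  s->pts accumulates
 * 2*strlen(pattern) / sum(digits), the factor by which the input frame
 * rate is divided to obtain the output rate (the default "23" turns
 * 24 fps material into 30 fps, the classic 2:3 pulldown), and the largest
 * digit determines how many whole output frames a single input frame can
 * span (out_cnt). */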
static av_cold int init(AVFilterContext *ctx)
{
    TelecineContext *s = ctx->priv;
    const char *p;
    int max = 0;

    if (!strlen(s->pattern)) {
        av_log(ctx, AV_LOG_ERROR, "No pattern provided.\n");
        return AVERROR_INVALIDDATA;
    }

    for (p = s->pattern; *p; p++) {
        if (!av_isdigit(*p)) {
            av_log(ctx, AV_LOG_ERROR, "Provided pattern includes non-numeric characters.\n");
            return AVERROR_INVALIDDATA;
        }

        max = FFMAX(*p - '0', max);
        s->pts.num += 2;
        s->pts.den += *p - '0';
    }

    s->start_time = AV_NOPTS_VALUE;

    s->out_cnt = (max + 1) / 2;
    av_log(ctx, AV_LOG_INFO, "Telecine pattern %s yields up to %d frames per frame, pts advance factor: %d/%d\n",
           s->pattern, s->out_cnt, s->pts.num, s->pts.den);

    return 0;
}
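
/* Build the list of supported input/output pixel formats: every format
 * known to libavutil except hardware-accelerated, paletted and
 * bitstream-packed ones. */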
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *pix_fmts = NULL;
    int fmt, ret;

    for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
              desc->flags & AV_PIX_FMT_FLAG_PAL ||
              desc->flags & AV_PIX_FMT_FLAG_BITSTREAM) &&
            (ret = ff_add_format(&pix_fmts, fmt)) < 0)
            return ret;
    }

    return ff_set_common_formats(ctx, pix_fmts);
}
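
/* Allocate the temporary field buffer and the out_cnt reusable output
 * frames, and precompute the per-plane byte widths (stride) and heights
 * used when copying fields. */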
static int config_input(AVFilterLink *inlink)
{
    TelecineContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int i, ret;

    s->temp = ff_get_video_buffer(inlink, inlink->w, inlink->h);
    if (!s->temp)
        return AVERROR(ENOMEM);
    for (i = 0; i < s->out_cnt; i++) {
        s->frame[i] = ff_get_video_buffer(inlink, inlink->w, inlink->h);
        if (!s->frame[i])
            return AVERROR(ENOMEM);
    }

    if ((ret = av_image_fill_linesizes(s->stride, inlink->format, inlink->w)) < 0)
        return ret;

    s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    return 0;
}
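
/* Derive the output frame rate and time base from the input by the
 * pattern ratio computed in init(), plus the pts increment per output
 * frame (ts_unit). */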
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    TelecineContext *s = ctx->priv;
    const AVFilterLink *inlink = ctx->inputs[0];
    AVRational fps = inlink->frame_rate;

    if (!fps.num || !fps.den) {
        av_log(ctx, AV_LOG_ERROR, "The input needs a constant frame rate; "
               "current rate of %d/%d is invalid\n", fps.num, fps.den);
        return AVERROR(EINVAL);
    }
    fps = av_mul_q(fps, av_inv_q(s->pts));
    av_log(ctx, AV_LOG_VERBOSE, "FPS: %d/%d -> %d/%d\n",
           inlink->frame_rate.num, inlink->frame_rate.den, fps.num, fps.den);

    outlink->frame_rate = fps;
    outlink->time_base = av_mul_q(inlink->time_base, s->pts);
    av_log(ctx, AV_LOG_VERBOSE, "TB: %d/%d -> %d/%d\n",
           inlink->time_base.num, inlink->time_base.den, outlink->time_base.num, outlink->time_base.den);

    s->ts_unit = av_inv_q(av_mul_q(fps, outlink->time_base));

    return 0;
}
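
/* For each input frame, take the next pattern digit as the number of
 * fields to emit.  If a single field is still pending in s->temp, weave
 * it in as the first field of an output frame together with the opposite
 * field of the new frame.  Every remaining pair of fields becomes a plain
 * copy of the new frame; a leftover single field leaves the new frame
 * buffered in s->temp for the next call. */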
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    TelecineContext *s = ctx->priv;
    int i, len, ret = 0, nout = 0;

    if (s->start_time == AV_NOPTS_VALUE)
        s->start_time = inpicref->pts;

    len = s->pattern[s->pattern_pos] - '0';

    s->pattern_pos++;
    if (!s->pattern[s->pattern_pos])
        s->pattern_pos = 0;

    if (!len) { // do not output any field from this frame
        av_frame_free(&inpicref);
        return 0;
    }

    if (s->occupied) {
        for (i = 0; i < s->nb_planes; i++) {
            // fill in the EARLIER field from the buffered pic
            av_image_copy_plane(s->frame[nout]->data[i] + s->frame[nout]->linesize[i] * s->first_field,
                                s->frame[nout]->linesize[i] * 2,
                                s->temp->data[i] + s->temp->linesize[i] * s->first_field,
                                s->temp->linesize[i] * 2,
                                s->stride[i],
                                (s->planeheight[i] - s->first_field + 1) / 2);
            // fill in the LATER field from the new pic
            av_image_copy_plane(s->frame[nout]->data[i] + s->frame[nout]->linesize[i] * !s->first_field,
                                s->frame[nout]->linesize[i] * 2,
                                inpicref->data[i] + inpicref->linesize[i] * !s->first_field,
                                inpicref->linesize[i] * 2,
                                s->stride[i],
                                (s->planeheight[i] - !s->first_field + 1) / 2);
        }
        nout++;
        len--;
        s->occupied = 0;
    }

    while (len >= 2) {
        // output THIS image as-is
        for (i = 0; i < s->nb_planes; i++)
            av_image_copy_plane(s->frame[nout]->data[i], s->frame[nout]->linesize[i],
                                inpicref->data[i], inpicref->linesize[i],
                                s->stride[i],
                                s->planeheight[i]);
        nout++;
        len -= 2;
    }

    if (len >= 1) {
        // copy THIS image to the buffer, we need it later
        for (i = 0; i < s->nb_planes; i++)
            av_image_copy_plane(s->temp->data[i], s->temp->linesize[i],
                                inpicref->data[i], inpicref->linesize[i],
                                s->stride[i],
                                s->planeheight[i]);
        s->occupied = 1;
    }

    for (i = 0; i < nout; i++) {
        AVFrame *frame = av_frame_clone(s->frame[i]);

        if (!frame) {
            av_frame_free(&inpicref);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(frame, inpicref);
        frame->pts = ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time) +
                     av_rescale(outlink->frame_count, s->ts_unit.num,
                                s->ts_unit.den);
        ret = ff_filter_frame(outlink, frame);
    }
    av_frame_free(&inpicref);

    return ret;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    TelecineContext *s = ctx->priv;
    int i;

    av_frame_free(&s->temp);
    for (i = 0; i < s->out_cnt; i++)
        av_frame_free(&s->frame[i]);
}

static const AVFilterPad telecine_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad telecine_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_telecine = {
    .name          = "telecine",
    .description   = NULL_IF_CONFIG_SMALL("Apply a telecine pattern."),
    .priv_size     = sizeof(TelecineContext),
    .priv_class    = &telecine_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = telecine_inputs,
    .outputs       = telecine_outputs,
};