/*
 * Copyright (c) 2012 Rudolf Polzer
 * Copyright (c) 2013 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * telecine filter, heavily based on mpv-player:TOOLS/vf_dlopen/telecine.c by
 * Rudolf Polzer.
 */

#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

typedef struct {
    const AVClass *class;
    int first_field;           // 0: top field first, 1: bottom field first
    char *pattern;             // telecine pattern, one digit per input frame
    unsigned int pattern_pos;  // current position within the pattern
    int64_t start_time;        // pts of the first input frame
    AVRational pts;            // input/output frame rate ratio derived from the pattern
    AVRational ts_unit;        // duration of one output frame in the output time base
    int out_cnt;               // maximum number of output frames per input frame
    int occupied;              // set when 'temp' holds a frame with a pending field
    int nb_planes;
    int planeheight[4];
    int stride[4];
    AVFrame *frame[5];         // output frame buffers
    AVFrame *temp;             // buffer for the frame whose field is carried over
} TelecineContext;

#define OFFSET(x) offsetof(TelecineContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption telecine_options[] = {
    {"first_field", "select first field", OFFSET(first_field), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "field"},
    {"top",    "select top field first",    0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
    {"t",      "select top field first",    0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
    {"bottom", "select bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
    {"b",      "select bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
    {"pattern", "pattern that describes for how many fields a frame is to be displayed", OFFSET(pattern), AV_OPT_TYPE_STRING, {.str="23"}, 0, 0, FLAGS},
    {NULL}
};

AVFILTER_DEFINE_CLASS(telecine);

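/* Validate the pattern and derive the frame rate conversion ratio: each
 * pattern digit is the number of output fields the corresponding input frame
 * is stretched to, so pts accumulates 2 input fields against that many output
 * fields per digit.  For the default "23" pattern this gives 4/5, turning
 * 24000/1001 fps input into 30000/1001 fps output. */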
static av_cold int init(AVFilterContext *ctx)
{
    TelecineContext *s = ctx->priv;
    const char *p;
    int max = 0;

    if (!strlen(s->pattern)) {
        av_log(ctx, AV_LOG_ERROR, "No pattern provided.\n");
        return AVERROR_INVALIDDATA;
    }

    for (p = s->pattern; *p; p++) {
        if (!av_isdigit(*p)) {
            av_log(ctx, AV_LOG_ERROR, "Provided pattern includes non-numeric characters.\n");
            return AVERROR_INVALIDDATA;
        }

        max = FFMAX(*p - '0', max);
        s->pts.num += 2;
        s->pts.den += *p - '0';
    }

    s->start_time = AV_NOPTS_VALUE;

    s->out_cnt = (max + 1) / 2;
    av_log(ctx, AV_LOG_INFO, "Telecine pattern %s yields up to %d frames per frame, pts advance factor: %d/%d\n",
           s->pattern, s->out_cnt, s->pts.num, s->pts.den);

    return 0;
}

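/* Accept every non-hardware-accelerated, non-paletted, non-bitstream pixel
 * format; the filter copies plain planar/packed image data line by line. */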
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *pix_fmts = NULL;
    int fmt;

    for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL   ||
              desc->flags & AV_PIX_FMT_FLAG_PAL       ||
              desc->flags & AV_PIX_FMT_FLAG_BITSTREAM))
            ff_add_format(&pix_fmts, fmt);
    }

    ff_set_common_formats(ctx, pix_fmts);
    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    TelecineContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int i, ret;

    s->temp = ff_get_video_buffer(inlink, inlink->w, inlink->h);
    if (!s->temp)
        return AVERROR(ENOMEM);
    for (i = 0; i < s->out_cnt; i++) {
        s->frame[i] = ff_get_video_buffer(inlink, inlink->w, inlink->h);
        if (!s->frame[i])
            return AVERROR(ENOMEM);
    }

    if ((ret = av_image_fill_linesizes(s->stride, inlink->format, inlink->w)) < 0)
        return ret;

    s->planeheight[1] = s->planeheight[2] = FF_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    return 0;
}

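/* The output frame rate is the input rate divided by the pattern ratio
 * computed in init(); the output time base is the input time base multiplied
 * by that same ratio, and ts_unit ends up as the duration of one output frame
 * expressed in the output time base. */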
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    TelecineContext *s = ctx->priv;
    const AVFilterLink *inlink = ctx->inputs[0];
    AVRational fps = inlink->frame_rate;

    if (!fps.num || !fps.den) {
        av_log(ctx, AV_LOG_ERROR, "The input needs a constant frame rate; "
               "current rate of %d/%d is invalid\n", fps.num, fps.den);
        return AVERROR(EINVAL);
    }
    fps = av_mul_q(fps, av_inv_q(s->pts));
    av_log(ctx, AV_LOG_VERBOSE, "FPS: %d/%d -> %d/%d\n",
           inlink->frame_rate.num, inlink->frame_rate.den, fps.num, fps.den);

    outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
    outlink->frame_rate = fps;
    outlink->time_base = av_mul_q(inlink->time_base, s->pts);
    av_log(ctx, AV_LOG_VERBOSE, "TB: %d/%d -> %d/%d\n",
           inlink->time_base.num, inlink->time_base.den, outlink->time_base.num, outlink->time_base.den);

    s->ts_unit = av_inv_q(av_mul_q(fps, outlink->time_base));

    return 0;
}

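/* Distribute the fields of the incoming frame according to the current
 * pattern digit: a field buffered from the previous frame (if any) is paired
 * with the opposite field of this frame, complete field pairs are output as
 * plain copies of this frame, and if one field is left over the frame is
 * stashed in 'temp' so that field can be used on the next call. */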
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    TelecineContext *s = ctx->priv;
    int i, len, ret = 0, nout = 0;

    if (s->start_time == AV_NOPTS_VALUE)
        s->start_time = inpicref->pts;

    len = s->pattern[s->pattern_pos] - '0';

    s->pattern_pos++;
    if (!s->pattern[s->pattern_pos])
        s->pattern_pos = 0;

    if (!len) { // do not output any field from this frame
        av_frame_free(&inpicref);
        return 0;
    }

    if (s->occupied) {
        for (i = 0; i < s->nb_planes; i++) {
            // fill in the EARLIER field from the buffered pic
            av_image_copy_plane(s->frame[nout]->data[i] + s->frame[nout]->linesize[i] * s->first_field,
                                s->frame[nout]->linesize[i] * 2,
                                s->temp->data[i] + s->temp->linesize[i] * s->first_field,
                                s->temp->linesize[i] * 2,
                                s->stride[i],
                                (s->planeheight[i] - s->first_field + 1) / 2);
            // fill in the LATER field from the new pic
            av_image_copy_plane(s->frame[nout]->data[i] + s->frame[nout]->linesize[i] * !s->first_field,
                                s->frame[nout]->linesize[i] * 2,
                                inpicref->data[i] + inpicref->linesize[i] * !s->first_field,
                                inpicref->linesize[i] * 2,
                                s->stride[i],
                                (s->planeheight[i] - !s->first_field + 1) / 2);
        }
        nout++;
        len--;
        s->occupied = 0;
    }

    while (len >= 2) {
        // output THIS image as-is
        for (i = 0; i < s->nb_planes; i++)
            av_image_copy_plane(s->frame[nout]->data[i], s->frame[nout]->linesize[i],
                                inpicref->data[i], inpicref->linesize[i],
                                s->stride[i],
                                s->planeheight[i]);
        nout++;
        len -= 2;
    }

    if (len >= 1) {
        // copy THIS image to the buffer, we need it later
        for (i = 0; i < s->nb_planes; i++)
            av_image_copy_plane(s->temp->data[i], s->temp->linesize[i],
                                inpicref->data[i], inpicref->linesize[i],
                                s->stride[i],
                                s->planeheight[i]);
        s->occupied = 1;
    }

    for (i = 0; i < nout; i++) {
        AVFrame *frame = av_frame_clone(s->frame[i]);

        if (!frame) {
            av_frame_free(&inpicref);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(frame, inpicref);
        frame->pts = ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time) +
                     av_rescale(outlink->frame_count, s->ts_unit.num,
                                s->ts_unit.den);
        ret = ff_filter_frame(outlink, frame);
    }

    av_frame_free(&inpicref);

    return ret;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    TelecineContext *s = ctx->priv;
    int i;

    av_frame_free(&s->temp);
    for (i = 0; i < s->out_cnt; i++)
        av_frame_free(&s->frame[i]);
}

static const AVFilterPad telecine_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad telecine_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_telecine = {
    .name          = "telecine",
    .description   = NULL_IF_CONFIG_SMALL("Apply a telecine pattern."),
    .priv_size     = sizeof(TelecineContext),
    .priv_class    = &telecine_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = telecine_inputs,
    .outputs       = telecine_outputs,
};