/*
 * Copyright (c) 2012 Rudolf Polzer
 * Copyright (c) 2013 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * telecine filter, heavily based on mpv-player:TOOLS/vf_dlopen/telecine.c by
 * Rudolf Polzer.
 */

#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
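
/*
 * Per-instance state: the pull-down pattern and the current position in it,
 * the PTS scaling ratio derived from the pattern, per-plane geometry, a
 * scratch frame holding the field left over from the previous input frame,
 * and the output frames assembled during the current step.
 */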
typedef struct TelecineContext {
    const AVClass *class;
    int first_field;
    char *pattern;
    unsigned int pattern_pos;
    int64_t start_time;

    AVRational pts;
    AVRational ts_unit;
    int out_cnt;
    int occupied;

    int nb_planes;
    int planeheight[4];
    int stride[4];

    AVFrame *frame[5];
    AVFrame *temp;
} TelecineContext;

#define OFFSET(x) offsetof(TelecineContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption telecine_options[] = {
    {"first_field", "select first field", OFFSET(first_field), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, FLAGS, "field"},
    {"top",    "select top field first",    0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
    {"t",      "select top field first",    0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "field"},
    {"bottom", "select bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
    {"b",      "select bottom field first", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "field"},
  57. {"pattern", "pattern that describe for how many fields a frame is to be displayed", OFFSET(pattern), AV_OPT_TYPE_STRING, {.str="23"}, 0, 0, FLAGS},
    {NULL}
};
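
/*
 * Example: the classic 2:3 pull-down corresponds to
 * "telecine=first_field=top:pattern=23", which turns 24000/1001 fps
 * progressive input into 30000/1001 fps telecined output.
 */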

AVFILTER_DEFINE_CLASS(telecine);
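
/*
 * Parse the pattern at init time. Each digit consumes one input frame (two
 * fields) and emits that many output fields, so pts accumulates to
 * 2 * strlen(pattern) / (sum of digits); the output timing in config_output()
 * is scaled by this ratio. out_cnt is the largest number of output frames a
 * single pattern step can yield.
 */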
static av_cold int init(AVFilterContext *ctx)
{
    TelecineContext *s = ctx->priv;
    const char *p;
    int max = 0;

    if (!strlen(s->pattern)) {
        av_log(ctx, AV_LOG_ERROR, "No pattern provided.\n");
        return AVERROR_INVALIDDATA;
    }

    for (p = s->pattern; *p; p++) {
        if (!av_isdigit(*p)) {
            av_log(ctx, AV_LOG_ERROR, "Provided pattern includes non-numeric characters.\n");
            return AVERROR_INVALIDDATA;
        }

        max = FFMAX(*p - '0', max);
        s->pts.num += 2;
        s->pts.den += *p - '0';
    }

    s->start_time = AV_NOPTS_VALUE;

    s->out_cnt = (max + 1) / 2;
    av_log(ctx, AV_LOG_INFO, "Telecine pattern %s yields up to %d frames per frame, pts advance factor: %d/%d\n",
           s->pattern, s->out_cnt, s->pts.num, s->pts.den);

    return 0;
}
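
/*
 * Accept any software pixel format that is not bitstream-packed, paletted or
 * hardware-accelerated, since filter_frame() copies plane data line by line.
 */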
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    int ret;

    ret = ff_formats_pixdesc_filter(&formats, 0,
                                    AV_PIX_FMT_FLAG_BITSTREAM |
                                    AV_PIX_FMT_FLAG_PAL |
                                    AV_PIX_FMT_FLAG_HWACCEL);
    if (ret < 0)
        return ret;
    return ff_set_common_formats(ctx, formats);
}
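
/*
 * Allocate the scratch frame that parks the leftover field and the out_cnt
 * output frames, and record the per-plane line sizes and heights used by the
 * copies in filter_frame().
 */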
static int config_input(AVFilterLink *inlink)
{
    TelecineContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int i, ret;

    s->temp = ff_get_video_buffer(inlink, inlink->w, inlink->h);
    if (!s->temp)
        return AVERROR(ENOMEM);
    for (i = 0; i < s->out_cnt; i++) {
        s->frame[i] = ff_get_video_buffer(inlink, inlink->w, inlink->h);
        if (!s->frame[i])
            return AVERROR(ENOMEM);
    }

    if ((ret = av_image_fill_linesizes(s->stride, inlink->format, inlink->w)) < 0)
        return ret;

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);

    return 0;
}
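
/*
 * Derive the output timing: the frame rate is divided by the pts ratio
 * computed in init(), the time base is multiplied by it, and ts_unit becomes
 * the PTS increment per output frame in the new time base.
 */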
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    TelecineContext *s = ctx->priv;
    const AVFilterLink *inlink = ctx->inputs[0];
    AVRational fps = inlink->frame_rate;

    if (!fps.num || !fps.den) {
        av_log(ctx, AV_LOG_ERROR, "The input needs a constant frame rate; "
               "current rate of %d/%d is invalid\n", fps.num, fps.den);
        return AVERROR(EINVAL);
    }
    fps = av_mul_q(fps, av_inv_q(s->pts));
    av_log(ctx, AV_LOG_VERBOSE, "FPS: %d/%d -> %d/%d\n",
           inlink->frame_rate.num, inlink->frame_rate.den, fps.num, fps.den);

    outlink->frame_rate = fps;
    outlink->time_base = av_mul_q(inlink->time_base, s->pts);
    av_log(ctx, AV_LOG_VERBOSE, "TB: %d/%d -> %d/%d\n",
           inlink->time_base.num, inlink->time_base.den, outlink->time_base.num, outlink->time_base.den);

    s->ts_unit = av_inv_q(av_mul_q(fps, outlink->time_base));

    return 0;
}
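
/*
 * Core pull-down step. The current pattern digit gives the number of fields
 * to emit for this input frame: if a field is parked in the scratch frame,
 * weave it (as the earlier field) with the matching field of the new frame;
 * then output the new frame unchanged for every remaining pair of fields;
 * finally, if one field is left over, park a copy of the new frame for the
 * next step. Completed frames are cloned, retimed and pushed downstream.
 */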
static int filter_frame(AVFilterLink *inlink, AVFrame *inpicref)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    TelecineContext *s = ctx->priv;
    int i, len, ret = 0, nout = 0;

    if (s->start_time == AV_NOPTS_VALUE)
        s->start_time = inpicref->pts;

    len = s->pattern[s->pattern_pos] - '0';

    s->pattern_pos++;
    if (!s->pattern[s->pattern_pos])
        s->pattern_pos = 0;

    if (!len) { // do not output any field from this frame
        av_frame_free(&inpicref);
        return 0;
    }

    if (s->occupied) {
        av_frame_make_writable(s->frame[nout]);
        for (i = 0; i < s->nb_planes; i++) {
            // fill in the EARLIER field from the buffered pic
            av_image_copy_plane(s->frame[nout]->data[i] + s->frame[nout]->linesize[i] * s->first_field,
                                s->frame[nout]->linesize[i] * 2,
                                s->temp->data[i] + s->temp->linesize[i] * s->first_field,
                                s->temp->linesize[i] * 2,
                                s->stride[i],
                                (s->planeheight[i] - s->first_field + 1) / 2);
            // fill in the LATER field from the new pic
            av_image_copy_plane(s->frame[nout]->data[i] + s->frame[nout]->linesize[i] * !s->first_field,
                                s->frame[nout]->linesize[i] * 2,
                                inpicref->data[i] + inpicref->linesize[i] * !s->first_field,
                                inpicref->linesize[i] * 2,
                                s->stride[i],
                                (s->planeheight[i] - !s->first_field + 1) / 2);
        }

        s->frame[nout]->interlaced_frame = 1;
        s->frame[nout]->top_field_first = !s->first_field;
        nout++;
        len--;
        s->occupied = 0;
    }

    while (len >= 2) {
        // output THIS image as-is
        av_frame_make_writable(s->frame[nout]);
        for (i = 0; i < s->nb_planes; i++)
            av_image_copy_plane(s->frame[nout]->data[i], s->frame[nout]->linesize[i],
                                inpicref->data[i], inpicref->linesize[i],
                                s->stride[i],
                                s->planeheight[i]);
        s->frame[nout]->interlaced_frame = inpicref->interlaced_frame;
        s->frame[nout]->top_field_first = inpicref->top_field_first;
        nout++;
        len -= 2;
    }

    if (len >= 1) {
        // copy THIS image to the buffer, we need it later
        for (i = 0; i < s->nb_planes; i++)
            av_image_copy_plane(s->temp->data[i], s->temp->linesize[i],
                                inpicref->data[i], inpicref->linesize[i],
                                s->stride[i],
                                s->planeheight[i]);
        s->occupied = 1;
    }

    for (i = 0; i < nout; i++) {
        AVFrame *frame = av_frame_clone(s->frame[i]);
        int interlaced = frame ? frame->interlaced_frame : 0;
        int tff        = frame ? frame->top_field_first  : 0;

        if (!frame) {
            av_frame_free(&inpicref);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(frame, inpicref);
        frame->interlaced_frame = interlaced;
        frame->top_field_first  = tff;
        frame->pts = ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time) +
                     av_rescale(outlink->frame_count_in, s->ts_unit.num,
                                s->ts_unit.den);
        ret = ff_filter_frame(outlink, frame);
    }

    av_frame_free(&inpicref);
    return ret;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    TelecineContext *s = ctx->priv;
    int i;

    av_frame_free(&s->temp);
    for (i = 0; i < s->out_cnt; i++)
        av_frame_free(&s->frame[i]);
}

static const AVFilterPad telecine_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad telecine_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_telecine = {
    .name          = "telecine",
    .description   = NULL_IF_CONFIG_SMALL("Apply a telecine pattern."),
    .priv_size     = sizeof(TelecineContext),
    .priv_class    = &telecine_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = telecine_inputs,
    .outputs       = telecine_outputs,
};