You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

337 lines
9.6KB

  1. /*
  2. * Copyright (C) 2012 Michael Niedermayer <michaelni@gmx.at>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include "libavutil/cpu.h"
  21. #include "libavutil/common.h"
  22. #include "libavutil/pixdesc.h"
  23. #include "avfilter.h"
  24. #include "internal.h"
  25. #undef NDEBUG
  26. #include <assert.h>
/* Number of recent per-frame decisions kept for the multi-frame vote. */
#define HIST_SIZE 4

/* Field-order classification emitted for each analysed frame. */
typedef enum {
    TFF,          // top field first
    BFF,          // bottom field first
    PROGRSSIVE,   // progressive; sic — historical misspelling, used consistently in this file
    UNDETERMINED, // no confident single-frame decision
} Type;
/* Private state of the idet (interlace detect) filter instance. */
typedef struct {
    float interlace_threshold;   // min alpha[x]/alpha[y] ratio to declare TFF/BFF
    float progressive_threshold; // min alpha[1]/delta ratio to declare progressive
    Type last_type;              // smoothed multi-frame decision (with hysteresis)
    Type prestat[4];             // per-frame decision counters, indexed by Type
                                 // (declared Type but used as plain int counters)
    Type poststat[4];            // multi-frame decision counters, indexed by Type
    uint8_t history[HIST_SIZE];  // recent per-frame decisions; history[0] is newest
    AVFilterBufferRef *cur;      // frame currently analysed
    AVFilterBufferRef *next;     // following frame
    AVFilterBufferRef *prev;     // preceding frame
    /* line comparator: sum of |prev[x]+next[x]-2*cur[x]|; swapped for the
     * 16-bit variant in end_frame() based on component depth */
    int (*filter_line)(const uint8_t *prev, const uint8_t *cur, const uint8_t *next, int w);
    const AVPixFmtDescriptor *csp; // pixel format descriptor, lazily set in end_frame()
} IDETContext;
  47. static const char *type2str(Type type)
  48. {
  49. switch(type) {
  50. case TFF : return "Top Field First ";
  51. case BFF : return "Bottom Field First";
  52. case PROGRSSIVE : return "Progressive ";
  53. case UNDETERMINED: return "Undetermined ";
  54. }
  55. return NULL;
  56. }
  57. static int filter_line_c(const uint8_t *a, const uint8_t *b, const uint8_t *c, int w)
  58. {
  59. int x;
  60. int ret=0;
  61. for(x=0; x<w; x++){
  62. ret += FFABS((*a++ + *c++) - 2 * *b++);
  63. }
  64. return ret;
  65. }
  66. static int filter_line_c_16bit(const uint16_t *a, const uint16_t *b, const uint16_t *c, int w)
  67. {
  68. int x;
  69. int ret=0;
  70. for(x=0; x<w; x++){
  71. ret += FFABS((*a++ + *c++) - 2 * *b++);
  72. }
  73. return ret;
  74. }
/*
 * Analyse idet->prev/cur/next and classify the current frame.
 * Accumulates line-difference scores per plane, derives a single-frame Type,
 * smooths it over HIST_SIZE frames with hysteresis, and tags idet->cur
 * (interlaced / top_field_first) accordingly.
 */
static void filter(AVFilterContext *ctx)
{
    IDETContext *idet = ctx->priv;
    int y, i;
    int64_t alpha[2]={0}; // per-parity SAD of cur's neighbours vs prev/next lines
    int64_t delta=0;      // intra-frame SAD: cur's own line vs its neighbours
    Type type, best_type;
    int match = 0;

    for (i = 0; i < idet->csp->nb_components; i++) {
        int w = idet->cur->video->w;
        int h = idet->cur->video->h;
        int refs = idet->cur->linesize[i];

        // planes 1 and 2 are chroma: scale dimensions by the subsampling factors
        if (i && i<3) {
            w >>= idet->csp->log2_chroma_w;
            h >>= idet->csp->log2_chroma_h;
        }

        // skip two border lines at top and bottom so cur±refs stays in bounds
        for (y = 2; y < h - 2; y++) {
            uint8_t *prev = &idet->prev->data[i][y*refs];
            uint8_t *cur  = &idet->cur ->data[i][y*refs];
            uint8_t *next = &idet->next->data[i][y*refs];
            // bucket the prev/next comparisons by line parity; the field order
            // determines which bucket accumulates the smaller error
            alpha[ y   &1] += idet->filter_line(cur-refs, prev, cur+refs, w);
            alpha[(y^1)&1] += idet->filter_line(cur-refs, next, cur+refs, w);
            delta          += idet->filter_line(cur-refs,  cur, cur+refs, w);
        }
    }

    // NOTE(review): if alpha[0], alpha[1] or delta is 0 (e.g. flat input) the
    // float divisions below produce inf/NaN; the comparisons then fall through
    // to UNDETERMINED — presumably intentional, but worth confirming.
    if      (alpha[0] / (float)alpha[1] > idet->interlace_threshold){
        type = TFF;
    }else if(alpha[1] / (float)alpha[0] > idet->interlace_threshold){
        type = BFF;
    }else if(alpha[1] / (float)delta    > idet->progressive_threshold){
        type = PROGRSSIVE;
    }else{
        type = UNDETERMINED;
    }

    // push the new decision onto the history (history[0] is the newest)
    memmove(idet->history+1, idet->history, HIST_SIZE-1);
    idet->history[0] = type;
    best_type = UNDETERMINED;
    // count how many determined entries agree; any disagreement resets match to 0
    for(i=0; i<HIST_SIZE; i++){
        if(idet->history[i] != UNDETERMINED){
            if(best_type == UNDETERMINED)
                best_type = idet->history[i];

            if(idet->history[i] == best_type) {
                match++;
            }else{
                match=0;
                break;
            }
        }
    }
    // hysteresis: adopt a new decision immediately when we had none,
    // but require 3 consistent frames to overturn an existing one
    if(idet->last_type == UNDETERMINED){
        if(match  ) idet->last_type = best_type;
    }else{
        if(match>2) idet->last_type = best_type;
    }

    // propagate the smoothed decision into the frame's flags
    if      (idet->last_type == TFF){
        idet->cur->video->top_field_first = 1;
        idet->cur->video->interlaced = 1;
    }else if(idet->last_type == BFF){
        idet->cur->video->top_field_first = 0;
        idet->cur->video->interlaced = 1;
    }else if(idet->last_type == PROGRSSIVE){
        idet->cur->video->interlaced = 0;
    }

    idet->prestat [           type] ++;
    idet->poststat[idet->last_type] ++;
    av_log(ctx, AV_LOG_DEBUG, "Single frame:%s, Multi frame:%s\n", type2str(type), type2str(idet->last_type));
}
/*
 * Input pad start_frame: rotate the prev/cur/next frame window.
 * Ownership of picref is taken over as idet->next; the oldest reference is
 * released. Forwards a new reference to cur downstream once cur exists.
 */
static int start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
{
    AVFilterContext *ctx = link->dst;
    IDETContext *idet = ctx->priv;

    if (idet->prev)
        avfilter_unref_buffer(idet->prev);
    idet->prev = idet->cur;
    idet->cur  = idet->next;
    idet->next = picref;
    link->cur_buf = NULL; // we keep picref ourselves; stop the link from freeing it

    // fewer than two frames seen so far: nothing to emit yet
    if (!idet->cur)
        return 0;

    // first analysable frame has no predecessor: duplicate cur as prev
    if (!idet->prev)
        idet->prev = avfilter_ref_buffer(idet->cur, ~0);

    return ff_start_frame(ctx->outputs[0], avfilter_ref_buffer(idet->cur, ~0));
}
/*
 * Input pad end_frame: run the detector on the buffered frame window and
 * forward the (flag-updated) frame downstream as one full slice.
 */
static int end_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->dst;
    IDETContext *idet = ctx->priv;

    // still priming the 3-frame window: nothing to analyse
    if (!idet->cur)
        return 0;

    // lazily resolve the pixel format descriptor on the first frame
    if (!idet->csp)
        idet->csp = &av_pix_fmt_descriptors[link->format];
    // components of 9..16 bits are stored as uint16_t; switch comparators.
    // NOTE(review): the (void*) cast silences the uint8_t*/uint16_t* signature
    // mismatch — calling through the mismatched pointer type is technically UB,
    // though it works here in practice.
    if (idet->csp->comp[0].depth_minus1 / 8 == 1)
        idet->filter_line = (void*)filter_line_c_16bit;

    filter(ctx);

    ff_draw_slice(ctx->outputs[0], 0, link->h, 1);
    return ff_end_frame(ctx->outputs[0]);
}
/*
 * Output pad request_frame: keep pulling from the input until the frame
 * window has advanced far enough that a current frame exists (the filter
 * needs more than one input frame before it can emit its first output).
 */
static int request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    IDETContext *idet = ctx->priv;

    do {
        int ret;

        if ((ret = ff_request_frame(link->src->inputs[0])))
            return ret; // propagate EOF/errors from upstream
    } while (!idet->cur);

    return 0;
}
/*
 * Output pad poll_frame: report how many frames can be pulled. Because this
 * filter buffers one frame of lookahead, pre-pull one input frame when none
 * is buffered yet so the reported count stays accurate.
 */
static int poll_frame(AVFilterLink *link)
{
    IDETContext *idet = link->src->priv;
    int ret, val;

    val = ff_poll_frame(link->src->inputs[0]);

    if (val >= 1 && !idet->next) { //FIXME change API to not requre this red tape
        if ((ret = ff_request_frame(link->src->inputs[0])) < 0)
            return ret;
        val = ff_poll_frame(link->src->inputs[0]);
    }
    assert(idet->next || !val);

    return val;
}
/*
 * Filter teardown: log the accumulated single-frame and multi-frame decision
 * statistics, then release the buffered frame references.
 */
static av_cold void uninit(AVFilterContext *ctx)
{
    IDETContext *idet = ctx->priv;

    av_log(ctx, AV_LOG_INFO, "Single frame detection: TFF:%d BFF:%d Progressive:%d Undetermined:%d\n",
           idet->prestat[TFF],
           idet->prestat[BFF],
           idet->prestat[PROGRSSIVE],
           idet->prestat[UNDETERMINED]
    );
    av_log(ctx, AV_LOG_INFO, "Multi frame detection: TFF:%d BFF:%d Progressive:%d Undetermined:%d\n",
           idet->poststat[TFF],
           idet->poststat[BFF],
           idet->poststat[PROGRSSIVE],
           idet->poststat[UNDETERMINED]
    );

    avfilter_unref_bufferp(&idet->prev);
    avfilter_unref_bufferp(&idet->cur );
    avfilter_unref_bufferp(&idet->next);
}
/*
 * Declare the pixel formats the detector supports: planar YUV (8/10/16-bit,
 * native endianness for >8-bit) plus 8/16-bit gray and YUVA420P.
 */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUVJ444P,
        AV_NE( AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_GRAY16LE ),
        AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ440P,
        AV_NE( AV_PIX_FMT_YUV420P10BE, AV_PIX_FMT_YUV420P10LE ),
        AV_NE( AV_PIX_FMT_YUV422P10BE, AV_PIX_FMT_YUV422P10LE ),
        AV_NE( AV_PIX_FMT_YUV444P10BE, AV_PIX_FMT_YUV444P10LE ),
        AV_NE( AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_YUV420P16LE ),
        AV_NE( AV_PIX_FMT_YUV422P16BE, AV_PIX_FMT_YUV422P16LE ),
        AV_NE( AV_PIX_FMT_YUV444P16BE, AV_PIX_FMT_YUV444P16LE ),
        AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_NONE
    };

    ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));

    return 0;
}
/*
 * Filter init: set default thresholds, optionally overridden by args in the
 * form "interlace_threshold:progressive_threshold" (best-effort parse — the
 * sscanf result is deliberately ignored; unparsed fields keep their defaults).
 */
static av_cold int init(AVFilterContext *ctx, const char *args)
{
    IDETContext *idet = ctx->priv;

    idet->csp = NULL; // resolved lazily from the link format in end_frame()

    idet->interlace_threshold   = 1.01;
    idet->progressive_threshold = 2.5;

    if (args) sscanf(args, "%f:%f", &idet->interlace_threshold, &idet->progressive_threshold);

    idet->last_type = UNDETERMINED;
    memset(idet->history, UNDETERMINED, HIST_SIZE);

    idet->filter_line = filter_line_c; // 8-bit default; 16-bit swap in end_frame()

    return 0;
}
  254. static int null_draw_slice(AVFilterLink *link, int y, int h, int slice_dir) { return 0; }
/* Filter descriptor registering the idet filter with libavfilter. */
AVFilter avfilter_vf_idet = {
    .name          = "idet",
    .description   = NULL_IF_CONFIG_SMALL("Interlace detect Filter."),

    .priv_size     = sizeof(IDETContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,

    .inputs    = (const AVFilterPad[]) {{ .name             = "default",
                                          .type             = AVMEDIA_TYPE_VIDEO,
                                          .start_frame      = start_frame,
                                          .draw_slice       = null_draw_slice,
                                          .end_frame        = end_frame,
                                          // frames are held across calls; input must stay intact
                                          .min_perms        = AV_PERM_PRESERVE },
                                        { .name = NULL}},

    .outputs   = (const AVFilterPad[]) {{ .name             = "default",
                                          .type             = AVMEDIA_TYPE_VIDEO,
                                          // analysis only: downstream must not write our buffers
                                          .rej_perms        = AV_PERM_WRITE,
                                          .poll_frame       = poll_frame,
                                          .request_frame    = request_frame, },
                                        { .name = NULL}},
};