/*
 * Copyright (c) 2010 Stefano Sabatini
 * Copyright (c) 2008 Vitor Sessak
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * transposition filter
 * Based on MPlayer libmpcodecs/vf_rotate.c.
 */

#include <stdio.h>

#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
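
/* Passthrough modes: with "landscape" or "portrait" the filter forwards the
 * frame untouched when the input already has that orientation; with "none"
 * the transposition is always applied. */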
typedef enum {
    TRANSPOSE_PT_TYPE_NONE,
    TRANSPOSE_PT_TYPE_LANDSCAPE,
    TRANSPOSE_PT_TYPE_PORTRAIT,
} PassthroughType;
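
/* Transpose directions, matching the values of the "dir" option:
 * 0 = cclock_flip, 1 = clock, 2 = cclock, 3 = clock_flip.
 * filter_slice() below applies bit 0 by mirroring the source rows and
 * bit 1 by mirroring the destination rows on top of the basic transpose. */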
enum TransposeDir {
    TRANSPOSE_CCLOCK_FLIP,
    TRANSPOSE_CLOCK,
    TRANSPOSE_CCLOCK,
    TRANSPOSE_CLOCK_FLIP,
};

typedef struct TransContext {
    const AVClass *class;
    int hsub, vsub;
    int planes;
    int pixsteps[4];

    int passthrough;    ///< PassthroughType, landscape passthrough mode enabled
    int dir;            ///< TransposeDir
} TransContext;
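
/* Accept every pixel format that is not paletted, hardware-accelerated or
 * bitstream-packed and whose chroma planes are subsampled by the same factor
 * horizontally and vertically (otherwise a transposed frame would need a
 * different pixel format). */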
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *pix_fmts = NULL;
    int fmt;

    for (fmt = 0; av_pix_fmt_desc_get(fmt); fmt++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(fmt);
        if (!(desc->flags & AV_PIX_FMT_FLAG_PAL ||
              desc->flags & AV_PIX_FMT_FLAG_HWACCEL ||
              desc->flags & AV_PIX_FMT_FLAG_BITSTREAM ||
              desc->log2_chroma_w != desc->log2_chroma_h))
            ff_add_format(&pix_fmts, fmt);
    }

    return ff_set_common_formats(ctx, pix_fmts);
}

static int config_props_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    TransContext *trans = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const AVPixFmtDescriptor *desc_out = av_pix_fmt_desc_get(outlink->format);
    const AVPixFmtDescriptor *desc_in  = av_pix_fmt_desc_get(inlink->format);

    if (trans->dir & 4) {
        av_log(ctx, AV_LOG_WARNING,
               "dir values greater than 3 are deprecated, use the passthrough option instead\n");
        trans->dir &= 3;
        trans->passthrough = TRANSPOSE_PT_TYPE_LANDSCAPE;
    }

    if ((inlink->w >= inlink->h && trans->passthrough == TRANSPOSE_PT_TYPE_LANDSCAPE) ||
        (inlink->w <= inlink->h && trans->passthrough == TRANSPOSE_PT_TYPE_PORTRAIT)) {
        av_log(ctx, AV_LOG_VERBOSE,
               "w:%d h:%d -> w:%d h:%d (passthrough mode)\n",
               inlink->w, inlink->h, inlink->w, inlink->h);
        return 0;
    } else {
        trans->passthrough = TRANSPOSE_PT_TYPE_NONE;
    }

    trans->hsub = desc_in->log2_chroma_w;
    trans->vsub = desc_in->log2_chroma_h;
    trans->planes = av_pix_fmt_count_planes(outlink->format);

    av_assert0(desc_in->nb_components == desc_out->nb_components);

    av_image_fill_max_pixsteps(trans->pixsteps, NULL, desc_out);

    outlink->w = inlink->h;
    outlink->h = inlink->w;

    if (inlink->sample_aspect_ratio.num)
        outlink->sample_aspect_ratio = av_div_q((AVRational) { 1, 1 },
                                                inlink->sample_aspect_ratio);
    else
        outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;

    av_log(ctx, AV_LOG_VERBOSE,
           "w:%d h:%d dir:%d -> w:%d h:%d rotation:%s vflip:%d\n",
           inlink->w, inlink->h, trans->dir, outlink->w, outlink->h,
           trans->dir == 1 || trans->dir == 3 ? "clockwise" : "counterclockwise",
           trans->dir == 0 || trans->dir == 3);
    return 0;
}
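
/* In passthrough mode frames are forwarded untouched, so request the buffer
 * from the next filter instead of allocating one here. */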
static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
{
    TransContext *trans = inlink->dst->priv;

    return trans->passthrough ?
        ff_null_get_video_buffer   (inlink, w, h) :
        ff_default_get_video_buffer(inlink, w, h);
}

typedef struct ThreadData {
    AVFrame *in, *out;
} ThreadData;
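
/* Transpose one band of output rows, [start, end), in every plane.  Each
 * slice job walks the output row by row and reads the source column-wise;
 * the flips implied by the direction bits are realized with negative line
 * sizes on the source (bit 0) and the destination (bit 1). */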
static int filter_slice(AVFilterContext *ctx, void *arg, int jobnr,
                        int nb_jobs)
{
    TransContext *trans = ctx->priv;
    ThreadData *td = arg;
    AVFrame *out = td->out;
    AVFrame *in = td->in;
    int plane;

    for (plane = 0; plane < trans->planes; plane++) {
        int hsub    = plane == 1 || plane == 2 ? trans->hsub : 0;
        int vsub    = plane == 1 || plane == 2 ? trans->vsub : 0;
        int pixstep = trans->pixsteps[plane];
        int inh     = FF_CEIL_RSHIFT(in->height,  vsub);
        int outw    = FF_CEIL_RSHIFT(out->width,  hsub);
        int outh    = FF_CEIL_RSHIFT(out->height, vsub);
        int start   = (outh *  jobnr   ) / nb_jobs;
        int end     = (outh * (jobnr+1)) / nb_jobs;
        uint8_t *dst, *src;
        int dstlinesize, srclinesize;
        int x, y;

        dstlinesize = out->linesize[plane];
        dst         = out->data[plane] + start * dstlinesize;
        src         = in->data[plane];
        srclinesize = in->linesize[plane];

        if (trans->dir & 1) {
            src         += in->linesize[plane] * (inh - 1);
            srclinesize *= -1;
        }

        if (trans->dir & 2) {
            dst          = out->data[plane] + dstlinesize * (outh - start - 1);
            dstlinesize *= -1;
        }

        switch (pixstep) {
        case 1:
            for (y = start; y < end; y++, dst += dstlinesize)
                for (x = 0; x < outw; x++)
                    dst[x] = src[x * srclinesize + y];
            break;
        case 2:
            for (y = start; y < end; y++, dst += dstlinesize) {
                for (x = 0; x < outw; x++)
                    *((uint16_t *)(dst + 2 * x)) =
                        *((uint16_t *)(src + x * srclinesize + y * 2));
            }
            break;
        case 3:
            for (y = start; y < end; y++, dst += dstlinesize) {
                for (x = 0; x < outw; x++) {
                    int32_t v = AV_RB24(src + x * srclinesize + y * 3);
                    AV_WB24(dst + 3 * x, v);
                }
            }
            break;
        case 4:
            for (y = start; y < end; y++, dst += dstlinesize) {
                for (x = 0; x < outw; x++)
                    *((uint32_t *)(dst + 4 * x)) =
                        *((uint32_t *)(src + x * srclinesize + y * 4));
            }
            break;
        case 6:
            for (y = start; y < end; y++, dst += dstlinesize) {
                for (x = 0; x < outw; x++) {
                    int64_t v = AV_RB48(src + x * srclinesize + y * 6);
                    AV_WB48(dst + 6 * x, v);
                }
            }
            break;
        case 8:
            for (y = start; y < end; y++, dst += dstlinesize) {
                for (x = 0; x < outw; x++)
                    *((uint64_t *)(dst + 8 * x)) = *((uint64_t *)(src + x * srclinesize + y * 8));
            }
            break;
        }
    }

    return 0;
}
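
/* Per-frame entry point: forward the frame as-is in passthrough mode,
 * otherwise allocate the transposed output, copy the frame properties,
 * swap the sample aspect ratio and run filter_slice() across the available
 * threads. */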
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    TransContext *trans = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    ThreadData td;
    AVFrame *out;

    if (trans->passthrough)
        return ff_filter_frame(outlink, in);

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }

    av_frame_copy_props(out, in);

    if (in->sample_aspect_ratio.num == 0) {
        out->sample_aspect_ratio = in->sample_aspect_ratio;
    } else {
        out->sample_aspect_ratio.num = in->sample_aspect_ratio.den;
        out->sample_aspect_ratio.den = in->sample_aspect_ratio.num;
    }

    td.in = in, td.out = out;
    ctx->internal->execute(ctx, filter_slice, &td, NULL, FFMIN(outlink->h, ctx->graph->nb_threads));

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
#define OFFSET(x) offsetof(TransContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM

static const AVOption transpose_options[] = {
    { "dir", "set transpose direction", OFFSET(dir), AV_OPT_TYPE_INT, { .i64 = TRANSPOSE_CCLOCK_FLIP }, 0, 7, FLAGS, "dir" },
        { "cclock_flip", "rotate counter-clockwise with vertical flip", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK_FLIP }, .unit = "dir" },
        { "clock",       "rotate clockwise",                            0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK       }, .unit = "dir" },
        { "cclock",      "rotate counter-clockwise",                    0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK      }, .unit = "dir" },
        { "clock_flip",  "rotate clockwise with vertical flip",         0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK_FLIP  }, .unit = "dir" },

    { "passthrough", "do not apply transposition if the input matches the specified geometry",
      OFFSET(passthrough), AV_OPT_TYPE_INT, { .i64 = TRANSPOSE_PT_TYPE_NONE }, 0, INT_MAX, FLAGS, "passthrough" },
        { "none",      "always apply transposition", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_PT_TYPE_NONE },      INT_MIN, INT_MAX, FLAGS, "passthrough" },
        { "portrait",  "preserve portrait geometry",  0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_PT_TYPE_PORTRAIT },  INT_MIN, INT_MAX, FLAGS, "passthrough" },
        { "landscape", "preserve landscape geometry", 0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_PT_TYPE_LANDSCAPE }, INT_MIN, INT_MAX, FLAGS, "passthrough" },

    { NULL }
};

AVFILTER_DEFINE_CLASS(transpose);

static const AVFilterPad avfilter_vf_transpose_inputs[] = {
    {
        .name             = "default",
        .type             = AVMEDIA_TYPE_VIDEO,
        .get_video_buffer = get_video_buffer,
        .filter_frame     = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_transpose_outputs[] = {
    {
        .name         = "default",
        .config_props = config_props_output,
        .type         = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_transpose = {
    .name          = "transpose",
    .description   = NULL_IF_CONFIG_SMALL("Transpose input video."),
    .priv_size     = sizeof(TransContext),
    .priv_class    = &transpose_class,
    .query_formats = query_formats,
    .inputs        = avfilter_vf_transpose_inputs,
    .outputs       = avfilter_vf_transpose_outputs,
    .flags         = AVFILTER_FLAG_SLICE_THREADS,
};