You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

372 lines
12KB

  1. /*
  2. * Copyright (c) 2013 Vittorio Giovara
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. /**
  21. * @file
  22. * Generate a frame packed video, by combining two views in a single surface.
  23. */
  24. #include <string.h>
  25. #include "libavutil/imgutils.h"
  26. #include "libavutil/opt.h"
  27. #include "libavutil/pixdesc.h"
  28. #include "libavutil/rational.h"
  29. #include "libavutil/stereo3d.h"
  30. #include "avfilter.h"
  31. #include "formats.h"
  32. #include "internal.h"
  33. #include "video.h"
  34. #define LEFT 0
  35. #define RIGHT 1
/**
 * Private context for the framepack filter.
 * Holds the negotiated pixel format, the packing mode and one buffered
 * frame per input view between request_frame() invocations.
 */
typedef struct FramepackContext {
    const AVClass *class;

    const AVPixFmtDescriptor *pix_desc; ///< agreed pixel format
    enum AVStereo3DType format;         ///< frame pack type output

    AVFrame *input_views[2];            ///< input frames (indexed by LEFT/RIGHT)

    int64_t double_pts;                 ///< new pts for frameseq mode
} FramepackContext;
/* Only 8-bit planar YUV(A) formats: the packing routines below copy
 * byte-per-sample, so higher bit depths are not supported here. */
static const enum AVPixelFormat formats_supported[] = {
    AV_PIX_FMT_YUV420P,  AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_NONE
};
  49. static int query_formats(AVFilterContext *ctx)
  50. {
  51. // this will ensure that formats are the same on all pads
  52. AVFilterFormats *fmts_list = ff_make_format_list(formats_supported);
  53. if (!fmts_list)
  54. return AVERROR(ENOMEM);
  55. return ff_set_common_formats(ctx, fmts_list);
  56. }
  57. static av_cold void framepack_uninit(AVFilterContext *ctx)
  58. {
  59. FramepackContext *s = ctx->priv;
  60. // clean any leftover frame
  61. av_frame_free(&s->input_views[LEFT]);
  62. av_frame_free(&s->input_views[RIGHT]);
  63. }
  64. static int config_output(AVFilterLink *outlink)
  65. {
  66. AVFilterContext *ctx = outlink->src;
  67. FramepackContext *s = outlink->src->priv;
  68. int width = ctx->inputs[LEFT]->w;
  69. int height = ctx->inputs[LEFT]->h;
  70. AVRational time_base = ctx->inputs[LEFT]->time_base;
  71. AVRational frame_rate = ctx->inputs[LEFT]->frame_rate;
  72. // check size and fps match on the other input
  73. if (width != ctx->inputs[RIGHT]->w ||
  74. height != ctx->inputs[RIGHT]->h) {
  75. av_log(ctx, AV_LOG_ERROR,
  76. "Left and right sizes differ (%dx%d vs %dx%d).\n",
  77. width, height,
  78. ctx->inputs[RIGHT]->w, ctx->inputs[RIGHT]->h);
  79. return AVERROR_INVALIDDATA;
  80. } else if (av_cmp_q(time_base, ctx->inputs[RIGHT]->time_base) != 0) {
  81. av_log(ctx, AV_LOG_ERROR,
  82. "Left and right time bases differ (%d/%d vs %d/%d).\n",
  83. time_base.num, time_base.den,
  84. ctx->inputs[RIGHT]->time_base.num,
  85. ctx->inputs[RIGHT]->time_base.den);
  86. return AVERROR_INVALIDDATA;
  87. } else if (av_cmp_q(frame_rate, ctx->inputs[RIGHT]->frame_rate) != 0) {
  88. av_log(ctx, AV_LOG_ERROR,
  89. "Left and right framerates differ (%d/%d vs %d/%d).\n",
  90. frame_rate.num, frame_rate.den,
  91. ctx->inputs[RIGHT]->frame_rate.num,
  92. ctx->inputs[RIGHT]->frame_rate.den);
  93. return AVERROR_INVALIDDATA;
  94. }
  95. s->pix_desc = av_pix_fmt_desc_get(outlink->format);
  96. if (!s->pix_desc)
  97. return AVERROR_BUG;
  98. // modify output properties as needed
  99. switch (s->format) {
  100. case AV_STEREO3D_FRAMESEQUENCE:
  101. time_base.den *= 2;
  102. frame_rate.num *= 2;
  103. s->double_pts = AV_NOPTS_VALUE;
  104. break;
  105. case AV_STEREO3D_COLUMNS:
  106. case AV_STEREO3D_SIDEBYSIDE:
  107. width *= 2;
  108. break;
  109. case AV_STEREO3D_LINES:
  110. case AV_STEREO3D_TOPBOTTOM:
  111. height *= 2;
  112. break;
  113. default:
  114. av_log(ctx, AV_LOG_ERROR, "Unknown packing mode.");
  115. return AVERROR_INVALIDDATA;
  116. }
  117. outlink->w = width;
  118. outlink->h = height;
  119. outlink->time_base = time_base;
  120. outlink->frame_rate= frame_rate;
  121. return 0;
  122. }
  123. static void horizontal_frame_pack(FramepackContext *s,
  124. AVFrame *dst,
  125. int interleaved)
  126. {
  127. int plane, i;
  128. int length = dst->width / 2;
  129. int lines = dst->height;
  130. for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
  131. const uint8_t *leftp = s->input_views[LEFT]->data[plane];
  132. const uint8_t *rightp = s->input_views[RIGHT]->data[plane];
  133. uint8_t *dstp = dst->data[plane];
  134. if (plane == 1 || plane == 2) {
  135. length = FF_CEIL_RSHIFT(dst->width / 2, s->pix_desc->log2_chroma_w);
  136. lines = FF_CEIL_RSHIFT(dst->height, s->pix_desc->log2_chroma_h);
  137. }
  138. if (interleaved) {
  139. for (i = 0; i < lines; i++) {
  140. int j;
  141. int k = 0;
  142. for (j = 0; j < length; j++) {
  143. dstp[k++] = leftp[j];
  144. dstp[k++] = rightp[j];
  145. }
  146. dstp += dst->linesize[plane];
  147. leftp += s->input_views[LEFT]->linesize[plane];
  148. rightp += s->input_views[RIGHT]->linesize[plane];
  149. }
  150. } else {
  151. av_image_copy_plane(dst->data[plane], dst->linesize[plane],
  152. leftp, s->input_views[LEFT]->linesize[plane],
  153. length, lines);
  154. av_image_copy_plane(dst->data[plane] + length, dst->linesize[plane],
  155. rightp, s->input_views[RIGHT]->linesize[plane],
  156. length, lines);
  157. }
  158. }
  159. }
  160. static void vertical_frame_pack(FramepackContext *s,
  161. AVFrame *dst,
  162. int interleaved)
  163. {
  164. int plane, offset;
  165. int length = dst->width;
  166. int lines = dst->height / 2;
  167. for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
  168. if (plane == 1 || plane == 2) {
  169. length = -(-(dst->width) >> s->pix_desc->log2_chroma_w);
  170. lines = -(-(dst->height / 2) >> s->pix_desc->log2_chroma_h);
  171. }
  172. offset = interleaved ? dst->linesize[plane] : dst->linesize[plane] * lines;
  173. av_image_copy_plane(dst->data[plane],
  174. dst->linesize[plane] << interleaved,
  175. s->input_views[LEFT]->data[plane],
  176. s->input_views[LEFT]->linesize[plane],
  177. length, lines);
  178. av_image_copy_plane(dst->data[plane] + offset,
  179. dst->linesize[plane] << interleaved,
  180. s->input_views[RIGHT]->data[plane],
  181. s->input_views[RIGHT]->linesize[plane],
  182. length, lines);
  183. }
  184. }
  185. static av_always_inline void spatial_frame_pack(FramepackContext *s, AVFrame *dst)
  186. {
  187. switch (s->format) {
  188. case AV_STEREO3D_SIDEBYSIDE:
  189. horizontal_frame_pack(s, dst, 0);
  190. break;
  191. case AV_STEREO3D_COLUMNS:
  192. horizontal_frame_pack(s, dst, 1);
  193. break;
  194. case AV_STEREO3D_TOPBOTTOM:
  195. vertical_frame_pack(s, dst, 0);
  196. break;
  197. case AV_STEREO3D_LINES:
  198. vertical_frame_pack(s, dst, 1);
  199. break;
  200. }
  201. }
  202. static int filter_frame_left(AVFilterLink *inlink, AVFrame *frame)
  203. {
  204. FramepackContext *s = inlink->dst->priv;
  205. s->input_views[LEFT] = frame;
  206. return 0;
  207. }
  208. static int filter_frame_right(AVFilterLink *inlink, AVFrame *frame)
  209. {
  210. FramepackContext *s = inlink->dst->priv;
  211. s->input_views[RIGHT] = frame;
  212. return 0;
  213. }
/**
 * Pull one frame from each input, then emit the packed output.
 *
 * Frame-sequence mode passes both buffered views through one after the
 * other with consecutive (doubled-rate) timestamps; spatial modes combine
 * both views into a single freshly allocated frame.
 *
 * @return 0 or the result of ff_filter_frame() on success,
 *         a negative AVERROR code on failure.
 */
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
FramepackContext *s = ctx->priv;
AVStereo3D *stereo;
int ret, i;
/* get a frame on the either input, stop as soon as a video ends */
for (i = 0; i < 2; i++) {
if (!s->input_views[i]) {
ret = ff_request_frame(ctx->inputs[i]);
if (ret < 0)
return ret; // propagates EOF of either input; leftover view freed in uninit
}
}
if (s->format == AV_STEREO3D_FRAMESEQUENCE) {
// seed the doubled-rate pts counter from the first left frame
if (s->double_pts == AV_NOPTS_VALUE)
s->double_pts = s->input_views[LEFT]->pts;
for (i = 0; i < 2; i++) {
// set correct timestamps: left/right get consecutive pts values
s->input_views[i]->pts = s->double_pts++;
// set stereo3d side data
stereo = av_stereo3d_create_side_data(s->input_views[i]);
if (!stereo)
return AVERROR(ENOMEM);
stereo->type = s->format;
// filter the frame and immediately relinquish its pointer
// (ff_filter_frame takes ownership, so clear the slot before
// checking the result to avoid a double free later)
ret = ff_filter_frame(outlink, s->input_views[i]);
s->input_views[i] = NULL;
if (ret < 0)
return ret;
}
return ret;
} else {
AVFrame *dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!dst)
return AVERROR(ENOMEM);
// combine both views into dst according to the packing mode
spatial_frame_pack(s, dst);
// get any property from the original frame
ret = av_frame_copy_props(dst, s->input_views[LEFT]);
if (ret < 0) {
av_frame_free(&dst);
return ret;
}
// both views have been copied out; release them
for (i = 0; i < 2; i++)
av_frame_free(&s->input_views[i]);
// set stereo3d side data
stereo = av_stereo3d_create_side_data(dst);
if (!stereo) {
av_frame_free(&dst);
return AVERROR(ENOMEM);
}
stereo->type = s->format;
return ff_filter_frame(outlink, dst);
}
}
#define OFFSET(x) offsetof(FramepackContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM
/* User-selectable packing modes; values map directly onto
 * enum AVStereo3DType. Default is side-by-side. */
static const AVOption framepack_options[] = {
    { "format", "Frame pack output format", OFFSET(format), AV_OPT_TYPE_INT,
      { .i64 = AV_STEREO3D_SIDEBYSIDE }, 0, INT_MAX, .flags = V, .unit = "format" },
    { "sbs", "Views are packed next to each other", 0, AV_OPT_TYPE_CONST,
      { .i64 = AV_STEREO3D_SIDEBYSIDE }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "tab", "Views are packed on top of each other", 0, AV_OPT_TYPE_CONST,
      { .i64 = AV_STEREO3D_TOPBOTTOM }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "frameseq", "Views are one after the other", 0, AV_OPT_TYPE_CONST,
      { .i64 = AV_STEREO3D_FRAMESEQUENCE }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "lines", "Views are interleaved by lines", 0, AV_OPT_TYPE_CONST,
      { .i64 = AV_STEREO3D_LINES }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "columns", "Views are interleaved by columns", 0, AV_OPT_TYPE_CONST,
      { .i64 = AV_STEREO3D_COLUMNS }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { NULL },
};

AVFILTER_DEFINE_CLASS(framepack);
/* Two video inputs; FIFOs keep frames queued until request_frame()
 * has one from each view. */
static const AVFilterPad framepack_inputs[] = {
    {
        .name         = "left",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame_left,
        .needs_fifo   = 1,
    },
    {
        .name         = "right",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame_right,
        .needs_fifo   = 1,
    },
    { NULL }
};
/* Single video output carrying the packed view(s). */
static const AVFilterPad framepack_outputs[] = {
    {
        .name          = "packed",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
/* Filter definition registered with libavfilter. */
AVFilter ff_vf_framepack = {
    .name          = "framepack",
    .description   = NULL_IF_CONFIG_SMALL("Generate a frame packed stereoscopic video."),
    .priv_size     = sizeof(FramepackContext),
    .priv_class    = &framepack_class,
    .query_formats = query_formats,
    .inputs        = framepack_inputs,
    .outputs       = framepack_outputs,
    .uninit        = framepack_uninit,
};