/*
 * Copyright (c) 2013 Vittorio Giovara
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Generate a frame-packed video by combining two views in a single surface.
 */

#include <string.h>

#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/rational.h"
#include "libavutil/stereo3d.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

#define LEFT  0
#define RIGHT 1

typedef struct FramepackContext {
    const AVClass *class;

    const AVPixFmtDescriptor *pix_desc; ///< agreed pixel format

    enum AVStereo3DType format;         ///< frame pack type output

    AVFrame *input_views[2];            ///< input frames

    int64_t double_pts;                 ///< new pts for frameseq mode
} FramepackContext;

static const enum AVPixelFormat formats_supported[] = {
    AV_PIX_FMT_YUV420P,  AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_NONE
};

static int query_formats(AVFilterContext *ctx)
{
    // this will ensure that formats are the same on all pads
    AVFilterFormats *fmts_list = ff_make_format_list(formats_supported);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

static av_cold void framepack_uninit(AVFilterContext *ctx)
{
    FramepackContext *s = ctx->priv;

    // clean any leftover frame
    av_frame_free(&s->input_views[LEFT]);
    av_frame_free(&s->input_views[RIGHT]);
}

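/*
 * Validate that the two inputs match in size, time base and frame rate,
 * then derive the output geometry and timing from the packing mode:
 * frameseq doubles the frame rate, sbs/columns double the width,
 * tab/lines double the height.
 */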
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx  = outlink->src;
    FramepackContext *s   = outlink->src->priv;

    int width             = ctx->inputs[LEFT]->w;
    int height            = ctx->inputs[LEFT]->h;
    AVRational time_base  = ctx->inputs[LEFT]->time_base;
    AVRational frame_rate = ctx->inputs[LEFT]->frame_rate;

    // check size and fps match on the other input
    if (width  != ctx->inputs[RIGHT]->w ||
        height != ctx->inputs[RIGHT]->h) {
        av_log(ctx, AV_LOG_ERROR,
               "Left and right sizes differ (%dx%d vs %dx%d).\n",
               width, height,
               ctx->inputs[RIGHT]->w, ctx->inputs[RIGHT]->h);
        return AVERROR_INVALIDDATA;
    } else if (av_cmp_q(time_base, ctx->inputs[RIGHT]->time_base) != 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Left and right time bases differ (%d/%d vs %d/%d).\n",
               time_base.num, time_base.den,
               ctx->inputs[RIGHT]->time_base.num,
               ctx->inputs[RIGHT]->time_base.den);
        return AVERROR_INVALIDDATA;
    } else if (av_cmp_q(frame_rate, ctx->inputs[RIGHT]->frame_rate) != 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Left and right framerates differ (%d/%d vs %d/%d).\n",
               frame_rate.num, frame_rate.den,
               ctx->inputs[RIGHT]->frame_rate.num,
               ctx->inputs[RIGHT]->frame_rate.den);
        return AVERROR_INVALIDDATA;
    }

    s->pix_desc = av_pix_fmt_desc_get(outlink->format);
    if (!s->pix_desc)
        return AVERROR_BUG;

    // modify output properties as needed
    switch (s->format) {
    case AV_STEREO3D_FRAMESEQUENCE:
        time_base.den  *= 2;
        frame_rate.num *= 2;
        s->double_pts = AV_NOPTS_VALUE;
        break;
    case AV_STEREO3D_COLUMNS:
    case AV_STEREO3D_SIDEBYSIDE:
        width *= 2;
        break;
    case AV_STEREO3D_LINES:
    case AV_STEREO3D_TOPBOTTOM:
        height *= 2;
        break;
    default:
        av_log(ctx, AV_LOG_ERROR, "Unknown packing mode.\n");
        return AVERROR_INVALIDDATA;
    }

    outlink->w          = width;
    outlink->h          = height;
    outlink->time_base  = time_base;
    outlink->frame_rate = frame_rate;

    return 0;
}

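/*
 * Pack the two views horizontally: side by side when interleaved is 0,
 * or alternating single columns when interleaved is 1. In column mode
 * the chroma of the two views is averaged and the averaged value is
 * written to both output columns, since subsampled planes cannot
 * alternate at full-pixel granularity.
 */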
static void horizontal_frame_pack(AVFilterLink *outlink,
                                  AVFrame *out,
                                  int interleaved)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s  = ctx->priv;
    int i, plane;

    if (interleaved) {
        const uint8_t *leftp  = s->input_views[LEFT]->data[0];
        const uint8_t *rightp = s->input_views[RIGHT]->data[0];
        uint8_t *dstp         = out->data[0];
        int length = out->width / 2;
        int lines  = out->height;

        for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
            if (plane == 1 || plane == 2) {
                length = FF_CEIL_RSHIFT(out->width / 2, s->pix_desc->log2_chroma_w);
                lines  = FF_CEIL_RSHIFT(out->height,    s->pix_desc->log2_chroma_h);
            }
            for (i = 0; i < lines; i++) {
                int j;
                leftp  = s->input_views[LEFT]->data[plane] +
                         s->input_views[LEFT]->linesize[plane] * i;
                rightp = s->input_views[RIGHT]->data[plane] +
                         s->input_views[RIGHT]->linesize[plane] * i;
                dstp   = out->data[plane] + out->linesize[plane] * i;
                for (j = 0; j < length; j++) {
                    // interpolate chroma as necessary
                    if ((s->pix_desc->log2_chroma_w ||
                         s->pix_desc->log2_chroma_h) &&
                        (plane == 1 || plane == 2)) {
                        *dstp++ = (*leftp + *rightp) / 2;
                        *dstp++ = (*leftp + *rightp) / 2;
                    } else {
                        *dstp++ = *leftp;
                        *dstp++ = *rightp;
                    }
                    leftp  += 1;
                    rightp += 1;
                }
            }
        }
    } else {
        for (i = 0; i < 2; i++) {
            const uint8_t *src[4];
            uint8_t *dst[4];
            int sub_w = s->input_views[i]->width >> s->pix_desc->log2_chroma_w;

            src[0] = s->input_views[i]->data[0];
            src[1] = s->input_views[i]->data[1];
            src[2] = s->input_views[i]->data[2];

            dst[0] = out->data[0] + i * s->input_views[i]->width;
            dst[1] = out->data[1] + i * sub_w;
            dst[2] = out->data[2] + i * sub_w;

            av_image_copy(dst, out->linesize, src, s->input_views[i]->linesize,
                          s->input_views[i]->format,
                          s->input_views[i]->width,
                          s->input_views[i]->height);
        }
    }
}

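/*
 * Pack the two views vertically: top-bottom when interleaved is 0, or
 * alternating single lines when interleaved is 1. Line interleaving is
 * achieved by offsetting view i by i rows and doubling the destination
 * linesize so each view only fills every other row.
 */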
static void vertical_frame_pack(AVFilterLink *outlink,
                                AVFrame *out,
                                int interleaved)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s  = ctx->priv;
    int i;

    for (i = 0; i < 2; i++) {
        const uint8_t *src[4];
        uint8_t *dst[4];
        int linesizes[4];
        int sub_h = s->input_views[i]->height >> s->pix_desc->log2_chroma_h;

        src[0] = s->input_views[i]->data[0];
        src[1] = s->input_views[i]->data[1];
        src[2] = s->input_views[i]->data[2];

        dst[0] = out->data[0] + i * out->linesize[0] *
                 (interleaved + s->input_views[i]->height * (1 - interleaved));
        dst[1] = out->data[1] + i * out->linesize[1] *
                 (interleaved + sub_h * (1 - interleaved));
        dst[2] = out->data[2] + i * out->linesize[2] *
                 (interleaved + sub_h * (1 - interleaved));

        linesizes[0] = out->linesize[0] + interleaved * out->linesize[0];
        linesizes[1] = out->linesize[1] + interleaved * out->linesize[1];
        linesizes[2] = out->linesize[2] + interleaved * out->linesize[2];

        av_image_copy(dst, linesizes, src, s->input_views[i]->linesize,
                      s->input_views[i]->format,
                      s->input_views[i]->width,
                      s->input_views[i]->height);
    }
}

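/* Dispatch to the packing routine that matches the requested 3D layout. */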
static av_always_inline void spatial_frame_pack(AVFilterLink *outlink,
                                                AVFrame *dst)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s  = ctx->priv;
    switch (s->format) {
    case AV_STEREO3D_SIDEBYSIDE:
        horizontal_frame_pack(outlink, dst, 0);
        break;
    case AV_STEREO3D_COLUMNS:
        horizontal_frame_pack(outlink, dst, 1);
        break;
    case AV_STEREO3D_TOPBOTTOM:
        vertical_frame_pack(outlink, dst, 0);
        break;
    case AV_STEREO3D_LINES:
        vertical_frame_pack(outlink, dst, 1);
        break;
    }
}

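/* Store the incoming frame for the corresponding view; the actual
 * output is produced on demand in request_frame(). */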
static int filter_frame_left(AVFilterLink *inlink, AVFrame *frame)
{
    FramepackContext *s = inlink->dst->priv;
    s->input_views[LEFT] = frame;
    return 0;
}

static int filter_frame_right(AVFilterLink *inlink, AVFrame *frame)
{
    FramepackContext *s = inlink->dst->priv;
    s->input_views[RIGHT] = frame;
    return 0;
}

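/*
 * Pull one frame from each input as needed. In frameseq mode the two
 * views are sent downstream as consecutive frames with consecutive pts
 * values; in the spatial modes they are merged into a single packed
 * frame. Either way the output carries AVStereo3D side data describing
 * the layout.
 */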
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s = ctx->priv;
    AVStereo3D *stereo;
    int ret, i;

    /* get a frame from either input, stop as soon as a video ends */
    for (i = 0; i < 2; i++) {
        if (!s->input_views[i]) {
            ret = ff_request_frame(ctx->inputs[i]);
            if (ret < 0)
                return ret;
        }
    }

    if (s->format == AV_STEREO3D_FRAMESEQUENCE) {
        if (s->double_pts == AV_NOPTS_VALUE)
            s->double_pts = s->input_views[LEFT]->pts;

        for (i = 0; i < 2; i++) {
            // set correct timestamps
            s->input_views[i]->pts = s->double_pts++;

            // set stereo3d side data
            stereo = av_stereo3d_create_side_data(s->input_views[i]);
            if (!stereo)
                return AVERROR(ENOMEM);
            stereo->type = s->format;

            // filter the frame and immediately relinquish its pointer
            ret = ff_filter_frame(outlink, s->input_views[i]);
            s->input_views[i] = NULL;
            if (ret < 0)
                return ret;
        }
        return ret;
    } else {
        AVFrame *dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!dst)
            return AVERROR(ENOMEM);

        spatial_frame_pack(outlink, dst);

        // get any property from the original frame
        ret = av_frame_copy_props(dst, s->input_views[LEFT]);
        if (ret < 0) {
            av_frame_free(&dst);
            return ret;
        }
        for (i = 0; i < 2; i++)
            av_frame_free(&s->input_views[i]);

        // set stereo3d side data
        stereo = av_stereo3d_create_side_data(dst);
        if (!stereo) {
            av_frame_free(&dst);
            return AVERROR(ENOMEM);
        }
        stereo->type = s->format;

        return ff_filter_frame(outlink, dst);
    }
}

#define OFFSET(x) offsetof(FramepackContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM
static const AVOption framepack_options[] = {
    { "format", "Frame pack output format", OFFSET(format), AV_OPT_TYPE_INT,
        { .i64 = AV_STEREO3D_SIDEBYSIDE }, 0, INT_MAX, .flags = V, .unit = "format" },
    { "sbs", "Views are packed next to each other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_SIDEBYSIDE }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "tab", "Views are packed on top of each other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_TOPBOTTOM }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "frameseq", "Views are one after the other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_FRAMESEQUENCE }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "lines", "Views are interleaved by lines", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_LINES }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "columns", "Views are interleaved by columns", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_COLUMNS }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { NULL },
};

AVFILTER_DEFINE_CLASS(framepack);

static const AVFilterPad framepack_inputs[] = {
    {
        .name         = "left",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame_left,
        .needs_fifo   = 1,
    },
    {
        .name         = "right",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame_right,
        .needs_fifo   = 1,
    },
    { NULL }
};

static const AVFilterPad framepack_outputs[] = {
    {
        .name          = "packed",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_vf_framepack = {
    .name          = "framepack",
    .description   = NULL_IF_CONFIG_SMALL("Generate a frame packed stereoscopic video."),
    .priv_size     = sizeof(FramepackContext),
    .priv_class    = &framepack_class,
    .query_formats = query_formats,
    .inputs        = framepack_inputs,
    .outputs       = framepack_outputs,
    .uninit        = framepack_uninit,
};
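
/*
 * Example invocation (file names are illustrative): given two matching
 * input streams, this should produce a side-by-side packed output with
 * the appropriate stereo3d metadata attached:
 *
 *   ffmpeg -i left.mp4 -i right.mp4 \
 *          -filter_complex "[0:v][1:v]framepack=format=sbs" output.mp4
 */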