You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

425 lines
14KB

  1. /*
  2. * Copyright (c) 2013 Vittorio Giovara
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. /**
  21. * @file
  22. * Generate a frame packed video, by combining two views in a single surface.
  23. */
  24. #include <string.h>
  25. #include "libavutil/common.h"
  26. #include "libavutil/imgutils.h"
  27. #include "libavutil/opt.h"
  28. #include "libavutil/pixdesc.h"
  29. #include "libavutil/rational.h"
  30. #include "libavutil/stereo3d.h"
  31. #include "avfilter.h"
  32. #include "filters.h"
  33. #include "formats.h"
  34. #include "internal.h"
  35. #include "video.h"
/* Indices into FramepackContext.input_views for the two input pads. */
#define LEFT  0
#define RIGHT 1

/**
 * Per-instance filter state.
 */
typedef struct FramepackContext {
    const AVClass *class;

    const AVPixFmtDescriptor *pix_desc; ///< agreed pixel format

    enum AVStereo3DType format;         ///< frame pack type output

    AVFrame *input_views[2];            ///< input frames, buffered until both views are available
} FramepackContext;
/* Planar 8-bit YUV formats only: the packing code walks each plane
 * byte-by-byte and relies on log2_chroma_w/h subsampling factors. */
static const enum AVPixelFormat formats_supported[] = {
    AV_PIX_FMT_YUV420P,  AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_NONE
};
  50. static int query_formats(AVFilterContext *ctx)
  51. {
  52. // this will ensure that formats are the same on all pads
  53. AVFilterFormats *fmts_list = ff_make_format_list(formats_supported);
  54. if (!fmts_list)
  55. return AVERROR(ENOMEM);
  56. return ff_set_common_formats(ctx, fmts_list);
  57. }
  58. static av_cold void framepack_uninit(AVFilterContext *ctx)
  59. {
  60. FramepackContext *s = ctx->priv;
  61. // clean any leftover frame
  62. av_frame_free(&s->input_views[LEFT]);
  63. av_frame_free(&s->input_views[RIGHT]);
  64. }
  65. static int config_output(AVFilterLink *outlink)
  66. {
  67. AVFilterContext *ctx = outlink->src;
  68. FramepackContext *s = outlink->src->priv;
  69. int width = ctx->inputs[LEFT]->w;
  70. int height = ctx->inputs[LEFT]->h;
  71. AVRational time_base = ctx->inputs[LEFT]->time_base;
  72. AVRational frame_rate = ctx->inputs[LEFT]->frame_rate;
  73. // check size and fps match on the other input
  74. if (width != ctx->inputs[RIGHT]->w ||
  75. height != ctx->inputs[RIGHT]->h) {
  76. av_log(ctx, AV_LOG_ERROR,
  77. "Left and right sizes differ (%dx%d vs %dx%d).\n",
  78. width, height,
  79. ctx->inputs[RIGHT]->w, ctx->inputs[RIGHT]->h);
  80. return AVERROR_INVALIDDATA;
  81. } else if (av_cmp_q(time_base, ctx->inputs[RIGHT]->time_base) != 0) {
  82. av_log(ctx, AV_LOG_ERROR,
  83. "Left and right time bases differ (%d/%d vs %d/%d).\n",
  84. time_base.num, time_base.den,
  85. ctx->inputs[RIGHT]->time_base.num,
  86. ctx->inputs[RIGHT]->time_base.den);
  87. return AVERROR_INVALIDDATA;
  88. } else if (av_cmp_q(frame_rate, ctx->inputs[RIGHT]->frame_rate) != 0) {
  89. av_log(ctx, AV_LOG_ERROR,
  90. "Left and right framerates differ (%d/%d vs %d/%d).\n",
  91. frame_rate.num, frame_rate.den,
  92. ctx->inputs[RIGHT]->frame_rate.num,
  93. ctx->inputs[RIGHT]->frame_rate.den);
  94. return AVERROR_INVALIDDATA;
  95. }
  96. s->pix_desc = av_pix_fmt_desc_get(outlink->format);
  97. if (!s->pix_desc)
  98. return AVERROR_BUG;
  99. // modify output properties as needed
  100. switch (s->format) {
  101. case AV_STEREO3D_FRAMESEQUENCE:
  102. time_base.den *= 2;
  103. frame_rate.num *= 2;
  104. break;
  105. case AV_STEREO3D_COLUMNS:
  106. case AV_STEREO3D_SIDEBYSIDE:
  107. width *= 2;
  108. break;
  109. case AV_STEREO3D_LINES:
  110. case AV_STEREO3D_TOPBOTTOM:
  111. height *= 2;
  112. break;
  113. default:
  114. av_log(ctx, AV_LOG_ERROR, "Unknown packing mode.");
  115. return AVERROR_INVALIDDATA;
  116. }
  117. outlink->w = width;
  118. outlink->h = height;
  119. outlink->time_base = time_base;
  120. outlink->frame_rate = frame_rate;
  121. return 0;
  122. }
/**
 * Pack the two views horizontally into @p out.
 *
 * @param interleaved if non-zero, alternate single columns from the left
 *                    and right view (AV_STEREO3D_COLUMNS); otherwise copy
 *                    the views side by side (AV_STEREO3D_SIDEBYSIDE).
 *
 * Assumes both buffered views are valid and match the negotiated format.
 */
static void horizontal_frame_pack(AVFilterLink *outlink,
                                  AVFrame *out,
                                  int interleaved)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s  = ctx->priv;
    int i, plane;

    if (interleaved) {
        const uint8_t *leftp  = s->input_views[LEFT]->data[0];
        const uint8_t *rightp = s->input_views[RIGHT]->data[0];
        uint8_t *dstp         = out->data[0];
        // each view contributes half of the doubled output width
        int length = out->width / 2;
        int lines  = out->height;

        for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
            // chroma planes (1 and 2) are subsampled; shrink the walk accordingly
            if (plane == 1 || plane == 2) {
                length = AV_CEIL_RSHIFT(out->width / 2, s->pix_desc->log2_chroma_w);
                lines  = AV_CEIL_RSHIFT(out->height,    s->pix_desc->log2_chroma_h);
            }
            for (i = 0; i < lines; i++) {
                int j;

                // reset the walking pointers to the start of row i of each plane
                leftp  = s->input_views[LEFT]->data[plane] +
                         s->input_views[LEFT]->linesize[plane] * i;
                rightp = s->input_views[RIGHT]->data[plane] +
                         s->input_views[RIGHT]->linesize[plane] * i;
                dstp   = out->data[plane] + out->linesize[plane] * i;
                for (j = 0; j < length; j++) {
                    // interpolate chroma as necessary: a subsampled chroma
                    // sample covers columns from both views, so average them
                    // and write the same value into both output columns
                    if ((s->pix_desc->log2_chroma_w ||
                         s->pix_desc->log2_chroma_h) &&
                        (plane == 1 || plane == 2)) {
                        *dstp++ = (*leftp + *rightp) / 2;
                        *dstp++ = (*leftp + *rightp) / 2;
                    } else {
                        // luma/alpha: strict column interleave left,right,left,...
                        *dstp++ = *leftp;
                        *dstp++ = *rightp;
                    }
                    leftp  += 1;
                    rightp += 1;
                }
            }
        }
    } else {
        // side by side: bulk-copy each view into its half of the output
        for (i = 0; i < 2; i++) {
            const uint8_t *src[4];
            uint8_t *dst[4];
            // horizontal offset of view i in the (subsampled) chroma planes
            int sub_w = s->input_views[i]->width >> s->pix_desc->log2_chroma_w;

            src[0] = s->input_views[i]->data[0];
            src[1] = s->input_views[i]->data[1];
            src[2] = s->input_views[i]->data[2];

            // view 0 starts at column 0, view 1 at the original width
            dst[0] = out->data[0] + i * s->input_views[i]->width;
            dst[1] = out->data[1] + i * sub_w;
            dst[2] = out->data[2] + i * sub_w;

            av_image_copy(dst, out->linesize, src, s->input_views[i]->linesize,
                          s->input_views[i]->format,
                          s->input_views[i]->width,
                          s->input_views[i]->height);
        }
    }
}
/**
 * Pack the two views vertically into @p out.
 *
 * @param interleaved if non-zero, alternate single rows from the left and
 *                    right view (AV_STEREO3D_LINES); otherwise stack the
 *                    views on top of each other (AV_STEREO3D_TOPBOTTOM).
 *
 * Both modes are expressed through one av_image_copy() call per view:
 * when interleaved, the destination linesize is doubled so each copied
 * source row lands on every other output row, and view i starts at
 * output row i; otherwise view i starts at row i * view_height.
 */
static void vertical_frame_pack(AVFilterLink *outlink,
                                AVFrame *out,
                                int interleaved)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s  = ctx->priv;
    int i;

    for (i = 0; i < 2; i++) {
        const uint8_t *src[4];
        uint8_t *dst[4];
        int linesizes[4];
        // vertical offset of view i in the (subsampled) chroma planes
        int sub_h = s->input_views[i]->height >> s->pix_desc->log2_chroma_h;

        src[0] = s->input_views[i]->data[0];
        src[1] = s->input_views[i]->data[1];
        src[2] = s->input_views[i]->data[2];

        // interleaved: start at row i; stacked: start at row i * height
        dst[0] = out->data[0] + i * out->linesize[0] *
                 (interleaved + s->input_views[i]->height * (1 - interleaved));
        dst[1] = out->data[1] + i * out->linesize[1] *
                 (interleaved + sub_h * (1 - interleaved));
        dst[2] = out->data[2] + i * out->linesize[2] *
                 (interleaved + sub_h * (1 - interleaved));

        // interleaved doubles the destination stride to skip the other view's rows
        linesizes[0] = out->linesize[0] +
                       interleaved * out->linesize[0];
        linesizes[1] = out->linesize[1] +
                       interleaved * out->linesize[1];
        linesizes[2] = out->linesize[2] +
                       interleaved * out->linesize[2];

        av_image_copy(dst, linesizes, src, s->input_views[i]->linesize,
                      s->input_views[i]->format,
                      s->input_views[i]->width,
                      s->input_views[i]->height);
    }
}
  215. static av_always_inline void spatial_frame_pack(AVFilterLink *outlink,
  216. AVFrame *dst)
  217. {
  218. AVFilterContext *ctx = outlink->src;
  219. FramepackContext *s = ctx->priv;
  220. switch (s->format) {
  221. case AV_STEREO3D_SIDEBYSIDE:
  222. horizontal_frame_pack(outlink, dst, 0);
  223. break;
  224. case AV_STEREO3D_COLUMNS:
  225. horizontal_frame_pack(outlink, dst, 1);
  226. break;
  227. case AV_STEREO3D_TOPBOTTOM:
  228. vertical_frame_pack(outlink, dst, 0);
  229. break;
  230. case AV_STEREO3D_LINES:
  231. vertical_frame_pack(outlink, dst, 1);
  232. break;
  233. }
  234. }
/**
 * Emit output once both views are buffered.
 *
 * Frame-sequence mode sends the two buffered frames one after the other
 * with rescaled timestamps; every spatial mode combines them into one
 * freshly allocated frame. In all cases stereo3d side data describing
 * the packing is attached and ownership of the pushed frames passes to
 * ff_filter_frame().
 *
 * @return 0 if a view is still missing, the ff_filter_frame() result on
 *         success, or a negative AVERROR code.
 */
static int try_push_frame(AVFilterContext *ctx)
{
    FramepackContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVStereo3D *stereo;
    int ret, i;

    // nothing to do until a frame is buffered on each input
    if (!(s->input_views[0] && s->input_views[1]))
        return 0;

    if (s->format == AV_STEREO3D_FRAMESEQUENCE) {
        int64_t pts = s->input_views[0]->pts;

        for (i = 0; i < 2; i++) {
            // set correct timestamps: the output runs at twice the input
            // rate, so the left view gets 2*pts and the right view is
            // offset by one output frame duration
            if (pts != AV_NOPTS_VALUE)
                s->input_views[i]->pts = i == 0 ? pts * 2 : pts * 2 + av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base);

            // set stereo3d side data
            stereo = av_stereo3d_create_side_data(s->input_views[i]);
            if (!stereo)
                return AVERROR(ENOMEM);
            stereo->type = s->format;
            stereo->view = i == LEFT ? AV_STEREO3D_VIEW_LEFT
                                     : AV_STEREO3D_VIEW_RIGHT;

            // filter the frame and immediately relinquish its pointer
            ret = ff_filter_frame(outlink, s->input_views[i]);
            s->input_views[i] = NULL;
            if (ret < 0)
                return ret;
        }
        return ret;
    } else {
        AVFrame *dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!dst)
            return AVERROR(ENOMEM);

        spatial_frame_pack(outlink, dst);

        // get any property from the original frame
        ret = av_frame_copy_props(dst, s->input_views[LEFT]);
        if (ret < 0) {
            av_frame_free(&dst);
            return ret;
        }
        // both views have been consumed into dst; release them
        for (i = 0; i < 2; i++)
            av_frame_free(&s->input_views[i]);

        // set stereo3d side data
        stereo = av_stereo3d_create_side_data(dst);
        if (!stereo) {
            av_frame_free(&dst);
            return AVERROR(ENOMEM);
        }
        stereo->type = s->format;

        return ff_filter_frame(outlink, dst);
    }
}
/**
 * Activation callback for the filter-graph scheduler.
 *
 * Buffers at most one frame per input, pushes packed output when both
 * views are present, forwards EOF/status, and requests frames from
 * whichever input is still missing a view when output is wanted.
 */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    FramepackContext *s = ctx->priv;
    int ret;

    // propagate output-side status (EOF/errors) back to both inputs first
    FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);

    // consume one frame per input, but only into an empty slot
    if (!s->input_views[0]) {
        ret = ff_inlink_consume_frame(ctx->inputs[0], &s->input_views[0]);
        if (ret < 0)
            return ret;
    }

    if (!s->input_views[1]) {
        ret = ff_inlink_consume_frame(ctx->inputs[1], &s->input_views[1]);
        if (ret < 0)
            return ret;
    }

    if (s->input_views[0] && s->input_views[1])
        return try_push_frame(ctx);

    // no complete pair: forward input EOF to the output
    FF_FILTER_FORWARD_STATUS(ctx->inputs[0], outlink);
    FF_FILTER_FORWARD_STATUS(ctx->inputs[1], outlink);

    // output wants data and input 0 is still live but has no buffered view
    if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
        !ff_outlink_get_status(ctx->inputs[0]) &&
        !s->input_views[0]) {
        ff_inlink_request_frame(ctx->inputs[0]);
        return 0;
    }

    // same for input 1
    if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
        !ff_outlink_get_status(ctx->inputs[1]) &&
        !s->input_views[1]) {
        ff_inlink_request_frame(ctx->inputs[1]);
        return 0;
    }

    return FFERROR_NOT_READY;
}
#define OFFSET(x) offsetof(FramepackContext, x)
#define VF AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM

/* User-visible options: the packing layout, defaulting to side-by-side. */
static const AVOption framepack_options[] = {
    { "format", "Frame pack output format", OFFSET(format), AV_OPT_TYPE_INT,
        { .i64 = AV_STEREO3D_SIDEBYSIDE }, 0, INT_MAX, .flags = VF, .unit = "format" },
    { "sbs", "Views are packed next to each other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_SIDEBYSIDE }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
    { "tab", "Views are packed on top of each other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_TOPBOTTOM }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
    { "frameseq", "Views are one after the other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_FRAMESEQUENCE }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
    { "lines", "Views are interleaved by lines", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_LINES }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
    { "columns", "Views are interleaved by columns", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_COLUMNS }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
    { NULL },
};

AVFILTER_DEFINE_CLASS(framepack);
/* Two video inputs: pad 0 carries the left view, pad 1 the right view. */
static const AVFilterPad framepack_inputs[] = {
    {
        .name = "left",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    {
        .name = "right",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};
/* Single packed video output; geometry and timing set in config_output(). */
static const AVFilterPad framepack_outputs[] = {
    {
        .name = "packed",
        .type = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};
/* Filter registration entry for "framepack". */
AVFilter ff_vf_framepack = {
    .name          = "framepack",
    .description   = NULL_IF_CONFIG_SMALL("Generate a frame packed stereoscopic video."),
    .priv_size     = sizeof(FramepackContext),
    .priv_class    = &framepack_class,
    .query_formats = query_formats,
    .inputs        = framepack_inputs,
    .outputs       = framepack_outputs,
    .activate      = activate,
    .uninit        = framepack_uninit,
};