You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

399 lines
13KB

  1. /*
  2. * Copyright (c) 2013 Vittorio Giovara
  3. *
  4. * This file is part of Libav.
  5. *
  6. * Libav is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * Libav is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with Libav; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. /**
  21. * @file
  22. * Generate a frame packed video, by combining two views in a single surface.
  23. */
  24. #include <string.h>
  25. #include "libavutil/imgutils.h"
  26. #include "libavutil/opt.h"
  27. #include "libavutil/pixdesc.h"
  28. #include "libavutil/rational.h"
  29. #include "libavutil/stereo3d.h"
  30. #include "avfilter.h"
  31. #include "formats.h"
  32. #include "internal.h"
  33. #include "video.h"
  34. #define LEFT 0
  35. #define RIGHT 1
/**
 * Private context for the framepack filter.
 */
typedef struct FramepackContext {
    const AVClass *class;

    const AVPixFmtDescriptor *pix_desc; ///< agreed pixel format

    enum AVStereo3DType format;         ///< frame pack type output

    AVFrame *input_views[2];            ///< input frames, indexed by LEFT/RIGHT

    int64_t double_pts;                 ///< new pts for frameseq mode
} FramepackContext;
/* Planar YUV formats accepted on all pads; the list order also sets the
 * preference during format negotiation, so do not reorder casually. */
static const enum AVPixelFormat formats_supported[] = {
    AV_PIX_FMT_YUV420P,  AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVJ420P,
    AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
    AV_PIX_FMT_NONE
};
  49. static int query_formats(AVFilterContext *ctx)
  50. {
  51. // this will ensure that formats are the same on all pads
  52. ff_set_common_formats(ctx, ff_make_format_list(formats_supported));
  53. return 0;
  54. }
  55. static av_cold void framepack_uninit(AVFilterContext *ctx)
  56. {
  57. FramepackContext *s = ctx->priv;
  58. // clean any leftover frame
  59. av_frame_free(&s->input_views[LEFT]);
  60. av_frame_free(&s->input_views[RIGHT]);
  61. }
  62. static int config_output(AVFilterLink *outlink)
  63. {
  64. AVFilterContext *ctx = outlink->src;
  65. FramepackContext *s = outlink->src->priv;
  66. int width = ctx->inputs[LEFT]->w;
  67. int height = ctx->inputs[LEFT]->h;
  68. AVRational time_base = ctx->inputs[LEFT]->time_base;
  69. // check size and fps match on the other input
  70. if (width != ctx->inputs[RIGHT]->w ||
  71. height != ctx->inputs[RIGHT]->h) {
  72. av_log(ctx, AV_LOG_ERROR,
  73. "Left and right sizes differ (%dx%d vs %dx%d).\n",
  74. width, height,
  75. ctx->inputs[RIGHT]->w, ctx->inputs[RIGHT]->h);
  76. return AVERROR_INVALIDDATA;
  77. } else if (av_cmp_q(time_base, ctx->inputs[RIGHT]->time_base) != 0) {
  78. av_log(ctx, AV_LOG_ERROR,
  79. "Left and right framerates differ (%d/%d vs %d/%d).\n",
  80. time_base.num, time_base.den,
  81. ctx->inputs[RIGHT]->time_base.num,
  82. ctx->inputs[RIGHT]->time_base.den);
  83. return AVERROR_INVALIDDATA;
  84. }
  85. s->pix_desc = av_pix_fmt_desc_get(outlink->format);
  86. if (!s->pix_desc)
  87. return AVERROR_BUG;
  88. // modify output properties as needed
  89. switch (s->format) {
  90. case AV_STEREO3D_FRAMESEQUENCE:
  91. time_base.den *= 2;
  92. s->double_pts = AV_NOPTS_VALUE;
  93. break;
  94. case AV_STEREO3D_COLUMNS:
  95. case AV_STEREO3D_SIDEBYSIDE:
  96. width *= 2;
  97. break;
  98. case AV_STEREO3D_LINES:
  99. case AV_STEREO3D_TOPBOTTOM:
  100. height *= 2;
  101. break;
  102. default:
  103. av_log(ctx, AV_LOG_ERROR, "Unknown packing mode.");
  104. return AVERROR_INVALIDDATA;
  105. }
  106. outlink->w = width;
  107. outlink->h = height;
  108. outlink->time_base = time_base;
  109. return 0;
  110. }
/**
 * Pack the two views side by side horizontally.
 *
 * @param outlink     output link whose source filter holds the two views
 * @param out         destination frame, already allocated at 2x input width
 * @param interleaved if non-zero, alternate single columns from each view
 *                    (AV_STEREO3D_COLUMNS); otherwise copy the views into
 *                    the left and right halves (AV_STEREO3D_SIDEBYSIDE)
 */
static void horizontal_frame_pack(AVFilterLink *outlink,
                                  AVFrame *out,
                                  int interleaved)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s  = ctx->priv;
    int i, plane;

    if (interleaved) {
        const uint8_t *leftp  = s->input_views[LEFT]->data[0];
        const uint8_t *rightp = s->input_views[RIGHT]->data[0];
        uint8_t *dstp         = out->data[0];

        // per-view width/height; halved because out is twice as wide
        int length = out->width / 2;
        int lines  = out->height;

        for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
            if (plane == 1 || plane == 2) {
                // chroma planes: round the subsampled sizes up
                // (-(-x >> s) is a ceiling shift)
                length = -(-(out->width / 2) >> s->pix_desc->log2_chroma_w);
                lines  = -(-(out->height)    >> s->pix_desc->log2_chroma_h);
            }

            for (i = 0; i < lines; i++) {
                int j;

                // reset the row pointers from the per-plane linesizes
                leftp = s->input_views[LEFT]->data[plane] +
                        s->input_views[LEFT]->linesize[plane] * i;
                rightp = s->input_views[RIGHT]->data[plane] +
                         s->input_views[RIGHT]->linesize[plane] * i;
                dstp = out->data[plane] + out->linesize[plane] * i;

                for (j = 0; j < length; j++) {
                    // interpolate chroma as necessary: subsampled chroma
                    // cannot carry one distinct sample per view column, so
                    // both output samples get the average of the two views
                    if ((s->pix_desc->log2_chroma_w ||
                         s->pix_desc->log2_chroma_h) &&
                        (plane == 1 || plane == 2)) {
                        *dstp++ = (*leftp + *rightp) / 2;
                        *dstp++ = (*leftp + *rightp) / 2;
                    } else {
                        // luma: alternate columns left, right, left, right...
                        *dstp++ = *leftp;
                        *dstp++ = *rightp;
                    }
                    leftp  += 1;
                    rightp += 1;
                }
            }
        }
    } else {
        // side by side: copy each view whole into its own half
        for (i = 0; i < 2; i++) {
            const uint8_t *src[4];
            uint8_t *dst[4];
            // horizontal chroma offset for the right half
            int sub_w = s->input_views[i]->width >> s->pix_desc->log2_chroma_w;

            src[0] = s->input_views[i]->data[0];
            src[1] = s->input_views[i]->data[1];
            src[2] = s->input_views[i]->data[2];

            // view 0 starts at column 0, view 1 at the input width
            dst[0] = out->data[0] + i * s->input_views[i]->width;
            dst[1] = out->data[1] + i * sub_w;
            dst[2] = out->data[2] + i * sub_w;

            av_image_copy(dst, out->linesize, src, s->input_views[i]->linesize,
                          s->input_views[i]->format,
                          s->input_views[i]->width,
                          s->input_views[i]->height);
        }
    }
}
/**
 * Pack the two views on top of each other vertically.
 *
 * @param outlink     output link whose source filter holds the two views
 * @param out         destination frame, already allocated at 2x input height
 * @param interleaved if non-zero, alternate single rows from each view
 *                    (AV_STEREO3D_LINES); otherwise stack the views in the
 *                    top and bottom halves (AV_STEREO3D_TOPBOTTOM)
 */
static void vertical_frame_pack(AVFilterLink *outlink,
                                AVFrame *out,
                                int interleaved)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s  = ctx->priv;
    int i;

    for (i = 0; i < 2; i++) {
        const uint8_t *src[4];
        uint8_t *dst[4];
        int linesizes[4];
        // vertical chroma offset for the bottom half
        int sub_h = s->input_views[i]->height >> s->pix_desc->log2_chroma_h;

        src[0] = s->input_views[i]->data[0];
        src[1] = s->input_views[i]->data[1];
        src[2] = s->input_views[i]->data[2];

        // interleaved: view i starts on row i (offset of i rows);
        // stacked: view i starts i view-heights down
        dst[0] = out->data[0] + i * out->linesize[0] *
                 (interleaved + s->input_views[i]->height * (1 - interleaved));
        dst[1] = out->data[1] + i * out->linesize[1] *
                 (interleaved + sub_h * (1 - interleaved));
        dst[2] = out->data[2] + i * out->linesize[2] *
                 (interleaved + sub_h * (1 - interleaved));

        // interleaved mode doubles the destination stride so each copied
        // row lands on every other output line; stacked mode keeps it as is
        linesizes[0] = out->linesize[0] +
                       interleaved * out->linesize[0];
        linesizes[1] = out->linesize[1] +
                       interleaved * out->linesize[1];
        linesizes[2] = out->linesize[2] +
                       interleaved * out->linesize[2];

        av_image_copy(dst, linesizes, src, s->input_views[i]->linesize,
                      s->input_views[i]->format,
                      s->input_views[i]->width,
                      s->input_views[i]->height);
    }
}
  203. static av_always_inline void spatial_frame_pack(AVFilterLink *outlink,
  204. AVFrame *dst)
  205. {
  206. AVFilterContext *ctx = outlink->src;
  207. FramepackContext *s = ctx->priv;
  208. switch (s->format) {
  209. case AV_STEREO3D_SIDEBYSIDE:
  210. horizontal_frame_pack(outlink, dst, 0);
  211. break;
  212. case AV_STEREO3D_COLUMNS:
  213. horizontal_frame_pack(outlink, dst, 1);
  214. break;
  215. case AV_STEREO3D_TOPBOTTOM:
  216. vertical_frame_pack(outlink, dst, 0);
  217. break;
  218. case AV_STEREO3D_LINES:
  219. vertical_frame_pack(outlink, dst, 1);
  220. break;
  221. }
  222. }
  223. static int filter_frame_left(AVFilterLink *inlink, AVFrame *frame)
  224. {
  225. FramepackContext *s = inlink->dst->priv;
  226. s->input_views[LEFT] = frame;
  227. return 0;
  228. }
  229. static int filter_frame_right(AVFilterLink *inlink, AVFrame *frame)
  230. {
  231. FramepackContext *s = inlink->dst->priv;
  232. s->input_views[RIGHT] = frame;
  233. return 0;
  234. }
/**
 * Produce one packed output frame.
 *
 * Pulls one frame from each input (via the filter_frame callbacks), then
 * either emits the two views back to back at double tick rate
 * (frame-sequence mode) or combines them into a single surface.
 * In both paths an AVStereo3D side-data entry describing the packing is
 * attached to every output frame.
 *
 * @param outlink the output pad frames are requested on
 * @return >= 0 on success, a negative AVERROR code on failure or when
 *         either input has no more frames
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s = ctx->priv;
    AVStereo3D *stereo;
    int ret, i;

    /* get a frame on the either input, stop as soon as a video ends */
    for (i = 0; i < 2; i++) {
        if (!s->input_views[i]) {
            ret = ff_request_frame(ctx->inputs[i]);
            if (ret < 0)
                return ret;
        }
    }

    if (s->format == AV_STEREO3D_FRAMESEQUENCE) {
        // seed the doubled-rate pts series from the first left-view pts
        if (s->double_pts == AV_NOPTS_VALUE)
            s->double_pts = s->input_views[LEFT]->pts;

        for (i = 0; i < 2; i++) {
            // set correct timestamps: consecutive ticks in the doubled base
            s->input_views[i]->pts = s->double_pts++;

            // set stereo3d side data
            stereo = av_stereo3d_create_side_data(s->input_views[i]);
            if (!stereo)
                return AVERROR(ENOMEM);
            stereo->type = s->format;

            // filter the frame and immediately relinquish its pointer
            // (ff_filter_frame takes ownership even on failure)
            ret = ff_filter_frame(outlink, s->input_views[i]);
            s->input_views[i] = NULL;
            if (ret < 0)
                return ret;
        }
        return ret;
    } else {
        AVFrame *dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!dst)
            return AVERROR(ENOMEM);

        // combine the two views into the new surface
        spatial_frame_pack(outlink, dst);

        // get any property from the original frame
        ret = av_frame_copy_props(dst, s->input_views[LEFT]);
        if (ret < 0) {
            av_frame_free(&dst);
            return ret;
        }

        // both views are consumed; release them so the next request
        // pulls fresh frames
        for (i = 0; i < 2; i++)
            av_frame_free(&s->input_views[i]);

        // set stereo3d side data
        stereo = av_stereo3d_create_side_data(dst);
        if (!stereo) {
            av_frame_free(&dst);
            return AVERROR(ENOMEM);
        }
        stereo->type = s->format;

        return ff_filter_frame(outlink, dst);
    }
}
  290. #define OFFSET(x) offsetof(FramepackContext, x)
  291. #define V AV_OPT_FLAG_VIDEO_PARAM
/* User-selectable packing modes; "format" maps directly onto the
 * AVStereo3DType values consumed by config_output(). */
static const AVOption options[] = {
    { "format", "Frame pack output format", OFFSET(format), AV_OPT_TYPE_INT,
        { .i64 = AV_STEREO3D_SIDEBYSIDE }, 0, INT_MAX, .flags = V, .unit = "format" },
    { "sbs", "Views are packed next to each other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_SIDEBYSIDE }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "tab", "Views are packed on top of each other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_TOPBOTTOM }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "frameseq", "Views are one after the other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_FRAMESEQUENCE }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "lines", "Views are interleaved by lines", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_LINES }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "columns", "Views are interleaved by columns", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_COLUMNS }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { NULL },
};
/* AVClass wiring the options table into the generic AVOption machinery. */
static const AVClass framepack_class = {
    .class_name = "framepack",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
/* Two video inputs, one per view; needs_fifo lets the framework queue
 * frames so request_frame() can pull one view at a time. */
static const AVFilterPad framepack_inputs[] = {
    {
        .name         = "left",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame_left,
        .needs_fifo   = 1,
    },
    {
        .name         = "right",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame_right,
        .needs_fifo   = 1,
    },
    { NULL }
};
/* Single packed video output; all work happens in request_frame(). */
static const AVFilterPad framepack_outputs[] = {
    {
        .name          = "packed",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
/* Public filter definition registered with libavfilter. */
AVFilter ff_vf_framepack = {
    .name          = "framepack",
    .description   = NULL_IF_CONFIG_SMALL("Generate a frame packed stereoscopic video."),
    .priv_size     = sizeof(FramepackContext),
    .priv_class    = &framepack_class,
    .query_formats = query_formats,
    .inputs        = framepack_inputs,
    .outputs       = framepack_outputs,
    .uninit        = framepack_uninit,
};