You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

410 lines
13KB

  1. /*
  2. * Copyright (c) 2013 Vittorio Giovara
  3. *
  4. * This file is part of Libav.
  5. *
  6. * Libav is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * Libav is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with Libav; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. /**
  21. * @file
  22. * Generate a frame packed video, by combining two views in a single surface.
  23. */
  24. #include <string.h>
  25. #include "libavutil/imgutils.h"
  26. #include "libavutil/opt.h"
  27. #include "libavutil/pixdesc.h"
  28. #include "libavutil/rational.h"
  29. #include "libavutil/stereo3d.h"
  30. #include "avfilter.h"
  31. #include "formats.h"
  32. #include "internal.h"
  33. #include "video.h"
  34. #define LEFT 0
  35. #define RIGHT 1
/**
 * Private context of the framepack filter.
 */
typedef struct FramepackContext {
    const AVClass *class;               ///< AVOptions class (must be first)

    const AVPixFmtDescriptor *pix_desc; ///< agreed pixel format

    enum AVStereo3DType format;         ///< frame pack type output

    AVFrame *input_views[2];            ///< input frames

    int64_t double_pts;                 ///< new pts for frameseq mode
} FramepackContext;
  43. static const enum AVPixelFormat formats_supported[] = {
  44. AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
  45. AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVJ420P,
  46. AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
  47. AV_PIX_FMT_NONE
  48. };
  49. static int query_formats(AVFilterContext *ctx)
  50. {
  51. // this will ensure that formats are the same on all pads
  52. ff_set_common_formats(ctx, ff_make_format_list(formats_supported));
  53. return 0;
  54. }
  55. static av_cold void framepack_uninit(AVFilterContext *ctx)
  56. {
  57. FramepackContext *s = ctx->priv;
  58. // clean any leftover frame
  59. av_frame_free(&s->input_views[LEFT]);
  60. av_frame_free(&s->input_views[RIGHT]);
  61. }
  62. static int config_output(AVFilterLink *outlink)
  63. {
  64. AVFilterContext *ctx = outlink->src;
  65. FramepackContext *s = outlink->src->priv;
  66. int width = ctx->inputs[LEFT]->w;
  67. int height = ctx->inputs[LEFT]->h;
  68. AVRational time_base = ctx->inputs[LEFT]->time_base;
  69. AVRational frame_rate = ctx->inputs[LEFT]->frame_rate;
  70. // check size and fps match on the other input
  71. if (width != ctx->inputs[RIGHT]->w ||
  72. height != ctx->inputs[RIGHT]->h) {
  73. av_log(ctx, AV_LOG_ERROR,
  74. "Left and right sizes differ (%dx%d vs %dx%d).\n",
  75. width, height,
  76. ctx->inputs[RIGHT]->w, ctx->inputs[RIGHT]->h);
  77. return AVERROR_INVALIDDATA;
  78. } else if (av_cmp_q(time_base, ctx->inputs[RIGHT]->time_base) != 0) {
  79. av_log(ctx, AV_LOG_ERROR,
  80. "Left and right time bases differ (%d/%d vs %d/%d).\n",
  81. time_base.num, time_base.den,
  82. ctx->inputs[RIGHT]->time_base.num,
  83. ctx->inputs[RIGHT]->time_base.den);
  84. return AVERROR_INVALIDDATA;
  85. } else if (av_cmp_q(frame_rate, ctx->inputs[RIGHT]->frame_rate) != 0) {
  86. av_log(ctx, AV_LOG_ERROR,
  87. "Left and right framerates differ (%d/%d vs %d/%d).\n",
  88. frame_rate.num, frame_rate.den,
  89. ctx->inputs[RIGHT]->frame_rate.num,
  90. ctx->inputs[RIGHT]->frame_rate.den);
  91. return AVERROR_INVALIDDATA;
  92. }
  93. s->pix_desc = av_pix_fmt_desc_get(outlink->format);
  94. if (!s->pix_desc)
  95. return AVERROR_BUG;
  96. // modify output properties as needed
  97. switch (s->format) {
  98. case AV_STEREO3D_FRAMESEQUENCE:
  99. time_base.den *= 2;
  100. frame_rate.num *= 2;
  101. s->double_pts = AV_NOPTS_VALUE;
  102. break;
  103. case AV_STEREO3D_COLUMNS:
  104. case AV_STEREO3D_SIDEBYSIDE:
  105. width *= 2;
  106. break;
  107. case AV_STEREO3D_LINES:
  108. case AV_STEREO3D_TOPBOTTOM:
  109. height *= 2;
  110. break;
  111. default:
  112. av_log(ctx, AV_LOG_ERROR, "Unknown packing mode.");
  113. return AVERROR_INVALIDDATA;
  114. }
  115. outlink->w = width;
  116. outlink->h = height;
  117. outlink->time_base = time_base;
  118. outlink->frame_rate = frame_rate;
  119. return 0;
  120. }
/**
 * Pack the two buffered views horizontally into @p out.
 *
 * @param outlink     output link (used to reach the filter context)
 * @param out         destination frame, already allocated at 2x input width
 * @param interleaved nonzero for column interleaving (AV_STEREO3D_COLUMNS),
 *                    zero for plain side-by-side (AV_STEREO3D_SIDEBYSIDE)
 */
static void horizontal_frame_pack(AVFilterLink *outlink,
                                  AVFrame *out,
                                  int interleaved)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s  = ctx->priv;
    int i, plane;

    if (interleaved) {
        const uint8_t *leftp  = s->input_views[LEFT]->data[0];
        const uint8_t *rightp = s->input_views[RIGHT]->data[0];
        uint8_t *dstp         = out->data[0];
        int length = out->width / 2;
        int lines  = out->height;

        for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
            if (plane == 1 || plane == 2) {
                /* chroma planes: subsampled size, rounded up (ceiling shift) */
                length = -(-(out->width / 2) >> s->pix_desc->log2_chroma_w);
                lines  = -(-(out->height) >> s->pix_desc->log2_chroma_h);
            }
            for (i = 0; i < lines; i++) {
                int j;

                /* walk each source/destination row through linesize, which
                 * may include padding beyond the visible width */
                leftp  = s->input_views[LEFT]->data[plane] +
                         s->input_views[LEFT]->linesize[plane] * i;
                rightp = s->input_views[RIGHT]->data[plane] +
                         s->input_views[RIGHT]->linesize[plane] * i;
                dstp   = out->data[plane] + out->linesize[plane] * i;
                for (j = 0; j < length; j++) {
                    // interpolate chroma as necessary
                    if ((s->pix_desc->log2_chroma_w ||
                         s->pix_desc->log2_chroma_h) &&
                        (plane == 1 || plane == 2)) {
                        /* subsampled chroma: average the two views into
                         * both output columns */
                        *dstp++ = (*leftp + *rightp) / 2;
                        *dstp++ = (*leftp + *rightp) / 2;
                    } else {
                        /* luma (or full-res chroma): alternate columns
                         * left, right, left, right, ... */
                        *dstp++ = *leftp;
                        *dstp++ = *rightp;
                    }
                    leftp  += 1;
                    rightp += 1;
                }
            }
        }
    } else {
        /* plain side-by-side: bulk-copy each view into its own half */
        for (i = 0; i < 2; i++) {
            const uint8_t *src[4];
            uint8_t *dst[4];
            int sub_w = s->input_views[i]->width >> s->pix_desc->log2_chroma_w;

            src[0] = s->input_views[i]->data[0];
            src[1] = s->input_views[i]->data[1];
            src[2] = s->input_views[i]->data[2];

            /* view 0 at x=0, view 1 offset by one view width
             * (chroma offset scaled by the horizontal subsampling) */
            dst[0] = out->data[0] + i * s->input_views[i]->width;
            dst[1] = out->data[1] + i * sub_w;
            dst[2] = out->data[2] + i * sub_w;

            av_image_copy(dst, out->linesize, src, s->input_views[i]->linesize,
                          s->input_views[i]->format,
                          s->input_views[i]->width,
                          s->input_views[i]->height);
        }
    }
}
/**
 * Pack the two buffered views vertically into @p out.
 *
 * @param outlink     output link (used to reach the filter context)
 * @param out         destination frame, already allocated at 2x input height
 * @param interleaved nonzero for line interleaving (AV_STEREO3D_LINES),
 *                    zero for stacked top-bottom (AV_STEREO3D_TOPBOTTOM)
 */
static void vertical_frame_pack(AVFilterLink *outlink,
                                AVFrame *out,
                                int interleaved)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s  = ctx->priv;
    int i;

    for (i = 0; i < 2; i++) {
        const uint8_t *src[4];
        uint8_t *dst[4];
        int linesizes[4];
        int sub_h = s->input_views[i]->height >> s->pix_desc->log2_chroma_h;

        src[0] = s->input_views[i]->data[0];
        src[1] = s->input_views[i]->data[1];
        src[2] = s->input_views[i]->data[2];

        /* branch-free start offset:
         * interleaved -> view i begins on output row i;
         * stacked     -> view i begins at row i * (view plane height) */
        dst[0] = out->data[0] + i * out->linesize[0] *
                 (interleaved + s->input_views[i]->height * (1 - interleaved));
        dst[1] = out->data[1] + i * out->linesize[1] *
                 (interleaved + sub_h * (1 - interleaved));
        dst[2] = out->data[2] + i * out->linesize[2] *
                 (interleaved + sub_h * (1 - interleaved));

        /* interleaved -> double the destination stride so consecutive
         * source rows land on every other output row; stacked -> normal */
        linesizes[0] = out->linesize[0] +
                       interleaved * out->linesize[0];
        linesizes[1] = out->linesize[1] +
                       interleaved * out->linesize[1];
        linesizes[2] = out->linesize[2] +
                       interleaved * out->linesize[2];

        av_image_copy(dst, linesizes, src, s->input_views[i]->linesize,
                      s->input_views[i]->format,
                      s->input_views[i]->width,
                      s->input_views[i]->height);
    }
}
  213. static av_always_inline void spatial_frame_pack(AVFilterLink *outlink,
  214. AVFrame *dst)
  215. {
  216. AVFilterContext *ctx = outlink->src;
  217. FramepackContext *s = ctx->priv;
  218. switch (s->format) {
  219. case AV_STEREO3D_SIDEBYSIDE:
  220. horizontal_frame_pack(outlink, dst, 0);
  221. break;
  222. case AV_STEREO3D_COLUMNS:
  223. horizontal_frame_pack(outlink, dst, 1);
  224. break;
  225. case AV_STEREO3D_TOPBOTTOM:
  226. vertical_frame_pack(outlink, dst, 0);
  227. break;
  228. case AV_STEREO3D_LINES:
  229. vertical_frame_pack(outlink, dst, 1);
  230. break;
  231. }
  232. }
  233. static int filter_frame_left(AVFilterLink *inlink, AVFrame *frame)
  234. {
  235. FramepackContext *s = inlink->dst->priv;
  236. s->input_views[LEFT] = frame;
  237. return 0;
  238. }
  239. static int filter_frame_right(AVFilterLink *inlink, AVFrame *frame)
  240. {
  241. FramepackContext *s = inlink->dst->priv;
  242. s->input_views[RIGHT] = frame;
  243. return 0;
  244. }
/**
 * Pull one frame from each input, then emit either two consecutive
 * frames (frame-sequence mode) or one combined frame (spatial modes),
 * tagging the output with AVStereo3D side data.
 *
 * @return 0 or the value of the last ff_filter_frame() on success,
 *         a negative AVERROR otherwise.
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s  = ctx->priv;
    AVStereo3D *stereo;
    int ret, i;

    /* get a frame on the either input, stop as soon as a video ends */
    for (i = 0; i < 2; i++) {
        if (!s->input_views[i]) {
            ret = ff_request_frame(ctx->inputs[i]);
            if (ret < 0)
                return ret;
        }
    }

    if (s->format == AV_STEREO3D_FRAMESEQUENCE) {
        /* temporal packing: pass both views through as successive frames */
        if (s->double_pts == AV_NOPTS_VALUE)
            /* first pair: seed the doubled-rate timestamp counter */
            s->double_pts = s->input_views[LEFT]->pts;

        for (i = 0; i < 2; i++) {
            // set correct timestamps
            s->input_views[i]->pts = s->double_pts++;

            // set stereo3d side data
            stereo = av_stereo3d_create_side_data(s->input_views[i]);
            if (!stereo)
                return AVERROR(ENOMEM);
            stereo->type = s->format;

            // filter the frame and immediately relinquish its pointer
            ret = ff_filter_frame(outlink, s->input_views[i]);
            s->input_views[i] = NULL;
            if (ret < 0)
                return ret;
        }
        return ret;
    } else {
        /* spatial packing: combine both views into one larger frame */
        AVFrame *dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!dst)
            return AVERROR(ENOMEM);

        spatial_frame_pack(outlink, dst);

        // get any property from the original frame
        ret = av_frame_copy_props(dst, s->input_views[LEFT]);
        if (ret < 0) {
            av_frame_free(&dst);
            return ret;
        }
        /* both views are consumed; drop them so the next request pulls
         * fresh frames */
        for (i = 0; i < 2; i++)
            av_frame_free(&s->input_views[i]);

        // set stereo3d side data
        stereo = av_stereo3d_create_side_data(dst);
        if (!stereo) {
            av_frame_free(&dst);
            return AVERROR(ENOMEM);
        }
        stereo->type = s->format;

        return ff_filter_frame(outlink, dst);
    }
}
#define OFFSET(x) offsetof(FramepackContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM

/* AVOptions: "format" selects the packing arrangement; the named
 * constants below map user-facing strings to AVStereo3DType values.
 * Default is side-by-side. */
static const AVOption options[] = {
    { "format", "Frame pack output format", OFFSET(format), AV_OPT_TYPE_INT,
        { .i64 = AV_STEREO3D_SIDEBYSIDE }, 0, INT_MAX, .flags = V, .unit = "format" },
    { "sbs", "Views are packed next to each other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_SIDEBYSIDE }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "tab", "Views are packed on top of each other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_TOPBOTTOM }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "frameseq", "Views are one after the other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_FRAMESEQUENCE }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "lines", "Views are interleaved by lines", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_LINES }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { "columns", "Views are interleaved by columns", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_COLUMNS }, INT_MIN, INT_MAX, .flags = V, .unit = "format" },
    { NULL },
};
/* AVClass binding the option table above to FramepackContext. */
static const AVClass framepack_class = {
    .class_name = "framepack",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
/* Two video inputs, one per view; needs_fifo buffers frames so the two
 * streams can be consumed in lockstep from request_frame(). */
static const AVFilterPad framepack_inputs[] = {
    {
        .name         = "left",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame_left,
        .needs_fifo   = 1,
    },
    {
        .name         = "right",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame_right,
        .needs_fifo   = 1,
    },
    { NULL }
};
/* Single packed video output; geometry/timing set in config_output(). */
static const AVFilterPad framepack_outputs[] = {
    {
        .name          = "packed",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
/* Filter definition registered with libavfilter. */
AVFilter ff_vf_framepack = {
    .name          = "framepack",
    .description   = NULL_IF_CONFIG_SMALL("Generate a frame packed stereoscopic video."),
    .priv_size     = sizeof(FramepackContext),
    .priv_class    = &framepack_class,
    .query_formats = query_formats,
    .inputs        = framepack_inputs,
    .outputs       = framepack_outputs,
    .uninit        = framepack_uninit,
};