/*
 * Copyright (c) 2019 Eugene Lyapustin
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * 360 video conversion filter.
 * Principle of operation:
 *
 * (for each pixel in output frame)
 * 1) Calculate OpenGL-like coordinates (x, y, z) for pixel position (i, j)
 * 2) Apply 360 operations (rotation, mirror) to (x, y, z)
 * 3) Calculate pixel position (u, v) in input frame
 * 4) Calculate interpolation window and weight for each pixel
 *
 * (for each frame)
 * 5) Remap input frame to output frame using precalculated data
 */

#include <math.h>

#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
#include "v360.h"
typedef struct ThreadData {
    AVFrame *in;
    AVFrame *out;
} ThreadData;

#define OFFSET(x) offsetof(V360Context, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
#define TFLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption v360_options[] = {
    { "input", "set input projection", OFFSET(in), AV_OPT_TYPE_INT, {.i64=EQUIRECTANGULAR}, 0, NB_PROJECTIONS-1, FLAGS, "in" },
    { "e", "equirectangular", 0, AV_OPT_TYPE_CONST, {.i64=EQUIRECTANGULAR}, 0, 0, FLAGS, "in" },
    { "equirect", "equirectangular", 0, AV_OPT_TYPE_CONST, {.i64=EQUIRECTANGULAR}, 0, 0, FLAGS, "in" },
    { "c3x2", "cubemap 3x2", 0, AV_OPT_TYPE_CONST, {.i64=CUBEMAP_3_2}, 0, 0, FLAGS, "in" },
    { "c6x1", "cubemap 6x1", 0, AV_OPT_TYPE_CONST, {.i64=CUBEMAP_6_1}, 0, 0, FLAGS, "in" },
    { "eac", "equi-angular cubemap", 0, AV_OPT_TYPE_CONST, {.i64=EQUIANGULAR}, 0, 0, FLAGS, "in" },
    { "dfisheye", "dual fisheye", 0, AV_OPT_TYPE_CONST, {.i64=DUAL_FISHEYE}, 0, 0, FLAGS, "in" },
    { "flat", "regular video", 0, AV_OPT_TYPE_CONST, {.i64=FLAT}, 0, 0, FLAGS, "in" },
    { "rectilinear", "regular video", 0, AV_OPT_TYPE_CONST, {.i64=FLAT}, 0, 0, FLAGS, "in" },
    { "gnomonic", "regular video", 0, AV_OPT_TYPE_CONST, {.i64=FLAT}, 0, 0, FLAGS, "in" },
    { "barrel", "barrel facebook's 360 format", 0, AV_OPT_TYPE_CONST, {.i64=BARREL}, 0, 0, FLAGS, "in" },
    { "fb", "barrel facebook's 360 format", 0, AV_OPT_TYPE_CONST, {.i64=BARREL}, 0, 0, FLAGS, "in" },
    { "c1x6", "cubemap 1x6", 0, AV_OPT_TYPE_CONST, {.i64=CUBEMAP_1_6}, 0, 0, FLAGS, "in" },
    { "sg", "stereographic", 0, AV_OPT_TYPE_CONST, {.i64=STEREOGRAPHIC}, 0, 0, FLAGS, "in" },
    { "mercator", "mercator", 0, AV_OPT_TYPE_CONST, {.i64=MERCATOR}, 0, 0, FLAGS, "in" },
    { "ball", "ball", 0, AV_OPT_TYPE_CONST, {.i64=BALL}, 0, 0, FLAGS, "in" },
    { "hammer", "hammer", 0, AV_OPT_TYPE_CONST, {.i64=HAMMER}, 0, 0, FLAGS, "in" },
    { "sinusoidal", "sinusoidal", 0, AV_OPT_TYPE_CONST, {.i64=SINUSOIDAL}, 0, 0, FLAGS, "in" },
    { "fisheye", "fisheye", 0, AV_OPT_TYPE_CONST, {.i64=FISHEYE}, 0, 0, FLAGS, "in" },
    { "pannini", "pannini", 0, AV_OPT_TYPE_CONST, {.i64=PANNINI}, 0, 0, FLAGS, "in" },
    { "cylindrical", "cylindrical", 0, AV_OPT_TYPE_CONST, {.i64=CYLINDRICAL}, 0, 0, FLAGS, "in" },
    { "tetrahedron", "tetrahedron", 0, AV_OPT_TYPE_CONST, {.i64=TETRAHEDRON}, 0, 0, FLAGS, "in" },
    { "barrelsplit", "barrel split facebook's 360 format", 0, AV_OPT_TYPE_CONST, {.i64=BARREL_SPLIT}, 0, 0, FLAGS, "in" },
    { "tsp", "truncated square pyramid", 0, AV_OPT_TYPE_CONST, {.i64=TSPYRAMID}, 0, 0, FLAGS, "in" },
    { "hequirect", "half equirectangular", 0, AV_OPT_TYPE_CONST, {.i64=HEQUIRECTANGULAR}, 0, 0, FLAGS, "in" },
    { "he", "half equirectangular", 0, AV_OPT_TYPE_CONST, {.i64=HEQUIRECTANGULAR}, 0, 0, FLAGS, "in" },
    { "equisolid", "equisolid", 0, AV_OPT_TYPE_CONST, {.i64=EQUISOLID}, 0, 0, FLAGS, "in" },
    { "og", "orthographic", 0, AV_OPT_TYPE_CONST, {.i64=ORTHOGRAPHIC}, 0, 0, FLAGS, "in" },
    { "output", "set output projection", OFFSET(out), AV_OPT_TYPE_INT, {.i64=CUBEMAP_3_2}, 0, NB_PROJECTIONS-1, FLAGS, "out" },
    { "e", "equirectangular", 0, AV_OPT_TYPE_CONST, {.i64=EQUIRECTANGULAR}, 0, 0, FLAGS, "out" },
    { "equirect", "equirectangular", 0, AV_OPT_TYPE_CONST, {.i64=EQUIRECTANGULAR}, 0, 0, FLAGS, "out" },
    { "c3x2", "cubemap 3x2", 0, AV_OPT_TYPE_CONST, {.i64=CUBEMAP_3_2}, 0, 0, FLAGS, "out" },
    { "c6x1", "cubemap 6x1", 0, AV_OPT_TYPE_CONST, {.i64=CUBEMAP_6_1}, 0, 0, FLAGS, "out" },
    { "eac", "equi-angular cubemap", 0, AV_OPT_TYPE_CONST, {.i64=EQUIANGULAR}, 0, 0, FLAGS, "out" },
    { "dfisheye", "dual fisheye", 0, AV_OPT_TYPE_CONST, {.i64=DUAL_FISHEYE}, 0, 0, FLAGS, "out" },
    { "flat", "regular video", 0, AV_OPT_TYPE_CONST, {.i64=FLAT}, 0, 0, FLAGS, "out" },
    { "rectilinear", "regular video", 0, AV_OPT_TYPE_CONST, {.i64=FLAT}, 0, 0, FLAGS, "out" },
    { "gnomonic", "regular video", 0, AV_OPT_TYPE_CONST, {.i64=FLAT}, 0, 0, FLAGS, "out" },
    { "barrel", "barrel facebook's 360 format", 0, AV_OPT_TYPE_CONST, {.i64=BARREL}, 0, 0, FLAGS, "out" },
    { "fb", "barrel facebook's 360 format", 0, AV_OPT_TYPE_CONST, {.i64=BARREL}, 0, 0, FLAGS, "out" },
    { "c1x6", "cubemap 1x6", 0, AV_OPT_TYPE_CONST, {.i64=CUBEMAP_1_6}, 0, 0, FLAGS, "out" },
    { "sg", "stereographic", 0, AV_OPT_TYPE_CONST, {.i64=STEREOGRAPHIC}, 0, 0, FLAGS, "out" },
    { "mercator", "mercator", 0, AV_OPT_TYPE_CONST, {.i64=MERCATOR}, 0, 0, FLAGS, "out" },
    { "ball", "ball", 0, AV_OPT_TYPE_CONST, {.i64=BALL}, 0, 0, FLAGS, "out" },
    { "hammer", "hammer", 0, AV_OPT_TYPE_CONST, {.i64=HAMMER}, 0, 0, FLAGS, "out" },
    { "sinusoidal", "sinusoidal", 0, AV_OPT_TYPE_CONST, {.i64=SINUSOIDAL}, 0, 0, FLAGS, "out" },
    { "fisheye", "fisheye", 0, AV_OPT_TYPE_CONST, {.i64=FISHEYE}, 0, 0, FLAGS, "out" },
    { "pannini", "pannini", 0, AV_OPT_TYPE_CONST, {.i64=PANNINI}, 0, 0, FLAGS, "out" },
    { "cylindrical", "cylindrical", 0, AV_OPT_TYPE_CONST, {.i64=CYLINDRICAL}, 0, 0, FLAGS, "out" },
    { "perspective", "perspective", 0, AV_OPT_TYPE_CONST, {.i64=PERSPECTIVE}, 0, 0, FLAGS, "out" },
    { "tetrahedron", "tetrahedron", 0, AV_OPT_TYPE_CONST, {.i64=TETRAHEDRON}, 0, 0, FLAGS, "out" },
    { "barrelsplit", "barrel split facebook's 360 format", 0, AV_OPT_TYPE_CONST, {.i64=BARREL_SPLIT}, 0, 0, FLAGS, "out" },
    { "tsp", "truncated square pyramid", 0, AV_OPT_TYPE_CONST, {.i64=TSPYRAMID}, 0, 0, FLAGS, "out" },
    { "hequirect", "half equirectangular", 0, AV_OPT_TYPE_CONST, {.i64=HEQUIRECTANGULAR}, 0, 0, FLAGS, "out" },
    { "he", "half equirectangular", 0, AV_OPT_TYPE_CONST, {.i64=HEQUIRECTANGULAR}, 0, 0, FLAGS, "out" },
    { "equisolid", "equisolid", 0, AV_OPT_TYPE_CONST, {.i64=EQUISOLID}, 0, 0, FLAGS, "out" },
    { "og", "orthographic", 0, AV_OPT_TYPE_CONST, {.i64=ORTHOGRAPHIC}, 0, 0, FLAGS, "out" },
    { "interp", "set interpolation method", OFFSET(interp), AV_OPT_TYPE_INT, {.i64=BILINEAR}, 0, NB_INTERP_METHODS-1, FLAGS, "interp" },
    { "near", "nearest neighbour", 0, AV_OPT_TYPE_CONST, {.i64=NEAREST}, 0, 0, FLAGS, "interp" },
    { "nearest", "nearest neighbour", 0, AV_OPT_TYPE_CONST, {.i64=NEAREST}, 0, 0, FLAGS, "interp" },
    { "line", "bilinear interpolation", 0, AV_OPT_TYPE_CONST, {.i64=BILINEAR}, 0, 0, FLAGS, "interp" },
    { "linear", "bilinear interpolation", 0, AV_OPT_TYPE_CONST, {.i64=BILINEAR}, 0, 0, FLAGS, "interp" },
    { "lagrange9", "lagrange9 interpolation", 0, AV_OPT_TYPE_CONST, {.i64=LAGRANGE9}, 0, 0, FLAGS, "interp" },
    { "cube", "bicubic interpolation", 0, AV_OPT_TYPE_CONST, {.i64=BICUBIC}, 0, 0, FLAGS, "interp" },
    { "cubic", "bicubic interpolation", 0, AV_OPT_TYPE_CONST, {.i64=BICUBIC}, 0, 0, FLAGS, "interp" },
    { "lanc", "lanczos interpolation", 0, AV_OPT_TYPE_CONST, {.i64=LANCZOS}, 0, 0, FLAGS, "interp" },
    { "lanczos", "lanczos interpolation", 0, AV_OPT_TYPE_CONST, {.i64=LANCZOS}, 0, 0, FLAGS, "interp" },
    { "sp16", "spline16 interpolation", 0, AV_OPT_TYPE_CONST, {.i64=SPLINE16}, 0, 0, FLAGS, "interp" },
    { "spline16", "spline16 interpolation", 0, AV_OPT_TYPE_CONST, {.i64=SPLINE16}, 0, 0, FLAGS, "interp" },
    { "gauss", "gaussian interpolation", 0, AV_OPT_TYPE_CONST, {.i64=GAUSSIAN}, 0, 0, FLAGS, "interp" },
    { "gaussian", "gaussian interpolation", 0, AV_OPT_TYPE_CONST, {.i64=GAUSSIAN}, 0, 0, FLAGS, "interp" },
    { "w", "output width", OFFSET(width), AV_OPT_TYPE_INT, {.i64=0}, 0, INT16_MAX, FLAGS, "w" },
    { "h", "output height", OFFSET(height), AV_OPT_TYPE_INT, {.i64=0}, 0, INT16_MAX, FLAGS, "h" },
    { "in_stereo", "input stereo format", OFFSET(in_stereo), AV_OPT_TYPE_INT, {.i64=STEREO_2D}, 0, NB_STEREO_FMTS-1, FLAGS, "stereo" },
    { "out_stereo", "output stereo format", OFFSET(out_stereo), AV_OPT_TYPE_INT, {.i64=STEREO_2D}, 0, NB_STEREO_FMTS-1, FLAGS, "stereo" },
    { "2d", "2d mono", 0, AV_OPT_TYPE_CONST, {.i64=STEREO_2D}, 0, 0, FLAGS, "stereo" },
    { "sbs", "side by side", 0, AV_OPT_TYPE_CONST, {.i64=STEREO_SBS}, 0, 0, FLAGS, "stereo" },
    { "tb", "top bottom", 0, AV_OPT_TYPE_CONST, {.i64=STEREO_TB}, 0, 0, FLAGS, "stereo" },
    { "in_forder", "input cubemap face order", OFFSET(in_forder), AV_OPT_TYPE_STRING, {.str="rludfb"}, 0, NB_DIRECTIONS-1, FLAGS, "in_forder" },
    { "out_forder", "output cubemap face order", OFFSET(out_forder), AV_OPT_TYPE_STRING, {.str="rludfb"}, 0, NB_DIRECTIONS-1, FLAGS, "out_forder" },
    { "in_frot", "input cubemap face rotation", OFFSET(in_frot), AV_OPT_TYPE_STRING, {.str="000000"}, 0, NB_DIRECTIONS-1, FLAGS, "in_frot" },
    { "out_frot", "output cubemap face rotation", OFFSET(out_frot), AV_OPT_TYPE_STRING, {.str="000000"}, 0, NB_DIRECTIONS-1, FLAGS, "out_frot" },
    { "in_pad", "percent input cubemap pads", OFFSET(in_pad), AV_OPT_TYPE_FLOAT, {.dbl=0.f}, 0.f, 0.1, TFLAGS, "in_pad" },
    { "out_pad", "percent output cubemap pads", OFFSET(out_pad), AV_OPT_TYPE_FLOAT, {.dbl=0.f}, 0.f, 0.1, TFLAGS, "out_pad" },
    { "fin_pad", "fixed input cubemap pads", OFFSET(fin_pad), AV_OPT_TYPE_INT, {.i64=0}, 0, 100, TFLAGS, "fin_pad" },
    { "fout_pad", "fixed output cubemap pads", OFFSET(fout_pad), AV_OPT_TYPE_INT, {.i64=0}, 0, 100, TFLAGS, "fout_pad" },
    { "yaw", "yaw rotation", OFFSET(yaw), AV_OPT_TYPE_FLOAT, {.dbl=0.f}, -180.f, 180.f, TFLAGS, "yaw" },
    { "pitch", "pitch rotation", OFFSET(pitch), AV_OPT_TYPE_FLOAT, {.dbl=0.f}, -180.f, 180.f, TFLAGS, "pitch" },
    { "roll", "roll rotation", OFFSET(roll), AV_OPT_TYPE_FLOAT, {.dbl=0.f}, -180.f, 180.f, TFLAGS, "roll" },
    { "rorder", "rotation order", OFFSET(rorder), AV_OPT_TYPE_STRING, {.str="ypr"}, 0, 0, TFLAGS, "rorder" },
    { "h_fov", "output horizontal field of view", OFFSET(h_fov), AV_OPT_TYPE_FLOAT, {.dbl=90.f}, 0.00001f, 360.f, TFLAGS, "h_fov" },
    { "v_fov", "output vertical field of view", OFFSET(v_fov), AV_OPT_TYPE_FLOAT, {.dbl=45.f}, 0.00001f, 360.f, TFLAGS, "v_fov" },
    { "d_fov", "output diagonal field of view", OFFSET(d_fov), AV_OPT_TYPE_FLOAT, {.dbl=0.f}, 0.f, 360.f, TFLAGS, "d_fov" },
    { "h_flip", "flip out video horizontally", OFFSET(h_flip), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, TFLAGS, "h_flip" },
    { "v_flip", "flip out video vertically", OFFSET(v_flip), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, TFLAGS, "v_flip" },
    { "d_flip", "flip out video indepth", OFFSET(d_flip), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, TFLAGS, "d_flip" },
    { "ih_flip", "flip in video horizontally", OFFSET(ih_flip), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, TFLAGS, "ih_flip" },
    { "iv_flip", "flip in video vertically", OFFSET(iv_flip), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, TFLAGS, "iv_flip" },
    { "in_trans", "transpose video input", OFFSET(in_transpose), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS, "in_transpose" },
    { "out_trans", "transpose video output", OFFSET(out_transpose), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS, "out_transpose" },
    { "ih_fov", "input horizontal field of view", OFFSET(ih_fov), AV_OPT_TYPE_FLOAT, {.dbl=90.f}, 0.00001f, 360.f, TFLAGS, "ih_fov" },
    { "iv_fov", "input vertical field of view", OFFSET(iv_fov), AV_OPT_TYPE_FLOAT, {.dbl=45.f}, 0.00001f, 360.f, TFLAGS, "iv_fov" },
    { "id_fov", "input diagonal field of view", OFFSET(id_fov), AV_OPT_TYPE_FLOAT, {.dbl=0.f}, 0.f, 360.f, TFLAGS, "id_fov" },
    { "alpha_mask", "build mask in alpha plane", OFFSET(alpha), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS, "alpha" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(v360);
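
/*
 * Illustrative use of the options above (not part of the filter itself):
 * a typical invocation converts an equirectangular input to a 3x2 cubemap
 * with bicubic interpolation, e.g.
 *
 *   ffmpeg -i in.mp4 -vf "v360=input=e:output=c3x2:interp=cubic" out.mp4
 *
 * The option names and constant shorthands map directly onto the
 * AVOption table above.
 */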

static int query_formats(AVFilterContext *ctx)
{
    V360Context *s = ctx->priv;

    static const enum AVPixelFormat pix_fmts[] = {
        // YUVA444
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P12,
        AV_PIX_FMT_YUVA444P16,
        // YUVA422
        AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA422P9,
        AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P12,
        AV_PIX_FMT_YUVA422P16,
        // YUVA420
        AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA420P9,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
        // YUVJ
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ411P,
        // YUV444
        AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12,
        AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV444P16,
        // YUV440
        AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV440P10,
        AV_PIX_FMT_YUV440P12,
        // YUV422
        AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV422P9,
        AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12,
        AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV422P16,
        // YUV420
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV420P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV420P12,
        AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV420P16,
        // YUV411
        AV_PIX_FMT_YUV411P,
        // YUV410
        AV_PIX_FMT_YUV410P,
        // GBR
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9,
        AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12,
        AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
        // GBRA
        AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10,
        AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
        // GRAY
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9,
        AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12,
        AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_NONE
    };
    static const enum AVPixelFormat alpha_pix_fmts[] = {
        AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P12,
        AV_PIX_FMT_YUVA444P16,
        AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA422P9,
        AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P12,
        AV_PIX_FMT_YUVA422P16,
        AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA420P9,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
        AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10,
        AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(s->alpha ? alpha_pix_fmts : pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

#define DEFINE_REMAP1_LINE(bits, div) \
static void remap1_##bits##bit_line_c(uint8_t *dst, int width, const uint8_t *const src, \
                                      ptrdiff_t in_linesize, \
                                      const int16_t *const u, const int16_t *const v, \
                                      const int16_t *const ker) \
{ \
    const uint##bits##_t *const s = (const uint##bits##_t *const)src; \
    uint##bits##_t *d = (uint##bits##_t *)dst; \
 \
    in_linesize /= div; \
 \
    for (int x = 0; x < width; x++) \
        d[x] = s[v[x] * in_linesize + u[x]]; \
}

DEFINE_REMAP1_LINE( 8, 1)
DEFINE_REMAP1_LINE(16, 2)
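
/*
 * The two instantiations above generate remap1_8bit_line_c() and
 * remap1_16bit_line_c(), the nearest-neighbour line remappers that are
 * selected later in ff_v360_init() when interp is NEAREST. With a 1x1
 * window each output pixel x is simply fetched from
 * src[v[x] * in_linesize + u[x]], with no weighting involved.
 */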

/**
 * Generate remapping function with a given window size and pixel depth.
 *
 * @param ws size of interpolation window
 * @param bits number of bits per pixel
 */
#define DEFINE_REMAP(ws, bits) \
static int remap##ws##_##bits##bit_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) \
{ \
    ThreadData *td = arg; \
    const V360Context *s = ctx->priv; \
    const AVFrame *in = td->in; \
    AVFrame *out = td->out; \
 \
    for (int stereo = 0; stereo < 1 + (s->out_stereo > STEREO_2D); stereo++) { \
        for (int plane = 0; plane < s->nb_planes; plane++) { \
            const unsigned map = s->map[plane]; \
            const int in_linesize  = in->linesize[plane]; \
            const int out_linesize = out->linesize[plane]; \
            const int uv_linesize = s->uv_linesize[plane]; \
            const int in_offset_w = stereo ? s->in_offset_w[plane] : 0; \
            const int in_offset_h = stereo ? s->in_offset_h[plane] : 0; \
            const int out_offset_w = stereo ? s->out_offset_w[plane] : 0; \
            const int out_offset_h = stereo ? s->out_offset_h[plane] : 0; \
            const uint8_t *const src = in->data[plane] + \
                                       in_offset_h * in_linesize + in_offset_w * (bits >> 3); \
            uint8_t *dst = out->data[plane] + out_offset_h * out_linesize + out_offset_w * (bits >> 3); \
            const uint8_t *mask = plane == 3 ? s->mask : NULL; \
            const int width = s->pr_width[plane]; \
            const int height = s->pr_height[plane]; \
 \
            const int slice_start = (height *  jobnr     ) / nb_jobs; \
            const int slice_end   = (height * (jobnr + 1)) / nb_jobs; \
 \
            for (int y = slice_start; y < slice_end && !mask; y++) { \
                const int16_t *const u = s->u[map] + y * uv_linesize * ws * ws; \
                const int16_t *const v = s->v[map] + y * uv_linesize * ws * ws; \
                const int16_t *const ker = s->ker[map] + y * uv_linesize * ws * ws; \
 \
                s->remap_line(dst + y * out_linesize, width, src, in_linesize, u, v, ker); \
            } \
 \
            for (int y = slice_start; y < slice_end && mask; y++) { \
                memcpy(dst + y * out_linesize, mask + y * width * (bits >> 3), width * (bits >> 3)); \
            } \
        } \
    } \
 \
    return 0; \
}

DEFINE_REMAP(1,  8)
DEFINE_REMAP(2,  8)
DEFINE_REMAP(3,  8)
DEFINE_REMAP(4,  8)
DEFINE_REMAP(1, 16)
DEFINE_REMAP(2, 16)
DEFINE_REMAP(3, 16)
DEFINE_REMAP(4, 16)

#define DEFINE_REMAP_LINE(ws, bits, div) \
static void remap##ws##_##bits##bit_line_c(uint8_t *dst, int width, const uint8_t *const src, \
                                           ptrdiff_t in_linesize, \
                                           const int16_t *const u, const int16_t *const v, \
                                           const int16_t *const ker) \
{ \
    const uint##bits##_t *const s = (const uint##bits##_t *const)src; \
    uint##bits##_t *d = (uint##bits##_t *)dst; \
 \
    in_linesize /= div; \
 \
    for (int x = 0; x < width; x++) { \
        const int16_t *const uu = u + x * ws * ws; \
        const int16_t *const vv = v + x * ws * ws; \
        const int16_t *const kker = ker + x * ws * ws; \
        int tmp = 0; \
 \
        for (int i = 0; i < ws; i++) { \
            for (int j = 0; j < ws; j++) { \
                tmp += kker[i * ws + j] * s[vv[i * ws + j] * in_linesize + uu[i * ws + j]]; \
            } \
        } \
 \
        d[x] = av_clip_uint##bits(tmp >> 14); \
    } \
}

DEFINE_REMAP_LINE(2,  8, 1)
DEFINE_REMAP_LINE(3,  8, 1)
DEFINE_REMAP_LINE(4,  8, 1)
DEFINE_REMAP_LINE(2, 16, 2)
DEFINE_REMAP_LINE(3, 16, 2)
DEFINE_REMAP_LINE(4, 16, 2)
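
/*
 * Note on the fixed-point arithmetic above: the per-pixel weights are
 * stored as int16_t values scaled by roughly 2^14 (the *_kernel() helpers
 * further down multiply the float coefficients by 16385.f), and the
 * accumulated sum is shifted right by 14 before clipping. A kernel whose
 * float coefficients sum to 1 therefore maps a flat input area to
 * (approximately) the same flat output value.
 */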

void ff_v360_init(V360Context *s, int depth)
{
    switch (s->interp) {
    case NEAREST:
        s->remap_line = depth <= 8 ? remap1_8bit_line_c : remap1_16bit_line_c;
        break;
    case BILINEAR:
        s->remap_line = depth <= 8 ? remap2_8bit_line_c : remap2_16bit_line_c;
        break;
    case LAGRANGE9:
        s->remap_line = depth <= 8 ? remap3_8bit_line_c : remap3_16bit_line_c;
        break;
    case BICUBIC:
    case LANCZOS:
    case SPLINE16:
    case GAUSSIAN:
        s->remap_line = depth <= 8 ? remap4_8bit_line_c : remap4_16bit_line_c;
        break;
    }

    if (ARCH_X86)
        ff_v360_init_x86(s, depth);
}

/**
 * Save nearest pixel coordinates for remapping.
 *
 * @param du horizontal relative coordinate
 * @param dv vertical relative coordinate
 * @param rmap calculated 4x4 window
 * @param u u remap data
 * @param v v remap data
 * @param ker ker remap data
 */
static void nearest_kernel(float du, float dv, const XYRemap *rmap,
                           int16_t *u, int16_t *v, int16_t *ker)
{
    const int i = lrintf(dv) + 1;
    const int j = lrintf(du) + 1;

    u[0] = rmap->u[i][j];
    v[0] = rmap->v[i][j];
}

/**
 * Calculate kernel for bilinear interpolation.
 *
 * @param du horizontal relative coordinate
 * @param dv vertical relative coordinate
 * @param rmap calculated 4x4 window
 * @param u u remap data
 * @param v v remap data
 * @param ker ker remap data
 */
static void bilinear_kernel(float du, float dv, const XYRemap *rmap,
                            int16_t *u, int16_t *v, int16_t *ker)
{
    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < 2; j++) {
            u[i * 2 + j] = rmap->u[i + 1][j + 1];
            v[i * 2 + j] = rmap->v[i + 1][j + 1];
        }
    }

    ker[0] = lrintf((1.f - du) * (1.f - dv) * 16385.f);
    ker[1] = lrintf(       du  * (1.f - dv) * 16385.f);
    ker[2] = lrintf((1.f - du) *        dv  * 16385.f);
    ker[3] = lrintf(       du  *        dv  * 16385.f);
}
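
/*
 * Informal sanity check: the four bilinear weights above satisfy
 * (1-du)(1-dv) + du(1-dv) + (1-du)dv + du*dv = 1, so after the 16385
 * scaling and the >> 14 in the remap line the result is a (near-)convex
 * combination of the four source samples.
 */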

/**
 * Calculate 1-dimensional lagrange coefficients.
 *
 * @param t relative coordinate
 * @param coeffs coefficients
 */
static inline void calculate_lagrange_coeffs(float t, float *coeffs)
{
    coeffs[0] = (t - 1.f) * (t - 2.f) * 0.5f;
    coeffs[1] = -t * (t - 2.f);
    coeffs[2] =  t * (t - 1.f) * 0.5f;
}

/**
 * Calculate kernel for lagrange interpolation.
 *
 * @param du horizontal relative coordinate
 * @param dv vertical relative coordinate
 * @param rmap calculated 4x4 window
 * @param u u remap data
 * @param v v remap data
 * @param ker ker remap data
 */
static void lagrange_kernel(float du, float dv, const XYRemap *rmap,
                            int16_t *u, int16_t *v, int16_t *ker)
{
    float du_coeffs[3];
    float dv_coeffs[3];

    calculate_lagrange_coeffs(du, du_coeffs);
    calculate_lagrange_coeffs(dv, dv_coeffs);

    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
            u[i * 3 + j] = rmap->u[i + 1][j + 1];
            v[i * 3 + j] = rmap->v[i + 1][j + 1];
            ker[i * 3 + j] = lrintf(du_coeffs[j] * dv_coeffs[i] * 16385.f);
        }
    }
}

/**
 * Calculate 1-dimensional cubic coefficients.
 *
 * @param t relative coordinate
 * @param coeffs coefficients
 */
static inline void calculate_bicubic_coeffs(float t, float *coeffs)
{
    const float tt  = t * t;
    const float ttt = t * t * t;

    coeffs[0] =     - t / 3.f + tt / 2.f - ttt / 6.f;
    coeffs[1] = 1.f - t / 2.f - tt       + ttt / 2.f;
    coeffs[2] =       t       + tt / 2.f - ttt / 2.f;
    coeffs[3] =     - t / 6.f            + ttt / 6.f;
}

/**
 * Calculate kernel for bicubic interpolation.
 *
 * @param du horizontal relative coordinate
 * @param dv vertical relative coordinate
 * @param rmap calculated 4x4 window
 * @param u u remap data
 * @param v v remap data
 * @param ker ker remap data
 */
static void bicubic_kernel(float du, float dv, const XYRemap *rmap,
                           int16_t *u, int16_t *v, int16_t *ker)
{
    float du_coeffs[4];
    float dv_coeffs[4];

    calculate_bicubic_coeffs(du, du_coeffs);
    calculate_bicubic_coeffs(dv, dv_coeffs);

    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
            u[i * 4 + j] = rmap->u[i][j];
            v[i * 4 + j] = rmap->v[i][j];
            ker[i * 4 + j] = lrintf(du_coeffs[j] * dv_coeffs[i] * 16385.f);
        }
    }
}

/**
 * Calculate 1-dimensional lanczos coefficients.
 *
 * @param t relative coordinate
 * @param coeffs coefficients
 */
static inline void calculate_lanczos_coeffs(float t, float *coeffs)
{
    float sum = 0.f;

    for (int i = 0; i < 4; i++) {
        const float x = M_PI * (t - i + 1);
        if (x == 0.f) {
            coeffs[i] = 1.f;
        } else {
            coeffs[i] = sinf(x) * sinf(x / 2.f) / (x * x / 2.f);
        }
        sum += coeffs[i];
    }

    for (int i = 0; i < 4; i++) {
        coeffs[i] /= sum;
    }
}

/**
 * Calculate kernel for lanczos interpolation.
 *
 * @param du horizontal relative coordinate
 * @param dv vertical relative coordinate
 * @param rmap calculated 4x4 window
 * @param u u remap data
 * @param v v remap data
 * @param ker ker remap data
 */
static void lanczos_kernel(float du, float dv, const XYRemap *rmap,
                           int16_t *u, int16_t *v, int16_t *ker)
{
    float du_coeffs[4];
    float dv_coeffs[4];

    calculate_lanczos_coeffs(du, du_coeffs);
    calculate_lanczos_coeffs(dv, dv_coeffs);

    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
            u[i * 4 + j] = rmap->u[i][j];
            v[i * 4 + j] = rmap->v[i][j];
            ker[i * 4 + j] = lrintf(du_coeffs[j] * dv_coeffs[i] * 16385.f);
        }
    }
}

/**
 * Calculate 1-dimensional spline16 coefficients.
 *
 * @param t relative coordinate
 * @param coeffs coefficients
 */
static void calculate_spline16_coeffs(float t, float *coeffs)
{
    coeffs[0] = ((-1.f / 3.f * t + 0.8f) * t - 7.f / 15.f) * t;
    coeffs[1] = ((t - 9.f / 5.f) * t - 0.2f) * t + 1.f;
    coeffs[2] = ((6.f / 5.f - t) * t + 0.8f) * t;
    coeffs[3] = ((1.f / 3.f * t - 0.2f) * t - 2.f / 15.f) * t;
}

/**
 * Calculate kernel for spline16 interpolation.
 *
 * @param du horizontal relative coordinate
 * @param dv vertical relative coordinate
 * @param rmap calculated 4x4 window
 * @param u u remap data
 * @param v v remap data
 * @param ker ker remap data
 */
static void spline16_kernel(float du, float dv, const XYRemap *rmap,
                            int16_t *u, int16_t *v, int16_t *ker)
{
    float du_coeffs[4];
    float dv_coeffs[4];

    calculate_spline16_coeffs(du, du_coeffs);
    calculate_spline16_coeffs(dv, dv_coeffs);

    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
            u[i * 4 + j] = rmap->u[i][j];
            v[i * 4 + j] = rmap->v[i][j];
            ker[i * 4 + j] = lrintf(du_coeffs[j] * dv_coeffs[i] * 16385.f);
        }
    }
}

/**
 * Calculate 1-dimensional gaussian coefficients.
 *
 * @param t relative coordinate
 * @param coeffs coefficients
 */
static void calculate_gaussian_coeffs(float t, float *coeffs)
{
    float sum = 0.f;

    for (int i = 0; i < 4; i++) {
        const float x = t - (i - 1);
        if (x == 0.f) {
            coeffs[i] = 1.f;
        } else {
            coeffs[i] = expf(-2.f * x * x) * expf(-x * x / 2.f);
        }
        sum += coeffs[i];
    }

    for (int i = 0; i < 4; i++) {
        coeffs[i] /= sum;
    }
}

/**
 * Calculate kernel for gaussian interpolation.
 *
 * @param du horizontal relative coordinate
 * @param dv vertical relative coordinate
 * @param rmap calculated 4x4 window
 * @param u u remap data
 * @param v v remap data
 * @param ker ker remap data
 */
static void gaussian_kernel(float du, float dv, const XYRemap *rmap,
                            int16_t *u, int16_t *v, int16_t *ker)
{
    float du_coeffs[4];
    float dv_coeffs[4];

    calculate_gaussian_coeffs(du, du_coeffs);
    calculate_gaussian_coeffs(dv, dv_coeffs);

    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
            u[i * 4 + j] = rmap->u[i][j];
            v[i * 4 + j] = rmap->v[i][j];
            ker[i * 4 + j] = lrintf(du_coeffs[j] * dv_coeffs[i] * 16385.f);
        }
    }
}

/**
 * Modulo operation with only positive remainders.
 *
 * @param a dividend
 * @param b divisor
 *
 * @return positive remainder of (a / b)
 */
static inline int mod(int a, int b)
{
    const int res = a % b;
    if (res < 0) {
        return res + b;
    } else {
        return res;
    }
}
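
/*
 * Example: mod(-1, w) == w - 1, whereas the C '%' operator would yield -1.
 * This positive wrap-around is what the reflect/wrap helpers below rely on
 * for horizontal wrapping of equirectangular-style inputs.
 */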

/**
 * Reflect y operation.
 *
 * @param y input vertical position
 * @param h input height
 */
static inline int reflecty(int y, int h)
{
    if (y < 0) {
        return -y;
    } else if (y >= h) {
        return 2 * h - 1 - y;
    }

    return y;
}

/**
 * Reflect x operation for equirect.
 *
 * @param x input horizontal position
 * @param y input vertical position
 * @param w input width
 * @param h input height
 */
static inline int ereflectx(int x, int y, int w, int h)
{
    if (y < 0 || y >= h)
        x += w / 2;

    return mod(x, w);
}

/**
 * Reflect x operation.
 *
 * @param x input horizontal position
 * @param y input vertical position
 * @param w input width
 * @param h input height
 */
static inline int reflectx(int x, int y, int w, int h)
{
    if (y < 0 || y >= h)
        return w - 1 - x;

    return mod(x, w);
}

/**
 * Convert char to corresponding direction.
 * Used for cubemap options.
 */
static int get_direction(char c)
{
    switch (c) {
    case 'r':
        return RIGHT;
    case 'l':
        return LEFT;
    case 'u':
        return UP;
    case 'd':
        return DOWN;
    case 'f':
        return FRONT;
    case 'b':
        return BACK;
    default:
        return -1;
    }
}

/**
 * Convert char to corresponding rotation angle.
 * Used for cubemap options.
 */
static int get_rotation(char c)
{
    switch (c) {
    case '0':
        return ROT_0;
    case '1':
        return ROT_90;
    case '2':
        return ROT_180;
    case '3':
        return ROT_270;
    default:
        return -1;
    }
}

/**
 * Convert char to corresponding rotation order.
 */
static int get_rorder(char c)
{
    switch (c) {
    case 'Y':
    case 'y':
        return YAW;
    case 'P':
    case 'p':
        return PITCH;
    case 'R':
    case 'r':
        return ROLL;
    default:
        return -1;
    }
}
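
/*
 * Example: the default rorder string "ypr" is mapped one character at a
 * time through get_rorder() to YAW, PITCH, ROLL, i.e. rotations are
 * applied in that order.
 */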

/**
 * Prepare data for processing cubemap input format.
 *
 * @param ctx filter context
 *
 * @return error code
 */
static int prepare_cube_in(AVFilterContext *ctx)
{
    V360Context *s = ctx->priv;

    for (int face = 0; face < NB_FACES; face++) {
        const char c = s->in_forder[face];
        int direction;

        if (c == '\0') {
            av_log(ctx, AV_LOG_ERROR,
                   "Incomplete in_forder option. Direction for all 6 faces should be specified.\n");
            return AVERROR(EINVAL);
        }

        direction = get_direction(c);
        if (direction == -1) {
            av_log(ctx, AV_LOG_ERROR,
                   "Incorrect direction symbol '%c' in in_forder option.\n", c);
            return AVERROR(EINVAL);
        }

        s->in_cubemap_face_order[direction] = face;
    }

    for (int face = 0; face < NB_FACES; face++) {
        const char c = s->in_frot[face];
        int rotation;

        if (c == '\0') {
            av_log(ctx, AV_LOG_ERROR,
                   "Incomplete in_frot option. Rotation for all 6 faces should be specified.\n");
            return AVERROR(EINVAL);
        }

        rotation = get_rotation(c);
        if (rotation == -1) {
            av_log(ctx, AV_LOG_ERROR,
                   "Incorrect rotation symbol '%c' in in_frot option.\n", c);
            return AVERROR(EINVAL);
        }

        s->in_cubemap_face_rotation[face] = rotation;
    }

    return 0;
}

/**
 * Prepare data for processing cubemap output format.
 *
 * @param ctx filter context
 *
 * @return error code
 */
static int prepare_cube_out(AVFilterContext *ctx)
{
    V360Context *s = ctx->priv;

    for (int face = 0; face < NB_FACES; face++) {
        const char c = s->out_forder[face];
        int direction;

        if (c == '\0') {
            av_log(ctx, AV_LOG_ERROR,
                   "Incomplete out_forder option. Direction for all 6 faces should be specified.\n");
            return AVERROR(EINVAL);
        }

        direction = get_direction(c);
        if (direction == -1) {
            av_log(ctx, AV_LOG_ERROR,
                   "Incorrect direction symbol '%c' in out_forder option.\n", c);
            return AVERROR(EINVAL);
        }

        s->out_cubemap_direction_order[face] = direction;
    }

    for (int face = 0; face < NB_FACES; face++) {
        const char c = s->out_frot[face];
        int rotation;

        if (c == '\0') {
            av_log(ctx, AV_LOG_ERROR,
                   "Incomplete out_frot option. Rotation for all 6 faces should be specified.\n");
            return AVERROR(EINVAL);
        }

        rotation = get_rotation(c);
        if (rotation == -1) {
            av_log(ctx, AV_LOG_ERROR,
                   "Incorrect rotation symbol '%c' in out_frot option.\n", c);
            return AVERROR(EINVAL);
        }

        s->out_cubemap_face_rotation[face] = rotation;
    }

    return 0;
}

static inline void rotate_cube_face(float *uf, float *vf, int rotation)
{
    float tmp;

    switch (rotation) {
    case ROT_0:
        break;
    case ROT_90:
        tmp =  *uf;
        *uf = -*vf;
        *vf =  tmp;
        break;
    case ROT_180:
        *uf = -*uf;
        *vf = -*vf;
        break;
    case ROT_270:
        tmp = -*uf;
        *uf =  *vf;
        *vf =  tmp;
        break;
    default:
        av_assert0(0);
    }
}

static inline void rotate_cube_face_inverse(float *uf, float *vf, int rotation)
{
    float tmp;

    switch (rotation) {
    case ROT_0:
        break;
    case ROT_90:
        tmp = -*uf;
        *uf =  *vf;
        *vf =  tmp;
        break;
    case ROT_180:
        *uf = -*uf;
        *vf = -*vf;
        break;
    case ROT_270:
        tmp =  *uf;
        *uf = -*vf;
        *vf =  tmp;
        break;
    default:
        av_assert0(0);
    }
}

/**
 * Normalize vector.
 *
 * @param vec vector
 */
static void normalize_vector(float *vec)
{
    const float norm = sqrtf(vec[0] * vec[0] + vec[1] * vec[1] + vec[2] * vec[2]);

    vec[0] /= norm;
    vec[1] /= norm;
    vec[2] /= norm;
}

/**
 * Calculate 3D coordinates on sphere for corresponding cubemap position.
 * Common operation for every cubemap.
 *
 * @param s filter private context
 * @param uf horizontal cubemap coordinate [0, 1)
 * @param vf vertical cubemap coordinate [0, 1)
 * @param face face of cubemap
 * @param vec coordinates on sphere
 * @param scalew scale for uf
 * @param scaleh scale for vf
 */
static void cube_to_xyz(const V360Context *s,
                        float uf, float vf, int face,
                        float *vec, float scalew, float scaleh)
{
    const int direction = s->out_cubemap_direction_order[face];
    float l_x, l_y, l_z;

    uf /= scalew;
    vf /= scaleh;

    rotate_cube_face_inverse(&uf, &vf, s->out_cubemap_face_rotation[face]);

    switch (direction) {
    case RIGHT:
        l_x =  1.f;
        l_y =  vf;
        l_z = -uf;
        break;
    case LEFT:
        l_x = -1.f;
        l_y =  vf;
        l_z =  uf;
        break;
    case UP:
        l_x =  uf;
        l_y = -1.f;
        l_z =  vf;
        break;
    case DOWN:
        l_x =  uf;
        l_y =  1.f;
        l_z = -vf;
        break;
    case FRONT:
        l_x =  uf;
        l_y =  vf;
        l_z =  1.f;
        break;
    case BACK:
        l_x = -uf;
        l_y =  vf;
        l_z = -1.f;
        break;
    default:
        av_assert0(0);
    }

    vec[0] = l_x;
    vec[1] = l_y;
    vec[2] = l_z;

    normalize_vector(vec);
}

/**
 * Calculate cubemap position for corresponding 3D coordinates on sphere.
 * Common operation for every cubemap.
 *
 * @param s filter private context
 * @param vec coordinates on sphere
 * @param uf horizontal cubemap coordinate [0, 1)
 * @param vf vertical cubemap coordinate [0, 1)
 * @param direction direction of view
 */
static void xyz_to_cube(const V360Context *s,
                        const float *vec,
                        float *uf, float *vf, int *direction)
{
    const float phi   = atan2f(vec[0], vec[2]);
    const float theta = asinf(vec[1]);
    float phi_norm, theta_threshold;
    int face;

    if (phi >= -M_PI_4 && phi < M_PI_4) {
        *direction = FRONT;
        phi_norm = phi;
    } else if (phi >= -(M_PI_2 + M_PI_4) && phi < -M_PI_4) {
        *direction = LEFT;
        phi_norm = phi + M_PI_2;
    } else if (phi >= M_PI_4 && phi < M_PI_2 + M_PI_4) {
        *direction = RIGHT;
        phi_norm = phi - M_PI_2;
    } else {
        *direction = BACK;
        phi_norm = phi + ((phi > 0.f) ? -M_PI : M_PI);
    }

    theta_threshold = atanf(cosf(phi_norm));
    if (theta > theta_threshold) {
        *direction = DOWN;
    } else if (theta < -theta_threshold) {
        *direction = UP;
    }

    switch (*direction) {
    case RIGHT:
        *uf = -vec[2] / vec[0];
        *vf =  vec[1] / vec[0];
        break;
    case LEFT:
        *uf = -vec[2] / vec[0];
        *vf = -vec[1] / vec[0];
        break;
    case UP:
        *uf = -vec[0] / vec[1];
        *vf = -vec[2] / vec[1];
        break;
    case DOWN:
        *uf =  vec[0] / vec[1];
        *vf = -vec[2] / vec[1];
        break;
    case FRONT:
        *uf =  vec[0] / vec[2];
        *vf =  vec[1] / vec[2];
        break;
    case BACK:
        *uf =  vec[0] / vec[2];
        *vf = -vec[1] / vec[2];
        break;
    default:
        av_assert0(0);
    }

    face = s->in_cubemap_face_order[*direction];
    rotate_cube_face(uf, vf, s->in_cubemap_face_rotation[face]);

    (*uf) *= s->input_mirror_modifier[0];
    (*vf) *= s->input_mirror_modifier[1];
}

/**
 * Find position on another cube face in case of overflow/underflow.
 * Used for calculation of interpolation window.
 *
 * @param s filter private context
 * @param uf horizontal cubemap coordinate
 * @param vf vertical cubemap coordinate
 * @param direction direction of view
 * @param new_uf new horizontal cubemap coordinate
 * @param new_vf new vertical cubemap coordinate
 * @param face face position on cubemap
 */
static void process_cube_coordinates(const V360Context *s,
                                     float uf, float vf, int direction,
                                     float *new_uf, float *new_vf, int *face)
{
    /*
     *  Cubemap orientation
     *
     *           width
     *         <------->
     *         +-------+
     *         |       |                              U
     *         |  up   |                   h       ------->
     * +-------+-------+-------+-------+ ^ e      |
     * |       |       |       |       | | i    V |
     * | left  | front | right | back  | | g      |
     * +-------+-------+-------+-------+ v h      v
     *         |       |                   t
     *         | down  |
     *         +-------+
     */

    *face = s->in_cubemap_face_order[direction];
    rotate_cube_face_inverse(&uf, &vf, s->in_cubemap_face_rotation[*face]);

    if ((uf < -1.f || uf >= 1.f) && (vf < -1.f || vf >= 1.f)) {
        // There are no pixels to use in this case
        *new_uf = uf;
        *new_vf = vf;
    } else if (uf < -1.f) {
        uf += 2.f;
        switch (direction) {
        case RIGHT:
            direction = FRONT;
            *new_uf =  uf;
            *new_vf =  vf;
            break;
        case LEFT:
            direction = BACK;
            *new_uf =  uf;
            *new_vf =  vf;
            break;
        case UP:
            direction = LEFT;
            *new_uf =  vf;
            *new_vf = -uf;
            break;
        case DOWN:
            direction = LEFT;
            *new_uf = -vf;
            *new_vf =  uf;
            break;
        case FRONT:
            direction = LEFT;
            *new_uf =  uf;
            *new_vf =  vf;
            break;
        case BACK:
            direction = RIGHT;
            *new_uf =  uf;
            *new_vf =  vf;
            break;
        default:
            av_assert0(0);
        }
    } else if (uf >= 1.f) {
        uf -= 2.f;
        switch (direction) {
        case RIGHT:
            direction = BACK;
            *new_uf =  uf;
            *new_vf =  vf;
            break;
        case LEFT:
            direction = FRONT;
            *new_uf =  uf;
            *new_vf =  vf;
            break;
        case UP:
            direction = RIGHT;
            *new_uf = -vf;
            *new_vf =  uf;
            break;
        case DOWN:
            direction = RIGHT;
            *new_uf =  vf;
            *new_vf = -uf;
            break;
        case FRONT:
            direction = RIGHT;
            *new_uf =  uf;
            *new_vf =  vf;
            break;
        case BACK:
            direction = LEFT;
            *new_uf =  uf;
            *new_vf =  vf;
            break;
        default:
            av_assert0(0);
        }
    } else if (vf < -1.f) {
        vf += 2.f;
        switch (direction) {
        case RIGHT:
            direction = UP;
            *new_uf =  vf;
            *new_vf = -uf;
            break;
        case LEFT:
            direction = UP;
            *new_uf = -vf;
            *new_vf =  uf;
            break;
        case UP:
            direction = BACK;
            *new_uf = -uf;
            *new_vf = -vf;
            break;
        case DOWN:
            direction = FRONT;
            *new_uf =  uf;
            *new_vf =  vf;
            break;
        case FRONT:
            direction = UP;
            *new_uf =  uf;
            *new_vf =  vf;
            break;
        case BACK:
            direction = UP;
            *new_uf = -uf;
            *new_vf = -vf;
            break;
        default:
            av_assert0(0);
        }
    } else if (vf >= 1.f) {
        vf -= 2.f;
        switch (direction) {
        case RIGHT:
            direction = DOWN;
            *new_uf = -vf;
            *new_vf =  uf;
            break;
        case LEFT:
            direction = DOWN;
            *new_uf =  vf;
            *new_vf = -uf;
            break;
        case UP:
            direction = FRONT;
            *new_uf =  uf;
            *new_vf =  vf;
            break;
        case DOWN:
            direction = BACK;
            *new_uf = -uf;
            *new_vf = -vf;
            break;
        case FRONT:
            direction = DOWN;
            *new_uf =  uf;
            *new_vf =  vf;
            break;
        case BACK:
            direction = DOWN;
            *new_uf = -uf;
            *new_vf = -vf;
            break;
        default:
            av_assert0(0);
        }
    } else {
        // Inside cube face
        *new_uf = uf;
        *new_vf = vf;
    }

    *face = s->in_cubemap_face_order[direction];
    rotate_cube_face(new_uf, new_vf, s->in_cubemap_face_rotation[*face]);
}

/**
 * Calculate 3D coordinates on sphere for corresponding frame position in cubemap3x2 format.
 *
 * @param s filter private context
 * @param i horizontal position on frame [0, width)
 * @param j vertical position on frame [0, height)
 * @param width frame width
 * @param height frame height
 * @param vec coordinates on sphere
 */
static int cube3x2_to_xyz(const V360Context *s,
                          int i, int j, int width, int height,
                          float *vec)
{
    const float scalew = s->fout_pad > 0 ? 1.f - s->fout_pad / (width  / 3.f) : 1.f - s->out_pad;
    const float scaleh = s->fout_pad > 0 ? 1.f - s->fout_pad / (height / 2.f) : 1.f - s->out_pad;

    const float ew = width  / 3.f;
    const float eh = height / 2.f;

    const int u_face = floorf(i / ew);
    const int v_face = floorf(j / eh);
    const int face = u_face + 3 * v_face;

    const int u_shift = ceilf(ew * u_face);
    const int v_shift = ceilf(eh * v_face);
    const int ewi = ceilf(ew * (u_face + 1)) - u_shift;
    const int ehi = ceilf(eh * (v_face + 1)) - v_shift;

    const float uf = 2.f * (i - u_shift + 0.5f) / ewi - 1.f;
    const float vf = 2.f * (j - v_shift + 0.5f) / ehi - 1.f;

    cube_to_xyz(s, uf, vf, face, vec, scalew, scaleh);

    return 1;
}

/**
 * Calculate frame position in cubemap3x2 format for corresponding 3D coordinates on sphere.
 *
 * @param s filter private context
 * @param vec coordinates on sphere
 * @param width frame width
 * @param height frame height
 * @param us horizontal coordinates for interpolation window
 * @param vs vertical coordinates for interpolation window
 * @param du horizontal relative coordinate
 * @param dv vertical relative coordinate
 */
static int xyz_to_cube3x2(const V360Context *s,
                          const float *vec, int width, int height,
                          int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
{
    const float scalew = s->fin_pad > 0 ? 1.f - s->fin_pad / (width  / 3.f) : 1.f - s->in_pad;
    const float scaleh = s->fin_pad > 0 ? 1.f - s->fin_pad / (height / 2.f) : 1.f - s->in_pad;
    const float ew = width  / 3.f;
    const float eh = height / 2.f;
    float uf, vf;
    int ui, vi;
    int ewi, ehi;
    int direction, face;
    int u_face, v_face;

    xyz_to_cube(s, vec, &uf, &vf, &direction);

    uf *= scalew;
    vf *= scaleh;

    face = s->in_cubemap_face_order[direction];
    u_face = face % 3;
    v_face = face / 3;
    ewi = ceilf(ew * (u_face + 1)) - ceilf(ew * u_face);
    ehi = ceilf(eh * (v_face + 1)) - ceilf(eh * v_face);

    uf = 0.5f * ewi * (uf + 1.f) - 0.5f;
    vf = 0.5f * ehi * (vf + 1.f) - 0.5f;

    ui = floorf(uf);
    vi = floorf(vf);

    *du = uf - ui;
    *dv = vf - vi;

    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
            int new_ui = ui + j - 1;
            int new_vi = vi + i - 1;
            int u_shift, v_shift;
            int new_ewi, new_ehi;

            if (new_ui >= 0 && new_ui < ewi && new_vi >= 0 && new_vi < ehi) {
                face = s->in_cubemap_face_order[direction];

                u_face = face % 3;
                v_face = face / 3;
                u_shift = ceilf(ew * u_face);
                v_shift = ceilf(eh * v_face);
            } else {
                uf = 2.f * new_ui / ewi - 1.f;
                vf = 2.f * new_vi / ehi - 1.f;

                uf /= scalew;
                vf /= scaleh;

                process_cube_coordinates(s, uf, vf, direction, &uf, &vf, &face);

                uf *= scalew;
                vf *= scaleh;

                u_face = face % 3;
                v_face = face / 3;
                u_shift = ceilf(ew * u_face);
                v_shift = ceilf(eh * v_face);
                new_ewi = ceilf(ew * (u_face + 1)) - u_shift;
                new_ehi = ceilf(eh * (v_face + 1)) - v_shift;

                new_ui = av_clip(lrintf(0.5f * new_ewi * (uf + 1.f)), 0, new_ewi - 1);
                new_vi = av_clip(lrintf(0.5f * new_ehi * (vf + 1.f)), 0, new_ehi - 1);
            }

            us[i][j] = u_shift + new_ui;
            vs[i][j] = v_shift + new_vi;
        }
    }

    return 1;
}

/**
 * Calculate 3D coordinates on sphere for corresponding frame position in cubemap1x6 format.
 *
 * @param s filter private context
 * @param i horizontal position on frame [0, width)
 * @param j vertical position on frame [0, height)
 * @param width frame width
 * @param height frame height
 * @param vec coordinates on sphere
 */
static int cube1x6_to_xyz(const V360Context *s,
                          int i, int j, int width, int height,
                          float *vec)
{
    const float scalew = s->fout_pad > 0 ? 1.f - (float)(s->fout_pad) / width : 1.f - s->out_pad;
    const float scaleh = s->fout_pad > 0 ? 1.f - s->fout_pad / (height / 6.f) : 1.f - s->out_pad;

    const float ew = width;
    const float eh = height / 6.f;

    const int face = floorf(j / eh);

    const int v_shift = ceilf(eh * face);
    const int ehi = ceilf(eh * (face + 1)) - v_shift;

    const float uf = 2.f * (i + 0.5f) / ew - 1.f;
    const float vf = 2.f * (j - v_shift + 0.5f) / ehi - 1.f;

    cube_to_xyz(s, uf, vf, face, vec, scalew, scaleh);

    return 1;
}

/**
 * Calculate 3D coordinates on sphere for corresponding frame position in cubemap6x1 format.
 *
 * @param s filter private context
 * @param i horizontal position on frame [0, width)
 * @param j vertical position on frame [0, height)
 * @param width frame width
 * @param height frame height
 * @param vec coordinates on sphere
 */
static int cube6x1_to_xyz(const V360Context *s,
                          int i, int j, int width, int height,
                          float *vec)
{
    const float scalew = s->fout_pad > 0 ? 1.f - s->fout_pad / (width / 6.f) : 1.f - s->out_pad;
    const float scaleh = s->fout_pad > 0 ? 1.f - (float)(s->fout_pad) / height : 1.f - s->out_pad;

    const float ew = width / 6.f;
    const float eh = height;

    const int face = floorf(i / ew);

    const int u_shift = ceilf(ew * face);
    const int ewi = ceilf(ew * (face + 1)) - u_shift;

    const float uf = 2.f * (i - u_shift + 0.5f) / ewi - 1.f;
    const float vf = 2.f * (j + 0.5f) / eh - 1.f;

    cube_to_xyz(s, uf, vf, face, vec, scalew, scaleh);

    return 1;
}

/**
 * Calculate frame position in cubemap1x6 format for corresponding 3D coordinates on sphere.
 *
 * @param s filter private context
 * @param vec coordinates on sphere
 * @param width frame width
 * @param height frame height
 * @param us horizontal coordinates for interpolation window
 * @param vs vertical coordinates for interpolation window
 * @param du horizontal relative coordinate
 * @param dv vertical relative coordinate
 */
static int xyz_to_cube1x6(const V360Context *s,
                          const float *vec, int width, int height,
                          int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
{
    const float scalew = s->fin_pad > 0 ? 1.f - (float)(s->fin_pad) / width : 1.f - s->in_pad;
    const float scaleh = s->fin_pad > 0 ? 1.f - s->fin_pad / (height / 6.f) : 1.f - s->in_pad;
    const float eh = height / 6.f;
    const int ewi = width;
    float uf, vf;
    int ui, vi;
    int ehi;
    int direction, face;

    xyz_to_cube(s, vec, &uf, &vf, &direction);

    uf *= scalew;
    vf *= scaleh;

    face = s->in_cubemap_face_order[direction];
    ehi = ceilf(eh * (face + 1)) - ceilf(eh * face);

    uf = 0.5f * ewi * (uf + 1.f) - 0.5f;
    vf = 0.5f * ehi * (vf + 1.f) - 0.5f;

    ui = floorf(uf);
    vi = floorf(vf);

    *du = uf - ui;
    *dv = vf - vi;

    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
            int new_ui = ui + j - 1;
            int new_vi = vi + i - 1;
            int v_shift;
            int new_ehi;

            if (new_ui >= 0 && new_ui < ewi && new_vi >= 0 && new_vi < ehi) {
                face = s->in_cubemap_face_order[direction];

                v_shift = ceilf(eh * face);
            } else {
                uf = 2.f * new_ui / ewi - 1.f;
                vf = 2.f * new_vi / ehi - 1.f;

                uf /= scalew;
                vf /= scaleh;

                process_cube_coordinates(s, uf, vf, direction, &uf, &vf, &face);

                uf *= scalew;
                vf *= scaleh;

                v_shift = ceilf(eh * face);
                new_ehi = ceilf(eh * (face + 1)) - v_shift;

                new_ui = av_clip(lrintf(0.5f * ewi * (uf + 1.f)), 0, ewi - 1);
                new_vi = av_clip(lrintf(0.5f * new_ehi * (vf + 1.f)), 0, new_ehi - 1);
            }

            us[i][j] = new_ui;
            vs[i][j] = v_shift + new_vi;
        }
    }

    return 1;
}

/**
 * Calculate frame position in cubemap6x1 format for corresponding 3D coordinates on sphere.
 *
 * @param s filter private context
 * @param vec coordinates on sphere
 * @param width frame width
 * @param height frame height
 * @param us horizontal coordinates for interpolation window
 * @param vs vertical coordinates for interpolation window
 * @param du horizontal relative coordinate
 * @param dv vertical relative coordinate
 */
static int xyz_to_cube6x1(const V360Context *s,
                          const float *vec, int width, int height,
                          int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
{
    const float scalew = s->fin_pad > 0 ? 1.f - s->fin_pad / (width / 6.f) : 1.f - s->in_pad;
    const float scaleh = s->fin_pad > 0 ? 1.f - (float)(s->fin_pad) / height : 1.f - s->in_pad;
    const float ew = width / 6.f;
    const int ehi = height;
    float uf, vf;
    int ui, vi;
    int ewi;
    int direction, face;

    xyz_to_cube(s, vec, &uf, &vf, &direction);

    uf *= scalew;
    vf *= scaleh;

    face = s->in_cubemap_face_order[direction];
    ewi = ceilf(ew * (face + 1)) - ceilf(ew * face);

    uf = 0.5f * ewi * (uf + 1.f) - 0.5f;
    vf = 0.5f * ehi * (vf + 1.f) - 0.5f;

    ui = floorf(uf);
    vi = floorf(vf);

    *du = uf - ui;
    *dv = vf - vi;

    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
            int new_ui = ui + j - 1;
            int new_vi = vi + i - 1;
            int u_shift;
            int new_ewi;

            if (new_ui >= 0 && new_ui < ewi && new_vi >= 0 && new_vi < ehi) {
                face = s->in_cubemap_face_order[direction];

                u_shift = ceilf(ew * face);
            } else {
                uf = 2.f * new_ui / ewi - 1.f;
                vf = 2.f * new_vi / ehi - 1.f;

                uf /= scalew;
                vf /= scaleh;

                process_cube_coordinates(s, uf, vf, direction, &uf, &vf, &face);

                uf *= scalew;
                vf *= scaleh;

                u_shift = ceilf(ew * face);
                new_ewi = ceilf(ew * (face + 1)) - u_shift;

                new_ui = av_clip(lrintf(0.5f * new_ewi * (uf + 1.f)), 0, new_ewi - 1);
                new_vi = av_clip(lrintf(0.5f * ehi * (vf + 1.f)), 0, ehi - 1);
            }

            us[i][j] = u_shift + new_ui;
            vs[i][j] = new_vi;
        }
    }

    return 1;
}

/**
 * Calculate 3D coordinates on sphere for corresponding frame position in equirectangular format.
 *
 * @param s filter private context
 * @param i horizontal position on frame [0, width)
 * @param j vertical position on frame [0, height)
 * @param width frame width
 * @param height frame height
 * @param vec coordinates on sphere
 */
static int equirect_to_xyz(const V360Context *s,
                           int i, int j, int width, int height,
                           float *vec)
{
    const float phi   = ((2.f * i + 0.5f) / width  - 1.f) * M_PI;
    const float theta = ((2.f * j + 0.5f) / height - 1.f) * M_PI_2;

    const float sin_phi   = sinf(phi);
    const float cos_phi   = cosf(phi);
    const float sin_theta = sinf(theta);
    const float cos_theta = cosf(theta);

    vec[0] = cos_theta * sin_phi;
    vec[1] = sin_theta;
    vec[2] = cos_theta * cos_phi;

    return 1;
}
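
/*
 * Worked example for equirect_to_xyz(): at the frame centre, i = width/2
 * and j = height/2 give phi ~= 0 and theta ~= 0, so vec ~= (0, 0, 1),
 * i.e. the centre of an equirectangular frame looks straight ahead along
 * the +Z ("front") axis.
 */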

/**
 * Calculate 3D coordinates on sphere for corresponding frame position in half equirectangular format.
 *
 * @param s filter private context
 * @param i horizontal position on frame [0, width)
 * @param j vertical position on frame [0, height)
 * @param width frame width
 * @param height frame height
 * @param vec coordinates on sphere
 */
static int hequirect_to_xyz(const V360Context *s,
                            int i, int j, int width, int height,
                            float *vec)
{
    const float phi   = ((2.f * i + 0.5f) / width  - 1.f) * M_PI_2;
    const float theta = ((2.f * j + 0.5f) / height - 1.f) * M_PI_2;

    const float sin_phi   = sinf(phi);
    const float cos_phi   = cosf(phi);
    const float sin_theta = sinf(theta);
    const float cos_theta = cosf(theta);

    vec[0] = cos_theta * sin_phi;
    vec[1] = sin_theta;
    vec[2] = cos_theta * cos_phi;

    return 1;
}

/**
 * Prepare data for processing stereographic output format.
 *
 * @param ctx filter context
 *
 * @return error code
 */
static int prepare_stereographic_out(AVFilterContext *ctx)
{
    V360Context *s = ctx->priv;

    s->flat_range[0] = tanf(FFMIN(s->h_fov, 359.f) * M_PI / 720.f);
    s->flat_range[1] = tanf(FFMIN(s->v_fov, 359.f) * M_PI / 720.f);

    return 0;
}

/**
 * Calculate 3D coordinates on sphere for corresponding frame position in stereographic format.
 *
 * @param s filter private context
 * @param i horizontal position on frame [0, width)
 * @param j vertical position on frame [0, height)
 * @param width frame width
 * @param height frame height
 * @param vec coordinates on sphere
 */
static int stereographic_to_xyz(const V360Context *s,
                                int i, int j, int width, int height,
                                float *vec)
{
    const float x = ((2.f * i + 1.f) / width  - 1.f) * s->flat_range[0];
    const float y = ((2.f * j + 1.f) / height - 1.f) * s->flat_range[1];
    const float r = hypotf(x, y);
    const float theta = atanf(r) * 2.f;
    const float sin_theta = sinf(theta);

    vec[0] = x / r * sin_theta;
    vec[1] = y / r * sin_theta;
    vec[2] = cosf(theta);

    normalize_vector(vec);

    return 1;
}

/**
 * Prepare data for processing stereographic input format.
 *
 * @param ctx filter context
 *
 * @return error code
 */
static int prepare_stereographic_in(AVFilterContext *ctx)
{
    V360Context *s = ctx->priv;

    s->iflat_range[0] = tanf(FFMIN(s->ih_fov, 359.f) * M_PI / 720.f);
    s->iflat_range[1] = tanf(FFMIN(s->iv_fov, 359.f) * M_PI / 720.f);

    return 0;
}

/**
 * Calculate frame position in stereographic format for corresponding 3D coordinates on sphere.
 *
 * @param s filter private context
 * @param vec coordinates on sphere
 * @param width frame width
 * @param height frame height
 * @param us horizontal coordinates for interpolation window
 * @param vs vertical coordinates for interpolation window
 * @param du horizontal relative coordinate
 * @param dv vertical relative coordinate
 */
static int xyz_to_stereographic(const V360Context *s,
                                const float *vec, int width, int height,
                                int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
{
    const float theta = acosf(vec[2]);
    const float r = tanf(theta * 0.5f);
  1599. const float c = r / hypotf(vec[0], vec[1]);
  1600. const float x = vec[0] * c / s->iflat_range[0] * s->input_mirror_modifier[0];
  1601. const float y = vec[1] * c / s->iflat_range[1] * s->input_mirror_modifier[1];
  1602. const float uf = (x + 1.f) * width / 2.f;
  1603. const float vf = (y + 1.f) * height / 2.f;
  1604. const int ui = floorf(uf);
  1605. const int vi = floorf(vf);
  1606. const int visible = isfinite(x) && isfinite(y) && vi >= 0 && vi < height && ui >= 0 && ui < width;
  1607. *du = visible ? uf - ui : 0.f;
  1608. *dv = visible ? vf - vi : 0.f;
  1609. for (int i = 0; i < 4; i++) {
  1610. for (int j = 0; j < 4; j++) {
  1611. us[i][j] = visible ? av_clip(ui + j - 1, 0, width - 1) : 0;
  1612. vs[i][j] = visible ? av_clip(vi + i - 1, 0, height - 1) : 0;
  1613. }
  1614. }
  1615. return visible;
  1616. }
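/*
 * Stereographic mapping in short: with theta the angle from the view axis
 * (+Z), the image radius follows r = tan(theta / 2).  iflat_range holds
 * tan(FOV / 4) (FOV clamped below 360 degrees), so |x| and |y| reach 1
 * exactly at the configured field of view.
 */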
  1617. /**
  1618. * Prepare data for processing equisolid output format.
  1619. *
  1620. * @param ctx filter context
  1621. *
  1622. * @return error code
  1623. */
  1624. static int prepare_equisolid_out(AVFilterContext *ctx)
  1625. {
  1626. V360Context *s = ctx->priv;
  1627. s->flat_range[0] = sinf(s->h_fov * M_PI / 720.f);
  1628. s->flat_range[1] = sinf(s->v_fov * M_PI / 720.f);
  1629. return 0;
  1630. }
  1631. /**
  1632. * Calculate 3D coordinates on sphere for corresponding frame position in equisolid format.
  1633. *
  1634. * @param s filter private context
  1635. * @param i horizontal position on frame [0, width)
  1636. * @param j vertical position on frame [0, height)
  1637. * @param width frame width
  1638. * @param height frame height
  1639. * @param vec coordinates on sphere
  1640. */
  1641. static int equisolid_to_xyz(const V360Context *s,
  1642. int i, int j, int width, int height,
  1643. float *vec)
  1644. {
  1645. const float x = ((2.f * i + 1.f) / width - 1.f) * s->flat_range[0];
  1646. const float y = ((2.f * j + 1.f) / height - 1.f) * s->flat_range[1];
  1647. const float r = hypotf(x, y);
  1648. const float theta = asinf(r) * 2.f;
  1649. const float sin_theta = sinf(theta);
  1650. vec[0] = x / r * sin_theta;
  1651. vec[1] = y / r * sin_theta;
  1652. vec[2] = cosf(theta);
  1653. normalize_vector(vec);
  1654. return 1;
  1655. }
  1656. /**
  1657. * Prepare data for processing equisolid input format.
  1658. *
  1659. * @param ctx filter context
  1660. *
  1661. * @return error code
  1662. */
  1663. static int prepare_equisolid_in(AVFilterContext *ctx)
  1664. {
  1665. V360Context *s = ctx->priv;
  1666. s->iflat_range[0] = sinf(FFMIN(s->ih_fov, 359.f) * M_PI / 720.f);
  1667. s->iflat_range[1] = sinf(FFMIN(s->iv_fov, 359.f) * M_PI / 720.f);
  1668. return 0;
  1669. }
  1670. /**
  1671. * Calculate frame position in equisolid format for corresponding 3D coordinates on sphere.
  1672. *
  1673. * @param s filter private context
  1674. * @param vec coordinates on sphere
  1675. * @param width frame width
  1676. * @param height frame height
  1677. * @param us horizontal coordinates for interpolation window
  1678. * @param vs vertical coordinates for interpolation window
  1679. * @param du horizontal relative coordinate
  1680. * @param dv vertical relative coordinate
  1681. */
  1682. static int xyz_to_equisolid(const V360Context *s,
  1683. const float *vec, int width, int height,
  1684. int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
  1685. {
  1686. const float theta = acosf(vec[2]);
  1687. const float r = sinf(theta * 0.5f);
  1688. const float c = r / hypotf(vec[0], vec[1]);
  1689. const float x = vec[0] * c / s->iflat_range[0] * s->input_mirror_modifier[0];
  1690. const float y = vec[1] * c / s->iflat_range[1] * s->input_mirror_modifier[1];
  1691. const float uf = (x + 1.f) * width / 2.f;
  1692. const float vf = (y + 1.f) * height / 2.f;
  1693. const int ui = floorf(uf);
  1694. const int vi = floorf(vf);
  1695. const int visible = isfinite(x) && isfinite(y) && vi >= 0 && vi < height && ui >= 0 && ui < width;
  1696. *du = visible ? uf - ui : 0.f;
  1697. *dv = visible ? vf - vi : 0.f;
  1698. for (int i = 0; i < 4; i++) {
  1699. for (int j = 0; j < 4; j++) {
  1700. us[i][j] = visible ? av_clip(ui + j - 1, 0, width - 1) : 0;
  1701. vs[i][j] = visible ? av_clip(vi + i - 1, 0, height - 1) : 0;
  1702. }
  1703. }
  1704. return visible;
  1705. }
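/*
 * Equisolid (equal-area fisheye) mapping: the image radius follows
 * r = sin(theta / 2), and iflat_range holds sin(FOV / 4), so the frame edge
 * again corresponds to the configured field of view.
 */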
  1706. /**
  1707. * Prepare data for processing orthographic output format.
  1708. *
  1709. * @param ctx filter context
  1710. *
  1711. * @return error code
  1712. */
  1713. static int prepare_orthographic_out(AVFilterContext *ctx)
  1714. {
  1715. V360Context *s = ctx->priv;
  1716. s->flat_range[0] = sinf(FFMIN(s->h_fov, 180.f) * M_PI / 360.f);
  1717. s->flat_range[1] = sinf(FFMIN(s->v_fov, 180.f) * M_PI / 360.f);
  1718. return 0;
  1719. }
  1720. /**
  1721. * Calculate 3D coordinates on sphere for corresponding frame position in orthographic format.
  1722. *
  1723. * @param s filter private context
  1724. * @param i horizontal position on frame [0, width)
  1725. * @param j vertical position on frame [0, height)
  1726. * @param width frame width
  1727. * @param height frame height
  1728. * @param vec coordinates on sphere
  1729. */
  1730. static int orthographic_to_xyz(const V360Context *s,
  1731. int i, int j, int width, int height,
  1732. float *vec)
  1733. {
  1734. const float x = ((2.f * i + 1.f) / width - 1.f) * s->flat_range[0];
  1735. const float y = ((2.f * j + 1.f) / height - 1.f) * s->flat_range[1];
  1736. const float r = hypotf(x, y);
  1737. const float theta = asinf(r);
  1738. vec[0] = x;
  1739. vec[1] = y;
  1740. vec[2] = cosf(theta);
  1741. normalize_vector(vec);
  1742. return 1;
  1743. }
  1744. /**
  1745. * Prepare data for processing orthographic input format.
  1746. *
  1747. * @param ctx filter context
  1748. *
  1749. * @return error code
  1750. */
  1751. static int prepare_orthographic_in(AVFilterContext *ctx)
  1752. {
  1753. V360Context *s = ctx->priv;
  1754. s->iflat_range[0] = sinf(FFMIN(s->ih_fov, 180.f) * M_PI / 360.f);
  1755. s->iflat_range[1] = sinf(FFMIN(s->iv_fov, 180.f) * M_PI / 360.f);
  1756. return 0;
  1757. }
  1758. /**
  1759. * Calculate frame position in orthographic format for corresponding 3D coordinates on sphere.
  1760. *
  1761. * @param s filter private context
  1762. * @param vec coordinates on sphere
  1763. * @param width frame width
  1764. * @param height frame height
  1765. * @param us horizontal coordinates for interpolation window
  1766. * @param vs vertical coordinates for interpolation window
  1767. * @param du horizontal relative coordinate
  1768. * @param dv vertical relative coordinate
  1769. */
  1770. static int xyz_to_orthographic(const V360Context *s,
  1771. const float *vec, int width, int height,
  1772. int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
  1773. {
  1774. const float theta = acosf(vec[2]);
  1775. const float r = sinf(theta);
  1776. const float c = r / hypotf(vec[0], vec[1]);
  1777. const float x = vec[0] * c / s->iflat_range[0] * s->input_mirror_modifier[0];
  1778. const float y = vec[1] * c / s->iflat_range[1] * s->input_mirror_modifier[1];
  1779. const float uf = (x + 1.f) * width / 2.f;
  1780. const float vf = (y + 1.f) * height / 2.f;
  1781. const int ui = floorf(uf);
  1782. const int vi = floorf(vf);
  1783. const int visible = vec[2] >= 0.f && isfinite(x) && isfinite(y) && vi >= 0 && vi < height && ui >= 0 && ui < width;
  1784. *du = visible ? uf - ui : 0.f;
  1785. *dv = visible ? vf - vi : 0.f;
  1786. for (int i = 0; i < 4; i++) {
  1787. for (int j = 0; j < 4; j++) {
  1788. us[i][j] = visible ? av_clip(ui + j - 1, 0, width - 1) : 0;
  1789. vs[i][j] = visible ? av_clip(vi + i - 1, 0, height - 1) : 0;
  1790. }
  1791. }
  1792. return visible;
  1793. }
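/*
 * Orthographic mapping: the image radius follows r = sin(theta), which folds
 * the rear hemisphere onto the same disc, hence the extra vec[2] >= 0.f check
 * that restricts visibility to the front hemisphere.
 */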
  1794. /**
  1795. * Calculate frame position in equirectangular format for corresponding 3D coordinates on sphere.
  1796. *
  1797. * @param s filter private context
  1798. * @param vec coordinates on sphere
  1799. * @param width frame width
  1800. * @param height frame height
  1801. * @param us horizontal coordinates for interpolation window
  1802. * @param vs vertical coordinates for interpolation window
  1803. * @param du horizontal relative coordinate
  1804. * @param dv vertical relative coordinate
  1805. */
  1806. static int xyz_to_equirect(const V360Context *s,
  1807. const float *vec, int width, int height,
  1808. int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
  1809. {
  1810. const float phi = atan2f(vec[0], vec[2]) * s->input_mirror_modifier[0];
  1811. const float theta = asinf(vec[1]) * s->input_mirror_modifier[1];
  1812. const float uf = (phi / M_PI + 1.f) * width / 2.f;
  1813. const float vf = (theta / M_PI_2 + 1.f) * height / 2.f;
  1814. const int ui = floorf(uf);
  1815. const int vi = floorf(vf);
  1816. *du = uf - ui;
  1817. *dv = vf - vi;
  1818. for (int i = 0; i < 4; i++) {
  1819. for (int j = 0; j < 4; j++) {
  1820. us[i][j] = ereflectx(ui + j - 1, vi + i - 1, width, height);
  1821. vs[i][j] = reflecty(vi + i - 1, height);
  1822. }
  1823. }
  1824. return 1;
  1825. }
  1826. /**
  1827. * Calculate frame position in half equirectangular format for corresponding 3D coordinates on sphere.
  1828. *
  1829. * @param s filter private context
  1830. * @param vec coordinates on sphere
  1831. * @param width frame width
  1832. * @param height frame height
  1833. * @param us horizontal coordinates for interpolation window
  1834. * @param vs vertical coordinates for interpolation window
  1835. * @param du horizontal relative coordinate
  1836. * @param dv vertical relative coordinate
  1837. */
  1838. static int xyz_to_hequirect(const V360Context *s,
  1839. const float *vec, int width, int height,
  1840. int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
  1841. {
  1842. const float phi = atan2f(vec[0], vec[2]) * s->input_mirror_modifier[0];
  1843. const float theta = asinf(vec[1]) * s->input_mirror_modifier[1];
  1844. const float uf = (phi / M_PI_2 + 1.f) * width / 2.f;
  1845. const float vf = (theta / M_PI_2 + 1.f) * height / 2.f;
  1846. const int ui = floorf(uf);
  1847. const int vi = floorf(vf);
  1848. const int visible = phi >= -M_PI_2 && phi <= M_PI_2;
  1849. *du = uf - ui;
  1850. *dv = vf - vi;
  1851. for (int i = 0; i < 4; i++) {
  1852. for (int j = 0; j < 4; j++) {
  1853. us[i][j] = av_clip(ui + j - 1, 0, width - 1);
  1854. vs[i][j] = av_clip(vi + i - 1, 0, height - 1);
  1855. }
  1856. }
  1857. return visible;
  1858. }
  1859. /**
  1860. * Prepare data for processing flat input format.
  1861. *
  1862. * @param ctx filter context
  1863. *
  1864. * @return error code
  1865. */
  1866. static int prepare_flat_in(AVFilterContext *ctx)
  1867. {
  1868. V360Context *s = ctx->priv;
  1869. s->iflat_range[0] = tanf(0.5f * s->ih_fov * M_PI / 180.f);
  1870. s->iflat_range[1] = tanf(0.5f * s->iv_fov * M_PI / 180.f);
  1871. return 0;
  1872. }
  1873. /**
  1874. * Calculate frame position in flat format for corresponding 3D coordinates on sphere.
  1875. *
  1876. * @param s filter private context
  1877. * @param vec coordinates on sphere
  1878. * @param width frame width
  1879. * @param height frame height
  1880. * @param us horizontal coordinates for interpolation window
  1881. * @param vs vertical coordinates for interpolation window
  1882. * @param du horizontal relative coordinate
  1883. * @param dv vertical relative coordinate
  1884. */
  1885. static int xyz_to_flat(const V360Context *s,
  1886. const float *vec, int width, int height,
  1887. int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
  1888. {
  1889. const float theta = acosf(vec[2]);
  1890. const float r = tanf(theta);
  1891. const float rr = fabsf(r) < 1e+6f ? r : hypotf(width, height);
  1892. const float zf = vec[2];
  1893. const float h = hypotf(vec[0], vec[1]);
  1894. const float c = h <= 1e-6f ? 1.f : rr / h;
  1895. float uf = vec[0] * c / s->iflat_range[0] * s->input_mirror_modifier[0];
  1896. float vf = vec[1] * c / s->iflat_range[1] * s->input_mirror_modifier[1];
  1897. int visible, ui, vi;
  1898. uf = zf >= 0.f ? (uf + 1.f) * width / 2.f : 0.f;
  1899. vf = zf >= 0.f ? (vf + 1.f) * height / 2.f : 0.f;
  1900. ui = floorf(uf);
  1901. vi = floorf(vf);
  1902. visible = vi >= 0 && vi < height && ui >= 0 && ui < width && zf >= 0.f;
  1903. *du = uf - ui;
  1904. *dv = vf - vi;
  1905. for (int i = 0; i < 4; i++) {
  1906. for (int j = 0; j < 4; j++) {
  1907. us[i][j] = visible ? av_clip(ui + j - 1, 0, width - 1) : 0;
  1908. vs[i][j] = visible ? av_clip(vi + i - 1, 0, height - 1) : 0;
  1909. }
  1910. }
  1911. return visible;
  1912. }
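/*
 * Flat (gnomonic/rectilinear) mapping: the image radius follows r = tan(theta).
 * The tangent blows up near theta = pi/2, so rr substitutes the frame diagonal
 * for huge values, which simply pushes such samples off-frame; the zf >= 0.f
 * test keeps only the front hemisphere visible.
 */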
  1913. /**
  1914. * Calculate frame position in mercator format for corresponding 3D coordinates on sphere.
  1915. *
  1916. * @param s filter private context
  1917. * @param vec coordinates on sphere
  1918. * @param width frame width
  1919. * @param height frame height
  1920. * @param us horizontal coordinates for interpolation window
  1921. * @param vs vertical coordinates for interpolation window
  1922. * @param du horizontal relative coordinate
  1923. * @param dv vertical relative coordinate
  1924. */
  1925. static int xyz_to_mercator(const V360Context *s,
  1926. const float *vec, int width, int height,
  1927. int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
  1928. {
  1929. const float phi = atan2f(vec[0], vec[2]) * s->input_mirror_modifier[0];
  1930. const float theta = vec[1] * s->input_mirror_modifier[1];
  1931. const float uf = (phi / M_PI + 1.f) * width / 2.f;
  1932. const float vf = (av_clipf(logf((1.f + theta) / (1.f - theta)) / (2.f * M_PI), -1.f, 1.f) + 1.f) * height / 2.f;
  1933. const int ui = floorf(uf);
  1934. const int vi = floorf(vf);
  1935. *du = uf - ui;
  1936. *dv = vf - vi;
  1937. for (int i = 0; i < 4; i++) {
  1938. for (int j = 0; j < 4; j++) {
  1939. us[i][j] = av_clip(ui + j - 1, 0, width - 1);
  1940. vs[i][j] = av_clip(vi + i - 1, 0, height - 1);
  1941. }
  1942. }
  1943. return 1;
  1944. }
  1945. /**
  1946. * Calculate 3D coordinates on sphere for corresponding frame position in mercator format.
  1947. *
  1948. * @param s filter private context
  1949. * @param i horizontal position on frame [0, width)
  1950. * @param j vertical position on frame [0, height)
  1951. * @param width frame width
  1952. * @param height frame height
  1953. * @param vec coordinates on sphere
  1954. */
  1955. static int mercator_to_xyz(const V360Context *s,
  1956. int i, int j, int width, int height,
  1957. float *vec)
  1958. {
  1959. const float phi = ((2.f * i + 1.f) / width - 1.f) * M_PI + M_PI_2;
  1960. const float y = ((2.f * j + 1.f) / height - 1.f) * M_PI;
  1961. const float div = expf(2.f * y) + 1.f;
  1962. const float sin_phi = sinf(phi);
  1963. const float cos_phi = cosf(phi);
  1964. const float sin_theta = 2.f * expf(y) / div;
  1965. const float cos_theta = (expf(2.f * y) - 1.f) / div;
  1966. vec[0] = -sin_theta * cos_phi;
  1967. vec[1] = cos_theta;
  1968. vec[2] = sin_theta * sin_phi;
  1969. return 1;
  1970. }
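/*
 * The pair above implements the Mercator projection: xyz_to_mercator computes
 * y proportional to artanh(sin(latitude)), written in logarithmic form and
 * clipped so the poles stay finite, while mercator_to_xyz inverts it through
 * the equivalent relation sin(latitude) = tanh(y).
 */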
  1971. /**
  1972. * Calculate frame position in ball format for corresponding 3D coordinates on sphere.
  1973. *
  1974. * @param s filter private context
  1975. * @param vec coordinates on sphere
  1976. * @param width frame width
  1977. * @param height frame height
  1978. * @param us horizontal coordinates for interpolation window
  1979. * @param vs vertical coordinates for interpolation window
  1980. * @param du horizontal relative coordinate
  1981. * @param dv vertical relative coordinate
  1982. */
  1983. static int xyz_to_ball(const V360Context *s,
  1984. const float *vec, int width, int height,
  1985. int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
  1986. {
  1987. const float l = hypotf(vec[0], vec[1]);
  1988. const float r = sqrtf(1.f - vec[2]) / M_SQRT2;
  1989. const float uf = (1.f + r * vec[0] * s->input_mirror_modifier[0] / (l > 0.f ? l : 1.f)) * width * 0.5f;
  1990. const float vf = (1.f + r * vec[1] * s->input_mirror_modifier[1] / (l > 0.f ? l : 1.f)) * height * 0.5f;
  1991. const int ui = floorf(uf);
  1992. const int vi = floorf(vf);
  1993. *du = uf - ui;
  1994. *dv = vf - vi;
  1995. for (int i = 0; i < 4; i++) {
  1996. for (int j = 0; j < 4; j++) {
  1997. us[i][j] = av_clip(ui + j - 1, 0, width - 1);
  1998. vs[i][j] = av_clip(vi + i - 1, 0, height - 1);
  1999. }
  2000. }
  2001. return 1;
  2002. }
  2003. /**
  2004. * Calculate 3D coordinates on sphere for corresponding frame position in ball format.
  2005. *
  2006. * @param s filter private context
  2007. * @param i horizontal position on frame [0, width)
  2008. * @param j vertical position on frame [0, height)
  2009. * @param width frame width
  2010. * @param height frame height
  2011. * @param vec coordinates on sphere
  2012. */
  2013. static int ball_to_xyz(const V360Context *s,
  2014. int i, int j, int width, int height,
  2015. float *vec)
  2016. {
  2017. const float x = (2.f * i + 1.f) / width - 1.f;
  2018. const float y = (2.f * j + 1.f) / height - 1.f;
  2019. const float l = hypotf(x, y);
  2020. if (l <= 1.f) {
  2021. const float z = 2.f * l * sqrtf(1.f - l * l);
  2022. vec[0] = z * x / (l > 0.f ? l : 1.f);
  2023. vec[1] = z * y / (l > 0.f ? l : 1.f);
  2024. vec[2] = 1.f - 2.f * l * l;
  2025. } else {
  2026. vec[0] = 0.f;
  2027. vec[1] = 1.f;
  2028. vec[2] = 0.f;
  2029. return 0;
  2030. }
  2031. return 1;
  2032. }
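/*
 * Ball mapping: the image radius is r = sqrt((1 - z) / 2), so the front pole
 * (z = 1) lands at the centre of the disc and the back pole (z = -1) at its
 * rim; ball_to_xyz inverts this with z = 1 - 2*l*l and returns 0 for samples
 * outside the unit disc.
 */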
  2033. /**
  2034. * Calculate 3D coordinates on sphere for corresponding frame position in hammer format.
  2035. *
  2036. * @param s filter private context
  2037. * @param i horizontal position on frame [0, width)
  2038. * @param j vertical position on frame [0, height)
  2039. * @param width frame width
  2040. * @param height frame height
  2041. * @param vec coordinates on sphere
  2042. */
  2043. static int hammer_to_xyz(const V360Context *s,
  2044. int i, int j, int width, int height,
  2045. float *vec)
  2046. {
  2047. const float x = ((2.f * i + 1.f) / width - 1.f);
  2048. const float y = ((2.f * j + 1.f) / height - 1.f);
  2049. const float xx = x * x;
  2050. const float yy = y * y;
  2051. const float z = sqrtf(1.f - xx * 0.5f - yy * 0.5f);
  2052. const float a = M_SQRT2 * x * z;
  2053. const float b = 2.f * z * z - 1.f;
  2054. const float aa = a * a;
  2055. const float bb = b * b;
  2056. const float w = sqrtf(1.f - 2.f * yy * z * z);
  2057. vec[0] = w * 2.f * a * b / (aa + bb);
  2058. vec[1] = M_SQRT2 * y * z;
  2059. vec[2] = w * (bb - aa) / (aa + bb);
  2060. normalize_vector(vec);
  2061. return 1;
  2062. }
  2063. /**
  2064. * Calculate frame position in hammer format for corresponding 3D coordinates on sphere.
  2065. *
  2066. * @param s filter private context
  2067. * @param vec coordinates on sphere
  2068. * @param width frame width
  2069. * @param height frame height
  2070. * @param us horizontal coordinates for interpolation window
  2071. * @param vs vertical coordinates for interpolation window
  2072. * @param du horizontal relative coordinate
  2073. * @param dv vertical relative coordinate
  2074. */
  2075. static int xyz_to_hammer(const V360Context *s,
  2076. const float *vec, int width, int height,
  2077. int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
  2078. {
  2079. const float theta = atan2f(vec[0], vec[2]) * s->input_mirror_modifier[0];
  2080. const float z = sqrtf(1.f + sqrtf(1.f - vec[1] * vec[1]) * cosf(theta * 0.5f));
  2081. const float x = sqrtf(1.f - vec[1] * vec[1]) * sinf(theta * 0.5f) / z;
  2082. const float y = vec[1] / z * s->input_mirror_modifier[1];
  2083. const float uf = (x + 1.f) * width / 2.f;
  2084. const float vf = (y + 1.f) * height / 2.f;
  2085. const int ui = floorf(uf);
  2086. const int vi = floorf(vf);
  2087. *du = uf - ui;
  2088. *dv = vf - vi;
  2089. for (int i = 0; i < 4; i++) {
  2090. for (int j = 0; j < 4; j++) {
  2091. us[i][j] = av_clip(ui + j - 1, 0, width - 1);
  2092. vs[i][j] = av_clip(vi + i - 1, 0, height - 1);
  2093. }
  2094. }
  2095. return 1;
  2096. }
  2097. /**
  2098. * Calculate 3D coordinates on sphere for corresponding frame position in sinusoidal format.
  2099. *
  2100. * @param s filter private context
  2101. * @param i horizontal position on frame [0, width)
  2102. * @param j vertical position on frame [0, height)
  2103. * @param width frame width
  2104. * @param height frame height
  2105. * @param vec coordinates on sphere
  2106. */
  2107. static int sinusoidal_to_xyz(const V360Context *s,
  2108. int i, int j, int width, int height,
  2109. float *vec)
  2110. {
  2111. const float theta = ((2.f * j + 1.f) / height - 1.f) * M_PI_2;
  2112. const float phi = ((2.f * i + 1.f) / width - 1.f) * M_PI / cosf(theta);
  2113. const float sin_phi = sinf(phi);
  2114. const float cos_phi = cosf(phi);
  2115. const float sin_theta = sinf(theta);
  2116. const float cos_theta = cosf(theta);
  2117. vec[0] = cos_theta * sin_phi;
  2118. vec[1] = sin_theta;
  2119. vec[2] = cos_theta * cos_phi;
  2120. normalize_vector(vec);
  2121. return 1;
  2122. }
  2123. /**
  2124. * Calculate frame position in sinusoidal format for corresponding 3D coordinates on sphere.
  2125. *
  2126. * @param s filter private context
  2127. * @param vec coordinates on sphere
  2128. * @param width frame width
  2129. * @param height frame height
  2130. * @param us horizontal coordinates for interpolation window
  2131. * @param vs vertical coordinates for interpolation window
  2132. * @param du horizontal relative coordinate
  2133. * @param dv vertical relative coordinate
  2134. */
  2135. static int xyz_to_sinusoidal(const V360Context *s,
  2136. const float *vec, int width, int height,
  2137. int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
  2138. {
  2139. const float theta = asinf(vec[1]) * s->input_mirror_modifier[1];
  2140. const float phi = atan2f(vec[0], vec[2]) * s->input_mirror_modifier[0] * cosf(theta);
  2141. const float uf = (phi / M_PI + 1.f) * width / 2.f;
  2142. const float vf = (theta / M_PI_2 + 1.f) * height / 2.f;
  2143. const int ui = floorf(uf);
  2144. const int vi = floorf(vf);
  2145. *du = uf - ui;
  2146. *dv = vf - vi;
  2147. for (int i = 0; i < 4; i++) {
  2148. for (int j = 0; j < 4; j++) {
  2149. us[i][j] = av_clip(ui + j - 1, 0, width - 1);
  2150. vs[i][j] = av_clip(vi + i - 1, 0, height - 1);
  2151. }
  2152. }
  2153. return 1;
  2154. }
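/*
 * Sinusoidal mapping: columns are compressed by cos(latitude), i.e. the stored
 * horizontal coordinate is proportional to longitude * cos(latitude), which is
 * the classic equal-area sinusoidal projection; the two functions above are
 * each other's inverse (up to the half-pixel offsets).
 */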
  2155. /**
  2156. * Prepare data for processing equi-angular cubemap input format.
  2157. *
  2158. * @param ctx filter context
  2159. *
  2160. * @return error code
  2161. */
  2162. static int prepare_eac_in(AVFilterContext *ctx)
  2163. {
  2164. V360Context *s = ctx->priv;
  2165. if (s->ih_flip && s->iv_flip) {
  2166. s->in_cubemap_face_order[RIGHT] = BOTTOM_LEFT;
  2167. s->in_cubemap_face_order[LEFT] = BOTTOM_RIGHT;
  2168. s->in_cubemap_face_order[UP] = TOP_LEFT;
  2169. s->in_cubemap_face_order[DOWN] = TOP_RIGHT;
  2170. s->in_cubemap_face_order[FRONT] = BOTTOM_MIDDLE;
  2171. s->in_cubemap_face_order[BACK] = TOP_MIDDLE;
  2172. } else if (s->ih_flip) {
  2173. s->in_cubemap_face_order[RIGHT] = TOP_LEFT;
  2174. s->in_cubemap_face_order[LEFT] = TOP_RIGHT;
  2175. s->in_cubemap_face_order[UP] = BOTTOM_LEFT;
  2176. s->in_cubemap_face_order[DOWN] = BOTTOM_RIGHT;
  2177. s->in_cubemap_face_order[FRONT] = TOP_MIDDLE;
  2178. s->in_cubemap_face_order[BACK] = BOTTOM_MIDDLE;
  2179. } else if (s->iv_flip) {
  2180. s->in_cubemap_face_order[RIGHT] = BOTTOM_RIGHT;
  2181. s->in_cubemap_face_order[LEFT] = BOTTOM_LEFT;
  2182. s->in_cubemap_face_order[UP] = TOP_RIGHT;
  2183. s->in_cubemap_face_order[DOWN] = TOP_LEFT;
  2184. s->in_cubemap_face_order[FRONT] = BOTTOM_MIDDLE;
  2185. s->in_cubemap_face_order[BACK] = TOP_MIDDLE;
  2186. } else {
  2187. s->in_cubemap_face_order[RIGHT] = TOP_RIGHT;
  2188. s->in_cubemap_face_order[LEFT] = TOP_LEFT;
  2189. s->in_cubemap_face_order[UP] = BOTTOM_RIGHT;
  2190. s->in_cubemap_face_order[DOWN] = BOTTOM_LEFT;
  2191. s->in_cubemap_face_order[FRONT] = TOP_MIDDLE;
  2192. s->in_cubemap_face_order[BACK] = BOTTOM_MIDDLE;
  2193. }
  2194. if (s->iv_flip) {
  2195. s->in_cubemap_face_rotation[TOP_LEFT] = ROT_270;
  2196. s->in_cubemap_face_rotation[TOP_MIDDLE] = ROT_90;
  2197. s->in_cubemap_face_rotation[TOP_RIGHT] = ROT_270;
  2198. s->in_cubemap_face_rotation[BOTTOM_LEFT] = ROT_0;
  2199. s->in_cubemap_face_rotation[BOTTOM_MIDDLE] = ROT_0;
  2200. s->in_cubemap_face_rotation[BOTTOM_RIGHT] = ROT_0;
  2201. } else {
  2202. s->in_cubemap_face_rotation[TOP_LEFT] = ROT_0;
  2203. s->in_cubemap_face_rotation[TOP_MIDDLE] = ROT_0;
  2204. s->in_cubemap_face_rotation[TOP_RIGHT] = ROT_0;
  2205. s->in_cubemap_face_rotation[BOTTOM_LEFT] = ROT_270;
  2206. s->in_cubemap_face_rotation[BOTTOM_MIDDLE] = ROT_90;
  2207. s->in_cubemap_face_rotation[BOTTOM_RIGHT] = ROT_270;
  2208. }
  2209. return 0;
  2210. }
  2211. /**
  2212. * Prepare data for processing equi-angular cubemap output format.
  2213. *
  2214. * @param ctx filter context
  2215. *
  2216. * @return error code
  2217. */
  2218. static int prepare_eac_out(AVFilterContext *ctx)
  2219. {
  2220. V360Context *s = ctx->priv;
  2221. s->out_cubemap_direction_order[TOP_LEFT] = LEFT;
  2222. s->out_cubemap_direction_order[TOP_MIDDLE] = FRONT;
  2223. s->out_cubemap_direction_order[TOP_RIGHT] = RIGHT;
  2224. s->out_cubemap_direction_order[BOTTOM_LEFT] = DOWN;
  2225. s->out_cubemap_direction_order[BOTTOM_MIDDLE] = BACK;
  2226. s->out_cubemap_direction_order[BOTTOM_RIGHT] = UP;
  2227. s->out_cubemap_face_rotation[TOP_LEFT] = ROT_0;
  2228. s->out_cubemap_face_rotation[TOP_MIDDLE] = ROT_0;
  2229. s->out_cubemap_face_rotation[TOP_RIGHT] = ROT_0;
  2230. s->out_cubemap_face_rotation[BOTTOM_LEFT] = ROT_270;
  2231. s->out_cubemap_face_rotation[BOTTOM_MIDDLE] = ROT_90;
  2232. s->out_cubemap_face_rotation[BOTTOM_RIGHT] = ROT_270;
  2233. return 0;
  2234. }
  2235. /**
  2236. * Calculate 3D coordinates on sphere for corresponding frame position in equi-angular cubemap format.
  2237. *
  2238. * @param s filter private context
  2239. * @param i horizontal position on frame [0, width)
  2240. * @param j vertical position on frame [0, height)
  2241. * @param width frame width
  2242. * @param height frame height
  2243. * @param vec coordinates on sphere
  2244. */
  2245. static int eac_to_xyz(const V360Context *s,
  2246. int i, int j, int width, int height,
  2247. float *vec)
  2248. {
  2249. const float pixel_pad = 2;
  2250. const float u_pad = pixel_pad / width;
  2251. const float v_pad = pixel_pad / height;
  2252. int u_face, v_face, face;
  2253. float l_x, l_y, l_z;
  2254. float uf = (i + 0.5f) / width;
  2255. float vf = (j + 0.5f) / height;
  2256. // EAC has 2-pixel padding on faces except between faces on the same row
  2257. // Padding pixels do not seem to be stretched with the tangent like regular pixels
  2258. // The formulas below approximate the original padding as closely as I could get experimentally
  2259. // Horizontal padding
  2260. uf = 3.f * (uf - u_pad) / (1.f - 2.f * u_pad);
  2261. if (uf < 0.f) {
  2262. u_face = 0;
  2263. uf -= 0.5f;
  2264. } else if (uf >= 3.f) {
  2265. u_face = 2;
  2266. uf -= 2.5f;
  2267. } else {
  2268. u_face = floorf(uf);
  2269. uf = fmodf(uf, 1.f) - 0.5f;
  2270. }
  2271. // Vertical padding
  2272. v_face = floorf(vf * 2.f);
  2273. vf = (vf - v_pad - 0.5f * v_face) / (0.5f - 2.f * v_pad) - 0.5f;
  2274. if (uf >= -0.5f && uf < 0.5f) {
  2275. uf = tanf(M_PI_2 * uf);
  2276. } else {
  2277. uf = 2.f * uf;
  2278. }
  2279. if (vf >= -0.5f && vf < 0.5f) {
  2280. vf = tanf(M_PI_2 * vf);
  2281. } else {
  2282. vf = 2.f * vf;
  2283. }
  2284. face = u_face + 3 * v_face;
  2285. switch (face) {
  2286. case TOP_LEFT:
  2287. l_x = -1.f;
  2288. l_y = vf;
  2289. l_z = uf;
  2290. break;
  2291. case TOP_MIDDLE:
  2292. l_x = uf;
  2293. l_y = vf;
  2294. l_z = 1.f;
  2295. break;
  2296. case TOP_RIGHT:
  2297. l_x = 1.f;
  2298. l_y = vf;
  2299. l_z = -uf;
  2300. break;
  2301. case BOTTOM_LEFT:
  2302. l_x = -vf;
  2303. l_y = 1.f;
  2304. l_z = -uf;
  2305. break;
  2306. case BOTTOM_MIDDLE:
  2307. l_x = -vf;
  2308. l_y = -uf;
  2309. l_z = -1.f;
  2310. break;
  2311. case BOTTOM_RIGHT:
  2312. l_x = -vf;
  2313. l_y = -1.f;
  2314. l_z = uf;
  2315. break;
  2316. default:
  2317. av_assert0(0);
  2318. }
  2319. vec[0] = l_x;
  2320. vec[1] = l_y;
  2321. vec[2] = l_z;
  2322. normalize_vector(vec);
  2323. return 1;
  2324. }
  2325. /**
  2326. * Calculate frame position in equi-angular cubemap format for corresponding 3D coordinates on sphere.
  2327. *
  2328. * @param s filter private context
  2329. * @param vec coordinates on sphere
  2330. * @param width frame width
  2331. * @param height frame height
  2332. * @param us horizontal coordinates for interpolation window
  2333. * @param vs vertical coordinates for interpolation window
  2334. * @param du horizontal relative coordinate
  2335. * @param dv vertical relative coordinate
  2336. */
  2337. static int xyz_to_eac(const V360Context *s,
  2338. const float *vec, int width, int height,
  2339. int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
  2340. {
  2341. const float pixel_pad = 2;
  2342. const float u_pad = pixel_pad / width;
  2343. const float v_pad = pixel_pad / height;
  2344. float uf, vf;
  2345. int ui, vi;
  2346. int direction, face;
  2347. int u_face, v_face;
  2348. xyz_to_cube(s, vec, &uf, &vf, &direction);
  2349. face = s->in_cubemap_face_order[direction];
  2350. u_face = face % 3;
  2351. v_face = face / 3;
  2352. uf = M_2_PI * atanf(uf) + 0.5f;
  2353. vf = M_2_PI * atanf(vf) + 0.5f;
  2354. // These formulas are the inverses of the ones in eac_to_xyz
  2355. uf = (uf + u_face) * (1.f - 2.f * u_pad) / 3.f + u_pad;
  2356. vf = vf * (0.5f - 2.f * v_pad) + v_pad + 0.5f * v_face;
  2357. uf *= width;
  2358. vf *= height;
  2359. uf -= 0.5f;
  2360. vf -= 0.5f;
  2361. ui = floorf(uf);
  2362. vi = floorf(vf);
  2363. *du = uf - ui;
  2364. *dv = vf - vi;
  2365. for (int i = 0; i < 4; i++) {
  2366. for (int j = 0; j < 4; j++) {
  2367. us[i][j] = av_clip(ui + j - 1, 0, width - 1);
  2368. vs[i][j] = av_clip(vi + i - 1, 0, height - 1);
  2369. }
  2370. }
  2371. return 1;
  2372. }
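/*
 * The equi-angular part of EAC is this atan()/tan() pair: xyz_to_eac maps the
 * cube-face coordinate through (2/pi) * atan(u) + 0.5 so that equal distances
 * on the image correspond to equal view angles, undoing the tan(pi/2 * u)
 * stretch applied in eac_to_xyz.
 */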
  2373. /**
  2374. * Prepare data for processing flat output format.
  2375. *
  2376. * @param ctx filter context
  2377. *
  2378. * @return error code
  2379. */
  2380. static int prepare_flat_out(AVFilterContext *ctx)
  2381. {
  2382. V360Context *s = ctx->priv;
  2383. s->flat_range[0] = tanf(0.5f * s->h_fov * M_PI / 180.f);
  2384. s->flat_range[1] = tanf(0.5f * s->v_fov * M_PI / 180.f);
  2385. return 0;
  2386. }
  2387. /**
  2388. * Calculate 3D coordinates on sphere for corresponding frame position in flat format.
  2389. *
  2390. * @param s filter private context
  2391. * @param i horizontal position on frame [0, width)
  2392. * @param j vertical position on frame [0, height)
  2393. * @param width frame width
  2394. * @param height frame height
  2395. * @param vec coordinates on sphere
  2396. */
  2397. static int flat_to_xyz(const V360Context *s,
  2398. int i, int j, int width, int height,
  2399. float *vec)
  2400. {
  2401. const float l_x = s->flat_range[0] * ((2.f * i + 0.5f) / width - 1.f);
  2402. const float l_y = s->flat_range[1] * ((2.f * j + 0.5f) / height - 1.f);
  2403. vec[0] = l_x;
  2404. vec[1] = l_y;
  2405. vec[2] = 1.f;
  2406. normalize_vector(vec);
  2407. return 1;
  2408. }
  2409. /**
  2410. * Prepare data for processing fisheye output format.
  2411. *
  2412. * @param ctx filter context
  2413. *
  2414. * @return error code
  2415. */
  2416. static int prepare_fisheye_out(AVFilterContext *ctx)
  2417. {
  2418. V360Context *s = ctx->priv;
  2419. s->flat_range[0] = s->h_fov / 180.f;
  2420. s->flat_range[1] = s->v_fov / 180.f;
  2421. return 0;
  2422. }
  2423. /**
  2424. * Calculate 3D coordinates on sphere for corresponding frame position in fisheye format.
  2425. *
  2426. * @param s filter private context
  2427. * @param i horizontal position on frame [0, width)
  2428. * @param j vertical position on frame [0, height)
  2429. * @param width frame width
  2430. * @param height frame height
  2431. * @param vec coordinates on sphere
  2432. */
  2433. static int fisheye_to_xyz(const V360Context *s,
  2434. int i, int j, int width, int height,
  2435. float *vec)
  2436. {
  2437. const float uf = s->flat_range[0] * ((2.f * i) / width - 1.f);
  2438. const float vf = s->flat_range[1] * ((2.f * j + 1.f) / height - 1.f);
  2439. const float phi = atan2f(vf, uf);
  2440. const float theta = M_PI_2 * (1.f - hypotf(uf, vf));
  2441. const float sin_phi = sinf(phi);
  2442. const float cos_phi = cosf(phi);
  2443. const float sin_theta = sinf(theta);
  2444. const float cos_theta = cosf(theta);
  2445. vec[0] = cos_theta * cos_phi;
  2446. vec[1] = cos_theta * sin_phi;
  2447. vec[2] = sin_theta;
  2448. normalize_vector(vec);
  2449. return 1;
  2450. }
  2451. /**
  2452. * Prepare data for processing fisheye input format.
  2453. *
  2454. * @param ctx filter context
  2455. *
  2456. * @return error code
  2457. */
  2458. static int prepare_fisheye_in(AVFilterContext *ctx)
  2459. {
  2460. V360Context *s = ctx->priv;
  2461. s->iflat_range[0] = s->ih_fov / 180.f;
  2462. s->iflat_range[1] = s->iv_fov / 180.f;
  2463. return 0;
  2464. }
  2465. /**
  2466. * Calculate frame position in fisheye format for corresponding 3D coordinates on sphere.
  2467. *
  2468. * @param s filter private context
  2469. * @param vec coordinates on sphere
  2470. * @param width frame width
  2471. * @param height frame height
  2472. * @param us horizontal coordinates for interpolation window
  2473. * @param vs vertical coordinates for interpolation window
  2474. * @param du horizontal relative coordinate
  2475. * @param dv vertical relative coordinate
  2476. */
  2477. static int xyz_to_fisheye(const V360Context *s,
  2478. const float *vec, int width, int height,
  2479. int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
  2480. {
  2481. const float h = hypotf(vec[0], vec[1]);
  2482. const float lh = h > 0.f ? h : 1.f;
  2483. const float phi = atan2f(h, vec[2]) / M_PI;
  2484. float uf = vec[0] / lh * phi * s->input_mirror_modifier[0] / s->iflat_range[0];
  2485. float vf = vec[1] / lh * phi * s->input_mirror_modifier[1] / s->iflat_range[1];
  2486. const int visible = hypotf(uf, vf) <= 0.5f;
  2487. int ui, vi;
  2488. uf = (uf + 0.5f) * width;
  2489. vf = (vf + 0.5f) * height;
  2490. ui = floorf(uf);
  2491. vi = floorf(vf);
  2492. *du = visible ? uf - ui : 0.f;
  2493. *dv = visible ? vf - vi : 0.f;
  2494. for (int i = 0; i < 4; i++) {
  2495. for (int j = 0; j < 4; j++) {
  2496. us[i][j] = visible ? av_clip(ui + j - 1, 0, width - 1) : 0;
  2497. vs[i][j] = visible ? av_clip(vi + i - 1, 0, height - 1) : 0;
  2498. }
  2499. }
  2500. return visible;
  2501. }
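/*
 * Fisheye here uses the equidistant model: the image radius is proportional to
 * the angle between vec and +Z (atan2(hypot(x, y), z)), scaled by
 * iflat_range = FOV / 180 so the configured field of view spans the frame;
 * only samples inside the inscribed image circle (radius 0.5 in normalized
 * coordinates) are marked visible.
 */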
  2502. /**
  2503. * Calculate 3D coordinates on sphere for corresponding frame position in pannini format.
  2504. *
  2505. * @param s filter private context
  2506. * @param i horizontal position on frame [0, width)
  2507. * @param j vertical position on frame [0, height)
  2508. * @param width frame width
  2509. * @param height frame height
  2510. * @param vec coordinates on sphere
  2511. */
  2512. static int pannini_to_xyz(const V360Context *s,
  2513. int i, int j, int width, int height,
  2514. float *vec)
  2515. {
  2516. const float uf = ((2.f * i + 1.f) / width - 1.f);
  2517. const float vf = ((2.f * j + 1.f) / height - 1.f);
  2518. const float d = s->h_fov;
  2519. const float k = uf * uf / ((d + 1.f) * (d + 1.f));
  2520. const float dscr = k * k * d * d - (k + 1.f) * (k * d * d - 1.f);
  2521. const float clon = (-k * d + sqrtf(dscr)) / (k + 1.f);
  2522. const float S = (d + 1.f) / (d + clon);
  2523. const float lon = atan2f(uf, S * clon);
  2524. const float lat = atan2f(vf, S);
  2525. vec[0] = sinf(lon) * cosf(lat);
  2526. vec[1] = sinf(lat);
  2527. vec[2] = cosf(lon) * cosf(lat);
  2528. normalize_vector(vec);
  2529. return 1;
  2530. }
  2531. /**
  2532. * Calculate frame position in pannini format for corresponding 3D coordinates on sphere.
  2533. *
  2534. * @param s filter private context
  2535. * @param vec coordinates on sphere
  2536. * @param width frame width
  2537. * @param height frame height
  2538. * @param us horizontal coordinates for interpolation window
  2539. * @param vs vertical coordinates for interpolation window
  2540. * @param du horizontal relative coordinate
  2541. * @param dv vertical relative coordinate
  2542. */
  2543. static int xyz_to_pannini(const V360Context *s,
  2544. const float *vec, int width, int height,
  2545. int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
  2546. {
  2547. const float phi = atan2f(vec[0], vec[2]) * s->input_mirror_modifier[0];
  2548. const float theta = asinf(vec[1]) * s->input_mirror_modifier[1];
  2549. const float d = s->ih_fov;
  2550. const float S = (d + 1.f) / (d + cosf(phi));
  2551. const float x = S * sinf(phi);
  2552. const float y = S * tanf(theta);
  2553. const float uf = (x + 1.f) * width / 2.f;
  2554. const float vf = (y + 1.f) * height / 2.f;
  2555. const int ui = floorf(uf);
  2556. const int vi = floorf(vf);
  2557. const int visible = vi >= 0 && vi < height && ui >= 0 && ui < width && vec[2] >= 0.f;
  2558. *du = uf - ui;
  2559. *dv = vf - vi;
  2560. for (int i = 0; i < 4; i++) {
  2561. for (int j = 0; j < 4; j++) {
  2562. us[i][j] = visible ? av_clip(ui + j - 1, 0, width - 1) : 0;
  2563. vs[i][j] = visible ? av_clip(vi + i - 1, 0, height - 1) : 0;
  2564. }
  2565. }
  2566. return visible;
  2567. }
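/*
 * Pannini mapping: x = S * sin(phi), y = S * tan(theta) with
 * S = (d + 1) / (d + cos(phi)).  Note that the h_fov/ih_fov option is reused
 * here as the Pannini distance parameter d; d = 0 degenerates to the
 * rectilinear (gnomonic) projection.
 */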
  2568. /**
  2569. * Prepare data for processing cylindrical output format.
  2570. *
  2571. * @param ctx filter context
  2572. *
  2573. * @return error code
  2574. */
  2575. static int prepare_cylindrical_out(AVFilterContext *ctx)
  2576. {
  2577. V360Context *s = ctx->priv;
  2578. s->flat_range[0] = M_PI * s->h_fov / 360.f;
  2579. s->flat_range[1] = tanf(0.5f * s->v_fov * M_PI / 180.f);
  2580. return 0;
  2581. }
  2582. /**
  2583. * Calculate 3D coordinates on sphere for corresponding frame position in cylindrical format.
  2584. *
  2585. * @param s filter private context
  2586. * @param i horizontal position on frame [0, width)
  2587. * @param j vertical position on frame [0, height)
  2588. * @param width frame width
  2589. * @param height frame height
  2590. * @param vec coordinates on sphere
  2591. */
  2592. static int cylindrical_to_xyz(const V360Context *s,
  2593. int i, int j, int width, int height,
  2594. float *vec)
  2595. {
  2596. const float uf = s->flat_range[0] * ((2.f * i + 1.f) / width - 1.f);
  2597. const float vf = s->flat_range[1] * ((2.f * j + 1.f) / height - 1.f);
  2598. const float phi = uf;
  2599. const float theta = atanf(vf);
  2600. const float sin_phi = sinf(phi);
  2601. const float cos_phi = cosf(phi);
  2602. const float sin_theta = sinf(theta);
  2603. const float cos_theta = cosf(theta);
  2604. vec[0] = cos_theta * sin_phi;
  2605. vec[1] = sin_theta;
  2606. vec[2] = cos_theta * cos_phi;
  2607. normalize_vector(vec);
  2608. return 1;
  2609. }
  2610. /**
  2611. * Prepare data for processing cylindrical input format.
  2612. *
  2613. * @param ctx filter context
  2614. *
  2615. * @return error code
  2616. */
  2617. static int prepare_cylindrical_in(AVFilterContext *ctx)
  2618. {
  2619. V360Context *s = ctx->priv;
  2620. s->iflat_range[0] = M_PI * s->ih_fov / 360.f;
  2621. s->iflat_range[1] = tanf(0.5f * s->iv_fov * M_PI / 180.f);
  2622. return 0;
  2623. }
  2624. /**
  2625. * Calculate frame position in cylindrical format for corresponding 3D coordinates on sphere.
  2626. *
  2627. * @param s filter private context
  2628. * @param vec coordinates on sphere
  2629. * @param width frame width
  2630. * @param height frame height
  2631. * @param us horizontal coordinates for interpolation window
  2632. * @param vs vertical coordinates for interpolation window
  2633. * @param du horizontal relative coordinate
  2634. * @param dv vertical relative coordinate
  2635. */
  2636. static int xyz_to_cylindrical(const V360Context *s,
  2637. const float *vec, int width, int height,
  2638. int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
  2639. {
  2640. const float phi = atan2f(vec[0], vec[2]) * s->input_mirror_modifier[0] / s->iflat_range[0];
  2641. const float theta = asinf(vec[1]) * s->input_mirror_modifier[1];
  2642. const float uf = (phi + 1.f) * (width - 1) / 2.f;
  2643. const float vf = (tanf(theta) / s->iflat_range[1] + 1.f) * height / 2.f;
  2644. const int ui = floorf(uf);
  2645. const int vi = floorf(vf);
  2646. const int visible = vi >= 0 && vi < height && ui >= 0 && ui < width &&
  2647. theta <= M_PI * s->iv_fov / 180.f &&
  2648. theta >= -M_PI * s->iv_fov / 180.f;
  2649. *du = uf - ui;
  2650. *dv = vf - vi;
  2651. for (int i = 0; i < 4; i++) {
  2652. for (int j = 0; j < 4; j++) {
  2653. us[i][j] = visible ? av_clip(ui + j - 1, 0, width - 1) : 0;
  2654. vs[i][j] = visible ? av_clip(vi + i - 1, 0, height - 1) : 0;
  2655. }
  2656. }
  2657. return visible;
  2658. }
  2659. /**
  2660. * Calculate 3D coordinates on sphere for corresponding frame position in perspective format.
  2661. *
  2662. * @param s filter private context
  2663. * @param i horizontal position on frame [0, width)
  2664. * @param j vertical position on frame [0, height)
  2665. * @param width frame width
  2666. * @param height frame height
  2667. * @param vec coordinates on sphere
  2668. */
  2669. static int perspective_to_xyz(const V360Context *s,
  2670. int i, int j, int width, int height,
  2671. float *vec)
  2672. {
  2673. const float uf = ((2.f * i + 1.f) / width - 1.f);
  2674. const float vf = ((2.f * j + 1.f) / height - 1.f);
  2675. const float rh = hypotf(uf, vf);
  2676. const float sinzz = 1.f - rh * rh;
  2677. const float h = 1.f + s->v_fov;
  2678. const float sinz = (h - sqrtf(sinzz)) / (h / rh + rh / h);
  2679. const float sinz2 = sinz * sinz;
  2680. if (sinz2 <= 1.f) {
  2681. const float cosz = sqrtf(1.f - sinz2);
  2682. const float theta = asinf(cosz);
  2683. const float phi = atan2f(uf, vf);
  2684. const float sin_phi = sinf(phi);
  2685. const float cos_phi = cosf(phi);
  2686. const float sin_theta = sinf(theta);
  2687. const float cos_theta = cosf(theta);
  2688. vec[0] = cos_theta * sin_phi;
  2689. vec[1] = sin_theta;
  2690. vec[2] = cos_theta * cos_phi;
  2691. } else {
  2692. vec[0] = 0.f;
  2693. vec[1] = 1.f;
  2694. vec[2] = 0.f;
  2695. return 0;
  2696. }
  2697. normalize_vector(vec);
  2698. return 1;
  2699. }
  2700. /**
  2701. * Calculate 3D coordinates on sphere for corresponding frame position in tetrahedron format.
  2702. *
  2703. * @param s filter private context
  2704. * @param i horizontal position on frame [0, width)
  2705. * @param j vertical position on frame [0, height)
  2706. * @param width frame width
  2707. * @param height frame height
  2708. * @param vec coordinates on sphere
  2709. */
  2710. static int tetrahedron_to_xyz(const V360Context *s,
  2711. int i, int j, int width, int height,
  2712. float *vec)
  2713. {
  2714. const float uf = (float)i / width;
  2715. const float vf = (float)j / height;
  2716. vec[0] = uf < 0.5f ? uf * 4.f - 1.f : 3.f - uf * 4.f;
  2717. vec[1] = 1.f - vf * 2.f;
  2718. vec[2] = 2.f * fabsf(1.f - fabsf(1.f - uf * 2.f + vf)) - 1.f;
  2719. normalize_vector(vec);
  2720. return 1;
  2721. }
  2722. /**
  2723. * Calculate frame position in tetrahedron format for corresponding 3D coordinates on sphere.
  2724. *
  2725. * @param s filter private context
  2726. * @param vec coordinates on sphere
  2727. * @param width frame width
  2728. * @param height frame height
  2729. * @param us horizontal coordinates for interpolation window
  2730. * @param vs vertical coordinates for interpolation window
  2731. * @param du horizontal relative coordinate
  2732. * @param dv vertical relative coordinate
  2733. */
  2734. static int xyz_to_tetrahedron(const V360Context *s,
  2735. const float *vec, int width, int height,
  2736. int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
  2737. {
  2738. const float d0 = vec[0] * 1.f + vec[1] * 1.f + vec[2] *-1.f;
  2739. const float d1 = vec[0] *-1.f + vec[1] *-1.f + vec[2] *-1.f;
  2740. const float d2 = vec[0] * 1.f + vec[1] *-1.f + vec[2] * 1.f;
  2741. const float d3 = vec[0] *-1.f + vec[1] * 1.f + vec[2] * 1.f;
  2742. const float d = FFMAX(d0, FFMAX3(d1, d2, d3));
  2743. float uf, vf, x, y, z;
  2744. int ui, vi;
  2745. x = vec[0] / d;
  2746. y = vec[1] / d;
  2747. z = -vec[2] / d;
  2748. vf = 0.5f - y * 0.5f * s->input_mirror_modifier[1];
  2749. if ((x + y >= 0.f && y + z >= 0.f && -z - x <= 0.f) ||
  2750. (x + y <= 0.f && -y + z >= 0.f && z - x >= 0.f)) {
  2751. uf = 0.25f * x * s->input_mirror_modifier[0] + 0.25f;
  2752. } else {
  2753. uf = 0.75f - 0.25f * x * s->input_mirror_modifier[0];
  2754. }
  2755. uf *= width;
  2756. vf *= height;
  2757. ui = floorf(uf);
  2758. vi = floorf(vf);
  2759. *du = uf - ui;
  2760. *dv = vf - vi;
  2761. for (int i = 0; i < 4; i++) {
  2762. for (int j = 0; j < 4; j++) {
  2763. us[i][j] = reflectx(ui + j - 1, vi + i - 1, width, height);
  2764. vs[i][j] = reflecty(vi + i - 1, height);
  2765. }
  2766. }
  2767. return 1;
  2768. }
  2769. /**
  2770. * Calculate 3D coordinates on sphere for corresponding frame position in dual fisheye format.
  2771. *
  2772. * @param s filter private context
  2773. * @param i horizontal position on frame [0, width)
  2774. * @param j vertical position on frame [0, height)
  2775. * @param width frame width
  2776. * @param height frame height
  2777. * @param vec coordinates on sphere
  2778. */
  2779. static int dfisheye_to_xyz(const V360Context *s,
  2780. int i, int j, int width, int height,
  2781. float *vec)
  2782. {
  2783. const float ew = width / 2.f;
  2784. const float eh = height;
  2785. const int ei = i >= ew ? i - ew : i;
  2786. const float m = i >= ew ? 1.f : -1.f;
  2787. const float uf = s->flat_range[0] * ((2.f * ei) / ew - 1.f);
  2788. const float vf = s->flat_range[1] * ((2.f * j + 1.f) / eh - 1.f);
  2789. const float h = hypotf(uf, vf);
  2790. const float lh = h > 0.f ? h : 1.f;
  2791. const float theta = m * M_PI_2 * (1.f - h);
  2792. const float sin_theta = sinf(theta);
  2793. const float cos_theta = cosf(theta);
  2794. vec[0] = cos_theta * m * uf / lh;
  2795. vec[1] = cos_theta * vf / lh;
  2796. vec[2] = sin_theta;
  2797. normalize_vector(vec);
  2798. return 1;
  2799. }
  2800. /**
  2801. * Calculate frame position in dual fisheye format for corresponding 3D coordinates on sphere.
  2802. *
  2803. * @param s filter private context
  2804. * @param vec coordinates on sphere
  2805. * @param width frame width
  2806. * @param height frame height
  2807. * @param us horizontal coordinates for interpolation window
  2808. * @param vs vertical coordinates for interpolation window
  2809. * @param du horizontal relative coordinate
  2810. * @param dv vertical relative coordinate
  2811. */
  2812. static int xyz_to_dfisheye(const V360Context *s,
  2813. const float *vec, int width, int height,
  2814. int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
  2815. {
  2816. const float ew = width / 2.f;
  2817. const float eh = height;
  2818. const float h = hypotf(vec[0], vec[1]);
  2819. const float lh = h > 0.f ? h : 1.f;
  2820. const float theta = acosf(fabsf(vec[2])) / M_PI;
  2821. float uf = (theta * (vec[0] / lh) * s->input_mirror_modifier[0] / s->iflat_range[0] + 0.5f) * ew;
  2822. float vf = (theta * (vec[1] / lh) * s->input_mirror_modifier[1] / s->iflat_range[1] + 0.5f) * eh;
  2823. int ui, vi;
  2824. int u_shift;
  2825. if (vec[2] >= 0.f) {
  2826. u_shift = ceilf(ew);
  2827. } else {
  2828. u_shift = 0;
  2829. uf = ew - uf;
  2830. }
  2831. ui = floorf(uf);
  2832. vi = floorf(vf);
  2833. *du = uf - ui;
  2834. *dv = vf - vi;
  2835. for (int i = 0; i < 4; i++) {
  2836. for (int j = 0; j < 4; j++) {
  2837. us[i][j] = av_clip(u_shift + ui + j - 1, 0, width - 1);
  2838. vs[i][j] = av_clip( vi + i - 1, 0, height - 1);
  2839. }
  2840. }
  2841. return 1;
  2842. }
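/*
 * Dual fisheye packs two equidistant fisheye images side by side, each
 * ew = width / 2 wide: the front hemisphere (vec[2] >= 0) goes to the right
 * half and the rear hemisphere to the left half with u mirrored
 * (uf = ew - uf); the angle is measured from the nearer pole via
 * acos(|z|) / pi.
 */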
  2843. /**
  2844. * Calculate 3D coordinates on sphere for corresponding frame position in Facebook's barrel format.
  2845. *
  2846. * @param s filter private context
  2847. * @param i horizontal position on frame [0, width)
  2848. * @param j vertical position on frame [0, height)
  2849. * @param width frame width
  2850. * @param height frame height
  2851. * @param vec coordinates on sphere
  2852. */
  2853. static int barrel_to_xyz(const V360Context *s,
  2854. int i, int j, int width, int height,
  2855. float *vec)
  2856. {
  2857. const float scale = 0.99f;
  2858. float l_x, l_y, l_z;
  2859. if (i < 4 * width / 5) {
  2860. const float theta_range = M_PI_4;
  2861. const int ew = 4 * width / 5;
  2862. const int eh = height;
  2863. const float phi = ((2.f * i) / ew - 1.f) * M_PI / scale;
  2864. const float theta = ((2.f * j) / eh - 1.f) * theta_range / scale;
  2865. const float sin_phi = sinf(phi);
  2866. const float cos_phi = cosf(phi);
  2867. const float sin_theta = sinf(theta);
  2868. const float cos_theta = cosf(theta);
  2869. l_x = cos_theta * sin_phi;
  2870. l_y = sin_theta;
  2871. l_z = cos_theta * cos_phi;
  2872. } else {
  2873. const int ew = width / 5;
  2874. const int eh = height / 2;
  2875. float uf, vf;
  2876. if (j < eh) { // UP
  2877. uf = 2.f * (i - 4 * ew) / ew - 1.f;
  2878. vf = 2.f * (j ) / eh - 1.f;
  2879. uf /= scale;
  2880. vf /= scale;
  2881. l_x = uf;
  2882. l_y = -1.f;
  2883. l_z = vf;
  2884. } else { // DOWN
  2885. uf = 2.f * (i - 4 * ew) / ew - 1.f;
  2886. vf = 2.f * (j - eh) / eh - 1.f;
  2887. uf /= scale;
  2888. vf /= scale;
  2889. l_x = uf;
  2890. l_y = 1.f;
  2891. l_z = -vf;
  2892. }
  2893. }
  2894. vec[0] = l_x;
  2895. vec[1] = l_y;
  2896. vec[2] = l_z;
  2897. normalize_vector(vec);
  2898. return 1;
  2899. }
  2900. /**
  2901. * Calculate frame position in Facebook's barrel format for corresponding 3D coordinates on sphere.
  2902. *
  2903. * @param s filter private context
  2904. * @param vec coordinates on sphere
  2905. * @param width frame width
  2906. * @param height frame height
  2907. * @param us horizontal coordinates for interpolation window
  2908. * @param vs vertical coordinates for interpolation window
  2909. * @param du horizontal relative coordinate
  2910. * @param dv vertical relative coordinate
  2911. */
  2912. static int xyz_to_barrel(const V360Context *s,
  2913. const float *vec, int width, int height,
  2914. int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
  2915. {
  2916. const float scale = 0.99f;
  2917. const float phi = atan2f(vec[0], vec[2]) * s->input_mirror_modifier[0];
  2918. const float theta = asinf(vec[1]) * s->input_mirror_modifier[1];
  2919. const float theta_range = M_PI_4;
  2920. int ew, eh;
  2921. int u_shift, v_shift;
  2922. float uf, vf;
  2923. int ui, vi;
  2924. if (theta > -theta_range && theta < theta_range) {
  2925. ew = 4 * width / 5;
  2926. eh = height;
  2927. u_shift = s->ih_flip ? width / 5 : 0;
  2928. v_shift = 0;
  2929. uf = (phi / M_PI * scale + 1.f) * ew / 2.f;
  2930. vf = (theta / theta_range * scale + 1.f) * eh / 2.f;
  2931. } else {
  2932. ew = width / 5;
  2933. eh = height / 2;
  2934. u_shift = s->ih_flip ? 0 : 4 * ew;
  2935. if (theta < 0.f) { // UP
  2936. uf = -vec[0] / vec[1];
  2937. vf = -vec[2] / vec[1];
  2938. v_shift = 0;
  2939. } else { // DOWN
  2940. uf = vec[0] / vec[1];
  2941. vf = -vec[2] / vec[1];
  2942. v_shift = eh;
  2943. }
  2944. uf *= s->input_mirror_modifier[0] * s->input_mirror_modifier[1];
  2945. vf *= s->input_mirror_modifier[1];
  2946. uf = 0.5f * ew * (uf * scale + 1.f);
  2947. vf = 0.5f * eh * (vf * scale + 1.f);
  2948. }
  2949. ui = floorf(uf);
  2950. vi = floorf(vf);
  2951. *du = uf - ui;
  2952. *dv = vf - vi;
  2953. for (int i = 0; i < 4; i++) {
  2954. for (int j = 0; j < 4; j++) {
  2955. us[i][j] = u_shift + av_clip(ui + j - 1, 0, ew - 1);
  2956. vs[i][j] = v_shift + av_clip(vi + i - 1, 0, eh - 1);
  2957. }
  2958. }
  2959. return 1;
  2960. }
  2961. /**
  2962. * Calculate frame position in Facebook's barrel split format for corresponding 3D coordinates on sphere.
  2963. *
  2964. * @param s filter private context
  2965. * @param vec coordinates on sphere
  2966. * @param width frame width
  2967. * @param height frame height
  2968. * @param us horizontal coordinates for interpolation window
  2969. * @param vs vertical coordinates for interpolation window
  2970. * @param du horizontal relative coordinate
  2971. * @param dv vertical relative coordinate
  2972. */
  2973. static int xyz_to_barrelsplit(const V360Context *s,
  2974. const float *vec, int width, int height,
  2975. int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
  2976. {
  2977. const float phi = atan2f(vec[0], vec[2]) * s->input_mirror_modifier[0];
  2978. const float theta = asinf(vec[1]) * s->input_mirror_modifier[1];
  2979. const float theta_range = M_PI_4;
  2980. int ew, eh;
  2981. int u_shift, v_shift;
  2982. float uf, vf;
  2983. int ui, vi;
  2984. if (theta >= -theta_range && theta <= theta_range) {
  2985. const float scalew = s->fin_pad > 0 ? 1.f - s->fin_pad / (width * 2.f / 3.f) : 1.f - s->in_pad;
  2986. const float scaleh = s->fin_pad > 0 ? 1.f - s->fin_pad / (height / 2.f) : 1.f - s->in_pad;
  2987. ew = width / 3 * 2;
  2988. eh = height / 2;
  2989. u_shift = s->ih_flip ? width / 3 : 0;
  2990. v_shift = phi >= M_PI_2 || phi < -M_PI_2 ? eh : 0;
  2991. uf = fmodf(phi, M_PI_2) / M_PI_2;
  2992. vf = theta / M_PI_4;
  2993. if (v_shift)
  2994. uf = uf >= 0.f ? fmodf(uf - 1.f, 1.f) : fmodf(uf + 1.f, 1.f);
  2995. uf = (uf * scalew + 1.f) * width / 3.f;
  2996. vf = (vf * scaleh + 1.f) * height / 4.f;
  2997. } else {
  2998. const float scalew = s->fin_pad > 0 ? 1.f - s->fin_pad / (width / 3.f) : 1.f - s->in_pad;
  2999. const float scaleh = s->fin_pad > 0 ? 1.f - s->fin_pad / (height / 4.f) : 1.f - s->in_pad;
  3000. int v_offset = 0;
  3001. ew = width / 3;
  3002. eh = height / 4;
  3003. u_shift = s->ih_flip ? 0 : 2 * ew;
  3004. if (theta <= 0.f && theta >= -M_PI_2 &&
  3005. phi <= M_PI_2 && phi >= -M_PI_2) {
  3006. uf = -vec[0] / vec[1];
  3007. vf = -vec[2] / vec[1];
  3008. v_shift = 0;
  3009. v_offset = -eh;
  3010. } else if (theta >= 0.f && theta <= M_PI_2 &&
  3011. phi <= M_PI_2 && phi >= -M_PI_2) {
  3012. uf = vec[0] / vec[1];
  3013. vf = -vec[2] / vec[1];
  3014. v_shift = height * 0.25f;
  3015. } else if (theta <= 0.f && theta >= -M_PI_2) {
  3016. uf = vec[0] / vec[1];
  3017. vf = vec[2] / vec[1];
  3018. v_shift = height * 0.5f;
  3019. v_offset = -eh;
  3020. } else {
  3021. uf = -vec[0] / vec[1];
  3022. vf = vec[2] / vec[1];
  3023. v_shift = height * 0.75f;
  3024. }
  3025. uf *= s->input_mirror_modifier[0] * s->input_mirror_modifier[1];
  3026. vf *= s->input_mirror_modifier[1];
  3027. uf = 0.5f * width / 3.f * (uf * scalew + 1.f);
  3028. vf = height * 0.25f * (vf * scaleh + 1.f) + v_offset;
  3029. }
  3030. ui = floorf(uf);
  3031. vi = floorf(vf);
  3032. *du = uf - ui;
  3033. *dv = vf - vi;
  3034. for (int i = 0; i < 4; i++) {
  3035. for (int j = 0; j < 4; j++) {
  3036. us[i][j] = u_shift + av_clip(ui + j - 1, 0, ew - 1);
  3037. vs[i][j] = v_shift + av_clip(vi + i - 1, 0, eh - 1);
  3038. }
  3039. }
  3040. return 1;
  3041. }
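
/*
 * Layout note (editor's summary, inferred from the code above): in the barrel
 * split layout the left two thirds of the frame carry the +/-45 degree band as
 * two half-height rows selected by phi (v_shift of 0 or eh), while the right
 * third is divided into four quarter-height tiles holding the remaining top
 * and bottom parts of the sphere.
 */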
/**
 * Calculate 3D coordinates on sphere for corresponding frame position in Facebook's barrel split format.
 *
 * @param s filter private context
 * @param i horizontal position on frame [0, width)
 * @param j vertical position on frame [0, height)
 * @param width frame width
 * @param height frame height
 * @param vec coordinates on sphere
 */
static int barrelsplit_to_xyz(const V360Context *s,
                              int i, int j, int width, int height,
                              float *vec)
{
    const float x = (i + 0.5f) / width;
    const float y = (j + 0.5f) / height;

    float l_x, l_y, l_z;

    if (x < 2.f / 3.f) {
        const float scalew = s->fout_pad > 0 ? 1.f - s->fout_pad / (width * 2.f / 3.f) : 1.f - s->out_pad;
        const float scaleh = s->fout_pad > 0 ? 1.f - s->fout_pad / (height / 2.f) : 1.f - s->out_pad;

        const float back = floorf(y * 2.f);

        const float phi   = ((3.f / 2.f * x - 0.5f) / scalew - back) * M_PI;
        const float theta = (y - 0.25f - 0.5f * back) / scaleh * M_PI;

        const float sin_phi   = sinf(phi);
        const float cos_phi   = cosf(phi);
        const float sin_theta = sinf(theta);
        const float cos_theta = cosf(theta);

        l_x = cos_theta * sin_phi;
        l_y = sin_theta;
        l_z = cos_theta * cos_phi;
    } else {
        const float scalew = s->fout_pad > 0 ? 1.f - s->fout_pad / (width  / 3.f) : 1.f - s->out_pad;
        const float scaleh = s->fout_pad > 0 ? 1.f - s->fout_pad / (height / 4.f) : 1.f - s->out_pad;

        const int face = floorf(y * 4.f);
        float uf, vf;

        uf = x * 3.f - 2.f;

        switch (face) {
        case 0:
            vf = y * 2.f;
            uf = 1.f - uf;
            vf = 0.5f - vf;

            l_x = (0.5f - uf) / scalew;
            l_y = -0.5f;
            l_z = (0.5f - vf) / scaleh;
            break;
        case 1:
            vf = y * 2.f;
            uf = 1.f - uf;
            vf = 1.f - (vf - 0.5f);

            l_x = (0.5f - uf) / scalew;
            l_y =  0.5f;
            l_z = (-0.5f + vf) / scaleh;
            break;
        case 2:
            vf = y * 2.f - 0.5f;
            vf = 1.f - (1.f - vf);

            l_x = (0.5f - uf) / scalew;
            l_y = -0.5f;
            l_z = (0.5f - vf) / scaleh;
            break;
        case 3:
            vf = y * 2.f - 1.5f;

            l_x = (0.5f - uf) / scalew;
            l_y =  0.5f;
            l_z = (-0.5f + vf) / scaleh;
            break;
        }
    }

    vec[0] = l_x;
    vec[1] = l_y;
    vec[2] = l_z;

    normalize_vector(vec);

    return 1;
}
/**
 * Calculate 3D coordinates on sphere for corresponding frame position in tspyramid format.
 *
 * @param s filter private context
 * @param i horizontal position on frame [0, width)
 * @param j vertical position on frame [0, height)
 * @param width frame width
 * @param height frame height
 * @param vec coordinates on sphere
 */
static int tspyramid_to_xyz(const V360Context *s,
                            int i, int j, int width, int height,
                            float *vec)
{
    const float x = (i + 0.5f) / width;
    const float y = (j + 0.5f) / height;

    if (x < 0.5f) {
        vec[0] =  x * 4.f - 1.f;
        vec[1] = (y * 2.f - 1.f);
        vec[2] =  1.f;
    } else if (x >= 0.6875f && x < 0.8125f &&
               y >= 0.375f  && y < 0.625f) {
        vec[0] = -(x - 0.6875f) * 16.f + 1.f;
        vec[1] =  (y - 0.375f) * 8.f - 1.f;
        vec[2] = -1.f;
    } else if (0.5f <= x && x < 0.6875f &&
               ((0.f <= y && y < 0.375f && y >= 2.f * (x - 0.5f)) ||
                (0.375f <= y && y < 0.625f) ||
                (0.625f <= y && y < 1.f && y <= 2.f * (1.f - x)))) {
        vec[0] =  1.f;
        vec[1] =  2.f * (y - 2.f * x + 1.f) / (3.f - 4.f * x) - 1.f;
        vec[2] = -2.f * (x - 0.5f) / 0.1875f + 1.f;
    } else if (0.8125f <= x && x < 1.f &&
               ((0.f <= y && y < 0.375f && x >= (1.f - y / 2.f)) ||
                (0.375f <= y && y < 0.625f) ||
                (0.625f <= y && y < 1.f && y <= (2.f * x - 1.f)))) {
        vec[0] = -1.f;
        vec[1] =  2.f * (y + 2.f * x - 2.f) / (4.f * x - 3.f) - 1.f;
        vec[2] =  2.f * (x - 0.8125f) / 0.1875f - 1.f;
    } else if (0.f <= y && y < 0.375f &&
               ((0.5f <= x && x < 0.8125f && y < 2.f * (x - 0.5f)) ||
                (0.6875f <= x && x < 0.8125f) ||
                (0.8125f <= x && x < 1.f && x < (1.f - y / 2.f)))) {
        vec[0] =  2.f * (1.f - x - 0.5f * y) / (0.5f - y) - 1.f;
        vec[1] = -1.f;
        vec[2] =  2.f * (0.375f - y) / 0.375f - 1.f;
    } else {
        vec[0] =  2.f * (0.5f - x + 0.5f * y) / (y - 0.5f) - 1.f;
        vec[1] =  1.f;
        vec[2] = -2.f * (1.f - y) / 0.375f + 1.f;
    }

    normalize_vector(vec);

    return 1;
}
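
/*
 * Layout note (editor's summary, inferred from the branches above): in the
 * truncated square pyramid (tspyramid) layout the left half of the frame is
 * the full-resolution front face, the central rectangle of the right half
 * ([0.6875, 0.8125) x [0.375, 0.625) in normalized coordinates) is the back
 * face, and the four surrounding trapezoids hold the remaining up, down, left
 * and right faces.
 */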
/**
 * Calculate frame position in tspyramid format for corresponding 3D coordinates on sphere.
 *
 * @param s filter private context
 * @param vec coordinates on sphere
 * @param width frame width
 * @param height frame height
 * @param us horizontal coordinates for interpolation window
 * @param vs vertical coordinates for interpolation window
 * @param du horizontal relative coordinate
 * @param dv vertical relative coordinate
 */
static int xyz_to_tspyramid(const V360Context *s,
                            const float *vec, int width, int height,
                            int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
{
    float uf, vf;
    int ui, vi;
    int face;

    xyz_to_cube(s, vec, &uf, &vf, &face);

    uf = (uf + 1.f) * 0.5f;
    vf = (vf + 1.f) * 0.5f;

    switch (face) {
    case UP:
        uf = 0.1875f * vf - 0.375f * uf * vf - 0.125f * uf + 0.8125f;
        vf = 0.375f - 0.375f * vf;
        break;
    case FRONT:
        uf = 0.5f * uf;
        break;
    case DOWN:
        uf = 1.f - 0.1875f * vf - 0.5f * uf + 0.375f * uf * vf;
        vf = 1.f - 0.375f * vf;
        break;
    case LEFT:
        vf = 0.25f * vf + 0.75f * uf * vf - 0.375f * uf + 0.375f;
        uf = 0.1875f * uf + 0.8125f;
        break;
    case RIGHT:
        vf = 0.375f * uf - 0.75f * uf * vf + vf;
        uf = 0.1875f * uf + 0.5f;
        break;
    case BACK:
        uf = 0.125f * uf + 0.6875f;
        vf = 0.25f * vf + 0.375f;
        break;
    }

    uf *= width;
    vf *= height;

    ui = floorf(uf);
    vi = floorf(vf);

    *du = uf - ui;
    *dv = vf - vi;

    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++) {
            us[i][j] = reflectx(ui + j - 1, vi + i - 1, width, height);
            vs[i][j] = reflecty(vi + i - 1, height);
        }
    }

    return 1;
}
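
/*
 * Editor's note: the forward mapping above first projects the direction onto a
 * regular cube via xyz_to_cube() and then warps each face's [0,1]x[0,1]
 * coordinates into its region of the tspyramid frame: the FRONT case simply
 * halves uf (left half of the frame), while the BACK case squeezes the face
 * into the central rectangle decoded in tspyramid_to_xyz() above.
 */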
static void multiply_matrix(float c[3][3], const float a[3][3], const float b[3][3])
{
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
            float sum = 0.f;

            for (int k = 0; k < 3; k++)
                sum += a[i][k] * b[k][j];

            c[i][j] = sum;
        }
    }
}
/**
 * Calculate rotation matrix for yaw/pitch/roll angles.
 */
static inline void calculate_rotation_matrix(float yaw, float pitch, float roll,
                                             float rot_mat[3][3],
                                             const int rotation_order[3])
{
    const float yaw_rad   = yaw   * M_PI / 180.f;
    const float pitch_rad = pitch * M_PI / 180.f;
    const float roll_rad  = roll  * M_PI / 180.f;

    const float sin_yaw   = sinf(yaw_rad);
    const float cos_yaw   = cosf(yaw_rad);
    const float sin_pitch = sinf(pitch_rad);
    const float cos_pitch = cosf(pitch_rad);
    const float sin_roll  = sinf(roll_rad);
    const float cos_roll  = cosf(roll_rad);

    float m[3][3][3];
    float temp[3][3];

    m[0][0][0] =  cos_yaw;  m[0][0][1] = 0;          m[0][0][2] =  sin_yaw;
    m[0][1][0] =  0;        m[0][1][1] = 1;          m[0][1][2] =  0;
    m[0][2][0] = -sin_yaw;  m[0][2][1] = 0;          m[0][2][2] =  cos_yaw;

    m[1][0][0] = 1;         m[1][0][1] = 0;          m[1][0][2] =  0;
    m[1][1][0] = 0;         m[1][1][1] = cos_pitch;  m[1][1][2] = -sin_pitch;
    m[1][2][0] = 0;         m[1][2][1] = sin_pitch;  m[1][2][2] =  cos_pitch;

    m[2][0][0] = cos_roll;  m[2][0][1] = -sin_roll;  m[2][0][2] =  0;
    m[2][1][0] = sin_roll;  m[2][1][1] =  cos_roll;  m[2][1][2] =  0;
    m[2][2][0] = 0;         m[2][2][1] =  0;         m[2][2][2] =  1;

    multiply_matrix(temp, m[rotation_order[0]], m[rotation_order[1]]);
    multiply_matrix(rot_mat, temp, m[rotation_order[2]]);
}
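
/*
 * Editor's note: m[0], m[1] and m[2] above are the standard elemental rotation
 * matrices about the y (yaw), x (pitch) and z (roll) axes:
 *
 *   R_y(a) = [  cos a  0  sin a ]   R_x(b) = [ 1      0       0 ]   R_z(c) = [ cos c  -sin c  0 ]
 *            [      0  1      0 ]            [ 0  cos b  -sin b ]            [ sin c   cos c  0 ]
 *            [ -sin a  0  cos a ]            [ 0  sin b   cos b ]            [     0       0  1 ]
 *
 * rotation_order[] selects the order in which they are multiplied together.
 */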
/**
 * Rotate vector with given rotation matrix.
 *
 * @param rot_mat rotation matrix
 * @param vec vector
 */
static inline void rotate(const float rot_mat[3][3],
                          float *vec)
{
    const float x_tmp = vec[0] * rot_mat[0][0] + vec[1] * rot_mat[0][1] + vec[2] * rot_mat[0][2];
    const float y_tmp = vec[0] * rot_mat[1][0] + vec[1] * rot_mat[1][1] + vec[2] * rot_mat[1][2];
    const float z_tmp = vec[0] * rot_mat[2][0] + vec[1] * rot_mat[2][1] + vec[2] * rot_mat[2][2];

    vec[0] = x_tmp;
    vec[1] = y_tmp;
    vec[2] = z_tmp;
}

static inline void set_mirror_modifier(int h_flip, int v_flip, int d_flip,
                                       float *modifier)
{
    modifier[0] = h_flip ? -1.f : 1.f;
    modifier[1] = v_flip ? -1.f : 1.f;
    modifier[2] = d_flip ? -1.f : 1.f;
}

static inline void mirror(const float *modifier, float *vec)
{
    vec[0] *= modifier[0];
    vec[1] *= modifier[1];
    vec[2] *= modifier[2];
}
static int allocate_plane(V360Context *s, int sizeof_uv, int sizeof_ker, int sizeof_mask, int p)
{
    if (!s->u[p])
        s->u[p] = av_calloc(s->uv_linesize[p] * s->pr_height[p], sizeof_uv);
    if (!s->v[p])
        s->v[p] = av_calloc(s->uv_linesize[p] * s->pr_height[p], sizeof_uv);
    if (!s->u[p] || !s->v[p])
        return AVERROR(ENOMEM);

    if (sizeof_ker) {
        if (!s->ker[p])
            s->ker[p] = av_calloc(s->uv_linesize[p] * s->pr_height[p], sizeof_ker);
        if (!s->ker[p])
            return AVERROR(ENOMEM);
    }

    if (sizeof_mask && !p) {
        if (!s->mask)
            s->mask = av_calloc(s->pr_width[p] * s->pr_height[p], sizeof_mask);
        if (!s->mask)
            return AVERROR(ENOMEM);
    }

    return 0;
}
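
/*
 * Editor's note: per plane, s->u/s->v hold the source coordinates of the
 * interpolation window for every destination pixel and s->ker holds the
 * matching kernel weights (unused for nearest-neighbour, hence sizeof_ker may
 * be 0). s->mask is a single plane-0-sized buffer that receives max_value
 * where a pixel is covered by both the input and output projections and 0
 * elsewhere; it is allocated only when the format has alpha and the alpha
 * option is set. All of these are filled in v360_slice() below.
 */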
static void fov_from_dfov(int format, float d_fov, float w, float h, float *h_fov, float *v_fov)
{
    switch (format) {
    case ORTHOGRAPHIC:
        {
            const float d = 0.5f * hypotf(w, h);
            const float l = sinf(d_fov * M_PI / 360.f) / d;

            *h_fov = asinf(w * 0.5 * l) * 360.f / M_PI;
            *v_fov = asinf(h * 0.5 * l) * 360.f / M_PI;

            if (d_fov > 180.f) {
                *h_fov = 180.f - *h_fov;
                *v_fov = 180.f - *v_fov;
            }
        }
        break;
    case EQUISOLID:
        {
            const float d = 0.5f * hypotf(w, h);
            const float l = d / (sinf(d_fov * M_PI / 720.f));

            *h_fov = 2.f * asinf(w * 0.5f / l) * 360.f / M_PI;
            *v_fov = 2.f * asinf(h * 0.5f / l) * 360.f / M_PI;
        }
        break;
    case STEREOGRAPHIC:
        {
            const float d = 0.5f * hypotf(w, h);
            const float l = d / (tanf(d_fov * M_PI / 720.f));

            *h_fov = 2.f * atan2f(w * 0.5f, l) * 360.f / M_PI;
            *v_fov = 2.f * atan2f(h * 0.5f, l) * 360.f / M_PI;
        }
        break;
    case DUAL_FISHEYE:
        {
            const float d = 0.5f * hypotf(w * 0.5f, h);

            *h_fov = d / w * 2.f * d_fov;
            *v_fov = d / h * d_fov;
        }
        break;
    case FISHEYE:
        {
            const float d = 0.5f * hypotf(w, h);

            *h_fov = d / w * d_fov;
            *v_fov = d / h * d_fov;
        }
        break;
    case FLAT:
    default:
        {
            const float da = tanf(0.5f * FFMIN(d_fov, 359.f) * M_PI / 180.f);
            const float d = hypotf(w, h);

            *h_fov = atan2f(da * w, d) * 360.f / M_PI;
            *v_fov = atan2f(da * h, d) * 360.f / M_PI;

            if (*h_fov < 0.f)
                *h_fov += 360.f;
            if (*v_fov < 0.f)
                *v_fov += 360.f;
        }
        break;
    }
}
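
/*
 * Worked example (editor's note, values approximate): for the FLAT case with
 * d_fov = 100 degrees and a 16:9 frame, da = tanf(50 deg) ~= 1.19 and
 * d = hypotf(16, 9) ~= 18.36, giving h_fov ~= 2 * atan(1.19 * 16 / 18.36) ~= 92
 * degrees and v_fov ~= 2 * atan(1.19 * 9 / 18.36) ~= 61 degrees, i.e. the
 * diagonal field of view is split between the axes according to the aspect
 * ratio.
 */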
static void set_dimensions(int *outw, int *outh, int w, int h, const AVPixFmtDescriptor *desc)
{
    outw[1] = outw[2] = FF_CEIL_RSHIFT(w, desc->log2_chroma_w);
    outw[0] = outw[3] = w;
    outh[1] = outh[2] = FF_CEIL_RSHIFT(h, desc->log2_chroma_h);
    outh[0] = outh[3] = h;
}
// Calculate remap data
static av_always_inline int v360_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    V360Context *s = ctx->priv;

    for (int p = 0; p < s->nb_allocated; p++) {
        const int max_value = s->max_value;
        const int width = s->pr_width[p];
        const int uv_linesize = s->uv_linesize[p];
        const int height = s->pr_height[p];
        const int in_width = s->inplanewidth[p];
        const int in_height = s->inplaneheight[p];
        const int slice_start = (height *  jobnr     ) / nb_jobs;
        const int slice_end   = (height * (jobnr + 1)) / nb_jobs;
        float du, dv;
        float vec[3];
        XYRemap rmap;

        for (int j = slice_start; j < slice_end; j++) {
            for (int i = 0; i < width; i++) {
                int16_t *u = s->u[p] + (j * uv_linesize + i) * s->elements;
                int16_t *v = s->v[p] + (j * uv_linesize + i) * s->elements;
                int16_t *ker = s->ker[p] + (j * uv_linesize + i) * s->elements;
                uint8_t *mask8 = p ? NULL : s->mask + (j * s->pr_width[0] + i);
                uint16_t *mask16 = p ? NULL : (uint16_t *)s->mask + (j * s->pr_width[0] + i);
                int in_mask, out_mask;

                if (s->out_transpose)
                    out_mask = s->out_transform(s, j, i, height, width, vec);
                else
                    out_mask = s->out_transform(s, i, j, width, height, vec);

                av_assert1(!isnan(vec[0]) && !isnan(vec[1]) && !isnan(vec[2]));
                rotate(s->rot_mat, vec);
                av_assert1(!isnan(vec[0]) && !isnan(vec[1]) && !isnan(vec[2]));

                normalize_vector(vec);

                mirror(s->output_mirror_modifier, vec);
                if (s->in_transpose)
                    in_mask = s->in_transform(s, vec, in_height, in_width, rmap.v, rmap.u, &du, &dv);
                else
                    in_mask = s->in_transform(s, vec, in_width, in_height, rmap.u, rmap.v, &du, &dv);
                av_assert1(!isnan(du) && !isnan(dv));
                s->calculate_kernel(du, dv, &rmap, u, v, ker);

                if (!p && s->mask) {
                    if (s->mask_size == 1) {
                        mask8[0] = 255 * (out_mask & in_mask);
                    } else {
                        mask16[0] = max_value * (out_mask & in_mask);
                    }
                }
            }
        }
    }

    return 0;
}
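
/*
 * Editor's note: the per-pixel pipeline above is: out_transform() maps the
 * destination pixel to a direction on the sphere, rotate() and mirror() apply
 * the user-requested orientation and flips, in_transform() maps the direction
 * back to source-frame coordinates, and calculate_kernel() stores the
 * resulting interpolation window and weights for use at remap time; the
 * transpose flags simply swap the (i, j)/(u, v) roles on either side.
 */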
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    V360Context *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const int depth = desc->comp[0].depth;
    const int sizeof_mask = s->mask_size = (depth + 7) >> 3;
    int sizeof_uv;
    int sizeof_ker;
    int err;
    int h, w;
    int in_offset_h, in_offset_w;
    int out_offset_h, out_offset_w;
    float hf, wf;
    int (*prepare_out)(AVFilterContext *ctx);
    int have_alpha;

    s->max_value = (1 << depth) - 1;
    s->input_mirror_modifier[0] = s->ih_flip ? -1.f : 1.f;
    s->input_mirror_modifier[1] = s->iv_flip ? -1.f : 1.f;

    switch (s->interp) {
    case NEAREST:
        s->calculate_kernel = nearest_kernel;
        s->remap_slice = depth <= 8 ? remap1_8bit_slice : remap1_16bit_slice;
        s->elements = 1;
        sizeof_uv = sizeof(int16_t) * s->elements;
        sizeof_ker = 0;
        break;
    case BILINEAR:
        s->calculate_kernel = bilinear_kernel;
        s->remap_slice = depth <= 8 ? remap2_8bit_slice : remap2_16bit_slice;
        s->elements = 2 * 2;
        sizeof_uv = sizeof(int16_t) * s->elements;
        sizeof_ker = sizeof(int16_t) * s->elements;
        break;
    case LAGRANGE9:
        s->calculate_kernel = lagrange_kernel;
        s->remap_slice = depth <= 8 ? remap3_8bit_slice : remap3_16bit_slice;
        s->elements = 3 * 3;
        sizeof_uv = sizeof(int16_t) * s->elements;
        sizeof_ker = sizeof(int16_t) * s->elements;
        break;
    case BICUBIC:
        s->calculate_kernel = bicubic_kernel;
        s->remap_slice = depth <= 8 ? remap4_8bit_slice : remap4_16bit_slice;
        s->elements = 4 * 4;
        sizeof_uv = sizeof(int16_t) * s->elements;
        sizeof_ker = sizeof(int16_t) * s->elements;
        break;
    case LANCZOS:
        s->calculate_kernel = lanczos_kernel;
        s->remap_slice = depth <= 8 ? remap4_8bit_slice : remap4_16bit_slice;
        s->elements = 4 * 4;
        sizeof_uv = sizeof(int16_t) * s->elements;
        sizeof_ker = sizeof(int16_t) * s->elements;
        break;
    case SPLINE16:
        s->calculate_kernel = spline16_kernel;
        s->remap_slice = depth <= 8 ? remap4_8bit_slice : remap4_16bit_slice;
        s->elements = 4 * 4;
        sizeof_uv = sizeof(int16_t) * s->elements;
        sizeof_ker = sizeof(int16_t) * s->elements;
        break;
    case GAUSSIAN:
        s->calculate_kernel = gaussian_kernel;
        s->remap_slice = depth <= 8 ? remap4_8bit_slice : remap4_16bit_slice;
        s->elements = 4 * 4;
        sizeof_uv = sizeof(int16_t) * s->elements;
        sizeof_ker = sizeof(int16_t) * s->elements;
        break;
    default:
        av_assert0(0);
    }
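
    /*
     * Editor's note: s->elements is the number of source samples used per
     * destination pixel (1 for nearest, 2x2 for bilinear, 3x3 for lagrange9,
     * 4x4 for the remaining kernels); it sizes the u/v/ker tables allocated in
     * allocate_plane().
     */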
    ff_v360_init(s, depth);

    for (int order = 0; order < NB_RORDERS; order++) {
        const char c = s->rorder[order];
        int rorder;

        if (c == '\0') {
            av_log(ctx, AV_LOG_WARNING,
                   "Incomplete rorder option. Direction for all 3 rotation orders should be specified. Switching to default rorder.\n");
            s->rotation_order[0] = YAW;
            s->rotation_order[1] = PITCH;
            s->rotation_order[2] = ROLL;
            break;
        }

        rorder = get_rorder(c);
        if (rorder == -1) {
            av_log(ctx, AV_LOG_WARNING,
                   "Incorrect rotation order symbol '%c' in rorder option. Switching to default rorder.\n", c);
            s->rotation_order[0] = YAW;
            s->rotation_order[1] = PITCH;
            s->rotation_order[2] = ROLL;
            break;
        }

        s->rotation_order[order] = rorder;
    }

    switch (s->in_stereo) {
    case STEREO_2D:
        w = inlink->w;
        h = inlink->h;
        in_offset_w = in_offset_h = 0;
        break;
    case STEREO_SBS:
        w = inlink->w / 2;
        h = inlink->h;
        in_offset_w = w;
        in_offset_h = 0;
        break;
    case STEREO_TB:
        w = inlink->w;
        h = inlink->h / 2;
        in_offset_w = 0;
        in_offset_h = h;
        break;
    default:
        av_assert0(0);
    }
    set_dimensions(s->inplanewidth, s->inplaneheight, w, h, desc);
    set_dimensions(s->in_offset_w, s->in_offset_h, in_offset_w, in_offset_h, desc);

    s->in_width = s->inplanewidth[0];
    s->in_height = s->inplaneheight[0];

    if (s->id_fov > 0.f)
        fov_from_dfov(s->in, s->id_fov, w, h, &s->ih_fov, &s->iv_fov);

    if (s->in_transpose)
        FFSWAP(int, s->in_width, s->in_height);

    switch (s->in) {
    case EQUIRECTANGULAR:
        s->in_transform = xyz_to_equirect;
        err = 0;
        wf = w;
        hf = h;
        break;
    case CUBEMAP_3_2:
        s->in_transform = xyz_to_cube3x2;
        err = prepare_cube_in(ctx);
        wf = w / 3.f * 4.f;
        hf = h;
        break;
    case CUBEMAP_1_6:
        s->in_transform = xyz_to_cube1x6;
        err = prepare_cube_in(ctx);
        wf = w * 4.f;
        hf = h / 3.f;
        break;
    case CUBEMAP_6_1:
        s->in_transform = xyz_to_cube6x1;
        err = prepare_cube_in(ctx);
        wf = w / 3.f * 2.f;
        hf = h * 2.f;
        break;
    case EQUIANGULAR:
        s->in_transform = xyz_to_eac;
        err = prepare_eac_in(ctx);
        wf = w;
        hf = h / 9.f * 8.f;
        break;
    case FLAT:
        s->in_transform = xyz_to_flat;
        err = prepare_flat_in(ctx);
        wf = w;
        hf = h;
        break;
    case PERSPECTIVE:
        av_log(ctx, AV_LOG_ERROR, "Supplied format is not accepted as input.\n");
        return AVERROR(EINVAL);
    case DUAL_FISHEYE:
        s->in_transform = xyz_to_dfisheye;
        err = prepare_fisheye_in(ctx);
        wf = w;
        hf = h;
        break;
    case BARREL:
        s->in_transform = xyz_to_barrel;
        err = 0;
        wf = w / 5.f * 4.f;
        hf = h;
        break;
    case STEREOGRAPHIC:
        s->in_transform = xyz_to_stereographic;
        err = prepare_stereographic_in(ctx);
        wf = w;
        hf = h / 2.f;
        break;
    case MERCATOR:
        s->in_transform = xyz_to_mercator;
        err = 0;
        wf = w;
        hf = h / 2.f;
        break;
    case BALL:
        s->in_transform = xyz_to_ball;
        err = 0;
        wf = w;
        hf = h / 2.f;
        break;
    case HAMMER:
        s->in_transform = xyz_to_hammer;
        err = 0;
        wf = w;
        hf = h;
        break;
    case SINUSOIDAL:
        s->in_transform = xyz_to_sinusoidal;
        err = 0;
        wf = w;
        hf = h;
        break;
    case FISHEYE:
        s->in_transform = xyz_to_fisheye;
        err = prepare_fisheye_in(ctx);
        wf = w * 2;
        hf = h;
        break;
    case PANNINI:
        s->in_transform = xyz_to_pannini;
        err = 0;
        wf = w;
        hf = h;
        break;
    case CYLINDRICAL:
        s->in_transform = xyz_to_cylindrical;
        err = prepare_cylindrical_in(ctx);
        wf = w;
        hf = h * 2.f;
        break;
    case TETRAHEDRON:
        s->in_transform = xyz_to_tetrahedron;
        err = 0;
        wf = w;
        hf = h;
        break;
    case BARREL_SPLIT:
        s->in_transform = xyz_to_barrelsplit;
        err = 0;
        wf = w * 4.f / 3.f;
        hf = h;
        break;
    case TSPYRAMID:
        s->in_transform = xyz_to_tspyramid;
        err = 0;
        wf = w;
        hf = h;
        break;
    case HEQUIRECTANGULAR:
        s->in_transform = xyz_to_hequirect;
        err = 0;
        wf = w * 2.f;
        hf = h;
        break;
    case EQUISOLID:
        s->in_transform = xyz_to_equisolid;
        err = prepare_equisolid_in(ctx);
        wf = w;
        hf = h / 2.f;
        break;
    case ORTHOGRAPHIC:
        s->in_transform = xyz_to_orthographic;
        err = prepare_orthographic_in(ctx);
        wf = w;
        hf = h / 2.f;
        break;
    default:
        av_log(ctx, AV_LOG_ERROR, "Specified input format is not handled.\n");
        return AVERROR_BUG;
    }

    if (err != 0) {
        return err;
    }
    switch (s->out) {
    case EQUIRECTANGULAR:
        s->out_transform = equirect_to_xyz;
        prepare_out = NULL;
        w = lrintf(wf);
        h = lrintf(hf);
        break;
    case CUBEMAP_3_2:
        s->out_transform = cube3x2_to_xyz;
        prepare_out = prepare_cube_out;
        w = lrintf(wf / 4.f * 3.f);
        h = lrintf(hf);
        break;
    case CUBEMAP_1_6:
        s->out_transform = cube1x6_to_xyz;
        prepare_out = prepare_cube_out;
        w = lrintf(wf / 4.f);
        h = lrintf(hf * 3.f);
        break;
    case CUBEMAP_6_1:
        s->out_transform = cube6x1_to_xyz;
        prepare_out = prepare_cube_out;
        w = lrintf(wf / 2.f * 3.f);
        h = lrintf(hf / 2.f);
        break;
    case EQUIANGULAR:
        s->out_transform = eac_to_xyz;
        prepare_out = prepare_eac_out;
        w = lrintf(wf);
        h = lrintf(hf / 8.f * 9.f);
        break;
    case FLAT:
        s->out_transform = flat_to_xyz;
        prepare_out = prepare_flat_out;
        w = lrintf(wf);
        h = lrintf(hf);
        break;
    case DUAL_FISHEYE:
        s->out_transform = dfisheye_to_xyz;
        prepare_out = prepare_fisheye_out;
        w = lrintf(wf);
        h = lrintf(hf);
        break;
    case BARREL:
        s->out_transform = barrel_to_xyz;
        prepare_out = NULL;
        w = lrintf(wf / 4.f * 5.f);
        h = lrintf(hf);
        break;
    case STEREOGRAPHIC:
        s->out_transform = stereographic_to_xyz;
        prepare_out = prepare_stereographic_out;
        w = lrintf(wf);
        h = lrintf(hf * 2.f);
        break;
    case MERCATOR:
        s->out_transform = mercator_to_xyz;
        prepare_out = NULL;
        w = lrintf(wf);
        h = lrintf(hf * 2.f);
        break;
    case BALL:
        s->out_transform = ball_to_xyz;
        prepare_out = NULL;
        w = lrintf(wf);
        h = lrintf(hf * 2.f);
        break;
    case HAMMER:
        s->out_transform = hammer_to_xyz;
        prepare_out = NULL;
        w = lrintf(wf);
        h = lrintf(hf);
        break;
    case SINUSOIDAL:
        s->out_transform = sinusoidal_to_xyz;
        prepare_out = NULL;
        w = lrintf(wf);
        h = lrintf(hf);
        break;
    case FISHEYE:
        s->out_transform = fisheye_to_xyz;
        prepare_out = prepare_fisheye_out;
        w = lrintf(wf * 0.5f);
        h = lrintf(hf);
        break;
    case PANNINI:
        s->out_transform = pannini_to_xyz;
        prepare_out = NULL;
        w = lrintf(wf);
        h = lrintf(hf);
        break;
    case CYLINDRICAL:
        s->out_transform = cylindrical_to_xyz;
        prepare_out = prepare_cylindrical_out;
        w = lrintf(wf);
        h = lrintf(hf * 0.5f);
        break;
    case PERSPECTIVE:
        s->out_transform = perspective_to_xyz;
        prepare_out = NULL;
        w = lrintf(wf / 2.f);
        h = lrintf(hf);
        break;
    case TETRAHEDRON:
        s->out_transform = tetrahedron_to_xyz;
        prepare_out = NULL;
        w = lrintf(wf);
        h = lrintf(hf);
        break;
    case BARREL_SPLIT:
        s->out_transform = barrelsplit_to_xyz;
        prepare_out = NULL;
        w = lrintf(wf / 4.f * 3.f);
        h = lrintf(hf);
        break;
    case TSPYRAMID:
        s->out_transform = tspyramid_to_xyz;
        prepare_out = NULL;
        w = lrintf(wf);
        h = lrintf(hf);
        break;
    case HEQUIRECTANGULAR:
        s->out_transform = hequirect_to_xyz;
        prepare_out = NULL;
        w = lrintf(wf / 2.f);
        h = lrintf(hf);
        break;
    case EQUISOLID:
        s->out_transform = equisolid_to_xyz;
        prepare_out = prepare_equisolid_out;
        w = lrintf(wf);
        h = lrintf(hf * 2.f);
        break;
    case ORTHOGRAPHIC:
        s->out_transform = orthographic_to_xyz;
        prepare_out = prepare_orthographic_out;
        w = lrintf(wf);
        h = lrintf(hf * 2.f);
        break;
    default:
        av_log(ctx, AV_LOG_ERROR, "Specified output format is not handled.\n");
        return AVERROR_BUG;
    }
    // Override resolution with user values if specified
    if (s->width > 0 && s->height <= 0 && s->h_fov > 0.f && s->v_fov > 0.f &&
        s->out == FLAT && s->d_fov == 0.f) {
        w = s->width;
        h = w / tanf(s->h_fov * M_PI / 360.f) * tanf(s->v_fov * M_PI / 360.f);
    } else if (s->width <= 0 && s->height > 0 && s->h_fov > 0.f && s->v_fov > 0.f &&
               s->out == FLAT && s->d_fov == 0.f) {
        h = s->height;
        w = h / tanf(s->v_fov * M_PI / 360.f) * tanf(s->h_fov * M_PI / 360.f);
    } else if (s->width > 0 && s->height > 0) {
        w = s->width;
        h = s->height;
    } else if (s->width > 0 || s->height > 0) {
        av_log(ctx, AV_LOG_ERROR, "Both width and height values should be specified.\n");
        return AVERROR(EINVAL);
    } else {
        if (s->out_transpose)
            FFSWAP(int, w, h);
        if (s->in_transpose)
            FFSWAP(int, w, h);
    }

    s->width  = w;
    s->height = h;

    if (s->d_fov > 0.f)
        fov_from_dfov(s->out, s->d_fov, w, h, &s->h_fov, &s->v_fov);

    if (prepare_out) {
        err = prepare_out(ctx);
        if (err != 0)
            return err;
    }

    set_dimensions(s->pr_width, s->pr_height, w, h, desc);

    switch (s->out_stereo) {
    case STEREO_2D:
        out_offset_w = out_offset_h = 0;
        break;
    case STEREO_SBS:
        out_offset_w = w;
        out_offset_h = 0;
        w *= 2;
        break;
    case STEREO_TB:
        out_offset_w = 0;
        out_offset_h = h;
        h *= 2;
        break;
    default:
        av_assert0(0);
    }

    set_dimensions(s->out_offset_w, s->out_offset_h, out_offset_w, out_offset_h, desc);
    set_dimensions(s->planewidth, s->planeheight, w, h, desc);

    for (int i = 0; i < 4; i++)
        s->uv_linesize[i] = FFALIGN(s->pr_width[i], 8);

    outlink->h = h;
    outlink->w = w;

    s->nb_planes = av_pix_fmt_count_planes(inlink->format);
    have_alpha   = !!(desc->flags & AV_PIX_FMT_FLAG_ALPHA);

    if (desc->log2_chroma_h == desc->log2_chroma_w && desc->log2_chroma_h == 0) {
        s->nb_allocated = 1;
        s->map[0] = s->map[1] = s->map[2] = s->map[3] = 0;
    } else {
        s->nb_allocated = 2;
        s->map[0] = s->map[3] = 0;
        s->map[1] = s->map[2] = 1;
    }

    for (int i = 0; i < s->nb_allocated; i++) {
        err = allocate_plane(s, sizeof_uv, sizeof_ker, sizeof_mask * have_alpha * s->alpha, i);
        if (err < 0)
            return err;
    }

    calculate_rotation_matrix(s->yaw, s->pitch, s->roll, s->rot_mat, s->rotation_order);
    set_mirror_modifier(s->h_flip, s->v_flip, s->d_flip, s->output_mirror_modifier);

    ctx->internal->execute(ctx, v360_slice, NULL, NULL, FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));

    return 0;
}
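
/*
 * Editor's note, hedged usage sketch: with the options declared above the
 * filter is typically invoked along the lines of
 *
 *     ffmpeg -i input.mp4 -vf "v360=input=equirect:output=c3x2" output.mp4
 *
 * which would remap an equirectangular source into a 3x2 cubemap; the exact
 * option set depends on the FFmpeg build, so treat this as an illustration
 * rather than a reference.
 */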
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    V360Context *s = ctx->priv;
    AVFrame *out;
    ThreadData td;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    td.in = in;
    td.out = out;

    ctx->internal->execute(ctx, s->remap_slice, &td, NULL, FFMIN(outlink->h, ff_filter_get_nb_threads(ctx)));

    av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
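
/*
 * Editor's note: runtime option changes are applied by re-running
 * config_output(), which recomputes the remap tables with the new parameters;
 * the table buffers themselves are reused, since allocate_plane() only
 * allocates when a buffer is still NULL.
 */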
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    int ret;

    ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
    if (ret < 0)
        return ret;

    return config_output(ctx->outputs[0]);
}
static av_cold void uninit(AVFilterContext *ctx)
{
    V360Context *s = ctx->priv;

    for (int p = 0; p < s->nb_allocated; p++) {
        av_freep(&s->u[p]);
        av_freep(&s->v[p]);
        av_freep(&s->ker[p]);
    }

    av_freep(&s->mask);
}
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_v360 = {
    .name            = "v360",
    .description     = NULL_IF_CONFIG_SMALL("Convert 360 projection of video."),
    .priv_size       = sizeof(V360Context),
    .uninit          = uninit,
    .query_formats   = query_formats,
    .inputs          = inputs,
    .outputs         = outputs,
    .priv_class      = &v360_class,
    .flags           = AVFILTER_FLAG_SLICE_THREADS,
    .process_command = process_command,
};