/*
 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <inttypes.h>
#include <string.h>
#include <math.h>
#include <stdio.h>
#include "config.h"
#include <assert.h>
#include "swscale.h"
#include "swscale_internal.h"
#include "rgb2rgb.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/cpu.h"
#include "libavutil/avutil.h"
#include "libavutil/mathematics.h"
#include "libavutil/bswap.h"
#include "libavutil/pixdesc.h"

DECLARE_ALIGNED(8, const uint8_t, dither_8x8_1)[8][8] = {
    { 0, 1, 0, 1, 0, 1, 0, 1, },
    { 1, 0, 1, 0, 1, 0, 1, 0, },
    { 0, 1, 0, 1, 0, 1, 0, 1, },
    { 1, 0, 1, 0, 1, 0, 1, 0, },
    { 0, 1, 0, 1, 0, 1, 0, 1, },
    { 1, 0, 1, 0, 1, 0, 1, 0, },
    { 0, 1, 0, 1, 0, 1, 0, 1, },
    { 1, 0, 1, 0, 1, 0, 1, 0, },
};

DECLARE_ALIGNED(8, const uint8_t, dither_8x8_3)[8][8] = {
    { 1, 2, 1, 2, 1, 2, 1, 2, },
    { 3, 0, 3, 0, 3, 0, 3, 0, },
    { 1, 2, 1, 2, 1, 2, 1, 2, },
    { 3, 0, 3, 0, 3, 0, 3, 0, },
    { 1, 2, 1, 2, 1, 2, 1, 2, },
    { 3, 0, 3, 0, 3, 0, 3, 0, },
    { 1, 2, 1, 2, 1, 2, 1, 2, },
    { 3, 0, 3, 0, 3, 0, 3, 0, },
};

DECLARE_ALIGNED(8, const uint8_t, dither_8x8_64)[8][8] = {
    { 18, 34, 30, 46, 17, 33, 29, 45, },
    { 50,  2, 62, 14, 49,  1, 61, 13, },
    { 26, 42, 22, 38, 25, 41, 21, 37, },
    { 58, 10, 54,  6, 57,  9, 53,  5, },
    { 16, 32, 28, 44, 19, 35, 31, 47, },
    { 48,  0, 60, 12, 51,  3, 63, 15, },
    { 24, 40, 20, 36, 27, 43, 23, 39, },
    { 56,  8, 52,  4, 59, 11, 55,  7, },
};

extern const uint8_t dither_8x8_128[8][8];

DECLARE_ALIGNED(8, const uint8_t, dither_8x8_256)[8][8] = {
    {  72, 136, 120, 184,  68, 132, 116, 180, },
    { 200,   8, 248,  56, 196,   4, 244,  52, },
    { 104, 168,  88, 152, 100, 164,  84, 148, },
    { 232,  40, 216,  24, 228,  36, 212,  20, },
    {  64, 128, 102, 176,  76, 140, 124, 188, },
    { 192,   0, 240,  48, 204,  12, 252,  60, },
    {  96, 160,  80, 144, 108, 172,  92, 156, },
    { 224,  32, 208,  16, 236,  44, 220,  28, },
};

#define RGB2YUV_SHIFT 15
#define BY ( (int) (0.114 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BV (-(int) (0.081 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define BU ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GY ( (int) (0.587 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GV (-(int) (0.419 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define GU (-(int) (0.331 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RY ( (int) (0.299 * 219 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RV ( (int) (0.500 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
#define RU (-(int) (0.169 * 224 / 255 * (1 << RGB2YUV_SHIFT) + 0.5))
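
/*
 * Note on the constants above: they are the BT.601 RGB->YUV weights stored as
 * 15-bit fixed-point integers (RGB2YUV_SHIFT == 15). For example, BY evaluates
 * to (int)(0.114 * 219 / 255 * 32768 + 0.5), roughly 3208, so a luma sample
 * can be computed with integer arithmetic only, as in the palette setup inside
 * sws_scale() below:
 *
 *     y = av_clip_uint8((RY * r + GY * g + BY * b +
 *                        (33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
 *
 * where the (33 << (RGB2YUV_SHIFT - 1)) term folds the +16 luma offset and the
 * +0.5 rounding into a single bias (33 == 2 * 16 + 1).
 */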

static void fillPlane(uint8_t *plane, int stride, int width, int height, int y,
                      uint8_t val)
{
    int i;
    uint8_t *ptr = plane + stride * y;
    for (i = 0; i < height; i++) {
        memset(ptr, val, width);
        ptr += stride;
    }
}

static void copyPlane(const uint8_t *src, int srcStride,
                      int srcSliceY, int srcSliceH, int width,
                      uint8_t *dst, int dstStride)
{
    dst += dstStride * srcSliceY;
    if (dstStride == srcStride && srcStride > 0) {
        memcpy(dst, src, srcSliceH * dstStride);
    } else {
        int i;
        for (i = 0; i < srcSliceH; i++) {
            memcpy(dst, src, width);
            src += srcStride;
            dst += dstStride;
        }
    }
}

static int planarToNv12Wrapper(SwsContext *c, const uint8_t *src[],
                               int srcStride[], int srcSliceY,
                               int srcSliceH, uint8_t *dstParam[],
                               int dstStride[])
{
    uint8_t *dst = dstParam[1] + dstStride[1] * srcSliceY / 2;

    copyPlane(src[0], srcStride[0], srcSliceY, srcSliceH, c->srcW,
              dstParam[0], dstStride[0]);

    if (c->dstFormat == AV_PIX_FMT_NV12)
        interleaveBytes(src[1], src[2], dst, c->srcW / 2, srcSliceH / 2,
                        srcStride[1], srcStride[2], dstStride[0]);
    else
        interleaveBytes(src[2], src[1], dst, c->srcW / 2, srcSliceH / 2,
                        srcStride[2], srcStride[1], dstStride[0]);

    return srcSliceH;
}

static int planarToYuy2Wrapper(SwsContext *c, const uint8_t *src[],
                               int srcStride[], int srcSliceY, int srcSliceH,
                               uint8_t *dstParam[], int dstStride[])
{
    uint8_t *dst = dstParam[0] + dstStride[0] * srcSliceY;

    yv12toyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0],
               srcStride[1], dstStride[0]);

    return srcSliceH;
}

static int planarToUyvyWrapper(SwsContext *c, const uint8_t *src[],
                               int srcStride[], int srcSliceY, int srcSliceH,
                               uint8_t *dstParam[], int dstStride[])
{
    uint8_t *dst = dstParam[0] + dstStride[0] * srcSliceY;

    yv12touyvy(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0],
               srcStride[1], dstStride[0]);

    return srcSliceH;
}

static int yuv422pToYuy2Wrapper(SwsContext *c, const uint8_t *src[],
                                int srcStride[], int srcSliceY, int srcSliceH,
                                uint8_t *dstParam[], int dstStride[])
{
    uint8_t *dst = dstParam[0] + dstStride[0] * srcSliceY;

    yuv422ptoyuy2(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0],
                  srcStride[1], dstStride[0]);

    return srcSliceH;
}

static int yuv422pToUyvyWrapper(SwsContext *c, const uint8_t *src[],
                                int srcStride[], int srcSliceY, int srcSliceH,
                                uint8_t *dstParam[], int dstStride[])
{
    uint8_t *dst = dstParam[0] + dstStride[0] * srcSliceY;

    yuv422ptouyvy(src[0], src[1], src[2], dst, c->srcW, srcSliceH, srcStride[0],
                  srcStride[1], dstStride[0]);

    return srcSliceH;
}

static int yuyvToYuv420Wrapper(SwsContext *c, const uint8_t *src[],
                               int srcStride[], int srcSliceY, int srcSliceH,
                               uint8_t *dstParam[], int dstStride[])
{
    uint8_t *ydst = dstParam[0] + dstStride[0] * srcSliceY;
    uint8_t *udst = dstParam[1] + dstStride[1] * srcSliceY / 2;
    uint8_t *vdst = dstParam[2] + dstStride[2] * srcSliceY / 2;

    yuyvtoyuv420(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0],
                 dstStride[1], srcStride[0]);

    if (dstParam[3])
        fillPlane(dstParam[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255);

    return srcSliceH;
}

static int yuyvToYuv422Wrapper(SwsContext *c, const uint8_t *src[],
                               int srcStride[], int srcSliceY, int srcSliceH,
                               uint8_t *dstParam[], int dstStride[])
{
    uint8_t *ydst = dstParam[0] + dstStride[0] * srcSliceY;
    uint8_t *udst = dstParam[1] + dstStride[1] * srcSliceY;
    uint8_t *vdst = dstParam[2] + dstStride[2] * srcSliceY;

    yuyvtoyuv422(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0],
                 dstStride[1], srcStride[0]);

    return srcSliceH;
}

static int uyvyToYuv420Wrapper(SwsContext *c, const uint8_t *src[],
                               int srcStride[], int srcSliceY, int srcSliceH,
                               uint8_t *dstParam[], int dstStride[])
{
    uint8_t *ydst = dstParam[0] + dstStride[0] * srcSliceY;
    uint8_t *udst = dstParam[1] + dstStride[1] * srcSliceY / 2;
    uint8_t *vdst = dstParam[2] + dstStride[2] * srcSliceY / 2;

    uyvytoyuv420(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0],
                 dstStride[1], srcStride[0]);

    if (dstParam[3])
        fillPlane(dstParam[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255);

    return srcSliceH;
}

static int uyvyToYuv422Wrapper(SwsContext *c, const uint8_t *src[],
                               int srcStride[], int srcSliceY, int srcSliceH,
                               uint8_t *dstParam[], int dstStride[])
{
    uint8_t *ydst = dstParam[0] + dstStride[0] * srcSliceY;
    uint8_t *udst = dstParam[1] + dstStride[1] * srcSliceY;
    uint8_t *vdst = dstParam[2] + dstStride[2] * srcSliceY;

    uyvytoyuv422(ydst, udst, vdst, src[0], c->srcW, srcSliceH, dstStride[0],
                 dstStride[1], srcStride[0]);

    return srcSliceH;
}

static void gray8aToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels,
                             const uint8_t *palette)
{
    int i;
    for (i = 0; i < num_pixels; i++)
        ((uint32_t *) dst)[i] = ((const uint32_t *) palette)[src[i << 1]] |
                                (src[(i << 1) + 1] << 24);
}

static void gray8aToPacked32_1(const uint8_t *src, uint8_t *dst, int num_pixels,
                               const uint8_t *palette)
{
    int i;
    for (i = 0; i < num_pixels; i++)
        ((uint32_t *) dst)[i] = ((const uint32_t *) palette)[src[i << 1]] |
                                src[(i << 1) + 1];
}

static void gray8aToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels,
                             const uint8_t *palette)
{
    int i;
    for (i = 0; i < num_pixels; i++) {
        //FIXME slow?
        dst[0] = palette[src[i << 1] * 4 + 0];
        dst[1] = palette[src[i << 1] * 4 + 1];
        dst[2] = palette[src[i << 1] * 4 + 2];
        dst += 3;
    }
}

static int packed_16bpc_bswap(SwsContext *c, const uint8_t *src[],
                              int srcStride[], int srcSliceY, int srcSliceH,
                              uint8_t *dst[], int dstStride[])
{
    int i, j;
    int srcstr = srcStride[0] >> 1;
    int dststr = dstStride[0] >> 1;
    uint16_t *dstPtr = (uint16_t *) dst[0];
    const uint16_t *srcPtr = (const uint16_t *) src[0];
    int min_stride = FFMIN(srcstr, dststr);

    for (i = 0; i < srcSliceH; i++) {
        for (j = 0; j < min_stride; j++) {
            dstPtr[j] = av_bswap16(srcPtr[j]);
        }
        srcPtr += srcstr;
        dstPtr += dststr;
    }

    return srcSliceH;
}

static int palToRgbWrapper(SwsContext *c, const uint8_t *src[], int srcStride[],
                           int srcSliceY, int srcSliceH, uint8_t *dst[],
                           int dstStride[])
{
    const enum AVPixelFormat srcFormat = c->srcFormat;
    const enum AVPixelFormat dstFormat = c->dstFormat;
    void (*conv)(const uint8_t *src, uint8_t *dst, int num_pixels,
                 const uint8_t *palette) = NULL;
    int i;
    uint8_t *dstPtr = dst[0] + dstStride[0] * srcSliceY;
    const uint8_t *srcPtr = src[0];

    if (srcFormat == AV_PIX_FMT_Y400A) {
        switch (dstFormat) {
        case AV_PIX_FMT_RGB32  : conv = gray8aToPacked32;   break;
        case AV_PIX_FMT_BGR32  : conv = gray8aToPacked32;   break;
        case AV_PIX_FMT_BGR32_1: conv = gray8aToPacked32_1; break;
        case AV_PIX_FMT_RGB32_1: conv = gray8aToPacked32_1; break;
        case AV_PIX_FMT_RGB24  : conv = gray8aToPacked24;   break;
        case AV_PIX_FMT_BGR24  : conv = gray8aToPacked24;   break;
        }
    } else if (usePal(srcFormat)) {
        switch (dstFormat) {
        case AV_PIX_FMT_RGB32  : conv = sws_convertPalette8ToPacked32; break;
        case AV_PIX_FMT_BGR32  : conv = sws_convertPalette8ToPacked32; break;
        case AV_PIX_FMT_BGR32_1: conv = sws_convertPalette8ToPacked32; break;
        case AV_PIX_FMT_RGB32_1: conv = sws_convertPalette8ToPacked32; break;
        case AV_PIX_FMT_RGB24  : conv = sws_convertPalette8ToPacked24; break;
        case AV_PIX_FMT_BGR24  : conv = sws_convertPalette8ToPacked24; break;
        }
    }

    if (!conv)
        av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n",
               sws_format_name(srcFormat), sws_format_name(dstFormat));
    else {
        for (i = 0; i < srcSliceH; i++) {
            conv(srcPtr, dstPtr, c->srcW, (uint8_t *) c->pal_rgb);
            srcPtr += srcStride[0];
            dstPtr += dstStride[0];
        }
    }

    return srcSliceH;
}

static void gbr24ptopacked24(const uint8_t *src[], int srcStride[],
                             uint8_t *dst, int dstStride, int srcSliceH,
                             int width)
{
    int x, h, i;
    for (h = 0; h < srcSliceH; h++) {
        uint8_t *dest = dst + dstStride * h;
        for (x = 0; x < width; x++) {
            *dest++ = src[0][x];
            *dest++ = src[1][x];
            *dest++ = src[2][x];
        }

        for (i = 0; i < 3; i++)
            src[i] += srcStride[i];
    }
}

static void gbr24ptopacked32(const uint8_t *src[], int srcStride[],
                             uint8_t *dst, int dstStride, int srcSliceH,
                             int alpha_first, int width)
{
    int x, h, i;
    for (h = 0; h < srcSliceH; h++) {
        uint8_t *dest = dst + dstStride * h;

        if (alpha_first) {
            for (x = 0; x < width; x++) {
                *dest++ = 0xff;
                *dest++ = src[0][x];
                *dest++ = src[1][x];
                *dest++ = src[2][x];
            }
        } else {
            for (x = 0; x < width; x++) {
                *dest++ = src[0][x];
                *dest++ = src[1][x];
                *dest++ = src[2][x];
                *dest++ = 0xff;
            }
        }

        for (i = 0; i < 3; i++)
            src[i] += srcStride[i];
    }
}

static int planarRgbToRgbWrapper(SwsContext *c, const uint8_t *src[],
                                 int srcStride[], int srcSliceY, int srcSliceH,
                                 uint8_t *dst[], int dstStride[])
{
    int alpha_first = 0;
    if (c->srcFormat != AV_PIX_FMT_GBRP) {
        av_log(c, AV_LOG_ERROR, "unsupported planar RGB conversion %s -> %s\n",
               av_get_pix_fmt_name(c->srcFormat),
               av_get_pix_fmt_name(c->dstFormat));
        return srcSliceH;
    }

    switch (c->dstFormat) {
    case AV_PIX_FMT_BGR24:
        gbr24ptopacked24((const uint8_t *[]) { src[1], src[0], src[2] },
                         (int []) { srcStride[1], srcStride[0], srcStride[2] },
                         dst[0] + srcSliceY * dstStride[0], dstStride[0],
                         srcSliceH, c->srcW);
        break;

    case AV_PIX_FMT_RGB24:
        gbr24ptopacked24((const uint8_t *[]) { src[2], src[0], src[1] },
                         (int []) { srcStride[2], srcStride[0], srcStride[1] },
                         dst[0] + srcSliceY * dstStride[0], dstStride[0],
                         srcSliceH, c->srcW);
        break;

    case AV_PIX_FMT_ARGB:
        alpha_first = 1;
    case AV_PIX_FMT_RGBA:
        gbr24ptopacked32((const uint8_t *[]) { src[2], src[0], src[1] },
                         (int []) { srcStride[2], srcStride[0], srcStride[1] },
                         dst[0] + srcSliceY * dstStride[0], dstStride[0],
                         srcSliceH, alpha_first, c->srcW);
        break;

    case AV_PIX_FMT_ABGR:
        alpha_first = 1;
    case AV_PIX_FMT_BGRA:
        gbr24ptopacked32((const uint8_t *[]) { src[1], src[0], src[2] },
                         (int []) { srcStride[1], srcStride[0], srcStride[2] },
                         dst[0] + srcSliceY * dstStride[0], dstStride[0],
                         srcSliceH, alpha_first, c->srcW);
        break;

    default:
        av_log(c, AV_LOG_ERROR,
               "unsupported planar RGB conversion %s -> %s\n",
               av_get_pix_fmt_name(c->srcFormat),
               av_get_pix_fmt_name(c->dstFormat));
    }

    return srcSliceH;
}

#define isRGBA32(x) (             \
           (x) == AV_PIX_FMT_ARGB \
        || (x) == AV_PIX_FMT_RGBA \
        || (x) == AV_PIX_FMT_BGRA \
        || (x) == AV_PIX_FMT_ABGR \
        )

/* {RGB,BGR}{15,16,24,32,32_1} -> {RGB,BGR}{15,16,24,32} */
typedef void (* rgbConvFn) (const uint8_t *, uint8_t *, int);
static rgbConvFn findRgbConvFn(SwsContext *c)
{
    const enum AVPixelFormat srcFormat = c->srcFormat;
    const enum AVPixelFormat dstFormat = c->dstFormat;
    const int srcId = c->srcFormatBpp;
    const int dstId = c->dstFormatBpp;
    rgbConvFn conv = NULL;

    const AVPixFmtDescriptor *desc_src = av_pix_fmt_desc_get(srcFormat);
    const AVPixFmtDescriptor *desc_dst = av_pix_fmt_desc_get(dstFormat);

#define IS_NOT_NE(bpp, desc) \
    (((bpp + 7) >> 3) == 2 && \
     (!(desc->flags & PIX_FMT_BE) != !HAVE_BIGENDIAN))

    /* if this is non-native rgb444/555/565, don't handle it here. */
    if (IS_NOT_NE(srcId, desc_src) || IS_NOT_NE(dstId, desc_dst))
        return NULL;

#define CONV_IS(src, dst) (srcFormat == AV_PIX_FMT_##src && dstFormat == AV_PIX_FMT_##dst)

    if (isRGBA32(srcFormat) && isRGBA32(dstFormat)) {
        if (     CONV_IS(ABGR, RGBA)
              || CONV_IS(ARGB, BGRA)
              || CONV_IS(BGRA, ARGB)
              || CONV_IS(RGBA, ABGR)) conv = shuffle_bytes_3210;
        else if (CONV_IS(ABGR, ARGB)
              || CONV_IS(ARGB, ABGR)) conv = shuffle_bytes_0321;
        else if (CONV_IS(ABGR, BGRA)
              || CONV_IS(ARGB, RGBA)) conv = shuffle_bytes_1230;
        else if (CONV_IS(BGRA, RGBA)
              || CONV_IS(RGBA, BGRA)) conv = shuffle_bytes_2103;
        else if (CONV_IS(BGRA, ABGR)
              || CONV_IS(RGBA, ARGB)) conv = shuffle_bytes_3012;
    } else
    /* BGR -> BGR */
    if ((isBGRinInt(srcFormat) && isBGRinInt(dstFormat)) ||
        (isRGBinInt(srcFormat) && isRGBinInt(dstFormat))) {
        switch (srcId | (dstId << 16)) {
        case 0x000F000C: conv = rgb12to15; break;
        case 0x000F0010: conv = rgb16to15; break;
        case 0x000F0018: conv = rgb24to15; break;
        case 0x000F0020: conv = rgb32to15; break;
        case 0x0010000F: conv = rgb15to16; break;
        case 0x00100018: conv = rgb24to16; break;
        case 0x00100020: conv = rgb32to16; break;
        case 0x0018000F: conv = rgb15to24; break;
        case 0x00180010: conv = rgb16to24; break;
        case 0x00180020: conv = rgb32to24; break;
        case 0x0020000F: conv = rgb15to32; break;
        case 0x00200010: conv = rgb16to32; break;
        case 0x00200018: conv = rgb24to32; break;
        }
    } else if ((isBGRinInt(srcFormat) && isRGBinInt(dstFormat)) ||
               (isRGBinInt(srcFormat) && isBGRinInt(dstFormat))) {
        switch (srcId | (dstId << 16)) {
        case 0x000C000C: conv = rgb12tobgr12; break;
        case 0x000F000F: conv = rgb15tobgr15; break;
        case 0x000F0010: conv = rgb16tobgr15; break;
        case 0x000F0018: conv = rgb24tobgr15; break;
        case 0x000F0020: conv = rgb32tobgr15; break;
        case 0x0010000F: conv = rgb15tobgr16; break;
        case 0x00100010: conv = rgb16tobgr16; break;
        case 0x00100018: conv = rgb24tobgr16; break;
        case 0x00100020: conv = rgb32tobgr16; break;
        case 0x0018000F: conv = rgb15tobgr24; break;
        case 0x00180010: conv = rgb16tobgr24; break;
        case 0x00180018: conv = rgb24tobgr24; break;
        case 0x00180020: conv = rgb32tobgr24; break;
        case 0x0020000F: conv = rgb15tobgr32; break;
        case 0x00200010: conv = rgb16tobgr32; break;
        case 0x00200018: conv = rgb24tobgr32; break;
        }
    }

    return conv;
}

/* {RGB,BGR}{15,16,24,32,32_1} -> {RGB,BGR}{15,16,24,32} */
static int rgbToRgbWrapper(SwsContext *c, const uint8_t *src[], int srcStride[],
                           int srcSliceY, int srcSliceH, uint8_t *dst[],
                           int dstStride[])
{
    const enum AVPixelFormat srcFormat = c->srcFormat;
    const enum AVPixelFormat dstFormat = c->dstFormat;
    const int srcBpp = (c->srcFormatBpp + 7) >> 3;
    const int dstBpp = (c->dstFormatBpp + 7) >> 3;
    rgbConvFn conv = findRgbConvFn(c);

    if (!conv) {
        av_log(c, AV_LOG_ERROR, "internal error %s -> %s converter\n",
               sws_format_name(srcFormat), sws_format_name(dstFormat));
    } else {
        const uint8_t *srcPtr = src[0];
        uint8_t *dstPtr = dst[0];

        if ((srcFormat == AV_PIX_FMT_RGB32_1 || srcFormat == AV_PIX_FMT_BGR32_1) &&
            !isRGBA32(dstFormat))
            srcPtr += ALT32_CORR;

        if ((dstFormat == AV_PIX_FMT_RGB32_1 || dstFormat == AV_PIX_FMT_BGR32_1) &&
            !isRGBA32(srcFormat))
            dstPtr += ALT32_CORR;

        if (dstStride[0] * srcBpp == srcStride[0] * dstBpp && srcStride[0] > 0 &&
            !(srcStride[0] % srcBpp))
            conv(srcPtr, dstPtr + dstStride[0] * srcSliceY,
                 srcSliceH * srcStride[0]);
        else {
            int i;
            dstPtr += dstStride[0] * srcSliceY;

            for (i = 0; i < srcSliceH; i++) {
                conv(srcPtr, dstPtr, c->srcW * srcBpp);
                srcPtr += srcStride[0];
                dstPtr += dstStride[0];
            }
        }
    }
    return srcSliceH;
}

static int bgr24ToYv12Wrapper(SwsContext *c, const uint8_t *src[],
                              int srcStride[], int srcSliceY, int srcSliceH,
                              uint8_t *dst[], int dstStride[])
{
    rgb24toyv12(
        src[0],
        dst[0] +  srcSliceY       * dstStride[0],
        dst[1] + (srcSliceY >> 1) * dstStride[1],
        dst[2] + (srcSliceY >> 1) * dstStride[2],
        c->srcW, srcSliceH,
        dstStride[0], dstStride[1], srcStride[0]);
    if (dst[3])
        fillPlane(dst[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255);
    return srcSliceH;
}

static int yvu9ToYv12Wrapper(SwsContext *c, const uint8_t *src[],
                             int srcStride[], int srcSliceY, int srcSliceH,
                             uint8_t *dst[], int dstStride[])
{
    copyPlane(src[0], srcStride[0], srcSliceY, srcSliceH, c->srcW,
              dst[0], dstStride[0]);

    planar2x(src[1], dst[1] + dstStride[1] * (srcSliceY >> 1), c->chrSrcW,
             srcSliceH >> 2, srcStride[1], dstStride[1]);
    planar2x(src[2], dst[2] + dstStride[2] * (srcSliceY >> 1), c->chrSrcW,
             srcSliceH >> 2, srcStride[2], dstStride[2]);
    if (dst[3])
        fillPlane(dst[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255);
    return srcSliceH;
}

/* unscaled copy-like functions (assume nearly identical formats) */
static int packedCopyWrapper(SwsContext *c, const uint8_t *src[],
                             int srcStride[], int srcSliceY, int srcSliceH,
                             uint8_t *dst[], int dstStride[])
{
    if (dstStride[0] == srcStride[0] && srcStride[0] > 0)
        memcpy(dst[0] + dstStride[0] * srcSliceY, src[0], srcSliceH * dstStride[0]);
    else {
        int i;
        const uint8_t *srcPtr = src[0];
        uint8_t *dstPtr = dst[0] + dstStride[0] * srcSliceY;
        int length = 0;

        /* universal length finder */
        while (length + c->srcW <= FFABS(dstStride[0]) &&
               length + c->srcW <= FFABS(srcStride[0]))
            length += c->srcW;
        assert(length != 0);

        for (i = 0; i < srcSliceH; i++) {
            memcpy(dstPtr, srcPtr, length);
            srcPtr += srcStride[0];
            dstPtr += dstStride[0];
        }
    }
    return srcSliceH;
}

#define clip9(x)  av_clip_uintp2(x,  9)
#define clip10(x) av_clip_uintp2(x, 10)
#define DITHER_COPY(dst, dstStride, wfunc, src, srcStride, rfunc, dithers, shift, clip) \
    for (i = 0; i < height; i++) { \
        const uint8_t *dither = dithers[i & 7]; \
        for (j = 0; j < length - 7; j += 8) { \
            wfunc(&dst[j + 0], clip((rfunc(&src[j + 0]) + dither[0]) >> shift)); \
            wfunc(&dst[j + 1], clip((rfunc(&src[j + 1]) + dither[1]) >> shift)); \
            wfunc(&dst[j + 2], clip((rfunc(&src[j + 2]) + dither[2]) >> shift)); \
            wfunc(&dst[j + 3], clip((rfunc(&src[j + 3]) + dither[3]) >> shift)); \
            wfunc(&dst[j + 4], clip((rfunc(&src[j + 4]) + dither[4]) >> shift)); \
            wfunc(&dst[j + 5], clip((rfunc(&src[j + 5]) + dither[5]) >> shift)); \
            wfunc(&dst[j + 6], clip((rfunc(&src[j + 6]) + dither[6]) >> shift)); \
            wfunc(&dst[j + 7], clip((rfunc(&src[j + 7]) + dither[7]) >> shift)); \
        } \
        for (; j < length; j++) \
            wfunc(&dst[j], (rfunc(&src[j]) + dither[j & 7]) >> shift); \
        dst += dstStride; \
        src += srcStride; \
    }
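
/*
 * DITHER_COPY reduces bit depth with an ordered dither: each sample is biased
 * by a position-dependent threshold from one of the dither_8x8_* tables above
 * and then shifted down. For the 16-bit to 8-bit case used below this expands
 * essentially to, per sample,
 *
 *     out = av_clip_uint8((in + dither_8x8_256[i & 7][j & 7]) >> 8);
 *
 * so the rounding point varies across an 8x8 pattern instead of truncating
 * every pixel identically, which keeps smooth gradients from banding.
 */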

static int planarCopyWrapper(SwsContext *c, const uint8_t *src[],
                             int srcStride[], int srcSliceY, int srcSliceH,
                             uint8_t *dst[], int dstStride[])
{
    const AVPixFmtDescriptor *desc_src = av_pix_fmt_desc_get(c->srcFormat);
    const AVPixFmtDescriptor *desc_dst = av_pix_fmt_desc_get(c->dstFormat);
    int plane, i, j;

    for (plane = 0; plane < 4; plane++) {
        int length = (plane == 0 || plane == 3) ? c->srcW  : -((-c->srcW  ) >> c->chrDstHSubSample);
        int y      = (plane == 0 || plane == 3) ? srcSliceY: -((-srcSliceY) >> c->chrDstVSubSample);
        int height = (plane == 0 || plane == 3) ? srcSliceH: -((-srcSliceH) >> c->chrDstVSubSample);
        const uint8_t *srcPtr = src[plane];
        uint8_t *dstPtr = dst[plane] + dstStride[plane] * y;

        if (!dst[plane])
            continue;
        // ignore palette for GRAY8
        if (plane == 1 && !dst[2])
            continue;
        if (!src[plane] || (plane == 1 && !src[2])) {
            if (is16BPS(c->dstFormat))
                length *= 2;
            fillPlane(dst[plane], dstStride[plane], length, height, y,
                      (plane == 3) ? 255 : 128);
        } else {
            if (is9_OR_10BPS(c->srcFormat)) {
                const int src_depth = desc_src->comp[plane].depth_minus1 + 1;
                const int dst_depth = desc_dst->comp[plane].depth_minus1 + 1;
                const uint16_t *srcPtr2 = (const uint16_t *) srcPtr;

                if (is16BPS(c->dstFormat)) {
                    uint16_t *dstPtr2 = (uint16_t *) dstPtr;
#define COPY9_OR_10TO16(rfunc, wfunc) \
                    for (i = 0; i < height; i++) { \
                        for (j = 0; j < length; j++) { \
                            int srcpx = rfunc(&srcPtr2[j]); \
                            wfunc(&dstPtr2[j], (srcpx << (16 - src_depth)) | (srcpx >> (2 * src_depth - 16))); \
                        } \
                        dstPtr2 += dstStride[plane] / 2; \
                        srcPtr2 += srcStride[plane] / 2; \
                    }
                    if (isBE(c->dstFormat)) {
                        if (isBE(c->srcFormat)) {
                            COPY9_OR_10TO16(AV_RB16, AV_WB16);
                        } else {
                            COPY9_OR_10TO16(AV_RL16, AV_WB16);
                        }
                    } else {
                        if (isBE(c->srcFormat)) {
                            COPY9_OR_10TO16(AV_RB16, AV_WL16);
                        } else {
                            COPY9_OR_10TO16(AV_RL16, AV_WL16);
                        }
                    }
                } else if (is9_OR_10BPS(c->dstFormat)) {
                    uint16_t *dstPtr2 = (uint16_t *) dstPtr;
#define COPY9_OR_10TO9_OR_10(loop) \
                    for (i = 0; i < height; i++) { \
                        for (j = 0; j < length; j++) { \
                            loop; \
                        } \
                        dstPtr2 += dstStride[plane] / 2; \
                        srcPtr2 += srcStride[plane] / 2; \
                    }
#define COPY9_OR_10TO9_OR_10_2(rfunc, wfunc) \
                    if (dst_depth > src_depth) { \
                        COPY9_OR_10TO9_OR_10(int srcpx = rfunc(&srcPtr2[j]); \
                            wfunc(&dstPtr2[j], (srcpx << 1) | (srcpx >> 9))); \
                    } else if (dst_depth < src_depth) { \
                        DITHER_COPY(dstPtr2, dstStride[plane] / 2, wfunc, \
                                    srcPtr2, srcStride[plane] / 2, rfunc, \
                                    dither_8x8_1, 1, clip9); \
                    } else { \
                        COPY9_OR_10TO9_OR_10(wfunc(&dstPtr2[j], rfunc(&srcPtr2[j]))); \
                    }
                    if (isBE(c->dstFormat)) {
                        if (isBE(c->srcFormat)) {
                            COPY9_OR_10TO9_OR_10_2(AV_RB16, AV_WB16);
                        } else {
                            COPY9_OR_10TO9_OR_10_2(AV_RL16, AV_WB16);
                        }
                    } else {
                        if (isBE(c->srcFormat)) {
                            COPY9_OR_10TO9_OR_10_2(AV_RB16, AV_WL16);
                        } else {
                            COPY9_OR_10TO9_OR_10_2(AV_RL16, AV_WL16);
                        }
                    }
                } else {
#define W8(a, b) { *(a) = (b); }
#define COPY9_OR_10TO8(rfunc) \
                    if (src_depth == 9) { \
                        DITHER_COPY(dstPtr, dstStride[plane], W8, \
                                    srcPtr2, srcStride[plane] / 2, rfunc, \
                                    dither_8x8_1, 1, av_clip_uint8); \
                    } else { \
                        DITHER_COPY(dstPtr, dstStride[plane], W8, \
                                    srcPtr2, srcStride[plane] / 2, rfunc, \
                                    dither_8x8_3, 2, av_clip_uint8); \
                    }
                    if (isBE(c->srcFormat)) {
                        COPY9_OR_10TO8(AV_RB16);
                    } else {
                        COPY9_OR_10TO8(AV_RL16);
                    }
                }
            } else if (is9_OR_10BPS(c->dstFormat)) {
                const int dst_depth = desc_dst->comp[plane].depth_minus1 + 1;
                uint16_t *dstPtr2 = (uint16_t *) dstPtr;

                if (is16BPS(c->srcFormat)) {
                    const uint16_t *srcPtr2 = (const uint16_t *) srcPtr;
#define COPY16TO9_OR_10(rfunc, wfunc) \
                    if (dst_depth == 9) { \
                        DITHER_COPY(dstPtr2, dstStride[plane] / 2, wfunc, \
                                    srcPtr2, srcStride[plane] / 2, rfunc, \
                                    dither_8x8_128, 7, clip9); \
                    } else { \
                        DITHER_COPY(dstPtr2, dstStride[plane] / 2, wfunc, \
                                    srcPtr2, srcStride[plane] / 2, rfunc, \
                                    dither_8x8_64, 6, clip10); \
                    }
                    if (isBE(c->dstFormat)) {
                        if (isBE(c->srcFormat)) {
                            COPY16TO9_OR_10(AV_RB16, AV_WB16);
                        } else {
                            COPY16TO9_OR_10(AV_RL16, AV_WB16);
                        }
                    } else {
                        if (isBE(c->srcFormat)) {
                            COPY16TO9_OR_10(AV_RB16, AV_WL16);
                        } else {
                            COPY16TO9_OR_10(AV_RL16, AV_WL16);
                        }
                    }
                } else /* 8bit */ {
#define COPY8TO9_OR_10(wfunc) \
                    for (i = 0; i < height; i++) { \
                        for (j = 0; j < length; j++) { \
                            const int srcpx = srcPtr[j]; \
                            wfunc(&dstPtr2[j], (srcpx << (dst_depth - 8)) | (srcpx >> (16 - dst_depth))); \
                        } \
                        dstPtr2 += dstStride[plane] / 2; \
                        srcPtr  += srcStride[plane]; \
                    }
                    if (isBE(c->dstFormat)) {
                        COPY8TO9_OR_10(AV_WB16);
                    } else {
                        COPY8TO9_OR_10(AV_WL16);
                    }
                }
            } else if (is16BPS(c->srcFormat) && !is16BPS(c->dstFormat)) {
                const uint16_t *srcPtr2 = (const uint16_t *) srcPtr;
#define COPY16TO8(rfunc) \
                DITHER_COPY(dstPtr, dstStride[plane], W8, \
                            srcPtr2, srcStride[plane] / 2, rfunc, \
                            dither_8x8_256, 8, av_clip_uint8);
                if (isBE(c->srcFormat)) {
                    COPY16TO8(AV_RB16);
                } else {
                    COPY16TO8(AV_RL16);
                }
            } else if (!is16BPS(c->srcFormat) && is16BPS(c->dstFormat)) {
                for (i = 0; i < height; i++) {
                    for (j = 0; j < length; j++) {
                        dstPtr[ j << 1     ] = srcPtr[j];
                        dstPtr[(j << 1) + 1] = srcPtr[j];
                    }
                    srcPtr += srcStride[plane];
                    dstPtr += dstStride[plane];
                }
            } else if (is16BPS(c->srcFormat) && is16BPS(c->dstFormat) &&
                       isBE(c->srcFormat) != isBE(c->dstFormat)) {
                for (i = 0; i < height; i++) {
                    for (j = 0; j < length; j++)
                        ((uint16_t *) dstPtr)[j] = av_bswap16(((const uint16_t *) srcPtr)[j]);
                    srcPtr += srcStride[plane];
                    dstPtr += dstStride[plane];
                }
            } else if (dstStride[plane] == srcStride[plane] &&
                       srcStride[plane] > 0 && srcStride[plane] == length) {
                memcpy(dst[plane] + dstStride[plane] * y, src[plane],
                       height * dstStride[plane]);
            } else {
                if (is16BPS(c->srcFormat) && is16BPS(c->dstFormat))
                    length *= 2;
                else if (!desc_src->comp[0].depth_minus1)
                    length >>= 3; // monowhite/black
                for (i = 0; i < height; i++) {
                    memcpy(dstPtr, srcPtr, length);
                    srcPtr += srcStride[plane];
                    dstPtr += dstStride[plane];
                }
            }
        }
    }
    return srcSliceH;
}

#define IS_DIFFERENT_ENDIANESS(src_fmt, dst_fmt, pix_fmt)      \
    ((src_fmt == pix_fmt ## BE && dst_fmt == pix_fmt ## LE) || \
     (src_fmt == pix_fmt ## LE && dst_fmt == pix_fmt ## BE))

void ff_get_unscaled_swscale(SwsContext *c)
{
    const enum AVPixelFormat srcFormat = c->srcFormat;
    const enum AVPixelFormat dstFormat = c->dstFormat;
    const int flags = c->flags;
    const int dstH = c->dstH;
    int needsDither;

    needsDither = isAnyRGB(dstFormat) &&
                  c->dstFormatBpp < 24 &&
                  (c->dstFormatBpp < c->srcFormatBpp || (!isAnyRGB(srcFormat)));

    /* yv12_to_nv12 */
    if ((srcFormat == AV_PIX_FMT_YUV420P || srcFormat == AV_PIX_FMT_YUVA420P) &&
        (dstFormat == AV_PIX_FMT_NV12 || dstFormat == AV_PIX_FMT_NV21)) {
        c->swScale = planarToNv12Wrapper;
    }
    /* yuv2bgr */
    if ((srcFormat == AV_PIX_FMT_YUV420P || srcFormat == AV_PIX_FMT_YUV422P ||
         srcFormat == AV_PIX_FMT_YUVA420P) && isAnyRGB(dstFormat) &&
        !(flags & SWS_ACCURATE_RND) && !(dstH & 1)) {
        c->swScale = ff_yuv2rgb_get_func_ptr(c);
    }

    if (srcFormat == AV_PIX_FMT_YUV410P &&
        (dstFormat == AV_PIX_FMT_YUV420P || dstFormat == AV_PIX_FMT_YUVA420P) &&
        !(flags & SWS_BITEXACT)) {
        c->swScale = yvu9ToYv12Wrapper;
    }

    /* bgr24toYV12 */
    if (srcFormat == AV_PIX_FMT_BGR24 &&
        (dstFormat == AV_PIX_FMT_YUV420P || dstFormat == AV_PIX_FMT_YUVA420P) &&
        !(flags & SWS_ACCURATE_RND))
        c->swScale = bgr24ToYv12Wrapper;

    /* RGB/BGR -> RGB/BGR (no dither needed forms) */
    if (isAnyRGB(srcFormat) && isAnyRGB(dstFormat) && findRgbConvFn(c)
        && (!needsDither || (c->flags & (SWS_FAST_BILINEAR | SWS_POINT))))
        c->swScale = rgbToRgbWrapper;

    if (isPlanarRGB(srcFormat) && isPackedRGB(dstFormat))
        c->swScale = planarRgbToRgbWrapper;

    /* bswap 16 bits per pixel/component packed formats */
    if (IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_BGR444) ||
        IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_BGR48)  ||
        IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_BGR555) ||
        IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_BGR565) ||
        IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_GRAY16) ||
        IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_RGB444) ||
        IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_RGB48)  ||
        IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_RGB555) ||
        IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_RGB565))
        c->swScale = packed_16bpc_bswap;

    if ((usePal(srcFormat) && (
         dstFormat == AV_PIX_FMT_RGB32   ||
         dstFormat == AV_PIX_FMT_RGB32_1 ||
         dstFormat == AV_PIX_FMT_RGB24   ||
         dstFormat == AV_PIX_FMT_BGR32   ||
         dstFormat == AV_PIX_FMT_BGR32_1 ||
         dstFormat == AV_PIX_FMT_BGR24)))
        c->swScale = palToRgbWrapper;

    if (srcFormat == AV_PIX_FMT_YUV422P) {
        if (dstFormat == AV_PIX_FMT_YUYV422)
            c->swScale = yuv422pToYuy2Wrapper;
        else if (dstFormat == AV_PIX_FMT_UYVY422)
            c->swScale = yuv422pToUyvyWrapper;
    }

    /* LQ converters if -sws 0 or -sws 4 */
    if (c->flags & (SWS_FAST_BILINEAR | SWS_POINT)) {
        /* yv12_to_yuy2 */
        if (srcFormat == AV_PIX_FMT_YUV420P || srcFormat == AV_PIX_FMT_YUVA420P) {
            if (dstFormat == AV_PIX_FMT_YUYV422)
                c->swScale = planarToYuy2Wrapper;
            else if (dstFormat == AV_PIX_FMT_UYVY422)
                c->swScale = planarToUyvyWrapper;
        }
    }
    if (srcFormat == AV_PIX_FMT_YUYV422 &&
        (dstFormat == AV_PIX_FMT_YUV420P || dstFormat == AV_PIX_FMT_YUVA420P))
        c->swScale = yuyvToYuv420Wrapper;
    if (srcFormat == AV_PIX_FMT_UYVY422 &&
        (dstFormat == AV_PIX_FMT_YUV420P || dstFormat == AV_PIX_FMT_YUVA420P))
        c->swScale = uyvyToYuv420Wrapper;
    if (srcFormat == AV_PIX_FMT_YUYV422 && dstFormat == AV_PIX_FMT_YUV422P)
        c->swScale = yuyvToYuv422Wrapper;
    if (srcFormat == AV_PIX_FMT_UYVY422 && dstFormat == AV_PIX_FMT_YUV422P)
        c->swScale = uyvyToYuv422Wrapper;

    /* simple copy */
    if ( srcFormat == dstFormat ||
        (srcFormat == AV_PIX_FMT_YUVA420P && dstFormat == AV_PIX_FMT_YUV420P) ||
        (srcFormat == AV_PIX_FMT_YUV420P && dstFormat == AV_PIX_FMT_YUVA420P) ||
        (isPlanarYUV(srcFormat) && isGray(dstFormat)) ||
        (isPlanarYUV(dstFormat) && isGray(srcFormat)) ||
        (isGray(dstFormat) && isGray(srcFormat)) ||
        (isPlanarYUV(srcFormat) && isPlanarYUV(dstFormat) &&
         c->chrDstHSubSample == c->chrSrcHSubSample &&
         c->chrDstVSubSample == c->chrSrcVSubSample &&
         dstFormat != AV_PIX_FMT_NV12 && dstFormat != AV_PIX_FMT_NV21 &&
         srcFormat != AV_PIX_FMT_NV12 && srcFormat != AV_PIX_FMT_NV21))
    {
        if (isPacked(c->srcFormat))
            c->swScale = packedCopyWrapper;
        else /* Planar YUV or gray */
            c->swScale = planarCopyWrapper;
    }

    if (ARCH_BFIN)
        ff_bfin_get_unscaled_swscale(c);
    if (HAVE_ALTIVEC)
        ff_swscale_get_unscaled_altivec(c);
}

static void reset_ptr(const uint8_t *src[], int format)
{
    if (!isALPHA(format))
        src[3] = NULL;
    if (!isPlanar(format)) {
        src[3] = src[2] = NULL;

        if (!usePal(format))
            src[1] = NULL;
    }
}

static int check_image_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt,
                                const int linesizes[4])
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    int i;

    for (i = 0; i < 4; i++) {
        int plane = desc->comp[i].plane;
        if (!data[plane] || !linesizes[plane])
            return 0;
    }

    return 1;
}

/**
 * swscale wrapper, so we don't need to export the SwsContext.
 * Assumes planar YUV to be in YUV order instead of YVU.
 */
int attribute_align_arg sws_scale(struct SwsContext *c,
                                  const uint8_t * const srcSlice[],
                                  const int srcStride[], int srcSliceY,
                                  int srcSliceH, uint8_t *const dst[],
                                  const int dstStride[])
{
    int i;
    const uint8_t *src2[4] = { srcSlice[0], srcSlice[1], srcSlice[2], srcSlice[3] };
    uint8_t *dst2[4] = { dst[0], dst[1], dst[2], dst[3] };

    // do not mess up sliceDir if we have a "trailing" 0-size slice
    if (srcSliceH == 0)
        return 0;

    if (!check_image_pointers(srcSlice, c->srcFormat, srcStride)) {
        av_log(c, AV_LOG_ERROR, "bad src image pointers\n");
        return 0;
    }
    if (!check_image_pointers(dst, c->dstFormat, dstStride)) {
        av_log(c, AV_LOG_ERROR, "bad dst image pointers\n");
        return 0;
    }

    if (c->sliceDir == 0 && srcSliceY != 0 && srcSliceY + srcSliceH != c->srcH) {
        av_log(c, AV_LOG_ERROR, "Slices start in the middle!\n");
        return 0;
    }
    if (c->sliceDir == 0) {
        if (srcSliceY == 0) c->sliceDir = 1; else c->sliceDir = -1;
    }

    if (usePal(c->srcFormat)) {
        for (i = 0; i < 256; i++) {
            int p, r, g, b, y, u, v;
            if (c->srcFormat == AV_PIX_FMT_PAL8) {
                p = ((const uint32_t *)(srcSlice[1]))[i];
                r = (p >> 16) & 0xFF;
                g = (p >>  8) & 0xFF;
                b =  p        & 0xFF;
            } else if (c->srcFormat == AV_PIX_FMT_RGB8) {
                r = ( i >> 5     ) * 36;
                g = ((i >> 2) & 7) * 36;
                b = ( i       & 3) * 85;
            } else if (c->srcFormat == AV_PIX_FMT_BGR8) {
                b = ( i >> 6     ) * 85;
                g = ((i >> 3) & 7) * 36;
                r = ( i       & 7) * 36;
            } else if (c->srcFormat == AV_PIX_FMT_RGB4_BYTE) {
                r = ( i >> 3     ) * 255;
                g = ((i >> 1) & 3) * 85;
                b = ( i       & 1) * 255;
            } else if (c->srcFormat == AV_PIX_FMT_GRAY8 ||
                       c->srcFormat == AV_PIX_FMT_Y400A) {
                r = g = b = i;
            } else {
                assert(c->srcFormat == AV_PIX_FMT_BGR4_BYTE);
                b = ( i >> 3     ) * 255;
                g = ((i >> 1) & 3) * 85;
                r = ( i       & 1) * 255;
            }
            y = av_clip_uint8((RY * r + GY * g + BY * b + ( 33 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
            u = av_clip_uint8((RU * r + GU * g + BU * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
            v = av_clip_uint8((RV * r + GV * g + BV * b + (257 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT);
            c->pal_yuv[i] = y + (u << 8) + (v << 16);

            switch (c->dstFormat) {
            case AV_PIX_FMT_BGR32:
#if !HAVE_BIGENDIAN
            case AV_PIX_FMT_RGB24:
#endif
                c->pal_rgb[i] = r + (g << 8) + (b << 16);
                break;
            case AV_PIX_FMT_BGR32_1:
#if HAVE_BIGENDIAN
            case AV_PIX_FMT_BGR24:
#endif
                c->pal_rgb[i] = (r + (g << 8) + (b << 16)) << 8;
                break;
            case AV_PIX_FMT_RGB32_1:
#if HAVE_BIGENDIAN
            case AV_PIX_FMT_RGB24:
#endif
                c->pal_rgb[i] = (b + (g << 8) + (r << 16)) << 8;
                break;
            case AV_PIX_FMT_RGB32:
#if !HAVE_BIGENDIAN
            case AV_PIX_FMT_BGR24:
#endif
            default:
                c->pal_rgb[i] = b + (g << 8) + (r << 16);
            }
        }
    }

    // copy strides, so they can safely be modified
    if (c->sliceDir == 1) {
        // slices go from top to bottom
        int srcStride2[4] = { srcStride[0], srcStride[1], srcStride[2],
                              srcStride[3] };
        int dstStride2[4] = { dstStride[0], dstStride[1], dstStride[2],
                              dstStride[3] };

        reset_ptr(src2, c->srcFormat);
        reset_ptr((const uint8_t **) dst2, c->dstFormat);

        /* reset slice direction at end of frame */
        if (srcSliceY + srcSliceH == c->srcH)
            c->sliceDir = 0;

        return c->swScale(c, src2, srcStride2, srcSliceY, srcSliceH, dst2,
                          dstStride2);
    } else {
        // slices go from bottom to top => we flip the image internally
        int srcStride2[4] = { -srcStride[0], -srcStride[1], -srcStride[2],
                              -srcStride[3] };
        int dstStride2[4] = { -dstStride[0], -dstStride[1], -dstStride[2],
                              -dstStride[3] };

        src2[0] += (srcSliceH - 1) * srcStride[0];
        if (!usePal(c->srcFormat))
            src2[1] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[1];
        src2[2] += ((srcSliceH >> c->chrSrcVSubSample) - 1) * srcStride[2];
        src2[3] += (srcSliceH - 1) * srcStride[3];
        dst2[0] += ( c->dstH                         - 1) * dstStride[0];
        dst2[1] += ((c->dstH >> c->chrDstVSubSample) - 1) * dstStride[1];
        dst2[2] += ((c->dstH >> c->chrDstVSubSample) - 1) * dstStride[2];
        dst2[3] += ( c->dstH                         - 1) * dstStride[3];

        reset_ptr(src2, c->srcFormat);
        reset_ptr((const uint8_t **) dst2, c->dstFormat);

        /* reset slice direction at end of frame */
        if (!srcSliceY)
            c->sliceDir = 0;

        return c->swScale(c, src2, srcStride2, c->srcH - srcSliceY - srcSliceH,
                          srcSliceH, dst2, dstStride2);
    }
}
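
/*
 * Minimal usage sketch (illustrative only; srcW, srcH, yuv and yuvStride are
 * placeholders supplied by the caller, not defined in this file): a caller
 * obtains a context with sws_getContext() and then feeds whole frames or
 * consecutive slices to sws_scale(), e.g. converting a YUV420P frame to RGB24
 * at the same size:
 *
 *     #include "libswscale/swscale.h"
 *     #include "libavutil/imgutils.h"
 *
 *     struct SwsContext *ctx = sws_getContext(srcW, srcH, AV_PIX_FMT_YUV420P,
 *                                             srcW, srcH, AV_PIX_FMT_RGB24,
 *                                             SWS_POINT, NULL, NULL, NULL);
 *     uint8_t *rgb[4];
 *     int rgbStride[4];
 *     av_image_alloc(rgb, rgbStride, srcW, srcH, AV_PIX_FMT_RGB24, 16);
 *     sws_scale(ctx, (const uint8_t * const *) yuv, yuvStride, 0, srcH,
 *               rgb, rgbStride);
 *     av_freep(&rgb[0]);
 *     sws_freeContext(ctx);
 *
 * Because source and destination sizes match and SWS_ACCURATE_RND is not set,
 * such a conversion can typically be served by one of the unscaled fast paths
 * selected in ff_get_unscaled_swscale() above.
 */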

/* Convert 8-bit paletted image data to the same packed 32-bit format as the palette. */
void sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst,
                                   int num_pixels, const uint8_t *palette)
{
    int i;
    for (i = 0; i < num_pixels; i++)
        ((uint32_t *) dst)[i] = ((const uint32_t *) palette)[src[i]];
}

/* Palette format: ABCD -> dst format: ABC */
void sws_convertPalette8ToPacked24(const uint8_t *src, uint8_t *dst,
                                   int num_pixels, const uint8_t *palette)
{
    int i;
    for (i = 0; i < num_pixels; i++) {
        //FIXME slow?
        dst[0] = palette[src[i] * 4 + 0];
        dst[1] = palette[src[i] * 4 + 1];
        dst[2] = palette[src[i] * 4 + 2];
        dst += 3;
    }
}
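
/*
 * Illustrative example (frame, y and WIDTH are hypothetical caller-side
 * names): expanding one row of a PAL8 picture, whose 256-entry palette is
 * stored as packed 32-bit values in frame->data[1], into a 32-bit buffer:
 *
 *     uint32_t line32[WIDTH];
 *     sws_convertPalette8ToPacked32(frame->data[0] + y * frame->linesize[0],
 *                                   (uint8_t *) line32, WIDTH,
 *                                   frame->data[1]);
 *
 * The output byte order follows whatever packed order the palette entries
 * already use, which is what "the same packed 32-bit format as the palette"
 * in the comment above refers to.
 */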