/*
 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>

#include "libavutil/x86/asm.h"
#include "libswscale/swscale_internal.h"

#undef REAL_MOVNTQ
#undef MOVNTQ
#undef PREFETCH

#if COMPILE_TEMPLATE_MMXEXT
#define PREFETCH "prefetchnta"
#else
#define PREFETCH " # nop"
#endif

#if COMPILE_TEMPLATE_MMXEXT
#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif
#define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
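
/* Vertical chroma scaling: walk the (coefficient, source line) pairs that
 * start at CHR_MMX_FILTER_OFFSET in the context, accumulating the
 * pmulhw-weighted U samples in mm3 and V samples in mm4 (seeded with the
 * rounder); a NULL source pointer terminates the inner loop. */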
#define YSCALEYUV2PACKEDX_UV \
    __asm__ volatile(\
    "xor %%"FF_REG_a", %%"FF_REG_a" \n\t"\
    ".p2align 4 \n\t"\
    "nop \n\t"\
    "1: \n\t"\
    "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"FF_REG_d" \n\t"\
    "mov (%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
    "movq %%mm3, %%mm4 \n\t"\
    ".p2align 4 \n\t"\
    "2: \n\t"\
    "movq 8(%%"FF_REG_d"), %%mm0 \n\t" /* filterCoeff */\
    "movq (%%"FF_REG_S", %%"FF_REG_a"), %%mm2 \n\t" /* UsrcData */\
    "add %6, %%"FF_REG_S" \n\t"\
    "movq (%%"FF_REG_S", %%"FF_REG_a"), %%mm5 \n\t" /* VsrcData */\
    "add $16, %%"FF_REG_d" \n\t"\
    "mov (%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t"\
    "pmulhw %%mm0, %%mm5 \n\t"\
    "paddw %%mm2, %%mm3 \n\t"\
    "paddw %%mm5, %%mm4 \n\t"\
    "test %%"FF_REG_S", %%"FF_REG_S" \n\t"\
    " jnz 2b \n\t"

#define YSCALEYUV2PACKEDX_YA(offset,coeff,src1,src2,dst1,dst2) \
    "lea "offset"(%0), %%"FF_REG_d" \n\t"\
    "mov (%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), "#dst1" \n\t"\
    "movq "#dst1", "#dst2" \n\t"\
    ".p2align 4 \n\t"\
    "2: \n\t"\
    "movq 8(%%"FF_REG_d"), "#coeff" \n\t" /* filterCoeff */\
    "movq (%%"FF_REG_S", %%"FF_REG_a", 2), "#src1" \n\t" /* Y1srcData */\
    "movq 8(%%"FF_REG_S", %%"FF_REG_a", 2), "#src2" \n\t" /* Y2srcData */\
    "add $16, %%"FF_REG_d" \n\t"\
    "mov (%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
    "pmulhw "#coeff", "#src1" \n\t"\
    "pmulhw "#coeff", "#src2" \n\t"\
    "paddw "#src1", "#dst1" \n\t"\
    "paddw "#src2", "#dst2" \n\t"\
    "test %%"FF_REG_S", %%"FF_REG_S" \n\t"\
    " jnz 2b \n\t"

#define YSCALEYUV2PACKEDX \
    YSCALEYUV2PACKEDX_UV \
    YSCALEYUV2PACKEDX_YA(LUM_MMX_FILTER_OFFSET,%%mm0,%%mm2,%%mm5,%%mm1,%%mm7)

#define YSCALEYUV2PACKEDX_END \
    :: "r" (&c->redDither), \
       "m" (dummy), "m" (dummy), "m" (dummy),\
       "r" (dest), "m" (dstW_reg), "m"(uv_off) \
    : "%"FF_REG_a, "%"FF_REG_d, "%"FF_REG_S \
    );
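
/* The _ACCURATE variants below perform the same vertical scaling but keep
 * 32-bit intermediates via pmaddwd on interleaved sample pairs (using the
 * packed APCK_PTR2/APCK_COEF/APCK_SIZE layout) instead of the faster,
 * coarser pmulhw path above. */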
#define YSCALEYUV2PACKEDX_ACCURATE_UV \
    __asm__ volatile(\
    "xor %%"FF_REG_a", %%"FF_REG_a" \n\t"\
    ".p2align 4 \n\t"\
    "nop \n\t"\
    "1: \n\t"\
    "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"FF_REG_d" \n\t"\
    "mov (%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
    "pxor %%mm4, %%mm4 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    ".p2align 4 \n\t"\
    "2: \n\t"\
    "movq (%%"FF_REG_S", %%"FF_REG_a"), %%mm0 \n\t" /* UsrcData */\
    "add %6, %%"FF_REG_S" \n\t"\
    "movq (%%"FF_REG_S", %%"FF_REG_a"), %%mm2 \n\t" /* VsrcData */\
    "mov "STR(APCK_PTR2)"(%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
    "movq (%%"FF_REG_S", %%"FF_REG_a"), %%mm1 \n\t" /* UsrcData */\
    "movq %%mm0, %%mm3 \n\t"\
    "punpcklwd %%mm1, %%mm0 \n\t"\
    "punpckhwd %%mm1, %%mm3 \n\t"\
    "movq "STR(APCK_COEF)"(%%"FF_REG_d"), %%mm1 \n\t" /* filterCoeff */\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm3 \n\t"\
    "paddd %%mm0, %%mm4 \n\t"\
    "paddd %%mm3, %%mm5 \n\t"\
    "add %6, %%"FF_REG_S" \n\t"\
    "movq (%%"FF_REG_S", %%"FF_REG_a"), %%mm3 \n\t" /* VsrcData */\
    "mov "STR(APCK_SIZE)"(%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
    "add $"STR(APCK_SIZE)", %%"FF_REG_d" \n\t"\
    "test %%"FF_REG_S", %%"FF_REG_S" \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "punpcklwd %%mm3, %%mm2 \n\t"\
    "punpckhwd %%mm3, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm2 \n\t"\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "paddd %%mm2, %%mm6 \n\t"\
    "paddd %%mm0, %%mm7 \n\t"\
    " jnz 2b \n\t"\
    "psrad $16, %%mm4 \n\t"\
    "psrad $16, %%mm5 \n\t"\
    "psrad $16, %%mm6 \n\t"\
    "psrad $16, %%mm7 \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
    "packssdw %%mm5, %%mm4 \n\t"\
    "packssdw %%mm7, %%mm6 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm0, %%mm6 \n\t"\
    "movq %%mm4, "U_TEMP"(%0) \n\t"\
    "movq %%mm6, "V_TEMP"(%0) \n\t"

#define YSCALEYUV2PACKEDX_ACCURATE_YA(offset) \
    "lea "offset"(%0), %%"FF_REG_d" \n\t"\
    "mov (%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
    "pxor %%mm1, %%mm1 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    ".p2align 4 \n\t"\
    "2: \n\t"\
    "movq (%%"FF_REG_S", %%"FF_REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
    "movq 8(%%"FF_REG_S", %%"FF_REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
    "mov "STR(APCK_PTR2)"(%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
    "movq (%%"FF_REG_S", %%"FF_REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
    "movq %%mm0, %%mm3 \n\t"\
    "punpcklwd %%mm4, %%mm0 \n\t"\
    "punpckhwd %%mm4, %%mm3 \n\t"\
    "movq "STR(APCK_COEF)"(%%"FF_REG_d"), %%mm4 \n\t" /* filterCoeff */\
    "pmaddwd %%mm4, %%mm0 \n\t"\
    "pmaddwd %%mm4, %%mm3 \n\t"\
    "paddd %%mm0, %%mm1 \n\t"\
    "paddd %%mm3, %%mm5 \n\t"\
    "movq 8(%%"FF_REG_S", %%"FF_REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
    "mov "STR(APCK_SIZE)"(%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
    "add $"STR(APCK_SIZE)", %%"FF_REG_d" \n\t"\
    "test %%"FF_REG_S", %%"FF_REG_S" \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "punpcklwd %%mm3, %%mm2 \n\t"\
    "punpckhwd %%mm3, %%mm0 \n\t"\
    "pmaddwd %%mm4, %%mm2 \n\t"\
    "pmaddwd %%mm4, %%mm0 \n\t"\
    "paddd %%mm2, %%mm7 \n\t"\
    "paddd %%mm0, %%mm6 \n\t"\
    " jnz 2b \n\t"\
    "psrad $16, %%mm1 \n\t"\
    "psrad $16, %%mm5 \n\t"\
    "psrad $16, %%mm7 \n\t"\
    "psrad $16, %%mm6 \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
    "packssdw %%mm5, %%mm1 \n\t"\
    "packssdw %%mm6, %%mm7 \n\t"\
    "paddw %%mm0, %%mm1 \n\t"\
    "paddw %%mm0, %%mm7 \n\t"\
    "movq "U_TEMP"(%0), %%mm3 \n\t"\
    "movq "V_TEMP"(%0), %%mm4 \n\t"

#define YSCALEYUV2PACKEDX_ACCURATE \
    YSCALEYUV2PACKEDX_ACCURATE_UV \
    YSCALEYUV2PACKEDX_ACCURATE_YA(LUM_MMX_FILTER_OFFSET)

#define YSCALEYUV2RGBX \
    "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
    "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"
#define REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) \
    "movq "#b", "#q2" \n\t" /* B */\
    "movq "#r", "#t" \n\t" /* R */\
    "punpcklbw "#g", "#b" \n\t" /* GBGBGBGB 0 */\
    "punpcklbw "#a", "#r" \n\t" /* ARARARAR 0 */\
    "punpckhbw "#g", "#q2" \n\t" /* GBGBGBGB 2 */\
    "punpckhbw "#a", "#t" \n\t" /* ARARARAR 2 */\
    "movq "#b", "#q0" \n\t" /* GBGBGBGB 0 */\
    "movq "#q2", "#q3" \n\t" /* GBGBGBGB 2 */\
    "punpcklwd "#r", "#q0" \n\t" /* ARGBARGB 0 */\
    "punpckhwd "#r", "#b" \n\t" /* ARGBARGB 1 */\
    "punpcklwd "#t", "#q2" \n\t" /* ARGBARGB 2 */\
    "punpckhwd "#t", "#q3" \n\t" /* ARGBARGB 3 */\
\
    MOVNTQ( q0,   (dst, index, 4))\
    MOVNTQ(  b,  8(dst, index, 4))\
    MOVNTQ( q2, 16(dst, index, 4))\
    MOVNTQ( q3, 24(dst, index, 4))\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)

static void RENAME(yuv2rgb32_X_ar)(SwsContext *c, const int16_t *lumFilter,
                                   const int16_t **lumSrc, int lumFilterSize,
                                   const int16_t *chrFilter, const int16_t **chrUSrc,
                                   const int16_t **chrVSrc,
                                   int chrFilterSize, const int16_t **alpSrc,
                                   uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy = 0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_off_byte;

    if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
        YSCALEYUV2PACKEDX_ACCURATE
        YSCALEYUV2RGBX
        "movq %%mm2, "U_TEMP"(%0) \n\t"
        "movq %%mm4, "V_TEMP"(%0) \n\t"
        "movq %%mm5, "Y_TEMP"(%0) \n\t"
        YSCALEYUV2PACKEDX_ACCURATE_YA(ALP_MMX_FILTER_OFFSET)
        "movq "Y_TEMP"(%0), %%mm5 \n\t"
        "psraw $3, %%mm1 \n\t"
        "psraw $3, %%mm7 \n\t"
        "packuswb %%mm7, %%mm1 \n\t"
        WRITEBGR32(%4, %5, %%FF_REGa, %%mm3, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm2, %%mm6)
        YSCALEYUV2PACKEDX_END
    } else {
        YSCALEYUV2PACKEDX_ACCURATE
        YSCALEYUV2RGBX
        "pcmpeqd %%mm7, %%mm7 \n\t"
        WRITEBGR32(%4, %5, %%FF_REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
        YSCALEYUV2PACKEDX_END
    }
}

static void RENAME(yuv2rgb32_X)(SwsContext *c, const int16_t *lumFilter,
                                const int16_t **lumSrc, int lumFilterSize,
                                const int16_t *chrFilter, const int16_t **chrUSrc,
                                const int16_t **chrVSrc,
                                int chrFilterSize, const int16_t **alpSrc,
                                uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy = 0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_off_byte;

    if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
        YSCALEYUV2PACKEDX
        YSCALEYUV2RGBX
        YSCALEYUV2PACKEDX_YA(ALP_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7)
        "psraw $3, %%mm1 \n\t"
        "psraw $3, %%mm7 \n\t"
        "packuswb %%mm7, %%mm1 \n\t"
        WRITEBGR32(%4, %5, %%FF_REGa, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
        YSCALEYUV2PACKEDX_END
    } else {
        YSCALEYUV2PACKEDX
        YSCALEYUV2RGBX
        "pcmpeqd %%mm7, %%mm7 \n\t"
        WRITEBGR32(%4, %5, %%FF_REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
        YSCALEYUV2PACKEDX_END
    }
}
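
/* RGB565 output: mask each component down to 5/6/5 bits, interleave and
 * store 8 pixels (two quadwords) per iteration; mm7 must be zero on entry. */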
#define REAL_WRITERGB16(dst, dstw, index) \
    "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
    "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
    "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
    "psrlq $3, %%mm2 \n\t"\
\
    "movq %%mm2, %%mm1 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
\
    "punpcklbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm5, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm5, %%mm1 \n\t"\
\
    "psllq $3, %%mm3 \n\t"\
    "psllq $3, %%mm4 \n\t"\
\
    "por %%mm3, %%mm2 \n\t"\
    "por %%mm4, %%mm1 \n\t"\
\
    MOVNTQ(%%mm2,  (dst, index, 2))\
    MOVNTQ(%%mm1, 8(dst, index, 2))\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITERGB16(dst, dstw, index) REAL_WRITERGB16(dst, dstw, index)

static void RENAME(yuv2rgb565_X_ar)(SwsContext *c, const int16_t *lumFilter,
                                    const int16_t **lumSrc, int lumFilterSize,
                                    const int16_t *chrFilter, const int16_t **chrUSrc,
                                    const int16_t **chrVSrc,
                                    int chrFilterSize, const int16_t **alpSrc,
                                    uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy = 0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_off_byte;

    YSCALEYUV2PACKEDX_ACCURATE
    YSCALEYUV2RGBX
    "pxor %%mm7, %%mm7 \n\t"
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
    "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
    "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
    "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
#endif
    WRITERGB16(%4, %5, %%FF_REGa)
    YSCALEYUV2PACKEDX_END
}

static void RENAME(yuv2rgb565_X)(SwsContext *c, const int16_t *lumFilter,
                                 const int16_t **lumSrc, int lumFilterSize,
                                 const int16_t *chrFilter, const int16_t **chrUSrc,
                                 const int16_t **chrVSrc,
                                 int chrFilterSize, const int16_t **alpSrc,
                                 uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy = 0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_off_byte;

    YSCALEYUV2PACKEDX
    YSCALEYUV2RGBX
    "pxor %%mm7, %%mm7 \n\t"
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
    "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
    "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
    "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
#endif
    WRITERGB16(%4, %5, %%FF_REGa)
    YSCALEYUV2PACKEDX_END
}

#define REAL_WRITERGB15(dst, dstw, index) \
    "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
    "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
    "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
    "psrlq $3, %%mm2 \n\t"\
    "psrlq $1, %%mm5 \n\t"\
\
    "movq %%mm2, %%mm1 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
\
    "punpcklbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm5, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm5, %%mm1 \n\t"\
\
    "psllq $2, %%mm3 \n\t"\
    "psllq $2, %%mm4 \n\t"\
\
    "por %%mm3, %%mm2 \n\t"\
    "por %%mm4, %%mm1 \n\t"\
\
    MOVNTQ(%%mm2,  (dst, index, 2))\
    MOVNTQ(%%mm1, 8(dst, index, 2))\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITERGB15(dst, dstw, index) REAL_WRITERGB15(dst, dstw, index)

static void RENAME(yuv2rgb555_X_ar)(SwsContext *c, const int16_t *lumFilter,
                                    const int16_t **lumSrc, int lumFilterSize,
                                    const int16_t *chrFilter, const int16_t **chrUSrc,
                                    const int16_t **chrVSrc,
                                    int chrFilterSize, const int16_t **alpSrc,
                                    uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy = 0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_off_byte;

    YSCALEYUV2PACKEDX_ACCURATE
    YSCALEYUV2RGBX
    "pxor %%mm7, %%mm7 \n\t"
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
    "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
    "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
    "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
#endif
    WRITERGB15(%4, %5, %%FF_REGa)
    YSCALEYUV2PACKEDX_END
}

static void RENAME(yuv2rgb555_X)(SwsContext *c, const int16_t *lumFilter,
                                 const int16_t **lumSrc, int lumFilterSize,
                                 const int16_t *chrFilter, const int16_t **chrUSrc,
                                 const int16_t **chrVSrc,
                                 int chrFilterSize, const int16_t **alpSrc,
                                 uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy = 0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_off_byte;

    YSCALEYUV2PACKEDX
    YSCALEYUV2RGBX
    "pxor %%mm7, %%mm7 \n\t"
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
    "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
    "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
    "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
#endif
    WRITERGB15(%4, %5, %%FF_REGa)
    YSCALEYUV2PACKEDX_END
}
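
/* BGR24 writers: the plain MMX version builds 24-bit pixels with
 * punpck/psllq/por shuffles, the MMXEXT one with pshufw and the
 * ff_M24A/B/C masks; both store 8 pixels (24 bytes) per iteration. */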
#define WRITEBGR24MMX(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq %%mm2, %%mm1 \n\t" /* B */\
    "movq %%mm5, %%mm6 \n\t" /* R */\
    "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
    "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
    "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
    "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
    "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
    "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
    "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
    "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
    "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
    "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
    "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
    "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
    "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
    "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
\
    "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
    "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
    "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
    "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
\
    "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
    "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
    "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
    "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
\
    "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
    "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
    "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
    "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
    MOVNTQ(%%mm0, (dst))\
\
    "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
    "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
    "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
    "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
    MOVNTQ(%%mm6, 8(dst))\
\
    "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
    "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
    "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
    MOVNTQ(%%mm5, 16(dst))\
\
    "add $24, "#dst" \n\t"\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#define WRITEBGR24MMXEXT(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
    "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
    "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
    "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
    "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
\
    "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
    "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
    "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
\
    "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
    "por %%mm1, %%mm6 \n\t"\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, (dst))\
\
    "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
    "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
    "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
    "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
\
    "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
    "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
    "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
\
    "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, 8(dst))\
\
    "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B7 B6 */\
    "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
    "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
\
    "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
    "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
    "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
\
    "por %%mm1, %%mm3 \n\t"\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, 16(dst))\
\
    "add $24, "#dst" \n\t"\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#if COMPILE_TEMPLATE_MMXEXT
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMXEXT(dst, dstw, index)
#else
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
#endif

static void RENAME(yuv2bgr24_X_ar)(SwsContext *c, const int16_t *lumFilter,
                                   const int16_t **lumSrc, int lumFilterSize,
                                   const int16_t *chrFilter, const int16_t **chrUSrc,
                                   const int16_t **chrVSrc,
                                   int chrFilterSize, const int16_t **alpSrc,
                                   uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy = 0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_off_byte;

    YSCALEYUV2PACKEDX_ACCURATE
    YSCALEYUV2RGBX
    "pxor %%mm7, %%mm7 \n\t"
    "lea (%%"FF_REG_a", %%"FF_REG_a", 2), %%"FF_REG_c" \n\t" // FIXME optimize
    "add %4, %%"FF_REG_c" \n\t"
    WRITEBGR24(%%FF_REGc, %5, %%FF_REGa)
    :: "r" (&c->redDither),
       "m" (dummy), "m" (dummy), "m" (dummy),
       "r" (dest), "m" (dstW_reg), "m"(uv_off)
    : "%"FF_REG_a, "%"FF_REG_c, "%"FF_REG_d, "%"FF_REG_S
    );
}

static void RENAME(yuv2bgr24_X)(SwsContext *c, const int16_t *lumFilter,
                                const int16_t **lumSrc, int lumFilterSize,
                                const int16_t *chrFilter, const int16_t **chrUSrc,
                                const int16_t **chrVSrc,
                                int chrFilterSize, const int16_t **alpSrc,
                                uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy = 0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_off_byte;

    YSCALEYUV2PACKEDX
    YSCALEYUV2RGBX
    "pxor %%mm7, %%mm7 \n\t"
    "lea (%%"FF_REG_a", %%"FF_REG_a", 2), %%"FF_REG_c" \n\t" // FIXME optimize
    "add %4, %%"FF_REG_c" \n\t"
    WRITEBGR24(%%FF_REGc, %5, %%FF_REGa)
    :: "r" (&c->redDither),
       "m" (dummy), "m" (dummy), "m" (dummy),
       "r" (dest), "m" (dstW_reg), "m"(uv_off)
    : "%"FF_REG_a, "%"FF_REG_c, "%"FF_REG_d, "%"FF_REG_S
    );
}

#define REAL_WRITEYUY2(dst, dstw, index) \
    "packuswb %%mm3, %%mm3 \n\t"\
    "packuswb %%mm4, %%mm4 \n\t"\
    "packuswb %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm4, %%mm3 \n\t"\
    "movq %%mm1, %%mm7 \n\t"\
    "punpcklbw %%mm3, %%mm1 \n\t"\
    "punpckhbw %%mm3, %%mm7 \n\t"\
\
    MOVNTQ(%%mm1,  (dst, index, 2))\
    MOVNTQ(%%mm7, 8(dst, index, 2))\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)

static void RENAME(yuv2yuyv422_X_ar)(SwsContext *c, const int16_t *lumFilter,
                                     const int16_t **lumSrc, int lumFilterSize,
                                     const int16_t *chrFilter, const int16_t **chrUSrc,
                                     const int16_t **chrVSrc,
                                     int chrFilterSize, const int16_t **alpSrc,
                                     uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy = 0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_off_byte;

    YSCALEYUV2PACKEDX_ACCURATE
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
    "psraw $3, %%mm3 \n\t"
    "psraw $3, %%mm4 \n\t"
    "psraw $3, %%mm1 \n\t"
    "psraw $3, %%mm7 \n\t"
    WRITEYUY2(%4, %5, %%FF_REGa)
    YSCALEYUV2PACKEDX_END
}

static void RENAME(yuv2yuyv422_X)(SwsContext *c, const int16_t *lumFilter,
                                  const int16_t **lumSrc, int lumFilterSize,
                                  const int16_t *chrFilter, const int16_t **chrUSrc,
                                  const int16_t **chrVSrc,
                                  int chrFilterSize, const int16_t **alpSrc,
                                  uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy = 0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_off_byte;

    YSCALEYUV2PACKEDX
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
    "psraw $3, %%mm3 \n\t"
    "psraw $3, %%mm4 \n\t"
    "psraw $3, %%mm1 \n\t"
    "psraw $3, %%mm7 \n\t"
    WRITEYUY2(%4, %5, %%FF_REGa)
    YSCALEYUV2PACKEDX_END
}
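
/* Two-row ("bilinear") vertical scaling used by the yuv2*_2 functions:
 * blend a pair of luma/chroma lines with the per-call weights stored at
 * LUM_MMX_FILTER_OFFSET+8 / CHR_MMX_FILTER_OFFSET+8 in the context. */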
#define REAL_YSCALEYUV2RGB_UV(index, c) \
    "xor "#index", "#index" \n\t"\
    ".p2align 4 \n\t"\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "add "UV_OFF_BYTE"("#c"), "#index" \n\t"\
    "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "sub "UV_OFF_BYTE"("#c"), "#index" \n\t"\
    "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
    "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
    "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
    "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
    "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */

#define REAL_YSCALEYUV2RGB_YA(index, c, b1, b2) \
    "movq ("#b1", "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
    "movq ("#b2", "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
    "movq 8("#b1", "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
    "movq 8("#b2", "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
    "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
    "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
    "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/

#define REAL_YSCALEYUV2RGB_COEFF(c) \
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"

#define YSCALEYUV2RGB_YA(index, c, b1, b2) REAL_YSCALEYUV2RGB_YA(index, c, b1, b2)

#define YSCALEYUV2RGB(index, c) \
    REAL_YSCALEYUV2RGB_UV(index, c) \
    REAL_YSCALEYUV2RGB_YA(index, c, %0, %1) \
    REAL_YSCALEYUV2RGB_COEFF(c)

/**
 * vertical bilinear scale YV12 to RGB
 */
static void RENAME(yuv2rgb32_2)(SwsContext *c, const int16_t *buf[2],
                                const int16_t *ubuf[2], const int16_t *vbuf[2],
                                const int16_t *abuf[2], uint8_t *dest,
                                int dstW, int yalpha, int uvalpha, int y)
{
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];

    if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
        const int16_t *abuf0 = abuf[0], *abuf1 = abuf[1];
#if ARCH_X86_64
        __asm__ volatile(
            YSCALEYUV2RGB(%%r8, %5)
            YSCALEYUV2RGB_YA(%%r8, %5, %6, %7)
            "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
            "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
            "packuswb %%mm7, %%mm1 \n\t"
            WRITEBGR32(%4, 8280(%5), %%r8, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "r" (dest),
               "a" (&c->redDither),
               "r" (abuf0), "r" (abuf1)
            : "%r8"
        );
#else
        *(const uint16_t **)(&c->u_temp) = abuf0;
        *(const uint16_t **)(&c->v_temp) = abuf1;
        __asm__ volatile(
            "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"FF_REG_b" \n\t"
            "push %%"FF_REG_BP" \n\t"
            YSCALEYUV2RGB(%%FF_REGBP, %5)
            "push %0 \n\t"
            "push %1 \n\t"
            "mov "U_TEMP"(%5), %0 \n\t"
            "mov "V_TEMP"(%5), %1 \n\t"
            YSCALEYUV2RGB_YA(%%FF_REGBP, %5, %0, %1)
            "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
            "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
            "packuswb %%mm7, %%mm1 \n\t"
            "pop %1 \n\t"
            "pop %0 \n\t"
            WRITEBGR32(%%FF_REGb, 8280(%5), %%FF_REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
            "pop %%"FF_REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
#endif
    } else {
        __asm__ volatile(
            "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"FF_REG_b" \n\t"
            "push %%"FF_REG_BP" \n\t"
            YSCALEYUV2RGB(%%FF_REGBP, %5)
            "pcmpeqd %%mm7, %%mm7 \n\t"
            WRITEBGR32(%%FF_REGb, 8280(%5), %%FF_REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
            "pop %%"FF_REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    }
}

static void RENAME(yuv2bgr24_2)(SwsContext *c, const int16_t *buf[2],
                                const int16_t *ubuf[2], const int16_t *vbuf[2],
                                const int16_t *abuf[2], uint8_t *dest,
                                int dstW, int yalpha, int uvalpha, int y)
{
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];

    // Note: 8280 == DSTW_OFFSET, but the preprocessor can't handle that there :(
    __asm__ volatile(
        "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
        "mov %4, %%"FF_REG_b" \n\t"
        "push %%"FF_REG_BP" \n\t"
        YSCALEYUV2RGB(%%FF_REGBP, %5)
        "pxor %%mm7, %%mm7 \n\t"
        WRITEBGR24(%%FF_REGb, 8280(%5), %%FF_REGBP)
        "pop %%"FF_REG_BP" \n\t"
        "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
        :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
           "a" (&c->redDither)
    );
}

static void RENAME(yuv2rgb555_2)(SwsContext *c, const int16_t *buf[2],
                                 const int16_t *ubuf[2], const int16_t *vbuf[2],
                                 const int16_t *abuf[2], uint8_t *dest,
                                 int dstW, int yalpha, int uvalpha, int y)
{
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];

    // Note: 8280 == DSTW_OFFSET, but the preprocessor can't handle that there :(
    __asm__ volatile(
        "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
        "mov %4, %%"FF_REG_b" \n\t"
        "push %%"FF_REG_BP" \n\t"
        YSCALEYUV2RGB(%%FF_REGBP, %5)
        "pxor %%mm7, %%mm7 \n\t"
        /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
        "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
        "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
        "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
        WRITERGB15(%%FF_REGb, 8280(%5), %%FF_REGBP)
        "pop %%"FF_REG_BP" \n\t"
        "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
        :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
           "a" (&c->redDither)
    );
}

static void RENAME(yuv2rgb565_2)(SwsContext *c, const int16_t *buf[2],
                                 const int16_t *ubuf[2], const int16_t *vbuf[2],
                                 const int16_t *abuf[2], uint8_t *dest,
                                 int dstW, int yalpha, int uvalpha, int y)
{
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];

    // Note: 8280 == DSTW_OFFSET, but the preprocessor can't handle that there :(
    __asm__ volatile(
        "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
        "mov %4, %%"FF_REG_b" \n\t"
        "push %%"FF_REG_BP" \n\t"
        YSCALEYUV2RGB(%%FF_REGBP, %5)
        "pxor %%mm7, %%mm7 \n\t"
        /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
        "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
        "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
        "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
        WRITERGB16(%%FF_REGb, 8280(%5), %%FF_REGBP)
        "pop %%"FF_REG_BP" \n\t"
        "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
        :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
           "a" (&c->redDither)
    );
}

#define REAL_YSCALEYUV2PACKED(index, c) \
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1 \n\t"\
    "psraw $3, %%mm0 \n\t"\
    "psraw $3, %%mm1 \n\t"\
    "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
    "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
    "xor "#index", "#index" \n\t"\
    ".p2align 4 \n\t"\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "add "UV_OFF_BYTE"("#c"), "#index" \n\t"\
    "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "sub "UV_OFF_BYTE"("#c"), "#index" \n\t"\
    "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
    "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
    "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
    "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>7*/\
    "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>7*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
    "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
    "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
    "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
    "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
    "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
    "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>7*/\
    "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>7*/\
    "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
    "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/

#define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)

static void RENAME(yuv2yuyv422_2)(SwsContext *c, const int16_t *buf[2],
                                  const int16_t *ubuf[2], const int16_t *vbuf[2],
                                  const int16_t *abuf[2], uint8_t *dest,
                                  int dstW, int yalpha, int uvalpha, int y)
{
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];

    // Note: 8280 == DSTW_OFFSET, but the preprocessor can't handle that there :(
    __asm__ volatile(
        "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
        "mov %4, %%"FF_REG_b" \n\t"
        "push %%"FF_REG_BP" \n\t"
        YSCALEYUV2PACKED(%%FF_REGBP, %5)
        WRITEYUY2(%%FF_REGb, 8280(%5), %%FF_REGBP)
        "pop %%"FF_REG_BP" \n\t"
        "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
        :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
           "a" (&c->redDither)
    );
}
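
/* Single-row variants used by the yuv2*_1 functions: no vertical blending,
 * only a shift into output range.  The *1b flavours still average the two
 * chroma rows; the callers take them when uvalpha >= 2048. */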
#define REAL_YSCALEYUV2RGB1(index, c) \
    "xor "#index", "#index" \n\t"\
    ".p2align 4 \n\t"\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
    "add "UV_OFF_BYTE"("#c"), "#index" \n\t"\
    "movq (%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
    "sub "UV_OFF_BYTE"("#c"), "#index" \n\t"\
    "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
    "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"
#define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)

// do vertical chrominance interpolation
#define REAL_YSCALEYUV2RGB1b(index, c) \
    "xor "#index", "#index" \n\t"\
    ".p2align 4 \n\t"\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "add "UV_OFF_BYTE"("#c"), "#index" \n\t"\
    "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "sub "UV_OFF_BYTE"("#c"), "#index" \n\t"\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
    "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
    "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"
#define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)

#define REAL_YSCALEYUV2RGB1_ALPHA(index) \
    "movq (%1, "#index", 2), %%mm7 \n\t" /* abuf0[index ] */\
    "movq 8(%1, "#index", 2), %%mm1 \n\t" /* abuf0[index+4] */\
    "psraw $7, %%mm7 \n\t" /* abuf0[index ] >>7 */\
    "psraw $7, %%mm1 \n\t" /* abuf0[index+4] >>7 */\
    "packuswb %%mm1, %%mm7 \n\t"
#define YSCALEYUV2RGB1_ALPHA(index) REAL_YSCALEYUV2RGB1_ALPHA(index)

/**
 * YV12 to RGB without scaling or interpolating
 */
static void RENAME(yuv2rgb32_1)(SwsContext *c, const int16_t *buf0,
                                const int16_t *ubuf[2], const int16_t *vbuf[2],
                                const int16_t *abuf0, uint8_t *dest,
                                int dstW, int uvalpha, int y)
{
    const int16_t *ubuf0 = ubuf[0];
    const int16_t *buf1 = buf0; //FIXME needed for RGB1/BGR1

    if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
        const int16_t *ubuf1 = ubuf[0];
        if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
            __asm__ volatile(
                "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"FF_REG_b" \n\t"
                "push %%"FF_REG_BP" \n\t"
                YSCALEYUV2RGB1(%%FF_REGBP, %5)
                YSCALEYUV2RGB1_ALPHA(%%FF_REGBP)
                WRITEBGR32(%%FF_REGb, 8280(%5), %%FF_REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                "pop %%"FF_REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
                :: "c" (buf0), "d" (abuf0), "S" (ubuf0), "D" (ubuf1), "m" (dest),
                   "a" (&c->redDither)
            );
        } else {
            __asm__ volatile(
                "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"FF_REG_b" \n\t"
                "push %%"FF_REG_BP" \n\t"
                YSCALEYUV2RGB1(%%FF_REGBP, %5)
                "pcmpeqd %%mm7, %%mm7 \n\t"
                WRITEBGR32(%%FF_REGb, 8280(%5), %%FF_REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                "pop %%"FF_REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
                :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
                   "a" (&c->redDither)
            );
        }
    } else {
        const int16_t *ubuf1 = ubuf[1];
        if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
            __asm__ volatile(
                "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"FF_REG_b" \n\t"
                "push %%"FF_REG_BP" \n\t"
                YSCALEYUV2RGB1b(%%FF_REGBP, %5)
                YSCALEYUV2RGB1_ALPHA(%%FF_REGBP)
                WRITEBGR32(%%FF_REGb, 8280(%5), %%FF_REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                "pop %%"FF_REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
                :: "c" (buf0), "d" (abuf0), "S" (ubuf0), "D" (ubuf1), "m" (dest),
                   "a" (&c->redDither)
            );
        } else {
            __asm__ volatile(
                "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"FF_REG_b" \n\t"
                "push %%"FF_REG_BP" \n\t"
                YSCALEYUV2RGB1b(%%FF_REGBP, %5)
                "pcmpeqd %%mm7, %%mm7 \n\t"
                WRITEBGR32(%%FF_REGb, 8280(%5), %%FF_REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                "pop %%"FF_REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
                :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
                   "a" (&c->redDither)
            );
        }
    }
}

static void RENAME(yuv2bgr24_1)(SwsContext *c, const int16_t *buf0,
                                const int16_t *ubuf[2], const int16_t *vbuf[2],
                                const int16_t *abuf0, uint8_t *dest,
                                int dstW, int uvalpha, int y)
{
    const int16_t *ubuf0 = ubuf[0];
    const int16_t *buf1 = buf0; //FIXME needed for RGB1/BGR1

    if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
        const int16_t *ubuf1 = ubuf[0];
        __asm__ volatile(
            "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"FF_REG_b" \n\t"
            "push %%"FF_REG_BP" \n\t"
            YSCALEYUV2RGB1(%%FF_REGBP, %5)
            "pxor %%mm7, %%mm7 \n\t"
            WRITEBGR24(%%FF_REGb, 8280(%5), %%FF_REGBP)
            "pop %%"FF_REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    } else {
        const int16_t *ubuf1 = ubuf[1];
        __asm__ volatile(
            "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"FF_REG_b" \n\t"
            "push %%"FF_REG_BP" \n\t"
            YSCALEYUV2RGB1b(%%FF_REGBP, %5)
            "pxor %%mm7, %%mm7 \n\t"
            WRITEBGR24(%%FF_REGb, 8280(%5), %%FF_REGBP)
            "pop %%"FF_REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    }
}

static void RENAME(yuv2rgb555_1)(SwsContext *c, const int16_t *buf0,
                                 const int16_t *ubuf[2], const int16_t *vbuf[2],
                                 const int16_t *abuf0, uint8_t *dest,
                                 int dstW, int uvalpha, int y)
{
    const int16_t *ubuf0 = ubuf[0];
    const int16_t *buf1 = buf0; //FIXME needed for RGB1/BGR1

    if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
        const int16_t *ubuf1 = ubuf[0];
        __asm__ volatile(
            "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"FF_REG_b" \n\t"
            "push %%"FF_REG_BP" \n\t"
            YSCALEYUV2RGB1(%%FF_REGBP, %5)
            "pxor %%mm7, %%mm7 \n\t"
            /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
            "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
            "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
            "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
            WRITERGB15(%%FF_REGb, 8280(%5), %%FF_REGBP)
            "pop %%"FF_REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    } else {
        const int16_t *ubuf1 = ubuf[1];
        __asm__ volatile(
            "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"FF_REG_b" \n\t"
            "push %%"FF_REG_BP" \n\t"
            YSCALEYUV2RGB1b(%%FF_REGBP, %5)
            "pxor %%mm7, %%mm7 \n\t"
            /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
            "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
            "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
            "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
            WRITERGB15(%%FF_REGb, 8280(%5), %%FF_REGBP)
            "pop %%"FF_REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    }
}

static void RENAME(yuv2rgb565_1)(SwsContext *c, const int16_t *buf0,
                                 const int16_t *ubuf[2], const int16_t *vbuf[2],
                                 const int16_t *abuf0, uint8_t *dest,
                                 int dstW, int uvalpha, int y)
{
    const int16_t *ubuf0 = ubuf[0];
    const int16_t *buf1 = buf0; //FIXME needed for RGB1/BGR1

    if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
        const int16_t *ubuf1 = ubuf[0];
        __asm__ volatile(
            "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"FF_REG_b" \n\t"
            "push %%"FF_REG_BP" \n\t"
            YSCALEYUV2RGB1(%%FF_REGBP, %5)
            "pxor %%mm7, %%mm7 \n\t"
            /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
            "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
            "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
            "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
            WRITERGB16(%%FF_REGb, 8280(%5), %%FF_REGBP)
            "pop %%"FF_REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    } else {
        const int16_t *ubuf1 = ubuf[1];
        __asm__ volatile(
            "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"FF_REG_b" \n\t"
            "push %%"FF_REG_BP" \n\t"
            YSCALEYUV2RGB1b(%%FF_REGBP, %5)
            "pxor %%mm7, %%mm7 \n\t"
            /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
            "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
            "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
            "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
            WRITERGB16(%%FF_REGb, 8280(%5), %%FF_REGBP)
            "pop %%"FF_REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    }
}

#define REAL_YSCALEYUV2PACKED1(index, c) \
    "xor "#index", "#index" \n\t"\
    ".p2align 4 \n\t"\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
    "add "UV_OFF_BYTE"("#c"), "#index" \n\t"\
    "movq (%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
    "sub "UV_OFF_BYTE"("#c"), "#index" \n\t"\
    "psraw $7, %%mm3 \n\t"\
    "psraw $7, %%mm4 \n\t"\
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $7, %%mm1 \n\t"\
    "psraw $7, %%mm7 \n\t"
#define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)

#define REAL_YSCALEYUV2PACKED1b(index, c) \
    "xor "#index", "#index" \n\t"\
    ".p2align 4 \n\t"\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "add "UV_OFF_BYTE"("#c"), "#index" \n\t"\
    "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "sub "UV_OFF_BYTE"("#c"), "#index" \n\t"\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
    "psrlw $8, %%mm3 \n\t"\
    "psrlw $8, %%mm4 \n\t"\
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $7, %%mm1 \n\t"\
    "psraw $7, %%mm7 \n\t"
#define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)

static void RENAME(yuv2yuyv422_1)(SwsContext *c, const int16_t *buf0,
                                  const int16_t *ubuf[2], const int16_t *vbuf[2],
                                  const int16_t *abuf0, uint8_t *dest,
                                  int dstW, int uvalpha, int y)
{
    const int16_t *ubuf0 = ubuf[0];
    const int16_t *buf1 = buf0; //FIXME needed for RGB1/BGR1

    if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
        const int16_t *ubuf1 = ubuf[0];
        __asm__ volatile(
            "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"FF_REG_b" \n\t"
            "push %%"FF_REG_BP" \n\t"
            YSCALEYUV2PACKED1(%%FF_REGBP, %5)
            WRITEYUY2(%%FF_REGb, 8280(%5), %%FF_REGBP)
            "pop %%"FF_REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    } else {
        const int16_t *ubuf1 = ubuf[1];
        __asm__ volatile(
            "mov %%"FF_REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"FF_REG_b" \n\t"
            "push %%"FF_REG_BP" \n\t"
            YSCALEYUV2PACKED1b(%%FF_REGBP, %5)
            WRITEYUY2(%%FF_REGb, 8280(%5), %%FF_REGBP)
            "pop %%"FF_REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"FF_REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    }
}
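
/* Fast bilinear horizontal scalers.  The inner loop lives in run-time
 * generated code (c->lumMmxextFilterCode / c->chrMmxextFilterCode) invoked
 * through "call *%4"; the C loop afterwards pads destination pixels whose
 * source position falls beyond srcW-1. */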
#if COMPILE_TEMPLATE_MMXEXT
static void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst,
                                 int dstWidth, const uint8_t *src,
                                 int srcW, int xInc)
{
    int32_t *filterPos = c->hLumFilterPos;
    int16_t *filter = c->hLumFilter;
    void *mmxextFilterCode = c->lumMmxextFilterCode;
    int i;
#if defined(PIC)
    uint64_t ebxsave;
#endif
#if ARCH_X86_64
    uint64_t retsave;
#endif

    __asm__ volatile(
#if defined(PIC)
        "mov %%"FF_REG_b", %5 \n\t"
#if ARCH_X86_64
        "mov -8(%%rsp), %%"FF_REG_a" \n\t"
        "mov %%"FF_REG_a", %6 \n\t"
#endif
#else
#if ARCH_X86_64
        "mov -8(%%rsp), %%"FF_REG_a" \n\t"
        "mov %%"FF_REG_a", %5 \n\t"
#endif
#endif
        "pxor %%mm7, %%mm7 \n\t"
        "mov %0, %%"FF_REG_c" \n\t"
        "mov %1, %%"FF_REG_D" \n\t"
        "mov %2, %%"FF_REG_d" \n\t"
        "mov %3, %%"FF_REG_b" \n\t"
        "xor %%"FF_REG_a", %%"FF_REG_a" \n\t" // i
        PREFETCH" (%%"FF_REG_c") \n\t"
        PREFETCH" 32(%%"FF_REG_c") \n\t"
        PREFETCH" 64(%%"FF_REG_c") \n\t"
#if ARCH_X86_64
#define CALL_MMXEXT_FILTER_CODE \
        "movl (%%"FF_REG_b"), %%esi \n\t"\
        "call *%4 \n\t"\
        "movl (%%"FF_REG_b", %%"FF_REG_a"), %%esi \n\t"\
        "add %%"FF_REG_S", %%"FF_REG_c" \n\t"\
        "add %%"FF_REG_a", %%"FF_REG_D" \n\t"\
        "xor %%"FF_REG_a", %%"FF_REG_a" \n\t"
#else
#define CALL_MMXEXT_FILTER_CODE \
        "movl (%%"FF_REG_b"), %%esi \n\t"\
        "call *%4 \n\t"\
        "addl (%%"FF_REG_b", %%"FF_REG_a"), %%"FF_REG_c" \n\t"\
        "add %%"FF_REG_a", %%"FF_REG_D" \n\t"\
        "xor %%"FF_REG_a", %%"FF_REG_a" \n\t"
#endif /* ARCH_X86_64 */
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
#if defined(PIC)
        "mov %5, %%"FF_REG_b" \n\t"
#if ARCH_X86_64
        "mov %6, %%"FF_REG_a" \n\t"
        "mov %%"FF_REG_a", -8(%%rsp) \n\t"
#endif
#else
#if ARCH_X86_64
        "mov %5, %%"FF_REG_a" \n\t"
        "mov %%"FF_REG_a", -8(%%rsp) \n\t"
#endif
#endif
        :: "m" (src), "m" (dst), "m" (filter), "m" (filterPos),
           "m" (mmxextFilterCode)
#if defined(PIC)
          ,"m" (ebxsave)
#endif
#if ARCH_X86_64
          ,"m" (retsave)
#endif
        : "%"FF_REG_a, "%"FF_REG_c, "%"FF_REG_d, "%"FF_REG_S, "%"FF_REG_D
#if !defined(PIC)
         ,"%"FF_REG_b
#endif
    );

    for (i = dstWidth - 1; (i * xInc) >> 16 >= srcW - 1; i--)
        dst[i] = src[srcW - 1] * 128;
}

static void RENAME(hcscale_fast)(SwsContext *c, int16_t *dst1, int16_t *dst2,
                                 int dstWidth, const uint8_t *src1,
                                 const uint8_t *src2, int srcW, int xInc)
{
    int32_t *filterPos = c->hChrFilterPos;
    int16_t *filter = c->hChrFilter;
    void *mmxextFilterCode = c->chrMmxextFilterCode;
    int i;
#if defined(PIC)
    DECLARE_ALIGNED(8, uint64_t, ebxsave);
#endif
#if ARCH_X86_64
    DECLARE_ALIGNED(8, uint64_t, retsave);
#endif

    __asm__ volatile(
#if defined(PIC)
        "mov %%"FF_REG_b", %7 \n\t"
#if ARCH_X86_64
        "mov -8(%%rsp), %%"FF_REG_a" \n\t"
        "mov %%"FF_REG_a", %8 \n\t"
#endif
#else
#if ARCH_X86_64
        "mov -8(%%rsp), %%"FF_REG_a" \n\t"
        "mov %%"FF_REG_a", %7 \n\t"
#endif
#endif
        "pxor %%mm7, %%mm7 \n\t"
        "mov %0, %%"FF_REG_c" \n\t"
        "mov %1, %%"FF_REG_D" \n\t"
        "mov %2, %%"FF_REG_d" \n\t"
        "mov %3, %%"FF_REG_b" \n\t"
        "xor %%"FF_REG_a", %%"FF_REG_a" \n\t" // i
        PREFETCH" (%%"FF_REG_c") \n\t"
        PREFETCH" 32(%%"FF_REG_c") \n\t"
        PREFETCH" 64(%%"FF_REG_c") \n\t"
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        "xor %%"FF_REG_a", %%"FF_REG_a" \n\t" // i
        "mov %5, %%"FF_REG_c" \n\t" // src
        "mov %6, %%"FF_REG_D" \n\t" // buf2
        PREFETCH" (%%"FF_REG_c") \n\t"
        PREFETCH" 32(%%"FF_REG_c") \n\t"
        PREFETCH" 64(%%"FF_REG_c") \n\t"
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
        CALL_MMXEXT_FILTER_CODE
#if defined(PIC)
        "mov %7, %%"FF_REG_b" \n\t"
#if ARCH_X86_64
        "mov %8, %%"FF_REG_a" \n\t"
        "mov %%"FF_REG_a", -8(%%rsp) \n\t"
#endif
#else
#if ARCH_X86_64
        "mov %7, %%"FF_REG_a" \n\t"
        "mov %%"FF_REG_a", -8(%%rsp) \n\t"
#endif
#endif
        :: "m" (src1), "m" (dst1), "m" (filter), "m" (filterPos),
           "m" (mmxextFilterCode), "m" (src2), "m" (dst2)
#if defined(PIC)
          ,"m" (ebxsave)
#endif
#if ARCH_X86_64
          ,"m" (retsave)
#endif
        : "%"FF_REG_a, "%"FF_REG_c, "%"FF_REG_d, "%"FF_REG_S, "%"FF_REG_D
#if !defined(PIC)
         ,"%"FF_REG_b
#endif
    );

    for (i = dstWidth - 1; (i * xInc) >> 16 >= srcW - 1; i--) {
        dst1[i] = src1[srcW - 1] * 128;
        dst2[i] = src2[srcW - 1] * 128;
    }
}
#endif /* COMPILE_TEMPLATE_MMXEXT */
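
/* Plug the implementations above into the SwsContext function pointers,
 * subject to the pixel-format and flag checks below. */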
static av_cold void RENAME(sws_init_swscale)(SwsContext *c)
{
    enum AVPixelFormat dstFormat = c->dstFormat;

    if (!is16BPS(dstFormat) && !is9_15BPS(dstFormat) &&
        dstFormat != AV_PIX_FMT_NV12 && dstFormat != AV_PIX_FMT_NV21) {
        if (!(c->flags & SWS_BITEXACT)) {
            if (c->flags & SWS_ACCURATE_RND) {
                if (!(c->flags & SWS_FULL_CHR_H_INT)) {
                    switch (c->dstFormat) {
                    case AV_PIX_FMT_RGB32:   c->yuv2packedX = RENAME(yuv2rgb32_X_ar);   break;
                    case AV_PIX_FMT_BGR24:   c->yuv2packedX = RENAME(yuv2bgr24_X_ar);   break;
                    case AV_PIX_FMT_RGB555:  c->yuv2packedX = RENAME(yuv2rgb555_X_ar);  break;
                    case AV_PIX_FMT_RGB565:  c->yuv2packedX = RENAME(yuv2rgb565_X_ar);  break;
                    case AV_PIX_FMT_YUYV422: c->yuv2packedX = RENAME(yuv2yuyv422_X_ar); break;
                    default: break;
                    }
                }
            } else {
                if (!(c->flags & SWS_FULL_CHR_H_INT)) {
                    switch (c->dstFormat) {
                    case AV_PIX_FMT_RGB32:   c->yuv2packedX = RENAME(yuv2rgb32_X);   break;
                    case AV_PIX_FMT_BGR24:   c->yuv2packedX = RENAME(yuv2bgr24_X);   break;
                    case AV_PIX_FMT_RGB555:  c->yuv2packedX = RENAME(yuv2rgb555_X);  break;
                    case AV_PIX_FMT_RGB565:  c->yuv2packedX = RENAME(yuv2rgb565_X);  break;
                    case AV_PIX_FMT_YUYV422: c->yuv2packedX = RENAME(yuv2yuyv422_X); break;
                    default: break;
                    }
                }
            }
        }
        if (!(c->flags & SWS_FULL_CHR_H_INT)) {
            switch (c->dstFormat) {
            case AV_PIX_FMT_RGB32:
                c->yuv2packed1 = RENAME(yuv2rgb32_1);
                c->yuv2packed2 = RENAME(yuv2rgb32_2);
                break;
            case AV_PIX_FMT_BGR24:
                c->yuv2packed1 = RENAME(yuv2bgr24_1);
                c->yuv2packed2 = RENAME(yuv2bgr24_2);
                break;
            case AV_PIX_FMT_RGB555:
                c->yuv2packed1 = RENAME(yuv2rgb555_1);
                c->yuv2packed2 = RENAME(yuv2rgb555_2);
                break;
            case AV_PIX_FMT_RGB565:
                c->yuv2packed1 = RENAME(yuv2rgb565_1);
                c->yuv2packed2 = RENAME(yuv2rgb565_2);
                break;
            case AV_PIX_FMT_YUYV422:
                c->yuv2packed1 = RENAME(yuv2yuyv422_1);
                c->yuv2packed2 = RENAME(yuv2yuyv422_2);
                break;
            default:
                break;
            }
        }
    }

    if (c->srcBpc == 8 && c->dstBpc <= 12) {
        // Use the new MMX scaler if the MMXEXT one can't be used (it is faster than the x86 ASM one).
#if COMPILE_TEMPLATE_MMXEXT
        if (c->flags & SWS_FAST_BILINEAR && c->canMMXEXTBeUsed) {
            c->hyscale_fast = RENAME(hyscale_fast);
            c->hcscale_fast = RENAME(hcscale_fast);
        } else {
#endif /* COMPILE_TEMPLATE_MMXEXT */
            c->hyscale_fast = NULL;
            c->hcscale_fast = NULL;
#if COMPILE_TEMPLATE_MMXEXT
        }
#endif /* COMPILE_TEMPLATE_MMXEXT */
    }
}