/*
 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#undef REAL_MOVNTQ
#undef MOVNTQ
#undef PREFETCH

#if COMPILE_TEMPLATE_MMX2
#define PREFETCH "prefetchnta"
#else
#define PREFETCH " # nop"
#endif

#if COMPILE_TEMPLATE_MMX2
#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif
#define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
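
/* On MMX2-capable builds MOVNTQ expands to movntq, a non-temporal store that
 * bypasses the cache for the write-only destination scanline, and PREFETCH
 * becomes prefetchnta; on plain MMX both degrade to an ordinary movq and a
 * nop. */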

#define YSCALEYUV2PACKEDX_UV \
    __asm__ volatile(\
        "xor %%"REG_a", %%"REG_a" \n\t"\
        ".p2align 4 \n\t"\
        "nop \n\t"\
        "1: \n\t"\
        "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
        "mov (%%"REG_d"), %%"REG_S" \n\t"\
        "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
        "movq %%mm3, %%mm4 \n\t"\
        ".p2align 4 \n\t"\
        "2: \n\t"\
        "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
        "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
        "add %6, %%"REG_S" \n\t" \
        "movq (%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
        "add $16, %%"REG_d" \n\t"\
        "mov (%%"REG_d"), %%"REG_S" \n\t"\
        "pmulhw %%mm0, %%mm2 \n\t"\
        "pmulhw %%mm0, %%mm5 \n\t"\
        "paddw %%mm2, %%mm3 \n\t"\
        "paddw %%mm5, %%mm4 \n\t"\
        "test %%"REG_S", %%"REG_S" \n\t"\
        " jnz 2b \n\t"\

#define YSCALEYUV2PACKEDX_YA(offset,coeff,src1,src2,dst1,dst2) \
    "lea "offset"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), "#dst1" \n\t"\
    "movq "#dst1", "#dst2" \n\t"\
    ".p2align 4 \n\t"\
    "2: \n\t"\
    "movq 8(%%"REG_d"), "#coeff" \n\t" /* filterCoeff */\
    "movq (%%"REG_S", %%"REG_a", 2), "#src1" \n\t" /* Y1srcData */\
    "movq 8(%%"REG_S", %%"REG_a", 2), "#src2" \n\t" /* Y2srcData */\
    "add $16, %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pmulhw "#coeff", "#src1" \n\t"\
    "pmulhw "#coeff", "#src2" \n\t"\
    "paddw "#src1", "#dst1" \n\t"\
    "paddw "#src2", "#dst2" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    " jnz 2b \n\t"\

#define YSCALEYUV2PACKEDX \
    YSCALEYUV2PACKEDX_UV \
    YSCALEYUV2PACKEDX_YA(LUM_MMX_FILTER_OFFSET,%%mm0,%%mm2,%%mm5,%%mm1,%%mm7) \

#define YSCALEYUV2PACKEDX_END \
    :: "r" (&c->redDither), \
        "m" (dummy), "m" (dummy), "m" (dummy),\
        "r" (dest), "m" (dstW_reg), "m"(uv_off) \
    : "%"REG_a, "%"REG_d, "%"REG_S \
    );

#define YSCALEYUV2PACKEDX_ACCURATE_UV \
    __asm__ volatile(\
        "xor %%"REG_a", %%"REG_a" \n\t"\
        ".p2align 4 \n\t"\
        "nop \n\t"\
        "1: \n\t"\
        "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
        "mov (%%"REG_d"), %%"REG_S" \n\t"\
        "pxor %%mm4, %%mm4 \n\t"\
        "pxor %%mm5, %%mm5 \n\t"\
        "pxor %%mm6, %%mm6 \n\t"\
        "pxor %%mm7, %%mm7 \n\t"\
        ".p2align 4 \n\t"\
        "2: \n\t"\
        "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\
        "add %6, %%"REG_S" \n\t" \
        "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
        "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
        "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\
        "movq %%mm0, %%mm3 \n\t"\
        "punpcklwd %%mm1, %%mm0 \n\t"\
        "punpckhwd %%mm1, %%mm3 \n\t"\
        "movq "STR(APCK_COEF)"(%%"REG_d"),%%mm1 \n\t" /* filterCoeff */\
        "pmaddwd %%mm1, %%mm0 \n\t"\
        "pmaddwd %%mm1, %%mm3 \n\t"\
        "paddd %%mm0, %%mm4 \n\t"\
        "paddd %%mm3, %%mm5 \n\t"\
        "add %6, %%"REG_S" \n\t" \
        "movq (%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
        "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
        "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
        "test %%"REG_S", %%"REG_S" \n\t"\
        "movq %%mm2, %%mm0 \n\t"\
        "punpcklwd %%mm3, %%mm2 \n\t"\
        "punpckhwd %%mm3, %%mm0 \n\t"\
        "pmaddwd %%mm1, %%mm2 \n\t"\
        "pmaddwd %%mm1, %%mm0 \n\t"\
        "paddd %%mm2, %%mm6 \n\t"\
        "paddd %%mm0, %%mm7 \n\t"\
        " jnz 2b \n\t"\
        "psrad $16, %%mm4 \n\t"\
        "psrad $16, %%mm5 \n\t"\
        "psrad $16, %%mm6 \n\t"\
        "psrad $16, %%mm7 \n\t"\
        "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
        "packssdw %%mm5, %%mm4 \n\t"\
        "packssdw %%mm7, %%mm6 \n\t"\
        "paddw %%mm0, %%mm4 \n\t"\
        "paddw %%mm0, %%mm6 \n\t"\
        "movq %%mm4, "U_TEMP"(%0) \n\t"\
        "movq %%mm6, "V_TEMP"(%0) \n\t"\

#define YSCALEYUV2PACKEDX_ACCURATE_YA(offset) \
    "lea "offset"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pxor %%mm1, %%mm1 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    ".p2align 4 \n\t"\
    "2: \n\t"\
    "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
    "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
    "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
    "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
    "movq %%mm0, %%mm3 \n\t"\
    "punpcklwd %%mm4, %%mm0 \n\t"\
    "punpckhwd %%mm4, %%mm3 \n\t"\
    "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\
    "pmaddwd %%mm4, %%mm0 \n\t"\
    "pmaddwd %%mm4, %%mm3 \n\t"\
    "paddd %%mm0, %%mm1 \n\t"\
    "paddd %%mm3, %%mm5 \n\t"\
    "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
    "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
    "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "punpcklwd %%mm3, %%mm2 \n\t"\
    "punpckhwd %%mm3, %%mm0 \n\t"\
    "pmaddwd %%mm4, %%mm2 \n\t"\
    "pmaddwd %%mm4, %%mm0 \n\t"\
    "paddd %%mm2, %%mm7 \n\t"\
    "paddd %%mm0, %%mm6 \n\t"\
    " jnz 2b \n\t"\
    "psrad $16, %%mm1 \n\t"\
    "psrad $16, %%mm5 \n\t"\
    "psrad $16, %%mm7 \n\t"\
    "psrad $16, %%mm6 \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
    "packssdw %%mm5, %%mm1 \n\t"\
    "packssdw %%mm6, %%mm7 \n\t"\
    "paddw %%mm0, %%mm1 \n\t"\
    "paddw %%mm0, %%mm7 \n\t"\
    "movq "U_TEMP"(%0), %%mm3 \n\t"\
    "movq "V_TEMP"(%0), %%mm4 \n\t"\

#define YSCALEYUV2PACKEDX_ACCURATE \
    YSCALEYUV2PACKEDX_ACCURATE_UV \
    YSCALEYUV2PACKEDX_ACCURATE_YA(LUM_MMX_FILTER_OFFSET)

#define YSCALEYUV2RGBX \
    "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
    "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"\

#define REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) \
    "movq "#b", "#q2" \n\t" /* B */\
    "movq "#r", "#t" \n\t" /* R */\
    "punpcklbw "#g", "#b" \n\t" /* GBGBGBGB 0 */\
    "punpcklbw "#a", "#r" \n\t" /* ARARARAR 0 */\
    "punpckhbw "#g", "#q2" \n\t" /* GBGBGBGB 2 */\
    "punpckhbw "#a", "#t" \n\t" /* ARARARAR 2 */\
    "movq "#b", "#q0" \n\t" /* GBGBGBGB 0 */\
    "movq "#q2", "#q3" \n\t" /* GBGBGBGB 2 */\
    "punpcklwd "#r", "#q0" \n\t" /* ARGBARGB 0 */\
    "punpckhwd "#r", "#b" \n\t" /* ARGBARGB 1 */\
    "punpcklwd "#t", "#q2" \n\t" /* ARGBARGB 2 */\
    "punpckhwd "#t", "#q3" \n\t" /* ARGBARGB 3 */\
\
    MOVNTQ( q0, (dst, index, 4))\
    MOVNTQ( b, 8(dst, index, 4))\
    MOVNTQ( q2, 16(dst, index, 4))\
    MOVNTQ( q3, 24(dst, index, 4))\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)
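
/* REAL_WRITEBGR32 is a byte-interleave in two rounds: punpcklbw/punpckhbw
 * pair B with G and R with A into 16-bit units, the punpck*wd round merges
 * those into 32-bit ARGB pixels, and four MOVNTQs store 8 pixels (32 bytes);
 * "add $8" / "cmp" / "jb 1b" then step the shared 8-pixels-per-iteration
 * output loop. */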

static void RENAME(yuv2rgb32_X_ar)(SwsContext *c, const int16_t *lumFilter,
                                   const int16_t **lumSrc, int lumFilterSize,
                                   const int16_t *chrFilter, const int16_t **chrUSrc,
                                   const int16_t **chrVSrc,
                                   int chrFilterSize, const int16_t **alpSrc,
                                   uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_off_byte;

    if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
        YSCALEYUV2PACKEDX_ACCURATE
        YSCALEYUV2RGBX
        "movq %%mm2, "U_TEMP"(%0) \n\t"
        "movq %%mm4, "V_TEMP"(%0) \n\t"
        "movq %%mm5, "Y_TEMP"(%0) \n\t"
        YSCALEYUV2PACKEDX_ACCURATE_YA(ALP_MMX_FILTER_OFFSET)
        "movq "Y_TEMP"(%0), %%mm5 \n\t"
        "psraw $3, %%mm1 \n\t"
        "psraw $3, %%mm7 \n\t"
        "packuswb %%mm7, %%mm1 \n\t"
        WRITEBGR32(%4, %5, %%REGa, %%mm3, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm2, %%mm6)
        YSCALEYUV2PACKEDX_END
    } else {
        YSCALEYUV2PACKEDX_ACCURATE
        YSCALEYUV2RGBX
        "pcmpeqd %%mm7, %%mm7 \n\t"
        WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
        YSCALEYUV2PACKEDX_END
    }
}

static void RENAME(yuv2rgb32_X)(SwsContext *c, const int16_t *lumFilter,
                                const int16_t **lumSrc, int lumFilterSize,
                                const int16_t *chrFilter, const int16_t **chrUSrc,
                                const int16_t **chrVSrc,
                                int chrFilterSize, const int16_t **alpSrc,
                                uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_off_byte;

    if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
        YSCALEYUV2PACKEDX
        YSCALEYUV2RGBX
        YSCALEYUV2PACKEDX_YA(ALP_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7)
        "psraw $3, %%mm1 \n\t"
        "psraw $3, %%mm7 \n\t"
        "packuswb %%mm7, %%mm1 \n\t"
        WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
        YSCALEYUV2PACKEDX_END
    } else {
        YSCALEYUV2PACKEDX
        YSCALEYUV2RGBX
        "pcmpeqd %%mm7, %%mm7 \n\t"
        WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
        YSCALEYUV2PACKEDX_END
    }
}

#define REAL_WRITERGB16(dst, dstw, index) \
    "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
    "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
    "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
    "psrlq $3, %%mm2 \n\t"\
\
    "movq %%mm2, %%mm1 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
\
    "punpcklbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm5, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm5, %%mm1 \n\t"\
\
    "psllq $3, %%mm3 \n\t"\
    "psllq $3, %%mm4 \n\t"\
\
    "por %%mm3, %%mm2 \n\t"\
    "por %%mm4, %%mm1 \n\t"\
\
    MOVNTQ(%%mm2, (dst, index, 2))\
    MOVNTQ(%%mm1, 8(dst, index, 2))\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITERGB16(dst, dstw, index) REAL_WRITERGB16(dst, dstw, index)
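
/* Per pixel, the mask/shift/por sequence in WRITERGB16 is the usual 5-6-5
 * packing; an equivalent scalar sketch (illustrative, not compiled):
 */
#if 0
static uint16_t rgb565_ref(uint8_t r, uint8_t g, uint8_t b)
{
    /* pand bF8/bFC keep the significant bits, the shifts place the fields
     * at bit positions 11, 5 and 0 */
    return ((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (b >> 3);
}
#endif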

static void RENAME(yuv2rgb565_X_ar)(SwsContext *c, const int16_t *lumFilter,
                                    const int16_t **lumSrc, int lumFilterSize,
                                    const int16_t *chrFilter, const int16_t **chrUSrc,
                                    const int16_t **chrVSrc,
                                    int chrFilterSize, const int16_t **alpSrc,
                                    uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_off_byte;

    YSCALEYUV2PACKEDX_ACCURATE
    YSCALEYUV2RGBX
    "pxor %%mm7, %%mm7 \n\t"
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
    "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
    "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
    "paddusb "RED_DITHER"(%0), %%mm5\n\t"
#endif
    WRITERGB16(%4, %5, %%REGa)
    YSCALEYUV2PACKEDX_END
}

static void RENAME(yuv2rgb565_X)(SwsContext *c, const int16_t *lumFilter,
                                 const int16_t **lumSrc, int lumFilterSize,
                                 const int16_t *chrFilter, const int16_t **chrUSrc,
                                 const int16_t **chrVSrc,
                                 int chrFilterSize, const int16_t **alpSrc,
                                 uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_off_byte;

    YSCALEYUV2PACKEDX
    YSCALEYUV2RGBX
    "pxor %%mm7, %%mm7 \n\t"
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
    "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
    "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
    "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
#endif
    WRITERGB16(%4, %5, %%REGa)
    YSCALEYUV2PACKEDX_END
}

#define REAL_WRITERGB15(dst, dstw, index) \
    "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
    "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
    "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
    "psrlq $3, %%mm2 \n\t"\
    "psrlq $1, %%mm5 \n\t"\
\
    "movq %%mm2, %%mm1 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
\
    "punpcklbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm5, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm5, %%mm1 \n\t"\
\
    "psllq $2, %%mm3 \n\t"\
    "psllq $2, %%mm4 \n\t"\
\
    "por %%mm3, %%mm2 \n\t"\
    "por %%mm4, %%mm1 \n\t"\
\
    MOVNTQ(%%mm2, (dst, index, 2))\
    MOVNTQ(%%mm1, 8(dst, index, 2))\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITERGB15(dst, dstw, index) REAL_WRITERGB15(dst, dstw, index)
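
/* WRITERGB15 is the same packing with 5 bits per channel: R is shifted right
 * one extra bit (psrlq $1) and G left by 2 instead of 3, so per pixel it is
 * equivalent to ((r & 0xF8) << 7) | ((g & 0xF8) << 2) | (b >> 3). */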

static void RENAME(yuv2rgb555_X_ar)(SwsContext *c, const int16_t *lumFilter,
                                    const int16_t **lumSrc, int lumFilterSize,
                                    const int16_t *chrFilter, const int16_t **chrUSrc,
                                    const int16_t **chrVSrc,
                                    int chrFilterSize, const int16_t **alpSrc,
                                    uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_off_byte;

    YSCALEYUV2PACKEDX_ACCURATE
    YSCALEYUV2RGBX
    "pxor %%mm7, %%mm7 \n\t"
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
    "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
    "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
    "paddusb "RED_DITHER"(%0), %%mm5\n\t"
#endif
    WRITERGB15(%4, %5, %%REGa)
    YSCALEYUV2PACKEDX_END
}

static void RENAME(yuv2rgb555_X)(SwsContext *c, const int16_t *lumFilter,
                                 const int16_t **lumSrc, int lumFilterSize,
                                 const int16_t *chrFilter, const int16_t **chrUSrc,
                                 const int16_t **chrVSrc,
                                 int chrFilterSize, const int16_t **alpSrc,
                                 uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_off_byte;

    YSCALEYUV2PACKEDX
    YSCALEYUV2RGBX
    "pxor %%mm7, %%mm7 \n\t"
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
    "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
    "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
    "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
#endif
    WRITERGB15(%4, %5, %%REGa)
    YSCALEYUV2PACKEDX_END
}

#define WRITEBGR24MMX(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq %%mm2, %%mm1 \n\t" /* B */\
    "movq %%mm5, %%mm6 \n\t" /* R */\
    "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
    "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
    "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
    "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
    "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
    "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
    "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
    "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
    "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
    "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
    "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
    "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
    "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
    "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
\
    "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
    "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
    "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
    "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
\
    "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
    "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
    "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
    "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
\
    "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
    "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
    "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
    "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
    MOVNTQ(%%mm0, (dst))\
\
    "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
    "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
    "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
    "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
    MOVNTQ(%%mm6, 8(dst))\
\
    "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
    "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
    "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
    MOVNTQ(%%mm5, 16(dst))\
\
    "add $24, "#dst" \n\t"\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#define WRITEBGR24MMX2(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
    "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
    "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
    "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
    "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
\
    "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
    "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
    "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
\
    "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
    "por %%mm1, %%mm6 \n\t"\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, (dst))\
\
    "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
    "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
    "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
    "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
\
    "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
    "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
    "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
\
    "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, 8(dst))\
\
    "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
    "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
    "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
\
    "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
    "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
    "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
\
    "por %%mm1, %%mm3 \n\t"\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, 16(dst))\
\
    "add $24, "#dst" \n\t"\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#if COMPILE_TEMPLATE_MMX2
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
#else
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
#endif
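
/* Two strategies for the gapless 24-bit store: WRITEBGR24MMX builds 0RGB
 * dwords and then shifts/ORs them into three contiguous quadwords, while
 * WRITEBGR24MMX2 uses pshufw plus the ff_M24A/B/C masks to gather each
 * output quadword directly; the #if above picks one per template. */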

static void RENAME(yuv2bgr24_X_ar)(SwsContext *c, const int16_t *lumFilter,
                                   const int16_t **lumSrc, int lumFilterSize,
                                   const int16_t *chrFilter, const int16_t **chrUSrc,
                                   const int16_t **chrVSrc,
                                   int chrFilterSize, const int16_t **alpSrc,
                                   uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_off_byte;

    YSCALEYUV2PACKEDX_ACCURATE
    YSCALEYUV2RGBX
    "pxor %%mm7, %%mm7 \n\t"
    "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
    "add %4, %%"REG_c" \n\t"
    WRITEBGR24(%%REGc, %5, %%REGa)
    :: "r" (&c->redDither),
       "m" (dummy), "m" (dummy), "m" (dummy),
       "r" (dest), "m" (dstW_reg), "m"(uv_off)
    : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
    );
}

static void RENAME(yuv2bgr24_X)(SwsContext *c, const int16_t *lumFilter,
                                const int16_t **lumSrc, int lumFilterSize,
                                const int16_t *chrFilter, const int16_t **chrUSrc,
                                const int16_t **chrVSrc,
                                int chrFilterSize, const int16_t **alpSrc,
                                uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_off_byte;

    YSCALEYUV2PACKEDX
    YSCALEYUV2RGBX
    "pxor %%mm7, %%mm7 \n\t"
    "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c" \n\t" //FIXME optimize
    "add %4, %%"REG_c" \n\t"
    WRITEBGR24(%%REGc, %5, %%REGa)
    :: "r" (&c->redDither),
       "m" (dummy), "m" (dummy), "m" (dummy),
       "r" (dest), "m" (dstW_reg), "m"(uv_off)
    : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
    );
}

#define REAL_WRITEYUY2(dst, dstw, index) \
    "packuswb %%mm3, %%mm3 \n\t"\
    "packuswb %%mm4, %%mm4 \n\t"\
    "packuswb %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm4, %%mm3 \n\t"\
    "movq %%mm1, %%mm7 \n\t"\
    "punpcklbw %%mm3, %%mm1 \n\t"\
    "punpckhbw %%mm3, %%mm7 \n\t"\
\
    MOVNTQ(%%mm1, (dst, index, 2))\
    MOVNTQ(%%mm7, 8(dst, index, 2))\
\
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
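
/* REAL_WRITEYUY2 narrows U, V and the two luma halves to bytes, interleaves
 * U with V and then luma with chroma, producing the YUYV byte order
 * Y0 U0 Y1 V0 ... - 8 output pixels (16 bytes) per loop iteration. */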

static void RENAME(yuv2yuyv422_X_ar)(SwsContext *c, const int16_t *lumFilter,
                                     const int16_t **lumSrc, int lumFilterSize,
                                     const int16_t *chrFilter, const int16_t **chrUSrc,
                                     const int16_t **chrVSrc,
                                     int chrFilterSize, const int16_t **alpSrc,
                                     uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_off_byte;

    YSCALEYUV2PACKEDX_ACCURATE
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
    "psraw $3, %%mm3 \n\t"
    "psraw $3, %%mm4 \n\t"
    "psraw $3, %%mm1 \n\t"
    "psraw $3, %%mm7 \n\t"
    WRITEYUY2(%4, %5, %%REGa)
    YSCALEYUV2PACKEDX_END
}

static void RENAME(yuv2yuyv422_X)(SwsContext *c, const int16_t *lumFilter,
                                  const int16_t **lumSrc, int lumFilterSize,
                                  const int16_t *chrFilter, const int16_t **chrUSrc,
                                  const int16_t **chrVSrc,
                                  int chrFilterSize, const int16_t **alpSrc,
                                  uint8_t *dest, int dstW, int dstY)
{
    x86_reg dummy=0;
    x86_reg dstW_reg = dstW;
    x86_reg uv_off = c->uv_off_byte;

    YSCALEYUV2PACKEDX
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
    "psraw $3, %%mm3 \n\t"
    "psraw $3, %%mm4 \n\t"
    "psraw $3, %%mm1 \n\t"
    "psraw $3, %%mm7 \n\t"
    WRITEYUY2(%4, %5, %%REGa)
    YSCALEYUV2PACKEDX_END
}

#define REAL_YSCALEYUV2RGB_UV(index, c) \
    "xor "#index", "#index" \n\t"\
    ".p2align 4 \n\t"\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \
    "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \
    "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
    "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
    "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
    "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
    "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\

#define REAL_YSCALEYUV2RGB_YA(index, c, b1, b2) \
    "movq ("#b1", "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
    "movq ("#b2", "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
    "movq 8("#b1", "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
    "movq 8("#b2", "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
    "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
    "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
    "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\

#define REAL_YSCALEYUV2RGB_COEFF(c) \
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"\

#define YSCALEYUV2RGB_YA(index, c, b1, b2) REAL_YSCALEYUV2RGB_YA(index, c, b1, b2)
#define YSCALEYUV2RGB(index, c) \
    REAL_YSCALEYUV2RGB_UV(index, c) \
    REAL_YSCALEYUV2RGB_YA(index, c, %0, %1) \
    REAL_YSCALEYUV2RGB_COEFF(c)
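
/* YSCALEYUV2RGB blends two input lines vertically before the colour-space
 * step. Per sample the UV part computes, in scalar terms,
 *     uv = (uvbuf1[i] >> 4) + (((uvbuf0[i] - uvbuf1[i]) * uvalpha1) >> 16);
 * and REAL_YSCALEYUV2RGB_YA does the same with buf0/buf1 and yalpha1 - a
 * linear interpolation between the two lines in pmulhw fixed point. */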

/**
 * vertical bilinear scale YV12 to RGB
 */
static void RENAME(yuv2rgb32_2)(SwsContext *c, const int16_t *buf[2],
                                const int16_t *ubuf[2], const int16_t *vbuf[2],
                                const int16_t *abuf[2], uint8_t *dest,
                                int dstW, int yalpha, int uvalpha, int y)
{
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];

    if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
        const int16_t *abuf0 = abuf[0], *abuf1 = abuf[1];
#if ARCH_X86_64
        __asm__ volatile(
            YSCALEYUV2RGB(%%r8, %5)
            YSCALEYUV2RGB_YA(%%r8, %5, %6, %7)
            "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
            "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
            "packuswb %%mm7, %%mm1 \n\t"
            WRITEBGR32(%4, 8280(%5), %%r8, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "r" (dest),
               "a" (&c->redDither),
               "r" (abuf0), "r" (abuf1)
            : "%r8"
        );
#else
        *(const uint16_t **)(&c->u_temp)=abuf0;
        *(const uint16_t **)(&c->v_temp)=abuf1;
        __asm__ volatile(
            "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"REG_b" \n\t"
            "push %%"REG_BP" \n\t"
            YSCALEYUV2RGB(%%REGBP, %5)
            "push %0 \n\t"
            "push %1 \n\t"
            "mov "U_TEMP"(%5), %0 \n\t"
            "mov "V_TEMP"(%5), %1 \n\t"
            YSCALEYUV2RGB_YA(%%REGBP, %5, %0, %1)
            "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
            "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
            "packuswb %%mm7, %%mm1 \n\t"
            "pop %1 \n\t"
            "pop %0 \n\t"
            WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
            "pop %%"REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
#endif
    } else {
        __asm__ volatile(
            "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"REG_b" \n\t"
            "push %%"REG_BP" \n\t"
            YSCALEYUV2RGB(%%REGBP, %5)
            "pcmpeqd %%mm7, %%mm7 \n\t"
            WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
            "pop %%"REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    }
}

static void RENAME(yuv2bgr24_2)(SwsContext *c, const int16_t *buf[2],
                                const int16_t *ubuf[2], const int16_t *vbuf[2],
                                const int16_t *abuf[2], uint8_t *dest,
                                int dstW, int yalpha, int uvalpha, int y)
{
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];

    //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
    __asm__ volatile(
        "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
        "mov %4, %%"REG_b" \n\t"
        "push %%"REG_BP" \n\t"
        YSCALEYUV2RGB(%%REGBP, %5)
        "pxor %%mm7, %%mm7 \n\t"
        WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
        "pop %%"REG_BP" \n\t"
        "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
        :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
           "a" (&c->redDither)
    );
}

static void RENAME(yuv2rgb555_2)(SwsContext *c, const int16_t *buf[2],
                                 const int16_t *ubuf[2], const int16_t *vbuf[2],
                                 const int16_t *abuf[2], uint8_t *dest,
                                 int dstW, int yalpha, int uvalpha, int y)
{
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];

    //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
    __asm__ volatile(
        "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
        "mov %4, %%"REG_b" \n\t"
        "push %%"REG_BP" \n\t"
        YSCALEYUV2RGB(%%REGBP, %5)
        "pxor %%mm7, %%mm7 \n\t"
        /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
        "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
        "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
        "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
        WRITERGB15(%%REGb, 8280(%5), %%REGBP)
        "pop %%"REG_BP" \n\t"
        "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
        :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
           "a" (&c->redDither)
    );
}

static void RENAME(yuv2rgb565_2)(SwsContext *c, const int16_t *buf[2],
                                 const int16_t *ubuf[2], const int16_t *vbuf[2],
                                 const int16_t *abuf[2], uint8_t *dest,
                                 int dstW, int yalpha, int uvalpha, int y)
{
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];

    //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
    __asm__ volatile(
        "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
        "mov %4, %%"REG_b" \n\t"
        "push %%"REG_BP" \n\t"
        YSCALEYUV2RGB(%%REGBP, %5)
        "pxor %%mm7, %%mm7 \n\t"
        /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
        "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
        "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
        "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
        WRITERGB16(%%REGb, 8280(%5), %%REGBP)
        "pop %%"REG_BP" \n\t"
        "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
        :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
           "a" (&c->redDither)
    );
}

#define REAL_YSCALEYUV2PACKED(index, c) \
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1 \n\t"\
    "psraw $3, %%mm0 \n\t"\
    "psraw $3, %%mm1 \n\t"\
    "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
    "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
    "xor "#index", "#index" \n\t"\
    ".p2align 4 \n\t"\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \
    "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \
    "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
    "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
    "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
    "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
    "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
    "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
    "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
    "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
    "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
    "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
    "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
    "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\

#define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)

static void RENAME(yuv2yuyv422_2)(SwsContext *c, const int16_t *buf[2],
                                  const int16_t *ubuf[2], const int16_t *vbuf[2],
                                  const int16_t *abuf[2], uint8_t *dest,
                                  int dstW, int yalpha, int uvalpha, int y)
{
    const int16_t *buf0 = buf[0], *buf1 = buf[1],
                  *ubuf0 = ubuf[0], *ubuf1 = ubuf[1];

    //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
    __asm__ volatile(
        "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
        "mov %4, %%"REG_b" \n\t"
        "push %%"REG_BP" \n\t"
        YSCALEYUV2PACKED(%%REGBP, %5)
        WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
        "pop %%"REG_BP" \n\t"
        "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
        :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
           "a" (&c->redDither)
    );
}

#define REAL_YSCALEYUV2RGB1(index, c) \
    "xor "#index", "#index" \n\t"\
    ".p2align 4 \n\t"\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
    "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \
    "movq (%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
    "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \
    "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
    "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"\

#define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)

// do vertical chrominance interpolation
#define REAL_YSCALEYUV2RGB1b(index, c) \
    "xor "#index", "#index" \n\t"\
    ".p2align 4 \n\t"\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \
    "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
    "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
    "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"\

#define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
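
/* The *1 variants serve the unscaled case: YSCALEYUV2RGB1 takes its chroma
 * from a single line (uvbuf0[i] >> 4), while YSCALEYUV2RGB1b averages two
 * lines as (uvbuf0[i] + uvbuf1[i]) >> 5 when uvalpha puts the output between
 * them; the FIXME above notes the unsigned psrlw average can overflow. */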

#define REAL_YSCALEYUV2RGB1_ALPHA(index) \
    "movq (%1, "#index", 2), %%mm7 \n\t" /* abuf0[index ] */\
    "movq 8(%1, "#index", 2), %%mm1 \n\t" /* abuf0[index+4] */\
    "psraw $7, %%mm7 \n\t" /* abuf0[index ] >>7 */\
    "psraw $7, %%mm1 \n\t" /* abuf0[index+4] >>7 */\
    "packuswb %%mm1, %%mm7 \n\t"
#define YSCALEYUV2RGB1_ALPHA(index) REAL_YSCALEYUV2RGB1_ALPHA(index)

/**
 * YV12 to RGB without scaling or interpolating
 */
static void RENAME(yuv2rgb32_1)(SwsContext *c, const int16_t *buf0,
                                const int16_t *ubuf[2], const int16_t *vbuf[2],
                                const int16_t *abuf0, uint8_t *dest,
                                int dstW, int uvalpha, int y)
{
    const int16_t *ubuf0 = ubuf[0];
    const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1

    if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
        const int16_t *ubuf1 = ubuf[0];
        if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2RGB1(%%REGBP, %5)
                YSCALEYUV2RGB1_ALPHA(%%REGBP)
                WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                :: "c" (buf0), "d" (abuf0), "S" (ubuf0), "D" (ubuf1), "m" (dest),
                   "a" (&c->redDither)
            );
        } else {
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2RGB1(%%REGBP, %5)
                "pcmpeqd %%mm7, %%mm7 \n\t"
                WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
                   "a" (&c->redDither)
            );
        }
    } else {
        const int16_t *ubuf1 = ubuf[1];
        if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf) {
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2RGB1b(%%REGBP, %5)
                YSCALEYUV2RGB1_ALPHA(%%REGBP)
                WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                :: "c" (buf0), "d" (abuf0), "S" (ubuf0), "D" (ubuf1), "m" (dest),
                   "a" (&c->redDither)
            );
        } else {
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2RGB1b(%%REGBP, %5)
                "pcmpeqd %%mm7, %%mm7 \n\t"
                WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
                   "a" (&c->redDither)
            );
        }
    }
}

static void RENAME(yuv2bgr24_1)(SwsContext *c, const int16_t *buf0,
                                const int16_t *ubuf[2], const int16_t *vbuf[2],
                                const int16_t *abuf0, uint8_t *dest,
                                int dstW, int uvalpha, int y)
{
    const int16_t *ubuf0 = ubuf[0];
    const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1

    if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
        const int16_t *ubuf1 = ubuf[0];
        __asm__ volatile(
            "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"REG_b" \n\t"
            "push %%"REG_BP" \n\t"
            YSCALEYUV2RGB1(%%REGBP, %5)
            "pxor %%mm7, %%mm7 \n\t"
            WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
            "pop %%"REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    } else {
        const int16_t *ubuf1 = ubuf[1];
        __asm__ volatile(
            "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"REG_b" \n\t"
            "push %%"REG_BP" \n\t"
            YSCALEYUV2RGB1b(%%REGBP, %5)
            "pxor %%mm7, %%mm7 \n\t"
            WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
            "pop %%"REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    }
}

static void RENAME(yuv2rgb555_1)(SwsContext *c, const int16_t *buf0,
                                 const int16_t *ubuf[2], const int16_t *vbuf[2],
                                 const int16_t *abuf0, uint8_t *dest,
                                 int dstW, int uvalpha, int y)
{
    const int16_t *ubuf0 = ubuf[0];
    const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1

    if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
        const int16_t *ubuf1 = ubuf[0];
        __asm__ volatile(
            "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"REG_b" \n\t"
            "push %%"REG_BP" \n\t"
            YSCALEYUV2RGB1(%%REGBP, %5)
            "pxor %%mm7, %%mm7 \n\t"
            /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
            "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
            "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
            "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
            WRITERGB15(%%REGb, 8280(%5), %%REGBP)
            "pop %%"REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    } else {
        const int16_t *ubuf1 = ubuf[1];
        __asm__ volatile(
            "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"REG_b" \n\t"
            "push %%"REG_BP" \n\t"
            YSCALEYUV2RGB1b(%%REGBP, %5)
            "pxor %%mm7, %%mm7 \n\t"
            /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
            "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
            "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
            "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
            WRITERGB15(%%REGb, 8280(%5), %%REGBP)
            "pop %%"REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    }
}

static void RENAME(yuv2rgb565_1)(SwsContext *c, const int16_t *buf0,
                                 const int16_t *ubuf[2], const int16_t *vbuf[2],
                                 const int16_t *abuf0, uint8_t *dest,
                                 int dstW, int uvalpha, int y)
{
    const int16_t *ubuf0 = ubuf[0];
    const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1

    if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
        const int16_t *ubuf1 = ubuf[0];
        __asm__ volatile(
            "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"REG_b" \n\t"
            "push %%"REG_BP" \n\t"
            YSCALEYUV2RGB1(%%REGBP, %5)
            "pxor %%mm7, %%mm7 \n\t"
            /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
            "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
            "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
            "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
            WRITERGB16(%%REGb, 8280(%5), %%REGBP)
            "pop %%"REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    } else {
        const int16_t *ubuf1 = ubuf[1];
        __asm__ volatile(
            "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"REG_b" \n\t"
            "push %%"REG_BP" \n\t"
            YSCALEYUV2RGB1b(%%REGBP, %5)
            "pxor %%mm7, %%mm7 \n\t"
            /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
            "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
            "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
            "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
            WRITERGB16(%%REGb, 8280(%5), %%REGBP)
            "pop %%"REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    }
}

#define REAL_YSCALEYUV2PACKED1(index, c) \
    "xor "#index", "#index" \n\t"\
    ".p2align 4 \n\t"\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
    "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \
    "movq (%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
    "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \
    "psraw $7, %%mm3 \n\t" \
    "psraw $7, %%mm4 \n\t" \
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $7, %%mm1 \n\t" \
    "psraw $7, %%mm7 \n\t" \

#define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)

#define REAL_YSCALEYUV2PACKED1b(index, c) \
    "xor "#index", "#index" \n\t"\
    ".p2align 4 \n\t"\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "add "UV_OFF_BYTE"("#c"), "#index" \n\t" \
    "movq (%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq (%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "sub "UV_OFF_BYTE"("#c"), "#index" \n\t" \
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
    "psrlw $8, %%mm3 \n\t" \
    "psrlw $8, %%mm4 \n\t" \
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $7, %%mm1 \n\t" \
    "psraw $7, %%mm7 \n\t"
#define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)

static void RENAME(yuv2yuyv422_1)(SwsContext *c, const int16_t *buf0,
                                  const int16_t *ubuf[2], const int16_t *vbuf[2],
                                  const int16_t *abuf0, uint8_t *dest,
                                  int dstW, int uvalpha, int y)
{
    const int16_t *ubuf0 = ubuf[0];
    const int16_t *buf1= buf0; //FIXME needed for RGB1/BGR1

    if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
        const int16_t *ubuf1 = ubuf[0];
        __asm__ volatile(
            "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"REG_b" \n\t"
            "push %%"REG_BP" \n\t"
            YSCALEYUV2PACKED1(%%REGBP, %5)
            WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
            "pop %%"REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    } else {
        const int16_t *ubuf1 = ubuf[1];
        __asm__ volatile(
            "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"REG_b" \n\t"
            "push %%"REG_BP" \n\t"
            YSCALEYUV2PACKED1b(%%REGBP, %5)
            WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
            "pop %%"REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
               "a" (&c->redDither)
        );
    }
}

#if COMPILE_TEMPLATE_MMX2
static void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst,
                                 int dstWidth, const uint8_t *src,
                                 int srcW, int xInc)
{
    int32_t *filterPos = c->hLumFilterPos;
    int16_t *filter = c->hLumFilter;
    void *mmx2FilterCode= c->lumMmx2FilterCode;
    int i;
#if defined(PIC)
    uint64_t ebxsave;
#endif
#if ARCH_X86_64
    uint64_t retsave;
#endif

    __asm__ volatile(
#if defined(PIC)
        "mov %%"REG_b", %5 \n\t"
#if ARCH_X86_64
        "mov -8(%%rsp), %%"REG_a" \n\t"
        "mov %%"REG_a", %6 \n\t"
#endif
#else
#if ARCH_X86_64
        "mov -8(%%rsp), %%"REG_a" \n\t"
        "mov %%"REG_a", %5 \n\t"
#endif
#endif
        "pxor %%mm7, %%mm7 \n\t"
        "mov %0, %%"REG_c" \n\t"
        "mov %1, %%"REG_D" \n\t"
        "mov %2, %%"REG_d" \n\t"
        "mov %3, %%"REG_b" \n\t"
        "xor %%"REG_a", %%"REG_a" \n\t" // i
        PREFETCH" (%%"REG_c") \n\t"
        PREFETCH" 32(%%"REG_c") \n\t"
        PREFETCH" 64(%%"REG_c") \n\t"
#if ARCH_X86_64
#define CALL_MMX2_FILTER_CODE \
        "movl (%%"REG_b"), %%esi \n\t"\
        "call *%4 \n\t"\
        "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
        "add %%"REG_S", %%"REG_c" \n\t"\
        "add %%"REG_a", %%"REG_D" \n\t"\
        "xor %%"REG_a", %%"REG_a" \n\t"\

#else
#define CALL_MMX2_FILTER_CODE \
        "movl (%%"REG_b"), %%esi \n\t"\
        "call *%4 \n\t"\
        "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
        "add %%"REG_a", %%"REG_D" \n\t"\
        "xor %%"REG_a", %%"REG_a" \n\t"\

#endif /* ARCH_X86_64 */
        CALL_MMX2_FILTER_CODE
        CALL_MMX2_FILTER_CODE
        CALL_MMX2_FILTER_CODE
        CALL_MMX2_FILTER_CODE
        CALL_MMX2_FILTER_CODE
        CALL_MMX2_FILTER_CODE
        CALL_MMX2_FILTER_CODE
        CALL_MMX2_FILTER_CODE
#if defined(PIC)
        "mov %5, %%"REG_b" \n\t"
#if ARCH_X86_64
        "mov %6, %%"REG_a" \n\t"
        "mov %%"REG_a", -8(%%rsp) \n\t"
#endif
#else
#if ARCH_X86_64
        "mov %5, %%"REG_a" \n\t"
        "mov %%"REG_a", -8(%%rsp) \n\t"
#endif
#endif
        :: "m" (src), "m" (dst), "m" (filter), "m" (filterPos),
           "m" (mmx2FilterCode)
#if defined(PIC)
          ,"m" (ebxsave)
#endif
#if ARCH_X86_64
          ,"m"(retsave)
#endif
        : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
#if !defined(PIC)
         ,"%"REG_b
#endif
    );

    for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
        dst[i] = src[srcW-1]*128;
}
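
/* Each CALL_MMX2_FILTER_CODE invocation jumps into horizontal-scaler code
 * generated at runtime (the mmx2FilterCode pointer) and then re-seeds the
 * source and destination pointers for the next chunk. The trailing C loop
 * pads outputs whose source position maps at or past srcW-1 with the last
 * input pixel scaled by 128, the template's 7-bit fixed-point unit. */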

static void RENAME(hcscale_fast)(SwsContext *c, int16_t *dst1, int16_t *dst2,
                                 int dstWidth, const uint8_t *src1,
                                 const uint8_t *src2, int srcW, int xInc)
{
    int32_t *filterPos = c->hChrFilterPos;
    int16_t *filter = c->hChrFilter;
    void *mmx2FilterCode= c->chrMmx2FilterCode;
    int i;
#if defined(PIC)
    DECLARE_ALIGNED(8, uint64_t, ebxsave);
#endif
#if ARCH_X86_64
    DECLARE_ALIGNED(8, uint64_t, retsave);
#endif

    __asm__ volatile(
#if defined(PIC)
        "mov %%"REG_b", %7 \n\t"
#if ARCH_X86_64
        "mov -8(%%rsp), %%"REG_a" \n\t"
        "mov %%"REG_a", %8 \n\t"
#endif
#else
#if ARCH_X86_64
        "mov -8(%%rsp), %%"REG_a" \n\t"
        "mov %%"REG_a", %7 \n\t"
#endif
#endif
        "pxor %%mm7, %%mm7 \n\t"
        "mov %0, %%"REG_c" \n\t"
        "mov %1, %%"REG_D" \n\t"
        "mov %2, %%"REG_d" \n\t"
        "mov %3, %%"REG_b" \n\t"
        "xor %%"REG_a", %%"REG_a" \n\t" // i
        PREFETCH" (%%"REG_c") \n\t"
        PREFETCH" 32(%%"REG_c") \n\t"
        PREFETCH" 64(%%"REG_c") \n\t"
        CALL_MMX2_FILTER_CODE
        CALL_MMX2_FILTER_CODE
        CALL_MMX2_FILTER_CODE
        CALL_MMX2_FILTER_CODE
        "xor %%"REG_a", %%"REG_a" \n\t" // i
        "mov %5, %%"REG_c" \n\t" // src
        "mov %6, %%"REG_D" \n\t" // buf2
        PREFETCH" (%%"REG_c") \n\t"
        PREFETCH" 32(%%"REG_c") \n\t"
        PREFETCH" 64(%%"REG_c") \n\t"
        CALL_MMX2_FILTER_CODE
        CALL_MMX2_FILTER_CODE
        CALL_MMX2_FILTER_CODE
        CALL_MMX2_FILTER_CODE
#if defined(PIC)
        "mov %7, %%"REG_b" \n\t"
#if ARCH_X86_64
        "mov %8, %%"REG_a" \n\t"
        "mov %%"REG_a", -8(%%rsp) \n\t"
#endif
#else
#if ARCH_X86_64
        "mov %7, %%"REG_a" \n\t"
        "mov %%"REG_a", -8(%%rsp) \n\t"
#endif
#endif
        :: "m" (src1), "m" (dst1), "m" (filter), "m" (filterPos),
           "m" (mmx2FilterCode), "m" (src2), "m"(dst2)
#if defined(PIC)
          ,"m" (ebxsave)
#endif
#if ARCH_X86_64
          ,"m"(retsave)
#endif
        : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
#if !defined(PIC)
         ,"%"REG_b
#endif
    );

    for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) {
        dst1[i] = src1[srcW-1]*128;
        dst2[i] = src2[srcW-1]*128;
    }
}
#endif /* COMPILE_TEMPLATE_MMX2 */
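
/* Plug the routines above into the SwsContext function-pointer table. Which
 * packed-pixel writers get used depends on SWS_ACCURATE_RND (the *_ar
 * variants) and on the destination format; the fast bilinear horizontal
 * scalers are only available in the MMX2 template. */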

static av_cold void RENAME(sws_init_swScale)(SwsContext *c)
{
    enum PixelFormat dstFormat = c->dstFormat;

    if (!is16BPS(dstFormat) && !is9_OR_10BPS(dstFormat) &&
        dstFormat != PIX_FMT_NV12 && dstFormat != PIX_FMT_NV21) {
        if (!(c->flags & SWS_BITEXACT)) {
            if (c->flags & SWS_ACCURATE_RND) {
                if (!(c->flags & SWS_FULL_CHR_H_INT)) {
                    switch (c->dstFormat) {
                    case PIX_FMT_RGB32:   c->yuv2packedX = RENAME(yuv2rgb32_X_ar);   break;
                    case PIX_FMT_BGR24:   c->yuv2packedX = RENAME(yuv2bgr24_X_ar);   break;
                    case PIX_FMT_RGB555:  c->yuv2packedX = RENAME(yuv2rgb555_X_ar);  break;
                    case PIX_FMT_RGB565:  c->yuv2packedX = RENAME(yuv2rgb565_X_ar);  break;
                    case PIX_FMT_YUYV422: c->yuv2packedX = RENAME(yuv2yuyv422_X_ar); break;
                    default: break;
                    }
                }
            } else {
                if (!(c->flags & SWS_FULL_CHR_H_INT)) {
                    switch (c->dstFormat) {
                    case PIX_FMT_RGB32:   c->yuv2packedX = RENAME(yuv2rgb32_X);   break;
                    case PIX_FMT_BGR24:   c->yuv2packedX = RENAME(yuv2bgr24_X);   break;
                    case PIX_FMT_RGB555:  c->yuv2packedX = RENAME(yuv2rgb555_X);  break;
                    case PIX_FMT_RGB565:  c->yuv2packedX = RENAME(yuv2rgb565_X);  break;
                    case PIX_FMT_YUYV422: c->yuv2packedX = RENAME(yuv2yuyv422_X); break;
                    default: break;
                    }
                }
            }
        }
        if (!(c->flags & SWS_FULL_CHR_H_INT)) {
            switch (c->dstFormat) {
            case PIX_FMT_RGB32:
                c->yuv2packed1 = RENAME(yuv2rgb32_1);
                c->yuv2packed2 = RENAME(yuv2rgb32_2);
                break;
            case PIX_FMT_BGR24:
                c->yuv2packed1 = RENAME(yuv2bgr24_1);
                c->yuv2packed2 = RENAME(yuv2bgr24_2);
                break;
            case PIX_FMT_RGB555:
                c->yuv2packed1 = RENAME(yuv2rgb555_1);
                c->yuv2packed2 = RENAME(yuv2rgb555_2);
                break;
            case PIX_FMT_RGB565:
                c->yuv2packed1 = RENAME(yuv2rgb565_1);
                c->yuv2packed2 = RENAME(yuv2rgb565_2);
                break;
            case PIX_FMT_YUYV422:
                c->yuv2packed1 = RENAME(yuv2yuyv422_1);
                c->yuv2packed2 = RENAME(yuv2yuyv422_2);
                break;
            default:
                break;
            }
        }
    }

    if (c->srcBpc == 8 && c->dstBpc <= 10) {
        // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
#if COMPILE_TEMPLATE_MMX2
        if (c->flags & SWS_FAST_BILINEAR && c->canMMX2BeUsed)
        {
            c->hyscale_fast = RENAME(hyscale_fast);
            c->hcscale_fast = RENAME(hcscale_fast);
        } else {
#endif /* COMPILE_TEMPLATE_MMX2 */
            c->hyscale_fast = NULL;
            c->hcscale_fast = NULL;
#if COMPILE_TEMPLATE_MMX2
        }
#endif /* COMPILE_TEMPLATE_MMX2 */
    }
}