  1. /*
  2. Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
  3. This program is free software; you can redistribute it and/or modify
  4. it under the terms of the GNU General Public License as published by
  5. the Free Software Foundation; either version 2 of the License, or
  6. (at your option) any later version.
  7. This program is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU General Public License for more details.
  11. You should have received a copy of the GNU General Public License
  12. along with this program; if not, write to the Free Software
  13. Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  14. */
  15. #undef REAL_MOVNTQ
  16. #undef MOVNTQ
  17. #undef PAVGB
  18. #undef PREFETCH
  19. #undef PREFETCHW
  20. #undef EMMS
  21. #undef SFENCE
  22. #ifdef HAVE_3DNOW
  23. /* On K6 femms is faster than emms. On K7 femms maps directly to emms. */
  24. #define EMMS "femms"
  25. #else
  26. #define EMMS "emms"
  27. #endif
  28. #ifdef HAVE_3DNOW
  29. #define PREFETCH "prefetch"
  30. #define PREFETCHW "prefetchw"
  31. #elif defined ( HAVE_MMX2 )
  32. #define PREFETCH "prefetchnta"
  33. #define PREFETCHW "prefetcht0"
  34. #else
  35. #define PREFETCH "/nop"
  36. #define PREFETCHW "/nop"
  37. #endif
  38. #ifdef HAVE_MMX2
  39. #define SFENCE "sfence"
  40. #else
  41. #define SFENCE "/nop"
  42. #endif
  43. #ifdef HAVE_MMX2
  44. #define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
  45. #elif defined (HAVE_3DNOW)
  46. #define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
  47. #endif
  48. #ifdef HAVE_MMX2
  49. #define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
  50. #else
  51. #define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
  52. #endif
  53. #define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
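/* NOTE (illustrative, not part of the original build): the REAL_/wrapper pair
   forces an extra round of macro expansion, so arguments that are themselves
   macros (e.g. the REG/REGa register-name macros used below) are resolved
   before REAL_MOVNTQ stringizes them with #a/#b. The same idiom in miniature:

       #define STR_(x) #x
       #define STR(x)  STR_(x)
       #define REG_a   rax
       // STR_(REG_a) -> "REG_a", but STR(REG_a) -> "rax"
*/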
  54. #ifdef HAVE_ALTIVEC
  55. #include "swscale_altivec_template.c"
  56. #endif
  57. #define YSCALEYUV2YV12X(x, offset) \
  58. "xor %%"REG_a", %%"REG_a" \n\t"\
  59. "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
  60. "movq %%mm3, %%mm4 \n\t"\
  61. "lea " offset "(%0), %%"REG_d" \n\t"\
  62. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  63. ".balign 16 \n\t" /* FIXME Unroll? */\
  64. "1: \n\t"\
  65. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  66. "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
  67. "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm5\n\t" /* srcData */\
  68. "add $16, %%"REG_d" \n\t"\
  69. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  70. "test %%"REG_S", %%"REG_S" \n\t"\
  71. "pmulhw %%mm0, %%mm2 \n\t"\
  72. "pmulhw %%mm0, %%mm5 \n\t"\
  73. "paddw %%mm2, %%mm3 \n\t"\
  74. "paddw %%mm5, %%mm4 \n\t"\
  75. " jnz 1b \n\t"\
  76. "psraw $3, %%mm3 \n\t"\
  77. "psraw $3, %%mm4 \n\t"\
  78. "packuswb %%mm4, %%mm3 \n\t"\
  79. MOVNTQ(%%mm3, (%1, %%REGa))\
  80. "add $8, %%"REG_a" \n\t"\
  81. "cmp %2, %%"REG_a" \n\t"\
  82. "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
  83. "movq %%mm3, %%mm4 \n\t"\
  84. "lea " offset "(%0), %%"REG_d" \n\t"\
  85. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  86. "jb 1b \n\t"
  87. #define YSCALEYUV2YV121 \
  88. "mov %2, %%"REG_a" \n\t"\
  89. ".balign 16 \n\t" /* FIXME Unroll? */\
  90. "1: \n\t"\
  91. "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
  92. "movq 8(%0, %%"REG_a", 2), %%mm1\n\t"\
  93. "psraw $7, %%mm0 \n\t"\
  94. "psraw $7, %%mm1 \n\t"\
  95. "packuswb %%mm1, %%mm0 \n\t"\
  96. MOVNTQ(%%mm0, (%1, %%REGa))\
  97. "add $8, %%"REG_a" \n\t"\
  98. "jnc 1b \n\t"
  99. /*
  100. :: "m" (-lumFilterSize), "m" (-chrFilterSize),
  101. "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
  102. "r" (dest), "m" (dstW),
  103. "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
  104. : "%eax", "%ebx", "%ecx", "%edx", "%esi"
  105. */
  106. #define YSCALEYUV2PACKEDX \
  107. "xor %%"REG_a", %%"REG_a" \n\t"\
  108. ".balign 16 \n\t"\
  109. "nop \n\t"\
  110. "1: \n\t"\
  111. "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
  112. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  113. "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
  114. "movq %%mm3, %%mm4 \n\t"\
  115. ".balign 16 \n\t"\
  116. "2: \n\t"\
  117. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  118. "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
  119. "movq 4096(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
  120. "add $16, %%"REG_d" \n\t"\
  121. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  122. "pmulhw %%mm0, %%mm2 \n\t"\
  123. "pmulhw %%mm0, %%mm5 \n\t"\
  124. "paddw %%mm2, %%mm3 \n\t"\
  125. "paddw %%mm5, %%mm4 \n\t"\
  126. "test %%"REG_S", %%"REG_S" \n\t"\
  127. " jnz 2b \n\t"\
  128. \
  129. "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
  130. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  131. "movq "VROUNDER_OFFSET"(%0), %%mm1\n\t"\
  132. "movq %%mm1, %%mm7 \n\t"\
  133. ".balign 16 \n\t"\
  134. "2: \n\t"\
  135. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  136. "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\
  137. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
  138. "add $16, %%"REG_d" \n\t"\
  139. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  140. "pmulhw %%mm0, %%mm2 \n\t"\
  141. "pmulhw %%mm0, %%mm5 \n\t"\
  142. "paddw %%mm2, %%mm1 \n\t"\
  143. "paddw %%mm5, %%mm7 \n\t"\
  144. "test %%"REG_S", %%"REG_S" \n\t"\
  145. " jnz 2b \n\t"\
  146. #define YSCALEYUV2RGBX \
  147. YSCALEYUV2PACKEDX\
  148. "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
  149. "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
  150. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  151. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  152. "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
  153. "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
  154. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  155. "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
  156. "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
  157. "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
  158. "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
  159. "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
  160. "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
  161. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  162. "paddw %%mm3, %%mm4 \n\t"\
  163. "movq %%mm2, %%mm0 \n\t"\
  164. "movq %%mm5, %%mm6 \n\t"\
  165. "movq %%mm4, %%mm3 \n\t"\
  166. "punpcklwd %%mm2, %%mm2 \n\t"\
  167. "punpcklwd %%mm5, %%mm5 \n\t"\
  168. "punpcklwd %%mm4, %%mm4 \n\t"\
  169. "paddw %%mm1, %%mm2 \n\t"\
  170. "paddw %%mm1, %%mm5 \n\t"\
  171. "paddw %%mm1, %%mm4 \n\t"\
  172. "punpckhwd %%mm0, %%mm0 \n\t"\
  173. "punpckhwd %%mm6, %%mm6 \n\t"\
  174. "punpckhwd %%mm3, %%mm3 \n\t"\
  175. "paddw %%mm7, %%mm0 \n\t"\
  176. "paddw %%mm7, %%mm6 \n\t"\
  177. "paddw %%mm7, %%mm3 \n\t"\
  178. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  179. "packuswb %%mm0, %%mm2 \n\t"\
  180. "packuswb %%mm6, %%mm5 \n\t"\
  181. "packuswb %%mm3, %%mm4 \n\t"\
  182. "pxor %%mm7, %%mm7 \n\t"
  183. #if 0
  184. #define FULL_YSCALEYUV2RGB \
  185. "pxor %%mm7, %%mm7 \n\t"\
  186. "movd %6, %%mm6 \n\t" /*yalpha1*/\
  187. "punpcklwd %%mm6, %%mm6 \n\t"\
  188. "punpcklwd %%mm6, %%mm6 \n\t"\
  189. "movd %7, %%mm5 \n\t" /*uvalpha1*/\
  190. "punpcklwd %%mm5, %%mm5 \n\t"\
  191. "punpcklwd %%mm5, %%mm5 \n\t"\
  192. "xor %%"REG_a", %%"REG_a" \n\t"\
  193. ".balign 16 \n\t"\
  194. "1: \n\t"\
  195. "movq (%0, %%"REG_a", 2), %%mm0 \n\t" /*buf0[eax]*/\
  196. "movq (%1, %%"REG_a", 2), %%mm1 \n\t" /*buf1[eax]*/\
  197. "movq (%2, %%"REG_a",2), %%mm2 \n\t" /* uvbuf0[eax]*/\
  198. "movq (%3, %%"REG_a",2), %%mm3 \n\t" /* uvbuf1[eax]*/\
  199. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  200. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  201. "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  202. "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  203. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  204. "movq 4096(%2, %%"REG_a",2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  205. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  206. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  207. "movq 4096(%3, %%"REG_a",2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
  208. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  209. "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  210. "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
  211. "psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\
  212. "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
  213. \
  214. \
  215. "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  216. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  217. "pmulhw "MANGLE(ubCoeff)", %%mm3\n\t"\
  218. "psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  219. "pmulhw "MANGLE(ugCoeff)", %%mm2\n\t"\
  220. "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  221. "psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\
  222. \
  223. \
  224. "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
  225. "pmulhw "MANGLE(vrCoeff)", %%mm0\n\t"\
  226. "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
  227. "paddw %%mm1, %%mm3 \n\t" /* B*/\
  228. "paddw %%mm1, %%mm0 \n\t" /* R*/\
  229. "packuswb %%mm3, %%mm3 \n\t"\
  230. \
  231. "packuswb %%mm0, %%mm0 \n\t"\
  232. "paddw %%mm4, %%mm2 \n\t"\
  233. "paddw %%mm2, %%mm1 \n\t" /* G*/\
  234. \
  235. "packuswb %%mm1, %%mm1 \n\t"
  236. #endif
  237. #define REAL_YSCALEYUV2PACKED(index, c) \
  238. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
  239. "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1\n\t"\
  240. "psraw $3, %%mm0 \n\t"\
  241. "psraw $3, %%mm1 \n\t"\
  242. "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c")\n\t"\
  243. "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c")\n\t"\
  244. "xor "#index", "#index" \n\t"\
  245. ".balign 16 \n\t"\
  246. "1: \n\t"\
  247. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  248. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  249. "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  250. "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  251. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  252. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  253. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
  254. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  255. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  256. "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  257. "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  258. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  259. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  260. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  261. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  262. "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
  263. "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
  264. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  265. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  266. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  267. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  268. "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  269. "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  270. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  271. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  272. #define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
  273. #define REAL_YSCALEYUV2RGB(index, c) \
  274. "xor "#index", "#index" \n\t"\
  275. ".balign 16 \n\t"\
  276. "1: \n\t"\
  277. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  278. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  279. "movq 4096(%2, "#index"), %%mm5\n\t" /* uvbuf0[eax+2048]*/\
  280. "movq 4096(%3, "#index"), %%mm4\n\t" /* uvbuf1[eax+2048]*/\
  281. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  282. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  283. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
  284. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  285. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  286. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  287. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  288. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  289. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  290. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  291. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  292. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  293. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  294. "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
  295. "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
  296. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  297. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  298. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  299. "movq 8(%0, "#index", 2), %%mm6\n\t" /*buf0[eax]*/\
  300. "movq 8(%1, "#index", 2), %%mm7\n\t" /*buf1[eax]*/\
  301. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  302. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  303. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  304. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  305. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  306. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  307. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  308. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  309. "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
  310. "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
  311. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  312. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  313. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  314. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  315. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  316. "paddw %%mm3, %%mm4 \n\t"\
  317. "movq %%mm2, %%mm0 \n\t"\
  318. "movq %%mm5, %%mm6 \n\t"\
  319. "movq %%mm4, %%mm3 \n\t"\
  320. "punpcklwd %%mm2, %%mm2 \n\t"\
  321. "punpcklwd %%mm5, %%mm5 \n\t"\
  322. "punpcklwd %%mm4, %%mm4 \n\t"\
  323. "paddw %%mm1, %%mm2 \n\t"\
  324. "paddw %%mm1, %%mm5 \n\t"\
  325. "paddw %%mm1, %%mm4 \n\t"\
  326. "punpckhwd %%mm0, %%mm0 \n\t"\
  327. "punpckhwd %%mm6, %%mm6 \n\t"\
  328. "punpckhwd %%mm3, %%mm3 \n\t"\
  329. "paddw %%mm7, %%mm0 \n\t"\
  330. "paddw %%mm7, %%mm6 \n\t"\
  331. "paddw %%mm7, %%mm3 \n\t"\
  332. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  333. "packuswb %%mm0, %%mm2 \n\t"\
  334. "packuswb %%mm6, %%mm5 \n\t"\
  335. "packuswb %%mm3, %%mm4 \n\t"\
  336. "pxor %%mm7, %%mm7 \n\t"
  337. #define YSCALEYUV2RGB(index, c) REAL_YSCALEYUV2RGB(index, c)
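/* NOTE (illustrative): the 2-tap variant above vertically blends two source
   lines, roughly out = (buf1[i]>>4) + (((buf0[i]-buf1[i]) * yalpha1) >> 16),
   i.e. linear interpolation with yalpha1 = 4095 - yalpha (see yuv2packed2),
   before applying the same YUV->RGB math as YSCALEYUV2RGBX. */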
  338. #define REAL_YSCALEYUV2PACKED1(index, c) \
  339. "xor "#index", "#index" \n\t"\
  340. ".balign 16 \n\t"\
  341. "1: \n\t"\
  342. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  343. "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  344. "psraw $7, %%mm3 \n\t" \
  345. "psraw $7, %%mm4 \n\t" \
  346. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  347. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  348. "psraw $7, %%mm1 \n\t" \
  349. "psraw $7, %%mm7 \n\t" \
  350. #define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
  351. #define REAL_YSCALEYUV2RGB1(index, c) \
  352. "xor "#index", "#index" \n\t"\
  353. ".balign 16 \n\t"\
  354. "1: \n\t"\
  355. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  356. "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  357. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  358. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  359. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  360. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  361. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  362. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  363. "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
  364. "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
  365. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  366. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  367. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  368. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  369. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  370. "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
  371. "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
  372. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  373. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  374. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  375. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  376. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  377. "paddw %%mm3, %%mm4 \n\t"\
  378. "movq %%mm2, %%mm0 \n\t"\
  379. "movq %%mm5, %%mm6 \n\t"\
  380. "movq %%mm4, %%mm3 \n\t"\
  381. "punpcklwd %%mm2, %%mm2 \n\t"\
  382. "punpcklwd %%mm5, %%mm5 \n\t"\
  383. "punpcklwd %%mm4, %%mm4 \n\t"\
  384. "paddw %%mm1, %%mm2 \n\t"\
  385. "paddw %%mm1, %%mm5 \n\t"\
  386. "paddw %%mm1, %%mm4 \n\t"\
  387. "punpckhwd %%mm0, %%mm0 \n\t"\
  388. "punpckhwd %%mm6, %%mm6 \n\t"\
  389. "punpckhwd %%mm3, %%mm3 \n\t"\
  390. "paddw %%mm7, %%mm0 \n\t"\
  391. "paddw %%mm7, %%mm6 \n\t"\
  392. "paddw %%mm7, %%mm3 \n\t"\
  393. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  394. "packuswb %%mm0, %%mm2 \n\t"\
  395. "packuswb %%mm6, %%mm5 \n\t"\
  396. "packuswb %%mm3, %%mm4 \n\t"\
  397. "pxor %%mm7, %%mm7 \n\t"
  398. #define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
  399. #define REAL_YSCALEYUV2PACKED1b(index, c) \
  400. "xor "#index", "#index" \n\t"\
  401. ".balign 16 \n\t"\
  402. "1: \n\t"\
  403. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  404. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  405. "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  406. "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  407. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  408. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  409. "psrlw $8, %%mm3 \n\t" \
  410. "psrlw $8, %%mm4 \n\t" \
  411. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  412. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  413. "psraw $7, %%mm1 \n\t" \
  414. "psraw $7, %%mm7 \n\t"
  415. #define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
  416. // do vertical chrominance interpolation
  417. #define REAL_YSCALEYUV2RGB1b(index, c) \
  418. "xor "#index", "#index" \n\t"\
  419. ".balign 16 \n\t"\
  420. "1: \n\t"\
  421. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  422. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  423. "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  424. "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  425. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  426. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  427. "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
  428. "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
  429. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  430. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  431. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  432. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  433. "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
  434. "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
  435. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  436. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  437. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  438. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  439. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  440. "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
  441. "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
  442. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  443. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  444. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  445. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  446. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  447. "paddw %%mm3, %%mm4 \n\t"\
  448. "movq %%mm2, %%mm0 \n\t"\
  449. "movq %%mm5, %%mm6 \n\t"\
  450. "movq %%mm4, %%mm3 \n\t"\
  451. "punpcklwd %%mm2, %%mm2 \n\t"\
  452. "punpcklwd %%mm5, %%mm5 \n\t"\
  453. "punpcklwd %%mm4, %%mm4 \n\t"\
  454. "paddw %%mm1, %%mm2 \n\t"\
  455. "paddw %%mm1, %%mm5 \n\t"\
  456. "paddw %%mm1, %%mm4 \n\t"\
  457. "punpckhwd %%mm0, %%mm0 \n\t"\
  458. "punpckhwd %%mm6, %%mm6 \n\t"\
  459. "punpckhwd %%mm3, %%mm3 \n\t"\
  460. "paddw %%mm7, %%mm0 \n\t"\
  461. "paddw %%mm7, %%mm6 \n\t"\
  462. "paddw %%mm7, %%mm3 \n\t"\
  463. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  464. "packuswb %%mm0, %%mm2 \n\t"\
  465. "packuswb %%mm6, %%mm5 \n\t"\
  466. "packuswb %%mm3, %%mm4 \n\t"\
  467. "pxor %%mm7, %%mm7 \n\t"
  468. #define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
  469. #define REAL_WRITEBGR32(dst, dstw, index) \
  470. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  471. "movq %%mm2, %%mm1 \n\t" /* B */\
  472. "movq %%mm5, %%mm6 \n\t" /* R */\
  473. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  474. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  475. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  476. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  477. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  478. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  479. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  480. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  481. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  482. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  483. \
  484. MOVNTQ(%%mm0, (dst, index, 4))\
  485. MOVNTQ(%%mm2, 8(dst, index, 4))\
  486. MOVNTQ(%%mm1, 16(dst, index, 4))\
  487. MOVNTQ(%%mm3, 24(dst, index, 4))\
  488. \
  489. "add $8, "#index" \n\t"\
  490. "cmp "#dstw", "#index" \n\t"\
  491. " jb 1b \n\t"
  492. #define WRITEBGR32(dst, dstw, index) REAL_WRITEBGR32(dst, dstw, index)
  493. #define REAL_WRITEBGR16(dst, dstw, index) \
  494. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  495. "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
  496. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  497. "psrlq $3, %%mm2 \n\t"\
  498. \
  499. "movq %%mm2, %%mm1 \n\t"\
  500. "movq %%mm4, %%mm3 \n\t"\
  501. \
  502. "punpcklbw %%mm7, %%mm3 \n\t"\
  503. "punpcklbw %%mm5, %%mm2 \n\t"\
  504. "punpckhbw %%mm7, %%mm4 \n\t"\
  505. "punpckhbw %%mm5, %%mm1 \n\t"\
  506. \
  507. "psllq $3, %%mm3 \n\t"\
  508. "psllq $3, %%mm4 \n\t"\
  509. \
  510. "por %%mm3, %%mm2 \n\t"\
  511. "por %%mm4, %%mm1 \n\t"\
  512. \
  513. MOVNTQ(%%mm2, (dst, index, 2))\
  514. MOVNTQ(%%mm1, 8(dst, index, 2))\
  515. \
  516. "add $8, "#index" \n\t"\
  517. "cmp "#dstw", "#index" \n\t"\
  518. " jb 1b \n\t"
  519. #define WRITEBGR16(dst, dstw, index) REAL_WRITEBGR16(dst, dstw, index)
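/* NOTE (illustrative, the 5-6-5 layout produced above):
       bits 15..11   10..5    4..0
            R7..R3   G7..G2   B7..B3
   bF8/bFC keep the top 5 (resp. 6) bits per channel; the pre-masking lets the
   quadword shifts move whole bytes without cross-byte leakage, and the byte
   interleaves assemble 4 packed 16-bit pixels per MOVNTQ. */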
  520. #define REAL_WRITEBGR15(dst, dstw, index) \
  521. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  522. "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
  523. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  524. "psrlq $3, %%mm2 \n\t"\
  525. "psrlq $1, %%mm5 \n\t"\
  526. \
  527. "movq %%mm2, %%mm1 \n\t"\
  528. "movq %%mm4, %%mm3 \n\t"\
  529. \
  530. "punpcklbw %%mm7, %%mm3 \n\t"\
  531. "punpcklbw %%mm5, %%mm2 \n\t"\
  532. "punpckhbw %%mm7, %%mm4 \n\t"\
  533. "punpckhbw %%mm5, %%mm1 \n\t"\
  534. \
  535. "psllq $2, %%mm3 \n\t"\
  536. "psllq $2, %%mm4 \n\t"\
  537. \
  538. "por %%mm3, %%mm2 \n\t"\
  539. "por %%mm4, %%mm1 \n\t"\
  540. \
  541. MOVNTQ(%%mm2, (dst, index, 2))\
  542. MOVNTQ(%%mm1, 8(dst, index, 2))\
  543. \
  544. "add $8, "#index" \n\t"\
  545. "cmp "#dstw", "#index" \n\t"\
  546. " jb 1b \n\t"
  547. #define WRITEBGR15(dst, dstw, index) REAL_WRITEBGR15(dst, dstw, index)
  548. #define WRITEBGR24OLD(dst, dstw, index) \
  549. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  550. "movq %%mm2, %%mm1 \n\t" /* B */\
  551. "movq %%mm5, %%mm6 \n\t" /* R */\
  552. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  553. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  554. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  555. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  556. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  557. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  558. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  559. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  560. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  561. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  562. \
  563. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  564. "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
  565. "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 0 */\
  566. "pand "MANGLE(bm11111000)", %%mm0\n\t" /* 00RGB000 0.5 */\
  567. "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
  568. "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
  569. "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
  570. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  571. \
  572. "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  573. "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
  574. "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
  575. "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
  576. "pand "MANGLE(bm00001111)", %%mm2\n\t" /* 0000RGBR 1 */\
  577. "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
  578. "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
  579. "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 2 */\
  580. "pand "MANGLE(bm11111000)", %%mm1\n\t" /* 00RGB000 2.5 */\
  581. "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
  582. "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
  583. "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
  584. "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
  585. \
  586. "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
  587. "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
  588. "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
  589. "pand "MANGLE(bm00000111)", %%mm5\n\t" /* 00000RGB 3 */\
  590. "pand "MANGLE(bm11111000)", %%mm3\n\t" /* 00RGB000 3.5 */\
  591. "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
  592. "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
  593. "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
  594. \
  595. MOVNTQ(%%mm0, (dst))\
  596. MOVNTQ(%%mm2, 8(dst))\
  597. MOVNTQ(%%mm3, 16(dst))\
  598. "add $24, "#dst" \n\t"\
  599. \
  600. "add $8, "#index" \n\t"\
  601. "cmp "#dstw", "#index" \n\t"\
  602. " jb 1b \n\t"
  603. #define WRITEBGR24MMX(dst, dstw, index) \
  604. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  605. "movq %%mm2, %%mm1 \n\t" /* B */\
  606. "movq %%mm5, %%mm6 \n\t" /* R */\
  607. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  608. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  609. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  610. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  611. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  612. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  613. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  614. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  615. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  616. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  617. \
  618. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  619. "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
  620. "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
  621. "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
  622. \
  623. "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
  624. "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
  625. "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
  626. "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
  627. \
  628. "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
  629. "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
  630. "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
  631. "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
  632. \
  633. "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
  634. "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
  635. "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
  636. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  637. MOVNTQ(%%mm0, (dst))\
  638. \
  639. "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
  640. "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
  641. "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
  642. "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
  643. MOVNTQ(%%mm6, 8(dst))\
  644. \
  645. "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
  646. "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
  647. "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
  648. MOVNTQ(%%mm5, 16(dst))\
  649. \
  650. "add $24, "#dst" \n\t"\
  651. \
  652. "add $8, "#index" \n\t"\
  653. "cmp "#dstw", "#index" \n\t"\
  654. " jb 1b \n\t"
  655. #define WRITEBGR24MMX2(dst, dstw, index) \
  656. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  657. "movq "MANGLE(M24A)", %%mm0 \n\t"\
  658. "movq "MANGLE(M24C)", %%mm7 \n\t"\
  659. "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
  660. "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
  661. "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
  662. \
  663. "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
  664. "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
  665. "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
  666. \
  667. "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
  668. "por %%mm1, %%mm6 \n\t"\
  669. "por %%mm3, %%mm6 \n\t"\
  670. MOVNTQ(%%mm6, (dst))\
  671. \
  672. "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
  673. "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
  674. "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
  675. "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
  676. \
  677. "pand "MANGLE(M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
  678. "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
  679. "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
  680. \
  681. "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
  682. "por %%mm3, %%mm6 \n\t"\
  683. MOVNTQ(%%mm6, 8(dst))\
  684. \
  685. "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
  686. "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
  687. "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
  688. \
  689. "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
  690. "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
  691. "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
  692. \
  693. "por %%mm1, %%mm3 \n\t"\
  694. "por %%mm3, %%mm6 \n\t"\
  695. MOVNTQ(%%mm6, 16(dst))\
  696. \
  697. "add $24, "#dst" \n\t"\
  698. \
  699. "add $8, "#index" \n\t"\
  700. "cmp "#dstw", "#index" \n\t"\
  701. " jb 1b \n\t"
  702. #ifdef HAVE_MMX2
  703. #undef WRITEBGR24
  704. #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
  705. #else
  706. #undef WRITEBGR24
  707. #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
  708. #endif
  709. #define REAL_WRITEYUY2(dst, dstw, index) \
  710. "packuswb %%mm3, %%mm3 \n\t"\
  711. "packuswb %%mm4, %%mm4 \n\t"\
  712. "packuswb %%mm7, %%mm1 \n\t"\
  713. "punpcklbw %%mm4, %%mm3 \n\t"\
  714. "movq %%mm1, %%mm7 \n\t"\
  715. "punpcklbw %%mm3, %%mm1 \n\t"\
  716. "punpckhbw %%mm3, %%mm7 \n\t"\
  717. \
  718. MOVNTQ(%%mm1, (dst, index, 2))\
  719. MOVNTQ(%%mm7, 8(dst, index, 2))\
  720. \
  721. "add $8, "#index" \n\t"\
  722. "cmp "#dstw", "#index" \n\t"\
  723. " jb 1b \n\t"
  724. #define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
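/* NOTE (illustrative): the writer above packs to the usual YUY2 byte order
   Y0 U0 Y1 V0 ...: U and V are saturated to bytes, interleaved into a
   UVUV... register, then punpck{l,h}bw merges them with the luma bytes. */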
  725. static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  726. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  727. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
  728. {
  729. #ifdef HAVE_MMX
  730. if(uDest != NULL)
  731. {
  732. asm volatile(
  733. YSCALEYUV2YV12X(0, CHR_MMX_FILTER_OFFSET)
  734. :: "r" (&c->redDither),
  735. "r" (uDest), "m" ((long)chrDstW)
  736. : "%"REG_a, "%"REG_d, "%"REG_S
  737. );
  738. asm volatile(
  739. YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET)
  740. :: "r" (&c->redDither),
  741. "r" (vDest), "m" ((long)chrDstW)
  742. : "%"REG_a, "%"REG_d, "%"REG_S
  743. );
  744. }
  745. asm volatile(
  746. YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET)
  747. :: "r" (&c->redDither),
  748. "r" (dest), "m" ((long)dstW)
  749. : "%"REG_a, "%"REG_d, "%"REG_S
  750. );
  751. #else
  752. #ifdef HAVE_ALTIVEC
  753. yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
  754. chrFilter, chrSrc, chrFilterSize,
  755. dest, uDest, vDest, dstW, chrDstW);
  756. #else //HAVE_ALTIVEC
  757. yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
  758. chrFilter, chrSrc, chrFilterSize,
  759. dest, uDest, vDest, dstW, chrDstW);
  760. #endif //!HAVE_ALTIVEC
  761. #endif
  762. }
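#if 0
/* Reference C semantics for the MMX block above (a sketch only; the real
   fallback is yuv2yuvXinC, and the rounding/shift constants here are an
   assumption based on the Q7 samples and Q12 filter coefficients): */
static inline void yuv2yuvX_ref(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
	uint8_t *dest, int dstW)
{
	int i, j;
	for(i=0; i<dstW; i++)
	{
		int val= 1<<18; // rounding bias; Q7 * Q12 products -> Q19
		for(j=0; j<lumFilterSize; j++)
			val+= lumSrc[j][i] * lumFilter[j];
		val>>= 19;
		dest[i]= val<0 ? 0 : (val>255 ? 255 : val); // saturate like packuswb
	}
}
#endif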
  763. static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
  764. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
  765. {
  766. #ifdef HAVE_MMX
  767. if(uDest != NULL)
  768. {
  769. asm volatile(
  770. YSCALEYUV2YV121
  771. :: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
  772. "g" ((long)-chrDstW)
  773. : "%"REG_a
  774. );
  775. asm volatile(
  776. YSCALEYUV2YV121
  777. :: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
  778. "g" ((long)-chrDstW)
  779. : "%"REG_a
  780. );
  781. }
  782. asm volatile(
  783. YSCALEYUV2YV121
  784. :: "r" (lumSrc + dstW), "r" (dest + dstW),
  785. "g" ((long)-dstW)
  786. : "%"REG_a
  787. );
  788. #else
  789. int i;
  790. for(i=0; i<dstW; i++)
  791. {
  792. int val= lumSrc[i]>>7;
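// NOTE: val is a 16-bit source >>7, so it lies in [-256,255]; bit 8
// (val&256) is therefore set exactly when val falls outside 0..255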
  793. if(val&256){
  794. if(val<0) val=0;
  795. else val=255;
  796. }
  797. dest[i]= val;
  798. }
  799. if(uDest != NULL)
  800. for(i=0; i<chrDstW; i++)
  801. {
  802. int u=chrSrc[i]>>7;
  803. int v=chrSrc[i + 2048]>>7;
  804. if((u|v)&256){
  805. if(u<0) u=0;
  806. else if (u>255) u=255;
  807. if(v<0) v=0;
  808. else if (v>255) v=255;
  809. }
  810. uDest[i]= u;
  811. vDest[i]= v;
  812. }
  813. #endif
  814. }
  815. /**
  816. * vertical scale YV12 to RGB
  817. */
  818. static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  819. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  820. uint8_t *dest, int dstW, int dstY)
  821. {
  822. int dummy=0;
  823. switch(c->dstFormat)
  824. {
  825. #ifdef HAVE_MMX
  826. case IMGFMT_BGR32:
  827. {
  828. asm volatile(
  829. YSCALEYUV2RGBX
  830. WRITEBGR32(%4, %5, %%REGa)
  831. :: "r" (&c->redDither),
  832. "m" (dummy), "m" (dummy), "m" (dummy),
  833. "r" (dest), "m" (dstW)
  834. : "%"REG_a, "%"REG_d, "%"REG_S
  835. );
  836. }
  837. break;
  838. case IMGFMT_BGR24:
  839. {
  840. asm volatile(
  841. YSCALEYUV2RGBX
  842. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t" //FIXME optimize
  843. "add %4, %%"REG_b" \n\t"
  844. WRITEBGR24(%%REGb, %5, %%REGa)
  845. :: "r" (&c->redDither),
  846. "m" (dummy), "m" (dummy), "m" (dummy),
  847. "r" (dest), "m" (dstW)
  848. : "%"REG_a, "%"REG_b, "%"REG_d, "%"REG_S //FIXME ebx
  849. );
  850. }
  851. break;
  852. case IMGFMT_BGR15:
  853. {
  854. asm volatile(
  855. YSCALEYUV2RGBX
  856. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  857. #ifdef DITHER1XBPP
  858. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  859. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  860. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  861. #endif
  862. WRITEBGR15(%4, %5, %%REGa)
  863. :: "r" (&c->redDither),
  864. "m" (dummy), "m" (dummy), "m" (dummy),
  865. "r" (dest), "m" (dstW)
  866. : "%"REG_a, "%"REG_d, "%"REG_S
  867. );
  868. }
  869. break;
  870. case IMGFMT_BGR16:
  871. {
  872. asm volatile(
  873. YSCALEYUV2RGBX
  874. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  875. #ifdef DITHER1XBPP
  876. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  877. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  878. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  879. #endif
  880. WRITEBGR16(%4, %5, %%REGa)
  881. :: "r" (&c->redDither),
  882. "m" (dummy), "m" (dummy), "m" (dummy),
  883. "r" (dest), "m" (dstW)
  884. : "%"REG_a, "%"REG_d, "%"REG_S
  885. );
  886. }
  887. break;
  888. case IMGFMT_YUY2:
  889. {
  890. asm volatile(
  891. YSCALEYUV2PACKEDX
  892. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  893. "psraw $3, %%mm3 \n\t"
  894. "psraw $3, %%mm4 \n\t"
  895. "psraw $3, %%mm1 \n\t"
  896. "psraw $3, %%mm7 \n\t"
  897. WRITEYUY2(%4, %5, %%REGa)
  898. :: "r" (&c->redDither),
  899. "m" (dummy), "m" (dummy), "m" (dummy),
  900. "r" (dest), "m" (dstW)
  901. : "%"REG_a, "%"REG_d, "%"REG_S
  902. );
  903. }
  904. break;
  905. #endif
  906. default:
  907. #ifdef HAVE_ALTIVEC
  908. altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize,
  909. chrFilter, chrSrc, chrFilterSize,
  910. dest, dstW, dstY);
  911. #else
  912. yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
  913. chrFilter, chrSrc, chrFilterSize,
  914. dest, dstW, dstY);
  915. #endif
  916. break;
  917. }
  918. }
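/* NOTE (illustrative): the switch above hand-dispatches: MMX asm for the
   common packed BGR/YUY2 outputs, and the AltiVec or plain C implementation
   for everything else (and for all formats on non-MMX builds). */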
  919. /**
  920. * vertical bilinear scale YV12 to RGB
  921. */
  922. static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
  923. uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
  924. {
  925. int yalpha1=yalpha^4095;
  926. int uvalpha1=uvalpha^4095;
  927. int i;
  928. #if 0 //isn't used
  929. if(flags&SWS_FULL_CHR_H_INT)
  930. {
  931. switch(dstFormat)
  932. {
  933. #ifdef HAVE_MMX
  934. case IMGFMT_BGR32:
  935. asm volatile(
  936. FULL_YSCALEYUV2RGB
  937. "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
  938. "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
  939. "movq %%mm3, %%mm1 \n\t"
  940. "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
  941. "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
  942. MOVNTQ(%%mm3, (%4, %%REGa, 4))
  943. MOVNTQ(%%mm1, 8(%4, %%REGa, 4))
  944. "add $4, %%"REG_a" \n\t"
  945. "cmp %5, %%"REG_a" \n\t"
  946. " jb 1b \n\t"
  947. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" ((long)dstW),
  948. "m" (yalpha1), "m" (uvalpha1)
  949. : "%"REG_a
  950. );
  951. break;
  952. case IMGFMT_BGR24:
  953. asm volatile(
  954. FULL_YSCALEYUV2RGB
  955. // lsb ... msb
  956. "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
  957. "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
  958. "movq %%mm3, %%mm1 \n\t"
  959. "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
  960. "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
  961. "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
  962. "psrlq $8, %%mm3 \n\t" // GR0BGR00
  963. "pand "MANGLE(bm00000111)", %%mm2\n\t" // BGR00000
  964. "pand "MANGLE(bm11111000)", %%mm3\n\t" // 000BGR00
  965. "por %%mm2, %%mm3 \n\t" // BGRBGR00
  966. "movq %%mm1, %%mm2 \n\t"
  967. "psllq $48, %%mm1 \n\t" // 000000BG
  968. "por %%mm1, %%mm3 \n\t" // BGRBGRBG
  969. "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
  970. "psrld $16, %%mm2 \n\t" // R000R000
  971. "psrlq $24, %%mm1 \n\t" // 0BGR0000
  972. "por %%mm2, %%mm1 \n\t" // RBGRR000
  973. "mov %4, %%"REG_b" \n\t"
  974. "add %%"REG_a", %%"REG_b" \n\t"
  975. #ifdef HAVE_MMX2
  976. //FIXME Alignment
  977. "movntq %%mm3, (%%"REG_b", %%"REG_a", 2)\n\t"
  978. "movntq %%mm1, 8(%%"REG_b", %%"REG_a", 2)\n\t"
  979. #else
  980. "movd %%mm3, (%%"REG_b", %%"REG_a", 2) \n\t"
  981. "psrlq $32, %%mm3 \n\t"
  982. "movd %%mm3, 4(%%"REG_b", %%"REG_a", 2) \n\t"
  983. "movd %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t"
  984. #endif
  985. "add $4, %%"REG_a" \n\t"
  986. "cmp %5, %%"REG_a" \n\t"
  987. " jb 1b \n\t"
  988. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
  989. "m" (yalpha1), "m" (uvalpha1)
  990. : "%"REG_a, "%"REG_b
  991. );
  992. break;
  993. case IMGFMT_BGR15:
  994. asm volatile(
  995. FULL_YSCALEYUV2RGB
  996. #ifdef DITHER1XBPP
  997. "paddusb "MANGLE(g5Dither)", %%mm1\n\t"
  998. "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
  999. "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
  1000. #endif
  1001. "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
  1002. "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
  1003. "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
  1004. "psrlw $3, %%mm3 \n\t"
  1005. "psllw $2, %%mm1 \n\t"
  1006. "psllw $7, %%mm0 \n\t"
  1007. "pand "MANGLE(g15Mask)", %%mm1 \n\t"
  1008. "pand "MANGLE(r15Mask)", %%mm0 \n\t"
  1009. "por %%mm3, %%mm1 \n\t"
  1010. "por %%mm1, %%mm0 \n\t"
  1011. MOVNTQ(%%mm0, (%4, %%REGa, 2))
  1012. "add $4, %%"REG_a" \n\t"
  1013. "cmp %5, %%"REG_a" \n\t"
  1014. " jb 1b \n\t"
  1015. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1016. "m" (yalpha1), "m" (uvalpha1)
  1017. : "%"REG_a
  1018. );
  1019. break;
  1020. case IMGFMT_BGR16:
  1021. asm volatile(
  1022. FULL_YSCALEYUV2RGB
  1023. #ifdef DITHER1XBPP
  1024. "paddusb "MANGLE(g6Dither)", %%mm1\n\t"
  1025. "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
  1026. "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
  1027. #endif
  1028. "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
  1029. "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
  1030. "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
  1031. "psrlw $3, %%mm3 \n\t"
  1032. "psllw $3, %%mm1 \n\t"
  1033. "psllw $8, %%mm0 \n\t"
  1034. "pand "MANGLE(g16Mask)", %%mm1 \n\t"
  1035. "pand "MANGLE(r16Mask)", %%mm0 \n\t"
  1036. "por %%mm3, %%mm1 \n\t"
  1037. "por %%mm1, %%mm0 \n\t"
  1038. MOVNTQ(%%mm0, (%4, %%REGa, 2))
  1039. "add $4, %%"REG_a" \n\t"
  1040. "cmp %5, %%"REG_a" \n\t"
  1041. " jb 1b \n\t"
  1042. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1043. "m" (yalpha1), "m" (uvalpha1)
  1044. : "%"REG_a
  1045. );
  1046. break;
  1047. #endif
  1048. case IMGFMT_RGB32:
  1049. #ifndef HAVE_MMX
  1050. case IMGFMT_BGR32:
  1051. #endif
  1052. if(dstFormat==IMGFMT_BGR32)
  1053. {
  1054. int i;
  1055. #ifdef WORDS_BIGENDIAN
  1056. dest++;
  1057. #endif
  1058. for(i=0;i<dstW;i++){
  1059. // vertical linear interpolation && yuv2rgb in a single step:
  1060. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1061. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1062. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1063. dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
  1064. dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
  1065. dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
  1066. dest+= 4;
  1067. }
  1068. }
  1069. else if(dstFormat==IMGFMT_BGR24)
  1070. {
  1071. int i;
  1072. for(i=0;i<dstW;i++){
  1073. // vertical linear interpolation && yuv2rgb in a single step:
  1074. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1075. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1076. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1077. dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
  1078. dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
  1079. dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
  1080. dest+= 3;
  1081. }
  1082. }
  1083. else if(dstFormat==IMGFMT_BGR16)
  1084. {
  1085. int i;
  1086. for(i=0;i<dstW;i++){
  1087. // vertical linear interpolation && yuv2rgb in a single step:
  1088. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1089. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1090. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1091. ((uint16_t*)dest)[i] =
  1092. clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
  1093. clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
  1094. clip_table16r[(Y + yuvtab_3343[V]) >>13];
  1095. }
  1096. }
  1097. else if(dstFormat==IMGFMT_BGR15)
  1098. {
  1099. int i;
  1100. for(i=0;i<dstW;i++){
  1101. // vertical linear interpolation && yuv2rgb in a single step:
  1102. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1103. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1104. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1105. ((uint16_t*)dest)[i] =
  1106. clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
  1107. clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
  1108. clip_table15r[(Y + yuvtab_3343[V]) >>13];
  1109. }
  1110. }
  1111. }//FULL_UV_IPOL
  1112. else
  1113. {
  1114. #endif // if 0
  1115. #ifdef HAVE_MMX
  1116. switch(c->dstFormat)
  1117. {
  1118. //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
  1119. case IMGFMT_BGR32:
  1120. asm volatile(
  1121. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1122. "mov %4, %%"REG_SP" \n\t"
  1123. YSCALEYUV2RGB(%%REGa, %5)
  1124. WRITEBGR32(%%REGSP, 8280(%5), %%REGa)
  1125. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1126. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1127. "r" (&c->redDither)
  1128. : "%"REG_a
  1129. );
  1130. return;
  1131. case IMGFMT_BGR24:
  1132. asm volatile(
  1133. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1134. "mov %4, %%"REG_SP" \n\t"
  1135. YSCALEYUV2RGB(%%REGa, %5)
  1136. WRITEBGR24(%%REGSP, 8280(%5), %%REGa)
  1137. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1138. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1139. "r" (&c->redDither)
  1140. : "%"REG_a
  1141. );
  1142. return;
  1143. case IMGFMT_BGR15:
  1144. asm volatile(
  1145. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1146. "mov %4, %%"REG_SP" \n\t"
  1147. YSCALEYUV2RGB(%%REGa, %5)
  1148. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1149. #ifdef DITHER1XBPP
  1150. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1151. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1152. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1153. #endif
  1154. WRITEBGR15(%%REGSP, 8280(%5), %%REGa)
  1155. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1156. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1157. "r" (&c->redDither)
  1158. : "%"REG_a
  1159. );
  1160. return;
  1161. case IMGFMT_BGR16:
  1162. asm volatile(
  1163. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1164. "mov %4, %%"REG_SP" \n\t"
  1165. YSCALEYUV2RGB(%%REGa, %5)
  1166. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1167. #ifdef DITHER1XBPP
  1168. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1169. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1170. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1171. #endif
  1172. WRITEBGR16(%%REGSP, 8280(%5), %%REGa)
  1173. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1174. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1175. "r" (&c->redDither)
  1176. : "%"REG_a
  1177. );
  1178. return;
  1179. case IMGFMT_YUY2:
  1180. asm volatile(
  1181. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1182. "mov %4, %%"REG_SP" \n\t"
  1183. YSCALEYUV2PACKED(%%REGa, %5)
  1184. WRITEYUY2(%%REGSP, 8280(%5), %%REGa)
  1185. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1186. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1187. "r" (&c->redDither)
  1188. : "%"REG_a
  1189. );
  1190. return;
  1191. default: break;
  1192. }
  1193. #endif //HAVE_MMX
  1194. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C)
  1195. }
  1196. /**
  1197. * YV12 to RGB without scaling or interpolating
  1198. */
  1199. static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
  1200. uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
  1201. {
  1202. const int yalpha1=0;
  1203. int i;
  1204. uint16_t *buf1= buf0; //FIXME needed for the rgb1/bgr1
  1205. const int yalpha= 4096; //FIXME ...
  1206. if(flags&SWS_FULL_CHR_H_INT)
  1207. {
  1208. RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
  1209. return;
  1210. }
  1211. #ifdef HAVE_MMX
  1212. if( uvalpha < 2048 ) // note: this is not correct (it shifts chrominance by 0.5 pixels) but it's a bit faster
  1213. {
  1214. switch(dstFormat)
  1215. {
  1216. case IMGFMT_BGR32:
  1217. asm volatile(
  1218. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1219. "mov %4, %%"REG_SP" \n\t"
  1220. YSCALEYUV2RGB1(%%REGa, %5)
  1221. WRITEBGR32(%%REGSP, 8280(%5), %%REGa)
  1222. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1223. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1224. "r" (&c->redDither)
  1225. : "%"REG_a
  1226. );
  1227. return;
  1228. case IMGFMT_BGR24:
  1229. asm volatile(
  1230. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1231. "mov %4, %%"REG_SP" \n\t"
  1232. YSCALEYUV2RGB1(%%REGa, %5)
  1233. WRITEBGR24(%%REGSP, 8280(%5), %%REGa)
  1234. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1235. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1236. "r" (&c->redDither)
  1237. : "%"REG_a
  1238. );
  1239. return;
  1240. case IMGFMT_BGR15:
  1241. asm volatile(
  1242. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1243. "mov %4, %%"REG_SP" \n\t"
  1244. YSCALEYUV2RGB1(%%REGa, %5)
  1245. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1246. #ifdef DITHER1XBPP
  1247. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1248. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1249. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1250. #endif
  1251. WRITEBGR15(%%REGSP, 8280(%5), %%REGa)
  1252. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1253. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1254. "r" (&c->redDither)
  1255. : "%"REG_a
  1256. );
  1257. return;
  1258. case IMGFMT_BGR16:
  1259. asm volatile(
  1260. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1261. "mov %4, %%"REG_SP" \n\t"
  1262. YSCALEYUV2RGB1(%%REGa, %5)
  1263. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1264. #ifdef DITHER1XBPP
  1265. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1266. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1267. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1268. #endif
  1269. WRITEBGR16(%%REGSP, 8280(%5), %%REGa)
  1270. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1271. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1272. "r" (&c->redDither)
  1273. : "%"REG_a
  1274. );
  1275. return;
  1276. case IMGFMT_YUY2:
  1277. asm volatile(
  1278. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1279. "mov %4, %%"REG_SP" \n\t"
  1280. YSCALEYUV2PACKED1(%%REGa, %5)
  1281. WRITEYUY2(%%REGSP, 8280(%5), %%REGa)
  1282. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1283. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1284. "r" (&c->redDither)
  1285. : "%"REG_a
  1286. );
  1287. return;
  1288. }
  1289. }
  1290. else
  1291. {
  1292. switch(dstFormat)
  1293. {
  1294. case IMGFMT_BGR32:
  1295. asm volatile(
  1296. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1297. "mov %4, %%"REG_SP" \n\t"
  1298. YSCALEYUV2RGB1b(%%REGa, %5)
  1299. WRITEBGR32(%%REGSP, 8280(%5), %%REGa)
  1300. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1301. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1302. "r" (&c->redDither)
  1303. : "%"REG_a
  1304. );
  1305. return;
  1306. case IMGFMT_BGR24:
  1307. asm volatile(
  1308. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1309. "mov %4, %%"REG_SP" \n\t"
  1310. YSCALEYUV2RGB1b(%%REGa, %5)
  1311. WRITEBGR24(%%REGSP, 8280(%5), %%REGa)
  1312. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1313. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1314. "r" (&c->redDither)
  1315. : "%"REG_a
  1316. );
  1317. return;
  1318. case IMGFMT_BGR15:
  1319. asm volatile(
  1320. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1321. "mov %4, %%"REG_SP" \n\t"
  1322. YSCALEYUV2RGB1b(%%REGa, %5)
  1323. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1324. #ifdef DITHER1XBPP
  1325. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1326. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1327. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1328. #endif
  1329. WRITEBGR15(%%REGSP, 8280(%5), %%REGa)
  1330. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1331. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1332. "r" (&c->redDither)
  1333. : "%"REG_a
  1334. );
  1335. return;
  1336. case IMGFMT_BGR16:
  1337. asm volatile(
  1338. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1339. "mov %4, %%"REG_SP" \n\t"
  1340. YSCALEYUV2RGB1b(%%REGa, %5)
  1341. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1342. #ifdef DITHER1XBPP
  1343. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1344. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1345. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1346. #endif
  1347. WRITEBGR16(%%REGSP, 8280(%5), %%REGa)
  1348. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1349. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1350. "r" (&c->redDither)
  1351. : "%"REG_a
  1352. );
  1353. return;
  1354. case IMGFMT_YUY2:
  1355. asm volatile(
  1356. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1357. "mov %4, %%"REG_SP" \n\t"
  1358. YSCALEYUV2PACKED1b(%%REGa, %5)
  1359. WRITEYUY2(%%REGSP, 8280(%5), %%REGa)
  1360. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1361. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1362. "r" (&c->redDither)
  1363. : "%"REG_a
  1364. );
  1365. return;
  1366. }
  1367. }
  1368. #endif
  1369. if( uvalpha < 2048 )
  1370. {
  1371. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C)
  1372. }else{
  1373. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C)
  1374. }
  1375. }
  1376. //FIXME yuy2* can read up to 7 samples too many
  1377. static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, int width)
  1378. {
  1379. #ifdef HAVE_MMX
  1380. asm volatile(
  1381. "movq "MANGLE(bm01010101)", %%mm2\n\t"
  1382. "mov %0, %%"REG_a" \n\t"
  1383. "1: \n\t"
  1384. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1385. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1386. "pand %%mm2, %%mm0 \n\t"
  1387. "pand %%mm2, %%mm1 \n\t"
  1388. "packuswb %%mm1, %%mm0 \n\t"
  1389. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1390. "add $8, %%"REG_a" \n\t"
  1391. " js 1b \n\t"
  1392. : : "g" ((long)-width), "r" (src+width*2), "r" (dst+width)
  1393. : "%"REG_a
  1394. );
  1395. #else
  1396. int i;
  1397. for(i=0; i<width; i++)
  1398. dst[i]= src[2*i];
  1399. #endif
  1400. }
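/* NOTE (illustrative): YUY2 memory is laid out Y0 U0 Y1 V0 Y2 U1 ..., so
   luma sits in every even byte; the bm01010101 mask plus packuswb above is
   the MMX version of the C fallback's dst[i] = src[2*i]. */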
  1401. static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1402. {
  1403. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1404. asm volatile(
  1405. "movq "MANGLE(bm01010101)", %%mm4\n\t"
  1406. "mov %0, %%"REG_a" \n\t"
  1407. "1: \n\t"
  1408. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1409. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1410. "movq (%2, %%"REG_a",4), %%mm2 \n\t"
  1411. "movq 8(%2, %%"REG_a",4), %%mm3 \n\t"
  1412. PAVGB(%%mm2, %%mm0)
  1413. PAVGB(%%mm3, %%mm1)
  1414. "psrlw $8, %%mm0 \n\t"
  1415. "psrlw $8, %%mm1 \n\t"
  1416. "packuswb %%mm1, %%mm0 \n\t"
  1417. "movq %%mm0, %%mm1 \n\t"
  1418. "psrlw $8, %%mm0 \n\t"
  1419. "pand %%mm4, %%mm1 \n\t"
  1420. "packuswb %%mm0, %%mm0 \n\t"
  1421. "packuswb %%mm1, %%mm1 \n\t"
  1422. "movd %%mm0, (%4, %%"REG_a") \n\t"
  1423. "movd %%mm1, (%3, %%"REG_a") \n\t"
  1424. "add $4, %%"REG_a" \n\t"
  1425. " js 1b \n\t"
  1426. : : "g" ((long)-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
  1427. : "%"REG_a
  1428. );
  1429. #else
  1430. int i;
  1431. for(i=0; i<width; i++)
  1432. {
  1433. dstU[i]= (src1[4*i + 1] + src2[4*i + 1])>>1;
  1434. dstV[i]= (src1[4*i + 3] + src2[4*i + 3])>>1;
  1435. }
  1436. #endif
  1437. }
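/* NOTE (illustrative): chroma is shared between two luma lines, so the fast
   path averages src1 and src2 with PAVGB before splitting the interleaved
   UVUV bytes into planar U and V; the C fallback averages with (a+b)>>1. */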
  1438. //this is almost identical to the previous function, and exists only because yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses
static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, int width)
{
#ifdef HAVE_MMX
asm volatile(
"mov %0, %%"REG_a" \n\t"
"1: \n\t"
"movq (%1, %%"REG_a",2), %%mm0 \n\t"
"movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
"psrlw $8, %%mm0 \n\t"
"psrlw $8, %%mm1 \n\t"
"packuswb %%mm1, %%mm0 \n\t"
"movq %%mm0, (%2, %%"REG_a") \n\t"
"add $8, %%"REG_a" \n\t"
" js 1b \n\t"
: : "g" ((long)-width), "r" (src+width*2), "r" (dst+width)
: "%"REG_a
);
#else
int i;
for(i=0; i<width; i++)
dst[i]= src[2*i+1];
#endif
}
static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
asm volatile(
"movq "MANGLE(bm01010101)", %%mm4\n\t"
"mov %0, %%"REG_a" \n\t"
"1: \n\t"
"movq (%1, %%"REG_a",4), %%mm0 \n\t"
"movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
"movq (%2, %%"REG_a",4), %%mm2 \n\t"
"movq 8(%2, %%"REG_a",4), %%mm3 \n\t"
PAVGB(%%mm2, %%mm0)
PAVGB(%%mm3, %%mm1)
"pand %%mm4, %%mm0 \n\t"
"pand %%mm4, %%mm1 \n\t"
"packuswb %%mm1, %%mm0 \n\t"
"movq %%mm0, %%mm1 \n\t"
"psrlw $8, %%mm0 \n\t"
"pand %%mm4, %%mm1 \n\t"
"packuswb %%mm0, %%mm0 \n\t"
"packuswb %%mm1, %%mm1 \n\t"
"movd %%mm0, (%4, %%"REG_a") \n\t"
"movd %%mm1, (%3, %%"REG_a") \n\t"
"add $4, %%"REG_a" \n\t"
" js 1b \n\t"
: : "g" ((long)-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
: "%"REG_a
);
#else
int i;
for(i=0; i<width; i++)
{
dstU[i]= (src1[4*i + 0] + src2[4*i + 0])>>1;
dstV[i]= (src1[4*i + 2] + src2[4*i + 2])>>1;
}
#endif
}
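/* The bgr/rgb -> Y converters below all evaluate the same fixed-point dot
product; only the per-format channel extraction differs:

Y = (RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT

The rounding constant folds the +16 luma offset and the half-ulp rounding
into one term, since 33<<(RGB2YUV_SHIFT-1) == (16 + 0.5)<<RGB2YUV_SHIFT. */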
static inline void RENAME(bgr32ToY)(uint8_t *dst, uint8_t *src, int width)
{
#ifdef HAVE_MMXFIXME
#else
int i;
for(i=0; i<width; i++)
{
int b= ((uint32_t*)src)[i]&0xFF;
int g= (((uint32_t*)src)[i]>>8)&0xFF;
int r= (((uint32_t*)src)[i]>>16)&0xFF;
dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
}
#endif
}
static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
#ifdef HAVE_MMXFIXME
#else
int i;
for(i=0; i<width; i++)
{
const int a= ((uint32_t*)src1)[2*i+0];
const int e= ((uint32_t*)src1)[2*i+1];
const int c= ((uint32_t*)src2)[2*i+0];
const int d= ((uint32_t*)src2)[2*i+1];
const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF);
const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00);
const int b= l&0x3FF;
const int g= h>>8;
const int r= l>>16;
dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
}
#endif
}
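/* bgr32ToUV above averages a 2x2 pixel block without unpacking each channel:
the masks 0xFF00FF and 0x00FF00 split every 32-bit pixel into lanes (B|R and
G) wide enough to hold the sum of four 8-bit values (max 1020, hence the
0x3FF extraction mask), so four pixels can be added as plain ints and the
per-channel sums pulled out of l and h afterwards. The +2 in the final shift
divides the four-pixel sum back down to one pixel. */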
static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, int width)
{
#ifdef HAVE_MMX
asm volatile(
"mov %2, %%"REG_a" \n\t"
"movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t"
"movq "MANGLE(w1111)", %%mm5 \n\t"
"pxor %%mm7, %%mm7 \n\t"
"lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t"
".balign 16 \n\t"
"1: \n\t"
PREFETCH" 64(%0, %%"REG_b") \n\t"
"movd (%0, %%"REG_b"), %%mm0 \n\t"
"movd 3(%0, %%"REG_b"), %%mm1 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"movd 6(%0, %%"REG_b"), %%mm2 \n\t"
"movd 9(%0, %%"REG_b"), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"pmaddwd %%mm6, %%mm0 \n\t"
"pmaddwd %%mm6, %%mm1 \n\t"
"pmaddwd %%mm6, %%mm2 \n\t"
"pmaddwd %%mm6, %%mm3 \n\t"
#ifndef FAST_BGR2YV12
"psrad $8, %%mm0 \n\t"
"psrad $8, %%mm1 \n\t"
"psrad $8, %%mm2 \n\t"
"psrad $8, %%mm3 \n\t"
#endif
"packssdw %%mm1, %%mm0 \n\t"
"packssdw %%mm3, %%mm2 \n\t"
"pmaddwd %%mm5, %%mm0 \n\t"
"pmaddwd %%mm5, %%mm2 \n\t"
"packssdw %%mm2, %%mm0 \n\t"
"psraw $7, %%mm0 \n\t"
"movd 12(%0, %%"REG_b"), %%mm4 \n\t"
"movd 15(%0, %%"REG_b"), %%mm1 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"movd 18(%0, %%"REG_b"), %%mm2 \n\t"
"movd 21(%0, %%"REG_b"), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"pmaddwd %%mm6, %%mm4 \n\t"
"pmaddwd %%mm6, %%mm1 \n\t"
"pmaddwd %%mm6, %%mm2 \n\t"
"pmaddwd %%mm6, %%mm3 \n\t"
#ifndef FAST_BGR2YV12
"psrad $8, %%mm4 \n\t"
"psrad $8, %%mm1 \n\t"
"psrad $8, %%mm2 \n\t"
"psrad $8, %%mm3 \n\t"
#endif
"packssdw %%mm1, %%mm4 \n\t"
"packssdw %%mm3, %%mm2 \n\t"
"pmaddwd %%mm5, %%mm4 \n\t"
"pmaddwd %%mm5, %%mm2 \n\t"
"add $24, %%"REG_b" \n\t"
"packssdw %%mm2, %%mm4 \n\t"
"psraw $7, %%mm4 \n\t"
"packuswb %%mm4, %%mm0 \n\t"
"paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t"
"movq %%mm0, (%1, %%"REG_a") \n\t"
"add $8, %%"REG_a" \n\t"
" js 1b \n\t"
: : "r" (src+width*3), "r" (dst+width), "g" ((long)-width)
: "%"REG_a, "%"REG_b
);
#else
int i;
for(i=0; i<width; i++)
{
int b= src[i*3+0];
int g= src[i*3+1];
int r= src[i*3+2];
dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
}
#endif
}
static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
#ifdef HAVE_MMX
asm volatile(
"mov %4, %%"REG_a" \n\t"
"movq "MANGLE(w1111)", %%mm5 \n\t"
"movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t"
"pxor %%mm7, %%mm7 \n\t"
"lea (%%"REG_a", %%"REG_a", 2), %%"REG_b" \n\t"
"add %%"REG_b", %%"REG_b" \n\t"
".balign 16 \n\t"
"1: \n\t"
PREFETCH" 64(%0, %%"REG_b") \n\t"
PREFETCH" 64(%1, %%"REG_b") \n\t"
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
"movq (%0, %%"REG_b"), %%mm0 \n\t"
"movq (%1, %%"REG_b"), %%mm1 \n\t"
"movq 6(%0, %%"REG_b"), %%mm2 \n\t"
"movq 6(%1, %%"REG_b"), %%mm3 \n\t"
PAVGB(%%mm1, %%mm0)
PAVGB(%%mm3, %%mm2)
"movq %%mm0, %%mm1 \n\t"
"movq %%mm2, %%mm3 \n\t"
"psrlq $24, %%mm0 \n\t"
"psrlq $24, %%mm2 \n\t"
PAVGB(%%mm1, %%mm0)
PAVGB(%%mm3, %%mm2)
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
#else
"movd (%0, %%"REG_b"), %%mm0 \n\t"
"movd (%1, %%"REG_b"), %%mm1 \n\t"
"movd 3(%0, %%"REG_b"), %%mm2 \n\t"
"movd 3(%1, %%"REG_b"), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"paddw %%mm1, %%mm0 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm2, %%mm0 \n\t"
"movd 6(%0, %%"REG_b"), %%mm4 \n\t"
"movd 6(%1, %%"REG_b"), %%mm1 \n\t"
"movd 9(%0, %%"REG_b"), %%mm2 \n\t"
"movd 9(%1, %%"REG_b"), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"paddw %%mm1, %%mm4 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm4, %%mm2 \n\t"
"psrlw $2, %%mm0 \n\t"
"psrlw $2, %%mm2 \n\t"
#endif
"movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
"movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
"pmaddwd %%mm0, %%mm1 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
"pmaddwd %%mm6, %%mm0 \n\t"
"pmaddwd %%mm6, %%mm2 \n\t"
#ifndef FAST_BGR2YV12
"psrad $8, %%mm0 \n\t"
"psrad $8, %%mm1 \n\t"
"psrad $8, %%mm2 \n\t"
"psrad $8, %%mm3 \n\t"
#endif
"packssdw %%mm2, %%mm0 \n\t"
"packssdw %%mm3, %%mm1 \n\t"
"pmaddwd %%mm5, %%mm0 \n\t"
"pmaddwd %%mm5, %%mm1 \n\t"
"packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
"psraw $7, %%mm0 \n\t"
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
"movq 12(%0, %%"REG_b"), %%mm4 \n\t"
"movq 12(%1, %%"REG_b"), %%mm1 \n\t"
"movq 18(%0, %%"REG_b"), %%mm2 \n\t"
"movq 18(%1, %%"REG_b"), %%mm3 \n\t"
PAVGB(%%mm1, %%mm4)
PAVGB(%%mm3, %%mm2)
"movq %%mm4, %%mm1 \n\t"
"movq %%mm2, %%mm3 \n\t"
"psrlq $24, %%mm4 \n\t"
"psrlq $24, %%mm2 \n\t"
PAVGB(%%mm1, %%mm4)
PAVGB(%%mm3, %%mm2)
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
#else
"movd 12(%0, %%"REG_b"), %%mm4 \n\t"
"movd 12(%1, %%"REG_b"), %%mm1 \n\t"
"movd 15(%0, %%"REG_b"), %%mm2 \n\t"
"movd 15(%1, %%"REG_b"), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"paddw %%mm1, %%mm4 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm2, %%mm4 \n\t"
"movd 18(%0, %%"REG_b"), %%mm5 \n\t"
"movd 18(%1, %%"REG_b"), %%mm1 \n\t"
"movd 21(%0, %%"REG_b"), %%mm2 \n\t"
"movd 21(%1, %%"REG_b"), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"paddw %%mm1, %%mm5 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm5, %%mm2 \n\t"
"movq "MANGLE(w1111)", %%mm5 \n\t"
"psrlw $2, %%mm4 \n\t"
"psrlw $2, %%mm2 \n\t"
#endif
"movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
"movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
"pmaddwd %%mm4, %%mm1 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
"pmaddwd %%mm6, %%mm4 \n\t"
"pmaddwd %%mm6, %%mm2 \n\t"
#ifndef FAST_BGR2YV12
"psrad $8, %%mm4 \n\t"
"psrad $8, %%mm1 \n\t"
"psrad $8, %%mm2 \n\t"
"psrad $8, %%mm3 \n\t"
#endif
"packssdw %%mm2, %%mm4 \n\t"
"packssdw %%mm3, %%mm1 \n\t"
"pmaddwd %%mm5, %%mm4 \n\t"
"pmaddwd %%mm5, %%mm1 \n\t"
"add $24, %%"REG_b" \n\t"
"packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
"psraw $7, %%mm4 \n\t"
"movq %%mm0, %%mm1 \n\t"
"punpckldq %%mm4, %%mm0 \n\t"
"punpckhdq %%mm4, %%mm1 \n\t"
"packsswb %%mm1, %%mm0 \n\t"
"paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t"
"movd %%mm0, (%2, %%"REG_a") \n\t"
"punpckhdq %%mm0, %%mm0 \n\t"
"movd %%mm0, (%3, %%"REG_a") \n\t"
"add $4, %%"REG_a" \n\t"
" js 1b \n\t"
: : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" ((long)-width)
: "%"REG_a, "%"REG_b
);
#else
int i;
for(i=0; i<width; i++)
{
int b= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
int r= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
}
#endif
}
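/* bgr16 stores a pixel as rrrrrggg gggbbbbb (5-6-5). The converter below
compensates for the reduced channel depth in the final shift: r and b are
5-bit (3 bits short of 8) and g is 6-bit (2 bits short), so with 2*RY, 2*BY
and plain GY the whole sum is uniformly 2 bits "small", and
>>(RGB2YUV_SHIFT-2) rescales it. bgr16ToUV likewise adds two pixels per
channel inside a single 32-bit word via the 0x07E0F81F/0x07C0F83F masks
before applying the same coefficients. */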
static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width)
{
int i;
for(i=0; i<width; i++)
{
int d= ((uint16_t*)src)[i];
int b= d&0x1F;
int g= (d>>5)&0x3F;
int r= (d>>11)&0x1F;
dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
}
}
static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
int i;
for(i=0; i<width; i++)
{
int d0= ((uint32_t*)src1)[i];
int d1= ((uint32_t*)src2)[i];
int dl= (d0&0x07E0F81F) + (d1&0x07E0F81F);
int dh= ((d0>>5)&0x07C0F83F) + ((d1>>5)&0x07C0F83F);
int dh2= (dh>>11) + (dh<<21);
int d= dh2 + dl;
int b= d&0x7F;
int r= (d>>11)&0x7F;
int g= d>>21;
dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
}
}
static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width)
{
int i;
for(i=0; i<width; i++)
{
int d= ((uint16_t*)src)[i];
int b= d&0x1F;
int g= (d>>5)&0x1F;
int r= (d>>10)&0x1F;
dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
}
}
static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
int i;
for(i=0; i<width; i++)
{
int d0= ((uint32_t*)src1)[i];
int d1= ((uint32_t*)src2)[i];
int dl= (d0&0x03E07C1F) + (d1&0x03E07C1F);
int dh= ((d0>>5)&0x03E0F81F) + ((d1>>5)&0x03E0F81F);
int dh2= (dh>>11) + (dh<<21);
int d= dh2 + dl;
int b= d&0x7F;
int r= (d>>10)&0x7F;
int g= d>>21;
dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
}
}
static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width)
{
int i;
for(i=0; i<width; i++)
{
int r= ((uint32_t*)src)[i]&0xFF;
int g= (((uint32_t*)src)[i]>>8)&0xFF;
int b= (((uint32_t*)src)[i]>>16)&0xFF;
dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
}
}
static inline void RENAME(rgb32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
int i;
for(i=0; i<width; i++)
{
const int a= ((uint32_t*)src1)[2*i+0];
const int e= ((uint32_t*)src1)[2*i+1];
const int c= ((uint32_t*)src2)[2*i+0];
const int d= ((uint32_t*)src2)[2*i+1];
const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF);
const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00);
const int r= l&0x3FF;
const int g= h>>8;
const int b= l>>16;
dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
}
}
static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, int width)
{
int i;
for(i=0; i<width; i++)
{
int r= src[i*3+0];
int g= src[i*3+1];
int b= src[i*3+2];
dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
}
}
static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
int i;
for(i=0; i<width; i++)
{
int r= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
int b= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
}
}
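/* hScale: generic horizontal resampler. For each output sample i it
convolves filterSize source pixels starting at filterPos[i] with the 16-bit
coefficients filter[i*filterSize .. i*filterSize+filterSize-1] and stores
the result as a clipped 15-bit value. In essence (this is the C fallback):

for(i=0; i<dstW; i++){
int j, val= 0;
for(j=0; j<filterSize; j++)
val += src[filterPos[i] + j] * filter[filterSize*i + j];
dst[i]= MIN(MAX(0, val>>7), (1<<15)-1);
}

The MMX paths below special-case filterSize 4 and 8 with unrolled loops. */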
// Bilinear / Bicubic scaling
static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
int16_t *filter, int16_t *filterPos, int filterSize)
{
#ifdef HAVE_MMX
assert(filterSize % 4 == 0 && filterSize>0);
if(filterSize==4) // always true for upscaling, sometimes for downscaling too
{
long counter= -2*dstW;
filter-= counter*2;
filterPos-= counter/2;
dst-= counter/2;
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"movq "MANGLE(w02)", %%mm6 \n\t"
"push %%"REG_BP" \n\t" // we use 7 regs here ...
"mov %%"REG_a", %%"REG_BP" \n\t"
".balign 16 \n\t"
"1: \n\t"
"movzwl (%2, %%"REG_BP"), %%eax \n\t"
"movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
"movq (%1, %%"REG_BP", 4), %%mm1\n\t"
"movq 8(%1, %%"REG_BP", 4), %%mm3\n\t"
"movd (%3, %%"REG_a"), %%mm0 \n\t"
"movd (%3, %%"REG_b"), %%mm2 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"pmaddwd %%mm1, %%mm0 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
"psrad $8, %%mm0 \n\t"
"psrad $8, %%mm3 \n\t"
"packssdw %%mm3, %%mm0 \n\t"
"pmaddwd %%mm6, %%mm0 \n\t"
"packssdw %%mm0, %%mm0 \n\t"
"movd %%mm0, (%4, %%"REG_BP") \n\t"
"add $4, %%"REG_BP" \n\t"
" jnc 1b \n\t"
"pop %%"REG_BP" \n\t"
: "+a" (counter)
: "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
: "%"REG_b
);
}
else if(filterSize==8)
{
long counter= -2*dstW;
filter-= counter*4;
filterPos-= counter/2;
dst-= counter/2;
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"movq "MANGLE(w02)", %%mm6 \n\t"
"push %%"REG_BP" \n\t" // we use 7 regs here ...
"mov %%"REG_a", %%"REG_BP" \n\t"
".balign 16 \n\t"
"1: \n\t"
"movzwl (%2, %%"REG_BP"), %%eax \n\t"
"movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
"movq (%1, %%"REG_BP", 8), %%mm1\n\t"
"movq 16(%1, %%"REG_BP", 8), %%mm3\n\t"
"movd (%3, %%"REG_a"), %%mm0 \n\t"
"movd (%3, %%"REG_b"), %%mm2 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"pmaddwd %%mm1, %%mm0 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
"movq 8(%1, %%"REG_BP", 8), %%mm1\n\t"
"movq 24(%1, %%"REG_BP", 8), %%mm5\n\t"
"movd 4(%3, %%"REG_a"), %%mm4 \n\t"
"movd 4(%3, %%"REG_b"), %%mm2 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"pmaddwd %%mm1, %%mm4 \n\t"
"pmaddwd %%mm2, %%mm5 \n\t"
"paddd %%mm4, %%mm0 \n\t"
"paddd %%mm5, %%mm3 \n\t"
"psrad $8, %%mm0 \n\t"
"psrad $8, %%mm3 \n\t"
"packssdw %%mm3, %%mm0 \n\t"
"pmaddwd %%mm6, %%mm0 \n\t"
"packssdw %%mm0, %%mm0 \n\t"
"movd %%mm0, (%4, %%"REG_BP") \n\t"
"add $4, %%"REG_BP" \n\t"
" jnc 1b \n\t"
"pop %%"REG_BP" \n\t"
: "+a" (counter)
: "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
: "%"REG_b
);
}
else
{
long counter= -2*dstW;
// filter-= counter*filterSize/2;
filterPos-= counter/2;
dst-= counter/2;
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"movq "MANGLE(w02)", %%mm6 \n\t"
".balign 16 \n\t"
"1: \n\t"
"mov %2, %%"REG_c" \n\t"
"movzwl (%%"REG_c", %0), %%eax \n\t"
"movzwl 2(%%"REG_c", %0), %%ebx \n\t"
"mov %5, %%"REG_c" \n\t"
"pxor %%mm4, %%mm4 \n\t"
"pxor %%mm5, %%mm5 \n\t"
"2: \n\t"
"movq (%1), %%mm1 \n\t"
"movq (%1, %6), %%mm3 \n\t"
"movd (%%"REG_c", %%"REG_a"), %%mm0\n\t"
"movd (%%"REG_c", %%"REG_b"), %%mm2\n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"pmaddwd %%mm1, %%mm0 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
"paddd %%mm3, %%mm5 \n\t"
"paddd %%mm0, %%mm4 \n\t"
"add $8, %1 \n\t"
"add $4, %%"REG_c" \n\t"
"cmp %4, %%"REG_c" \n\t"
" jb 2b \n\t"
"add %6, %1 \n\t"
"psrad $8, %%mm4 \n\t"
"psrad $8, %%mm5 \n\t"
"packssdw %%mm5, %%mm4 \n\t"
"pmaddwd %%mm6, %%mm4 \n\t"
"packssdw %%mm4, %%mm4 \n\t"
"mov %3, %%"REG_a" \n\t"
"movd %%mm4, (%%"REG_a", %0) \n\t"
"add $4, %0 \n\t"
" jnc 1b \n\t"
: "+r" (counter), "+r" (filter)
: "m" (filterPos), "m" (dst), "m"(src+filterSize),
"m" (src), "r" ((long)filterSize*2)
: "%"REG_b, "%"REG_a, "%"REG_c
);
}
#else
#ifdef HAVE_ALTIVEC
hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
#else
int i;
for(i=0; i<dstW; i++)
{
int j;
int srcPos= filterPos[i];
int val=0;
// printf("filterPos: %d\n", filterPos[i]);
for(j=0; j<filterSize; j++)
{
// printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
val += ((int)src[srcPos + j])*filter[filterSize*i + j];
}
// filter += hFilterSize;
dst[i] = MIN(MAX(0, val>>7), (1<<15)-1); // the cubic equation does overflow ...
// dst[i] = val>>7;
}
#endif
#endif
}
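/* hyscale first converts one packed input line to planar luma via the *ToY
helpers above (if needed), then scales it horizontally, either through the
generic hScale() or through the fast bilinear path. The fast path keeps the
source position in 16.16 fixed point and interpolates with a 7-bit fraction;
per output sample it computes (exactly the C fallback at the bottom):

xx = xpos >> 16; // integer source position
xalpha = (xpos & 0xFFFF) >> 9; // 7-bit fractional part
dst[i] = (src[xx] << 7) + (src[xx+1] - src[xx]) * xalpha;
*/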
// *** horizontal scale Y line to temp buffer
static inline void RENAME(hyscale)(uint16_t *dst, int dstWidth, uint8_t *src, int srcW, int xInc,
int flags, int canMMX2BeUsed, int16_t *hLumFilter,
int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
int32_t *mmx2FilterPos)
{
if(srcFormat==IMGFMT_YUY2)
{
RENAME(yuy2ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_UYVY)
{
RENAME(uyvyToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_BGR32)
{
RENAME(bgr32ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_BGR24)
{
RENAME(bgr24ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_BGR16)
{
RENAME(bgr16ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_BGR15)
{
RENAME(bgr15ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_RGB32)
{
RENAME(rgb32ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_RGB24)
{
RENAME(rgb24ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
#ifdef HAVE_MMX
// use the new MMX scaler if MMX2 can't be used (it's faster than the x86 asm one)
if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
#else
if(!(flags&SWS_FAST_BILINEAR))
#endif
{
RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
}
else // Fast Bilinear upscale / crap downscale
{
#if defined(ARCH_X86) || defined(ARCH_X86_64)
#ifdef HAVE_MMX2
int i;
if(canMMX2BeUsed)
{
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"mov %0, %%"REG_c" \n\t"
"mov %1, %%"REG_D" \n\t"
"mov %2, %%"REG_d" \n\t"
"mov %3, %%"REG_b" \n\t"
"xor %%"REG_a", %%"REG_a" \n\t" // i
PREFETCH" (%%"REG_c") \n\t"
PREFETCH" 32(%%"REG_c") \n\t"
PREFETCH" 64(%%"REG_c") \n\t"
#ifdef ARCH_X86_64
#define FUNNY_Y_CODE \
"movl (%%"REG_b"), %%esi \n\t"\
"call *%4 \n\t"\
"movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
"add %%"REG_S", %%"REG_c" \n\t"\
"add %%"REG_a", %%"REG_D" \n\t"\
"xor %%"REG_a", %%"REG_a" \n\t"\
#else
#define FUNNY_Y_CODE \
"movl (%%"REG_b"), %%esi \n\t"\
"call *%4 \n\t"\
"addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
"add %%"REG_a", %%"REG_D" \n\t"\
"xor %%"REG_a", %%"REG_a" \n\t"\
#endif
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
:: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
"m" (funnyYCode)
: "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
);
for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
}
else
{
#endif
// no MMX, just plain x86 asm ...
asm volatile(
"xor %%"REG_a", %%"REG_a" \n\t" // i
"xor %%"REG_b", %%"REG_b" \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
".balign 16 \n\t"
"1: \n\t"
"movzbl (%0, %%"REG_b"), %%edi \n\t" //src[xx]
"movzbl 1(%0, %%"REG_b"), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"mov %1, %%"REG_D" \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
"adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry
"movzbl (%0, %%"REG_b"), %%edi \n\t" //src[xx]
"movzbl 1(%0, %%"REG_b"), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"mov %1, %%"REG_D" \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, 2(%%"REG_D", %%"REG_a", 2)\n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
"adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry
"add $2, %%"REG_a" \n\t"
"cmp %2, %%"REG_a" \n\t"
" jb 1b \n\t"
:: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF)
: "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi"
);
#ifdef HAVE_MMX2
} //if MMX2 can't be used
#endif
#else
int i;
unsigned int xpos=0;
for(i=0;i<dstWidth;i++)
{
register unsigned int xx=xpos>>16;
register unsigned int xalpha=(xpos&0xFFFF)>>9;
dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
xpos+=xInc;
}
#endif
}
}
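/* hcscale is the chroma counterpart of hyscale: it handles both chroma
planes at once, writing the scaled U samples to dst[0..] and the V samples
to dst[2048..]; the 2048-sample offset is this version's fixed chroma
stride inside the temporary buffers (see the dst+2048 / dst[i+2048] uses
below). */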
inline static void RENAME(hcscale)(uint16_t *dst, int dstWidth, uint8_t *src1, uint8_t *src2,
int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
int32_t *mmx2FilterPos)
{
if(srcFormat==IMGFMT_YUY2)
{
RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_UYVY)
{
RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_BGR32)
{
RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_BGR24)
{
RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_BGR16)
{
RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_BGR15)
{
RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_RGB32)
{
RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_RGB24)
{
RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(isGray(srcFormat))
{
return;
}
#ifdef HAVE_MMX
// use the new MMX scaler if MMX2 can't be used (it's faster than the x86 asm one)
if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
#else
if(!(flags&SWS_FAST_BILINEAR))
#endif
{
RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
}
else // Fast Bilinear upscale / crap downscale
{
#if defined(ARCH_X86) || defined(ARCH_X86_64)
#ifdef HAVE_MMX2
int i;
if(canMMX2BeUsed)
{
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"mov %0, %%"REG_c" \n\t"
"mov %1, %%"REG_D" \n\t"
"mov %2, %%"REG_d" \n\t"
"mov %3, %%"REG_b" \n\t"
"xor %%"REG_a", %%"REG_a" \n\t" // i
PREFETCH" (%%"REG_c") \n\t"
PREFETCH" 32(%%"REG_c") \n\t"
PREFETCH" 64(%%"REG_c") \n\t"
#ifdef ARCH_X86_64
#define FUNNY_UV_CODE \
"movl (%%"REG_b"), %%esi \n\t"\
"call *%4 \n\t"\
"movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
"add %%"REG_S", %%"REG_c" \n\t"\
"add %%"REG_a", %%"REG_D" \n\t"\
"xor %%"REG_a", %%"REG_a" \n\t"\
#else
#define FUNNY_UV_CODE \
"movl (%%"REG_b"), %%esi \n\t"\
"call *%4 \n\t"\
"addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
"add %%"REG_a", %%"REG_D" \n\t"\
"xor %%"REG_a", %%"REG_a" \n\t"\
#endif
FUNNY_UV_CODE
FUNNY_UV_CODE
FUNNY_UV_CODE
FUNNY_UV_CODE
"xor %%"REG_a", %%"REG_a" \n\t" // i
"mov %5, %%"REG_c" \n\t" // src
"mov %1, %%"REG_D" \n\t" // buf1
"add $4096, %%"REG_D" \n\t"
PREFETCH" (%%"REG_c") \n\t"
PREFETCH" 32(%%"REG_c") \n\t"
PREFETCH" 64(%%"REG_c") \n\t"
FUNNY_UV_CODE
FUNNY_UV_CODE
FUNNY_UV_CODE
FUNNY_UV_CODE
:: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
"m" (funnyUVCode), "m" (src2)
: "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
);
for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
{
// printf("%d %d %d\n", dstWidth, i, srcW);
dst[i] = src1[srcW-1]*128;
dst[i+2048] = src2[srcW-1]*128;
}
}
else
{
#endif
asm volatile(
"xor %%"REG_a", %%"REG_a" \n\t" // i
"xor %%"REG_b", %%"REG_b" \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
".balign 16 \n\t"
"1: \n\t"
"mov %0, %%"REG_S" \n\t"
"movzbl (%%"REG_S", %%"REG_b"), %%edi \n\t" //src[xx]
"movzbl 1(%%"REG_S", %%"REG_b"), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"mov %1, %%"REG_D" \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, (%%"REG_d", %%"REG_a", 2)\n\t"
"movzbl (%5, %%"REG_b"), %%edi \n\t" //src[xx]
"movzbl 1(%5, %%"REG_b"), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"mov %1, %%"REG_D" \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, 4096(%%"REG_D", %%"REG_a", 2)\n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
"adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry
"add $1, %%"REG_a" \n\t"
"cmp %2, %%"REG_a" \n\t"
" jb 1b \n\t"
:: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" ((long)(xInc>>16)), "m" ((xInc&0xFFFF)),
"r" (src2)
: "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi"
);
#ifdef HAVE_MMX2
} //if MMX2 can't be used
#endif
#else
int i;
unsigned int xpos=0;
for(i=0;i<dstWidth;i++)
{
register unsigned int xx=xpos>>16;
register unsigned int xalpha=(xpos&0xFFFF)>>9;
dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
/* slower
dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
*/
xpos+=xInc;
}
#endif
}
}
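/* swScale: top-level slice scaler. For every output line dstY it computes
the range of source lines the vertical filters need (vLumFilterPos[dstY] ..
+vLumFilterSize-1, analogously for chroma), horizontally scales any of those
lines not yet cached into the lumPixBuf/chrPixBuf ring buffers, and then
runs the vertical scale + output stage. If the current slice does not
contain enough source lines for dstY, the rest of the slice is buffered and
the loop breaks; the next slice continues where this one stopped, since the
changing state is stored back into the context. Returns the number of
output lines written. */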
static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dst[], int dstStride[]){
/* load a few things into local vars to make the code more readable and faster */
const int srcW= c->srcW;
const int dstW= c->dstW;
const int dstH= c->dstH;
const int chrDstW= c->chrDstW;
const int chrSrcW= c->chrSrcW;
const int lumXInc= c->lumXInc;
const int chrXInc= c->chrXInc;
const int dstFormat= c->dstFormat;
const int srcFormat= c->srcFormat;
const int flags= c->flags;
const int canMMX2BeUsed= c->canMMX2BeUsed;
int16_t *vLumFilterPos= c->vLumFilterPos;
int16_t *vChrFilterPos= c->vChrFilterPos;
int16_t *hLumFilterPos= c->hLumFilterPos;
int16_t *hChrFilterPos= c->hChrFilterPos;
int16_t *vLumFilter= c->vLumFilter;
int16_t *vChrFilter= c->vChrFilter;
int16_t *hLumFilter= c->hLumFilter;
int16_t *hChrFilter= c->hChrFilter;
int32_t *lumMmxFilter= c->lumMmxFilter;
int32_t *chrMmxFilter= c->chrMmxFilter;
const int vLumFilterSize= c->vLumFilterSize;
const int vChrFilterSize= c->vChrFilterSize;
const int hLumFilterSize= c->hLumFilterSize;
const int hChrFilterSize= c->hChrFilterSize;
int16_t **lumPixBuf= c->lumPixBuf;
int16_t **chrPixBuf= c->chrPixBuf;
const int vLumBufSize= c->vLumBufSize;
const int vChrBufSize= c->vChrBufSize;
uint8_t *funnyYCode= c->funnyYCode;
uint8_t *funnyUVCode= c->funnyUVCode;
uint8_t *formatConvBuffer= c->formatConvBuffer;
const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
int lastDstY;
/* vars which will change and which we need to store back in the context */
int dstY= c->dstY;
int lumBufIndex= c->lumBufIndex;
int chrBufIndex= c->chrBufIndex;
int lastInLumBuf= c->lastInLumBuf;
int lastInChrBuf= c->lastInChrBuf;
if(isPacked(c->srcFormat)){
src[0]=
src[1]=
src[2]= src[0];
srcStride[0]=
srcStride[1]=
srcStride[2]= srcStride[0];
}
srcStride[1]<<= c->vChrDrop;
srcStride[2]<<= c->vChrDrop;
// printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
// (int)dst[0], (int)dst[1], (int)dst[2]);
#if 0 //self test FIXME move to a vfilter or something
{
static volatile int i=0;
i++;
if(srcFormat==IMGFMT_YV12 && i==1 && srcSliceH>= c->srcH)
selfTest(src, srcStride, c->srcW, c->srcH);
i--;
}
#endif
//printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
//dstStride[0],dstStride[1],dstStride[2]);
if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
{
static int firstTime=1; //FIXME move this into the context perhaps
if(flags & SWS_PRINT_INFO && firstTime)
{
MSG_WARN("SwScaler: Warning: dstStride is not aligned!\n"
"SwScaler: ->cannot do aligned memory accesses anymore\n");
firstTime=0;
}
}
/* Note: the user might start scaling in the middle of the picture, so this will not get
executed. That is not really intended, but it currently works, so people might rely on it. */
if(srcSliceY ==0){
lumBufIndex=0;
chrBufIndex=0;
dstY=0;
lastInLumBuf= -1;
lastInChrBuf= -1;
}
lastDstY= dstY;
for(;dstY < dstH; dstY++){
unsigned char *dest =dst[0]+dstStride[0]*dstY;
const int chrDstY= dstY>>c->chrDstVSubSample;
unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
//printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
// dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample);
//handle holes (FAST_BILINEAR & weird filters)
if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
//printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1)
ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1)
// Do we have enough lines in this slice to output the dstY line?
if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
{
//Do horizontal scaling
while(lastInLumBuf < lastLumSrcY)
{
uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
lumBufIndex++;
// printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
ASSERT(lumBufIndex < 2*vLumBufSize)
ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
// printf("%d %d\n", lumBufIndex, vLumBufSize);
RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
funnyYCode, c->srcFormat, formatConvBuffer,
c->lumMmx2Filter, c->lumMmx2FilterPos);
lastInLumBuf++;
}
while(lastInChrBuf < lastChrSrcY)
{
uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
chrBufIndex++;
ASSERT(chrBufIndex < 2*vChrBufSize)
ASSERT(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH))
ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
//FIXME replace parameters through context struct (some at least)
if(!(isGray(srcFormat) || isGray(dstFormat)))
RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
funnyUVCode, c->srcFormat, formatConvBuffer,
c->chrMmx2Filter, c->chrMmx2FilterPos);
lastInChrBuf++;
}
//wrap buf index around to stay inside the ring buffer
if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
}
else // not enough lines left in this slice -> load the rest in the buffer
{
/* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
vChrBufSize, vLumBufSize);*/
//Do horizontal scaling
while(lastInLumBuf+1 < srcSliceY + srcSliceH)
{
uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
lumBufIndex++;
ASSERT(lumBufIndex < 2*vLumBufSize)
ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
funnyYCode, c->srcFormat, formatConvBuffer,
c->lumMmx2Filter, c->lumMmx2FilterPos);
lastInLumBuf++;
}
while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
{
uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
chrBufIndex++;
ASSERT(chrBufIndex < 2*vChrBufSize)
ASSERT(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH)
ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
if(!(isGray(srcFormat) || isGray(dstFormat)))
RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
funnyUVCode, c->srcFormat, formatConvBuffer,
c->chrMmx2Filter, c->chrMmx2FilterPos);
lastInChrBuf++;
}
//wrap buf index around to stay inside the ring buffer
if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
break; //we can't output a dstY line so let's try with the next slice
}
#ifdef HAVE_MMX
b5Dither= dither8[dstY&1];
g6Dither= dither4[dstY&1];
g5Dither= dither8[dstY&1];
r5Dither= dither8[(dstY+1)&1];
#endif
if(dstY < dstH-2)
{
int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
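/* Build the per-line MMX filter descriptors consumed by the MMX vertical
scalers: each four-int32 entry holds the source-line pointer in element 0
and the 16-bit vertical coefficient replicated as two copies in elements
2 and 3 (coeff*0x10001), so a single movq/pmulhw pair can apply it to
four samples at once. */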
#ifdef HAVE_MMX
int i;
for(i=0; i<vLumFilterSize; i++)
{
lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
lumMmxFilter[4*i+2]=
lumMmxFilter[4*i+3]=
((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
}
for(i=0; i<vChrFilterSize; i++)
{
chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
chrMmxFilter[4*i+2]=
chrMmxFilter[4*i+3]=
((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
}
#endif
if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 like
{
const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12
{
int16_t *lumBuf = lumPixBuf[0];
int16_t *chrBuf= chrPixBuf[0];
RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
}
else //General YV12
{
RENAME(yuv2yuvX)(c,
vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
dest, uDest, vDest, dstW, chrDstW);
}
}
else
{
ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB
{
int chrAlpha= vChrFilter[2*dstY+1];
RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
dest, dstW, chrAlpha, dstFormat, flags, dstY);
}
else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB
{
int lumAlpha= vLumFilter[2*dstY+1];
int chrAlpha= vChrFilter[2*dstY+1];
RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
dest, dstW, lumAlpha, chrAlpha, dstY);
}
else //General RGB
{
RENAME(yuv2packedX)(c,
vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
dest, dstW, dstY);
}
}
}
else // hmm looks like we can't use MMX here without overwriting this array's tail
{
int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12
{
const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
yuv2yuvXinC(
vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
dest, uDest, vDest, dstW, chrDstW);
}
else
{
ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
yuv2packedXinC(c,
vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
dest, dstW, dstY);
}
}
}
#ifdef HAVE_MMX
__asm __volatile(SFENCE:::"memory");
__asm __volatile(EMMS:::"memory");
#endif
/* store changed local vars back in the context */
c->dstY= dstY;
c->lumBufIndex= lumBufIndex;
c->chrBufIndex= chrBufIndex;
c->lastInLumBuf= lastInLumBuf;
c->lastInChrBuf= lastInChrBuf;
return dstY - lastDstY;
}