  1. /*
  2. Copyright (C) 2001-2002 Michael Niedermayer <michaelni@gmx.at>
  3. This program is free software; you can redistribute it and/or modify
  4. it under the terms of the GNU General Public License as published by
  5. the Free Software Foundation; either version 2 of the License, or
  6. (at your option) any later version.
  7. This program is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU General Public License for more details.
  11. You should have received a copy of the GNU General Public License
  12. along with this program; if not, write to the Free Software
  13. Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  14. */
  15. #undef MOVNTQ
  16. #undef PAVGB
  17. #undef PREFETCH
  18. #undef PREFETCHW
  19. #undef EMMS
  20. #undef SFENCE
  21. #ifdef HAVE_3DNOW
  22. /* On K6, femms is faster than emms. On K7, femms is directly mapped to emms. */
  23. #define EMMS "femms"
  24. #else
  25. #define EMMS "emms"
  26. #endif
  27. #ifdef HAVE_3DNOW
  28. #define PREFETCH "prefetch"
  29. #define PREFETCHW "prefetchw"
  30. #elif defined ( HAVE_MMX2 )
  31. #define PREFETCH "prefetchnta"
  32. #define PREFETCHW "prefetcht0"
  33. #else
  34. #define PREFETCH "/nop"
  35. #define PREFETCHW "/nop"
  36. #endif
  37. #ifdef HAVE_MMX2
  38. #define SFENCE "sfence"
  39. #else
  40. #define SFENCE "/nop"
  41. #endif
  42. #ifdef HAVE_MMX2
  43. #define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
  44. #elif defined (HAVE_3DNOW)
  45. #define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
  46. #endif
  47. #ifdef HAVE_MMX2
  48. #define MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
  49. #else
  50. #define MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
  51. #endif
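/* YSCALEYUV2YV12X: vertical multi-tap filter for one plane.  %%edx counts
   the filter taps up from a negative index to zero; for each group of 8
   output pixels the 16-bit source samples are multiplied by the per-tap
   coefficient (pmulhw), accumulated, shifted right by 3 and packed to
   unsigned bytes before being stored with MOVNTQ. */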
  52. #define YSCALEYUV2YV12X(x) \
  53. "xorl %%eax, %%eax \n\t"\
  54. "pxor %%mm3, %%mm3 \n\t"\
  55. "pxor %%mm4, %%mm4 \n\t"\
  56. "movl %0, %%edx \n\t"\
  57. ".balign 16 \n\t" /* FIXME Unroll? */\
  58. "1: \n\t"\
  59. "movl (%1, %%edx, 4), %%esi \n\t"\
  60. "movq (%2, %%edx, 8), %%mm0 \n\t" /* filterCoeff */\
  61. "movq " #x "(%%esi, %%eax, 2), %%mm2 \n\t" /* srcData */\
  62. "movq 8+" #x "(%%esi, %%eax, 2), %%mm5 \n\t" /* srcData */\
  63. "pmulhw %%mm0, %%mm2 \n\t"\
  64. "pmulhw %%mm0, %%mm5 \n\t"\
  65. "paddw %%mm2, %%mm3 \n\t"\
  66. "paddw %%mm5, %%mm4 \n\t"\
  67. "addl $1, %%edx \n\t"\
  68. " jnz 1b \n\t"\
  69. "psraw $3, %%mm3 \n\t"\
  70. "psraw $3, %%mm4 \n\t"\
  71. "packuswb %%mm4, %%mm3 \n\t"\
  72. MOVNTQ(%%mm3, (%3, %%eax))\
  73. "addl $8, %%eax \n\t"\
  74. "cmpl %4, %%eax \n\t"\
  75. "pxor %%mm3, %%mm3 \n\t"\
  76. "pxor %%mm4, %%mm4 \n\t"\
  77. "movl %0, %%edx \n\t"\
  78. "jb 1b \n\t"
  79. #define YSCALEYUV2YV121 \
  80. "movl %2, %%eax \n\t"\
  81. ".balign 16 \n\t" /* FIXME Unroll? */\
  82. "1: \n\t"\
  83. "movq (%0, %%eax, 2), %%mm0 \n\t"\
  84. "movq 8(%0, %%eax, 2), %%mm1 \n\t"\
  85. "psraw $7, %%mm0 \n\t"\
  86. "psraw $7, %%mm1 \n\t"\
  87. "packuswb %%mm1, %%mm0 \n\t"\
  88. MOVNTQ(%%mm0, (%1, %%eax))\
  89. "addl $8, %%eax \n\t"\
  90. "jnc 1b \n\t"
  91. /*
  92. :: "m" (-lumFilterSize), "m" (-chrFilterSize),
  93. "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
  94. "r" (dest), "m" (dstW),
  95. "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
  96. : "%eax", "%ebx", "%ecx", "%edx", "%esi"
  97. */
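/* YSCALEYUV2PACKEDX: vertically filter the chroma (result in mm3/mm4) and
   luma (mm1/mm7) samples for one group of 8 pixels.  YSCALEYUV2RGBX below
   additionally converts them to B/G/R byte vectors using the yCoeff,
   ubCoeff, ugCoeff, vgCoeff and vrCoeff constants. */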
  98. #define YSCALEYUV2PACKEDX \
  99. "xorl %%eax, %%eax \n\t"\
  100. ".balign 16 \n\t"\
  101. "1: \n\t"\
  102. "movl %1, %%edx \n\t" /* -chrFilterSize */\
  103. "movl %3, %%ebx \n\t" /* chrMmxFilter+chrFilterSize */\
  104. "movl %7, %%ecx \n\t" /* chrSrc+chrFilterSize */\
  105. "pxor %%mm3, %%mm3 \n\t"\
  106. "pxor %%mm4, %%mm4 \n\t"\
  107. "2: \n\t"\
  108. "movl (%%ecx, %%edx, 4), %%esi \n\t"\
  109. "movq (%%ebx, %%edx, 8), %%mm0 \n\t" /* filterCoeff */\
  110. "movq (%%esi, %%eax), %%mm2 \n\t" /* UsrcData */\
  111. "movq 4096(%%esi, %%eax), %%mm5 \n\t" /* VsrcData */\
  112. "pmulhw %%mm0, %%mm2 \n\t"\
  113. "pmulhw %%mm0, %%mm5 \n\t"\
  114. "paddw %%mm2, %%mm3 \n\t"\
  115. "paddw %%mm5, %%mm4 \n\t"\
  116. "addl $1, %%edx \n\t"\
  117. " jnz 2b \n\t"\
  118. \
  119. "movl %0, %%edx \n\t" /* -lumFilterSize */\
  120. "movl %2, %%ebx \n\t" /* lumMmxFilter+lumFilterSize */\
  121. "movl %6, %%ecx \n\t" /* lumSrc+lumFilterSize */\
  122. "pxor %%mm1, %%mm1 \n\t"\
  123. "pxor %%mm7, %%mm7 \n\t"\
  124. "2: \n\t"\
  125. "movl (%%ecx, %%edx, 4), %%esi \n\t"\
  126. "movq (%%ebx, %%edx, 8), %%mm0 \n\t" /* filterCoeff */\
  127. "movq (%%esi, %%eax, 2), %%mm2 \n\t" /* Y1srcData */\
  128. "movq 8(%%esi, %%eax, 2), %%mm5 \n\t" /* Y2srcData */\
  129. "pmulhw %%mm0, %%mm2 \n\t"\
  130. "pmulhw %%mm0, %%mm5 \n\t"\
  131. "paddw %%mm2, %%mm1 \n\t"\
  132. "paddw %%mm5, %%mm7 \n\t"\
  133. "addl $1, %%edx \n\t"\
  134. " jnz 2b \n\t"\
  135. #define YSCALEYUV2RGBX \
  136. YSCALEYUV2PACKEDX\
  137. "psubw "MANGLE(w400)", %%mm3 \n\t" /* (U-128)8*/\
  138. "psubw "MANGLE(w400)", %%mm4 \n\t" /* (V-128)8*/\
  139. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  140. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  141. "pmulhw "MANGLE(ugCoeff)", %%mm3\n\t"\
  142. "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
  143. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  144. "pmulhw "MANGLE(ubCoeff)", %%mm2\n\t"\
  145. "pmulhw "MANGLE(vrCoeff)", %%mm5\n\t"\
  146. "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
  147. "psubw "MANGLE(w80)", %%mm7 \n\t" /* 8(Y-16)*/\
  148. "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
  149. "pmulhw "MANGLE(yCoeff)", %%mm7 \n\t"\
  150. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  151. "paddw %%mm3, %%mm4 \n\t"\
  152. "movq %%mm2, %%mm0 \n\t"\
  153. "movq %%mm5, %%mm6 \n\t"\
  154. "movq %%mm4, %%mm3 \n\t"\
  155. "punpcklwd %%mm2, %%mm2 \n\t"\
  156. "punpcklwd %%mm5, %%mm5 \n\t"\
  157. "punpcklwd %%mm4, %%mm4 \n\t"\
  158. "paddw %%mm1, %%mm2 \n\t"\
  159. "paddw %%mm1, %%mm5 \n\t"\
  160. "paddw %%mm1, %%mm4 \n\t"\
  161. "punpckhwd %%mm0, %%mm0 \n\t"\
  162. "punpckhwd %%mm6, %%mm6 \n\t"\
  163. "punpckhwd %%mm3, %%mm3 \n\t"\
  164. "paddw %%mm7, %%mm0 \n\t"\
  165. "paddw %%mm7, %%mm6 \n\t"\
  166. "paddw %%mm7, %%mm3 \n\t"\
  167. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  168. "packuswb %%mm0, %%mm2 \n\t"\
  169. "packuswb %%mm6, %%mm5 \n\t"\
  170. "packuswb %%mm3, %%mm4 \n\t"\
  171. "pxor %%mm7, %%mm7 \n\t"
  172. #define FULL_YSCALEYUV2RGB \
  173. "pxor %%mm7, %%mm7 \n\t"\
  174. "movd %6, %%mm6 \n\t" /*yalpha1*/\
  175. "punpcklwd %%mm6, %%mm6 \n\t"\
  176. "punpcklwd %%mm6, %%mm6 \n\t"\
  177. "movd %7, %%mm5 \n\t" /*uvalpha1*/\
  178. "punpcklwd %%mm5, %%mm5 \n\t"\
  179. "punpcklwd %%mm5, %%mm5 \n\t"\
  180. "xorl %%eax, %%eax \n\t"\
  181. ".balign 16 \n\t"\
  182. "1: \n\t"\
  183. "movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
  184. "movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
  185. "movq (%2, %%eax,2), %%mm2 \n\t" /* uvbuf0[eax]*/\
  186. "movq (%3, %%eax,2), %%mm3 \n\t" /* uvbuf1[eax]*/\
  187. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  188. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  189. "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  190. "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  191. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  192. "movq 4096(%2, %%eax,2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  193. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  194. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  195. "movq 4096(%3, %%eax,2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
  196. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  197. "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  198. "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
  199. "psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\
  200. "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
  201. \
  202. \
  203. "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  204. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  205. "pmulhw "MANGLE(ubCoeff)", %%mm3\n\t"\
  206. "psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  207. "pmulhw "MANGLE(ugCoeff)", %%mm2\n\t"\
  208. "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  209. "psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\
  210. \
  211. \
  212. "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
  213. "pmulhw "MANGLE(vrCoeff)", %%mm0\n\t"\
  214. "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
  215. "paddw %%mm1, %%mm3 \n\t" /* B*/\
  216. "paddw %%mm1, %%mm0 \n\t" /* R*/\
  217. "packuswb %%mm3, %%mm3 \n\t"\
  218. \
  219. "packuswb %%mm0, %%mm0 \n\t"\
  220. "paddw %%mm4, %%mm2 \n\t"\
  221. "paddw %%mm2, %%mm1 \n\t" /* G*/\
  222. \
  223. "packuswb %%mm1, %%mm1 \n\t"
  224. #define YSCALEYUV2PACKED \
  225. "movd %6, %%mm6 \n\t" /*yalpha1*/\
  226. "punpcklwd %%mm6, %%mm6 \n\t"\
  227. "punpcklwd %%mm6, %%mm6 \n\t"\
  228. "psraw $3, %%mm6 \n\t"\
  229. "movq %%mm6, 3968(%2) \n\t"\
  230. "movd %7, %%mm5 \n\t" /*uvalpha1*/\
  231. "punpcklwd %%mm5, %%mm5 \n\t"\
  232. "punpcklwd %%mm5, %%mm5 \n\t"\
  233. "psraw $3, %%mm5 \n\t"\
  234. "movq %%mm5, 3976(%2) \n\t"\
  235. "xorl %%eax, %%eax \n\t"\
  236. ".balign 16 \n\t"\
  237. "1: \n\t"\
  238. "movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
  239. "movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
  240. "movq 4096(%2, %%eax), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  241. "movq 4096(%3, %%eax), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  242. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  243. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  244. "movq 3976(%2), %%mm0 \n\t"\
  245. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  246. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  247. "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  248. "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  249. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  250. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  251. "movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
  252. "movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
  253. "movq 8(%0, %%eax, 2), %%mm6 \n\t" /*buf0[eax]*/\
  254. "movq 8(%1, %%eax, 2), %%mm7 \n\t" /*buf1[eax]*/\
  255. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  256. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  257. "pmulhw 3968(%2), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  258. "pmulhw 3968(%2), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  259. "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  260. "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  261. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  262. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  263. #define YSCALEYUV2RGB \
  264. "movd %6, %%mm6 \n\t" /*yalpha1*/\
  265. "punpcklwd %%mm6, %%mm6 \n\t"\
  266. "punpcklwd %%mm6, %%mm6 \n\t"\
  267. "movq %%mm6, 3968(%2) \n\t"\
  268. "movd %7, %%mm5 \n\t" /*uvalpha1*/\
  269. "punpcklwd %%mm5, %%mm5 \n\t"\
  270. "punpcklwd %%mm5, %%mm5 \n\t"\
  271. "movq %%mm5, 3976(%2) \n\t"\
  272. "xorl %%eax, %%eax \n\t"\
  273. ".balign 16 \n\t"\
  274. "1: \n\t"\
  275. "movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
  276. "movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
  277. "movq 4096(%2, %%eax), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  278. "movq 4096(%3, %%eax), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  279. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  280. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  281. "movq 3976(%2), %%mm0 \n\t"\
  282. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  283. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  284. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  285. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  286. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  287. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  288. "psubw "MANGLE(w400)", %%mm3 \n\t" /* (U-128)8*/\
  289. "psubw "MANGLE(w400)", %%mm4 \n\t" /* (V-128)8*/\
  290. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  291. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  292. "pmulhw "MANGLE(ugCoeff)", %%mm3\n\t"\
  293. "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
  294. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  295. "movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
  296. "movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
  297. "movq 8(%0, %%eax, 2), %%mm6 \n\t" /*buf0[eax]*/\
  298. "movq 8(%1, %%eax, 2), %%mm7 \n\t" /*buf1[eax]*/\
  299. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  300. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  301. "pmulhw 3968(%2), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  302. "pmulhw 3968(%2), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  303. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  304. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  305. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  306. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  307. "pmulhw "MANGLE(ubCoeff)", %%mm2\n\t"\
  308. "pmulhw "MANGLE(vrCoeff)", %%mm5\n\t"\
  309. "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
  310. "psubw "MANGLE(w80)", %%mm7 \n\t" /* 8(Y-16)*/\
  311. "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
  312. "pmulhw "MANGLE(yCoeff)", %%mm7 \n\t"\
  313. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  314. "paddw %%mm3, %%mm4 \n\t"\
  315. "movq %%mm2, %%mm0 \n\t"\
  316. "movq %%mm5, %%mm6 \n\t"\
  317. "movq %%mm4, %%mm3 \n\t"\
  318. "punpcklwd %%mm2, %%mm2 \n\t"\
  319. "punpcklwd %%mm5, %%mm5 \n\t"\
  320. "punpcklwd %%mm4, %%mm4 \n\t"\
  321. "paddw %%mm1, %%mm2 \n\t"\
  322. "paddw %%mm1, %%mm5 \n\t"\
  323. "paddw %%mm1, %%mm4 \n\t"\
  324. "punpckhwd %%mm0, %%mm0 \n\t"\
  325. "punpckhwd %%mm6, %%mm6 \n\t"\
  326. "punpckhwd %%mm3, %%mm3 \n\t"\
  327. "paddw %%mm7, %%mm0 \n\t"\
  328. "paddw %%mm7, %%mm6 \n\t"\
  329. "paddw %%mm7, %%mm3 \n\t"\
  330. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  331. "packuswb %%mm0, %%mm2 \n\t"\
  332. "packuswb %%mm6, %%mm5 \n\t"\
  333. "packuswb %%mm3, %%mm4 \n\t"\
  334. "pxor %%mm7, %%mm7 \n\t"
  335. #define YSCALEYUV2PACKED1 \
  336. "xorl %%eax, %%eax \n\t"\
  337. ".balign 16 \n\t"\
  338. "1: \n\t"\
  339. "movq (%2, %%eax), %%mm3 \n\t" /* uvbuf0[eax]*/\
  340. "movq 4096(%2, %%eax), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  341. "psraw $7, %%mm3 \n\t" \
  342. "psraw $7, %%mm4 \n\t" \
  343. "movq (%0, %%eax, 2), %%mm1 \n\t" /*buf0[eax]*/\
  344. "movq 8(%0, %%eax, 2), %%mm7 \n\t" /*buf0[eax]*/\
  345. "psraw $7, %%mm1 \n\t" \
  346. "psraw $7, %%mm7 \n\t" \
  347. #define YSCALEYUV2RGB1 \
  348. "xorl %%eax, %%eax \n\t"\
  349. ".balign 16 \n\t"\
  350. "1: \n\t"\
  351. "movq (%2, %%eax), %%mm3 \n\t" /* uvbuf0[eax]*/\
  352. "movq 4096(%2, %%eax), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  353. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  354. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  355. "psubw "MANGLE(w400)", %%mm3 \n\t" /* (U-128)8*/\
  356. "psubw "MANGLE(w400)", %%mm4 \n\t" /* (V-128)8*/\
  357. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  358. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  359. "pmulhw "MANGLE(ugCoeff)", %%mm3\n\t"\
  360. "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
  361. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  362. "movq (%0, %%eax, 2), %%mm1 \n\t" /*buf0[eax]*/\
  363. "movq 8(%0, %%eax, 2), %%mm7 \n\t" /*buf0[eax]*/\
  364. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  365. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  366. "pmulhw "MANGLE(ubCoeff)", %%mm2\n\t"\
  367. "pmulhw "MANGLE(vrCoeff)", %%mm5\n\t"\
  368. "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
  369. "psubw "MANGLE(w80)", %%mm7 \n\t" /* 8(Y-16)*/\
  370. "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
  371. "pmulhw "MANGLE(yCoeff)", %%mm7 \n\t"\
  372. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  373. "paddw %%mm3, %%mm4 \n\t"\
  374. "movq %%mm2, %%mm0 \n\t"\
  375. "movq %%mm5, %%mm6 \n\t"\
  376. "movq %%mm4, %%mm3 \n\t"\
  377. "punpcklwd %%mm2, %%mm2 \n\t"\
  378. "punpcklwd %%mm5, %%mm5 \n\t"\
  379. "punpcklwd %%mm4, %%mm4 \n\t"\
  380. "paddw %%mm1, %%mm2 \n\t"\
  381. "paddw %%mm1, %%mm5 \n\t"\
  382. "paddw %%mm1, %%mm4 \n\t"\
  383. "punpckhwd %%mm0, %%mm0 \n\t"\
  384. "punpckhwd %%mm6, %%mm6 \n\t"\
  385. "punpckhwd %%mm3, %%mm3 \n\t"\
  386. "paddw %%mm7, %%mm0 \n\t"\
  387. "paddw %%mm7, %%mm6 \n\t"\
  388. "paddw %%mm7, %%mm3 \n\t"\
  389. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  390. "packuswb %%mm0, %%mm2 \n\t"\
  391. "packuswb %%mm6, %%mm5 \n\t"\
  392. "packuswb %%mm3, %%mm4 \n\t"\
  393. "pxor %%mm7, %%mm7 \n\t"
  394. #define YSCALEYUV2PACKED1b \
  395. "xorl %%eax, %%eax \n\t"\
  396. ".balign 16 \n\t"\
  397. "1: \n\t"\
  398. "movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
  399. "movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
  400. "movq 4096(%2, %%eax), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  401. "movq 4096(%3, %%eax), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  402. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  403. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  404. "psrlw $8, %%mm3 \n\t" \
  405. "psrlw $8, %%mm4 \n\t" \
  406. "movq (%0, %%eax, 2), %%mm1 \n\t" /*buf0[eax]*/\
  407. "movq 8(%0, %%eax, 2), %%mm7 \n\t" /*buf0[eax]*/\
  408. "psraw $7, %%mm1 \n\t" \
  409. "psraw $7, %%mm7 \n\t"
  410. // do vertical chrominance interpolation
  411. #define YSCALEYUV2RGB1b \
  412. "xorl %%eax, %%eax \n\t"\
  413. ".balign 16 \n\t"\
  414. "1: \n\t"\
  415. "movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
  416. "movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
  417. "movq 4096(%2, %%eax), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  418. "movq 4096(%3, %%eax), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  419. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  420. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  421. "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
  422. "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
  423. "psubw "MANGLE(w400)", %%mm3 \n\t" /* (U-128)8*/\
  424. "psubw "MANGLE(w400)", %%mm4 \n\t" /* (V-128)8*/\
  425. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  426. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  427. "pmulhw "MANGLE(ugCoeff)", %%mm3\n\t"\
  428. "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
  429. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  430. "movq (%0, %%eax, 2), %%mm1 \n\t" /*buf0[eax]*/\
  431. "movq 8(%0, %%eax, 2), %%mm7 \n\t" /*buf0[eax]*/\
  432. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  433. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  434. "pmulhw "MANGLE(ubCoeff)", %%mm2\n\t"\
  435. "pmulhw "MANGLE(vrCoeff)", %%mm5\n\t"\
  436. "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
  437. "psubw "MANGLE(w80)", %%mm7 \n\t" /* 8(Y-16)*/\
  438. "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
  439. "pmulhw "MANGLE(yCoeff)", %%mm7 \n\t"\
  440. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  441. "paddw %%mm3, %%mm4 \n\t"\
  442. "movq %%mm2, %%mm0 \n\t"\
  443. "movq %%mm5, %%mm6 \n\t"\
  444. "movq %%mm4, %%mm3 \n\t"\
  445. "punpcklwd %%mm2, %%mm2 \n\t"\
  446. "punpcklwd %%mm5, %%mm5 \n\t"\
  447. "punpcklwd %%mm4, %%mm4 \n\t"\
  448. "paddw %%mm1, %%mm2 \n\t"\
  449. "paddw %%mm1, %%mm5 \n\t"\
  450. "paddw %%mm1, %%mm4 \n\t"\
  451. "punpckhwd %%mm0, %%mm0 \n\t"\
  452. "punpckhwd %%mm6, %%mm6 \n\t"\
  453. "punpckhwd %%mm3, %%mm3 \n\t"\
  454. "paddw %%mm7, %%mm0 \n\t"\
  455. "paddw %%mm7, %%mm6 \n\t"\
  456. "paddw %%mm7, %%mm3 \n\t"\
  457. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  458. "packuswb %%mm0, %%mm2 \n\t"\
  459. "packuswb %%mm6, %%mm5 \n\t"\
  460. "packuswb %%mm3, %%mm4 \n\t"\
  461. "pxor %%mm7, %%mm7 \n\t"
  462. #define WRITEBGR32 \
  463. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  464. "movq %%mm2, %%mm1 \n\t" /* B */\
  465. "movq %%mm5, %%mm6 \n\t" /* R */\
  466. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  467. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  468. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  469. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  470. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  471. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  472. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  473. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  474. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  475. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  476. \
  477. MOVNTQ(%%mm0, (%4, %%eax, 4))\
  478. MOVNTQ(%%mm2, 8(%4, %%eax, 4))\
  479. MOVNTQ(%%mm1, 16(%4, %%eax, 4))\
  480. MOVNTQ(%%mm3, 24(%4, %%eax, 4))\
  481. \
  482. "addl $8, %%eax \n\t"\
  483. "cmpl %5, %%eax \n\t"\
  484. " jb 1b \n\t"
  485. #define WRITEBGR16 \
  486. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  487. "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
  488. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  489. "psrlq $3, %%mm2 \n\t"\
  490. \
  491. "movq %%mm2, %%mm1 \n\t"\
  492. "movq %%mm4, %%mm3 \n\t"\
  493. \
  494. "punpcklbw %%mm7, %%mm3 \n\t"\
  495. "punpcklbw %%mm5, %%mm2 \n\t"\
  496. "punpckhbw %%mm7, %%mm4 \n\t"\
  497. "punpckhbw %%mm5, %%mm1 \n\t"\
  498. \
  499. "psllq $3, %%mm3 \n\t"\
  500. "psllq $3, %%mm4 \n\t"\
  501. \
  502. "por %%mm3, %%mm2 \n\t"\
  503. "por %%mm4, %%mm1 \n\t"\
  504. \
  505. MOVNTQ(%%mm2, (%4, %%eax, 2))\
  506. MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
  507. \
  508. "addl $8, %%eax \n\t"\
  509. "cmpl %5, %%eax \n\t"\
  510. " jb 1b \n\t"
  511. #define WRITEBGR15 \
  512. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  513. "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
  514. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  515. "psrlq $3, %%mm2 \n\t"\
  516. "psrlq $1, %%mm5 \n\t"\
  517. \
  518. "movq %%mm2, %%mm1 \n\t"\
  519. "movq %%mm4, %%mm3 \n\t"\
  520. \
  521. "punpcklbw %%mm7, %%mm3 \n\t"\
  522. "punpcklbw %%mm5, %%mm2 \n\t"\
  523. "punpckhbw %%mm7, %%mm4 \n\t"\
  524. "punpckhbw %%mm5, %%mm1 \n\t"\
  525. \
  526. "psllq $2, %%mm3 \n\t"\
  527. "psllq $2, %%mm4 \n\t"\
  528. \
  529. "por %%mm3, %%mm2 \n\t"\
  530. "por %%mm4, %%mm1 \n\t"\
  531. \
  532. MOVNTQ(%%mm2, (%4, %%eax, 2))\
  533. MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
  534. \
  535. "addl $8, %%eax \n\t"\
  536. "cmpl %5, %%eax \n\t"\
  537. " jb 1b \n\t"
  538. #define WRITEBGR24OLD \
  539. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  540. "movq %%mm2, %%mm1 \n\t" /* B */\
  541. "movq %%mm5, %%mm6 \n\t" /* R */\
  542. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  543. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  544. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  545. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  546. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  547. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  548. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  549. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  550. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  551. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  552. \
  553. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  554. "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
  555. "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 0 */\
  556. "pand "MANGLE(bm11111000)", %%mm0\n\t" /* 00RGB000 0.5 */\
  557. "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
  558. "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
  559. "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
  560. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  561. \
  562. "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  563. "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
  564. "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
  565. "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
  566. "pand "MANGLE(bm00001111)", %%mm2\n\t" /* 0000RGBR 1 */\
  567. "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
  568. "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
  569. "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 2 */\
  570. "pand "MANGLE(bm11111000)", %%mm1\n\t" /* 00RGB000 2.5 */\
  571. "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
  572. "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
  573. "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
  574. "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
  575. \
  576. "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
  577. "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
  578. "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
  579. "pand "MANGLE(bm00000111)", %%mm5\n\t" /* 00000RGB 3 */\
  580. "pand "MANGLE(bm11111000)", %%mm3\n\t" /* 00RGB000 3.5 */\
  581. "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
  582. "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
  583. "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
  584. \
  585. MOVNTQ(%%mm0, (%%ebx))\
  586. MOVNTQ(%%mm2, 8(%%ebx))\
  587. MOVNTQ(%%mm3, 16(%%ebx))\
  588. "addl $24, %%ebx \n\t"\
  589. \
  590. "addl $8, %%eax \n\t"\
  591. "cmpl %5, %%eax \n\t"\
  592. " jb 1b \n\t"
  593. #define WRITEBGR24MMX \
  594. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  595. "movq %%mm2, %%mm1 \n\t" /* B */\
  596. "movq %%mm5, %%mm6 \n\t" /* R */\
  597. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  598. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  599. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  600. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  601. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  602. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  603. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  604. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  605. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  606. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  607. \
  608. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  609. "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
  610. "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
  611. "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
  612. \
  613. "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
  614. "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
  615. "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
  616. "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
  617. \
  618. "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
  619. "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
  620. "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
  621. "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
  622. \
  623. "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
  624. "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
  625. "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
  626. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  627. MOVNTQ(%%mm0, (%%ebx))\
  628. \
  629. "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
  630. "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
  631. "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
  632. "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
  633. MOVNTQ(%%mm6, 8(%%ebx))\
  634. \
  635. "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
  636. "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
  637. "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
  638. MOVNTQ(%%mm5, 16(%%ebx))\
  639. \
  640. "addl $24, %%ebx \n\t"\
  641. \
  642. "addl $8, %%eax \n\t"\
  643. "cmpl %5, %%eax \n\t"\
  644. " jb 1b \n\t"
  645. #define WRITEBGR24MMX2 \
  646. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  647. "movq "MANGLE(M24A)", %%mm0 \n\t"\
  648. "movq "MANGLE(M24C)", %%mm7 \n\t"\
  649. "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
  650. "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
  651. "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
  652. \
  653. "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
  654. "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
  655. "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
  656. \
  657. "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
  658. "por %%mm1, %%mm6 \n\t"\
  659. "por %%mm3, %%mm6 \n\t"\
  660. MOVNTQ(%%mm6, (%%ebx))\
  661. \
  662. "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
  663. "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
  664. "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
  665. "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
  666. \
  667. "pand "MANGLE(M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
  668. "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
  669. "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
  670. \
  671. "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
  672. "por %%mm3, %%mm6 \n\t"\
  673. MOVNTQ(%%mm6, 8(%%ebx))\
  674. \
  675. "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
  676. "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
  677. "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
  678. \
  679. "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
  680. "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
  681. "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
  682. \
  683. "por %%mm1, %%mm3 \n\t"\
  684. "por %%mm3, %%mm6 \n\t"\
  685. MOVNTQ(%%mm6, 16(%%ebx))\
  686. \
  687. "addl $24, %%ebx \n\t"\
  688. \
  689. "addl $8, %%eax \n\t"\
  690. "cmpl %5, %%eax \n\t"\
  691. " jb 1b \n\t"
  692. #ifdef HAVE_MMX2
  693. #undef WRITEBGR24
  694. #define WRITEBGR24 WRITEBGR24MMX2
  695. #else
  696. #undef WRITEBGR24
  697. #define WRITEBGR24 WRITEBGR24MMX
  698. #endif
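/* WRITEYUY2: pack the filtered Y (mm1/mm7), U (mm3) and V (mm4) values down
   to bytes and interleave them into YUYV order before storing. */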
  699. #define WRITEYUY2 \
  700. "packuswb %%mm3, %%mm3 \n\t"\
  701. "packuswb %%mm4, %%mm4 \n\t"\
  702. "packuswb %%mm7, %%mm1 \n\t"\
  703. "punpcklbw %%mm4, %%mm3 \n\t"\
  704. "movq %%mm1, %%mm7 \n\t"\
  705. "punpcklbw %%mm3, %%mm1 \n\t"\
  706. "punpckhbw %%mm3, %%mm7 \n\t"\
  707. \
  708. MOVNTQ(%%mm1, (%4, %%eax, 2))\
  709. MOVNTQ(%%mm7, 8(%4, %%eax, 2))\
  710. \
  711. "addl $8, %%eax \n\t"\
  712. "cmpl %5, %%eax \n\t"\
  713. " jb 1b \n\t"
  714. static inline void RENAME(yuv2yuvX)(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  715. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  716. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW,
  717. int16_t * lumMmxFilter, int16_t * chrMmxFilter)
  718. {
  719. #ifdef HAVE_MMX
  720. if(uDest != NULL)
  721. {
  722. asm volatile(
  723. YSCALEYUV2YV12X(0)
  724. :: "m" (-chrFilterSize), "r" (chrSrc+chrFilterSize),
  725. "r" (chrMmxFilter+chrFilterSize*4), "r" (uDest), "m" (chrDstW)
  726. : "%eax", "%edx", "%esi"
  727. );
  728. asm volatile(
  729. YSCALEYUV2YV12X(4096)
  730. :: "m" (-chrFilterSize), "r" (chrSrc+chrFilterSize),
  731. "r" (chrMmxFilter+chrFilterSize*4), "r" (vDest), "m" (chrDstW)
  732. : "%eax", "%edx", "%esi"
  733. );
  734. }
  735. asm volatile(
  736. YSCALEYUV2YV12X(0)
  737. :: "m" (-lumFilterSize), "r" (lumSrc+lumFilterSize),
  738. "r" (lumMmxFilter+lumFilterSize*4), "r" (dest), "m" (dstW)
  739. : "%eax", "%edx", "%esi"
  740. );
  741. #else
  742. yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
  743. chrFilter, chrSrc, chrFilterSize,
  744. dest, uDest, vDest, dstW, chrDstW);
  745. #endif
  746. }
  747. static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
  748. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
  749. {
  750. #ifdef HAVE_MMX
  751. if(uDest != NULL)
  752. {
  753. asm volatile(
  754. YSCALEYUV2YV121
  755. :: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
  756. "g" (-chrDstW)
  757. : "%eax"
  758. );
  759. asm volatile(
  760. YSCALEYUV2YV121
  761. :: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
  762. "g" (-chrDstW)
  763. : "%eax"
  764. );
  765. }
  766. asm volatile(
  767. YSCALEYUV2YV121
  768. :: "r" (lumSrc + dstW), "r" (dest + dstW),
  769. "g" (-dstW)
  770. : "%eax"
  771. );
  772. #else
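/* C fallback: the >>7 scales the 16-bit intermediate samples back to 8 bits;
   the &256 test catches values outside 0..255 so only those take the
   clipping branches. */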
  773. int i;
  774. for(i=0; i<dstW; i++)
  775. {
  776. int val= lumSrc[i]>>7;
  777. if(val&256){
  778. if(val<0) val=0;
  779. else val=255;
  780. }
  781. dest[i]= val;
  782. }
  783. if(uDest != NULL)
  784. for(i=0; i<chrDstW; i++)
  785. {
  786. int u=chrSrc[i]>>7;
  787. int v=chrSrc[i + 2048]>>7;
  788. if((u|v)&256){
  789. if(u<0) u=0;
  790. else if (u>255) u=255;
  791. if(v<0) v=0;
  792. else if (v>255) v=255;
  793. }
  794. uDest[i]= u;
  795. vDest[i]= v;
  796. }
  797. #endif
  798. }
  799. /**
  800. * vertically scale YV12 and convert to RGB
  801. */
  802. static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  803. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  804. uint8_t *dest, int dstW, int16_t * lumMmxFilter, int16_t * chrMmxFilter, int dstY)
  805. {
  806. switch(c->dstFormat)
  807. {
  808. #ifdef HAVE_MMX
  809. case IMGFMT_BGR32:
  810. {
  811. asm volatile(
  812. YSCALEYUV2RGBX
  813. WRITEBGR32
  814. :: "m" (-lumFilterSize), "m" (-chrFilterSize),
  815. "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
  816. "r" (dest), "m" (dstW),
  817. "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
  818. : "%eax", "%ebx", "%ecx", "%edx", "%esi"
  819. );
  820. }
  821. break;
  822. case IMGFMT_BGR24:
  823. {
  824. asm volatile(
  825. YSCALEYUV2RGBX
  826. "leal (%%eax, %%eax, 2), %%ebx \n\t" //FIXME optimize
  827. "addl %4, %%ebx \n\t"
  828. WRITEBGR24
  829. :: "m" (-lumFilterSize), "m" (-chrFilterSize),
  830. "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
  831. "r" (dest), "m" (dstW),
  832. "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
  833. : "%eax", "%ebx", "%ecx", "%edx", "%esi"
  834. );
  835. }
  836. break;
  837. case IMGFMT_BGR15:
  838. {
  839. asm volatile(
  840. YSCALEYUV2RGBX
  841. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  842. #ifdef DITHER1XBPP
  843. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  844. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  845. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  846. #endif
  847. WRITEBGR15
  848. :: "m" (-lumFilterSize), "m" (-chrFilterSize),
  849. "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
  850. "r" (dest), "m" (dstW),
  851. "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
  852. : "%eax", "%ebx", "%ecx", "%edx", "%esi"
  853. );
  854. }
  855. break;
  856. case IMGFMT_BGR16:
  857. {
  858. asm volatile(
  859. YSCALEYUV2RGBX
  860. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  861. #ifdef DITHER1XBPP
  862. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  863. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  864. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  865. #endif
  866. WRITEBGR16
  867. :: "m" (-lumFilterSize), "m" (-chrFilterSize),
  868. "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
  869. "r" (dest), "m" (dstW),
  870. "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
  871. : "%eax", "%ebx", "%ecx", "%edx", "%esi"
  872. );
  873. }
  874. break;
  875. case IMGFMT_YUY2:
  876. {
  877. asm volatile(
  878. YSCALEYUV2PACKEDX
  879. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  880. "psraw $3, %%mm3 \n\t"
  881. "psraw $3, %%mm4 \n\t"
  882. "psraw $3, %%mm1 \n\t"
  883. "psraw $3, %%mm7 \n\t"
  884. WRITEYUY2
  885. :: "m" (-lumFilterSize), "m" (-chrFilterSize),
  886. "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
  887. "r" (dest), "m" (dstW),
  888. "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
  889. : "%eax", "%ebx", "%ecx", "%edx", "%esi"
  890. );
  891. }
  892. break;
  893. #endif
  894. default:
  895. yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
  896. chrFilter, chrSrc, chrFilterSize,
  897. dest, dstW, dstY);
  898. break;
  899. }
  900. }
  901. /**
  902. * vertically scale YV12 with bilinear interpolation and convert to RGB
  903. */
  904. static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
  905. uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
  906. {
  907. int yalpha1=yalpha^4095;
  908. int uvalpha1=uvalpha^4095;
  909. int i;
  910. #if 0 // isn't used
  911. if(flags&SWS_FULL_CHR_H_INT)
  912. {
  913. switch(dstFormat)
  914. {
  915. #ifdef HAVE_MMX
  916. case IMGFMT_BGR32:
  917. asm volatile(
  918. FULL_YSCALEYUV2RGB
  919. "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
  920. "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
  921. "movq %%mm3, %%mm1 \n\t"
  922. "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
  923. "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
  924. MOVNTQ(%%mm3, (%4, %%eax, 4))
  925. MOVNTQ(%%mm1, 8(%4, %%eax, 4))
  926. "addl $4, %%eax \n\t"
  927. "cmpl %5, %%eax \n\t"
  928. " jb 1b \n\t"
  929. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  930. "m" (yalpha1), "m" (uvalpha1)
  931. : "%eax"
  932. );
  933. break;
  934. case IMGFMT_BGR24:
  935. asm volatile(
  936. FULL_YSCALEYUV2RGB
  937. // lsb ... msb
  938. "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
  939. "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
  940. "movq %%mm3, %%mm1 \n\t"
  941. "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
  942. "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
  943. "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
  944. "psrlq $8, %%mm3 \n\t" // GR0BGR00
  945. "pand "MANGLE(bm00000111)", %%mm2\n\t" // BGR00000
  946. "pand "MANGLE(bm11111000)", %%mm3\n\t" // 000BGR00
  947. "por %%mm2, %%mm3 \n\t" // BGRBGR00
  948. "movq %%mm1, %%mm2 \n\t"
  949. "psllq $48, %%mm1 \n\t" // 000000BG
  950. "por %%mm1, %%mm3 \n\t" // BGRBGRBG
  951. "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
  952. "psrld $16, %%mm2 \n\t" // R000R000
  953. "psrlq $24, %%mm1 \n\t" // 0BGR0000
  954. "por %%mm2, %%mm1 \n\t" // RBGRR000
  955. "movl %4, %%ebx \n\t"
  956. "addl %%eax, %%ebx \n\t"
  957. #ifdef HAVE_MMX2
  958. //FIXME Alignment
  959. "movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
  960. "movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
  961. #else
  962. "movd %%mm3, (%%ebx, %%eax, 2) \n\t"
  963. "psrlq $32, %%mm3 \n\t"
  964. "movd %%mm3, 4(%%ebx, %%eax, 2) \n\t"
  965. "movd %%mm1, 8(%%ebx, %%eax, 2) \n\t"
  966. #endif
  967. "addl $4, %%eax \n\t"
  968. "cmpl %5, %%eax \n\t"
  969. " jb 1b \n\t"
  970. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
  971. "m" (yalpha1), "m" (uvalpha1)
  972. : "%eax", "%ebx"
  973. );
  974. break;
  975. case IMGFMT_BGR15:
  976. asm volatile(
  977. FULL_YSCALEYUV2RGB
  978. #ifdef DITHER1XBPP
  979. "paddusb "MANGLE(g5Dither)", %%mm1\n\t"
  980. "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
  981. "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
  982. #endif
  983. "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
  984. "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
  985. "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
  986. "psrlw $3, %%mm3 \n\t"
  987. "psllw $2, %%mm1 \n\t"
  988. "psllw $7, %%mm0 \n\t"
  989. "pand "MANGLE(g15Mask)", %%mm1 \n\t"
  990. "pand "MANGLE(r15Mask)", %%mm0 \n\t"
  991. "por %%mm3, %%mm1 \n\t"
  992. "por %%mm1, %%mm0 \n\t"
  993. MOVNTQ(%%mm0, (%4, %%eax, 2))
  994. "addl $4, %%eax \n\t"
  995. "cmpl %5, %%eax \n\t"
  996. " jb 1b \n\t"
  997. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  998. "m" (yalpha1), "m" (uvalpha1)
  999. : "%eax"
  1000. );
  1001. break;
  1002. case IMGFMT_BGR16:
  1003. asm volatile(
  1004. FULL_YSCALEYUV2RGB
  1005. #ifdef DITHER1XBPP
  1006. "paddusb "MANGLE(g6Dither)", %%mm1\n\t"
  1007. "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
  1008. "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
  1009. #endif
  1010. "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
  1011. "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
  1012. "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
  1013. "psrlw $3, %%mm3 \n\t"
  1014. "psllw $3, %%mm1 \n\t"
  1015. "psllw $8, %%mm0 \n\t"
  1016. "pand "MANGLE(g16Mask)", %%mm1 \n\t"
  1017. "pand "MANGLE(r16Mask)", %%mm0 \n\t"
  1018. "por %%mm3, %%mm1 \n\t"
  1019. "por %%mm1, %%mm0 \n\t"
  1020. MOVNTQ(%%mm0, (%4, %%eax, 2))
  1021. "addl $4, %%eax \n\t"
  1022. "cmpl %5, %%eax \n\t"
  1023. " jb 1b \n\t"
  1024. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1025. "m" (yalpha1), "m" (uvalpha1)
  1026. : "%eax"
  1027. );
  1028. break;
  1029. #endif
  1030. case IMGFMT_RGB32:
  1031. #ifndef HAVE_MMX
  1032. case IMGFMT_BGR32:
  1033. #endif
  1034. if(dstFormat==IMGFMT_BGR32)
  1035. {
  1036. int i;
  1037. #ifdef WORDS_BIGENDIAN
  1038. dest++;
  1039. #endif
  1040. for(i=0;i<dstW;i++){
  1041. // vertical linear interpolation && yuv2rgb in a single step:
  1042. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1043. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1044. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1045. dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
  1046. dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
  1047. dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
  1048. dest+= 4;
  1049. }
  1050. }
  1051. else if(dstFormat==IMGFMT_BGR24)
  1052. {
  1053. int i;
  1054. for(i=0;i<dstW;i++){
  1055. // vertical linear interpolation && yuv2rgb in a single step:
  1056. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1057. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1058. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1059. dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
  1060. dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
  1061. dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
  1062. dest+= 3;
  1063. }
  1064. }
  1065. else if(dstFormat==IMGFMT_BGR16)
  1066. {
  1067. int i;
  1068. for(i=0;i<dstW;i++){
  1069. // vertical linear interpolation && yuv2rgb in a single step:
  1070. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1071. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1072. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1073. ((uint16_t*)dest)[i] =
  1074. clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
  1075. clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
  1076. clip_table16r[(Y + yuvtab_3343[V]) >>13];
  1077. }
  1078. }
  1079. else if(dstFormat==IMGFMT_BGR15)
  1080. {
  1081. int i;
  1082. for(i=0;i<dstW;i++){
  1083. // vertical linear interpolation && yuv2rgb in a single step:
  1084. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1085. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1086. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1087. ((uint16_t*)dest)[i] =
  1088. clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
  1089. clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
  1090. clip_table15r[(Y + yuvtab_3343[V]) >>13];
  1091. }
  1092. }
  1093. }//FULL_UV_IPOL
  1094. else
  1095. {
  1096. #endif // if 0
  1097. #ifdef HAVE_MMX
  1098. switch(c->dstFormat)
  1099. {
  1100. case IMGFMT_BGR32:
  1101. asm volatile(
  1102. YSCALEYUV2RGB
  1103. WRITEBGR32
  1104. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1105. "m" (yalpha1), "m" (uvalpha1)
  1106. : "%eax"
  1107. );
  1108. return;
  1109. case IMGFMT_BGR24:
  1110. asm volatile(
  1111. "movl %4, %%ebx \n\t"
  1112. YSCALEYUV2RGB
  1113. WRITEBGR24
  1114. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
  1115. "m" (yalpha1), "m" (uvalpha1)
  1116. : "%eax", "%ebx"
  1117. );
  1118. return;
  1119. case IMGFMT_BGR15:
  1120. asm volatile(
  1121. YSCALEYUV2RGB
  1122. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1123. #ifdef DITHER1XBPP
  1124. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1125. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1126. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1127. #endif
  1128. WRITEBGR15
  1129. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1130. "m" (yalpha1), "m" (uvalpha1)
  1131. : "%eax"
  1132. );
  1133. return;
  1134. case IMGFMT_BGR16:
  1135. asm volatile(
  1136. YSCALEYUV2RGB
  1137. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1138. #ifdef DITHER1XBPP
  1139. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1140. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1141. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1142. #endif
  1143. WRITEBGR16
  1144. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1145. "m" (yalpha1), "m" (uvalpha1)
  1146. : "%eax"
  1147. );
  1148. return;
  1149. case IMGFMT_YUY2:
  1150. asm volatile(
  1151. YSCALEYUV2PACKED
  1152. WRITEYUY2
  1153. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1154. "m" (yalpha1), "m" (uvalpha1)
  1155. : "%eax"
  1156. );
  1157. return;
  1158. default: break;
  1159. }
  1160. #endif //HAVE_MMX
  1161. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C)
  1162. }
  1163. /**
  1164. * YV12 to RGB without scaling or interpolating
  1165. */
  1166. static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
  1167. uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
  1168. {
  1169. #ifdef HAVE_MMX
  1170. int uvalpha1=uvalpha^4095;
  1171. #endif
  1172. const int yalpha1=0;
  1173. int i;
  1174. uint16_t *buf1= buf0; //FIXME needed for the rgb1/bgr1
  1175. const int yalpha= 4096; //FIXME ...
  1176. if(flags&SWS_FULL_CHR_H_INT)
  1177. {
  1178. RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
  1179. return;
  1180. }
  1181. #ifdef HAVE_MMX
  1182. if( uvalpha < 2048 ) // note: this is not correct (it shifts chrominance by 0.5 pixels) but it's a bit faster
  1183. {
  1184. switch(dstFormat)
  1185. {
  1186. case IMGFMT_BGR32:
  1187. asm volatile(
  1188. YSCALEYUV2RGB1
  1189. WRITEBGR32
  1190. :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1191. "m" (yalpha1), "m" (uvalpha1)
  1192. : "%eax"
  1193. );
  1194. return;
  1195. case IMGFMT_BGR24:
  1196. asm volatile(
  1197. "movl %4, %%ebx \n\t"
  1198. YSCALEYUV2RGB1
  1199. WRITEBGR24
  1200. :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
  1201. "m" (yalpha1), "m" (uvalpha1)
  1202. : "%eax", "%ebx"
  1203. );
  1204. return;
  1205. case IMGFMT_BGR15:
  1206. asm volatile(
  1207. YSCALEYUV2RGB1
  1208. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1209. #ifdef DITHER1XBPP
  1210. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1211. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1212. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1213. #endif
  1214. WRITEBGR15
  1215. :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1216. "m" (yalpha1), "m" (uvalpha1)
  1217. : "%eax"
  1218. );
  1219. return;
  1220. case IMGFMT_BGR16:
  1221. asm volatile(
  1222. YSCALEYUV2RGB1
  1223. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1224. #ifdef DITHER1XBPP
  1225. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1226. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1227. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1228. #endif
  1229. WRITEBGR16
  1230. :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1231. "m" (yalpha1), "m" (uvalpha1)
  1232. : "%eax"
  1233. );
  1234. return;
  1235. case IMGFMT_YUY2:
  1236. asm volatile(
  1237. YSCALEYUV2PACKED1
  1238. WRITEYUY2
  1239. :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1240. "m" (yalpha1), "m" (uvalpha1)
  1241. : "%eax"
  1242. );
  1243. return;
  1244. }
  1245. }
  1246. else
  1247. {
  1248. switch(dstFormat)
  1249. {
  1250. case IMGFMT_BGR32:
  1251. asm volatile(
  1252. YSCALEYUV2RGB1b
  1253. WRITEBGR32
  1254. :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1255. "m" (yalpha1), "m" (uvalpha1)
  1256. : "%eax"
  1257. );
  1258. return;
  1259. case IMGFMT_BGR24:
  1260. asm volatile(
  1261. "movl %4, %%ebx \n\t"
  1262. YSCALEYUV2RGB1b
  1263. WRITEBGR24
  1264. :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
  1265. "m" (yalpha1), "m" (uvalpha1)
  1266. : "%eax", "%ebx"
  1267. );
  1268. return;
  1269. case IMGFMT_BGR15:
  1270. asm volatile(
  1271. YSCALEYUV2RGB1b
  1272. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1273. #ifdef DITHER1XBPP
  1274. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1275. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1276. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1277. #endif
  1278. WRITEBGR15
  1279. :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1280. "m" (yalpha1), "m" (uvalpha1)
  1281. : "%eax"
  1282. );
  1283. return;
  1284. case IMGFMT_BGR16:
  1285. asm volatile(
  1286. YSCALEYUV2RGB1b
  1287. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1288. #ifdef DITHER1XBPP
  1289. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1290. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1291. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1292. #endif
  1293. WRITEBGR16
  1294. :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1295. "m" (yalpha1), "m" (uvalpha1)
  1296. : "%eax"
  1297. );
  1298. return;
  1299. case IMGFMT_YUY2:
  1300. asm volatile(
  1301. YSCALEYUV2PACKED1b
  1302. WRITEYUY2
  1303. :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1304. "m" (yalpha1), "m" (uvalpha1)
  1305. : "%eax"
  1306. );
  1307. return;
  1308. }
  1309. }
  1310. #endif
  1311. if( uvalpha < 2048 )
  1312. {
  1313. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C)
  1314. }else{
  1315. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C)
  1316. }
  1317. }
  1318. //FIXME the yuy2* functions can read up to 7 samples too many
  1319. static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, int width)
  1320. {
  1321. #ifdef HAVE_MMX
  1322. asm volatile(
  1323. "movq "MANGLE(bm01010101)", %%mm2\n\t"
  1324. "movl %0, %%eax \n\t"
  1325. "1: \n\t"
  1326. "movq (%1, %%eax,2), %%mm0 \n\t"
  1327. "movq 8(%1, %%eax,2), %%mm1 \n\t"
  1328. "pand %%mm2, %%mm0 \n\t"
  1329. "pand %%mm2, %%mm1 \n\t"
  1330. "packuswb %%mm1, %%mm0 \n\t"
  1331. "movq %%mm0, (%2, %%eax) \n\t"
  1332. "addl $8, %%eax \n\t"
  1333. " js 1b \n\t"
  1334. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1335. : "%eax"
  1336. );
  1337. #else
  1338. int i;
  1339. for(i=0; i<width; i++)
  1340. dst[i]= src[2*i];
  1341. #endif
  1342. }
  1343. static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1344. {
  1345. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1346. asm volatile(
  1347. "movq "MANGLE(bm01010101)", %%mm4\n\t"
  1348. "movl %0, %%eax \n\t"
  1349. "1: \n\t"
  1350. "movq (%1, %%eax,4), %%mm0 \n\t"
  1351. "movq 8(%1, %%eax,4), %%mm1 \n\t"
  1352. "movq (%2, %%eax,4), %%mm2 \n\t"
  1353. "movq 8(%2, %%eax,4), %%mm3 \n\t"
  1354. PAVGB(%%mm2, %%mm0)
  1355. PAVGB(%%mm3, %%mm1)
  1356. "psrlw $8, %%mm0 \n\t"
  1357. "psrlw $8, %%mm1 \n\t"
  1358. "packuswb %%mm1, %%mm0 \n\t"
  1359. "movq %%mm0, %%mm1 \n\t"
  1360. "psrlw $8, %%mm0 \n\t"
  1361. "pand %%mm4, %%mm1 \n\t"
  1362. "packuswb %%mm0, %%mm0 \n\t"
  1363. "packuswb %%mm1, %%mm1 \n\t"
  1364. "movd %%mm0, (%4, %%eax) \n\t"
  1365. "movd %%mm1, (%3, %%eax) \n\t"
  1366. "addl $4, %%eax \n\t"
  1367. " js 1b \n\t"
  1368. : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
  1369. : "%eax"
  1370. );
  1371. #else
  1372. int i;
  1373. for(i=0; i<width; i++)
  1374. {
  1375. dstU[i]= (src1[4*i + 1] + src2[4*i + 1])>>1;
  1376. dstV[i]= (src1[4*i + 3] + src2[4*i + 3])>>1;
  1377. }
  1378. #endif
  1379. }
  1380. static inline void RENAME(bgr32ToY)(uint8_t *dst, uint8_t *src, int width)
  1381. {
  1382. #ifdef HAVE_MMXFIXME
  1383. #else
  1384. int i;
  1385. for(i=0; i<width; i++)
  1386. {
  1387. int b= src[i*4+0];
  1388. int g= src[i*4+1];
  1389. int r= src[i*4+2];
  1390. dst[i]= ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
  1391. }
  1392. #endif
  1393. }
  1394. static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1395. {
  1396. #ifdef HAVE_MMXFIXME
  1397. #else
  1398. int i;
  1399. for(i=0; i<width; i++)
  1400. {
  1401. int b= src1[8*i + 0] + src1[8*i + 4] + src2[8*i + 0] + src2[8*i + 4];
  1402. int g= src1[8*i + 1] + src1[8*i + 5] + src2[8*i + 1] + src2[8*i + 5];
  1403. int r= src1[8*i + 2] + src1[8*i + 6] + src2[8*i + 2] + src2[8*i + 6];
  1404. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1405. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1406. }
  1407. #endif
  1408. }
  1409. static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, int width)
  1410. {
  1411. #ifdef HAVE_MMX
  1412. asm volatile(
  1413. "movl %2, %%eax \n\t"
  1414. "movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t"
  1415. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1416. "pxor %%mm7, %%mm7 \n\t"
  1417. "leal (%%eax, %%eax, 2), %%ebx \n\t"
  1418. ".balign 16 \n\t"
  1419. "1: \n\t"
  1420. PREFETCH" 64(%0, %%ebx) \n\t"
  1421. "movd (%0, %%ebx), %%mm0 \n\t"
  1422. "movd 3(%0, %%ebx), %%mm1 \n\t"
  1423. "punpcklbw %%mm7, %%mm0 \n\t"
  1424. "punpcklbw %%mm7, %%mm1 \n\t"
  1425. "movd 6(%0, %%ebx), %%mm2 \n\t"
  1426. "movd 9(%0, %%ebx), %%mm3 \n\t"
  1427. "punpcklbw %%mm7, %%mm2 \n\t"
  1428. "punpcklbw %%mm7, %%mm3 \n\t"
  1429. "pmaddwd %%mm6, %%mm0 \n\t"
  1430. "pmaddwd %%mm6, %%mm1 \n\t"
  1431. "pmaddwd %%mm6, %%mm2 \n\t"
  1432. "pmaddwd %%mm6, %%mm3 \n\t"
  1433. #ifndef FAST_BGR2YV12
  1434. "psrad $8, %%mm0 \n\t"
  1435. "psrad $8, %%mm1 \n\t"
  1436. "psrad $8, %%mm2 \n\t"
  1437. "psrad $8, %%mm3 \n\t"
  1438. #endif
  1439. "packssdw %%mm1, %%mm0 \n\t"
  1440. "packssdw %%mm3, %%mm2 \n\t"
  1441. "pmaddwd %%mm5, %%mm0 \n\t"
  1442. "pmaddwd %%mm5, %%mm2 \n\t"
  1443. "packssdw %%mm2, %%mm0 \n\t"
  1444. "psraw $7, %%mm0 \n\t"
  1445. "movd 12(%0, %%ebx), %%mm4 \n\t"
  1446. "movd 15(%0, %%ebx), %%mm1 \n\t"
  1447. "punpcklbw %%mm7, %%mm4 \n\t"
  1448. "punpcklbw %%mm7, %%mm1 \n\t"
  1449. "movd 18(%0, %%ebx), %%mm2 \n\t"
  1450. "movd 21(%0, %%ebx), %%mm3 \n\t"
  1451. "punpcklbw %%mm7, %%mm2 \n\t"
  1452. "punpcklbw %%mm7, %%mm3 \n\t"
  1453. "pmaddwd %%mm6, %%mm4 \n\t"
  1454. "pmaddwd %%mm6, %%mm1 \n\t"
  1455. "pmaddwd %%mm6, %%mm2 \n\t"
  1456. "pmaddwd %%mm6, %%mm3 \n\t"
  1457. #ifndef FAST_BGR2YV12
  1458. "psrad $8, %%mm4 \n\t"
  1459. "psrad $8, %%mm1 \n\t"
  1460. "psrad $8, %%mm2 \n\t"
  1461. "psrad $8, %%mm3 \n\t"
  1462. #endif
  1463. "packssdw %%mm1, %%mm4 \n\t"
  1464. "packssdw %%mm3, %%mm2 \n\t"
  1465. "pmaddwd %%mm5, %%mm4 \n\t"
  1466. "pmaddwd %%mm5, %%mm2 \n\t"
  1467. "addl $24, %%ebx \n\t"
  1468. "packssdw %%mm2, %%mm4 \n\t"
  1469. "psraw $7, %%mm4 \n\t"
  1470. "packuswb %%mm4, %%mm0 \n\t"
  1471. "paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t"
  1472. "movq %%mm0, (%1, %%eax) \n\t"
  1473. "addl $8, %%eax \n\t"
  1474. " js 1b \n\t"
  1475. : : "r" (src+width*3), "r" (dst+width), "g" (-width)
  1476. : "%eax", "%ebx"
  1477. );
  1478. #else
  1479. int i;
  1480. for(i=0; i<width; i++)
  1481. {
  1482. int b= src[i*3+0];
  1483. int g= src[i*3+1];
  1484. int r= src[i*3+2];
  1485. dst[i]= ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
  1486. }
  1487. #endif
  1488. }
  1489. static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1490. {
  1491. #ifdef HAVE_MMX
  1492. asm volatile(
  1493. "movl %4, %%eax \n\t"
  1494. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1495. "movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t"
  1496. "pxor %%mm7, %%mm7 \n\t"
  1497. "leal (%%eax, %%eax, 2), %%ebx \n\t"
  1498. "addl %%ebx, %%ebx \n\t"
  1499. ".balign 16 \n\t"
  1500. "1: \n\t"
  1501. PREFETCH" 64(%0, %%ebx) \n\t"
  1502. PREFETCH" 64(%1, %%ebx) \n\t"
  1503. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1504. "movq (%0, %%ebx), %%mm0 \n\t"
  1505. "movq (%1, %%ebx), %%mm1 \n\t"
  1506. "movq 6(%0, %%ebx), %%mm2 \n\t"
  1507. "movq 6(%1, %%ebx), %%mm3 \n\t"
  1508. PAVGB(%%mm1, %%mm0)
  1509. PAVGB(%%mm3, %%mm2)
  1510. "movq %%mm0, %%mm1 \n\t"
  1511. "movq %%mm2, %%mm3 \n\t"
  1512. "psrlq $24, %%mm0 \n\t"
  1513. "psrlq $24, %%mm2 \n\t"
  1514. PAVGB(%%mm1, %%mm0)
  1515. PAVGB(%%mm3, %%mm2)
  1516. "punpcklbw %%mm7, %%mm0 \n\t"
  1517. "punpcklbw %%mm7, %%mm2 \n\t"
  1518. #else
  1519. "movd (%0, %%ebx), %%mm0 \n\t"
  1520. "movd (%1, %%ebx), %%mm1 \n\t"
  1521. "movd 3(%0, %%ebx), %%mm2 \n\t"
  1522. "movd 3(%1, %%ebx), %%mm3 \n\t"
  1523. "punpcklbw %%mm7, %%mm0 \n\t"
  1524. "punpcklbw %%mm7, %%mm1 \n\t"
  1525. "punpcklbw %%mm7, %%mm2 \n\t"
  1526. "punpcklbw %%mm7, %%mm3 \n\t"
  1527. "paddw %%mm1, %%mm0 \n\t"
  1528. "paddw %%mm3, %%mm2 \n\t"
  1529. "paddw %%mm2, %%mm0 \n\t"
  1530. "movd 6(%0, %%ebx), %%mm4 \n\t"
  1531. "movd 6(%1, %%ebx), %%mm1 \n\t"
  1532. "movd 9(%0, %%ebx), %%mm2 \n\t"
  1533. "movd 9(%1, %%ebx), %%mm3 \n\t"
  1534. "punpcklbw %%mm7, %%mm4 \n\t"
  1535. "punpcklbw %%mm7, %%mm1 \n\t"
  1536. "punpcklbw %%mm7, %%mm2 \n\t"
  1537. "punpcklbw %%mm7, %%mm3 \n\t"
  1538. "paddw %%mm1, %%mm4 \n\t"
  1539. "paddw %%mm3, %%mm2 \n\t"
  1540. "paddw %%mm4, %%mm2 \n\t"
  1541. "psrlw $2, %%mm0 \n\t"
  1542. "psrlw $2, %%mm2 \n\t"
  1543. #endif
  1544. "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
  1545. "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
  1546. "pmaddwd %%mm0, %%mm1 \n\t"
  1547. "pmaddwd %%mm2, %%mm3 \n\t"
  1548. "pmaddwd %%mm6, %%mm0 \n\t"
  1549. "pmaddwd %%mm6, %%mm2 \n\t"
  1550. #ifndef FAST_BGR2YV12
  1551. "psrad $8, %%mm0 \n\t"
  1552. "psrad $8, %%mm1 \n\t"
  1553. "psrad $8, %%mm2 \n\t"
  1554. "psrad $8, %%mm3 \n\t"
  1555. #endif
  1556. "packssdw %%mm2, %%mm0 \n\t"
  1557. "packssdw %%mm3, %%mm1 \n\t"
  1558. "pmaddwd %%mm5, %%mm0 \n\t"
  1559. "pmaddwd %%mm5, %%mm1 \n\t"
  1560. "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
  1561. "psraw $7, %%mm0 \n\t"
  1562. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1563. "movq 12(%0, %%ebx), %%mm4 \n\t"
  1564. "movq 12(%1, %%ebx), %%mm1 \n\t"
  1565. "movq 18(%0, %%ebx), %%mm2 \n\t"
  1566. "movq 18(%1, %%ebx), %%mm3 \n\t"
  1567. PAVGB(%%mm1, %%mm4)
  1568. PAVGB(%%mm3, %%mm2)
  1569. "movq %%mm4, %%mm1 \n\t"
  1570. "movq %%mm2, %%mm3 \n\t"
  1571. "psrlq $24, %%mm4 \n\t"
  1572. "psrlq $24, %%mm2 \n\t"
  1573. PAVGB(%%mm1, %%mm4)
  1574. PAVGB(%%mm3, %%mm2)
  1575. "punpcklbw %%mm7, %%mm4 \n\t"
  1576. "punpcklbw %%mm7, %%mm2 \n\t"
  1577. #else
  1578. "movd 12(%0, %%ebx), %%mm4 \n\t"
  1579. "movd 12(%1, %%ebx), %%mm1 \n\t"
  1580. "movd 15(%0, %%ebx), %%mm2 \n\t"
  1581. "movd 15(%1, %%ebx), %%mm3 \n\t"
  1582. "punpcklbw %%mm7, %%mm4 \n\t"
  1583. "punpcklbw %%mm7, %%mm1 \n\t"
  1584. "punpcklbw %%mm7, %%mm2 \n\t"
  1585. "punpcklbw %%mm7, %%mm3 \n\t"
  1586. "paddw %%mm1, %%mm4 \n\t"
  1587. "paddw %%mm3, %%mm2 \n\t"
  1588. "paddw %%mm2, %%mm4 \n\t"
  1589. "movd 18(%0, %%ebx), %%mm5 \n\t"
  1590. "movd 18(%1, %%ebx), %%mm1 \n\t"
  1591. "movd 21(%0, %%ebx), %%mm2 \n\t"
  1592. "movd 21(%1, %%ebx), %%mm3 \n\t"
  1593. "punpcklbw %%mm7, %%mm5 \n\t"
  1594. "punpcklbw %%mm7, %%mm1 \n\t"
  1595. "punpcklbw %%mm7, %%mm2 \n\t"
  1596. "punpcklbw %%mm7, %%mm3 \n\t"
  1597. "paddw %%mm1, %%mm5 \n\t"
  1598. "paddw %%mm3, %%mm2 \n\t"
  1599. "paddw %%mm5, %%mm2 \n\t"
  1600. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1601. "psrlw $2, %%mm4 \n\t"
  1602. "psrlw $2, %%mm2 \n\t"
  1603. #endif
  1604. "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
  1605. "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
  1606. "pmaddwd %%mm4, %%mm1 \n\t"
  1607. "pmaddwd %%mm2, %%mm3 \n\t"
  1608. "pmaddwd %%mm6, %%mm4 \n\t"
  1609. "pmaddwd %%mm6, %%mm2 \n\t"
  1610. #ifndef FAST_BGR2YV12
  1611. "psrad $8, %%mm4 \n\t"
  1612. "psrad $8, %%mm1 \n\t"
  1613. "psrad $8, %%mm2 \n\t"
  1614. "psrad $8, %%mm3 \n\t"
  1615. #endif
  1616. "packssdw %%mm2, %%mm4 \n\t"
  1617. "packssdw %%mm3, %%mm1 \n\t"
  1618. "pmaddwd %%mm5, %%mm4 \n\t"
  1619. "pmaddwd %%mm5, %%mm1 \n\t"
  1620. "addl $24, %%ebx \n\t"
  1621. "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
  1622. "psraw $7, %%mm4 \n\t"
  1623. "movq %%mm0, %%mm1 \n\t"
  1624. "punpckldq %%mm4, %%mm0 \n\t"
  1625. "punpckhdq %%mm4, %%mm1 \n\t"
  1626. "packsswb %%mm1, %%mm0 \n\t"
  1627. "paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t"
  1628. "movd %%mm0, (%2, %%eax) \n\t"
  1629. "punpckhdq %%mm0, %%mm0 \n\t"
  1630. "movd %%mm0, (%3, %%eax) \n\t"
  1631. "addl $4, %%eax \n\t"
  1632. " js 1b \n\t"
  1633. : : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width)
  1634. : "%eax", "%ebx"
  1635. );
  1636. #else
  1637. int i;
  1638. for(i=0; i<width; i++)
  1639. {
  1640. int b= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
  1641. int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
  1642. int r= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
  1643. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1644. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1645. }
  1646. #endif
  1647. }
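/* All of the *ToUV helpers average a 2x2 block of source pixels (two horizontal
   neighbours from each of the two input lines), which is why they shift by
   RGB2YUV_SHIFT+2 before adding the 128 chroma offset. The bgr16/bgr15 variants
   below additionally have to unpack the packed 5-6-5 / 5-5-5 bit fields first. */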
  1648. static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width)
  1649. {
  1650. int i;
  1651. for(i=0; i<width; i++)
  1652. {
  1653. int d= src[i*2] + (src[i*2+1]<<8);
  1654. int b= d&0x1F;
  1655. int g= (d>>5)&0x3F;
  1656. int r= (d>>11)&0x1F;
  1657. dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
  1658. }
  1659. }
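/* The enabled branch below avoids unpacking the 5-6-5 fields per pixel: each
   32-bit load holds two packed pixels, and masking with 0x07E0F81F / 0x07C0F83F
   selects complementary colour fields with spare bits between them, so the fields
   of several pixels can be summed with plain 32-bit adds and only separated into
   r, g and b at the end. The disabled #else branch is the straightforward
   (and equivalent) per-pixel version. */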
  1660. static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1661. {
  1662. int i;
  1663. for(i=0; i<width; i++)
  1664. {
  1665. #if 1
  1666. int d0= le2me_32( ((uint32_t*)src1)[i] );
  1667. int d1= le2me_32( ((uint32_t*)src2)[i] );
  1668. int dl= (d0&0x07E0F81F) + (d1&0x07E0F81F);
  1669. int dh= ((d0>>5)&0x07C0F83F) + ((d1>>5)&0x07C0F83F);
  1670. int dh2= (dh>>11) + (dh<<21);
  1671. int d= dh2 + dl;
  1672. int b= d&0x7F;
  1673. int r= (d>>11)&0x7F;
  1674. int g= d>>21;
  1675. #else
  1676. int d0= src1[i*4] + (src1[i*4+1]<<8);
  1677. int b0= d0&0x1F;
  1678. int g0= (d0>>5)&0x3F;
  1679. int r0= (d0>>11)&0x1F;
  1680. int d1= src1[i*4+2] + (src1[i*4+3]<<8);
  1681. int b1= d1&0x1F;
  1682. int g1= (d1>>5)&0x3F;
  1683. int r1= (d1>>11)&0x1F;
  1684. int d2= src2[i*4] + (src2[i*4+1]<<8);
  1685. int b2= d2&0x1F;
  1686. int g2= (d2>>5)&0x3F;
  1687. int r2= (d2>>11)&0x1F;
  1688. int d3= src2[i*4+2] + (src2[i*4+3]<<8);
  1689. int b3= d3&0x1F;
  1690. int g3= (d3>>5)&0x3F;
  1691. int r3= (d3>>11)&0x1F;
  1692. int b= b0 + b1 + b2 + b3;
  1693. int g= g0 + g1 + g2 + g3;
  1694. int r= r0 + r1 + r2 + r3;
  1695. #endif
  1696. dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
  1697. dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
  1698. }
  1699. }
  1700. static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width)
  1701. {
  1702. int i;
  1703. for(i=0; i<width; i++)
  1704. {
  1705. int d= src[i*2] + (src[i*2+1]<<8);
  1706. int b= d&0x1F;
  1707. int g= (d>>5)&0x1F;
  1708. int r= (d>>10)&0x1F;
  1709. dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
  1710. }
  1711. }
  1712. static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1713. {
  1714. int i;
  1715. for(i=0; i<width; i++)
  1716. {
  1717. #if 1
  1718. int d0= le2me_32( ((uint32_t*)src1)[i] );
  1719. int d1= le2me_32( ((uint32_t*)src2)[i] );
  1720. int dl= (d0&0x03E07C1F) + (d1&0x03E07C1F);
  1721. int dh= ((d0>>5)&0x03E0F81F) + ((d1>>5)&0x03E0F81F);
  1722. int dh2= (dh>>11) + (dh<<21);
  1723. int d= dh2 + dl;
  1724. int b= d&0x7F;
  1725. int r= (d>>10)&0x7F;
  1726. int g= d>>21;
  1727. #else
  1728. int d0= src1[i*4] + (src1[i*4+1]<<8);
  1729. int b0= d0&0x1F;
  1730. int g0= (d0>>5)&0x1F;
  1731. int r0= (d0>>10)&0x1F;
  1732. int d1= src1[i*4+2] + (src1[i*4+3]<<8);
  1733. int b1= d1&0x1F;
  1734. int g1= (d1>>5)&0x1F;
  1735. int r1= (d1>>10)&0x1F;
  1736. int d2= src2[i*4] + (src2[i*4+1]<<8);
  1737. int b2= d2&0x1F;
  1738. int g2= (d2>>5)&0x1F;
  1739. int r2= (d2>>10)&0x1F;
  1740. int d3= src2[i*4+2] + (src2[i*4+3]<<8);
  1741. int b3= d3&0x1F;
  1742. int g3= (d3>>5)&0x1F;
  1743. int r3= (d3>>10)&0x1F;
  1744. int b= b0 + b1 + b2 + b3;
  1745. int g= g0 + g1 + g2 + g3;
  1746. int r= r0 + r1 + r2 + r3;
  1747. #endif
  1748. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
  1749. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
  1750. }
  1751. }
  1752. static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width)
  1753. {
  1754. int i;
  1755. for(i=0; i<width; i++)
  1756. {
  1757. int r= src[i*4+0];
  1758. int g= src[i*4+1];
  1759. int b= src[i*4+2];
  1760. dst[i]= ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
  1761. }
  1762. }
  1763. static inline void RENAME(rgb32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1764. {
  1765. int i;
  1766. for(i=0; i<width; i++)
  1767. {
  1768. int r= src1[8*i + 0] + src1[8*i + 4] + src2[8*i + 0] + src2[8*i + 4];
  1769. int g= src1[8*i + 1] + src1[8*i + 5] + src2[8*i + 1] + src2[8*i + 5];
  1770. int b= src1[8*i + 2] + src1[8*i + 6] + src2[8*i + 2] + src2[8*i + 6];
  1771. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1772. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1773. }
  1774. }
  1775. static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, int width)
  1776. {
  1777. int i;
  1778. for(i=0; i<width; i++)
  1779. {
  1780. int r= src[i*3+0];
  1781. int g= src[i*3+1];
  1782. int b= src[i*3+2];
  1783. dst[i]= ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
  1784. }
  1785. }
  1786. static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1787. {
  1788. int i;
  1789. for(i=0; i<width; i++)
  1790. {
  1791. int r= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
  1792. int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
  1793. int b= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
  1794. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1795. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1796. }
  1797. }
  1798. // Bilinear / Bicubic scaling
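/* hScale() applies a horizontal FIR filter: for every output sample i it reads
   filterSize input bytes starting at filterPos[i], multiplies them by the 16-bit
   coefficients filter[i*filterSize .. i*filterSize + filterSize-1] and stores the
   sum scaled down to a clipped 15-bit intermediate. The C fallback at the end of
   the function is the exact formula; the MMX variants only specialise the
   filterSize==4 and filterSize==8 cases. */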
  1799. static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
  1800. int16_t *filter, int16_t *filterPos, int filterSize)
  1801. {
  1802. #ifdef HAVE_MMX
  1803. if(filterSize==4) // always true for upscaling, sometimes for downscaling too
  1804. {
  1805. int counter= -2*dstW;
  1806. filter-= counter*2;
  1807. filterPos-= counter/2;
  1808. dst-= counter/2;
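/* counter is negative and counts up towards zero: the pointers are biased above
   so that indexing them with the (negative) counter still lands on element 0,
   which lets the loop below terminate on the carry flag of the final "addl"
   instead of an explicit compare. The filterSize==8 and generic cases use the
   same trick. */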
  1809. asm volatile(
  1810. "pxor %%mm7, %%mm7 \n\t"
  1811. "movq "MANGLE(w02)", %%mm6 \n\t"
  1812. "pushl %%ebp \n\t" // we use 7 regs here ...
  1813. "movl %%eax, %%ebp \n\t"
  1814. ".balign 16 \n\t"
  1815. "1: \n\t"
  1816. "movzwl (%2, %%ebp), %%eax \n\t"
  1817. "movzwl 2(%2, %%ebp), %%ebx \n\t"
  1818. "movq (%1, %%ebp, 4), %%mm1 \n\t"
  1819. "movq 8(%1, %%ebp, 4), %%mm3 \n\t"
  1820. "movd (%3, %%eax), %%mm0 \n\t"
  1821. "movd (%3, %%ebx), %%mm2 \n\t"
  1822. "punpcklbw %%mm7, %%mm0 \n\t"
  1823. "punpcklbw %%mm7, %%mm2 \n\t"
  1824. "pmaddwd %%mm1, %%mm0 \n\t"
  1825. "pmaddwd %%mm2, %%mm3 \n\t"
  1826. "psrad $8, %%mm0 \n\t"
  1827. "psrad $8, %%mm3 \n\t"
  1828. "packssdw %%mm3, %%mm0 \n\t"
  1829. "pmaddwd %%mm6, %%mm0 \n\t"
  1830. "packssdw %%mm0, %%mm0 \n\t"
  1831. "movd %%mm0, (%4, %%ebp) \n\t"
  1832. "addl $4, %%ebp \n\t"
  1833. " jnc 1b \n\t"
  1834. "popl %%ebp \n\t"
  1835. : "+a" (counter)
  1836. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  1837. : "%ebx"
  1838. );
  1839. }
  1840. else if(filterSize==8)
  1841. {
  1842. int counter= -2*dstW;
  1843. filter-= counter*4;
  1844. filterPos-= counter/2;
  1845. dst-= counter/2;
  1846. asm volatile(
  1847. "pxor %%mm7, %%mm7 \n\t"
  1848. "movq "MANGLE(w02)", %%mm6 \n\t"
  1849. "pushl %%ebp \n\t" // we use 7 regs here ...
  1850. "movl %%eax, %%ebp \n\t"
  1851. ".balign 16 \n\t"
  1852. "1: \n\t"
  1853. "movzwl (%2, %%ebp), %%eax \n\t"
  1854. "movzwl 2(%2, %%ebp), %%ebx \n\t"
  1855. "movq (%1, %%ebp, 8), %%mm1 \n\t"
  1856. "movq 16(%1, %%ebp, 8), %%mm3 \n\t"
  1857. "movd (%3, %%eax), %%mm0 \n\t"
  1858. "movd (%3, %%ebx), %%mm2 \n\t"
  1859. "punpcklbw %%mm7, %%mm0 \n\t"
  1860. "punpcklbw %%mm7, %%mm2 \n\t"
  1861. "pmaddwd %%mm1, %%mm0 \n\t"
  1862. "pmaddwd %%mm2, %%mm3 \n\t"
  1863. "movq 8(%1, %%ebp, 8), %%mm1 \n\t"
  1864. "movq 24(%1, %%ebp, 8), %%mm5 \n\t"
  1865. "movd 4(%3, %%eax), %%mm4 \n\t"
  1866. "movd 4(%3, %%ebx), %%mm2 \n\t"
  1867. "punpcklbw %%mm7, %%mm4 \n\t"
  1868. "punpcklbw %%mm7, %%mm2 \n\t"
  1869. "pmaddwd %%mm1, %%mm4 \n\t"
  1870. "pmaddwd %%mm2, %%mm5 \n\t"
  1871. "paddd %%mm4, %%mm0 \n\t"
  1872. "paddd %%mm5, %%mm3 \n\t"
  1873. "psrad $8, %%mm0 \n\t"
  1874. "psrad $8, %%mm3 \n\t"
  1875. "packssdw %%mm3, %%mm0 \n\t"
  1876. "pmaddwd %%mm6, %%mm0 \n\t"
  1877. "packssdw %%mm0, %%mm0 \n\t"
  1878. "movd %%mm0, (%4, %%ebp) \n\t"
  1879. "addl $4, %%ebp \n\t"
  1880. " jnc 1b \n\t"
  1881. "popl %%ebp \n\t"
  1882. : "+a" (counter)
  1883. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  1884. : "%ebx"
  1885. );
  1886. }
  1887. else
  1888. {
  1889. int counter= -2*dstW;
  1890. // filter-= counter*filterSize/2;
  1891. filterPos-= counter/2;
  1892. dst-= counter/2;
  1893. asm volatile(
  1894. "pxor %%mm7, %%mm7 \n\t"
  1895. "movq "MANGLE(w02)", %%mm6 \n\t"
  1896. ".balign 16 \n\t"
  1897. "1: \n\t"
  1898. "movl %2, %%ecx \n\t"
  1899. "movzwl (%%ecx, %0), %%eax \n\t"
  1900. "movzwl 2(%%ecx, %0), %%ebx \n\t"
  1901. "movl %5, %%ecx \n\t"
  1902. "pxor %%mm4, %%mm4 \n\t"
  1903. "pxor %%mm5, %%mm5 \n\t"
  1904. "2: \n\t"
  1905. "movq (%1), %%mm1 \n\t"
  1906. "movq (%1, %6), %%mm3 \n\t"
  1907. "movd (%%ecx, %%eax), %%mm0 \n\t"
  1908. "movd (%%ecx, %%ebx), %%mm2 \n\t"
  1909. "punpcklbw %%mm7, %%mm0 \n\t"
  1910. "punpcklbw %%mm7, %%mm2 \n\t"
  1911. "pmaddwd %%mm1, %%mm0 \n\t"
  1912. "pmaddwd %%mm2, %%mm3 \n\t"
  1913. "paddd %%mm3, %%mm5 \n\t"
  1914. "paddd %%mm0, %%mm4 \n\t"
  1915. "addl $8, %1 \n\t"
  1916. "addl $4, %%ecx \n\t"
  1917. "cmpl %4, %%ecx \n\t"
  1918. " jb 2b \n\t"
  1919. "addl %6, %1 \n\t"
  1920. "psrad $8, %%mm4 \n\t"
  1921. "psrad $8, %%mm5 \n\t"
  1922. "packssdw %%mm5, %%mm4 \n\t"
  1923. "pmaddwd %%mm6, %%mm4 \n\t"
  1924. "packssdw %%mm4, %%mm4 \n\t"
  1925. "movl %3, %%eax \n\t"
  1926. "movd %%mm4, (%%eax, %0) \n\t"
  1927. "addl $4, %0 \n\t"
  1928. " jnc 1b \n\t"
  1929. : "+r" (counter), "+r" (filter)
  1930. : "m" (filterPos), "m" (dst), "m"(src+filterSize),
  1931. "m" (src), "r" (filterSize*2)
  1932. : "%ebx", "%eax", "%ecx"
  1933. );
  1934. }
  1935. #else
  1936. int i;
  1937. for(i=0; i<dstW; i++)
  1938. {
  1939. int j;
  1940. int srcPos= filterPos[i];
  1941. int val=0;
  1942. // printf("filterPos: %d\n", filterPos[i]);
  1943. for(j=0; j<filterSize; j++)
  1944. {
  1945. // printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
  1946. val += ((int)src[srcPos + j])*filter[filterSize*i + j];
  1947. }
  1948. // filter += hFilterSize;
  1949. dst[i] = MIN(MAX(0, val>>7), (1<<15)-1); // the cubic filter can make the sum overflow, so clip it
  1950. // dst[i] = val>>7;
  1951. }
  1952. #endif
  1953. }
  1954. // *** horizontal scale Y line to temp buffer
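/* hyscale(): if the input is not already planar 8-bit luma, one of the *ToY
   helpers above first converts the current line into formatConvBuffer. The line
   is then scaled horizontally into dst as 16-bit samples (values scaled by 128,
   i.e. <<7), either through the generic hScale() filter or, for
   SWS_FAST_BILINEAR, through the fast bilinear code below. */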
  1955. static inline void RENAME(hyscale)(uint16_t *dst, int dstWidth, uint8_t *src, int srcW, int xInc,
  1956. int flags, int canMMX2BeUsed, int16_t *hLumFilter,
  1957. int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
  1958. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  1959. int32_t *mmx2FilterPos)
  1960. {
  1961. if(srcFormat==IMGFMT_YUY2)
  1962. {
  1963. RENAME(yuy2ToY)(formatConvBuffer, src, srcW);
  1964. src= formatConvBuffer;
  1965. }
  1966. else if(srcFormat==IMGFMT_BGR32)
  1967. {
  1968. RENAME(bgr32ToY)(formatConvBuffer, src, srcW);
  1969. src= formatConvBuffer;
  1970. }
  1971. else if(srcFormat==IMGFMT_BGR24)
  1972. {
  1973. RENAME(bgr24ToY)(formatConvBuffer, src, srcW);
  1974. src= formatConvBuffer;
  1975. }
  1976. else if(srcFormat==IMGFMT_BGR16)
  1977. {
  1978. RENAME(bgr16ToY)(formatConvBuffer, src, srcW);
  1979. src= formatConvBuffer;
  1980. }
  1981. else if(srcFormat==IMGFMT_BGR15)
  1982. {
  1983. RENAME(bgr15ToY)(formatConvBuffer, src, srcW);
  1984. src= formatConvBuffer;
  1985. }
  1986. else if(srcFormat==IMGFMT_RGB32)
  1987. {
  1988. RENAME(rgb32ToY)(formatConvBuffer, src, srcW);
  1989. src= formatConvBuffer;
  1990. }
  1991. else if(srcFormat==IMGFMT_RGB24)
  1992. {
  1993. RENAME(rgb24ToY)(formatConvBuffer, src, srcW);
  1994. src= formatConvBuffer;
  1995. }
  1996. #ifdef HAVE_MMX
  1997. // use the new MMX scaler if the MMX2 one can't be used (it's faster than the plain x86 asm one)
  1998. if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  1999. #else
  2000. if(!(flags&SWS_FAST_BILINEAR))
  2001. #endif
  2002. {
  2003. RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
  2004. }
  2005. else // Fast Bilinear upscale / crap downscale
  2006. {
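/* The fast bilinear path walks the source with a 16.16 fixed-point position:
   xpos advances by xInc per output pixel, the integer part selects src[xx] and
   the fraction is the blend weight, as in the C fallback at the end of this
   block. The MMX2 variant calls funnyYCode, scaler code that is presumably
   generated at init time elsewhere in swscale (it is only referenced through the
   context here); the plain-asm variant keeps the fraction in %cx and propagates
   its carry into the pixel index with adc. */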
  2007. #ifdef ARCH_X86
  2008. #ifdef HAVE_MMX2
  2009. int i;
  2010. if(canMMX2BeUsed)
  2011. {
  2012. asm volatile(
  2013. "pxor %%mm7, %%mm7 \n\t"
  2014. "movl %0, %%ecx \n\t"
  2015. "movl %1, %%edi \n\t"
  2016. "movl %2, %%edx \n\t"
  2017. "movl %3, %%ebx \n\t"
  2018. "xorl %%eax, %%eax \n\t" // i
  2019. PREFETCH" (%%ecx) \n\t"
  2020. PREFETCH" 32(%%ecx) \n\t"
  2021. PREFETCH" 64(%%ecx) \n\t"
  2022. #define FUNNY_Y_CODE \
  2023. "movl (%%ebx), %%esi \n\t"\
  2024. "call *%4 \n\t"\
  2025. "addl (%%ebx, %%eax), %%ecx \n\t"\
  2026. "addl %%eax, %%edi \n\t"\
  2027. "xorl %%eax, %%eax \n\t"\
  2028. FUNNY_Y_CODE
  2029. FUNNY_Y_CODE
  2030. FUNNY_Y_CODE
  2031. FUNNY_Y_CODE
  2032. FUNNY_Y_CODE
  2033. FUNNY_Y_CODE
  2034. FUNNY_Y_CODE
  2035. FUNNY_Y_CODE
  2036. :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2037. "m" (funnyYCode)
  2038. : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
  2039. );
  2040. for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
  2041. }
  2042. else
  2043. {
  2044. #endif
  2045. // no MMX2, just plain x86 asm ...
  2046. asm volatile(
  2047. "xorl %%eax, %%eax \n\t" // i
  2048. "xorl %%ebx, %%ebx \n\t" // xx
  2049. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2050. ".balign 16 \n\t"
  2051. "1: \n\t"
  2052. "movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
  2053. "movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
  2054. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2055. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2056. "shll $16, %%edi \n\t"
  2057. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2058. "movl %1, %%edi \n\t"
  2059. "shrl $9, %%esi \n\t"
  2060. "movw %%si, (%%edi, %%eax, 2) \n\t"
  2061. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2062. "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
  2063. "movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
  2064. "movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
  2065. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2066. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2067. "shll $16, %%edi \n\t"
  2068. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2069. "movl %1, %%edi \n\t"
  2070. "shrl $9, %%esi \n\t"
  2071. "movw %%si, 2(%%edi, %%eax, 2) \n\t"
  2072. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2073. "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
  2074. "addl $2, %%eax \n\t"
  2075. "cmpl %2, %%eax \n\t"
  2076. " jb 1b \n\t"
  2077. :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF)
  2078. : "%eax", "%ebx", "%ecx", "%edi", "%esi"
  2079. );
  2080. #ifdef HAVE_MMX2
  2081. } // if MMX2 can't be used
  2082. #endif
  2083. #else
  2084. int i;
  2085. unsigned int xpos=0;
  2086. for(i=0;i<dstWidth;i++)
  2087. {
  2088. register unsigned int xx=xpos>>16;
  2089. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2090. dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
  2091. xpos+=xInc;
  2092. }
  2093. #endif
  2094. }
  2095. }
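/* hcscale() is the chroma counterpart of hyscale(): it converts and scales two
   input lines at once and writes the U result to dst[0..dstWidth-1] and the V
   result 2048 samples further on (dst+2048), matching the layout the vertical
   scaler expects in the chroma pixel buffers. */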
  2096. inline static void RENAME(hcscale)(uint16_t *dst, int dstWidth, uint8_t *src1, uint8_t *src2,
  2097. int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
  2098. int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
  2099. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  2100. int32_t *mmx2FilterPos)
  2101. {
  2102. if(srcFormat==IMGFMT_YUY2)
  2103. {
  2104. RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2105. src1= formatConvBuffer;
  2106. src2= formatConvBuffer+2048;
  2107. }
  2108. else if(srcFormat==IMGFMT_BGR32)
  2109. {
  2110. RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2111. src1= formatConvBuffer;
  2112. src2= formatConvBuffer+2048;
  2113. }
  2114. else if(srcFormat==IMGFMT_BGR24)
  2115. {
  2116. RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2117. src1= formatConvBuffer;
  2118. src2= formatConvBuffer+2048;
  2119. }
  2120. else if(srcFormat==IMGFMT_BGR16)
  2121. {
  2122. RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2123. src1= formatConvBuffer;
  2124. src2= formatConvBuffer+2048;
  2125. }
  2126. else if(srcFormat==IMGFMT_BGR15)
  2127. {
  2128. RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2129. src1= formatConvBuffer;
  2130. src2= formatConvBuffer+2048;
  2131. }
  2132. else if(srcFormat==IMGFMT_RGB32)
  2133. {
  2134. RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2135. src1= formatConvBuffer;
  2136. src2= formatConvBuffer+2048;
  2137. }
  2138. else if(srcFormat==IMGFMT_RGB24)
  2139. {
  2140. RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2141. src1= formatConvBuffer;
  2142. src2= formatConvBuffer+2048;
  2143. }
  2144. else if(isGray(srcFormat))
  2145. {
  2146. return;
  2147. }
  2148. #ifdef HAVE_MMX
  2149. // use the new MMX scaler if the MMX2 one can't be used (it's faster than the plain x86 asm one)
  2150. if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2151. #else
  2152. if(!(flags&SWS_FAST_BILINEAR))
  2153. #endif
  2154. {
  2155. RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2156. RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2157. }
  2158. else // Fast Bilinear upscale / crap downscale
  2159. {
  2160. #ifdef ARCH_X86
  2161. #ifdef HAVE_MMX2
  2162. int i;
  2163. if(canMMX2BeUsed)
  2164. {
  2165. asm volatile(
  2166. "pxor %%mm7, %%mm7 \n\t"
  2167. "movl %0, %%ecx \n\t"
  2168. "movl %1, %%edi \n\t"
  2169. "movl %2, %%edx \n\t"
  2170. "movl %3, %%ebx \n\t"
  2171. "xorl %%eax, %%eax \n\t" // i
  2172. PREFETCH" (%%ecx) \n\t"
  2173. PREFETCH" 32(%%ecx) \n\t"
  2174. PREFETCH" 64(%%ecx) \n\t"
  2175. #define FUNNY_UV_CODE \
  2176. "movl (%%ebx), %%esi \n\t"\
  2177. "call *%4 \n\t"\
  2178. "addl (%%ebx, %%eax), %%ecx \n\t"\
  2179. "addl %%eax, %%edi \n\t"\
  2180. "xorl %%eax, %%eax \n\t"\
  2181. FUNNY_UV_CODE
  2182. FUNNY_UV_CODE
  2183. FUNNY_UV_CODE
  2184. FUNNY_UV_CODE
  2185. "xorl %%eax, %%eax \n\t" // i
  2186. "movl %5, %%ecx \n\t" // src
  2187. "movl %1, %%edi \n\t" // buf1
  2188. "addl $4096, %%edi \n\t"
  2189. PREFETCH" (%%ecx) \n\t"
  2190. PREFETCH" 32(%%ecx) \n\t"
  2191. PREFETCH" 64(%%ecx) \n\t"
  2192. FUNNY_UV_CODE
  2193. FUNNY_UV_CODE
  2194. FUNNY_UV_CODE
  2195. FUNNY_UV_CODE
  2196. :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2197. "m" (funnyUVCode), "m" (src2)
  2198. : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
  2199. );
  2200. for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
  2201. {
  2202. // printf("%d %d %d\n", dstWidth, i, srcW);
  2203. dst[i] = src1[srcW-1]*128;
  2204. dst[i+2048] = src2[srcW-1]*128;
  2205. }
  2206. }
  2207. else
  2208. {
  2209. #endif
  2210. asm volatile(
  2211. "xorl %%eax, %%eax \n\t" // i
  2212. "xorl %%ebx, %%ebx \n\t" // xx
  2213. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2214. ".balign 16 \n\t"
  2215. "1: \n\t"
  2216. "movl %0, %%esi \n\t"
  2217. "movzbl (%%esi, %%ebx), %%edi \n\t" //src[xx]
  2218. "movzbl 1(%%esi, %%ebx), %%esi \n\t" //src[xx+1]
  2219. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2220. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2221. "shll $16, %%edi \n\t"
  2222. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2223. "movl %1, %%edi \n\t"
  2224. "shrl $9, %%esi \n\t"
  2225. "movw %%si, (%%edi, %%eax, 2) \n\t"
  2226. "movzbl (%5, %%ebx), %%edi \n\t" //src[xx]
  2227. "movzbl 1(%5, %%ebx), %%esi \n\t" //src[xx+1]
  2228. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2229. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2230. "shll $16, %%edi \n\t"
  2231. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2232. "movl %1, %%edi \n\t"
  2233. "shrl $9, %%esi \n\t"
  2234. "movw %%si, 4096(%%edi, %%eax, 2)\n\t"
  2235. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2236. "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
  2237. "addl $1, %%eax \n\t"
  2238. "cmpl %2, %%eax \n\t"
  2239. " jb 1b \n\t"
  2240. :: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF),
  2241. "r" (src2)
  2242. : "%eax", "%ebx", "%ecx", "%edi", "%esi"
  2243. );
  2244. #ifdef HAVE_MMX2
  2245. } // if MMX2 can't be used
  2246. #endif
  2247. #else
  2248. int i;
  2249. unsigned int xpos=0;
  2250. for(i=0;i<dstWidth;i++)
  2251. {
  2252. register unsigned int xx=xpos>>16;
  2253. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2254. dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
  2255. dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
  2256. /* slower
  2257. dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
  2258. dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
  2259. */
  2260. xpos+=xInc;
  2261. }
  2262. #endif
  2263. }
  2264. }
  2265. static void RENAME(swScale)(SwsContext *c, uint8_t* srcParam[], int srcStrideParam[], int srcSliceY,
  2266. int srcSliceH, uint8_t* dstParam[], int dstStrideParam[]){
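/* swScale() may be fed the source image in horizontal slices: each call scales
   the newly available source lines horizontally into the lumPixBuf/chrPixBuf
   ring buffers and then vertically filters as many destination lines as those
   buffers allow, saving dstY, the buffer indices and the last buffered source
   lines back into the context so the next slice can continue where this one
   stopped. */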
  2267. /* load a few things into local vars to make the code more readable and faster */
  2268. const int srcW= c->srcW;
  2269. const int dstW= c->dstW;
  2270. const int dstH= c->dstH;
  2271. const int chrDstW= c->chrDstW;
  2272. const int chrSrcW= c->chrSrcW;
  2273. const int lumXInc= c->lumXInc;
  2274. const int chrXInc= c->chrXInc;
  2275. const int dstFormat= c->dstFormat;
  2276. const int srcFormat= c->srcFormat;
  2277. const int flags= c->flags;
  2278. const int canMMX2BeUsed= c->canMMX2BeUsed;
  2279. int16_t *vLumFilterPos= c->vLumFilterPos;
  2280. int16_t *vChrFilterPos= c->vChrFilterPos;
  2281. int16_t *hLumFilterPos= c->hLumFilterPos;
  2282. int16_t *hChrFilterPos= c->hChrFilterPos;
  2283. int16_t *vLumFilter= c->vLumFilter;
  2284. int16_t *vChrFilter= c->vChrFilter;
  2285. int16_t *hLumFilter= c->hLumFilter;
  2286. int16_t *hChrFilter= c->hChrFilter;
  2287. int16_t *lumMmxFilter= c->lumMmxFilter;
  2288. int16_t *chrMmxFilter= c->chrMmxFilter;
  2289. const int vLumFilterSize= c->vLumFilterSize;
  2290. const int vChrFilterSize= c->vChrFilterSize;
  2291. const int hLumFilterSize= c->hLumFilterSize;
  2292. const int hChrFilterSize= c->hChrFilterSize;
  2293. int16_t **lumPixBuf= c->lumPixBuf;
  2294. int16_t **chrPixBuf= c->chrPixBuf;
  2295. const int vLumBufSize= c->vLumBufSize;
  2296. const int vChrBufSize= c->vChrBufSize;
  2297. uint8_t *funnyYCode= c->funnyYCode;
  2298. uint8_t *funnyUVCode= c->funnyUVCode;
  2299. uint8_t *formatConvBuffer= c->formatConvBuffer;
  2300. const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
  2301. const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample); // rounds up
  2302. /* vars which will change and which we need to store back in the context */
  2303. int dstY= c->dstY;
  2304. int lumBufIndex= c->lumBufIndex;
  2305. int chrBufIndex= c->chrBufIndex;
  2306. int lastInLumBuf= c->lastInLumBuf;
  2307. int lastInChrBuf= c->lastInChrBuf;
  2308. int srcStride[3];
  2309. int dstStride[3];
  2310. uint8_t *src[3];
  2311. uint8_t *dst[3];
  2312. orderYUV(c->srcFormat, src, srcStride, srcParam, srcStrideParam);
  2313. orderYUV(c->dstFormat, dst, dstStride, dstParam, dstStrideParam);
  2314. if(isPacked(c->srcFormat)){
  2315. src[0]=
  2316. src[1]=
  2317. src[2]= srcParam[0];
  2318. srcStride[0]=
  2319. srcStride[1]=
  2320. srcStride[2]= srcStrideParam[0];
  2321. }
  2322. srcStride[1]<<= c->vChrDrop;
  2323. srcStride[2]<<= c->vChrDrop;
  2324. // printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
  2325. // (int)dst[0], (int)dst[1], (int)dst[2]);
  2326. #if 0 //self test FIXME move to a vfilter or something
  2327. {
  2328. static volatile int i=0;
  2329. i++;
  2330. if(srcFormat==IMGFMT_YV12 && i==1 && srcSliceH>= c->srcH)
  2331. selfTest(src, srcStride, c->srcW, c->srcH);
  2332. i--;
  2333. }
  2334. #endif
  2335. //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
  2336. //dstStride[0],dstStride[1],dstStride[2]);
  2337. if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
  2338. {
  2339. static int firstTime=1; //FIXME move this into the context perhaps
  2340. if(flags & SWS_PRINT_INFO && firstTime)
  2341. {
  2342. mp_msg(MSGT_SWS,MSGL_WARN,"SwScaler: Warning: dstStride is not aligned!\n"
  2343. "SwScaler: ->cannot do aligned memory acesses anymore\n");
  2344. firstTime=0;
  2345. }
  2346. }
  2347. /* Note: the user might start scaling in the middle of the picture, so this reset will not get executed.
  2348. That is not really intended, but it currently works, so people might rely on it. */
  2349. if(srcSliceY ==0){
  2350. lumBufIndex=0;
  2351. chrBufIndex=0;
  2352. dstY=0;
  2353. lastInLumBuf= -1;
  2354. lastInChrBuf= -1;
  2355. }
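/* For every destination line, vLumFilterPos/vChrFilterPos give the first source
   line the vertical filter needs and vLumFilterSize/vChrFilterSize how many lines
   it spans; the loop below first makes sure those lines are present in the ring
   buffers (scaling them horizontally on demand) and otherwise breaks out to wait
   for the next slice. */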
  2356. for(;dstY < dstH; dstY++){
  2357. unsigned char *dest =dst[0]+dstStride[0]*dstY;
  2358. const int chrDstY= dstY>>c->chrDstVSubSample;
  2359. unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
  2360. unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
  2361. const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
  2362. const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
  2363. const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
  2364. const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
  2365. //handle holes (FAST_BILINEAR & weird filters)
  2366. if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
  2367. if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
  2368. //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
  2369. ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1)
  2370. ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1)
  2371. // Do we have enough lines in this slice to output the dstY line?
  2372. if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
  2373. {
  2374. //Do horizontal scaling
  2375. while(lastInLumBuf < lastLumSrcY)
  2376. {
  2377. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2378. lumBufIndex++;
  2379. // printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
  2380. ASSERT(lumBufIndex < 2*vLumBufSize)
  2381. ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
  2382. ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
  2383. // printf("%d %d\n", lumBufIndex, vLumBufSize);
  2384. RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2385. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2386. funnyYCode, c->srcFormat, formatConvBuffer,
  2387. c->lumMmx2Filter, c->lumMmx2FilterPos);
  2388. lastInLumBuf++;
  2389. }
  2390. while(lastInChrBuf < lastChrSrcY)
  2391. {
  2392. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2393. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2394. chrBufIndex++;
  2395. ASSERT(chrBufIndex < 2*vChrBufSize)
  2396. ASSERT(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH))
  2397. ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
  2398. // FIXME pass parameters through the context struct (at least some of them)
  2399. if(!(isGray(srcFormat) || isGray(dstFormat)))
  2400. RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2401. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2402. funnyUVCode, c->srcFormat, formatConvBuffer,
  2403. c->chrMmx2Filter, c->chrMmx2FilterPos);
  2404. lastInChrBuf++;
  2405. }
  2406. //wrap buf index around to stay inside the ring buffer
  2407. if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
  2408. if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
  2409. }
  2410. else // not enough lines left in this slice -> load the rest in the buffer
  2411. {
  2412. /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
  2413. firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
  2414. lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
  2415. vChrBufSize, vLumBufSize);*/
  2416. //Do horizontal scaling
  2417. while(lastInLumBuf+1 < srcSliceY + srcSliceH)
  2418. {
  2419. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2420. lumBufIndex++;
  2421. ASSERT(lumBufIndex < 2*vLumBufSize)
  2422. ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
  2423. ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
  2424. RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2425. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2426. funnyYCode, c->srcFormat, formatConvBuffer,
  2427. c->lumMmx2Filter, c->lumMmx2FilterPos);
  2428. lastInLumBuf++;
  2429. }
  2430. while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
  2431. {
  2432. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2433. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2434. chrBufIndex++;
  2435. ASSERT(chrBufIndex < 2*vChrBufSize)
  2436. ASSERT(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH)
  2437. ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
  2438. if(!(isGray(srcFormat) || isGray(dstFormat)))
  2439. RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2440. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2441. funnyUVCode, c->srcFormat, formatConvBuffer,
  2442. c->chrMmx2Filter, c->chrMmx2FilterPos);
  2443. lastInChrBuf++;
  2444. }
  2445. //wrap buf index around to stay inside the ring buffer
  2446. if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
  2447. if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
  2448. break; // we can't output a dstY line, so let's try again with the next slice
  2449. }
  2450. #ifdef HAVE_MMX
  2451. b5Dither= dither8[dstY&1];
  2452. g6Dither= dither4[dstY&1];
  2453. g5Dither= dither8[dstY&1];
  2454. r5Dither= dither8[(dstY+1)&1];
  2455. #endif
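/* b5Dither/g6Dither/g5Dither/r5Dither select the per-line dither rows fed to the
   packed 15/16-bit RGB output paths; alternating them between odd and even
   destination lines gives a simple ordered dither. */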
  2456. if(dstY < dstH-2)
  2457. {
  2458. if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 like
  2459. {
  2460. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2461. if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
  2462. if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12
  2463. {
  2464. int16_t *lumBuf = lumPixBuf[0];
  2465. int16_t *chrBuf= chrPixBuf[0];
  2466. RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
  2467. }
  2468. else //General YV12
  2469. {
  2470. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2471. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2472. RENAME(yuv2yuvX)(
  2473. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2474. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2475. dest, uDest, vDest, dstW, chrDstW,
  2476. lumMmxFilter+dstY*vLumFilterSize*4, chrMmxFilter+chrDstY*vChrFilterSize*4);
  2477. }
  2478. }
  2479. else
  2480. {
  2481. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2482. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2483. ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2484. ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  2485. if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB
  2486. {
  2487. int chrAlpha= vChrFilter[2*dstY+1];
  2488. RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
  2489. dest, dstW, chrAlpha, dstFormat, flags, dstY);
  2490. }
  2491. else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB
  2492. {
  2493. int lumAlpha= vLumFilter[2*dstY+1];
  2494. int chrAlpha= vChrFilter[2*dstY+1];
  2495. RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
  2496. dest, dstW, lumAlpha, chrAlpha, dstY);
  2497. }
  2498. else //General RGB
  2499. {
  2500. RENAME(yuv2packedX)(c,
  2501. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2502. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2503. dest, dstW,
  2504. lumMmxFilter+dstY*vLumFilterSize*4, chrMmxFilter+dstY*vChrFilterSize*4, dstY);
  2505. }
  2506. }
  2507. }
  2508. else // hmm, looks like we can't use MMX here without overwriting this array's tail
  2509. {
  2510. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2511. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2512. if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12
  2513. {
  2514. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2515. if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
  2516. yuv2yuvXinC(
  2517. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2518. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2519. dest, uDest, vDest, dstW, chrDstW);
  2520. }
  2521. else
  2522. {
  2523. ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2524. ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  2525. yuv2packedXinC(c,
  2526. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2527. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2528. dest, dstW, dstY);
  2529. }
  2530. }
  2531. }
  2532. #ifdef HAVE_MMX
  2533. __asm __volatile(SFENCE:::"memory");
  2534. __asm __volatile(EMMS:::"memory");
  2535. #endif
  2536. /* store changed local vars back in the context */
  2537. c->dstY= dstY;
  2538. c->lumBufIndex= lumBufIndex;
  2539. c->chrBufIndex= chrBufIndex;
  2540. c->lastInLumBuf= lastInLumBuf;
  2541. c->lastInChrBuf= lastInChrBuf;
  2542. }