  1. /*
  2. Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
  3. This program is free software; you can redistribute it and/or modify
  4. it under the terms of the GNU General Public License as published by
  5. the Free Software Foundation; either version 2 of the License, or
  6. (at your option) any later version.
  7. This program is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU General Public License for more details.
  11. You should have received a copy of the GNU General Public License
  12. along with this program; if not, write to the Free Software
  13. Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  14. */
  15. #undef MOVNTQ
  16. #undef PAVGB
  17. #undef PREFETCH
  18. #undef PREFETCHW
  19. #undef EMMS
  20. #undef SFENCE
  21. #ifdef HAVE_3DNOW
  22. /* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
  23. #define EMMS "femms"
  24. #else
  25. #define EMMS "emms"
  26. #endif
  27. #ifdef HAVE_3DNOW
  28. #define PREFETCH "prefetch"
  29. #define PREFETCHW "prefetchw"
  30. #elif defined ( HAVE_MMX2 )
  31. #define PREFETCH "prefetchnta"
  32. #define PREFETCHW "prefetcht0"
  33. #else
  34. #define PREFETCH "/nop"
  35. #define PREFETCHW "/nop"
  36. #endif
  37. #ifdef HAVE_MMX2
  38. #define SFENCE "sfence"
  39. #else
  40. #define SFENCE "/nop"
  41. #endif
  42. #ifdef HAVE_MMX2
  43. #define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
  44. #elif defined (HAVE_3DNOW)
  45. #define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
  46. #endif
  47. #ifdef HAVE_MMX2
  48. #define MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
  49. #else
  50. #define MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
  51. #endif
  52. #ifdef HAVE_ALTIVEC
  53. #include "swscale_altivec_template.c"
  54. #endif
  55. #define YSCALEYUV2YV12X(x, offset) \
  56. "xorl %%eax, %%eax \n\t"\
  57. "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
  58. "movq %%mm3, %%mm4 \n\t"\
  59. "leal " offset "(%0), %%edx \n\t"\
  60. "movl (%%edx), %%esi \n\t"\
  61. ".balign 16 \n\t" /* FIXME Unroll? */\
  62. "1: \n\t"\
  63. "movq 8(%%edx), %%mm0 \n\t" /* filterCoeff */\
  64. "movq " #x "(%%esi, %%eax, 2), %%mm2 \n\t" /* srcData */\
  65. "movq 8+" #x "(%%esi, %%eax, 2), %%mm5 \n\t" /* srcData */\
  66. "addl $16, %%edx \n\t"\
  67. "movl (%%edx), %%esi \n\t"\
  68. "testl %%esi, %%esi \n\t"\
  69. "pmulhw %%mm0, %%mm2 \n\t"\
  70. "pmulhw %%mm0, %%mm5 \n\t"\
  71. "paddw %%mm2, %%mm3 \n\t"\
  72. "paddw %%mm5, %%mm4 \n\t"\
  73. " jnz 1b \n\t"\
  74. "psraw $3, %%mm3 \n\t"\
  75. "psraw $3, %%mm4 \n\t"\
  76. "packuswb %%mm4, %%mm3 \n\t"\
  77. MOVNTQ(%%mm3, (%1, %%eax))\
  78. "addl $8, %%eax \n\t"\
  79. "cmpl %2, %%eax \n\t"\
  80. "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
  81. "movq %%mm3, %%mm4 \n\t"\
  82. "leal " offset "(%0), %%edx \n\t"\
  83. "movl (%%edx), %%esi \n\t"\
  84. "jb 1b \n\t"
  85. #define YSCALEYUV2YV121 \
  86. "movl %2, %%eax \n\t"\
  87. ".balign 16 \n\t" /* FIXME Unroll? */\
  88. "1: \n\t"\
  89. "movq (%0, %%eax, 2), %%mm0 \n\t"\
  90. "movq 8(%0, %%eax, 2), %%mm1 \n\t"\
  91. "psraw $7, %%mm0 \n\t"\
  92. "psraw $7, %%mm1 \n\t"\
  93. "packuswb %%mm1, %%mm0 \n\t"\
  94. MOVNTQ(%%mm0, (%1, %%eax))\
  95. "addl $8, %%eax \n\t"\
  96. "jnc 1b \n\t"
  97. /*
  98. :: "m" (-lumFilterSize), "m" (-chrFilterSize),
  99. "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
  100. "r" (dest), "m" (dstW),
  101. "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
  102. : "%eax", "%ebx", "%ecx", "%edx", "%esi"
  103. */
  104. #define YSCALEYUV2PACKEDX \
  105. "xorl %%eax, %%eax \n\t"\
  106. ".balign 16 \n\t"\
  107. "nop \n\t"\
  108. "1: \n\t"\
  109. "leal "CHR_MMX_FILTER_OFFSET"(%0), %%edx \n\t"\
  110. "movl (%%edx), %%esi \n\t"\
  111. "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
  112. "movq %%mm3, %%mm4 \n\t"\
  113. ".balign 16 \n\t"\
  114. "2: \n\t"\
  115. "movq 8(%%edx), %%mm0 \n\t" /* filterCoeff */\
  116. "movq (%%esi, %%eax), %%mm2 \n\t" /* UsrcData */\
  117. "movq 4096(%%esi, %%eax), %%mm5 \n\t" /* VsrcData */\
  118. "addl $16, %%edx \n\t"\
  119. "movl (%%edx), %%esi \n\t"\
  120. "pmulhw %%mm0, %%mm2 \n\t"\
  121. "pmulhw %%mm0, %%mm5 \n\t"\
  122. "paddw %%mm2, %%mm3 \n\t"\
  123. "paddw %%mm5, %%mm4 \n\t"\
  124. "testl %%esi, %%esi \n\t"\
  125. " jnz 2b \n\t"\
  126. \
  127. "leal "LUM_MMX_FILTER_OFFSET"(%0), %%edx \n\t"\
  128. "movl (%%edx), %%esi \n\t"\
  129. "movq "VROUNDER_OFFSET"(%0), %%mm1\n\t"\
  130. "movq %%mm1, %%mm7 \n\t"\
  131. ".balign 16 \n\t"\
  132. "2: \n\t"\
  133. "movq 8(%%edx), %%mm0 \n\t" /* filterCoeff */\
  134. "movq (%%esi, %%eax, 2), %%mm2 \n\t" /* Y1srcData */\
  135. "movq 8(%%esi, %%eax, 2), %%mm5 \n\t" /* Y2srcData */\
  136. "addl $16, %%edx \n\t"\
  137. "movl (%%edx), %%esi \n\t"\
  138. "pmulhw %%mm0, %%mm2 \n\t"\
  139. "pmulhw %%mm0, %%mm5 \n\t"\
  140. "paddw %%mm2, %%mm1 \n\t"\
  141. "paddw %%mm5, %%mm7 \n\t"\
  142. "testl %%esi, %%esi \n\t"\
  143. " jnz 2b \n\t"\
  144. #define YSCALEYUV2RGBX \
  145. YSCALEYUV2PACKEDX\
  146. "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
  147. "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
  148. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  149. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  150. "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
  151. "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
  152. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  153. "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
  154. "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
  155. "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
  156. "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
  157. "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
  158. "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
  159. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  160. "paddw %%mm3, %%mm4 \n\t"\
  161. "movq %%mm2, %%mm0 \n\t"\
  162. "movq %%mm5, %%mm6 \n\t"\
  163. "movq %%mm4, %%mm3 \n\t"\
  164. "punpcklwd %%mm2, %%mm2 \n\t"\
  165. "punpcklwd %%mm5, %%mm5 \n\t"\
  166. "punpcklwd %%mm4, %%mm4 \n\t"\
  167. "paddw %%mm1, %%mm2 \n\t"\
  168. "paddw %%mm1, %%mm5 \n\t"\
  169. "paddw %%mm1, %%mm4 \n\t"\
  170. "punpckhwd %%mm0, %%mm0 \n\t"\
  171. "punpckhwd %%mm6, %%mm6 \n\t"\
  172. "punpckhwd %%mm3, %%mm3 \n\t"\
  173. "paddw %%mm7, %%mm0 \n\t"\
  174. "paddw %%mm7, %%mm6 \n\t"\
  175. "paddw %%mm7, %%mm3 \n\t"\
  176. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  177. "packuswb %%mm0, %%mm2 \n\t"\
  178. "packuswb %%mm6, %%mm5 \n\t"\
  179. "packuswb %%mm3, %%mm4 \n\t"\
  180. "pxor %%mm7, %%mm7 \n\t"
  181. #if 0
  182. #define FULL_YSCALEYUV2RGB \
  183. "pxor %%mm7, %%mm7 \n\t"\
  184. "movd %6, %%mm6 \n\t" /*yalpha1*/\
  185. "punpcklwd %%mm6, %%mm6 \n\t"\
  186. "punpcklwd %%mm6, %%mm6 \n\t"\
  187. "movd %7, %%mm5 \n\t" /*uvalpha1*/\
  188. "punpcklwd %%mm5, %%mm5 \n\t"\
  189. "punpcklwd %%mm5, %%mm5 \n\t"\
  190. "xorl %%eax, %%eax \n\t"\
  191. ".balign 16 \n\t"\
  192. "1: \n\t"\
  193. "movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
  194. "movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
  195. "movq (%2, %%eax,2), %%mm2 \n\t" /* uvbuf0[eax]*/\
  196. "movq (%3, %%eax,2), %%mm3 \n\t" /* uvbuf1[eax]*/\
  197. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  198. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  199. "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  200. "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  201. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  202. "movq 4096(%2, %%eax,2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  203. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  204. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  205. "movq 4096(%3, %%eax,2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
  206. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  207. "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  208. "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
  209. "psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\
  210. "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
  211. \
  212. \
  213. "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  214. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  215. "pmulhw "MANGLE(ubCoeff)", %%mm3\n\t"\
  216. "psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  217. "pmulhw "MANGLE(ugCoeff)", %%mm2\n\t"\
  218. "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  219. "psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\
  220. \
  221. \
  222. "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
  223. "pmulhw "MANGLE(vrCoeff)", %%mm0\n\t"\
  224. "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
  225. "paddw %%mm1, %%mm3 \n\t" /* B*/\
  226. "paddw %%mm1, %%mm0 \n\t" /* R*/\
  227. "packuswb %%mm3, %%mm3 \n\t"\
  228. \
  229. "packuswb %%mm0, %%mm0 \n\t"\
  230. "paddw %%mm4, %%mm2 \n\t"\
  231. "paddw %%mm2, %%mm1 \n\t" /* G*/\
  232. \
  233. "packuswb %%mm1, %%mm1 \n\t"
  234. #endif
  235. #define YSCALEYUV2PACKED(index, c) \
  236. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
  237. "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1\n\t"\
  238. "psraw $3, %%mm0 \n\t"\
  239. "psraw $3, %%mm1 \n\t"\
  240. "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c")\n\t"\
  241. "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c")\n\t"\
  242. "xorl "#index", "#index" \n\t"\
  243. ".balign 16 \n\t"\
  244. "1: \n\t"\
  245. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  246. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  247. "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  248. "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  249. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  250. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  251. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
  252. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  253. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  254. "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  255. "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  256. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  257. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  258. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  259. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  260. "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
  261. "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
  262. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  263. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  264. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  265. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  266. "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  267. "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  268. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  269. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  270. #define YSCALEYUV2RGB(index, c) \
  271. "xorl "#index", "#index" \n\t"\
  272. ".balign 16 \n\t"\
  273. "1: \n\t"\
  274. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  275. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  276. "movq 4096(%2, "#index"), %%mm5\n\t" /* uvbuf0[eax+2048]*/\
  277. "movq 4096(%3, "#index"), %%mm4\n\t" /* uvbuf1[eax+2048]*/\
  278. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  279. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  280. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
  281. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  282. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  283. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  284. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  285. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  286. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  287. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  288. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  289. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  290. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  291. "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
  292. "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
  293. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  294. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  295. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  296. "movq 8(%0, "#index", 2), %%mm6\n\t" /*buf0[eax]*/\
  297. "movq 8(%1, "#index", 2), %%mm7\n\t" /*buf1[eax]*/\
  298. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  299. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  300. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  301. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  302. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  303. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  304. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  305. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  306. "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
  307. "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
  308. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  309. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  310. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  311. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  312. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  313. "paddw %%mm3, %%mm4 \n\t"\
  314. "movq %%mm2, %%mm0 \n\t"\
  315. "movq %%mm5, %%mm6 \n\t"\
  316. "movq %%mm4, %%mm3 \n\t"\
  317. "punpcklwd %%mm2, %%mm2 \n\t"\
  318. "punpcklwd %%mm5, %%mm5 \n\t"\
  319. "punpcklwd %%mm4, %%mm4 \n\t"\
  320. "paddw %%mm1, %%mm2 \n\t"\
  321. "paddw %%mm1, %%mm5 \n\t"\
  322. "paddw %%mm1, %%mm4 \n\t"\
  323. "punpckhwd %%mm0, %%mm0 \n\t"\
  324. "punpckhwd %%mm6, %%mm6 \n\t"\
  325. "punpckhwd %%mm3, %%mm3 \n\t"\
  326. "paddw %%mm7, %%mm0 \n\t"\
  327. "paddw %%mm7, %%mm6 \n\t"\
  328. "paddw %%mm7, %%mm3 \n\t"\
  329. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  330. "packuswb %%mm0, %%mm2 \n\t"\
  331. "packuswb %%mm6, %%mm5 \n\t"\
  332. "packuswb %%mm3, %%mm4 \n\t"\
  333. "pxor %%mm7, %%mm7 \n\t"
  334. #define YSCALEYUV2PACKED1(index, c) \
  335. "xorl "#index", "#index" \n\t"\
  336. ".balign 16 \n\t"\
  337. "1: \n\t"\
  338. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  339. "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  340. "psraw $7, %%mm3 \n\t" \
  341. "psraw $7, %%mm4 \n\t" \
  342. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  343. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  344. "psraw $7, %%mm1 \n\t" \
  345. "psraw $7, %%mm7 \n\t" \
  346. #define YSCALEYUV2RGB1(index, c) \
  347. "xorl "#index", "#index" \n\t"\
  348. ".balign 16 \n\t"\
  349. "1: \n\t"\
  350. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  351. "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  352. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  353. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  354. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  355. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  356. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  357. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  358. "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
  359. "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
  360. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  361. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  362. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  363. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  364. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  365. "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
  366. "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
  367. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  368. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  369. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  370. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  371. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  372. "paddw %%mm3, %%mm4 \n\t"\
  373. "movq %%mm2, %%mm0 \n\t"\
  374. "movq %%mm5, %%mm6 \n\t"\
  375. "movq %%mm4, %%mm3 \n\t"\
  376. "punpcklwd %%mm2, %%mm2 \n\t"\
  377. "punpcklwd %%mm5, %%mm5 \n\t"\
  378. "punpcklwd %%mm4, %%mm4 \n\t"\
  379. "paddw %%mm1, %%mm2 \n\t"\
  380. "paddw %%mm1, %%mm5 \n\t"\
  381. "paddw %%mm1, %%mm4 \n\t"\
  382. "punpckhwd %%mm0, %%mm0 \n\t"\
  383. "punpckhwd %%mm6, %%mm6 \n\t"\
  384. "punpckhwd %%mm3, %%mm3 \n\t"\
  385. "paddw %%mm7, %%mm0 \n\t"\
  386. "paddw %%mm7, %%mm6 \n\t"\
  387. "paddw %%mm7, %%mm3 \n\t"\
  388. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  389. "packuswb %%mm0, %%mm2 \n\t"\
  390. "packuswb %%mm6, %%mm5 \n\t"\
  391. "packuswb %%mm3, %%mm4 \n\t"\
  392. "pxor %%mm7, %%mm7 \n\t"
  393. #define YSCALEYUV2PACKED1b(index, c) \
  394. "xorl "#index", "#index" \n\t"\
  395. ".balign 16 \n\t"\
  396. "1: \n\t"\
  397. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  398. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  399. "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  400. "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  401. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  402. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  403. "psrlw $8, %%mm3 \n\t" \
  404. "psrlw $8, %%mm4 \n\t" \
  405. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  406. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  407. "psraw $7, %%mm1 \n\t" \
  408. "psraw $7, %%mm7 \n\t"
  409. // do vertical chrominance interpolation
  410. #define YSCALEYUV2RGB1b(index, c) \
  411. "xorl "#index", "#index" \n\t"\
  412. ".balign 16 \n\t"\
  413. "1: \n\t"\
  414. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  415. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  416. "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  417. "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  418. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  419. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  420. "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
  421. "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
  422. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  423. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  424. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  425. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  426. "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
  427. "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
  428. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  429. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  430. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  431. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  432. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  433. "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
  434. "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
  435. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  436. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  437. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  438. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  439. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  440. "paddw %%mm3, %%mm4 \n\t"\
  441. "movq %%mm2, %%mm0 \n\t"\
  442. "movq %%mm5, %%mm6 \n\t"\
  443. "movq %%mm4, %%mm3 \n\t"\
  444. "punpcklwd %%mm2, %%mm2 \n\t"\
  445. "punpcklwd %%mm5, %%mm5 \n\t"\
  446. "punpcklwd %%mm4, %%mm4 \n\t"\
  447. "paddw %%mm1, %%mm2 \n\t"\
  448. "paddw %%mm1, %%mm5 \n\t"\
  449. "paddw %%mm1, %%mm4 \n\t"\
  450. "punpckhwd %%mm0, %%mm0 \n\t"\
  451. "punpckhwd %%mm6, %%mm6 \n\t"\
  452. "punpckhwd %%mm3, %%mm3 \n\t"\
  453. "paddw %%mm7, %%mm0 \n\t"\
  454. "paddw %%mm7, %%mm6 \n\t"\
  455. "paddw %%mm7, %%mm3 \n\t"\
  456. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  457. "packuswb %%mm0, %%mm2 \n\t"\
  458. "packuswb %%mm6, %%mm5 \n\t"\
  459. "packuswb %%mm3, %%mm4 \n\t"\
  460. "pxor %%mm7, %%mm7 \n\t"
  461. #define WRITEBGR32(dst, dstw, index) \
  462. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  463. "movq %%mm2, %%mm1 \n\t" /* B */\
  464. "movq %%mm5, %%mm6 \n\t" /* R */\
  465. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  466. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  467. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  468. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  469. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  470. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  471. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  472. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  473. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  474. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  475. \
  476. MOVNTQ(%%mm0, (dst, index, 4))\
  477. MOVNTQ(%%mm2, 8(dst, index, 4))\
  478. MOVNTQ(%%mm1, 16(dst, index, 4))\
  479. MOVNTQ(%%mm3, 24(dst, index, 4))\
  480. \
  481. "addl $8, "#index" \n\t"\
  482. "cmpl "#dstw", "#index" \n\t"\
  483. " jb 1b \n\t"
  484. #define WRITEBGR16(dst, dstw, index) \
  485. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  486. "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
  487. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  488. "psrlq $3, %%mm2 \n\t"\
  489. \
  490. "movq %%mm2, %%mm1 \n\t"\
  491. "movq %%mm4, %%mm3 \n\t"\
  492. \
  493. "punpcklbw %%mm7, %%mm3 \n\t"\
  494. "punpcklbw %%mm5, %%mm2 \n\t"\
  495. "punpckhbw %%mm7, %%mm4 \n\t"\
  496. "punpckhbw %%mm5, %%mm1 \n\t"\
  497. \
  498. "psllq $3, %%mm3 \n\t"\
  499. "psllq $3, %%mm4 \n\t"\
  500. \
  501. "por %%mm3, %%mm2 \n\t"\
  502. "por %%mm4, %%mm1 \n\t"\
  503. \
  504. MOVNTQ(%%mm2, (dst, index, 2))\
  505. MOVNTQ(%%mm1, 8(dst, index, 2))\
  506. \
  507. "addl $8, "#index" \n\t"\
  508. "cmpl "#dstw", "#index" \n\t"\
  509. " jb 1b \n\t"
  510. #define WRITEBGR15(dst, dstw, index) \
  511. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  512. "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
  513. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  514. "psrlq $3, %%mm2 \n\t"\
  515. "psrlq $1, %%mm5 \n\t"\
  516. \
  517. "movq %%mm2, %%mm1 \n\t"\
  518. "movq %%mm4, %%mm3 \n\t"\
  519. \
  520. "punpcklbw %%mm7, %%mm3 \n\t"\
  521. "punpcklbw %%mm5, %%mm2 \n\t"\
  522. "punpckhbw %%mm7, %%mm4 \n\t"\
  523. "punpckhbw %%mm5, %%mm1 \n\t"\
  524. \
  525. "psllq $2, %%mm3 \n\t"\
  526. "psllq $2, %%mm4 \n\t"\
  527. \
  528. "por %%mm3, %%mm2 \n\t"\
  529. "por %%mm4, %%mm1 \n\t"\
  530. \
  531. MOVNTQ(%%mm2, (dst, index, 2))\
  532. MOVNTQ(%%mm1, 8(dst, index, 2))\
  533. \
  534. "addl $8, "#index" \n\t"\
  535. "cmpl "#dstw", "#index" \n\t"\
  536. " jb 1b \n\t"
  537. #define WRITEBGR24OLD(dst, dstw, index) \
  538. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  539. "movq %%mm2, %%mm1 \n\t" /* B */\
  540. "movq %%mm5, %%mm6 \n\t" /* R */\
  541. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  542. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  543. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  544. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  545. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  546. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  547. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  548. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  549. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  550. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  551. \
  552. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  553. "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
  554. "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 0 */\
  555. "pand "MANGLE(bm11111000)", %%mm0\n\t" /* 00RGB000 0.5 */\
  556. "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
  557. "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
  558. "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
  559. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  560. \
  561. "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  562. "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
  563. "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
  564. "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
  565. "pand "MANGLE(bm00001111)", %%mm2\n\t" /* 0000RGBR 1 */\
  566. "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
  567. "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
  568. "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 2 */\
  569. "pand "MANGLE(bm11111000)", %%mm1\n\t" /* 00RGB000 2.5 */\
  570. "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
  571. "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
  572. "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
  573. "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
  574. \
  575. "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
  576. "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
  577. "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
  578. "pand "MANGLE(bm00000111)", %%mm5\n\t" /* 00000RGB 3 */\
  579. "pand "MANGLE(bm11111000)", %%mm3\n\t" /* 00RGB000 3.5 */\
  580. "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
  581. "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
  582. "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
  583. \
  584. MOVNTQ(%%mm0, (dst))\
  585. MOVNTQ(%%mm2, 8(dst))\
  586. MOVNTQ(%%mm3, 16(dst))\
  587. "addl $24, "#dst" \n\t"\
  588. \
  589. "addl $8, "#index" \n\t"\
  590. "cmpl "#dstw", "#index" \n\t"\
  591. " jb 1b \n\t"
  592. #define WRITEBGR24MMX(dst, dstw, index) \
  593. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  594. "movq %%mm2, %%mm1 \n\t" /* B */\
  595. "movq %%mm5, %%mm6 \n\t" /* R */\
  596. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  597. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  598. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  599. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  600. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  601. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  602. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  603. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  604. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  605. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  606. \
  607. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  608. "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
  609. "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
  610. "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
  611. \
  612. "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
  613. "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
  614. "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
  615. "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
  616. \
  617. "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
  618. "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
  619. "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
  620. "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
  621. \
  622. "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
  623. "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
  624. "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
  625. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  626. MOVNTQ(%%mm0, (dst))\
  627. \
  628. "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
  629. "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
  630. "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
  631. "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
  632. MOVNTQ(%%mm6, 8(dst))\
  633. \
  634. "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
  635. "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
  636. "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
  637. MOVNTQ(%%mm5, 16(dst))\
  638. \
  639. "addl $24, "#dst" \n\t"\
  640. \
  641. "addl $8, "#index" \n\t"\
  642. "cmpl "#dstw", "#index" \n\t"\
  643. " jb 1b \n\t"
  644. #define WRITEBGR24MMX2(dst, dstw, index) \
  645. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  646. "movq "MANGLE(M24A)", %%mm0 \n\t"\
  647. "movq "MANGLE(M24C)", %%mm7 \n\t"\
  648. "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
  649. "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
  650. "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
  651. \
  652. "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
  653. "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
  654. "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
  655. \
  656. "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
  657. "por %%mm1, %%mm6 \n\t"\
  658. "por %%mm3, %%mm6 \n\t"\
  659. MOVNTQ(%%mm6, (dst))\
  660. \
  661. "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
  662. "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
  663. "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
  664. "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
  665. \
  666. "pand "MANGLE(M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
  667. "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
  668. "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
  669. \
  670. "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
  671. "por %%mm3, %%mm6 \n\t"\
  672. MOVNTQ(%%mm6, 8(dst))\
  673. \
  674. "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
  675. "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
  676. "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
  677. \
  678. "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
  679. "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
  680. "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
  681. \
  682. "por %%mm1, %%mm3 \n\t"\
  683. "por %%mm3, %%mm6 \n\t"\
  684. MOVNTQ(%%mm6, 16(dst))\
  685. \
  686. "addl $24, "#dst" \n\t"\
  687. \
  688. "addl $8, "#index" \n\t"\
  689. "cmpl "#dstw", "#index" \n\t"\
  690. " jb 1b \n\t"
  691. #ifdef HAVE_MMX2
  692. #undef WRITEBGR24
  693. #define WRITEBGR24 WRITEBGR24MMX2
  694. #else
  695. #undef WRITEBGR24
  696. #define WRITEBGR24 WRITEBGR24MMX
  697. #endif
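/* Illustrative plain-C sketch (not part of the original file) of what the
   WRITEBGR24 variants produce: three bytes per pixel with blue at the lowest
   address (assuming the usual BGR24 memory layout), which is why the MMX
   versions above have to shuffle the 0RGB dwords so heavily. The helper name
   is made up for illustration. */
#if 0
static void write_bgr24_sketch(uint8_t *dest, uint8_t r, uint8_t g, uint8_t b)
{
	dest[0]= b;  // lowest address
	dest[1]= g;
	dest[2]= r;
}
#endif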
  698. #define WRITEYUY2(dst, dstw, index) \
  699. "packuswb %%mm3, %%mm3 \n\t"\
  700. "packuswb %%mm4, %%mm4 \n\t"\
  701. "packuswb %%mm7, %%mm1 \n\t"\
  702. "punpcklbw %%mm4, %%mm3 \n\t"\
  703. "movq %%mm1, %%mm7 \n\t"\
  704. "punpcklbw %%mm3, %%mm1 \n\t"\
  705. "punpckhbw %%mm3, %%mm7 \n\t"\
  706. \
  707. MOVNTQ(%%mm1, (dst, index, 2))\
  708. MOVNTQ(%%mm7, 8(dst, index, 2))\
  709. \
  710. "addl $8, "#index" \n\t"\
  711. "cmpl "#dstw", "#index" \n\t"\
  712. " jb 1b \n\t"
  713. static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  714. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  715. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
  716. {
  717. #ifdef HAVE_MMX
  718. if(uDest != NULL)
  719. {
  720. asm volatile(
  721. YSCALEYUV2YV12X(0, CHR_MMX_FILTER_OFFSET)
  722. :: "r" (&c->redDither),
  723. "r" (uDest), "m" (chrDstW)
  724. : "%eax", "%edx", "%esi"
  725. );
  726. asm volatile(
  727. YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET)
  728. :: "r" (&c->redDither),
  729. "r" (vDest), "m" (chrDstW)
  730. : "%eax", "%edx", "%esi"
  731. );
  732. }
  733. asm volatile(
  734. YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET)
  735. :: "r" (&c->redDither),
  736. "r" (dest), "m" (dstW)
  737. : "%eax", "%edx", "%esi"
  738. );
  739. #else
  740. #ifdef HAVE_ALTIVEC
  741. yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
  742. chrFilter, chrSrc, chrFilterSize,
  743. dest, uDest, vDest, dstW, chrDstW);
  744. #else //HAVE_ALTIVEC
  745. yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
  746. chrFilter, chrSrc, chrFilterSize,
  747. dest, uDest, vDest, dstW, chrDstW);
  748. #endif //!HAVE_ALTIVEC
  749. #endif
  750. }
  751. static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
  752. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
  753. {
  754. #ifdef HAVE_MMX
  755. if(uDest != NULL)
  756. {
  757. asm volatile(
  758. YSCALEYUV2YV121
  759. :: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
  760. "g" (-chrDstW)
  761. : "%eax"
  762. );
  763. asm volatile(
  764. YSCALEYUV2YV121
  765. :: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
  766. "g" (-chrDstW)
  767. : "%eax"
  768. );
  769. }
  770. asm volatile(
  771. YSCALEYUV2YV121
  772. :: "r" (lumSrc + dstW), "r" (dest + dstW),
  773. "g" (-dstW)
  774. : "%eax"
  775. );
  776. #else
  777. int i;
  778. for(i=0; i<dstW; i++)
  779. {
  780. int val= lumSrc[i]>>7;
  781. if(val&256){
  782. if(val<0) val=0;
  783. else val=255;
  784. }
  785. dest[i]= val;
  786. }
  787. if(uDest != NULL)
  788. for(i=0; i<chrDstW; i++)
  789. {
  790. int u=chrSrc[i]>>7;
  791. int v=chrSrc[i + 2048]>>7;
  792. if((u|v)&256){
  793. if(u<0) u=0;
  794. else if (u>255) u=255;
  795. if(v<0) v=0;
  796. else if (v>255) v=255;
  797. }
  798. uDest[i]= u;
  799. vDest[i]= v;
  800. }
  801. #endif
  802. }
  803. /**
  804. * vertical scale YV12 to RGB
  805. */
  806. static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  807. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  808. uint8_t *dest, int dstW, int dstY)
  809. {
  810. int dummy=0;
  811. switch(c->dstFormat)
  812. {
  813. #ifdef HAVE_MMX
  814. case IMGFMT_BGR32:
  815. {
  816. asm volatile(
  817. YSCALEYUV2RGBX
  818. WRITEBGR32(%4, %5, %%eax)
  819. :: "r" (&c->redDither),
  820. "m" (dummy), "m" (dummy), "m" (dummy),
  821. "r" (dest), "m" (dstW)
  822. : "%eax", "%edx", "%esi"
  823. );
  824. }
  825. break;
  826. case IMGFMT_BGR24:
  827. {
  828. asm volatile(
  829. YSCALEYUV2RGBX
  830. "leal (%%eax, %%eax, 2), %%ebx \n\t" //FIXME optimize
  831. "addl %4, %%ebx \n\t"
  832. WRITEBGR24(%%ebx, %5, %%eax)
  833. :: "r" (&c->redDither),
  834. "m" (dummy), "m" (dummy), "m" (dummy),
  835. "r" (dest), "m" (dstW)
  836. : "%eax", "%ebx", "%edx", "%esi" //FIXME ebx
  837. );
  838. }
  839. break;
  840. case IMGFMT_BGR15:
  841. {
  842. asm volatile(
  843. YSCALEYUV2RGBX
  844. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  845. #ifdef DITHER1XBPP
  846. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  847. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  848. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  849. #endif
  850. WRITEBGR15(%4, %5, %%eax)
  851. :: "r" (&c->redDither),
  852. "m" (dummy), "m" (dummy), "m" (dummy),
  853. "r" (dest), "m" (dstW)
  854. : "%eax", "%edx", "%esi"
  855. );
  856. }
  857. break;
  858. case IMGFMT_BGR16:
  859. {
  860. asm volatile(
  861. YSCALEYUV2RGBX
  862. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  863. #ifdef DITHER1XBPP
  864. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  865. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  866. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  867. #endif
  868. WRITEBGR16(%4, %5, %%eax)
  869. :: "r" (&c->redDither),
  870. "m" (dummy), "m" (dummy), "m" (dummy),
  871. "r" (dest), "m" (dstW)
  872. : "%eax", "%edx", "%esi"
  873. );
  874. }
  875. break;
  876. case IMGFMT_YUY2:
  877. {
  878. asm volatile(
  879. YSCALEYUV2PACKEDX
  880. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  881. "psraw $3, %%mm3 \n\t"
  882. "psraw $3, %%mm4 \n\t"
  883. "psraw $3, %%mm1 \n\t"
  884. "psraw $3, %%mm7 \n\t"
  885. WRITEYUY2(%4, %5, %%eax)
  886. :: "r" (&c->redDither),
  887. "m" (dummy), "m" (dummy), "m" (dummy),
  888. "r" (dest), "m" (dstW)
  889. : "%eax", "%edx", "%esi"
  890. );
  891. }
  892. break;
  893. #endif
  894. default:
  895. #ifdef HAVE_ALTIVEC
  896. altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize,
  897. chrFilter, chrSrc, chrFilterSize,
  898. dest, dstW, dstY);
  899. #else
  900. yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
  901. chrFilter, chrSrc, chrFilterSize,
  902. dest, dstW, dstY);
  903. #endif
  904. break;
  905. }
  906. }
  907. /**
  908. * vertical bilinear scale YV12 to RGB
  909. */
  910. static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
  911. uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
  912. {
  913. int yalpha1=yalpha^4095;
  914. int uvalpha1=uvalpha^4095;
  915. int i;
  916. #if 0 //isn't used
  917. if(flags&SWS_FULL_CHR_H_INT)
  918. {
  919. switch(dstFormat)
  920. {
  921. #ifdef HAVE_MMX
  922. case IMGFMT_BGR32:
  923. asm volatile(
  924. FULL_YSCALEYUV2RGB
  925. "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
  926. "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
  927. "movq %%mm3, %%mm1 \n\t"
  928. "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
  929. "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
  930. MOVNTQ(%%mm3, (%4, %%eax, 4))
  931. MOVNTQ(%%mm1, 8(%4, %%eax, 4))
  932. "addl $4, %%eax \n\t"
  933. "cmpl %5, %%eax \n\t"
  934. " jb 1b \n\t"
  935. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  936. "m" (yalpha1), "m" (uvalpha1)
  937. : "%eax"
  938. );
  939. break;
  940. case IMGFMT_BGR24:
  941. asm volatile(
  942. FULL_YSCALEYUV2RGB
  943. // lsb ... msb
  944. "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
  945. "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
  946. "movq %%mm3, %%mm1 \n\t"
  947. "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
  948. "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
  949. "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
  950. "psrlq $8, %%mm3 \n\t" // GR0BGR00
  951. "pand "MANGLE(bm00000111)", %%mm2\n\t" // BGR00000
  952. "pand "MANGLE(bm11111000)", %%mm3\n\t" // 000BGR00
  953. "por %%mm2, %%mm3 \n\t" // BGRBGR00
  954. "movq %%mm1, %%mm2 \n\t"
  955. "psllq $48, %%mm1 \n\t" // 000000BG
  956. "por %%mm1, %%mm3 \n\t" // BGRBGRBG
  957. "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
  958. "psrld $16, %%mm2 \n\t" // R000R000
  959. "psrlq $24, %%mm1 \n\t" // 0BGR0000
  960. "por %%mm2, %%mm1 \n\t" // RBGRR000
  961. "movl %4, %%ebx \n\t"
  962. "addl %%eax, %%ebx \n\t"
  963. #ifdef HAVE_MMX2
  964. //FIXME Alignment
  965. "movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
  966. "movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
  967. #else
  968. "movd %%mm3, (%%ebx, %%eax, 2) \n\t"
  969. "psrlq $32, %%mm3 \n\t"
  970. "movd %%mm3, 4(%%ebx, %%eax, 2) \n\t"
  971. "movd %%mm1, 8(%%ebx, %%eax, 2) \n\t"
  972. #endif
  973. "addl $4, %%eax \n\t"
  974. "cmpl %5, %%eax \n\t"
  975. " jb 1b \n\t"
  976. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
  977. "m" (yalpha1), "m" (uvalpha1)
  978. : "%eax", "%ebx"
  979. );
  980. break;
  981. case IMGFMT_BGR15:
  982. asm volatile(
  983. FULL_YSCALEYUV2RGB
  984. #ifdef DITHER1XBPP
  985. "paddusb "MANGLE(g5Dither)", %%mm1\n\t"
  986. "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
  987. "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
  988. #endif
  989. "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
  990. "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
  991. "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
  992. "psrlw $3, %%mm3 \n\t"
  993. "psllw $2, %%mm1 \n\t"
  994. "psllw $7, %%mm0 \n\t"
  995. "pand "MANGLE(g15Mask)", %%mm1 \n\t"
  996. "pand "MANGLE(r15Mask)", %%mm0 \n\t"
  997. "por %%mm3, %%mm1 \n\t"
  998. "por %%mm1, %%mm0 \n\t"
  999. MOVNTQ(%%mm0, (%4, %%eax, 2))
  1000. "addl $4, %%eax \n\t"
  1001. "cmpl %5, %%eax \n\t"
  1002. " jb 1b \n\t"
  1003. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1004. "m" (yalpha1), "m" (uvalpha1)
  1005. : "%eax"
  1006. );
  1007. break;
  1008. case IMGFMT_BGR16:
  1009. asm volatile(
  1010. FULL_YSCALEYUV2RGB
  1011. #ifdef DITHER1XBPP
  1012. "paddusb "MANGLE(g6Dither)", %%mm1\n\t"
  1013. "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
  1014. "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
  1015. #endif
  1016. "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
  1017. "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
  1018. "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
  1019. "psrlw $3, %%mm3 \n\t"
  1020. "psllw $3, %%mm1 \n\t"
  1021. "psllw $8, %%mm0 \n\t"
  1022. "pand "MANGLE(g16Mask)", %%mm1 \n\t"
  1023. "pand "MANGLE(r16Mask)", %%mm0 \n\t"
  1024. "por %%mm3, %%mm1 \n\t"
  1025. "por %%mm1, %%mm0 \n\t"
  1026. MOVNTQ(%%mm0, (%4, %%eax, 2))
  1027. "addl $4, %%eax \n\t"
  1028. "cmpl %5, %%eax \n\t"
  1029. " jb 1b \n\t"
  1030. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1031. "m" (yalpha1), "m" (uvalpha1)
  1032. : "%eax"
  1033. );
  1034. break;
  1035. #endif
  1036. case IMGFMT_RGB32:
  1037. #ifndef HAVE_MMX
  1038. case IMGFMT_BGR32:
  1039. #endif
  1040. if(dstFormat==IMGFMT_BGR32)
  1041. {
  1042. int i;
  1043. #ifdef WORDS_BIGENDIAN
  1044. dest++;
  1045. #endif
  1046. for(i=0;i<dstW;i++){
  1047. // vertical linear interpolation && yuv2rgb in a single step:
  1048. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1049. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1050. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1051. dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
  1052. dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
  1053. dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
  1054. dest+= 4;
  1055. }
  1056. }
  1057. else if(dstFormat==IMGFMT_BGR24)
  1058. {
  1059. int i;
  1060. for(i=0;i<dstW;i++){
  1061. // vertical linear interpolation && yuv2rgb in a single step:
  1062. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1063. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1064. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1065. dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
  1066. dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
  1067. dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
  1068. dest+= 3;
  1069. }
  1070. }
  1071. else if(dstFormat==IMGFMT_BGR16)
  1072. {
  1073. int i;
  1074. for(i=0;i<dstW;i++){
  1075. // vertical linear interpolation && yuv2rgb in a single step:
  1076. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1077. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1078. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1079. ((uint16_t*)dest)[i] =
  1080. clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
  1081. clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
  1082. clip_table16r[(Y + yuvtab_3343[V]) >>13];
  1083. }
  1084. }
  1085. else if(dstFormat==IMGFMT_BGR15)
  1086. {
  1087. int i;
  1088. for(i=0;i<dstW;i++){
  1089. // vertical linear interpolation && yuv2rgb in a single step:
  1090. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1091. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1092. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1093. ((uint16_t*)dest)[i] =
  1094. clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
  1095. clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
  1096. clip_table15r[(Y + yuvtab_3343[V]) >>13];
  1097. }
  1098. }
  1099. }//FULL_UV_IPOL
  1100. else
  1101. {
  1102. #endif // if 0
  1103. #ifdef HAVE_MMX
  1104. switch(c->dstFormat)
  1105. {
  1106. //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
  1107. case IMGFMT_BGR32:
  1108. asm volatile(
  1109. "movl %%esp, "ESP_OFFSET"(%5) \n\t"
  1110. "movl %4, %%esp \n\t"
  1111. YSCALEYUV2RGB(%%eax, %5)
  1112. WRITEBGR32(%%esp, 8280(%5), %%eax)
  1113. "movl "ESP_OFFSET"(%5), %%esp \n\t"
  1114. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1115. "r" (&c->redDither)
  1116. : "%eax"
  1117. );
  1118. return;
  1119. case IMGFMT_BGR24:
  1120. asm volatile(
  1121. "movl %%esp, "ESP_OFFSET"(%5) \n\t"
  1122. "movl %4, %%esp \n\t"
  1123. YSCALEYUV2RGB(%%eax, %5)
  1124. WRITEBGR24(%%esp, 8280(%5), %%eax)
  1125. "movl "ESP_OFFSET"(%5), %%esp \n\t"
  1126. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1127. "r" (&c->redDither)
  1128. : "%eax"
  1129. );
  1130. return;
  1131. case IMGFMT_BGR15:
  1132. asm volatile(
  1133. "movl %%esp, "ESP_OFFSET"(%5) \n\t"
  1134. "movl %4, %%esp \n\t"
  1135. YSCALEYUV2RGB(%%eax, %5)
  1136. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1137. #ifdef DITHER1XBPP
  1138. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1139. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1140. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1141. #endif
  1142. WRITEBGR15(%%esp, 8280(%5), %%eax)
  1143. "movl "ESP_OFFSET"(%5), %%esp \n\t"
  1144. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1145. "r" (&c->redDither)
  1146. : "%eax"
  1147. );
  1148. return;
  1149. case IMGFMT_BGR16:
  1150. asm volatile(
  1151. "movl %%esp, "ESP_OFFSET"(%5) \n\t"
  1152. "movl %4, %%esp \n\t"
  1153. YSCALEYUV2RGB(%%eax, %5)
  1154. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1155. #ifdef DITHER1XBPP
  1156. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1157. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1158. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1159. #endif
  1160. WRITEBGR16(%%esp, 8280(%5), %%eax)
  1161. "movl "ESP_OFFSET"(%5), %%esp \n\t"
  1162. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1163. "r" (&c->redDither)
  1164. : "%eax"
  1165. );
  1166. return;
  1167. case IMGFMT_YUY2:
  1168. asm volatile(
  1169. "movl %%esp, "ESP_OFFSET"(%5) \n\t"
  1170. "movl %4, %%esp \n\t"
  1171. YSCALEYUV2PACKED(%%eax, %5)
  1172. WRITEYUY2(%%esp, 8280(%5), %%eax)
  1173. "movl "ESP_OFFSET"(%5), %%esp \n\t"
  1174. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1175. "r" (&c->redDither)
  1176. : "%eax"
  1177. );
  1178. return;
  1179. default: break;
  1180. }
  1181. #endif //HAVE_MMX
  1182. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C)
  1183. }
  1184. /**
  1185. * YV12 to RGB without scaling or interpolating
  1186. */
  1187. static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
  1188. uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
  1189. {
  1190. const int yalpha1=0;
  1191. int i;
  1192. uint16_t *buf1= buf0; //FIXME needed for the rgb1/bgr1
  1193. const int yalpha= 4096; //FIXME ...
  1194. if(flags&SWS_FULL_CHR_H_INT)
  1195. {
  1196. RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
  1197. return;
  1198. }
  1199. #ifdef HAVE_MMX
  1200. if( uvalpha < 2048 ) // note this is not correct (shifts chrominance by 0.5 pixels) but it's a bit faster
  1201. {
  1202. switch(dstFormat)
  1203. {
  1204. case IMGFMT_BGR32:
  1205. asm volatile(
  1206. "movl %%esp, "ESP_OFFSET"(%5) \n\t"
  1207. "movl %4, %%esp \n\t"
  1208. YSCALEYUV2RGB1(%%eax, %5)
  1209. WRITEBGR32(%%esp, 8280(%5), %%eax)
  1210. "movl "ESP_OFFSET"(%5), %%esp \n\t"
  1211. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1212. "r" (&c->redDither)
  1213. : "%eax"
  1214. );
  1215. return;
  1216. case IMGFMT_BGR24:
  1217. asm volatile(
  1218. "movl %%esp, "ESP_OFFSET"(%5) \n\t"
  1219. "movl %4, %%esp \n\t"
  1220. YSCALEYUV2RGB1(%%eax, %5)
  1221. WRITEBGR24(%%esp, 8280(%5), %%eax)
  1222. "movl "ESP_OFFSET"(%5), %%esp \n\t"
  1223. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1224. "r" (&c->redDither)
  1225. : "%eax"
  1226. );
  1227. return;
  1228. case IMGFMT_BGR15:
  1229. asm volatile(
  1230. "movl %%esp, "ESP_OFFSET"(%5) \n\t"
  1231. "movl %4, %%esp \n\t"
  1232. YSCALEYUV2RGB1(%%eax, %5)
  1233. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1234. #ifdef DITHER1XBPP
  1235. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1236. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1237. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1238. #endif
  1239. WRITEBGR15(%%esp, 8280(%5), %%eax)
  1240. "movl "ESP_OFFSET"(%5), %%esp \n\t"
  1241. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1242. "r" (&c->redDither)
  1243. : "%eax"
  1244. );
  1245. return;
  1246. case IMGFMT_BGR16:
  1247. asm volatile(
  1248. "movl %%esp, "ESP_OFFSET"(%5) \n\t"
  1249. "movl %4, %%esp \n\t"
  1250. YSCALEYUV2RGB1(%%eax, %5)
  1251. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1252. #ifdef DITHER1XBPP
  1253. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1254. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1255. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1256. #endif
  1257. WRITEBGR16(%%esp, 8280(%5), %%eax)
  1258. "movl "ESP_OFFSET"(%5), %%esp \n\t"
  1259. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1260. "r" (&c->redDither)
  1261. : "%eax"
  1262. );
  1263. return;
  1264. case IMGFMT_YUY2:
  1265. asm volatile(
  1266. "movl %%esp, "ESP_OFFSET"(%5) \n\t"
  1267. "movl %4, %%esp \n\t"
  1268. YSCALEYUV2PACKED1(%%eax, %5)
  1269. WRITEYUY2(%%esp, 8280(%5), %%eax)
  1270. "movl "ESP_OFFSET"(%5), %%esp \n\t"
  1271. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1272. "r" (&c->redDither)
  1273. : "%eax"
  1274. );
  1275. return;
  1276. }
  1277. }
  1278. else
  1279. {
  1280. switch(dstFormat)
  1281. {
  1282. case IMGFMT_BGR32:
  1283. asm volatile(
  1284. "movl %%esp, "ESP_OFFSET"(%5) \n\t"
  1285. "movl %4, %%esp \n\t"
  1286. YSCALEYUV2RGB1b(%%eax, %5)
  1287. WRITEBGR32(%%esp, 8280(%5), %%eax)
  1288. "movl "ESP_OFFSET"(%5), %%esp \n\t"
  1289. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1290. "r" (&c->redDither)
  1291. : "%eax"
  1292. );
  1293. return;
  1294. case IMGFMT_BGR24:
  1295. asm volatile(
  1296. "movl %%esp, "ESP_OFFSET"(%5) \n\t"
  1297. "movl %4, %%esp \n\t"
  1298. YSCALEYUV2RGB1b(%%eax, %5)
  1299. WRITEBGR24(%%esp, 8280(%5), %%eax)
  1300. "movl "ESP_OFFSET"(%5), %%esp \n\t"
  1301. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1302. "r" (&c->redDither)
  1303. : "%eax"
  1304. );
  1305. return;
  1306. case IMGFMT_BGR15:
  1307. asm volatile(
  1308. "movl %%esp, "ESP_OFFSET"(%5) \n\t"
  1309. "movl %4, %%esp \n\t"
  1310. YSCALEYUV2RGB1b(%%eax, %5)
  1311. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1312. #ifdef DITHER1XBPP
  1313. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1314. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1315. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1316. #endif
  1317. WRITEBGR15(%%esp, 8280(%5), %%eax)
  1318. "movl "ESP_OFFSET"(%5), %%esp \n\t"
  1319. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1320. "r" (&c->redDither)
  1321. : "%eax"
  1322. );
  1323. return;
  1324. case IMGFMT_BGR16:
  1325. asm volatile(
  1326. "movl %%esp, "ESP_OFFSET"(%5) \n\t"
  1327. "movl %4, %%esp \n\t"
  1328. YSCALEYUV2RGB1b(%%eax, %5)
  1329. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1330. #ifdef DITHER1XBPP
  1331. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1332. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1333. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1334. #endif
  1335. WRITEBGR16(%%esp, 8280(%5), %%eax)
  1336. "movl "ESP_OFFSET"(%5), %%esp \n\t"
  1337. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1338. "r" (&c->redDither)
  1339. : "%eax"
  1340. );
  1341. return;
  1342. case IMGFMT_YUY2:
  1343. asm volatile(
  1344. "movl %%esp, "ESP_OFFSET"(%5) \n\t"
  1345. "movl %4, %%esp \n\t"
  1346. YSCALEYUV2PACKED1b(%%eax, %5)
  1347. WRITEYUY2(%%esp, 8280(%5), %%eax)
  1348. "movl "ESP_OFFSET"(%5), %%esp \n\t"
  1349. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1350. "r" (&c->redDither)
  1351. : "%eax"
  1352. );
  1353. return;
  1354. }
  1355. }
  1356. #endif
  1357. if( uvalpha < 2048 )
  1358. {
  1359. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C)
  1360. }else{
  1361. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C)
  1362. }
  1363. }
  1364. //FIXME yuy2* can read up to 7 samples too many
  1365. static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, int width)
  1366. {
  1367. #ifdef HAVE_MMX
  1368. asm volatile(
  1369. "movq "MANGLE(bm01010101)", %%mm2\n\t"
  1370. "movl %0, %%eax \n\t"
  1371. "1: \n\t"
  1372. "movq (%1, %%eax,2), %%mm0 \n\t"
  1373. "movq 8(%1, %%eax,2), %%mm1 \n\t"
  1374. "pand %%mm2, %%mm0 \n\t"
  1375. "pand %%mm2, %%mm1 \n\t"
  1376. "packuswb %%mm1, %%mm0 \n\t"
  1377. "movq %%mm0, (%2, %%eax) \n\t"
  1378. "addl $8, %%eax \n\t"
  1379. " js 1b \n\t"
  1380. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1381. : "%eax"
  1382. );
  1383. #else
  1384. int i;
  1385. for(i=0; i<width; i++)
  1386. dst[i]= src[2*i];
  1387. #endif
  1388. }
  1389. static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1390. {
  1391. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1392. asm volatile(
  1393. "movq "MANGLE(bm01010101)", %%mm4\n\t"
  1394. "movl %0, %%eax \n\t"
  1395. "1: \n\t"
  1396. "movq (%1, %%eax,4), %%mm0 \n\t"
  1397. "movq 8(%1, %%eax,4), %%mm1 \n\t"
  1398. "movq (%2, %%eax,4), %%mm2 \n\t"
  1399. "movq 8(%2, %%eax,4), %%mm3 \n\t"
  1400. PAVGB(%%mm2, %%mm0)
  1401. PAVGB(%%mm3, %%mm1)
  1402. "psrlw $8, %%mm0 \n\t"
  1403. "psrlw $8, %%mm1 \n\t"
  1404. "packuswb %%mm1, %%mm0 \n\t"
  1405. "movq %%mm0, %%mm1 \n\t"
  1406. "psrlw $8, %%mm0 \n\t"
  1407. "pand %%mm4, %%mm1 \n\t"
  1408. "packuswb %%mm0, %%mm0 \n\t"
  1409. "packuswb %%mm1, %%mm1 \n\t"
  1410. "movd %%mm0, (%4, %%eax) \n\t"
  1411. "movd %%mm1, (%3, %%eax) \n\t"
  1412. "addl $4, %%eax \n\t"
  1413. " js 1b \n\t"
  1414. : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
  1415. : "%eax"
  1416. );
  1417. #else
  1418. int i;
  1419. for(i=0; i<width; i++)
  1420. {
  1421. dstU[i]= (src1[4*i + 1] + src2[4*i + 1])>>1;
  1422. dstV[i]= (src1[4*i + 3] + src2[4*i + 3])>>1;
  1423. }
  1424. #endif
  1425. }
  1426. // this is almost identical to the previous one, and exists only because yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses
  1427. static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, int width)
  1428. {
  1429. #ifdef HAVE_MMX
  1430. asm volatile(
  1431. "movl %0, %%eax \n\t"
  1432. "1: \n\t"
  1433. "movq (%1, %%eax,2), %%mm0 \n\t"
  1434. "movq 8(%1, %%eax,2), %%mm1 \n\t"
  1435. "psrlw $8, %%mm0 \n\t"
  1436. "psrlw $8, %%mm1 \n\t"
  1437. "packuswb %%mm1, %%mm0 \n\t"
  1438. "movq %%mm0, (%2, %%eax) \n\t"
  1439. "addl $8, %%eax \n\t"
  1440. " js 1b \n\t"
  1441. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1442. : "%eax"
  1443. );
  1444. #else
  1445. int i;
  1446. for(i=0; i<width; i++)
  1447. dst[i]= src[2*i+1];
  1448. #endif
  1449. }
  1450. static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1451. {
  1452. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1453. asm volatile(
  1454. "movq "MANGLE(bm01010101)", %%mm4\n\t"
  1455. "movl %0, %%eax \n\t"
  1456. "1: \n\t"
  1457. "movq (%1, %%eax,4), %%mm0 \n\t"
  1458. "movq 8(%1, %%eax,4), %%mm1 \n\t"
  1459. "movq (%2, %%eax,4), %%mm2 \n\t"
  1460. "movq 8(%2, %%eax,4), %%mm3 \n\t"
  1461. PAVGB(%%mm2, %%mm0)
  1462. PAVGB(%%mm3, %%mm1)
  1463. "pand %%mm4, %%mm0 \n\t"
  1464. "pand %%mm4, %%mm1 \n\t"
  1465. "packuswb %%mm1, %%mm0 \n\t"
  1466. "movq %%mm0, %%mm1 \n\t"
  1467. "psrlw $8, %%mm0 \n\t"
  1468. "pand %%mm4, %%mm1 \n\t"
  1469. "packuswb %%mm0, %%mm0 \n\t"
  1470. "packuswb %%mm1, %%mm1 \n\t"
  1471. "movd %%mm0, (%4, %%eax) \n\t"
  1472. "movd %%mm1, (%3, %%eax) \n\t"
  1473. "addl $4, %%eax \n\t"
  1474. " js 1b \n\t"
  1475. : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
  1476. : "%eax"
  1477. );
  1478. #else
  1479. int i;
  1480. for(i=0; i<width; i++)
  1481. {
  1482. dstU[i]= (src1[4*i + 0] + src2[4*i + 0])>>1;
  1483. dstV[i]= (src1[4*i + 2] + src2[4*i + 2])>>1;
  1484. }
  1485. #endif
  1486. }
  1487. static inline void RENAME(bgr32ToY)(uint8_t *dst, uint8_t *src, int width)
  1488. {
  1489. #ifdef HAVE_MMXFIXME
  1490. #else
  1491. int i;
  1492. for(i=0; i<width; i++)
  1493. {
  1494. int b= ((uint32_t*)src)[i]&0xFF;
  1495. int g= (((uint32_t*)src)[i]>>8)&0xFF;
  1496. int r= (((uint32_t*)src)[i]>>16)&0xFF;
  1497. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  1498. }
  1499. #endif
  1500. }
  1501. static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1502. {
  1503. #ifdef HAVE_MMXFIXME
  1504. #else
  1505. int i;
  1506. for(i=0; i<width; i++)
  1507. {
  1508. const int a= ((uint32_t*)src1)[2*i+0];
  1509. const int e= ((uint32_t*)src1)[2*i+1];
  1510. const int c= ((uint32_t*)src2)[2*i+0];
  1511. const int d= ((uint32_t*)src2)[2*i+1];
  1512. const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF);
  1513. const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00);
  1514. const int b= l&0x3FF;
  1515. const int g= h>>8;
  1516. const int r= l>>16;
  1517. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1518. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1519. }
  1520. #endif
  1521. }
  1522. static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, int width)
  1523. {
  1524. #ifdef HAVE_MMX
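/* MMX path: 8 luma samples per iteration. Each movd picks up one 24-bit BGR pixel
   plus the first byte of the next one; pmaddwd against the bgr2YCoeff table and the
   w1111 horizontal add accumulate roughly RY*r + GY*g + BY*b per pixel (the stray
   byte is presumably cancelled by a zero coefficient), and the packed result is
   offset by bgr2YOffset before being stored. */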
  1525. asm volatile(
  1526. "movl %2, %%eax \n\t"
  1527. "movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t"
  1528. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1529. "pxor %%mm7, %%mm7 \n\t"
  1530. "leal (%%eax, %%eax, 2), %%ebx \n\t"
  1531. ".balign 16 \n\t"
  1532. "1: \n\t"
  1533. PREFETCH" 64(%0, %%ebx) \n\t"
  1534. "movd (%0, %%ebx), %%mm0 \n\t"
  1535. "movd 3(%0, %%ebx), %%mm1 \n\t"
  1536. "punpcklbw %%mm7, %%mm0 \n\t"
  1537. "punpcklbw %%mm7, %%mm1 \n\t"
  1538. "movd 6(%0, %%ebx), %%mm2 \n\t"
  1539. "movd 9(%0, %%ebx), %%mm3 \n\t"
  1540. "punpcklbw %%mm7, %%mm2 \n\t"
  1541. "punpcklbw %%mm7, %%mm3 \n\t"
  1542. "pmaddwd %%mm6, %%mm0 \n\t"
  1543. "pmaddwd %%mm6, %%mm1 \n\t"
  1544. "pmaddwd %%mm6, %%mm2 \n\t"
  1545. "pmaddwd %%mm6, %%mm3 \n\t"
  1546. #ifndef FAST_BGR2YV12
  1547. "psrad $8, %%mm0 \n\t"
  1548. "psrad $8, %%mm1 \n\t"
  1549. "psrad $8, %%mm2 \n\t"
  1550. "psrad $8, %%mm3 \n\t"
  1551. #endif
  1552. "packssdw %%mm1, %%mm0 \n\t"
  1553. "packssdw %%mm3, %%mm2 \n\t"
  1554. "pmaddwd %%mm5, %%mm0 \n\t"
  1555. "pmaddwd %%mm5, %%mm2 \n\t"
  1556. "packssdw %%mm2, %%mm0 \n\t"
  1557. "psraw $7, %%mm0 \n\t"
  1558. "movd 12(%0, %%ebx), %%mm4 \n\t"
  1559. "movd 15(%0, %%ebx), %%mm1 \n\t"
  1560. "punpcklbw %%mm7, %%mm4 \n\t"
  1561. "punpcklbw %%mm7, %%mm1 \n\t"
  1562. "movd 18(%0, %%ebx), %%mm2 \n\t"
  1563. "movd 21(%0, %%ebx), %%mm3 \n\t"
  1564. "punpcklbw %%mm7, %%mm2 \n\t"
  1565. "punpcklbw %%mm7, %%mm3 \n\t"
  1566. "pmaddwd %%mm6, %%mm4 \n\t"
  1567. "pmaddwd %%mm6, %%mm1 \n\t"
  1568. "pmaddwd %%mm6, %%mm2 \n\t"
  1569. "pmaddwd %%mm6, %%mm3 \n\t"
  1570. #ifndef FAST_BGR2YV12
  1571. "psrad $8, %%mm4 \n\t"
  1572. "psrad $8, %%mm1 \n\t"
  1573. "psrad $8, %%mm2 \n\t"
  1574. "psrad $8, %%mm3 \n\t"
  1575. #endif
  1576. "packssdw %%mm1, %%mm4 \n\t"
  1577. "packssdw %%mm3, %%mm2 \n\t"
  1578. "pmaddwd %%mm5, %%mm4 \n\t"
  1579. "pmaddwd %%mm5, %%mm2 \n\t"
  1580. "addl $24, %%ebx \n\t"
  1581. "packssdw %%mm2, %%mm4 \n\t"
  1582. "psraw $7, %%mm4 \n\t"
  1583. "packuswb %%mm4, %%mm0 \n\t"
  1584. "paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t"
  1585. "movq %%mm0, (%1, %%eax) \n\t"
  1586. "addl $8, %%eax \n\t"
  1587. " js 1b \n\t"
  1588. : : "r" (src+width*3), "r" (dst+width), "g" (-width)
  1589. : "%eax", "%ebx"
  1590. );
  1591. #else
  1592. int i;
  1593. for(i=0; i<width; i++)
  1594. {
  1595. int b= src[i*3+0];
  1596. int g= src[i*3+1];
  1597. int r= src[i*3+2];
  1598. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  1599. }
  1600. #endif
  1601. }
  1602. static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1603. {
  1604. #ifdef HAVE_MMX
  1605. asm volatile(
  1606. "movl %4, %%eax \n\t"
  1607. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1608. "movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t"
  1609. "pxor %%mm7, %%mm7 \n\t"
  1610. "leal (%%eax, %%eax, 2), %%ebx \n\t"
  1611. "addl %%ebx, %%ebx \n\t"
  1612. ".balign 16 \n\t"
  1613. "1: \n\t"
  1614. PREFETCH" 64(%0, %%ebx) \n\t"
  1615. PREFETCH" 64(%1, %%ebx) \n\t"
  1616. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1617. "movq (%0, %%ebx), %%mm0 \n\t"
  1618. "movq (%1, %%ebx), %%mm1 \n\t"
  1619. "movq 6(%0, %%ebx), %%mm2 \n\t"
  1620. "movq 6(%1, %%ebx), %%mm3 \n\t"
  1621. PAVGB(%%mm1, %%mm0)
  1622. PAVGB(%%mm3, %%mm2)
  1623. "movq %%mm0, %%mm1 \n\t"
  1624. "movq %%mm2, %%mm3 \n\t"
  1625. "psrlq $24, %%mm0 \n\t"
  1626. "psrlq $24, %%mm2 \n\t"
  1627. PAVGB(%%mm1, %%mm0)
  1628. PAVGB(%%mm3, %%mm2)
  1629. "punpcklbw %%mm7, %%mm0 \n\t"
  1630. "punpcklbw %%mm7, %%mm2 \n\t"
  1631. #else
  1632. "movd (%0, %%ebx), %%mm0 \n\t"
  1633. "movd (%1, %%ebx), %%mm1 \n\t"
  1634. "movd 3(%0, %%ebx), %%mm2 \n\t"
  1635. "movd 3(%1, %%ebx), %%mm3 \n\t"
  1636. "punpcklbw %%mm7, %%mm0 \n\t"
  1637. "punpcklbw %%mm7, %%mm1 \n\t"
  1638. "punpcklbw %%mm7, %%mm2 \n\t"
  1639. "punpcklbw %%mm7, %%mm3 \n\t"
  1640. "paddw %%mm1, %%mm0 \n\t"
  1641. "paddw %%mm3, %%mm2 \n\t"
  1642. "paddw %%mm2, %%mm0 \n\t"
  1643. "movd 6(%0, %%ebx), %%mm4 \n\t"
  1644. "movd 6(%1, %%ebx), %%mm1 \n\t"
  1645. "movd 9(%0, %%ebx), %%mm2 \n\t"
  1646. "movd 9(%1, %%ebx), %%mm3 \n\t"
  1647. "punpcklbw %%mm7, %%mm4 \n\t"
  1648. "punpcklbw %%mm7, %%mm1 \n\t"
  1649. "punpcklbw %%mm7, %%mm2 \n\t"
  1650. "punpcklbw %%mm7, %%mm3 \n\t"
  1651. "paddw %%mm1, %%mm4 \n\t"
  1652. "paddw %%mm3, %%mm2 \n\t"
  1653. "paddw %%mm4, %%mm2 \n\t"
  1654. "psrlw $2, %%mm0 \n\t"
  1655. "psrlw $2, %%mm2 \n\t"
  1656. #endif
  1657. "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
  1658. "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
  1659. "pmaddwd %%mm0, %%mm1 \n\t"
  1660. "pmaddwd %%mm2, %%mm3 \n\t"
  1661. "pmaddwd %%mm6, %%mm0 \n\t"
  1662. "pmaddwd %%mm6, %%mm2 \n\t"
  1663. #ifndef FAST_BGR2YV12
  1664. "psrad $8, %%mm0 \n\t"
  1665. "psrad $8, %%mm1 \n\t"
  1666. "psrad $8, %%mm2 \n\t"
  1667. "psrad $8, %%mm3 \n\t"
  1668. #endif
  1669. "packssdw %%mm2, %%mm0 \n\t"
  1670. "packssdw %%mm3, %%mm1 \n\t"
  1671. "pmaddwd %%mm5, %%mm0 \n\t"
  1672. "pmaddwd %%mm5, %%mm1 \n\t"
  1673. "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
  1674. "psraw $7, %%mm0 \n\t"
  1675. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1676. "movq 12(%0, %%ebx), %%mm4 \n\t"
  1677. "movq 12(%1, %%ebx), %%mm1 \n\t"
  1678. "movq 18(%0, %%ebx), %%mm2 \n\t"
  1679. "movq 18(%1, %%ebx), %%mm3 \n\t"
  1680. PAVGB(%%mm1, %%mm4)
  1681. PAVGB(%%mm3, %%mm2)
  1682. "movq %%mm4, %%mm1 \n\t"
  1683. "movq %%mm2, %%mm3 \n\t"
  1684. "psrlq $24, %%mm4 \n\t"
  1685. "psrlq $24, %%mm2 \n\t"
  1686. PAVGB(%%mm1, %%mm4)
  1687. PAVGB(%%mm3, %%mm2)
  1688. "punpcklbw %%mm7, %%mm4 \n\t"
  1689. "punpcklbw %%mm7, %%mm2 \n\t"
  1690. #else
  1691. "movd 12(%0, %%ebx), %%mm4 \n\t"
  1692. "movd 12(%1, %%ebx), %%mm1 \n\t"
  1693. "movd 15(%0, %%ebx), %%mm2 \n\t"
  1694. "movd 15(%1, %%ebx), %%mm3 \n\t"
  1695. "punpcklbw %%mm7, %%mm4 \n\t"
  1696. "punpcklbw %%mm7, %%mm1 \n\t"
  1697. "punpcklbw %%mm7, %%mm2 \n\t"
  1698. "punpcklbw %%mm7, %%mm3 \n\t"
  1699. "paddw %%mm1, %%mm4 \n\t"
  1700. "paddw %%mm3, %%mm2 \n\t"
  1701. "paddw %%mm2, %%mm4 \n\t"
  1702. "movd 18(%0, %%ebx), %%mm5 \n\t"
  1703. "movd 18(%1, %%ebx), %%mm1 \n\t"
  1704. "movd 21(%0, %%ebx), %%mm2 \n\t"
  1705. "movd 21(%1, %%ebx), %%mm3 \n\t"
  1706. "punpcklbw %%mm7, %%mm5 \n\t"
  1707. "punpcklbw %%mm7, %%mm1 \n\t"
  1708. "punpcklbw %%mm7, %%mm2 \n\t"
  1709. "punpcklbw %%mm7, %%mm3 \n\t"
  1710. "paddw %%mm1, %%mm5 \n\t"
  1711. "paddw %%mm3, %%mm2 \n\t"
  1712. "paddw %%mm5, %%mm2 \n\t"
  1713. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1714. "psrlw $2, %%mm4 \n\t"
  1715. "psrlw $2, %%mm2 \n\t"
  1716. #endif
  1717. "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
  1718. "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
  1719. "pmaddwd %%mm4, %%mm1 \n\t"
  1720. "pmaddwd %%mm2, %%mm3 \n\t"
  1721. "pmaddwd %%mm6, %%mm4 \n\t"
  1722. "pmaddwd %%mm6, %%mm2 \n\t"
  1723. #ifndef FAST_BGR2YV12
  1724. "psrad $8, %%mm4 \n\t"
  1725. "psrad $8, %%mm1 \n\t"
  1726. "psrad $8, %%mm2 \n\t"
  1727. "psrad $8, %%mm3 \n\t"
  1728. #endif
  1729. "packssdw %%mm2, %%mm4 \n\t"
  1730. "packssdw %%mm3, %%mm1 \n\t"
  1731. "pmaddwd %%mm5, %%mm4 \n\t"
  1732. "pmaddwd %%mm5, %%mm1 \n\t"
  1733. "addl $24, %%ebx \n\t"
  1734. "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
  1735. "psraw $7, %%mm4 \n\t"
  1736. "movq %%mm0, %%mm1 \n\t"
  1737. "punpckldq %%mm4, %%mm0 \n\t"
  1738. "punpckhdq %%mm4, %%mm1 \n\t"
  1739. "packsswb %%mm1, %%mm0 \n\t"
  1740. "paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t"
  1741. "movd %%mm0, (%2, %%eax) \n\t"
  1742. "punpckhdq %%mm0, %%mm0 \n\t"
  1743. "movd %%mm0, (%3, %%eax) \n\t"
  1744. "addl $4, %%eax \n\t"
  1745. " js 1b \n\t"
  1746. : : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width)
  1747. : "%eax", "%ebx"
  1748. );
  1749. #else
  1750. int i;
  1751. for(i=0; i<width; i++)
  1752. {
  1753. int b= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
  1754. int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
  1755. int r= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
  1756. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1757. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1758. }
  1759. #endif
  1760. }
  1761. static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width)
  1762. {
  1763. int i;
  1764. for(i=0; i<width; i++)
  1765. {
  1766. int d= ((uint16_t*)src)[i];
  1767. int b= d&0x1F;
  1768. int g= (d>>5)&0x3F;
  1769. int r= (d>>11)&0x1F;
  1770. dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
  1771. }
  1772. }
  1773. static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1774. {
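/* Averages a 2x2 block of RGB565 pixels per output sample. d0/d1 each hold two
   packed pixels; the two masks keep alternating colour fields apart, so the
   four-pixel per-field sums (<= 4*31 for R/B, <= 4*63 for G) cannot overflow into a
   neighbouring field, and dh2 + dl realigns them so that b, r and g below are the
   four-pixel sums. */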
  1775. int i;
  1776. for(i=0; i<width; i++)
  1777. {
  1778. int d0= ((uint32_t*)src1)[i];
  1779. int d1= ((uint32_t*)src2)[i];
  1780. int dl= (d0&0x07E0F81F) + (d1&0x07E0F81F);
  1781. int dh= ((d0>>5)&0x07C0F83F) + ((d1>>5)&0x07C0F83F);
  1782. int dh2= (dh>>11) + (dh<<21);
  1783. int d= dh2 + dl;
  1784. int b= d&0x7F;
  1785. int r= (d>>11)&0x7F;
  1786. int g= d>>21;
  1787. dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
  1788. dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
  1789. }
  1790. }
  1791. static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width)
  1792. {
  1793. int i;
  1794. for(i=0; i<width; i++)
  1795. {
  1796. int d= ((uint16_t*)src)[i];
  1797. int b= d&0x1F;
  1798. int g= (d>>5)&0x1F;
  1799. int r= (d>>10)&0x1F;
  1800. dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
  1801. }
  1802. }
  1803. static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1804. {
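/* same 2x2 parallel-sum trick as bgr16ToUV above, adjusted for the RGB555 field layout */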
  1805. int i;
  1806. for(i=0; i<width; i++)
  1807. {
  1808. int d0= ((uint32_t*)src1)[i];
  1809. int d1= ((uint32_t*)src2)[i];
  1810. int dl= (d0&0x03E07C1F) + (d1&0x03E07C1F);
  1811. int dh= ((d0>>5)&0x03E0F81F) + ((d1>>5)&0x03E0F81F);
  1812. int dh2= (dh>>11) + (dh<<21);
  1813. int d= dh2 + dl;
  1814. int b= d&0x7F;
  1815. int r= (d>>10)&0x7F;
  1816. int g= d>>21;
  1817. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
  1818. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
  1819. }
  1820. }
  1821. static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width)
  1822. {
  1823. int i;
  1824. for(i=0; i<width; i++)
  1825. {
  1826. int r= ((uint32_t*)src)[i]&0xFF;
  1827. int g= (((uint32_t*)src)[i]>>8)&0xFF;
  1828. int b= (((uint32_t*)src)[i]>>16)&0xFF;
  1829. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  1830. }
  1831. }
  1832. static inline void RENAME(rgb32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1833. {
  1834. int i;
  1835. for(i=0; i<width; i++)
  1836. {
  1837. const int a= ((uint32_t*)src1)[2*i+0];
  1838. const int e= ((uint32_t*)src1)[2*i+1];
  1839. const int c= ((uint32_t*)src2)[2*i+0];
  1840. const int d= ((uint32_t*)src2)[2*i+1];
  1841. const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF);
  1842. const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00);
  1843. const int r= l&0x3FF;
  1844. const int g= h>>8;
  1845. const int b= l>>16;
  1846. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1847. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1848. }
  1849. }
  1850. static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, int width)
  1851. {
  1852. int i;
  1853. for(i=0; i<width; i++)
  1854. {
  1855. int r= src[i*3+0];
  1856. int g= src[i*3+1];
  1857. int b= src[i*3+2];
  1858. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  1859. }
  1860. }
  1861. static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1862. {
  1863. int i;
  1864. for(i=0; i<width; i++)
  1865. {
  1866. int r= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
  1867. int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
  1868. int b= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
  1869. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1870. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1871. }
  1872. }
  1873. // Bilinear / Bicubic scaling
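// hScale() applies a horizontal FIR filter: for every output pixel i it sums
// filterSize source samples starting at src[filterPos[i]], weighted by
// filter[i*filterSize + j], and stores the >>7-scaled, clipped result in dst[i]
// (see the plain C fall-back below; the MMX variants are unrolled versions of the same sum).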
  1874. static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
  1875. int16_t *filter, int16_t *filterPos, int filterSize)
  1876. {
  1877. #ifdef HAVE_MMX
  1878. assert(filterSize % 4 == 0 && filterSize>0);
1879. if(filterSize==4) // always true for upscaling, sometimes for downscaling too
  1880. {
  1881. int counter= -2*dstW;
  1882. filter-= counter*2;
  1883. filterPos-= counter/2;
  1884. dst-= counter/2;
  1885. asm volatile(
  1886. "pxor %%mm7, %%mm7 \n\t"
  1887. "movq "MANGLE(w02)", %%mm6 \n\t"
  1888. "pushl %%ebp \n\t" // we use 7 regs here ...
  1889. "movl %%eax, %%ebp \n\t"
  1890. ".balign 16 \n\t"
  1891. "1: \n\t"
  1892. "movzwl (%2, %%ebp), %%eax \n\t"
  1893. "movzwl 2(%2, %%ebp), %%ebx \n\t"
  1894. "movq (%1, %%ebp, 4), %%mm1 \n\t"
  1895. "movq 8(%1, %%ebp, 4), %%mm3 \n\t"
  1896. "movd (%3, %%eax), %%mm0 \n\t"
  1897. "movd (%3, %%ebx), %%mm2 \n\t"
  1898. "punpcklbw %%mm7, %%mm0 \n\t"
  1899. "punpcklbw %%mm7, %%mm2 \n\t"
  1900. "pmaddwd %%mm1, %%mm0 \n\t"
  1901. "pmaddwd %%mm2, %%mm3 \n\t"
  1902. "psrad $8, %%mm0 \n\t"
  1903. "psrad $8, %%mm3 \n\t"
  1904. "packssdw %%mm3, %%mm0 \n\t"
  1905. "pmaddwd %%mm6, %%mm0 \n\t"
  1906. "packssdw %%mm0, %%mm0 \n\t"
  1907. "movd %%mm0, (%4, %%ebp) \n\t"
  1908. "addl $4, %%ebp \n\t"
  1909. " jnc 1b \n\t"
  1910. "popl %%ebp \n\t"
  1911. : "+a" (counter)
  1912. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  1913. : "%ebx"
  1914. );
  1915. }
  1916. else if(filterSize==8)
  1917. {
  1918. int counter= -2*dstW;
  1919. filter-= counter*4;
  1920. filterPos-= counter/2;
  1921. dst-= counter/2;
  1922. asm volatile(
  1923. "pxor %%mm7, %%mm7 \n\t"
  1924. "movq "MANGLE(w02)", %%mm6 \n\t"
  1925. "pushl %%ebp \n\t" // we use 7 regs here ...
  1926. "movl %%eax, %%ebp \n\t"
  1927. ".balign 16 \n\t"
  1928. "1: \n\t"
  1929. "movzwl (%2, %%ebp), %%eax \n\t"
  1930. "movzwl 2(%2, %%ebp), %%ebx \n\t"
  1931. "movq (%1, %%ebp, 8), %%mm1 \n\t"
  1932. "movq 16(%1, %%ebp, 8), %%mm3 \n\t"
  1933. "movd (%3, %%eax), %%mm0 \n\t"
  1934. "movd (%3, %%ebx), %%mm2 \n\t"
  1935. "punpcklbw %%mm7, %%mm0 \n\t"
  1936. "punpcklbw %%mm7, %%mm2 \n\t"
  1937. "pmaddwd %%mm1, %%mm0 \n\t"
  1938. "pmaddwd %%mm2, %%mm3 \n\t"
  1939. "movq 8(%1, %%ebp, 8), %%mm1 \n\t"
  1940. "movq 24(%1, %%ebp, 8), %%mm5 \n\t"
  1941. "movd 4(%3, %%eax), %%mm4 \n\t"
  1942. "movd 4(%3, %%ebx), %%mm2 \n\t"
  1943. "punpcklbw %%mm7, %%mm4 \n\t"
  1944. "punpcklbw %%mm7, %%mm2 \n\t"
  1945. "pmaddwd %%mm1, %%mm4 \n\t"
  1946. "pmaddwd %%mm2, %%mm5 \n\t"
  1947. "paddd %%mm4, %%mm0 \n\t"
  1948. "paddd %%mm5, %%mm3 \n\t"
  1949. "psrad $8, %%mm0 \n\t"
  1950. "psrad $8, %%mm3 \n\t"
  1951. "packssdw %%mm3, %%mm0 \n\t"
  1952. "pmaddwd %%mm6, %%mm0 \n\t"
  1953. "packssdw %%mm0, %%mm0 \n\t"
  1954. "movd %%mm0, (%4, %%ebp) \n\t"
  1955. "addl $4, %%ebp \n\t"
  1956. " jnc 1b \n\t"
  1957. "popl %%ebp \n\t"
  1958. : "+a" (counter)
  1959. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  1960. : "%ebx"
  1961. );
  1962. }
  1963. else
  1964. {
  1965. int counter= -2*dstW;
  1966. // filter-= counter*filterSize/2;
  1967. filterPos-= counter/2;
  1968. dst-= counter/2;
  1969. asm volatile(
  1970. "pxor %%mm7, %%mm7 \n\t"
  1971. "movq "MANGLE(w02)", %%mm6 \n\t"
  1972. ".balign 16 \n\t"
  1973. "1: \n\t"
  1974. "movl %2, %%ecx \n\t"
  1975. "movzwl (%%ecx, %0), %%eax \n\t"
  1976. "movzwl 2(%%ecx, %0), %%ebx \n\t"
  1977. "movl %5, %%ecx \n\t"
  1978. "pxor %%mm4, %%mm4 \n\t"
  1979. "pxor %%mm5, %%mm5 \n\t"
  1980. "2: \n\t"
  1981. "movq (%1), %%mm1 \n\t"
  1982. "movq (%1, %6), %%mm3 \n\t"
  1983. "movd (%%ecx, %%eax), %%mm0 \n\t"
  1984. "movd (%%ecx, %%ebx), %%mm2 \n\t"
  1985. "punpcklbw %%mm7, %%mm0 \n\t"
  1986. "punpcklbw %%mm7, %%mm2 \n\t"
  1987. "pmaddwd %%mm1, %%mm0 \n\t"
  1988. "pmaddwd %%mm2, %%mm3 \n\t"
  1989. "paddd %%mm3, %%mm5 \n\t"
  1990. "paddd %%mm0, %%mm4 \n\t"
  1991. "addl $8, %1 \n\t"
  1992. "addl $4, %%ecx \n\t"
  1993. "cmpl %4, %%ecx \n\t"
  1994. " jb 2b \n\t"
  1995. "addl %6, %1 \n\t"
  1996. "psrad $8, %%mm4 \n\t"
  1997. "psrad $8, %%mm5 \n\t"
  1998. "packssdw %%mm5, %%mm4 \n\t"
  1999. "pmaddwd %%mm6, %%mm4 \n\t"
  2000. "packssdw %%mm4, %%mm4 \n\t"
  2001. "movl %3, %%eax \n\t"
  2002. "movd %%mm4, (%%eax, %0) \n\t"
  2003. "addl $4, %0 \n\t"
  2004. " jnc 1b \n\t"
  2005. : "+r" (counter), "+r" (filter)
  2006. : "m" (filterPos), "m" (dst), "m"(src+filterSize),
  2007. "m" (src), "r" (filterSize*2)
  2008. : "%ebx", "%eax", "%ecx"
  2009. );
  2010. }
  2011. #else
  2012. #ifdef HAVE_ALTIVEC
  2013. hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
  2014. #else
  2015. int i;
  2016. for(i=0; i<dstW; i++)
  2017. {
  2018. int j;
  2019. int srcPos= filterPos[i];
  2020. int val=0;
  2021. // printf("filterPos: %d\n", filterPos[i]);
  2022. for(j=0; j<filterSize; j++)
  2023. {
  2024. // printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
  2025. val += ((int)src[srcPos + j])*filter[filterSize*i + j];
  2026. }
  2027. // filter += hFilterSize;
  2028. dst[i] = MIN(MAX(0, val>>7), (1<<15)-1); // the cubic equation does overflow ...
  2029. // dst[i] = val>>7;
  2030. }
  2031. #endif
  2032. #endif
  2033. }
  2034. // *** horizontal scale Y line to temp buffer
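// Packed YUV and RGB/BGR sources are first converted to a plain 8-bit luma line in
// formatConvBuffer; that line is then scaled with hScale(), or with the fast-bilinear
// path below (MMX2 "funny" code or plain asm) when SWS_FAST_BILINEAR is set and usable.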
  2035. static inline void RENAME(hyscale)(uint16_t *dst, int dstWidth, uint8_t *src, int srcW, int xInc,
  2036. int flags, int canMMX2BeUsed, int16_t *hLumFilter,
  2037. int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
  2038. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  2039. int32_t *mmx2FilterPos)
  2040. {
  2041. if(srcFormat==IMGFMT_YUY2)
  2042. {
  2043. RENAME(yuy2ToY)(formatConvBuffer, src, srcW);
  2044. src= formatConvBuffer;
  2045. }
  2046. else if(srcFormat==IMGFMT_UYVY)
  2047. {
  2048. RENAME(uyvyToY)(formatConvBuffer, src, srcW);
  2049. src= formatConvBuffer;
  2050. }
  2051. else if(srcFormat==IMGFMT_BGR32)
  2052. {
  2053. RENAME(bgr32ToY)(formatConvBuffer, src, srcW);
  2054. src= formatConvBuffer;
  2055. }
  2056. else if(srcFormat==IMGFMT_BGR24)
  2057. {
  2058. RENAME(bgr24ToY)(formatConvBuffer, src, srcW);
  2059. src= formatConvBuffer;
  2060. }
  2061. else if(srcFormat==IMGFMT_BGR16)
  2062. {
  2063. RENAME(bgr16ToY)(formatConvBuffer, src, srcW);
  2064. src= formatConvBuffer;
  2065. }
  2066. else if(srcFormat==IMGFMT_BGR15)
  2067. {
  2068. RENAME(bgr15ToY)(formatConvBuffer, src, srcW);
  2069. src= formatConvBuffer;
  2070. }
  2071. else if(srcFormat==IMGFMT_RGB32)
  2072. {
  2073. RENAME(rgb32ToY)(formatConvBuffer, src, srcW);
  2074. src= formatConvBuffer;
  2075. }
  2076. else if(srcFormat==IMGFMT_RGB24)
  2077. {
  2078. RENAME(rgb24ToY)(formatConvBuffer, src, srcW);
  2079. src= formatConvBuffer;
  2080. }
  2081. #ifdef HAVE_MMX
2082. // use the new MMX scaler if MMX2 can't be used (it's faster than the x86 asm one)
  2083. if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2084. #else
  2085. if(!(flags&SWS_FAST_BILINEAR))
  2086. #endif
  2087. {
  2088. RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
  2089. }
  2090. else // Fast Bilinear upscale / crap downscale
  2091. {
  2092. #ifdef ARCH_X86
  2093. #ifdef HAVE_MMX2
  2094. int i;
  2095. if(canMMX2BeUsed)
  2096. {
  2097. asm volatile(
  2098. "pxor %%mm7, %%mm7 \n\t"
  2099. "movl %0, %%ecx \n\t"
  2100. "movl %1, %%edi \n\t"
  2101. "movl %2, %%edx \n\t"
  2102. "movl %3, %%ebx \n\t"
  2103. "xorl %%eax, %%eax \n\t" // i
  2104. PREFETCH" (%%ecx) \n\t"
  2105. PREFETCH" 32(%%ecx) \n\t"
  2106. PREFETCH" 64(%%ecx) \n\t"
  2107. #define FUNNY_Y_CODE \
  2108. "movl (%%ebx), %%esi \n\t"\
  2109. "call *%4 \n\t"\
  2110. "addl (%%ebx, %%eax), %%ecx \n\t"\
  2111. "addl %%eax, %%edi \n\t"\
  2112. "xorl %%eax, %%eax \n\t"\
  2113. FUNNY_Y_CODE
  2114. FUNNY_Y_CODE
  2115. FUNNY_Y_CODE
  2116. FUNNY_Y_CODE
  2117. FUNNY_Y_CODE
  2118. FUNNY_Y_CODE
  2119. FUNNY_Y_CODE
  2120. FUNNY_Y_CODE
  2121. :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2122. "m" (funnyYCode)
  2123. : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
  2124. );
  2125. for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
  2126. }
  2127. else
  2128. {
  2129. #endif
2130. // no MMX2 here, just plain x86 asm ...
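// 16.16 fixed-point DDA: %%ebx is the integer source position xx and %%cx the 16-bit
// fraction; each step adds xInc&0xFFFF to the fraction and xInc>>16 plus the carry to
// xx. The stored sample is the linear blend of src[xx] and src[xx+1], scaled by 128.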
  2131. asm volatile(
  2132. "xorl %%eax, %%eax \n\t" // i
  2133. "xorl %%ebx, %%ebx \n\t" // xx
  2134. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2135. ".balign 16 \n\t"
  2136. "1: \n\t"
  2137. "movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
  2138. "movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
  2139. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2140. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2141. "shll $16, %%edi \n\t"
  2142. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2143. "movl %1, %%edi \n\t"
  2144. "shrl $9, %%esi \n\t"
  2145. "movw %%si, (%%edi, %%eax, 2) \n\t"
  2146. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2147. "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
  2148. "movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
  2149. "movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
  2150. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2151. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2152. "shll $16, %%edi \n\t"
  2153. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2154. "movl %1, %%edi \n\t"
  2155. "shrl $9, %%esi \n\t"
  2156. "movw %%si, 2(%%edi, %%eax, 2) \n\t"
  2157. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2158. "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
  2159. "addl $2, %%eax \n\t"
  2160. "cmpl %2, %%eax \n\t"
  2161. " jb 1b \n\t"
  2162. :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF)
  2163. : "%eax", "%ebx", "%ecx", "%edi", "%esi"
  2164. );
  2165. #ifdef HAVE_MMX2
  2166. } //if MMX2 can't be used
  2167. #endif
  2168. #else
  2169. int i;
  2170. unsigned int xpos=0;
  2171. for(i=0;i<dstWidth;i++)
  2172. {
  2173. register unsigned int xx=xpos>>16;
  2174. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2175. dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
  2176. xpos+=xInc;
  2177. }
  2178. #endif
  2179. }
  2180. }
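// *** horizontal scale U+V lines to temp buffer
// Same structure as hyscale(), but converts/scales both chroma planes at once:
// U goes to dst[0..dstWidth-1] and V to dst+2048 (the asm uses a 4096-byte offset
// because dst is an int16_t buffer).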
  2181. inline static void RENAME(hcscale)(uint16_t *dst, int dstWidth, uint8_t *src1, uint8_t *src2,
  2182. int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
  2183. int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
  2184. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  2185. int32_t *mmx2FilterPos)
  2186. {
  2187. if(srcFormat==IMGFMT_YUY2)
  2188. {
  2189. RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2190. src1= formatConvBuffer;
  2191. src2= formatConvBuffer+2048;
  2192. }
  2193. else if(srcFormat==IMGFMT_UYVY)
  2194. {
  2195. RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2196. src1= formatConvBuffer;
  2197. src2= formatConvBuffer+2048;
  2198. }
  2199. else if(srcFormat==IMGFMT_BGR32)
  2200. {
  2201. RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2202. src1= formatConvBuffer;
  2203. src2= formatConvBuffer+2048;
  2204. }
  2205. else if(srcFormat==IMGFMT_BGR24)
  2206. {
  2207. RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2208. src1= formatConvBuffer;
  2209. src2= formatConvBuffer+2048;
  2210. }
  2211. else if(srcFormat==IMGFMT_BGR16)
  2212. {
  2213. RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2214. src1= formatConvBuffer;
  2215. src2= formatConvBuffer+2048;
  2216. }
  2217. else if(srcFormat==IMGFMT_BGR15)
  2218. {
  2219. RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2220. src1= formatConvBuffer;
  2221. src2= formatConvBuffer+2048;
  2222. }
  2223. else if(srcFormat==IMGFMT_RGB32)
  2224. {
  2225. RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2226. src1= formatConvBuffer;
  2227. src2= formatConvBuffer+2048;
  2228. }
  2229. else if(srcFormat==IMGFMT_RGB24)
  2230. {
  2231. RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2232. src1= formatConvBuffer;
  2233. src2= formatConvBuffer+2048;
  2234. }
  2235. else if(isGray(srcFormat))
  2236. {
  2237. return;
  2238. }
  2239. #ifdef HAVE_MMX
2240. // use the new MMX scaler if MMX2 can't be used (it's faster than the x86 asm one)
  2241. if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2242. #else
  2243. if(!(flags&SWS_FAST_BILINEAR))
  2244. #endif
  2245. {
  2246. RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2247. RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2248. }
  2249. else // Fast Bilinear upscale / crap downscale
  2250. {
  2251. #ifdef ARCH_X86
  2252. #ifdef HAVE_MMX2
  2253. int i;
  2254. if(canMMX2BeUsed)
  2255. {
  2256. asm volatile(
  2257. "pxor %%mm7, %%mm7 \n\t"
  2258. "movl %0, %%ecx \n\t"
  2259. "movl %1, %%edi \n\t"
  2260. "movl %2, %%edx \n\t"
  2261. "movl %3, %%ebx \n\t"
  2262. "xorl %%eax, %%eax \n\t" // i
  2263. PREFETCH" (%%ecx) \n\t"
  2264. PREFETCH" 32(%%ecx) \n\t"
  2265. PREFETCH" 64(%%ecx) \n\t"
  2266. #define FUNNY_UV_CODE \
  2267. "movl (%%ebx), %%esi \n\t"\
  2268. "call *%4 \n\t"\
  2269. "addl (%%ebx, %%eax), %%ecx \n\t"\
  2270. "addl %%eax, %%edi \n\t"\
  2271. "xorl %%eax, %%eax \n\t"\
  2272. FUNNY_UV_CODE
  2273. FUNNY_UV_CODE
  2274. FUNNY_UV_CODE
  2275. FUNNY_UV_CODE
  2276. "xorl %%eax, %%eax \n\t" // i
  2277. "movl %5, %%ecx \n\t" // src
  2278. "movl %1, %%edi \n\t" // buf1
  2279. "addl $4096, %%edi \n\t"
  2280. PREFETCH" (%%ecx) \n\t"
  2281. PREFETCH" 32(%%ecx) \n\t"
  2282. PREFETCH" 64(%%ecx) \n\t"
  2283. FUNNY_UV_CODE
  2284. FUNNY_UV_CODE
  2285. FUNNY_UV_CODE
  2286. FUNNY_UV_CODE
  2287. :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2288. "m" (funnyUVCode), "m" (src2)
  2289. : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
  2290. );
  2291. for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
  2292. {
  2293. // printf("%d %d %d\n", dstWidth, i, srcW);
  2294. dst[i] = src1[srcW-1]*128;
  2295. dst[i+2048] = src2[srcW-1]*128;
  2296. }
  2297. }
  2298. else
  2299. {
  2300. #endif
  2301. asm volatile(
  2302. "xorl %%eax, %%eax \n\t" // i
  2303. "xorl %%ebx, %%ebx \n\t" // xx
  2304. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2305. ".balign 16 \n\t"
  2306. "1: \n\t"
  2307. "movl %0, %%esi \n\t"
  2308. "movzbl (%%esi, %%ebx), %%edi \n\t" //src[xx]
  2309. "movzbl 1(%%esi, %%ebx), %%esi \n\t" //src[xx+1]
  2310. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2311. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2312. "shll $16, %%edi \n\t"
  2313. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2314. "movl %1, %%edi \n\t"
  2315. "shrl $9, %%esi \n\t"
  2316. "movw %%si, (%%edi, %%eax, 2) \n\t"
  2317. "movzbl (%5, %%ebx), %%edi \n\t" //src[xx]
  2318. "movzbl 1(%5, %%ebx), %%esi \n\t" //src[xx+1]
  2319. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2320. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2321. "shll $16, %%edi \n\t"
  2322. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2323. "movl %1, %%edi \n\t"
  2324. "shrl $9, %%esi \n\t"
  2325. "movw %%si, 4096(%%edi, %%eax, 2)\n\t"
  2326. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2327. "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
  2328. "addl $1, %%eax \n\t"
  2329. "cmpl %2, %%eax \n\t"
  2330. " jb 1b \n\t"
  2331. :: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF),
  2332. "r" (src2)
  2333. : "%eax", "%ebx", "%ecx", "%edi", "%esi"
  2334. );
  2335. #ifdef HAVE_MMX2
  2336. } //if MMX2 can't be used
  2337. #endif
  2338. #else
  2339. int i;
  2340. unsigned int xpos=0;
  2341. for(i=0;i<dstWidth;i++)
  2342. {
  2343. register unsigned int xx=xpos>>16;
  2344. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2345. dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
  2346. dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
  2347. /* slower
  2348. dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
  2349. dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
  2350. */
  2351. xpos+=xInc;
  2352. }
  2353. #endif
  2354. }
  2355. }
  2356. static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
  2357. int srcSliceH, uint8_t* dst[], int dstStride[]){
2358. /* load a few things into local vars to make the code more readable (and faster) */
  2359. const int srcW= c->srcW;
  2360. const int dstW= c->dstW;
  2361. const int dstH= c->dstH;
  2362. const int chrDstW= c->chrDstW;
  2363. const int chrSrcW= c->chrSrcW;
  2364. const int lumXInc= c->lumXInc;
  2365. const int chrXInc= c->chrXInc;
  2366. const int dstFormat= c->dstFormat;
  2367. const int srcFormat= c->srcFormat;
  2368. const int flags= c->flags;
  2369. const int canMMX2BeUsed= c->canMMX2BeUsed;
  2370. int16_t *vLumFilterPos= c->vLumFilterPos;
  2371. int16_t *vChrFilterPos= c->vChrFilterPos;
  2372. int16_t *hLumFilterPos= c->hLumFilterPos;
  2373. int16_t *hChrFilterPos= c->hChrFilterPos;
  2374. int16_t *vLumFilter= c->vLumFilter;
  2375. int16_t *vChrFilter= c->vChrFilter;
  2376. int16_t *hLumFilter= c->hLumFilter;
  2377. int16_t *hChrFilter= c->hChrFilter;
  2378. int32_t *lumMmxFilter= c->lumMmxFilter;
  2379. int32_t *chrMmxFilter= c->chrMmxFilter;
  2380. const int vLumFilterSize= c->vLumFilterSize;
  2381. const int vChrFilterSize= c->vChrFilterSize;
  2382. const int hLumFilterSize= c->hLumFilterSize;
  2383. const int hChrFilterSize= c->hChrFilterSize;
  2384. int16_t **lumPixBuf= c->lumPixBuf;
  2385. int16_t **chrPixBuf= c->chrPixBuf;
  2386. const int vLumBufSize= c->vLumBufSize;
  2387. const int vChrBufSize= c->vChrBufSize;
  2388. uint8_t *funnyYCode= c->funnyYCode;
  2389. uint8_t *funnyUVCode= c->funnyUVCode;
  2390. uint8_t *formatConvBuffer= c->formatConvBuffer;
  2391. const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
  2392. const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
  2393. int lastDstY;
2394. /* vars which will change and which we need to store back in the context */
  2395. int dstY= c->dstY;
  2396. int lumBufIndex= c->lumBufIndex;
  2397. int chrBufIndex= c->chrBufIndex;
  2398. int lastInLumBuf= c->lastInLumBuf;
  2399. int lastInChrBuf= c->lastInChrBuf;
  2400. if(isPacked(c->srcFormat)){
  2401. src[0]=
  2402. src[1]=
  2403. src[2]= src[0];
  2404. srcStride[0]=
  2405. srcStride[1]=
  2406. srcStride[2]= srcStride[0];
  2407. }
  2408. srcStride[1]<<= c->vChrDrop;
  2409. srcStride[2]<<= c->vChrDrop;
  2410. // printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
  2411. // (int)dst[0], (int)dst[1], (int)dst[2]);
  2412. #if 0 //self test FIXME move to a vfilter or something
  2413. {
  2414. static volatile int i=0;
  2415. i++;
  2416. if(srcFormat==IMGFMT_YV12 && i==1 && srcSliceH>= c->srcH)
  2417. selfTest(src, srcStride, c->srcW, c->srcH);
  2418. i--;
  2419. }
  2420. #endif
  2421. //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
  2422. //dstStride[0],dstStride[1],dstStride[2]);
  2423. if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
  2424. {
  2425. static int firstTime=1; //FIXME move this into the context perhaps
  2426. if(flags & SWS_PRINT_INFO && firstTime)
  2427. {
2428. MSG_WARN("SwScaler: Warning: dstStride is not aligned!\n"
2429. "SwScaler: ->cannot do aligned memory accesses anymore\n");
  2430. firstTime=0;
  2431. }
  2432. }
2433. /* Note: the user might start scaling in the middle of the picture, so this will not get executed;
2434. this is not really intended, but it currently works, so people might do it */
  2435. if(srcSliceY ==0){
  2436. lumBufIndex=0;
  2437. chrBufIndex=0;
  2438. dstY=0;
  2439. lastInLumBuf= -1;
  2440. lastInChrBuf= -1;
  2441. }
  2442. lastDstY= dstY;
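// Main loop: for each output line work out which source lines are needed, horizontally
// scale any that are not yet in the lumPixBuf/chrPixBuf ring buffers, then vertically
// scale/convert them into the destination; bail out when the current slice runs out of lines.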
  2443. for(;dstY < dstH; dstY++){
  2444. unsigned char *dest =dst[0]+dstStride[0]*dstY;
  2445. const int chrDstY= dstY>>c->chrDstVSubSample;
  2446. unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
  2447. unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
  2448. const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
  2449. const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
  2450. const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
  2451. const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
  2452. //printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
  2453. // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample);
  2454. //handle holes (FAST_BILINEAR & weird filters)
  2455. if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
  2456. if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
  2457. //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
  2458. ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1)
  2459. ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1)
  2460. // Do we have enough lines in this slice to output the dstY line
  2461. if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
  2462. {
  2463. //Do horizontal scaling
  2464. while(lastInLumBuf < lastLumSrcY)
  2465. {
  2466. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2467. lumBufIndex++;
  2468. // printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
  2469. ASSERT(lumBufIndex < 2*vLumBufSize)
  2470. ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
  2471. ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
  2472. // printf("%d %d\n", lumBufIndex, vLumBufSize);
  2473. RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2474. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2475. funnyYCode, c->srcFormat, formatConvBuffer,
  2476. c->lumMmx2Filter, c->lumMmx2FilterPos);
  2477. lastInLumBuf++;
  2478. }
  2479. while(lastInChrBuf < lastChrSrcY)
  2480. {
  2481. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2482. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2483. chrBufIndex++;
  2484. ASSERT(chrBufIndex < 2*vChrBufSize)
  2485. ASSERT(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH))
  2486. ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
  2487. //FIXME replace parameters through context struct (some at least)
  2488. if(!(isGray(srcFormat) || isGray(dstFormat)))
  2489. RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2490. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2491. funnyUVCode, c->srcFormat, formatConvBuffer,
  2492. c->chrMmx2Filter, c->chrMmx2FilterPos);
  2493. lastInChrBuf++;
  2494. }
  2495. //wrap buf index around to stay inside the ring buffer
  2496. if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
  2497. if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
  2498. }
  2499. else // not enough lines left in this slice -> load the rest in the buffer
  2500. {
  2501. /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
  2502. firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
  2503. lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
  2504. vChrBufSize, vLumBufSize);*/
  2505. //Do horizontal scaling
  2506. while(lastInLumBuf+1 < srcSliceY + srcSliceH)
  2507. {
  2508. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2509. lumBufIndex++;
  2510. ASSERT(lumBufIndex < 2*vLumBufSize)
  2511. ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
  2512. ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
  2513. RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2514. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2515. funnyYCode, c->srcFormat, formatConvBuffer,
  2516. c->lumMmx2Filter, c->lumMmx2FilterPos);
  2517. lastInLumBuf++;
  2518. }
  2519. while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
  2520. {
  2521. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2522. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2523. chrBufIndex++;
  2524. ASSERT(chrBufIndex < 2*vChrBufSize)
  2525. ASSERT(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH)
  2526. ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
  2527. if(!(isGray(srcFormat) || isGray(dstFormat)))
  2528. RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2529. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2530. funnyUVCode, c->srcFormat, formatConvBuffer,
  2531. c->chrMmx2Filter, c->chrMmx2FilterPos);
  2532. lastInChrBuf++;
  2533. }
  2534. //wrap buf index around to stay inside the ring buffer
  2535. if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
  2536. if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
  2537. break; //we can't output a dstY line so let's try with the next slice
  2538. }
  2539. #ifdef HAVE_MMX
  2540. b5Dither= dither8[dstY&1];
  2541. g6Dither= dither4[dstY&1];
  2542. g5Dither= dither8[dstY&1];
  2543. r5Dither= dither8[(dstY+1)&1];
  2544. #endif
  2545. if(dstY < dstH-2)
  2546. {
  2547. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2548. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2549. #ifdef HAVE_MMX
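// Build the per-tap descriptors for the MMX vertical scaler: entry [4*i+0] holds the
// source-line pointer and entries [4*i+2..3] the 16-bit vertical coefficient replicated
// into four 16-bit lanes (via *0x10001), so one movq fetches the coefficient broadcast
// across a whole quadword.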
  2550. int i;
  2551. for(i=0; i<vLumFilterSize; i++)
  2552. {
  2553. lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
  2554. lumMmxFilter[4*i+2]=
  2555. lumMmxFilter[4*i+3]=
  2556. ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
  2557. }
  2558. for(i=0; i<vChrFilterSize; i++)
  2559. {
  2560. chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
  2561. chrMmxFilter[4*i+2]=
  2562. chrMmxFilter[4*i+3]=
  2563. ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
  2564. }
  2565. #endif
  2566. if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 like
  2567. {
  2568. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2569. if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
  2570. if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12
  2571. {
  2572. int16_t *lumBuf = lumPixBuf[0];
  2573. int16_t *chrBuf= chrPixBuf[0];
  2574. RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
  2575. }
  2576. else //General YV12
  2577. {
  2578. RENAME(yuv2yuvX)(c,
  2579. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2580. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2581. dest, uDest, vDest, dstW, chrDstW);
  2582. }
  2583. }
  2584. else
  2585. {
  2586. ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2587. ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  2588. if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB
  2589. {
  2590. int chrAlpha= vChrFilter[2*dstY+1];
  2591. RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
  2592. dest, dstW, chrAlpha, dstFormat, flags, dstY);
  2593. }
  2594. else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB
  2595. {
  2596. int lumAlpha= vLumFilter[2*dstY+1];
  2597. int chrAlpha= vChrFilter[2*dstY+1];
  2598. RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
  2599. dest, dstW, lumAlpha, chrAlpha, dstY);
  2600. }
  2601. else //General RGB
  2602. {
  2603. RENAME(yuv2packedX)(c,
  2604. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2605. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2606. dest, dstW, dstY);
  2607. }
  2608. }
  2609. }
  2610. else // hmm looks like we can't use MMX here without overwriting this array's tail
  2611. {
  2612. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2613. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2614. if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12
  2615. {
  2616. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2617. if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
  2618. yuv2yuvXinC(
  2619. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2620. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2621. dest, uDest, vDest, dstW, chrDstW);
  2622. }
  2623. else
  2624. {
  2625. ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2626. ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  2627. yuv2packedXinC(c,
  2628. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2629. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2630. dest, dstW, dstY);
  2631. }
  2632. }
  2633. }
  2634. #ifdef HAVE_MMX
  2635. __asm __volatile(SFENCE:::"memory");
  2636. __asm __volatile(EMMS:::"memory");
  2637. #endif
  2638. /* store changed local vars back in the context */
  2639. c->dstY= dstY;
  2640. c->lumBufIndex= lumBufIndex;
  2641. c->chrBufIndex= chrBufIndex;
  2642. c->lastInLumBuf= lastInLumBuf;
  2643. c->lastInChrBuf= lastInChrBuf;
  2644. return dstY - lastDstY;
  2645. }