/*
Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#undef MOVNTQ
#undef PAVGB
#undef PREFETCH
#undef PREFETCHW
#undef EMMS
#undef SFENCE
#ifdef HAVE_3DNOW
/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
#define EMMS "femms"
#else
#define EMMS "emms"
#endif
#ifdef HAVE_3DNOW
#define PREFETCH "prefetch"
#define PREFETCHW "prefetchw"
#elif defined ( HAVE_MMX2 )
#define PREFETCH "prefetchnta"
#define PREFETCHW "prefetcht0"
#else
#define PREFETCH "/nop"
#define PREFETCHW "/nop"
#endif
#ifdef HAVE_MMX2
#define SFENCE "sfence"
#else
#define SFENCE "/nop"
#endif
#ifdef HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif defined (HAVE_3DNOW)
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif
#ifdef HAVE_MMX2
#define MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif
#define YSCALEYUV2YV12X(x, offset) \
"xorl %%eax, %%eax \n\t"\
"pxor %%mm3, %%mm3 \n\t"\
"pxor %%mm4, %%mm4 \n\t"\
"leal " offset "(%0), %%edx \n\t"\
"movl (%%edx), %%esi \n\t"\
".balign 16 \n\t" /* FIXME Unroll? */\
"1: \n\t"\
"movq 8(%%edx), %%mm0 \n\t" /* filterCoeff */\
"movq " #x "(%%esi, %%eax, 2), %%mm2 \n\t" /* srcData */\
"movq 8+" #x "(%%esi, %%eax, 2), %%mm5 \n\t" /* srcData */\
"addl $16, %%edx \n\t"\
"movl (%%edx), %%esi \n\t"\
"testl %%esi, %%esi \n\t"\
"pmulhw %%mm0, %%mm2 \n\t"\
"pmulhw %%mm0, %%mm5 \n\t"\
"paddw %%mm2, %%mm3 \n\t"\
"paddw %%mm5, %%mm4 \n\t"\
" jnz 1b \n\t"\
"psraw $3, %%mm3 \n\t"\
"psraw $3, %%mm4 \n\t"\
"packuswb %%mm4, %%mm3 \n\t"\
MOVNTQ(%%mm3, (%1, %%eax))\
"addl $8, %%eax \n\t"\
"cmpl %2, %%eax \n\t"\
"pxor %%mm3, %%mm3 \n\t"\
"pxor %%mm4, %%mm4 \n\t"\
"leal " offset "(%0), %%edx \n\t"\
"movl (%%edx), %%esi \n\t"\
"jb 1b \n\t"
#define YSCALEYUV2YV121 \
"movl %2, %%eax \n\t"\
".balign 16 \n\t" /* FIXME Unroll? */\
"1: \n\t"\
"movq (%0, %%eax, 2), %%mm0 \n\t"\
"movq 8(%0, %%eax, 2), %%mm1 \n\t"\
"psraw $7, %%mm0 \n\t"\
"psraw $7, %%mm1 \n\t"\
"packuswb %%mm1, %%mm0 \n\t"\
MOVNTQ(%%mm0, (%1, %%eax))\
"addl $8, %%eax \n\t"\
"jnc 1b \n\t"
/*
:: "m" (-lumFilterSize), "m" (-chrFilterSize),
"m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
"r" (dest), "m" (dstW),
"m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
: "%eax", "%ebx", "%ecx", "%edx", "%esi"
*/
#define YSCALEYUV2PACKEDX \
"xorl %%eax, %%eax \n\t"\
".balign 16 \n\t"\
"nop \n\t"\
"1: \n\t"\
"leal "CHR_MMX_FILTER_OFFSET"(%0), %%edx \n\t"\
"movl (%%edx), %%esi \n\t"\
"pxor %%mm3, %%mm3 \n\t"\
"pxor %%mm4, %%mm4 \n\t"\
".balign 16 \n\t"\
"2: \n\t"\
"movq 8(%%edx), %%mm0 \n\t" /* filterCoeff */\
"movq (%%esi, %%eax), %%mm2 \n\t" /* UsrcData */\
"movq 4096(%%esi, %%eax), %%mm5 \n\t" /* VsrcData */\
"addl $16, %%edx \n\t"\
"movl (%%edx), %%esi \n\t"\
"pmulhw %%mm0, %%mm2 \n\t"\
"pmulhw %%mm0, %%mm5 \n\t"\
"paddw %%mm2, %%mm3 \n\t"\
"paddw %%mm5, %%mm4 \n\t"\
"testl %%esi, %%esi \n\t"\
" jnz 2b \n\t"\
\
"leal "LUM_MMX_FILTER_OFFSET"(%0), %%edx \n\t"\
"movl (%%edx), %%esi \n\t"\
"pxor %%mm1, %%mm1 \n\t"\
"pxor %%mm7, %%mm7 \n\t"\
".balign 16 \n\t"\
"2: \n\t"\
"movq 8(%%edx), %%mm0 \n\t" /* filterCoeff */\
"movq (%%esi, %%eax, 2), %%mm2 \n\t" /* Y1srcData */\
"movq 8(%%esi, %%eax, 2), %%mm5 \n\t" /* Y2srcData */\
"addl $16, %%edx \n\t"\
"movl (%%edx), %%esi \n\t"\
"pmulhw %%mm0, %%mm2 \n\t"\
"pmulhw %%mm0, %%mm5 \n\t"\
"paddw %%mm2, %%mm1 \n\t"\
"paddw %%mm5, %%mm7 \n\t"\
"testl %%esi, %%esi \n\t"\
" jnz 2b \n\t"\

#define YSCALEYUV2RGBX \
YSCALEYUV2PACKEDX\
"psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
"psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
"pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
"pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
"psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
"psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
"pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
#if 0
#define FULL_YSCALEYUV2RGB \
"pxor %%mm7, %%mm7 \n\t"\
"movd %6, %%mm6 \n\t" /*yalpha1*/\
"punpcklwd %%mm6, %%mm6 \n\t"\
"punpcklwd %%mm6, %%mm6 \n\t"\
"movd %7, %%mm5 \n\t" /*uvalpha1*/\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"xorl %%eax, %%eax \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
"movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
"movq (%2, %%eax,2), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax,2), %%mm3 \n\t" /* uvbuf1[eax]*/\
"psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
"psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
"pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"movq 4096(%2, %%eax,2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
"psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
"paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
"movq 4096(%3, %%eax,2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
"psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
"psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
"psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\
"pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
\
\
"pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"pmulhw "MANGLE(ubCoeff)", %%mm3\n\t"\
"psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
"pmulhw "MANGLE(ugCoeff)", %%mm2\n\t"\
"paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
"psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\
\
\
"movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
"pmulhw "MANGLE(vrCoeff)", %%mm0\n\t"\
"pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
"paddw %%mm1, %%mm3 \n\t" /* B*/\
"paddw %%mm1, %%mm0 \n\t" /* R*/\
"packuswb %%mm3, %%mm3 \n\t"\
\
"packuswb %%mm0, %%mm0 \n\t"\
"paddw %%mm4, %%mm2 \n\t"\
"paddw %%mm2, %%mm1 \n\t" /* G*/\
\
"packuswb %%mm1, %%mm1 \n\t"
#endif
#define YSCALEYUV2PACKED(index, c) \
"movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
"movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1\n\t"\
"psraw $3, %%mm0 \n\t"\
"psraw $3, %%mm1 \n\t"\
"movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c")\n\t"\
"movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c")\n\t"\
"xorl "#index", "#index" \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
"movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
"movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
"psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
"psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
"movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
"pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
"pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
"psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
"psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
"paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
"movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
"movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
"movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
"movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
"psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
"psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
"pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
"paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\

#define YSCALEYUV2RGB(index, c) \
"xorl "#index", "#index" \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
"movq 4096(%2, "#index"), %%mm5\n\t" /* uvbuf0[eax+2048]*/\
"movq 4096(%3, "#index"), %%mm4\n\t" /* uvbuf1[eax+2048]*/\
"psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
"psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
"movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
"pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
"pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
"psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
"psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
"paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
"psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
"psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
"pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
"movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
"movq 8(%0, "#index", 2), %%mm6\n\t" /*buf0[eax]*/\
"movq 8(%1, "#index", 2), %%mm7\n\t" /*buf1[eax]*/\
"psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
"psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
"pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
"paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
"pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
"pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
"psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
"psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
"pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
#define YSCALEYUV2PACKED1(index, c) \
"xorl "#index", "#index" \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
"movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
"psraw $7, %%mm3 \n\t" \
"psraw $7, %%mm4 \n\t" \
"movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
"movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
"psraw $7, %%mm1 \n\t" \
"psraw $7, %%mm7 \n\t" \

#define YSCALEYUV2RGB1(index, c) \
"xorl "#index", "#index" \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
"movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
"psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
"psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
"psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
"psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
"pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
"movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
"pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
"psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
"psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
"pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
#define YSCALEYUV2PACKED1b(index, c) \
"xorl "#index", "#index" \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
"movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
"movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
"paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
"psrlw $8, %%mm3 \n\t" \
"psrlw $8, %%mm4 \n\t" \
"movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
"movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
"psraw $7, %%mm1 \n\t" \
"psraw $7, %%mm7 \n\t"
// do vertical chrominance interpolation
#define YSCALEYUV2RGB1b(index, c) \
"xorl "#index", "#index" \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
"movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
"movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
"paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
"psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
"psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
"psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
"psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
"pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
"movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
"pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
"psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
"psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
"pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
#define WRITEBGR32(dst, dstw, index) \
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq %%mm2, %%mm1 \n\t" /* B */\
"movq %%mm5, %%mm6 \n\t" /* R */\
"punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
"punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
"punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
"punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
"movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
"movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
"punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
"punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
"punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
MOVNTQ(%%mm0, (dst, index, 4))\
MOVNTQ(%%mm2, 8(dst, index, 4))\
MOVNTQ(%%mm1, 16(dst, index, 4))\
MOVNTQ(%%mm3, 24(dst, index, 4))\
\
"addl $8, "#index" \n\t"\
"cmpl "#dstw", "#index" \n\t"\
" jb 1b \n\t"
#define WRITEBGR16(dst, dstw, index) \
"pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
"pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
"pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
"psrlq $3, %%mm2 \n\t"\
\
"movq %%mm2, %%mm1 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
\
"punpcklbw %%mm7, %%mm3 \n\t"\
"punpcklbw %%mm5, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm4 \n\t"\
"punpckhbw %%mm5, %%mm1 \n\t"\
\
"psllq $3, %%mm3 \n\t"\
"psllq $3, %%mm4 \n\t"\
\
"por %%mm3, %%mm2 \n\t"\
"por %%mm4, %%mm1 \n\t"\
\
MOVNTQ(%%mm2, (dst, index, 2))\
MOVNTQ(%%mm1, 8(dst, index, 2))\
\
"addl $8, "#index" \n\t"\
"cmpl "#dstw", "#index" \n\t"\
" jb 1b \n\t"
#define WRITEBGR15(dst, dstw, index) \
"pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
"pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
"pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
"psrlq $3, %%mm2 \n\t"\
"psrlq $1, %%mm5 \n\t"\
\
"movq %%mm2, %%mm1 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
\
"punpcklbw %%mm7, %%mm3 \n\t"\
"punpcklbw %%mm5, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm4 \n\t"\
"punpckhbw %%mm5, %%mm1 \n\t"\
\
"psllq $2, %%mm3 \n\t"\
"psllq $2, %%mm4 \n\t"\
\
"por %%mm3, %%mm2 \n\t"\
"por %%mm4, %%mm1 \n\t"\
\
MOVNTQ(%%mm2, (dst, index, 2))\
MOVNTQ(%%mm1, 8(dst, index, 2))\
\
"addl $8, "#index" \n\t"\
"cmpl "#dstw", "#index" \n\t"\
" jb 1b \n\t"
#define WRITEBGR24OLD(dst, dstw, index) \
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq %%mm2, %%mm1 \n\t" /* B */\
"movq %%mm5, %%mm6 \n\t" /* R */\
"punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
"punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
"punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
"punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
"movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
"movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
"punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
"punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
"punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
"movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
"psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
"pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 0 */\
"pand "MANGLE(bm11111000)", %%mm0\n\t" /* 00RGB000 0.5 */\
"por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
"movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
"psllq $48, %%mm2 \n\t" /* GB000000 1 */\
"por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
\
"movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
"psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
"por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
"pand "MANGLE(bm00001111)", %%mm2\n\t" /* 0000RGBR 1 */\
"movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
"psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
"pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 2 */\
"pand "MANGLE(bm11111000)", %%mm1\n\t" /* 00RGB000 2.5 */\
"por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
"movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
"psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
"por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
\
"psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
"movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
"psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
"pand "MANGLE(bm00000111)", %%mm5\n\t" /* 00000RGB 3 */\
"pand "MANGLE(bm11111000)", %%mm3\n\t" /* 00RGB000 3.5 */\
"por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
"psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
"por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
\
MOVNTQ(%%mm0, (dst))\
MOVNTQ(%%mm2, 8(dst))\
MOVNTQ(%%mm3, 16(dst))\
"addl $24, "#dst" \n\t"\
\
"addl $8, "#index" \n\t"\
"cmpl "#dstw", "#index" \n\t"\
" jb 1b \n\t"
#define WRITEBGR24MMX(dst, dstw, index) \
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq %%mm2, %%mm1 \n\t" /* B */\
"movq %%mm5, %%mm6 \n\t" /* R */\
"punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
"punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
"punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
"punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
"movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
"movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
"punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
"punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
"punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
"movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
"movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
"movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
"movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
\
"psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
"psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
"psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
"psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
\
"punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
"punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
"punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
"punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
\
"psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
"movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
"psllq $40, %%mm2 \n\t" /* GB000000 1 */\
"por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
MOVNTQ(%%mm0, (dst))\
\
"psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
"movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
"psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
"por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
MOVNTQ(%%mm6, 8(dst))\
\
"psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
"psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
"por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
MOVNTQ(%%mm5, 16(dst))\
\
"addl $24, "#dst" \n\t"\
\
"addl $8, "#index" \n\t"\
"cmpl "#dstw", "#index" \n\t"\
" jb 1b \n\t"
#define WRITEBGR24MMX2(dst, dstw, index) \
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq "MANGLE(M24A)", %%mm0 \n\t"\
"movq "MANGLE(M24C)", %%mm7 \n\t"\
"pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
"pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
"pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
\
"pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
"pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
"pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
\
"psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
"por %%mm1, %%mm6 \n\t"\
"por %%mm3, %%mm6 \n\t"\
MOVNTQ(%%mm6, (dst))\
\
"psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
"pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
"pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
"pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
\
"pand "MANGLE(M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
"pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
"pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
\
"por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
"por %%mm3, %%mm6 \n\t"\
MOVNTQ(%%mm6, 8(dst))\
\
"pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
"pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
"pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
\
"pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
"pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
"pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
\
"por %%mm1, %%mm3 \n\t"\
"por %%mm3, %%mm6 \n\t"\
MOVNTQ(%%mm6, 16(dst))\
\
"addl $24, "#dst" \n\t"\
\
"addl $8, "#index" \n\t"\
"cmpl "#dstw", "#index" \n\t"\
" jb 1b \n\t"
#ifdef HAVE_MMX2
#undef WRITEBGR24
#define WRITEBGR24 WRITEBGR24MMX2
#else
#undef WRITEBGR24
#define WRITEBGR24 WRITEBGR24MMX
#endif
#define WRITEYUY2(dst, dstw, index) \
"packuswb %%mm3, %%mm3 \n\t"\
"packuswb %%mm4, %%mm4 \n\t"\
"packuswb %%mm7, %%mm1 \n\t"\
"punpcklbw %%mm4, %%mm3 \n\t"\
"movq %%mm1, %%mm7 \n\t"\
"punpcklbw %%mm3, %%mm1 \n\t"\
"punpckhbw %%mm3, %%mm7 \n\t"\
\
MOVNTQ(%%mm1, (dst, index, 2))\
MOVNTQ(%%mm7, 8(dst, index, 2))\
\
"addl $8, "#index" \n\t"\
"cmpl "#dstw", "#index" \n\t"\
" jb 1b \n\t"
static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
{
#ifdef HAVE_MMX
if(uDest != NULL)
{
asm volatile(
YSCALEYUV2YV12X(0, CHR_MMX_FILTER_OFFSET)
:: "r" (&c->redDither),
"r" (uDest), "m" (chrDstW)
: "%eax", "%edx", "%esi"
);
asm volatile(
YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET)
:: "r" (&c->redDither),
"r" (vDest), "m" (chrDstW)
: "%eax", "%edx", "%esi"
);
}
asm volatile(
YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET)
:: "r" (&c->redDither),
"r" (dest), "m" (dstW)
: "%eax", "%edx", "%esi"
);
#else
yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
chrFilter, chrSrc, chrFilterSize,
dest, uDest, vDest, dstW, chrDstW);
#endif
}
static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
{
#ifdef HAVE_MMX
if(uDest != NULL)
{
asm volatile(
YSCALEYUV2YV121
:: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
"g" (-chrDstW)
: "%eax"
);
asm volatile(
YSCALEYUV2YV121
:: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
"g" (-chrDstW)
: "%eax"
);
}
asm volatile(
YSCALEYUV2YV121
:: "r" (lumSrc + dstW), "r" (dest + dstW),
"g" (-dstW)
: "%eax"
);
#else
int i;
for(i=0; i<dstW; i++)
{
int val= lumSrc[i]>>7;
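// for results in the expected range, bit 8 (256) is set exactly when val
// lies outside 0..255, so one test covers both clip directions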
if(val&256){
if(val<0) val=0;
else val=255;
}
dest[i]= val;
}
if(uDest != NULL)
for(i=0; i<chrDstW; i++)
{
int u=chrSrc[i]>>7;
int v=chrSrc[i + 2048]>>7;
if((u|v)&256){
if(u<0) u=0;
else if (u>255) u=255;
if(v<0) v=0;
else if (v>255) v=255;
}
uDest[i]= u;
vDest[i]= v;
}
#endif
}
/**
* vertical scale YV12 to RGB
*/
static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
uint8_t *dest, int dstW, int dstY)
{
int dummy=0;
switch(c->dstFormat)
{
#ifdef HAVE_MMX
case IMGFMT_BGR32:
{
asm volatile(
YSCALEYUV2RGBX
WRITEBGR32(%4, %5, %%eax)
:: "r" (&c->redDither),
"m" (dummy), "m" (dummy), "m" (dummy),
"r" (dest), "m" (dstW)
: "%eax", "%edx", "%esi"
);
}
break;
case IMGFMT_BGR24:
{
asm volatile(
YSCALEYUV2RGBX
"leal (%%eax, %%eax, 2), %%ebx \n\t" //FIXME optimize
"addl %4, %%ebx \n\t"
WRITEBGR24(%%ebx, %5, %%eax)
:: "r" (&c->redDither),
"m" (dummy), "m" (dummy), "m" (dummy),
"r" (dest), "m" (dstW)
: "%eax", "%ebx", "%edx", "%esi" //FIXME ebx
);
}
break;
case IMGFMT_BGR15:
{
asm volatile(
YSCALEYUV2RGBX
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
"paddusb "MANGLE(g5Dither)", %%mm4\n\t"
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
WRITEBGR15(%4, %5, %%eax)
:: "r" (&c->redDither),
"m" (dummy), "m" (dummy), "m" (dummy),
"r" (dest), "m" (dstW)
: "%eax", "%edx", "%esi"
);
}
break;
case IMGFMT_BGR16:
{
asm volatile(
YSCALEYUV2RGBX
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
"paddusb "MANGLE(g6Dither)", %%mm4\n\t"
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
WRITEBGR16(%4, %5, %%eax)
:: "r" (&c->redDither),
"m" (dummy), "m" (dummy), "m" (dummy),
"r" (dest), "m" (dstW)
: "%eax", "%edx", "%esi"
);
}
break;
case IMGFMT_YUY2:
{
asm volatile(
YSCALEYUV2PACKEDX
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
"psraw $3, %%mm3 \n\t"
"psraw $3, %%mm4 \n\t"
"psraw $3, %%mm1 \n\t"
"psraw $3, %%mm7 \n\t"
WRITEYUY2(%4, %5, %%eax)
:: "r" (&c->redDither),
"m" (dummy), "m" (dummy), "m" (dummy),
"r" (dest), "m" (dstW)
: "%eax", "%edx", "%esi"
);
}
break;
#endif
default:
yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
chrFilter, chrSrc, chrFilterSize,
dest, dstW, dstY);
break;
}
}
/**
* vertical bilinear scale YV12 to RGB
*/
static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
{
int yalpha1=yalpha^4095;
int uvalpha1=uvalpha^4095;
int i;
#if 0 //isn't used
if(flags&SWS_FULL_CHR_H_INT)
{
switch(dstFormat)
{
#ifdef HAVE_MMX
case IMGFMT_BGR32:
asm volatile(
FULL_YSCALEYUV2RGB
"punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
"punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
"movq %%mm3, %%mm1 \n\t"
"punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
"punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
MOVNTQ(%%mm3, (%4, %%eax, 4))
MOVNTQ(%%mm1, 8(%4, %%eax, 4))
"addl $4, %%eax \n\t"
"cmpl %5, %%eax \n\t"
" jb 1b \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
: "%eax"
);
break;
case IMGFMT_BGR24:
asm volatile(
FULL_YSCALEYUV2RGB
// lsb ... msb
"punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
"punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
"movq %%mm3, %%mm1 \n\t"
"punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
"punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
"movq %%mm3, %%mm2 \n\t" // BGR0BGR0
"psrlq $8, %%mm3 \n\t" // GR0BGR00
"pand "MANGLE(bm00000111)", %%mm2\n\t" // BGR00000
"pand "MANGLE(bm11111000)", %%mm3\n\t" // 000BGR00
"por %%mm2, %%mm3 \n\t" // BGRBGR00
"movq %%mm1, %%mm2 \n\t"
"psllq $48, %%mm1 \n\t" // 000000BG
"por %%mm1, %%mm3 \n\t" // BGRBGRBG
"movq %%mm2, %%mm1 \n\t" // BGR0BGR0
"psrld $16, %%mm2 \n\t" // R000R000
"psrlq $24, %%mm1 \n\t" // 0BGR0000
"por %%mm2, %%mm1 \n\t" // RBGRR000
"movl %4, %%ebx \n\t"
"addl %%eax, %%ebx \n\t"
#ifdef HAVE_MMX2
//FIXME Alignment
"movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
"movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
#else
"movd %%mm3, (%%ebx, %%eax, 2) \n\t"
"psrlq $32, %%mm3 \n\t"
"movd %%mm3, 4(%%ebx, %%eax, 2) \n\t"
"movd %%mm1, 8(%%ebx, %%eax, 2) \n\t"
#endif
"addl $4, %%eax \n\t"
"cmpl %5, %%eax \n\t"
" jb 1b \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
: "%eax", "%ebx"
);
break;
case IMGFMT_BGR15:
asm volatile(
FULL_YSCALEYUV2RGB
#ifdef DITHER1XBPP
"paddusb "MANGLE(g5Dither)", %%mm1\n\t"
"paddusb "MANGLE(r5Dither)", %%mm0\n\t"
"paddusb "MANGLE(b5Dither)", %%mm3\n\t"
#endif
"punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
"punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
"punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
"psrlw $3, %%mm3 \n\t"
"psllw $2, %%mm1 \n\t"
"psllw $7, %%mm0 \n\t"
"pand "MANGLE(g15Mask)", %%mm1 \n\t"
"pand "MANGLE(r15Mask)", %%mm0 \n\t"
"por %%mm3, %%mm1 \n\t"
"por %%mm1, %%mm0 \n\t"
MOVNTQ(%%mm0, (%4, %%eax, 2))
"addl $4, %%eax \n\t"
"cmpl %5, %%eax \n\t"
" jb 1b \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
: "%eax"
);
break;
case IMGFMT_BGR16:
asm volatile(
FULL_YSCALEYUV2RGB
#ifdef DITHER1XBPP
"paddusb "MANGLE(g6Dither)", %%mm1\n\t"
"paddusb "MANGLE(r5Dither)", %%mm0\n\t"
"paddusb "MANGLE(b5Dither)", %%mm3\n\t"
#endif
"punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
"punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
"punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
"psrlw $3, %%mm3 \n\t"
"psllw $3, %%mm1 \n\t"
"psllw $8, %%mm0 \n\t"
"pand "MANGLE(g16Mask)", %%mm1 \n\t"
"pand "MANGLE(r16Mask)", %%mm0 \n\t"
"por %%mm3, %%mm1 \n\t"
"por %%mm1, %%mm0 \n\t"
MOVNTQ(%%mm0, (%4, %%eax, 2))
"addl $4, %%eax \n\t"
"cmpl %5, %%eax \n\t"
" jb 1b \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
: "%eax"
);
break;
#endif
case IMGFMT_RGB32:
#ifndef HAVE_MMX
case IMGFMT_BGR32:
#endif
if(dstFormat==IMGFMT_BGR32)
{
int i;
#ifdef WORDS_BIGENDIAN
dest++;
#endif
for(i=0;i<dstW;i++){
// vertical linear interpolation && yuv2rgb in a single step:
int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
dest+= 4;
}
}
else if(dstFormat==IMGFMT_BGR24)
{
int i;
for(i=0;i<dstW;i++){
// vertical linear interpolation && yuv2rgb in a single step:
int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
dest+= 3;
}
}
else if(dstFormat==IMGFMT_BGR16)
{
int i;
for(i=0;i<dstW;i++){
// vertical linear interpolation && yuv2rgb in a single step:
int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
((uint16_t*)dest)[i] =
clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
clip_table16r[(Y + yuvtab_3343[V]) >>13];
}
}
else if(dstFormat==IMGFMT_BGR15)
{
int i;
for(i=0;i<dstW;i++){
// vertical linear interpolation && yuv2rgb in a single step:
int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
((uint16_t*)dest)[i] =
clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
clip_table15r[(Y + yuvtab_3343[V]) >>13];
}
}
}//FULL_UV_IPOL
else
{
#endif // if 0
#ifdef HAVE_MMX
switch(c->dstFormat)
{
//Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
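/* %esp is repurposed here as an extra pointer register (it holds dest) since all
   other general-purpose registers are in use; the real stack pointer is saved in
   the context at ESP_OFFSET and restored after each block. */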
case IMGFMT_BGR32:
asm volatile(
"movl %%esp, "ESP_OFFSET"(%5) \n\t"
"movl %4, %%esp \n\t"
YSCALEYUV2RGB(%%eax, %5)
WRITEBGR32(%%esp, 8280(%5), %%eax)
"movl "ESP_OFFSET"(%5), %%esp \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
: "%eax"
);
return;
case IMGFMT_BGR24:
asm volatile(
"movl %%esp, "ESP_OFFSET"(%5) \n\t"
"movl %4, %%esp \n\t"
YSCALEYUV2RGB(%%eax, %5)
WRITEBGR24(%%esp, 8280(%5), %%eax)
"movl "ESP_OFFSET"(%5), %%esp \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
: "%eax"
);
return;
case IMGFMT_BGR15:
asm volatile(
"movl %%esp, "ESP_OFFSET"(%5) \n\t"
"movl %4, %%esp \n\t"
YSCALEYUV2RGB(%%eax, %5)
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
"paddusb "MANGLE(g5Dither)", %%mm4\n\t"
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
WRITEBGR15(%%esp, 8280(%5), %%eax)
"movl "ESP_OFFSET"(%5), %%esp \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
: "%eax"
);
return;
case IMGFMT_BGR16:
asm volatile(
"movl %%esp, "ESP_OFFSET"(%5) \n\t"
"movl %4, %%esp \n\t"
YSCALEYUV2RGB(%%eax, %5)
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
"paddusb "MANGLE(g6Dither)", %%mm4\n\t"
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
WRITEBGR16(%%esp, 8280(%5), %%eax)
"movl "ESP_OFFSET"(%5), %%esp \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
: "%eax"
);
return;
case IMGFMT_YUY2:
asm volatile(
"movl %%esp, "ESP_OFFSET"(%5) \n\t"
"movl %4, %%esp \n\t"
YSCALEYUV2PACKED(%%eax, %5)
WRITEYUY2(%%esp, 8280(%5), %%eax)
"movl "ESP_OFFSET"(%5), %%esp \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
: "%eax"
);
return;
default: break;
}
#endif //HAVE_MMX
YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C)
}
/**
* YV12 to RGB without scaling or interpolating
*/
static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
{
const int yalpha1=0;
int i;
uint16_t *buf1= buf0; //FIXME needed for the rgb1/bgr1
const int yalpha= 4096; //FIXME ...
if(flags&SWS_FULL_CHR_H_INT)
{
RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
return;
}
#ifdef HAVE_MMX
if( uvalpha < 2048 ) // note this is not correct (shifts chrominance by 0.5 pixels) but it's a bit faster
{
switch(dstFormat)
{
case IMGFMT_BGR32:
asm volatile(
"movl %%esp, "ESP_OFFSET"(%5) \n\t"
"movl %4, %%esp \n\t"
YSCALEYUV2RGB1(%%eax, %5)
WRITEBGR32(%%esp, 8280(%5), %%eax)
"movl "ESP_OFFSET"(%5), %%esp \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
: "%eax"
);
return;
case IMGFMT_BGR24:
asm volatile(
"movl %%esp, "ESP_OFFSET"(%5) \n\t"
"movl %4, %%esp \n\t"
YSCALEYUV2RGB1(%%eax, %5)
WRITEBGR24(%%esp, 8280(%5), %%eax)
"movl "ESP_OFFSET"(%5), %%esp \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
: "%eax"
);
return;
case IMGFMT_BGR15:
asm volatile(
"movl %%esp, "ESP_OFFSET"(%5) \n\t"
"movl %4, %%esp \n\t"
YSCALEYUV2RGB1(%%eax, %5)
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
"paddusb "MANGLE(g5Dither)", %%mm4\n\t"
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
WRITEBGR15(%%esp, 8280(%5), %%eax)
"movl "ESP_OFFSET"(%5), %%esp \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
: "%eax"
);
return;
case IMGFMT_BGR16:
asm volatile(
"movl %%esp, "ESP_OFFSET"(%5) \n\t"
"movl %4, %%esp \n\t"
YSCALEYUV2RGB1(%%eax, %5)
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
"paddusb "MANGLE(g6Dither)", %%mm4\n\t"
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
WRITEBGR16(%%esp, 8280(%5), %%eax)
"movl "ESP_OFFSET"(%5), %%esp \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
: "%eax"
);
return;
case IMGFMT_YUY2:
asm volatile(
"movl %%esp, "ESP_OFFSET"(%5) \n\t"
"movl %4, %%esp \n\t"
YSCALEYUV2PACKED1(%%eax, %5)
WRITEYUY2(%%esp, 8280(%5), %%eax)
"movl "ESP_OFFSET"(%5), %%esp \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
: "%eax"
);
return;
}
}
else
{
switch(dstFormat)
{
case IMGFMT_BGR32:
asm volatile(
"movl %%esp, "ESP_OFFSET"(%5) \n\t"
"movl %4, %%esp \n\t"
YSCALEYUV2RGB1b(%%eax, %5)
WRITEBGR32(%%esp, 8280(%5), %%eax)
"movl "ESP_OFFSET"(%5), %%esp \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
: "%eax"
);
return;
case IMGFMT_BGR24:
asm volatile(
"movl %%esp, "ESP_OFFSET"(%5) \n\t"
"movl %4, %%esp \n\t"
YSCALEYUV2RGB1b(%%eax, %5)
WRITEBGR24(%%esp, 8280(%5), %%eax)
"movl "ESP_OFFSET"(%5), %%esp \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
: "%eax"
);
return;
case IMGFMT_BGR15:
asm volatile(
"movl %%esp, "ESP_OFFSET"(%5) \n\t"
"movl %4, %%esp \n\t"
YSCALEYUV2RGB1b(%%eax, %5)
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
"paddusb "MANGLE(g5Dither)", %%mm4\n\t"
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
WRITEBGR15(%%esp, 8280(%5), %%eax)
"movl "ESP_OFFSET"(%5), %%esp \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
: "%eax"
);
return;
case IMGFMT_BGR16:
asm volatile(
"movl %%esp, "ESP_OFFSET"(%5) \n\t"
"movl %4, %%esp \n\t"
YSCALEYUV2RGB1b(%%eax, %5)
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
"paddusb "MANGLE(g6Dither)", %%mm4\n\t"
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
WRITEBGR16(%%esp, 8280(%5), %%eax)
"movl "ESP_OFFSET"(%5), %%esp \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
: "%eax"
);
return;
case IMGFMT_YUY2:
asm volatile(
"movl %%esp, "ESP_OFFSET"(%5) \n\t"
"movl %4, %%esp \n\t"
YSCALEYUV2PACKED1b(%%eax, %5)
WRITEYUY2(%%esp, 8280(%5), %%eax)
"movl "ESP_OFFSET"(%5), %%esp \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
: "%eax"
);
return;
}
}
#endif
if( uvalpha < 2048 )
{
YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C)
}else{
YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C)
}
}
//FIXME yuy2* can read up to 7 samples too many
static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, int width)
{
#ifdef HAVE_MMX
asm volatile(
"movq "MANGLE(bm01010101)", %%mm2\n\t"
"movl %0, %%eax \n\t"
"1: \n\t"
"movq (%1, %%eax,2), %%mm0 \n\t"
"movq 8(%1, %%eax,2), %%mm1 \n\t"
"pand %%mm2, %%mm0 \n\t"
"pand %%mm2, %%mm1 \n\t"
"packuswb %%mm1, %%mm0 \n\t"
"movq %%mm0, (%2, %%eax) \n\t"
"addl $8, %%eax \n\t"
" js 1b \n\t"
: : "g" (-width), "r" (src+width*2), "r" (dst+width)
: "%eax"
);
#else
int i;
for(i=0; i<width; i++)
dst[i]= src[2*i];
#endif
}
static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
asm volatile(
"movq "MANGLE(bm01010101)", %%mm4\n\t"
"movl %0, %%eax \n\t"
"1: \n\t"
"movq (%1, %%eax,4), %%mm0 \n\t"
"movq 8(%1, %%eax,4), %%mm1 \n\t"
"movq (%2, %%eax,4), %%mm2 \n\t"
"movq 8(%2, %%eax,4), %%mm3 \n\t"
PAVGB(%%mm2, %%mm0)
PAVGB(%%mm3, %%mm1)
"psrlw $8, %%mm0 \n\t"
"psrlw $8, %%mm1 \n\t"
"packuswb %%mm1, %%mm0 \n\t"
"movq %%mm0, %%mm1 \n\t"
"psrlw $8, %%mm0 \n\t"
"pand %%mm4, %%mm1 \n\t"
"packuswb %%mm0, %%mm0 \n\t"
"packuswb %%mm1, %%mm1 \n\t"
"movd %%mm0, (%4, %%eax) \n\t"
"movd %%mm1, (%3, %%eax) \n\t"
"addl $4, %%eax \n\t"
" js 1b \n\t"
: : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
: "%eax"
);
#else
int i;
for(i=0; i<width; i++)
{
dstU[i]= (src1[4*i + 1] + src2[4*i + 1])>>1;
dstV[i]= (src1[4*i + 3] + src2[4*i + 3])>>1;
}
#endif
}
//this is almost identical to the previous one, and exists only because yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses
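// byte layout per macropixel: YUY2 is Y0 U Y1 V, UYVY is U Y0 V Y1 - hence
// uyvyToY extracts the odd bytes (psrlw $8) where yuy2ToY masked the even ones (pand)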
static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, int width)
{
#ifdef HAVE_MMX
asm volatile(
"movl %0, %%eax \n\t"
"1: \n\t"
"movq (%1, %%eax,2), %%mm0 \n\t"
"movq 8(%1, %%eax,2), %%mm1 \n\t"
"psrlw $8, %%mm0 \n\t"
"psrlw $8, %%mm1 \n\t"
"packuswb %%mm1, %%mm0 \n\t"
"movq %%mm0, (%2, %%eax) \n\t"
"addl $8, %%eax \n\t"
" js 1b \n\t"
: : "g" (-width), "r" (src+width*2), "r" (dst+width)
: "%eax"
);
#else
int i;
for(i=0; i<width; i++)
dst[i]= src[2*i+1];
#endif
}
static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
asm volatile(
"movq "MANGLE(bm01010101)", %%mm4\n\t"
"movl %0, %%eax \n\t"
"1: \n\t"
"movq (%1, %%eax,4), %%mm0 \n\t"
"movq 8(%1, %%eax,4), %%mm1 \n\t"
"movq (%2, %%eax,4), %%mm2 \n\t"
"movq 8(%2, %%eax,4), %%mm3 \n\t"
PAVGB(%%mm2, %%mm0)
PAVGB(%%mm3, %%mm1)
"pand %%mm4, %%mm0 \n\t"
"pand %%mm4, %%mm1 \n\t"
"packuswb %%mm1, %%mm0 \n\t"
"movq %%mm0, %%mm1 \n\t"
"psrlw $8, %%mm0 \n\t"
"pand %%mm4, %%mm1 \n\t"
"packuswb %%mm0, %%mm0 \n\t"
"packuswb %%mm1, %%mm1 \n\t"
"movd %%mm0, (%4, %%eax) \n\t"
"movd %%mm1, (%3, %%eax) \n\t"
"addl $4, %%eax \n\t"
" js 1b \n\t"
: : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
: "%eax"
);
#else
int i;
for(i=0; i<width; i++)
{
dstU[i]= (src1[4*i + 0] + src2[4*i + 0])>>1;
dstV[i]= (src1[4*i + 2] + src2[4*i + 2])>>1;
}
#endif
}
static inline void RENAME(bgr32ToY)(uint8_t *dst, uint8_t *src, int width)
{
#ifdef HAVE_MMXFIXME
#else
int i;
for(i=0; i<width; i++)
{
int b= ((uint32_t*)src)[i]&0xFF;
int g= (((uint32_t*)src)[i]>>8)&0xFF;
int r= (((uint32_t*)src)[i]>>16)&0xFF;
dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
}
#endif
}
static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
#ifdef HAVE_MMXFIXME
#else
int i;
for(i=0; i<width; i++)
{
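// 2x2 averaging trick: a,e,c,d are four BGR32 pixels; (x&0xFF00FF) keeps R and B,
// so one add sums both channels in parallel (each sum <= 4*255, so the fields
// cannot carry into one another), and h does the same for G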
  1493. const int a= ((uint32_t*)src1)[2*i+0];
  1494. const int e= ((uint32_t*)src1)[2*i+1];
  1495. const int c= ((uint32_t*)src2)[2*i+0];
  1496. const int d= ((uint32_t*)src2)[2*i+1];
  1497. const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF);
  1498. const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00);
  1499. const int b= l&0x3FF;
  1500. const int g= h>>8;
  1501. const int r= l>>16;
  1502. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1503. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1504. }
  1505. #endif
  1506. }
static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, int width)
{
#ifdef HAVE_MMX
asm volatile(
"movl %2, %%eax \n\t"
"movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t"
"movq "MANGLE(w1111)", %%mm5 \n\t"
"pxor %%mm7, %%mm7 \n\t"
"leal (%%eax, %%eax, 2), %%ebx \n\t"
".balign 16 \n\t"
"1: \n\t"
PREFETCH" 64(%0, %%ebx) \n\t"
"movd (%0, %%ebx), %%mm0 \n\t"
"movd 3(%0, %%ebx), %%mm1 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"movd 6(%0, %%ebx), %%mm2 \n\t"
"movd 9(%0, %%ebx), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"pmaddwd %%mm6, %%mm0 \n\t"
"pmaddwd %%mm6, %%mm1 \n\t"
"pmaddwd %%mm6, %%mm2 \n\t"
"pmaddwd %%mm6, %%mm3 \n\t"
#ifndef FAST_BGR2YV12
"psrad $8, %%mm0 \n\t"
"psrad $8, %%mm1 \n\t"
"psrad $8, %%mm2 \n\t"
"psrad $8, %%mm3 \n\t"
#endif
"packssdw %%mm1, %%mm0 \n\t"
"packssdw %%mm3, %%mm2 \n\t"
"pmaddwd %%mm5, %%mm0 \n\t"
"pmaddwd %%mm5, %%mm2 \n\t"
"packssdw %%mm2, %%mm0 \n\t"
"psraw $7, %%mm0 \n\t"
"movd 12(%0, %%ebx), %%mm4 \n\t"
"movd 15(%0, %%ebx), %%mm1 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"movd 18(%0, %%ebx), %%mm2 \n\t"
"movd 21(%0, %%ebx), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"pmaddwd %%mm6, %%mm4 \n\t"
"pmaddwd %%mm6, %%mm1 \n\t"
"pmaddwd %%mm6, %%mm2 \n\t"
"pmaddwd %%mm6, %%mm3 \n\t"
#ifndef FAST_BGR2YV12
"psrad $8, %%mm4 \n\t"
"psrad $8, %%mm1 \n\t"
"psrad $8, %%mm2 \n\t"
"psrad $8, %%mm3 \n\t"
#endif
"packssdw %%mm1, %%mm4 \n\t"
"packssdw %%mm3, %%mm2 \n\t"
"pmaddwd %%mm5, %%mm4 \n\t"
"pmaddwd %%mm5, %%mm2 \n\t"
"addl $24, %%ebx \n\t"
"packssdw %%mm2, %%mm4 \n\t"
"psraw $7, %%mm4 \n\t"
"packuswb %%mm4, %%mm0 \n\t"
"paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t"
"movq %%mm0, (%1, %%eax) \n\t"
"addl $8, %%eax \n\t"
" js 1b \n\t"
: : "r" (src+width*3), "r" (dst+width), "g" (-width)
: "%eax", "%ebx"
);
#else
int i;
for(i=0; i<width; i++)
{
int b= src[i*3+0];
int g= src[i*3+1];
int r= src[i*3+2];
dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
}
#endif
}
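/* Editor's note (hedged): each movd above loads 4 bytes, i.e. one whole BGR
pixel plus the first byte of its neighbour. pmaddwd against bgr2YCoeff then
yields B*BY+G*GY and R*RY(+x*0) as two dwords per pixel; the later pmaddwd
against w1111 (assumed to hold four words of 1, with the 4th coefficient of
bgr2YCoeff assumed 0) completes the horizontal sum before packing and adding
the luma offset. */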
static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
#ifdef HAVE_MMX
asm volatile(
"movl %4, %%eax \n\t"
"movq "MANGLE(w1111)", %%mm5 \n\t"
"movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t"
"pxor %%mm7, %%mm7 \n\t"
"leal (%%eax, %%eax, 2), %%ebx \n\t"
"addl %%ebx, %%ebx \n\t"
".balign 16 \n\t"
"1: \n\t"
PREFETCH" 64(%0, %%ebx) \n\t"
PREFETCH" 64(%1, %%ebx) \n\t"
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
"movq (%0, %%ebx), %%mm0 \n\t"
"movq (%1, %%ebx), %%mm1 \n\t"
"movq 6(%0, %%ebx), %%mm2 \n\t"
"movq 6(%1, %%ebx), %%mm3 \n\t"
PAVGB(%%mm1, %%mm0)
PAVGB(%%mm3, %%mm2)
"movq %%mm0, %%mm1 \n\t"
"movq %%mm2, %%mm3 \n\t"
"psrlq $24, %%mm0 \n\t"
"psrlq $24, %%mm2 \n\t"
PAVGB(%%mm1, %%mm0)
PAVGB(%%mm3, %%mm2)
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
#else
"movd (%0, %%ebx), %%mm0 \n\t"
"movd (%1, %%ebx), %%mm1 \n\t"
"movd 3(%0, %%ebx), %%mm2 \n\t"
"movd 3(%1, %%ebx), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"paddw %%mm1, %%mm0 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm2, %%mm0 \n\t"
"movd 6(%0, %%ebx), %%mm4 \n\t"
"movd 6(%1, %%ebx), %%mm1 \n\t"
"movd 9(%0, %%ebx), %%mm2 \n\t"
"movd 9(%1, %%ebx), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"paddw %%mm1, %%mm4 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm4, %%mm2 \n\t"
"psrlw $2, %%mm0 \n\t"
"psrlw $2, %%mm2 \n\t"
#endif
"movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
"movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
"pmaddwd %%mm0, %%mm1 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
"pmaddwd %%mm6, %%mm0 \n\t"
"pmaddwd %%mm6, %%mm2 \n\t"
#ifndef FAST_BGR2YV12
"psrad $8, %%mm0 \n\t"
"psrad $8, %%mm1 \n\t"
"psrad $8, %%mm2 \n\t"
"psrad $8, %%mm3 \n\t"
#endif
"packssdw %%mm2, %%mm0 \n\t"
"packssdw %%mm3, %%mm1 \n\t"
"pmaddwd %%mm5, %%mm0 \n\t"
"pmaddwd %%mm5, %%mm1 \n\t"
"packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
"psraw $7, %%mm0 \n\t"
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
"movq 12(%0, %%ebx), %%mm4 \n\t"
"movq 12(%1, %%ebx), %%mm1 \n\t"
"movq 18(%0, %%ebx), %%mm2 \n\t"
"movq 18(%1, %%ebx), %%mm3 \n\t"
PAVGB(%%mm1, %%mm4)
PAVGB(%%mm3, %%mm2)
"movq %%mm4, %%mm1 \n\t"
"movq %%mm2, %%mm3 \n\t"
"psrlq $24, %%mm4 \n\t"
"psrlq $24, %%mm2 \n\t"
PAVGB(%%mm1, %%mm4)
PAVGB(%%mm3, %%mm2)
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
#else
"movd 12(%0, %%ebx), %%mm4 \n\t"
"movd 12(%1, %%ebx), %%mm1 \n\t"
"movd 15(%0, %%ebx), %%mm2 \n\t"
"movd 15(%1, %%ebx), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"paddw %%mm1, %%mm4 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm2, %%mm4 \n\t"
"movd 18(%0, %%ebx), %%mm5 \n\t"
"movd 18(%1, %%ebx), %%mm1 \n\t"
"movd 21(%0, %%ebx), %%mm2 \n\t"
"movd 21(%1, %%ebx), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"paddw %%mm1, %%mm5 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm5, %%mm2 \n\t"
"movq "MANGLE(w1111)", %%mm5 \n\t"
"psrlw $2, %%mm4 \n\t"
"psrlw $2, %%mm2 \n\t"
#endif
"movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
"movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
"pmaddwd %%mm4, %%mm1 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
"pmaddwd %%mm6, %%mm4 \n\t"
"pmaddwd %%mm6, %%mm2 \n\t"
#ifndef FAST_BGR2YV12
"psrad $8, %%mm4 \n\t"
"psrad $8, %%mm1 \n\t"
"psrad $8, %%mm2 \n\t"
"psrad $8, %%mm3 \n\t"
#endif
"packssdw %%mm2, %%mm4 \n\t"
"packssdw %%mm3, %%mm1 \n\t"
"pmaddwd %%mm5, %%mm4 \n\t"
"pmaddwd %%mm5, %%mm1 \n\t"
"addl $24, %%ebx \n\t"
"packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
"psraw $7, %%mm4 \n\t"
"movq %%mm0, %%mm1 \n\t"
"punpckldq %%mm4, %%mm0 \n\t"
"punpckhdq %%mm4, %%mm1 \n\t"
"packsswb %%mm1, %%mm0 \n\t"
"paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t"
"movd %%mm0, (%2, %%eax) \n\t"
"punpckhdq %%mm0, %%mm0 \n\t"
"movd %%mm0, (%3, %%eax) \n\t"
"addl $4, %%eax \n\t"
" js 1b \n\t"
: : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width)
: "%eax", "%ebx"
);
#else
int i;
for(i=0; i<width; i++)
{
int b= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
int r= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
}
#endif
}
static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width)
{
int i;
for(i=0; i<width; i++)
{
int d= ((uint16_t*)src)[i];
int b= d&0x1F;
int g= (d>>5)&0x3F;
int r= (d>>11)&0x1F;
dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
}
}
static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
int i;
for(i=0; i<width; i++)
{
int d0= ((uint32_t*)src1)[i];
int d1= ((uint32_t*)src2)[i];
int dl= (d0&0x07E0F81F) + (d1&0x07E0F81F);
int dh= ((d0>>5)&0x07C0F83F) + ((d1>>5)&0x07C0F83F);
int dh2= (dh>>11) + (dh<<21);
int d= dh2 + dl;
int b= d&0x7F;
int r= (d>>11)&0x7F;
int g= d>>21;
dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
}
}
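/* Editor's note (hedged worked example of the bit trick above): one uint32_t
load grabs two RGB565 pixels. The masks 0x07E0F81F and 0x07C0F83F scatter
the R, G, B fields of both pixels into lanes with spare headroom, so the two
lines can be added without one field carrying into the next; after the
(dh>>11)+(dh<<21) merge, d holds 4-sample sums: B in bits 0..6, R in bits
11..17, G in bits 21..27. The >>(RGB2YUV_SHIFT+2-2) divides by the 4 samples
(+2) while rescaling the 6-bit green (-2), and the 2* on the R and B
coefficients compensates those channels being only 5 bits wide. */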
static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width)
{
int i;
for(i=0; i<width; i++)
{
int d= ((uint16_t*)src)[i];
int b= d&0x1F;
int g= (d>>5)&0x1F;
int r= (d>>10)&0x1F;
dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
}
}
static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
int i;
for(i=0; i<width; i++)
{
int d0= ((uint32_t*)src1)[i];
int d1= ((uint32_t*)src2)[i];
int dl= (d0&0x03E07C1F) + (d1&0x03E07C1F);
int dh= ((d0>>5)&0x03E0F81F) + ((d1>>5)&0x03E0F81F);
int dh2= (dh>>11) + (dh<<21);
int d= dh2 + dl;
int b= d&0x7F;
int r= (d>>10)&0x7F;
int g= d>>21;
dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
}
}
static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width)
{
int i;
for(i=0; i<width; i++)
{
int r= ((uint32_t*)src)[i]&0xFF;
int g= (((uint32_t*)src)[i]>>8)&0xFF;
int b= (((uint32_t*)src)[i]>>16)&0xFF;
dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
}
}
static inline void RENAME(rgb32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
int i;
for(i=0; i<width; i++)
{
const int a= ((uint32_t*)src1)[2*i+0];
const int e= ((uint32_t*)src1)[2*i+1];
const int c= ((uint32_t*)src2)[2*i+0];
const int d= ((uint32_t*)src2)[2*i+1];
const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF);
const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00);
const int r= l&0x3FF;
const int g= h>>8;
const int b= l>>16;
dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
}
}
static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, int width)
{
int i;
for(i=0; i<width; i++)
{
int r= src[i*3+0];
int g= src[i*3+1];
int b= src[i*3+2];
dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
}
}
static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
int i;
for(i=0; i<width; i++)
{
int r= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
int b= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
}
}
// Bilinear / Bicubic scaling
static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
int16_t *filter, int16_t *filterPos, int filterSize)
{
#ifdef HAVE_MMX
assert(filterSize % 4 == 0 && filterSize>0);
if(filterSize==4) // always true for upscaling, sometimes for downscaling too
{
int counter= -2*dstW;
filter-= counter*2;
filterPos-= counter/2;
dst-= counter/2;
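/* Editor's note (hedged): the counter idiom used by these loops: counter
starts at -2*dstW and the data pointers are pre-biased by it, so indexed
addressing still hits the right elements while "addl $4, %%ebp" walks the
counter up toward zero; the add out of -4 sets the carry flag exactly once,
so " jnc 1b" ends the loop without a separate cmpl. */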
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"movq "MANGLE(w02)", %%mm6 \n\t"
"pushl %%ebp \n\t" // we use 7 regs here ...
"movl %%eax, %%ebp \n\t"
".balign 16 \n\t"
"1: \n\t"
"movzwl (%2, %%ebp), %%eax \n\t"
"movzwl 2(%2, %%ebp), %%ebx \n\t"
"movq (%1, %%ebp, 4), %%mm1 \n\t"
"movq 8(%1, %%ebp, 4), %%mm3 \n\t"
"movd (%3, %%eax), %%mm0 \n\t"
"movd (%3, %%ebx), %%mm2 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"pmaddwd %%mm1, %%mm0 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
"psrad $8, %%mm0 \n\t"
"psrad $8, %%mm3 \n\t"
"packssdw %%mm3, %%mm0 \n\t"
"pmaddwd %%mm6, %%mm0 \n\t"
"packssdw %%mm0, %%mm0 \n\t"
"movd %%mm0, (%4, %%ebp) \n\t"
"addl $4, %%ebp \n\t"
" jnc 1b \n\t"
"popl %%ebp \n\t"
: "+a" (counter)
: "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
: "%ebx"
);
}
else if(filterSize==8)
{
int counter= -2*dstW;
filter-= counter*4;
filterPos-= counter/2;
dst-= counter/2;
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"movq "MANGLE(w02)", %%mm6 \n\t"
"pushl %%ebp \n\t" // we use 7 regs here ...
"movl %%eax, %%ebp \n\t"
".balign 16 \n\t"
"1: \n\t"
"movzwl (%2, %%ebp), %%eax \n\t"
"movzwl 2(%2, %%ebp), %%ebx \n\t"
"movq (%1, %%ebp, 8), %%mm1 \n\t"
"movq 16(%1, %%ebp, 8), %%mm3 \n\t"
"movd (%3, %%eax), %%mm0 \n\t"
"movd (%3, %%ebx), %%mm2 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"pmaddwd %%mm1, %%mm0 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
"movq 8(%1, %%ebp, 8), %%mm1 \n\t"
"movq 24(%1, %%ebp, 8), %%mm5 \n\t"
"movd 4(%3, %%eax), %%mm4 \n\t"
"movd 4(%3, %%ebx), %%mm2 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"pmaddwd %%mm1, %%mm4 \n\t"
"pmaddwd %%mm2, %%mm5 \n\t"
"paddd %%mm4, %%mm0 \n\t"
"paddd %%mm5, %%mm3 \n\t"
"psrad $8, %%mm0 \n\t"
"psrad $8, %%mm3 \n\t"
"packssdw %%mm3, %%mm0 \n\t"
"pmaddwd %%mm6, %%mm0 \n\t"
"packssdw %%mm0, %%mm0 \n\t"
"movd %%mm0, (%4, %%ebp) \n\t"
"addl $4, %%ebp \n\t"
" jnc 1b \n\t"
"popl %%ebp \n\t"
: "+a" (counter)
: "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
: "%ebx"
);
}
else
{
int counter= -2*dstW;
// filter-= counter*filterSize/2;
filterPos-= counter/2;
dst-= counter/2;
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"movq "MANGLE(w02)", %%mm6 \n\t"
".balign 16 \n\t"
"1: \n\t"
"movl %2, %%ecx \n\t"
"movzwl (%%ecx, %0), %%eax \n\t"
"movzwl 2(%%ecx, %0), %%ebx \n\t"
"movl %5, %%ecx \n\t"
"pxor %%mm4, %%mm4 \n\t"
"pxor %%mm5, %%mm5 \n\t"
"2: \n\t"
"movq (%1), %%mm1 \n\t"
"movq (%1, %6), %%mm3 \n\t"
"movd (%%ecx, %%eax), %%mm0 \n\t"
"movd (%%ecx, %%ebx), %%mm2 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"pmaddwd %%mm1, %%mm0 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
"paddd %%mm3, %%mm5 \n\t"
"paddd %%mm0, %%mm4 \n\t"
"addl $8, %1 \n\t"
"addl $4, %%ecx \n\t"
"cmpl %4, %%ecx \n\t"
" jb 2b \n\t"
"addl %6, %1 \n\t"
"psrad $8, %%mm4 \n\t"
"psrad $8, %%mm5 \n\t"
"packssdw %%mm5, %%mm4 \n\t"
"pmaddwd %%mm6, %%mm4 \n\t"
"packssdw %%mm4, %%mm4 \n\t"
"movl %3, %%eax \n\t"
"movd %%mm4, (%%eax, %0) \n\t"
"addl $4, %0 \n\t"
" jnc 1b \n\t"
: "+r" (counter), "+r" (filter)
: "m" (filterPos), "m" (dst), "m"(src+filterSize),
"m" (src), "r" (filterSize*2)
: "%ebx", "%eax", "%ecx"
);
}
#else
int i;
for(i=0; i<dstW; i++)
{
int j;
int srcPos= filterPos[i];
int val=0;
// printf("filterPos: %d\n", filterPos[i]);
for(j=0; j<filterSize; j++)
{
// printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
val += ((int)src[srcPos + j])*filter[filterSize*i + j];
}
// filter += hFilterSize;
dst[i] = MIN(MAX(0, val>>7), (1<<15)-1); // the cubic equation does overflow ...
// dst[i] = val>>7;
}
#endif
}
// *** horizontal scale Y line to temp buffer
static inline void RENAME(hyscale)(uint16_t *dst, int dstWidth, uint8_t *src, int srcW, int xInc,
int flags, int canMMX2BeUsed, int16_t *hLumFilter,
int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
int32_t *mmx2FilterPos)
{
if(srcFormat==IMGFMT_YUY2)
{
RENAME(yuy2ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_UYVY)
{
RENAME(uyvyToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_BGR32)
{
RENAME(bgr32ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_BGR24)
{
RENAME(bgr24ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_BGR16)
{
RENAME(bgr16ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_BGR15)
{
RENAME(bgr15ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_RGB32)
{
RENAME(rgb32ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_RGB24)
{
RENAME(rgb24ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
#ifdef HAVE_MMX
// use the new MMX scaler if the MMX2 one cannot be used (it is faster than the plain x86 asm one)
if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
#else
if(!(flags&SWS_FAST_BILINEAR))
#endif
{
RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
}
else // fast bilinear upscale / crap downscale
{
#ifdef ARCH_X86
#ifdef HAVE_MMX2
int i;
if(canMMX2BeUsed)
{
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"movl %0, %%ecx \n\t"
"movl %1, %%edi \n\t"
"movl %2, %%edx \n\t"
"movl %3, %%ebx \n\t"
"xorl %%eax, %%eax \n\t" // i
PREFETCH" (%%ecx) \n\t"
PREFETCH" 32(%%ecx) \n\t"
PREFETCH" 64(%%ecx) \n\t"
#define FUNNY_Y_CODE \
"movl (%%ebx), %%esi \n\t"\
"call *%4 \n\t"\
"addl (%%ebx, %%eax), %%ecx \n\t"\
"addl %%eax, %%edi \n\t"\
"xorl %%eax, %%eax \n\t"\

FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
:: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
"m" (funnyYCode)
: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
);
for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
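/* Editor's note (hedged): funnyYCode is MMX2 scaler code generated at init
time and entered via "call *%4"; the fixup loop just above then rewrites the
last few output pixels where (i*xInc)>>16 would read past src[srcW-1]. The
*128 matches the <<7 fixed-point scale of the 16-bit intermediate line. */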
}
else
{
#endif
// no MMX2, just plain asm ...
asm volatile(
"xorl %%eax, %%eax \n\t" // i
"xorl %%ebx, %%ebx \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
".balign 16 \n\t"
"1: \n\t"
"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"movl %1, %%edi \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, (%%edi, %%eax, 2) \n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFFFF
"adcl %3, %%ebx \n\t" //xx+= xInc>>16 + carry
"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"movl %1, %%edi \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, 2(%%edi, %%eax, 2) \n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFFFF
"adcl %3, %%ebx \n\t" //xx+= xInc>>16 + carry
"addl $2, %%eax \n\t"
"cmpl %2, %%eax \n\t"
" jb 1b \n\t"
:: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF)
: "%eax", "%ebx", "%ecx", "%edi", "%esi"
);
#ifdef HAVE_MMX2
} //if MMX2 can't be used
#endif
#else
int i;
unsigned int xpos=0;
for(i=0;i<dstWidth;i++)
{
register unsigned int xx=xpos>>16;
register unsigned int xalpha=(xpos&0xFFFF)>>9;
dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
xpos+=xInc;
}
#endif
}
}
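/* Editor's note (hedged): the scalar fast-bilinear loops above keep the source
position in 16.16 fixed point split across two registers: "addw" accumulates
the fractional part of xInc in %cx and "adcl" folds its carry into the
integer index in %ebx, so xx and 2*xalpha advance without any per-pixel
shift or divide. */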
inline static void RENAME(hcscale)(uint16_t *dst, int dstWidth, uint8_t *src1, uint8_t *src2,
int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
int32_t *mmx2FilterPos)
{
if(srcFormat==IMGFMT_YUY2)
{
RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_UYVY)
{
RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_BGR32)
{
RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_BGR24)
{
RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_BGR16)
{
RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_BGR15)
{
RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_RGB32)
{
RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_RGB24)
{
RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(isGray(srcFormat))
{
return;
}
#ifdef HAVE_MMX
// use the new MMX scaler if the MMX2 one cannot be used (it is faster than the plain x86 asm one)
if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
#else
if(!(flags&SWS_FAST_BILINEAR))
#endif
{
RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
}
else // fast bilinear upscale / crap downscale
{
#ifdef ARCH_X86
#ifdef HAVE_MMX2
int i;
if(canMMX2BeUsed)
{
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"movl %0, %%ecx \n\t"
"movl %1, %%edi \n\t"
"movl %2, %%edx \n\t"
"movl %3, %%ebx \n\t"
"xorl %%eax, %%eax \n\t" // i
PREFETCH" (%%ecx) \n\t"
PREFETCH" 32(%%ecx) \n\t"
PREFETCH" 64(%%ecx) \n\t"
#define FUNNY_UV_CODE \
"movl (%%ebx), %%esi \n\t"\
"call *%4 \n\t"\
"addl (%%ebx, %%eax), %%ecx \n\t"\
"addl %%eax, %%edi \n\t"\
"xorl %%eax, %%eax \n\t"\

FUNNY_UV_CODE
FUNNY_UV_CODE
FUNNY_UV_CODE
FUNNY_UV_CODE
"xorl %%eax, %%eax \n\t" // i
"movl %5, %%ecx \n\t" // src
"movl %1, %%edi \n\t" // buf1
"addl $4096, %%edi \n\t"
PREFETCH" (%%ecx) \n\t"
PREFETCH" 32(%%ecx) \n\t"
PREFETCH" 64(%%ecx) \n\t"
FUNNY_UV_CODE
FUNNY_UV_CODE
FUNNY_UV_CODE
FUNNY_UV_CODE
:: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
"m" (funnyUVCode), "m" (src2)
: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
);
for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
{
// printf("%d %d %d\n", dstWidth, i, srcW);
dst[i] = src1[srcW-1]*128;
dst[i+2048] = src2[srcW-1]*128;
}
}
else
{
#endif
asm volatile(
"xorl %%eax, %%eax \n\t" // i
"xorl %%ebx, %%ebx \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
".balign 16 \n\t"
"1: \n\t"
"movl %0, %%esi \n\t"
"movzbl (%%esi, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%%esi, %%ebx), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"movl %1, %%edi \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, (%%edi, %%eax, 2) \n\t"
"movzbl (%5, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%5, %%ebx), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"movl %1, %%edi \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, 4096(%%edi, %%eax, 2)\n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFFFF
"adcl %3, %%ebx \n\t" //xx+= xInc>>16 + carry
"addl $1, %%eax \n\t"
"cmpl %2, %%eax \n\t"
" jb 1b \n\t"
:: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF),
"r" (src2)
: "%eax", "%ebx", "%ecx", "%edi", "%esi"
);
#ifdef HAVE_MMX2
} //if MMX2 can't be used
#endif
#else
int i;
unsigned int xpos=0;
for(i=0;i<dstWidth;i++)
{
register unsigned int xx=xpos>>16;
register unsigned int xalpha=(xpos&0xFFFF)>>9;
dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
/* slower
dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
*/
xpos+=xInc;
}
#endif
}
}
static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dst[], int dstStride[]){
/* load a few things into local vars to make the code more readable (and faster) */
const int srcW= c->srcW;
const int dstW= c->dstW;
const int dstH= c->dstH;
const int chrDstW= c->chrDstW;
const int chrSrcW= c->chrSrcW;
const int lumXInc= c->lumXInc;
const int chrXInc= c->chrXInc;
const int dstFormat= c->dstFormat;
const int srcFormat= c->srcFormat;
const int flags= c->flags;
const int canMMX2BeUsed= c->canMMX2BeUsed;
int16_t *vLumFilterPos= c->vLumFilterPos;
int16_t *vChrFilterPos= c->vChrFilterPos;
int16_t *hLumFilterPos= c->hLumFilterPos;
int16_t *hChrFilterPos= c->hChrFilterPos;
int16_t *vLumFilter= c->vLumFilter;
int16_t *vChrFilter= c->vChrFilter;
int16_t *hLumFilter= c->hLumFilter;
int16_t *hChrFilter= c->hChrFilter;
int32_t *lumMmxFilter= c->lumMmxFilter;
int32_t *chrMmxFilter= c->chrMmxFilter;
const int vLumFilterSize= c->vLumFilterSize;
const int vChrFilterSize= c->vChrFilterSize;
const int hLumFilterSize= c->hLumFilterSize;
const int hChrFilterSize= c->hChrFilterSize;
int16_t **lumPixBuf= c->lumPixBuf;
int16_t **chrPixBuf= c->chrPixBuf;
const int vLumBufSize= c->vLumBufSize;
const int vChrBufSize= c->vChrBufSize;
uint8_t *funnyYCode= c->funnyYCode;
uint8_t *funnyUVCode= c->funnyUVCode;
uint8_t *formatConvBuffer= c->formatConvBuffer;
const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
int lastDstY;
/* vars which will change and which we need to store back in the context */
int dstY= c->dstY;
int lumBufIndex= c->lumBufIndex;
int chrBufIndex= c->chrBufIndex;
int lastInLumBuf= c->lastInLumBuf;
int lastInChrBuf= c->lastInChrBuf;
if(isPacked(c->srcFormat)){
src[0]=
src[1]=
src[2]= src[0];
srcStride[0]=
srcStride[1]=
srcStride[2]= srcStride[0];
}
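/* Editor's note (hedged): for packed input formats only src[0]/srcStride[0]
are meaningful, so all three plane pointers are aliased here; the per-format
ToY/ToUV converters invoked from hyscale/hcscale then unpack src[0] into
formatConvBuffer. */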
srcStride[1]<<= c->vChrDrop;
srcStride[2]<<= c->vChrDrop;
// printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
// (int)dst[0], (int)dst[1], (int)dst[2]);
#if 0 //self test FIXME move to a vfilter or something
{
static volatile int i=0;
i++;
if(srcFormat==IMGFMT_YV12 && i==1 && srcSliceH>= c->srcH)
selfTest(src, srcStride, c->srcW, c->srcH);
i--;
}
#endif
//printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
//dstStride[0],dstStride[1],dstStride[2]);
if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
{
static int firstTime=1; //FIXME move this into the context perhaps
if(flags & SWS_PRINT_INFO && firstTime)
{
MSG_WARN("SwScaler: Warning: dstStride is not aligned!\n"
"SwScaler: ->cannot do aligned memory accesses anymore\n");
firstTime=0;
}
}
/* Note: the user might start scaling the picture in the middle, so this will not get
executed. That is not really intended, but it works currently, so people might do it. */
if(srcSliceY ==0){
lumBufIndex=0;
chrBufIndex=0;
dstY=0;
lastInLumBuf= -1;
lastInChrBuf= -1;
}
lastDstY= dstY;
for(;dstY < dstH; dstY++){
unsigned char *dest =dst[0]+dstStride[0]*dstY;
const int chrDstY= dstY>>c->chrDstVSubSample;
unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
//handle holes (FAST_BILINEAR & weird filters)
if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
//printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1)
ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1)
// Do we have enough lines in this slice to output the dstY line?
if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
{
//Do horizontal scaling
while(lastInLumBuf < lastLumSrcY)
{
uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
lumBufIndex++;
// printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
ASSERT(lumBufIndex < 2*vLumBufSize)
ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
// printf("%d %d\n", lumBufIndex, vLumBufSize);
RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
funnyYCode, c->srcFormat, formatConvBuffer,
c->lumMmx2Filter, c->lumMmx2FilterPos);
lastInLumBuf++;
}
while(lastInChrBuf < lastChrSrcY)
{
uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
chrBufIndex++;
ASSERT(chrBufIndex < 2*vChrBufSize)
ASSERT(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH))
ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
//FIXME replace parameters through context struct (some at least)
if(!(isGray(srcFormat) || isGray(dstFormat)))
RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
funnyUVCode, c->srcFormat, formatConvBuffer,
c->chrMmx2Filter, c->chrMmx2FilterPos);
lastInChrBuf++;
}
//wrap buf index around to stay inside the ring buffer
if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
}
else // not enough lines left in this slice -> load the rest into the buffer
{
/* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
vChrBufSize, vLumBufSize);*/
//Do horizontal scaling
while(lastInLumBuf+1 < srcSliceY + srcSliceH)
{
uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
lumBufIndex++;
ASSERT(lumBufIndex < 2*vLumBufSize)
ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
funnyYCode, c->srcFormat, formatConvBuffer,
c->lumMmx2Filter, c->lumMmx2FilterPos);
lastInLumBuf++;
}
while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
{
uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
chrBufIndex++;
ASSERT(chrBufIndex < 2*vChrBufSize)
ASSERT(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH)
ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
if(!(isGray(srcFormat) || isGray(dstFormat)))
RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
funnyUVCode, c->srcFormat, formatConvBuffer,
c->chrMmx2Filter, c->chrMmx2FilterPos);
lastInChrBuf++;
}
//wrap buf index around to stay inside the ring buffer
if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
break; //we can't output a dstY line, so let's try with the next slice
}
#ifdef HAVE_MMX
b5Dither= dither8[dstY&1];
g6Dither= dither4[dstY&1];
g5Dither= dither8[dstY&1];
r5Dither= dither8[(dstY+1)&1];
#endif
if(dstY < dstH-2)
{
int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
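/* Editor's note (hedged): lumPixBuf/chrPixBuf are assumed to hold
2*vLumBufSize / 2*vChrBufSize entries with the second half mirroring the
first, so the +vLumBufSize/+vChrBufSize bias keeps these pointers inside
the array even before the ring indices wrap. */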
#ifdef HAVE_MMX
int i;
for(i=0; i<vLumFilterSize; i++)
{
lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
lumMmxFilter[4*i+2]=
lumMmxFilter[4*i+3]=
((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
}
for(i=0; i<vChrFilterSize; i++)
{
chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
chrMmxFilter[4*i+2]=
chrMmxFilter[4*i+3]=
((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
}
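/* Editor's note (hedged): each 16-byte lumMmxFilter/chrMmxFilter entry built
above carries the source line pointer at offset 0 and, at offset 8, the
16-bit coefficient replicated into four words via *0x10001 on two dwords,
matching the 16-byte stride the MMX vertical-scaling code steps through. */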
#endif
if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 like
{
const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12
{
int16_t *lumBuf = lumPixBuf[0];
int16_t *chrBuf= chrPixBuf[0];
RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
}
else //General YV12
{
RENAME(yuv2yuvX)(c,
vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
dest, uDest, vDest, dstW, chrDstW);
}
}
else
{
ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB
{
int chrAlpha= vChrFilter[2*dstY+1];
RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
dest, dstW, chrAlpha, dstFormat, flags, dstY);
}
else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB
{
int lumAlpha= vLumFilter[2*dstY+1];
int chrAlpha= vChrFilter[2*dstY+1];
RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
dest, dstW, lumAlpha, chrAlpha, dstY);
}
else //General RGB
{
RENAME(yuv2packedX)(c,
vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
dest, dstW, dstY);
}
}
}
else // looks like we can't use MMX here without overwriting this array's tail
{
int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12
{
const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
yuv2yuvXinC(
vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
dest, uDest, vDest, dstW, chrDstW);
}
else
{
ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
yuv2packedXinC(c,
vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
dest, dstW, dstY);
}
}
}
#ifdef HAVE_MMX
__asm __volatile(SFENCE:::"memory");
__asm __volatile(EMMS:::"memory");
#endif
/* store changed local vars back in the context */
c->dstY= dstY;
c->lumBufIndex= lumBufIndex;
c->chrBufIndex= chrBufIndex;
c->lastInLumBuf= lastInLumBuf;
c->lastInChrBuf= lastInChrBuf;
return dstY - lastDstY;
}