You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2907 lines
85KB

  1. /*
  2. Copyright (C) 2001-2002 Michael Niedermayer <michaelni@gmx.at>
  3. This program is free software; you can redistribute it and/or modify
  4. it under the terms of the GNU General Public License as published by
  5. the Free Software Foundation; either version 2 of the License, or
  6. (at your option) any later version.
  7. This program is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU General Public License for more details.
  11. You should have received a copy of the GNU General Public License
  12. along with this program; if not, write to the Free Software
  13. Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  14. */
/*
 * Per-CPU instruction selection. This template is compiled several times
 * with different HAVE_MMX2 / HAVE_3DNOW settings; these macros expand to
 * the best available instruction (or a harmless placeholder) inside the
 * inline-asm strings below.
 */
#undef MOVNTQ
#undef PAVGB
#undef PREFETCH
#undef PREFETCHW
#undef EMMS
#undef SFENCE
#ifdef HAVE_3DNOW
/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
#define EMMS "femms"
#else
#define EMMS "emms"
#endif
#ifdef HAVE_3DNOW
#define PREFETCH "prefetch"
#define PREFETCHW "prefetchw"
#elif defined ( HAVE_MMX2 )
#define PREFETCH "prefetchnta"
#define PREFETCHW "prefetcht0"
#else
/* "/nop" assembles to nothing useful but keeps the asm string well-formed
   when no prefetch instruction is available */
#define PREFETCH "/nop"
#define PREFETCHW "/nop"
#endif
#ifdef HAVE_MMX2
#define SFENCE "sfence"
#else
#define SFENCE "/nop"
#endif
/* byte-wise average: pavgb (MMX2) or pavgusb (3DNow!); undefined on plain MMX */
#ifdef HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif defined (HAVE_3DNOW)
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif
/* non-temporal store where available, plain movq otherwise */
#ifdef HAVE_MMX2
#define MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif
/*
 * Multi-tap vertical scale of one 16bit plane to 8bit output.
 * x is a byte offset added to each source-line pointer (callers pass 0 for
 * the U plane and 4096 for the V plane — see RENAME(yuv2yuvX) below).
 * Expected asm operands (see the call sites): %0 = -filterSize,
 * %1 = srcLinePtrArray+filterSize, %2 = mmxFilterCoeffs+filterSize*4,
 * %3 = dest, %4 = output width in bytes.
 * Inner loop (label 2.. er, 1b via edx) accumulates filterCoeff*src into
 * mm3/mm4, then >>3 (psraw $3) and packs with unsigned saturation.
 */
#define YSCALEYUV2YV12X(x) \
"xorl %%eax, %%eax \n\t"\
"pxor %%mm3, %%mm3 \n\t"\
"pxor %%mm4, %%mm4 \n\t"\
"movl %0, %%edx \n\t"\
".balign 16 \n\t" /* FIXME Unroll? */\
"1: \n\t"\
"movl (%1, %%edx, 4), %%esi \n\t"\
"movq (%2, %%edx, 8), %%mm0 \n\t" /* filterCoeff */\
"movq " #x "(%%esi, %%eax, 2), %%mm2 \n\t" /* srcData */\
"movq 8+" #x "(%%esi, %%eax, 2), %%mm5 \n\t" /* srcData */\
"pmulhw %%mm0, %%mm2 \n\t"\
"pmulhw %%mm0, %%mm5 \n\t"\
"paddw %%mm2, %%mm3 \n\t"\
"paddw %%mm5, %%mm4 \n\t"\
"addl $1, %%edx \n\t"\
" jnz 1b \n\t"\
"psraw $3, %%mm3 \n\t"\
"psraw $3, %%mm4 \n\t"\
"packuswb %%mm4, %%mm3 \n\t"\
MOVNTQ(%%mm3, (%3, %%eax))\
"addl $8, %%eax \n\t"\
"cmpl %4, %%eax \n\t"\
"pxor %%mm3, %%mm3 \n\t"\
"pxor %%mm4, %%mm4 \n\t"\
"movl %0, %%edx \n\t"\
"jb 1b \n\t"
/*
 * 1-tap (unfiltered) 16bit -> 8bit plane conversion: just >>7 with
 * unsigned saturation. Operands (see RENAME(yuv2yuv1)):
 * %0 = src end pointer, %1 = dest end pointer, %2 = -width
 * (eax counts up from -width to 0; jnc exits when the add wraps).
 */
#define YSCALEYUV2YV121 \
"movl %2, %%eax \n\t"\
".balign 16 \n\t" /* FIXME Unroll? */\
"1: \n\t"\
"movq (%0, %%eax, 2), %%mm0 \n\t"\
"movq 8(%0, %%eax, 2), %%mm1 \n\t"\
"psraw $7, %%mm0 \n\t"\
"psraw $7, %%mm1 \n\t"\
"packuswb %%mm1, %%mm0 \n\t"\
MOVNTQ(%%mm0, (%1, %%eax))\
"addl $8, %%eax \n\t"\
"jnc 1b \n\t"
  91. /*
  92. :: "m" (-lumFilterSize), "m" (-chrFilterSize),
  93. "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
  94. "r" (dest), "m" (dstW),
  95. "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
  96. : "%eax", "%ebx", "%ecx", "%edx", "%esi"
  97. */
/*
 * Multi-tap vertical scale + YUV->RGB conversion core.
 * Operand layout (documented in the comment block above and at the
 * RENAME(yuv2rgbX) call sites): %0 = -lumFilterSize, %1 = -chrFilterSize,
 * %2/%3 = lum/chr MMX coefficient arrays (end pointers), %4 = dest,
 * %5 = dstW, %6/%7 = lum/chr source-pointer arrays (end pointers).
 * First inner loop accumulates chroma (U in mm3, V in mm4), second
 * accumulates luma (Y1 in mm1, Y2 in mm7), then applies the fixed-point
 * colour matrix (ub/ug/vg/vr/y coefficients). On exit:
 * mm2=B(packed), mm4=G(packed), mm5=R(packed), mm7=0 — the layout the
 * WRITEBGR* macros below expect.
 */
#define YSCALEYUV2RGBX \
"xorl %%eax, %%eax \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movl %1, %%edx \n\t" /* -chrFilterSize */\
"movl %3, %%ebx \n\t" /* chrMmxFilter+lumFilterSize */\
"movl %7, %%ecx \n\t" /* chrSrc+lumFilterSize */\
"pxor %%mm3, %%mm3 \n\t"\
"pxor %%mm4, %%mm4 \n\t"\
"2: \n\t"\
"movl (%%ecx, %%edx, 4), %%esi \n\t"\
"movq (%%ebx, %%edx, 8), %%mm0 \n\t" /* filterCoeff */\
"movq (%%esi, %%eax), %%mm2 \n\t" /* UsrcData */\
"movq 4096(%%esi, %%eax), %%mm5 \n\t" /* VsrcData */\
"pmulhw %%mm0, %%mm2 \n\t"\
"pmulhw %%mm0, %%mm5 \n\t"\
"paddw %%mm2, %%mm3 \n\t"\
"paddw %%mm5, %%mm4 \n\t"\
"addl $1, %%edx \n\t"\
" jnz 2b \n\t"\
\
"movl %0, %%edx \n\t" /* -lumFilterSize */\
"movl %2, %%ebx \n\t" /* lumMmxFilter+lumFilterSize */\
"movl %6, %%ecx \n\t" /* lumSrc+lumFilterSize */\
"pxor %%mm1, %%mm1 \n\t"\
"pxor %%mm7, %%mm7 \n\t"\
"2: \n\t"\
"movl (%%ecx, %%edx, 4), %%esi \n\t"\
"movq (%%ebx, %%edx, 8), %%mm0 \n\t" /* filterCoeff */\
"movq (%%esi, %%eax, 2), %%mm2 \n\t" /* Y1srcData */\
"movq 8(%%esi, %%eax, 2), %%mm5 \n\t" /* Y2srcData */\
"pmulhw %%mm0, %%mm2 \n\t"\
"pmulhw %%mm0, %%mm5 \n\t"\
"paddw %%mm2, %%mm1 \n\t"\
"paddw %%mm5, %%mm7 \n\t"\
"addl $1, %%edx \n\t"\
" jnz 2b \n\t"\
\
"psubw "MANGLE(w400)", %%mm3 \n\t" /* (U-128)8*/\
"psubw "MANGLE(w400)", %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw "MANGLE(ugCoeff)", %%mm3\n\t"\
"pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"pmulhw "MANGLE(ubCoeff)", %%mm2\n\t"\
"pmulhw "MANGLE(vrCoeff)", %%mm5\n\t"\
"psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
"psubw "MANGLE(w80)", %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
"pmulhw "MANGLE(yCoeff)", %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
/*
 * 2-tap (bilinear between two lines) YUV->RGB with full-chroma
 * (per-pixel, non-subsampled) interpolation. Operands (see the
 * RENAME(yuv2rgb2) call sites): %0=buf0, %1=buf1, %2=uvbuf0, %3=uvbuf1,
 * %4=dest, %5=dstW, %6=yalpha1, %7=uvalpha1.
 * Blends buf0/buf1 by yalpha1 and uvbuf0/uvbuf1 by uvalpha1, then applies
 * the colour matrix. Leaves packed B in mm3, G in mm1, R in mm0 for the
 * caller's own store sequence.
 */
#define FULL_YSCALEYUV2RGB \
"pxor %%mm7, %%mm7 \n\t"\
"movd %6, %%mm6 \n\t" /*yalpha1*/\
"punpcklwd %%mm6, %%mm6 \n\t"\
"punpcklwd %%mm6, %%mm6 \n\t"\
"movd %7, %%mm5 \n\t" /*uvalpha1*/\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"xorl %%eax, %%eax \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
"movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
"movq (%2, %%eax,2), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax,2), %%mm3 \n\t" /* uvbuf1[eax]*/\
"psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
"psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
"pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"movq 4096(%2, %%eax,2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
"psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
"paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
"movq 4096(%3, %%eax,2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
"psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
"psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
"psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\
"pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
\
\
"pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"pmulhw "MANGLE(ubCoeff)", %%mm3\n\t"\
"psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
"pmulhw "MANGLE(ugCoeff)", %%mm2\n\t"\
"paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
"psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\
\
\
"movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
"pmulhw "MANGLE(vrCoeff)", %%mm0\n\t"\
"pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
"paddw %%mm1, %%mm3 \n\t" /* B*/\
"paddw %%mm1, %%mm0 \n\t" /* R*/\
"packuswb %%mm3, %%mm3 \n\t"\
\
"packuswb %%mm0, %%mm0 \n\t"\
"paddw %%mm4, %%mm2 \n\t"\
"paddw %%mm2, %%mm1 \n\t" /* G*/\
\
"packuswb %%mm1, %%mm1 \n\t"
/*
 * 2-tap (bilinear) YUV->RGB with 2x-subsampled chroma: blends two luma
 * lines by yalpha1 and two chroma lines by uvalpha1, converts, and leaves
 * mm2=B, mm4=G, mm5=R (packed), mm7=0 for the WRITEBGR* macros.
 * The broadcast alpha words are spilled to asm_yalpha1/asm_uvalpha1
 * because all eight MMX registers are needed in the loop.
 * Expected operands: %0=buf0, %1=buf1, %2=uvbuf0, %3=uvbuf1, %6=yalpha1,
 * %7=uvalpha1 (callers outside this view supply %4/%5 for the writer).
 */
#define YSCALEYUV2RGB \
"movd %6, %%mm6 \n\t" /*yalpha1*/\
"punpcklwd %%mm6, %%mm6 \n\t"\
"punpcklwd %%mm6, %%mm6 \n\t"\
"movq %%mm6, "MANGLE(asm_yalpha1)"\n\t"\
"movd %7, %%mm5 \n\t" /*uvalpha1*/\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"movq %%mm5, "MANGLE(asm_uvalpha1)"\n\t"\
"xorl %%eax, %%eax \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
"movq 4096(%2, %%eax), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
"movq 4096(%3, %%eax), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
"psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
"psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
"movq "MANGLE(asm_uvalpha1)", %%mm0\n\t"\
"pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
"pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
"psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
"psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
"paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
"psubw "MANGLE(w400)", %%mm3 \n\t" /* (U-128)8*/\
"psubw "MANGLE(w400)", %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw "MANGLE(ugCoeff)", %%mm3\n\t"\
"pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
"movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
"movq 8(%0, %%eax, 2), %%mm6 \n\t" /*buf0[eax]*/\
"movq 8(%1, %%eax, 2), %%mm7 \n\t" /*buf1[eax]*/\
"psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
"psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
"pmulhw "MANGLE(asm_yalpha1)", %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"pmulhw "MANGLE(asm_yalpha1)", %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
"paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
"pmulhw "MANGLE(ubCoeff)", %%mm2\n\t"\
"pmulhw "MANGLE(vrCoeff)", %%mm5\n\t"\
"psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
"psubw "MANGLE(w80)", %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
"pmulhw "MANGLE(yCoeff)", %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
/*
 * 1-tap YUV->RGB: reads only buf0/uvbuf0 (no vertical blending —
 * the "1" variant for the case yalpha/uvalpha need no interpolation).
 * Leaves mm2=B, mm4=G, mm5=R (packed), mm7=0 for WRITEBGR*.
 * Expected operands: %0=buf0, %2=uvbuf0 (V plane at uvbuf0+4096 bytes).
 */
#define YSCALEYUV2RGB1 \
"xorl %%eax, %%eax \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, %%eax), %%mm3 \n\t" /* uvbuf0[eax]*/\
"movq 4096(%2, %%eax), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
"psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
"psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
"psubw "MANGLE(w400)", %%mm3 \n\t" /* (U-128)8*/\
"psubw "MANGLE(w400)", %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw "MANGLE(ugCoeff)", %%mm3\n\t"\
"pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"movq (%0, %%eax, 2), %%mm1 \n\t" /*buf0[eax]*/\
"movq 8(%0, %%eax, 2), %%mm7 \n\t" /*buf0[eax]*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"pmulhw "MANGLE(ubCoeff)", %%mm2\n\t"\
"pmulhw "MANGLE(vrCoeff)", %%mm5\n\t"\
"psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
"psubw "MANGLE(w80)", %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
"pmulhw "MANGLE(yCoeff)", %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
// do vertical chrominance interpolation
/*
 * Like YSCALEYUV2RGB1 but averages the chroma of uvbuf0 and uvbuf1
 * (paddw then psrlw $5) instead of reading uvbuf0 alone; luma still
 * comes from buf0 only. Leaves mm2=B, mm4=G, mm5=R, mm7=0 for WRITEBGR*.
 */
#define YSCALEYUV2RGB1b \
"xorl %%eax, %%eax \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
"movq 4096(%2, %%eax), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
"movq 4096(%3, %%eax), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
"paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
"psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
"psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
"psubw "MANGLE(w400)", %%mm3 \n\t" /* (U-128)8*/\
"psubw "MANGLE(w400)", %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw "MANGLE(ugCoeff)", %%mm3\n\t"\
"pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"movq (%0, %%eax, 2), %%mm1 \n\t" /*buf0[eax]*/\
"movq 8(%0, %%eax, 2), %%mm7 \n\t" /*buf0[eax]*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"pmulhw "MANGLE(ubCoeff)", %%mm2\n\t"\
"pmulhw "MANGLE(vrCoeff)", %%mm5\n\t"\
"psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
"psubw "MANGLE(w80)", %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
"pmulhw "MANGLE(yCoeff)", %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
/*
 * Interleave the packed B/G/R bytes (mm2/mm4/mm5, mm7=0 — the layout
 * produced by the YSCALEYUV2RGB* macros) into eight 32bit xRGB pixels
 * and store them at %4 + eax*4; loops back to label 1 until eax >= %5.
 */
#define WRITEBGR32 \
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq %%mm2, %%mm1 \n\t" /* B */\
"movq %%mm5, %%mm6 \n\t" /* R */\
"punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
"punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
"punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
"punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
"movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
"movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
"punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
"punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
"punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
MOVNTQ(%%mm0, (%4, %%eax, 4))\
MOVNTQ(%%mm2, 8(%4, %%eax, 4))\
MOVNTQ(%%mm1, 16(%4, %%eax, 4))\
MOVNTQ(%%mm3, 24(%4, %%eax, 4))\
\
"addl $8, %%eax \n\t"\
"cmpl %5, %%eax \n\t"\
" jb 1b \n\t"
/*
 * Pack mm2=B, mm4=G, mm5=R (mm7=0) into 5-6-5 16bpp pixels
 * (bF8 masks to 5 significant bits, bFC to 6) and store eight pixels
 * at %4 + eax*2; loops to label 1 until eax >= %5.
 */
#define WRITEBGR16 \
"pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
"pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
"pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
"psrlq $3, %%mm2 \n\t"\
\
"movq %%mm2, %%mm1 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
\
"punpcklbw %%mm7, %%mm3 \n\t"\
"punpcklbw %%mm5, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm4 \n\t"\
"punpckhbw %%mm5, %%mm1 \n\t"\
\
"psllq $3, %%mm3 \n\t"\
"psllq $3, %%mm4 \n\t"\
\
"por %%mm3, %%mm2 \n\t"\
"por %%mm4, %%mm1 \n\t"\
\
MOVNTQ(%%mm2, (%4, %%eax, 2))\
MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
\
"addl $8, %%eax \n\t"\
"cmpl %5, %%eax \n\t"\
" jb 1b \n\t"
/*
 * Pack mm2=B, mm4=G, mm5=R (mm7=0) into 5-5-5 15bpp pixels
 * (all channels masked to 5 bits via bF8) and store eight pixels at
 * %4 + eax*2; loops to label 1 until eax >= %5.
 */
#define WRITEBGR15 \
"pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
"pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
"pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
"psrlq $3, %%mm2 \n\t"\
"psrlq $1, %%mm5 \n\t"\
\
"movq %%mm2, %%mm1 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
\
"punpcklbw %%mm7, %%mm3 \n\t"\
"punpcklbw %%mm5, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm4 \n\t"\
"punpckhbw %%mm5, %%mm1 \n\t"\
\
"psllq $2, %%mm3 \n\t"\
"psllq $2, %%mm4 \n\t"\
\
"por %%mm3, %%mm2 \n\t"\
"por %%mm4, %%mm1 \n\t"\
\
MOVNTQ(%%mm2, (%4, %%eax, 2))\
MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
\
"addl $8, %%eax \n\t"\
"cmpl %5, %%eax \n\t"\
" jb 1b \n\t"
/*
 * Obsolete 24bpp writer kept for reference: expands to xRGB quads like
 * WRITEBGR32, then shifts/masks the padding byte out to emit 3 quadwords
 * (24 bytes = 8 pixels) at ebx. Superseded by WRITEBGR24MMX/MMX2 below.
 * ebx must point at the output; advances ebx by 24 per iteration.
 */
#define WRITEBGR24OLD \
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq %%mm2, %%mm1 \n\t" /* B */\
"movq %%mm5, %%mm6 \n\t" /* R */\
"punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
"punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
"punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
"punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
"movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
"movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
"punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
"punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
"punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
"movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
"psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
"pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 0 */\
"pand "MANGLE(bm11111000)", %%mm0\n\t" /* 00RGB000 0.5 */\
"por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
"movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
"psllq $48, %%mm2 \n\t" /* GB000000 1 */\
"por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
\
"movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
"psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
"por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
"pand "MANGLE(bm00001111)", %%mm2\n\t" /* 0000RGBR 1 */\
"movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
"psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
"pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 2 */\
"pand "MANGLE(bm11111000)", %%mm1\n\t" /* 00RGB000 2.5 */\
"por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
"movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
"psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
"por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
\
"psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
"movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
"psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
"pand "MANGLE(bm00000111)", %%mm5\n\t" /* 00000RGB 3 */\
"pand "MANGLE(bm11111000)", %%mm3\n\t" /* 00RGB000 3.5 */\
"por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
"psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
"por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
\
MOVNTQ(%%mm0, (%%ebx))\
MOVNTQ(%%mm2, 8(%%ebx))\
MOVNTQ(%%mm3, 16(%%ebx))\
"addl $24, %%ebx \n\t"\
\
"addl $8, %%eax \n\t"\
"cmpl %5, %%eax \n\t"\
" jb 1b \n\t"
/*
 * Plain-MMX 24bpp writer: builds four 0RGBRGB0 quadwords via
 * psllq/punpckhdq, then shifts/ORs them into three packed quadwords
 * (24 bytes = 8 pixels) stored at ebx; advances ebx by 24 per iteration
 * and loops to label 1 until eax >= %5. Clobbers mm7 (no longer 0 after).
 */
#define WRITEBGR24MMX \
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq %%mm2, %%mm1 \n\t" /* B */\
"movq %%mm5, %%mm6 \n\t" /* R */\
"punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
"punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
"punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
"punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
"movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
"movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
"punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
"punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
"punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
"movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
"movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
"movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
"movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
\
"psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
"psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
"psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
"psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
\
"punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
"punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
"punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
"punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
\
"psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
"movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
"psllq $40, %%mm2 \n\t" /* GB000000 1 */\
"por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
MOVNTQ(%%mm0, (%%ebx))\
\
"psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
"movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
"psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
"por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
MOVNTQ(%%mm6, 8(%%ebx))\
\
"psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
"psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
"por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
MOVNTQ(%%mm5, 16(%%ebx))\
\
"addl $24, %%ebx \n\t"\
\
"addl $8, %%eax \n\t"\
"cmpl %5, %%eax \n\t"\
" jb 1b \n\t"
/*
 * MMX2 24bpp writer: uses pshufw to replicate channel bytes into
 * position and the M24A/M24B/M24C masks to select them, producing the
 * same three packed quadwords (8 pixels) at ebx as WRITEBGR24MMX but
 * with fewer shift/or steps. Advances ebx by 24; loops until eax >= %5.
 */
#define WRITEBGR24MMX2 \
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq "MANGLE(M24A)", %%mm0 \n\t"\
"movq "MANGLE(M24C)", %%mm7 \n\t"\
"pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
"pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
"pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
\
"pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
"pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
"pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
\
"psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
"por %%mm1, %%mm6 \n\t"\
"por %%mm3, %%mm6 \n\t"\
MOVNTQ(%%mm6, (%%ebx))\
\
"psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
"pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
"pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
"pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
\
"pand "MANGLE(M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
"pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
"pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
\
"por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
"por %%mm3, %%mm6 \n\t"\
MOVNTQ(%%mm6, 8(%%ebx))\
\
"pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
"pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
"pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
\
"pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
"pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
"pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
\
"por %%mm1, %%mm3 \n\t"\
"por %%mm3, %%mm6 \n\t"\
MOVNTQ(%%mm6, 16(%%ebx))\
\
"addl $24, %%ebx \n\t"\
\
"addl $8, %%eax \n\t"\
"cmpl %5, %%eax \n\t"\
" jb 1b \n\t"
/* Select the best available 24bpp writer for this compilation pass. */
#ifdef HAVE_MMX2
#undef WRITEBGR24
#define WRITEBGR24 WRITEBGR24MMX2
#else
#undef WRITEBGR24
#define WRITEBGR24 WRITEBGR24MMX
#endif
/**
 * Multi-tap vertical scale to planar YV12.
 * lumSrc/chrSrc: arrays of pointers to 16bit intermediate lines;
 * lumFilter/chrFilter + *MmxFilter: per-tap coefficients (plain and
 * MMX-broadcast forms); dest/uDest/vDest: 8bit output planes
 * (uDest==NULL skips chroma); dstW: output width in pixels.
 * The asm operands pass -filterSize and end pointers so the inner loop
 * can count an index register up to zero (see YSCALEYUV2YV12X); the V
 * plane lives 4096 bytes (2048 int16s) after U in the chroma buffer.
 */
static inline void RENAME(yuv2yuvX)(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW,
int16_t * lumMmxFilter, int16_t * chrMmxFilter)
{
#ifdef HAVE_MMX
if(uDest != NULL)
{
asm volatile(
YSCALEYUV2YV12X(0)
:: "m" (-chrFilterSize), "r" (chrSrc+chrFilterSize),
"r" (chrMmxFilter+chrFilterSize*4), "r" (uDest), "m" (dstW>>1)
: "%eax", "%edx", "%esi"
);
asm volatile(
YSCALEYUV2YV12X(4096)
:: "m" (-chrFilterSize), "r" (chrSrc+chrFilterSize),
"r" (chrMmxFilter+chrFilterSize*4), "r" (vDest), "m" (dstW>>1)
: "%eax", "%edx", "%esi"
);
}
asm volatile(
YSCALEYUV2YV12X(0)
:: "m" (-lumFilterSize), "r" (lumSrc+lumFilterSize),
"r" (lumMmxFilter+lumFilterSize*4), "r" (dest), "m" (dstW)
: "%eax", "%edx", "%esi"
);
#else
/* portable fallback defined elsewhere in the project */
yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
chrFilter, chrSrc, chrFilterSize,
dest, uDest, vDest, dstW);
#endif
}
  664. static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
  665. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW)
  666. {
  667. #ifdef HAVE_MMX
  668. if(uDest != NULL)
  669. {
  670. asm volatile(
  671. YSCALEYUV2YV121
  672. :: "r" (chrSrc + (dstW>>1)), "r" (uDest + (dstW>>1)),
  673. "g" (-(dstW>>1))
  674. : "%eax"
  675. );
  676. asm volatile(
  677. YSCALEYUV2YV121
  678. :: "r" (chrSrc + 2048 + (dstW>>1)), "r" (vDest + (dstW>>1)),
  679. "g" (-(dstW>>1))
  680. : "%eax"
  681. );
  682. }
  683. asm volatile(
  684. YSCALEYUV2YV121
  685. :: "r" (lumSrc + dstW), "r" (dest + dstW),
  686. "g" (-dstW)
  687. : "%eax"
  688. );
  689. #else
  690. //FIXME Optimize (just quickly writen not opti..)
  691. //FIXME replace MINMAX with LUTs
  692. int i;
  693. for(i=0; i<dstW; i++)
  694. {
  695. int val= lumSrc[i]>>7;
  696. dest[i]= MIN(MAX(val>>19, 0), 255);
  697. }
  698. if(uDest != NULL)
  699. for(i=0; i<(dstW>>1); i++)
  700. {
  701. int u=chrSrc[i]>>7;
  702. int v=chrSrc[i + 2048]>>7;
  703. uDest[i]= MIN(MAX(u>>19, 0), 255);
  704. vDest[i]= MIN(MAX(v>>19, 0), 255);
  705. }
  706. #endif
  707. }
/**
 * Multi-tap vertical scale of YV12 intermediate lines directly to packed
 * RGB (BGR32/BGR24/BGR15/BGR16). Dispatches on dstFormat; each branch runs
 * the YSCALEYUV2RGBX conversion core followed by the matching WRITEBGR*
 * store macro. Without HAVE_MMX, falls back to the portable yuv2rgbXinC.
 * Operand layout for the asm blocks: %0=-lumFilterSize, %1=-chrFilterSize,
 * %2/%3 = MMX coefficient end pointers, %4=dest, %5=dstW, %6/%7 = source
 * pointer-array end pointers.
 */
static inline void RENAME(yuv2rgbX)(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
uint8_t *dest, int dstW, int dstFormat, int16_t * lumMmxFilter, int16_t * chrMmxFilter)
{
/* if(flags&SWS_FULL_UV_IPOL)
{
//FIXME
}//FULL_UV_IPOL
else*/
{
#ifdef HAVE_MMX
if(dstFormat == IMGFMT_BGR32) //FIXME untested
{
asm volatile(
YSCALEYUV2RGBX
WRITEBGR32
:: "m" (-lumFilterSize), "m" (-chrFilterSize),
"m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
"r" (dest), "m" (dstW),
"m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
: "%eax", "%ebx", "%ecx", "%edx", "%esi"
);
}
else if(dstFormat == IMGFMT_BGR24) //FIXME untested
{
asm volatile(
YSCALEYUV2RGBX
/* WRITEBGR24 stores through ebx: set ebx = dest + 3*eax */
"leal (%%eax, %%eax, 2), %%ebx \n\t" //FIXME optimize
"addl %4, %%ebx \n\t"
WRITEBGR24
:: "m" (-lumFilterSize), "m" (-chrFilterSize),
"m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
"r" (dest), "m" (dstW),
"m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
: "%eax", "%ebx", "%ecx", "%edx", "%esi"
);
}
else if(dstFormat==IMGFMT_BGR15)
{
asm volatile(
YSCALEYUV2RGBX
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
"paddusb "MANGLE(g5Dither)", %%mm4\n\t"
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
WRITEBGR15
:: "m" (-lumFilterSize), "m" (-chrFilterSize),
"m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
"r" (dest), "m" (dstW),
"m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
: "%eax", "%ebx", "%ecx", "%edx", "%esi"
);
}
else if(dstFormat==IMGFMT_BGR16)
{
asm volatile(
YSCALEYUV2RGBX
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
"paddusb "MANGLE(g6Dither)", %%mm4\n\t"
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
WRITEBGR16
:: "m" (-lumFilterSize), "m" (-chrFilterSize),
"m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
"r" (dest), "m" (dstW),
"m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
: "%eax", "%ebx", "%ecx", "%edx", "%esi"
);
}
#else
yuv2rgbXinC(lumFilter, lumSrc, lumFilterSize,
chrFilter, chrSrc, chrFilterSize,
dest, dstW, dstFormat);
#endif
} //!FULL_UV_IPOL
}
/**
 * Vertical bilinear scale YV12 to RGB.
 *
 * Blends two luma lines (buf0/buf1, 12-bit weight yalpha) and two chroma
 * lines (uvbuf0/uvbuf1, weight uvalpha), then converts the result to the
 * packed RGB variant selected by dstFormat (IMGFMT_BGR32/24/16/15).
 * V samples live at a fixed +2048 element offset inside each chroma buffer.
 * With SWS_FULL_CHR_H_INT set, chroma is used at full horizontal resolution
 * (one U/V pair per output pixel); otherwise one pair is shared per 2 pixels.
 * NOTE(review): the x^4095 trick below equals 4095-x only for x in [0,4095];
 * weights are presumably clamped to that range by the caller — confirm.
 */
static inline void RENAME(yuv2rgb2)(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
                                    uint8_t *dest, int dstW, int yalpha, int uvalpha, int dstFormat, int flags)
{
    /* complementary blend weights (== 4095 - alpha for in-range alpha) */
    int yalpha1=yalpha^4095;
    int uvalpha1=uvalpha^4095;

    if(flags&SWS_FULL_CHR_H_INT)
    {
        /* full-resolution chroma path */
#ifdef HAVE_MMX
        if(dstFormat==IMGFMT_BGR32)
        {
            asm volatile(
                FULL_YSCALEYUV2RGB
                "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
                "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
                "movq %%mm3, %%mm1 \n\t"
                "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
                "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0

                MOVNTQ(%%mm3, (%4, %%eax, 4))
                MOVNTQ(%%mm1, 8(%4, %%eax, 4))

                "addl $4, %%eax \n\t"
                "cmpl %5, %%eax \n\t"
                " jb 1b \n\t"

                :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
                "m" (yalpha1), "m" (uvalpha1)
                : "%eax"
            );
        }
        else if(dstFormat==IMGFMT_BGR24)
        {
            asm volatile(
                FULL_YSCALEYUV2RGB

                // lsb ... msb
                "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
                "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0

                "movq %%mm3, %%mm1 \n\t"
                "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
                "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0

                /* shuffle the 4x BGR0 dwords into 12 contiguous BGR bytes */
                "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
                "psrlq $8, %%mm3 \n\t" // GR0BGR00
                "pand "MANGLE(bm00000111)", %%mm2\n\t" // BGR00000
                "pand "MANGLE(bm11111000)", %%mm3\n\t" // 000BGR00
                "por %%mm2, %%mm3 \n\t" // BGRBGR00
                "movq %%mm1, %%mm2 \n\t"
                "psllq $48, %%mm1 \n\t" // 000000BG
                "por %%mm1, %%mm3 \n\t" // BGRBGRBG

                "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
                "psrld $16, %%mm2 \n\t" // R000R000
                "psrlq $24, %%mm1 \n\t" // 0BGR0000
                "por %%mm2, %%mm1 \n\t" // RBGRR000

                /* dest pointer for 3 bytes/pixel: %4 + eax, indexed with *2 below */
                "movl %4, %%ebx \n\t"
                "addl %%eax, %%ebx \n\t"

#ifdef HAVE_MMX2
                //FIXME Alignment
                "movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
                "movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
#else
                "movd %%mm3, (%%ebx, %%eax, 2) \n\t"
                "psrlq $32, %%mm3 \n\t"
                "movd %%mm3, 4(%%ebx, %%eax, 2) \n\t"
                "movd %%mm1, 8(%%ebx, %%eax, 2) \n\t"
#endif
                "addl $4, %%eax \n\t"
                "cmpl %5, %%eax \n\t"
                " jb 1b \n\t"

                :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
                "m" (yalpha1), "m" (uvalpha1)
                : "%eax", "%ebx"
            );
        }
        else if(dstFormat==IMGFMT_BGR15)
        {
            asm volatile(
                FULL_YSCALEYUV2RGB
#ifdef DITHER1XBPP
                "paddusb "MANGLE(g5Dither)", %%mm1\n\t"
                "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
                "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
#endif
                "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
                "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
                "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R

                /* pack to 1:5:5:5 */
                "psrlw $3, %%mm3 \n\t"
                "psllw $2, %%mm1 \n\t"
                "psllw $7, %%mm0 \n\t"
                "pand "MANGLE(g15Mask)", %%mm1 \n\t"
                "pand "MANGLE(r15Mask)", %%mm0 \n\t"

                "por %%mm3, %%mm1 \n\t"
                "por %%mm1, %%mm0 \n\t"

                MOVNTQ(%%mm0, (%4, %%eax, 2))

                "addl $4, %%eax \n\t"
                "cmpl %5, %%eax \n\t"
                " jb 1b \n\t"

                :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
                "m" (yalpha1), "m" (uvalpha1)
                : "%eax"
            );
        }
        else if(dstFormat==IMGFMT_BGR16)
        {
            asm volatile(
                FULL_YSCALEYUV2RGB
#ifdef DITHER1XBPP
                "paddusb "MANGLE(g6Dither)", %%mm1\n\t"
                "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
                "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
#endif
                "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
                "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
                "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R

                /* pack to 5:6:5 */
                "psrlw $3, %%mm3 \n\t"
                "psllw $3, %%mm1 \n\t"
                "psllw $8, %%mm0 \n\t"
                "pand "MANGLE(g16Mask)", %%mm1 \n\t"
                "pand "MANGLE(r16Mask)", %%mm0 \n\t"

                "por %%mm3, %%mm1 \n\t"
                "por %%mm1, %%mm0 \n\t"

                MOVNTQ(%%mm0, (%4, %%eax, 2))

                "addl $4, %%eax \n\t"
                "cmpl %5, %%eax \n\t"
                " jb 1b \n\t"

                :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
                "m" (yalpha1), "m" (uvalpha1)
                : "%eax"
            );
        }
#else
        /* plain C fallbacks: one pixel per iteration, per-pixel chroma */
        if(dstFormat==IMGFMT_BGR32)
        {
            int i;
#ifdef WORDS_BIGENDIAN
            dest++; /* skip the pad/alpha byte on big-endian layouts */
#endif
            for(i=0;i<dstW;i++){
                // vertical linear interpolation && yuv2rgb in a single step:
                int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
                int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
                dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
                dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
                dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
                dest+= 4;
            }
        }
        else if(dstFormat==IMGFMT_BGR24)
        {
            int i;
            for(i=0;i<dstW;i++){
                // vertical linear interpolation && yuv2rgb in a single step:
                int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
                int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
                dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
                dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
                dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
                dest+= 3;
            }
        }
        else if(dstFormat==IMGFMT_BGR16)
        {
            int i;
            for(i=0;i<dstW;i++){
                // vertical linear interpolation && yuv2rgb in a single step:
                int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
                int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);

                /* the clip_table16* lookups deliver pre-shifted 5/6/5 fields */
                ((uint16_t*)dest)[i] =
                    clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
                    clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
                    clip_table16r[(Y + yuvtab_3343[V]) >>13];
            }
        }
        else if(dstFormat==IMGFMT_BGR15)
        {
            int i;
            for(i=0;i<dstW;i++){
                // vertical linear interpolation && yuv2rgb in a single step:
                int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
                int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);

                ((uint16_t*)dest)[i] =
                    clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
                    clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
                    clip_table15r[(Y + yuvtab_3343[V]) >>13];
            }
        }
#endif
    }//FULL_UV_IPOL
    else
    {
        /* half-resolution chroma path: one U/V pair per 2 output pixels */
#ifdef HAVE_MMX
        if(dstFormat==IMGFMT_BGR32)
        {
            asm volatile(
                YSCALEYUV2RGB
                WRITEBGR32

                :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
                "m" (yalpha1), "m" (uvalpha1)
                : "%eax"
            );
        }
        else if(dstFormat==IMGFMT_BGR24)
        {
            asm volatile(
                "movl %4, %%ebx \n\t"
                YSCALEYUV2RGB
                WRITEBGR24

                :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
                "m" (yalpha1), "m" (uvalpha1)
                : "%eax", "%ebx"
            );
        }
        else if(dstFormat==IMGFMT_BGR15)
        {
            asm volatile(
                YSCALEYUV2RGB
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
                "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
                "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
                WRITEBGR15

                :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
                "m" (yalpha1), "m" (uvalpha1)
                : "%eax"
            );
        }
        else if(dstFormat==IMGFMT_BGR16)
        {
            asm volatile(
                YSCALEYUV2RGB
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
                "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
                "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
                WRITEBGR16

                :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
                "m" (yalpha1), "m" (uvalpha1)
                : "%eax"
            );
        }
#else
        /* C fallbacks: two pixels per iteration sharing one chroma sample */
        if(dstFormat==IMGFMT_BGR32)
        {
            int i;
#ifdef WORDS_BIGENDIAN
            dest++; /* skip the pad/alpha byte on big-endian layouts */
#endif
            for(i=0; i<dstW-1; i+=2){
                // vertical linear interpolation && yuv2rgb in a single step:
                int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
                int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
                int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);

                int Cb= yuvtab_40cf[U];
                int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
                int Cr= yuvtab_3343[V];

                dest[4*i+0]=clip_table[((Y1 + Cb) >>13)];
                dest[4*i+1]=clip_table[((Y1 + Cg) >>13)];
                dest[4*i+2]=clip_table[((Y1 + Cr) >>13)];

                dest[4*i+4]=clip_table[((Y2 + Cb) >>13)];
                dest[4*i+5]=clip_table[((Y2 + Cg) >>13)];
                dest[4*i+6]=clip_table[((Y2 + Cr) >>13)];
            }
        }
        else if(dstFormat==IMGFMT_BGR24)
        {
            int i;
            for(i=0; i<dstW-1; i+=2){
                // vertical linear interpolation && yuv2rgb in a single step:
                int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
                int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
                int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);

                int Cb= yuvtab_40cf[U];
                int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
                int Cr= yuvtab_3343[V];

                dest[0]=clip_table[((Y1 + Cb) >>13)];
                dest[1]=clip_table[((Y1 + Cg) >>13)];
                dest[2]=clip_table[((Y1 + Cr) >>13)];

                dest[3]=clip_table[((Y2 + Cb) >>13)];
                dest[4]=clip_table[((Y2 + Cg) >>13)];
                dest[5]=clip_table[((Y2 + Cr) >>13)];
                dest+=6;
            }
        }
        else if(dstFormat==IMGFMT_BGR16)
        {
            int i;
#ifdef DITHER1XBPP
            /* 2x2 ordered-dither offsets; the XORs below flip each static
               value between its two pattern rows on every call (per line) */
            static int ditherb1=1<<14;
            static int ditherg1=1<<13;
            static int ditherr1=2<<14;
            static int ditherb2=3<<14;
            static int ditherg2=3<<13;
            static int ditherr2=0<<14;

            ditherb1 ^= (1^2)<<14;
            ditherg1 ^= (1^2)<<13;
            ditherr1 ^= (1^2)<<14;
            ditherb2 ^= (3^0)<<14;
            ditherg2 ^= (3^0)<<13;
            ditherr2 ^= (3^0)<<14;
#else
            const int ditherb1=0;
            const int ditherg1=0;
            const int ditherr1=0;
            const int ditherb2=0;
            const int ditherg2=0;
            const int ditherr2=0;
#endif
            for(i=0; i<dstW-1; i+=2){
                // vertical linear interpolation && yuv2rgb in a single step:
                int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
                int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
                int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);

                int Cb= yuvtab_40cf[U];
                int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
                int Cr= yuvtab_3343[V];

                ((uint16_t*)dest)[i] =
                    clip_table16b[(Y1 + Cb + ditherb1) >>13] |
                    clip_table16g[(Y1 + Cg + ditherg1) >>13] |
                    clip_table16r[(Y1 + Cr + ditherr1) >>13];

                ((uint16_t*)dest)[i+1] =
                    clip_table16b[(Y2 + Cb + ditherb2) >>13] |
                    clip_table16g[(Y2 + Cg + ditherg2) >>13] |
                    clip_table16r[(Y2 + Cr + ditherr2) >>13];
            }
        }
        else if(dstFormat==IMGFMT_BGR15)
        {
            int i;
#ifdef DITHER1XBPP
            /* same dither scheme as BGR16; green uses <<14 here because the
               5-bit green channel has the same depth as red/blue in 1:5:5:5 */
            static int ditherb1=1<<14;
            static int ditherg1=1<<14;
            static int ditherr1=2<<14;
            static int ditherb2=3<<14;
            static int ditherg2=3<<14;
            static int ditherr2=0<<14;

            ditherb1 ^= (1^2)<<14;
            ditherg1 ^= (1^2)<<14;
            ditherr1 ^= (1^2)<<14;
            ditherb2 ^= (3^0)<<14;
            ditherg2 ^= (3^0)<<14;
            ditherr2 ^= (3^0)<<14;
#else
            const int ditherb1=0;
            const int ditherg1=0;
            const int ditherr1=0;
            const int ditherb2=0;
            const int ditherg2=0;
            const int ditherr2=0;
#endif
            for(i=0; i<dstW-1; i+=2){
                // vertical linear interpolation && yuv2rgb in a single step:
                int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
                int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
                int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);

                int Cb= yuvtab_40cf[U];
                int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
                int Cr= yuvtab_3343[V];

                ((uint16_t*)dest)[i] =
                    clip_table15b[(Y1 + Cb + ditherb1) >>13] |
                    clip_table15g[(Y1 + Cg + ditherg1) >>13] |
                    clip_table15r[(Y1 + Cr + ditherr1) >>13];

                ((uint16_t*)dest)[i+1] =
                    clip_table15b[(Y2 + Cb + ditherb2) >>13] |
                    clip_table15g[(Y2 + Cg + ditherg2) >>13] |
                    clip_table15r[(Y2 + Cr + ditherr2) >>13];
            }
        }
#endif
    } //!FULL_UV_IPOL
}
/**
 * YV12 to RGB without scaling or interpolating.
 *
 * Converts one luma line (buf0) straight to RGB; chroma may still be blended
 * vertically between uvbuf0/uvbuf1 with weight uvalpha (V at +2048 offset).
 * For SWS_FULL_CHR_H_INT this simply delegates to yuv2rgb2 with yalpha=0.
 *
 * @param buf0     luma input line
 * @param uvbuf0   first chroma line (U at [0..], V at [2048..])
 * @param uvbuf1   second chroma line, same layout
 * @param dest     output scanline
 * @param dstW     number of output pixels
 * @param uvalpha  chroma blend weight for uvbuf1, 0..4095
 * @param dstFormat one of IMGFMT_BGR32/24/16/15
 * @param flags    SWS_* flags
 */
static inline void RENAME(yuv2rgb1)(uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
                                    uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags)
{
    int uvalpha1=uvalpha^4095;
    const int yalpha1=0;

    if(flags&SWS_FULL_CHR_H_INT)
    {
        /* full chroma resolution requested: reuse the bilinear routine */
        RENAME(yuv2rgb2)(buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, dstFormat, flags);
        return;
    }

#ifdef HAVE_MMX
    if( uvalpha < 2048 ) // note this is not correct (shifts chrominance by 0.5 pixels) but it's a bit faster
    {
        /* nearest-chroma variant: uses uvbuf0 only (YSCALEYUV2RGB1) */
        if(dstFormat==IMGFMT_BGR32)
        {
            asm volatile(
                YSCALEYUV2RGB1
                WRITEBGR32

                :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
                "m" (yalpha1), "m" (uvalpha1)
                : "%eax"
            );
        }
        else if(dstFormat==IMGFMT_BGR24)
        {
            asm volatile(
                "movl %4, %%ebx \n\t"
                YSCALEYUV2RGB1
                WRITEBGR24

                :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
                "m" (yalpha1), "m" (uvalpha1)
                : "%eax", "%ebx"
            );
        }
        else if(dstFormat==IMGFMT_BGR15)
        {
            asm volatile(
                YSCALEYUV2RGB1
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
                "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
                "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
                WRITEBGR15

                :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
                "m" (yalpha1), "m" (uvalpha1)
                : "%eax"
            );
        }
        else if(dstFormat==IMGFMT_BGR16)
        {
            asm volatile(
                YSCALEYUV2RGB1
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
                "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
                "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
                WRITEBGR16

                :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
                "m" (yalpha1), "m" (uvalpha1)
                : "%eax"
            );
        }
    }
    else
    {
        /* averaged-chroma variant: blends uvbuf0/uvbuf1 (YSCALEYUV2RGB1b) */
        if(dstFormat==IMGFMT_BGR32)
        {
            asm volatile(
                YSCALEYUV2RGB1b
                WRITEBGR32

                :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
                "m" (yalpha1), "m" (uvalpha1)
                : "%eax"
            );
        }
        else if(dstFormat==IMGFMT_BGR24)
        {
            asm volatile(
                "movl %4, %%ebx \n\t"
                YSCALEYUV2RGB1b
                WRITEBGR24

                :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
                "m" (yalpha1), "m" (uvalpha1)
                : "%eax", "%ebx"
            );
        }
        else if(dstFormat==IMGFMT_BGR15)
        {
            asm volatile(
                YSCALEYUV2RGB1b
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
                "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
                "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
                WRITEBGR15

                :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
                "m" (yalpha1), "m" (uvalpha1)
                : "%eax"
            );
        }
        else if(dstFormat==IMGFMT_BGR16)
        {
            asm volatile(
                YSCALEYUV2RGB1b
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
                "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
                "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
                WRITEBGR16

                :: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
                "m" (yalpha1), "m" (uvalpha1)
                : "%eax"
            );
        }
    }
#else
    //FIXME write 2 versions (for even & odd lines)
    /* C fallbacks: luma unscaled (buf0[i]>>7), chroma still blended */
    if(dstFormat==IMGFMT_BGR32)
    {
        int i;
#ifdef WORDS_BIGENDIAN
        dest++; /* skip the pad/alpha byte on big-endian layouts */
#endif
        for(i=0; i<dstW-1; i+=2){
            // vertical linear interpolation && yuv2rgb in a single step:
            int Y1=yuvtab_2568[buf0[i]>>7];
            int Y2=yuvtab_2568[buf0[i+1]>>7];
            int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
            int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);

            int Cb= yuvtab_40cf[U];
            int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
            int Cr= yuvtab_3343[V];

            dest[4*i+0]=clip_table[((Y1 + Cb) >>13)];
            dest[4*i+1]=clip_table[((Y1 + Cg) >>13)];
            dest[4*i+2]=clip_table[((Y1 + Cr) >>13)];

            dest[4*i+4]=clip_table[((Y2 + Cb) >>13)];
            dest[4*i+5]=clip_table[((Y2 + Cg) >>13)];
            dest[4*i+6]=clip_table[((Y2 + Cr) >>13)];
        }
    }
    else if(dstFormat==IMGFMT_BGR24)
    {
        int i;
        for(i=0; i<dstW-1; i+=2){
            // vertical linear interpolation && yuv2rgb in a single step:
            int Y1=yuvtab_2568[buf0[i]>>7];
            int Y2=yuvtab_2568[buf0[i+1]>>7];
            int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
            int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);

            int Cb= yuvtab_40cf[U];
            int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
            int Cr= yuvtab_3343[V];

            dest[0]=clip_table[((Y1 + Cb) >>13)];
            dest[1]=clip_table[((Y1 + Cg) >>13)];
            dest[2]=clip_table[((Y1 + Cr) >>13)];

            dest[3]=clip_table[((Y2 + Cb) >>13)];
            dest[4]=clip_table[((Y2 + Cg) >>13)];
            dest[5]=clip_table[((Y2 + Cr) >>13)];
            dest+=6;
        }
    }
    else if(dstFormat==IMGFMT_BGR16)
    {
        int i;
#ifdef DITHER1XBPP
        /* 2x2 ordered-dither offsets, toggled between pattern rows per call */
        static int ditherb1=1<<14;
        static int ditherg1=1<<13;
        static int ditherr1=2<<14;
        static int ditherb2=3<<14;
        static int ditherg2=3<<13;
        static int ditherr2=0<<14;

        ditherb1 ^= (1^2)<<14;
        ditherg1 ^= (1^2)<<13;
        ditherr1 ^= (1^2)<<14;
        ditherb2 ^= (3^0)<<14;
        ditherg2 ^= (3^0)<<13;
        ditherr2 ^= (3^0)<<14;
#else
        const int ditherb1=0;
        const int ditherg1=0;
        const int ditherr1=0;
        const int ditherb2=0;
        const int ditherg2=0;
        const int ditherr2=0;
#endif
        for(i=0; i<dstW-1; i+=2){
            // vertical linear interpolation && yuv2rgb in a single step:
            int Y1=yuvtab_2568[buf0[i]>>7];
            int Y2=yuvtab_2568[buf0[i+1]>>7];
            int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
            int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);

            int Cb= yuvtab_40cf[U];
            int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
            int Cr= yuvtab_3343[V];

            ((uint16_t*)dest)[i] =
                clip_table16b[(Y1 + Cb + ditherb1) >>13] |
                clip_table16g[(Y1 + Cg + ditherg1) >>13] |
                clip_table16r[(Y1 + Cr + ditherr1) >>13];

            ((uint16_t*)dest)[i+1] =
                clip_table16b[(Y2 + Cb + ditherb2) >>13] |
                clip_table16g[(Y2 + Cg + ditherg2) >>13] |
                clip_table16r[(Y2 + Cr + ditherr2) >>13];
        }
    }
    else if(dstFormat==IMGFMT_BGR15)
    {
        int i;
#ifdef DITHER1XBPP
        /* as BGR16, but green is 5 bits here so it uses the <<14 scale too */
        static int ditherb1=1<<14;
        static int ditherg1=1<<14;
        static int ditherr1=2<<14;
        static int ditherb2=3<<14;
        static int ditherg2=3<<14;
        static int ditherr2=0<<14;

        ditherb1 ^= (1^2)<<14;
        ditherg1 ^= (1^2)<<14;
        ditherr1 ^= (1^2)<<14;
        ditherb2 ^= (3^0)<<14;
        ditherg2 ^= (3^0)<<14;
        ditherr2 ^= (3^0)<<14;
#else
        const int ditherb1=0;
        const int ditherg1=0;
        const int ditherr1=0;
        const int ditherb2=0;
        const int ditherg2=0;
        const int ditherr2=0;
#endif
        for(i=0; i<dstW-1; i+=2){
            // vertical linear interpolation && yuv2rgb in a single step:
            int Y1=yuvtab_2568[buf0[i]>>7];
            int Y2=yuvtab_2568[buf0[i+1]>>7];
            int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
            int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);

            int Cb= yuvtab_40cf[U];
            int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
            int Cr= yuvtab_3343[V];

            ((uint16_t*)dest)[i] =
                clip_table15b[(Y1 + Cb + ditherb1) >>13] |
                clip_table15g[(Y1 + Cg + ditherg1) >>13] |
                clip_table15r[(Y1 + Cr + ditherr1) >>13];

            ((uint16_t*)dest)[i+1] =
                clip_table15b[(Y2 + Cb + ditherb2) >>13] |
                clip_table15g[(Y2 + Cg + ditherg2) >>13] |
                clip_table15r[(Y2 + Cr + ditherr2) >>13];
        }
    }
#endif
}
//FIXME the yuy2* input readers can read up to 7 samples too many
/**
 * Extract the luma plane from one YUY2 (Y U Y V ...) line.
 *
 * @param dst   output: width luma bytes
 * @param src   input: packed YUY2 line, 2 bytes per pixel
 * @param width number of luma samples to produce
 *
 * MMX path: counts %%eax up from -width to 0 (src/dst passed pre-offset by
 * +width), masking out the chroma bytes and packing 16 input bytes down to
 * 8 luma bytes per iteration.  See the FIXME above about possible overread.
 */
static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, int width)
{
#ifdef HAVE_MMX
    asm volatile(
        "movq "MANGLE(bm01010101)", %%mm2\n\t"
        "movl %0, %%eax \n\t"
        "1: \n\t"
        "movq (%1, %%eax,2), %%mm0 \n\t"
        "movq 8(%1, %%eax,2), %%mm1 \n\t"
        "pand %%mm2, %%mm0 \n\t"
        "pand %%mm2, %%mm1 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "movq %%mm0, (%2, %%eax) \n\t"
        "addl $8, %%eax \n\t"
        " js 1b \n\t"
        : : "g" (-width), "r" (src+width*2), "r" (dst+width)
        : "%eax"
    );
#else
    /* luma is every even byte of the YUY2 stream */
    int i;
    for(i=0; i<width; i++)
        dst[i]= src[2*i];
#endif
}
/**
 * Extract and vertically average the chroma from two YUY2 lines.
 *
 * @param dstU  output: width U samples (average of the two lines)
 * @param dstV  output: width V samples
 * @param src1  first YUY2 line (4 bytes per 2 pixels: Y U Y V)
 * @param src2  second YUY2 line
 * @param width number of chroma samples to produce
 *
 * NOTE(review): the MMX2/3DNow path averages with PAVGB/PAVGUSB while the C
 * fallback uses (a+b)>>1; the SIMD result can be one LSB higher — confirm
 * this difference is acceptable.
 */
static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
    asm volatile(
        "movq "MANGLE(bm01010101)", %%mm4\n\t"
        "movl %0, %%eax \n\t"
        "1: \n\t"
        "movq (%1, %%eax,4), %%mm0 \n\t"
        "movq 8(%1, %%eax,4), %%mm1 \n\t"
        "movq (%2, %%eax,4), %%mm2 \n\t"
        "movq 8(%2, %%eax,4), %%mm3 \n\t"
        PAVGB(%%mm2, %%mm0)
        PAVGB(%%mm3, %%mm1)
        "psrlw $8, %%mm0 \n\t"
        "psrlw $8, %%mm1 \n\t"
        "packuswb %%mm1, %%mm0 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "psrlw $8, %%mm0 \n\t"
        "pand %%mm4, %%mm1 \n\t"
        "packuswb %%mm0, %%mm0 \n\t"
        "packuswb %%mm1, %%mm1 \n\t"
        "movd %%mm0, (%4, %%eax) \n\t"
        "movd %%mm1, (%3, %%eax) \n\t"
        "addl $4, %%eax \n\t"
        " js 1b \n\t"
        : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
        : "%eax"
    );
#else
    /* U at byte offset 1, V at byte offset 3 of each 4-byte pixel pair */
    int i;
    for(i=0; i<width; i++)
    {
        dstU[i]= (src1[4*i + 1] + src2[4*i + 1])>>1;
        dstV[i]= (src1[4*i + 3] + src2[4*i + 3])>>1;
    }
#endif
}
  1493. static inline void RENAME(bgr32ToY)(uint8_t *dst, uint8_t *src, int width)
  1494. {
  1495. #ifdef HAVE_MMXFIXME
  1496. #else
  1497. int i;
  1498. for(i=0; i<width; i++)
  1499. {
  1500. int b= src[i*4+0];
  1501. int g= src[i*4+1];
  1502. int r= src[i*4+2];
  1503. dst[i]= ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
  1504. }
  1505. #endif
  1506. }
  1507. static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1508. {
  1509. #ifdef HAVE_MMXFIXME
  1510. #else
  1511. int i;
  1512. for(i=0; i<width; i++)
  1513. {
  1514. int b= src1[8*i + 0] + src1[8*i + 4] + src2[8*i + 0] + src2[8*i + 4];
  1515. int g= src1[8*i + 1] + src1[8*i + 5] + src2[8*i + 1] + src2[8*i + 5];
  1516. int r= src1[8*i + 2] + src1[8*i + 6] + src2[8*i + 2] + src2[8*i + 6];
  1517. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1518. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1519. }
  1520. #endif
  1521. }
/**
 * Convert one BGR24 line to luma.
 *
 * @param dst   output: width luma bytes
 * @param src   input: packed BGR24 line, 3 bytes per pixel
 * @param width number of pixels
 *
 * MMX path: 8 pixels per iteration; %%ebx walks the 3*width source in steps
 * of 24 bytes, %%eax walks the destination from -width up to 0.
 * NOTE(review): bgr2YCoeff/w1111/bgr2YOffset are referenced without MANGLE()
 * unlike other external symbols in this file — verify on targets that prefix
 * C symbols with an underscore.
 */
static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, int width)
{
#ifdef HAVE_MMX
    asm volatile(
        "movl %2, %%eax \n\t"
        "movq bgr2YCoeff, %%mm6 \n\t"
        "movq w1111, %%mm5 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        "leal (%%eax, %%eax, 2), %%ebx \n\t"
        ".balign 16 \n\t"
        "1: \n\t"
        PREFETCH" 64(%0, %%ebx) \n\t"
        "movd (%0, %%ebx), %%mm0 \n\t"
        "movd 3(%0, %%ebx), %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpcklbw %%mm7, %%mm1 \n\t"
        "movd 6(%0, %%ebx), %%mm2 \n\t"
        "movd 9(%0, %%ebx), %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpcklbw %%mm7, %%mm3 \n\t"
        "pmaddwd %%mm6, %%mm0 \n\t"
        "pmaddwd %%mm6, %%mm1 \n\t"
        "pmaddwd %%mm6, %%mm2 \n\t"
        "pmaddwd %%mm6, %%mm3 \n\t"
#ifndef FAST_BGR2YV12
        "psrad $8, %%mm0 \n\t"
        "psrad $8, %%mm1 \n\t"
        "psrad $8, %%mm2 \n\t"
        "psrad $8, %%mm3 \n\t"
#endif
        "packssdw %%mm1, %%mm0 \n\t"
        "packssdw %%mm3, %%mm2 \n\t"
        "pmaddwd %%mm5, %%mm0 \n\t"
        "pmaddwd %%mm5, %%mm2 \n\t"
        "packssdw %%mm2, %%mm0 \n\t"
        "psraw $7, %%mm0 \n\t"

        /* second group of 4 pixels */
        "movd 12(%0, %%ebx), %%mm4 \n\t"
        "movd 15(%0, %%ebx), %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm4 \n\t"
        "punpcklbw %%mm7, %%mm1 \n\t"
        "movd 18(%0, %%ebx), %%mm2 \n\t"
        "movd 21(%0, %%ebx), %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpcklbw %%mm7, %%mm3 \n\t"
        "pmaddwd %%mm6, %%mm4 \n\t"
        "pmaddwd %%mm6, %%mm1 \n\t"
        "pmaddwd %%mm6, %%mm2 \n\t"
        "pmaddwd %%mm6, %%mm3 \n\t"
#ifndef FAST_BGR2YV12
        "psrad $8, %%mm4 \n\t"
        "psrad $8, %%mm1 \n\t"
        "psrad $8, %%mm2 \n\t"
        "psrad $8, %%mm3 \n\t"
#endif
        "packssdw %%mm1, %%mm4 \n\t"
        "packssdw %%mm3, %%mm2 \n\t"
        "pmaddwd %%mm5, %%mm4 \n\t"
        "pmaddwd %%mm5, %%mm2 \n\t"
        "addl $24, %%ebx \n\t"
        "packssdw %%mm2, %%mm4 \n\t"
        "psraw $7, %%mm4 \n\t"

        "packuswb %%mm4, %%mm0 \n\t"
        "paddusb bgr2YOffset, %%mm0 \n\t"

        "movq %%mm0, (%1, %%eax) \n\t"
        "addl $8, %%eax \n\t"
        " js 1b \n\t"
        : : "r" (src+width*3), "r" (dst+width), "g" (-width)
        : "%eax", "%ebx"
    );
#else
    int i;
    for(i=0; i<width; i++)
    {
        int b= src[i*3+0];
        int g= src[i*3+1];
        int r= src[i*3+2];

        dst[i]= ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
    }
#endif
}
/**
 * Convert two BGR24 lines to one half-resolution U/V line.
 *
 * @param dstU  output: width U samples
 * @param dstV  output: width V samples
 * @param src1  first BGR24 line
 * @param src2  second BGR24 line
 * @param width number of chroma samples to produce (half the pixel count)
 *
 * MMX path: 4 chroma samples per iteration; %%ebx walks 6*width source
 * bytes in 24-byte steps, %%eax walks the destinations from -width to 0.
 * The MMX2/3DNow variant averages pixel pairs with PAVGB; the plain-MMX
 * variant sums 4 pixels and shifts.  NOTE(review): bgr2UCoeff/bgr2VCoeff/
 * w1111/bgr2UVOffset are referenced without MANGLE() — verify on targets
 * with underscore-prefixed symbols.
 */
static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
#ifdef HAVE_MMX
    asm volatile(
        "movl %4, %%eax \n\t"
        "movq w1111, %%mm5 \n\t"
        "movq bgr2UCoeff, %%mm6 \n\t"
        "pxor %%mm7, %%mm7 \n\t"
        "leal (%%eax, %%eax, 2), %%ebx \n\t"
        "addl %%ebx, %%ebx \n\t"
        ".balign 16 \n\t"
        "1: \n\t"
        PREFETCH" 64(%0, %%ebx) \n\t"
        PREFETCH" 64(%1, %%ebx) \n\t"
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
        "movq (%0, %%ebx), %%mm0 \n\t"
        "movq (%1, %%ebx), %%mm1 \n\t"
        "movq 6(%0, %%ebx), %%mm2 \n\t"
        "movq 6(%1, %%ebx), %%mm3 \n\t"
        PAVGB(%%mm1, %%mm0)
        PAVGB(%%mm3, %%mm2)
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "psrlq $24, %%mm0 \n\t"
        "psrlq $24, %%mm2 \n\t"
        PAVGB(%%mm1, %%mm0)
        PAVGB(%%mm3, %%mm2)
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
#else
        "movd (%0, %%ebx), %%mm0 \n\t"
        "movd (%1, %%ebx), %%mm1 \n\t"
        "movd 3(%0, %%ebx), %%mm2 \n\t"
        "movd 3(%1, %%ebx), %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm0 \n\t"
        "punpcklbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpcklbw %%mm7, %%mm3 \n\t"
        "paddw %%mm1, %%mm0 \n\t"
        "paddw %%mm3, %%mm2 \n\t"
        "paddw %%mm2, %%mm0 \n\t"
        "movd 6(%0, %%ebx), %%mm4 \n\t"
        "movd 6(%1, %%ebx), %%mm1 \n\t"
        "movd 9(%0, %%ebx), %%mm2 \n\t"
        "movd 9(%1, %%ebx), %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm4 \n\t"
        "punpcklbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpcklbw %%mm7, %%mm3 \n\t"
        "paddw %%mm1, %%mm4 \n\t"
        "paddw %%mm3, %%mm2 \n\t"
        "paddw %%mm4, %%mm2 \n\t"
        "psrlw $2, %%mm0 \n\t"
        "psrlw $2, %%mm2 \n\t"
#endif
        "movq bgr2VCoeff, %%mm1 \n\t"
        "movq bgr2VCoeff, %%mm3 \n\t"

        "pmaddwd %%mm0, %%mm1 \n\t"
        "pmaddwd %%mm2, %%mm3 \n\t"
        "pmaddwd %%mm6, %%mm0 \n\t"
        "pmaddwd %%mm6, %%mm2 \n\t"
#ifndef FAST_BGR2YV12
        "psrad $8, %%mm0 \n\t"
        "psrad $8, %%mm1 \n\t"
        "psrad $8, %%mm2 \n\t"
        "psrad $8, %%mm3 \n\t"
#endif
        "packssdw %%mm2, %%mm0 \n\t"
        "packssdw %%mm3, %%mm1 \n\t"
        "pmaddwd %%mm5, %%mm0 \n\t"
        "pmaddwd %%mm5, %%mm1 \n\t"
        "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
        "psraw $7, %%mm0 \n\t"

        /* second pair of chroma samples */
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
        "movq 12(%0, %%ebx), %%mm4 \n\t"
        "movq 12(%1, %%ebx), %%mm1 \n\t"
        "movq 18(%0, %%ebx), %%mm2 \n\t"
        "movq 18(%1, %%ebx), %%mm3 \n\t"
        PAVGB(%%mm1, %%mm4)
        PAVGB(%%mm3, %%mm2)
        "movq %%mm4, %%mm1 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "psrlq $24, %%mm4 \n\t"
        "psrlq $24, %%mm2 \n\t"
        PAVGB(%%mm1, %%mm4)
        PAVGB(%%mm3, %%mm2)
        "punpcklbw %%mm7, %%mm4 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
#else
        "movd 12(%0, %%ebx), %%mm4 \n\t"
        "movd 12(%1, %%ebx), %%mm1 \n\t"
        "movd 15(%0, %%ebx), %%mm2 \n\t"
        "movd 15(%1, %%ebx), %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm4 \n\t"
        "punpcklbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpcklbw %%mm7, %%mm3 \n\t"
        "paddw %%mm1, %%mm4 \n\t"
        "paddw %%mm3, %%mm2 \n\t"
        "paddw %%mm2, %%mm4 \n\t"
        "movd 18(%0, %%ebx), %%mm5 \n\t"
        "movd 18(%1, %%ebx), %%mm1 \n\t"
        "movd 21(%0, %%ebx), %%mm2 \n\t"
        "movd 21(%1, %%ebx), %%mm3 \n\t"
        "punpcklbw %%mm7, %%mm5 \n\t"
        "punpcklbw %%mm7, %%mm1 \n\t"
        "punpcklbw %%mm7, %%mm2 \n\t"
        "punpcklbw %%mm7, %%mm3 \n\t"
        "paddw %%mm1, %%mm5 \n\t"
        "paddw %%mm3, %%mm2 \n\t"
        "paddw %%mm5, %%mm2 \n\t"
        "movq w1111, %%mm5 \n\t" /* mm5 was clobbered above: reload */
        "psrlw $2, %%mm4 \n\t"
        "psrlw $2, %%mm2 \n\t"
#endif
        "movq bgr2VCoeff, %%mm1 \n\t"
        "movq bgr2VCoeff, %%mm3 \n\t"

        "pmaddwd %%mm4, %%mm1 \n\t"
        "pmaddwd %%mm2, %%mm3 \n\t"
        "pmaddwd %%mm6, %%mm4 \n\t"
        "pmaddwd %%mm6, %%mm2 \n\t"
#ifndef FAST_BGR2YV12
        "psrad $8, %%mm4 \n\t"
        "psrad $8, %%mm1 \n\t"
        "psrad $8, %%mm2 \n\t"
        "psrad $8, %%mm3 \n\t"
#endif
        "packssdw %%mm2, %%mm4 \n\t"
        "packssdw %%mm3, %%mm1 \n\t"
        "pmaddwd %%mm5, %%mm4 \n\t"
        "pmaddwd %%mm5, %%mm1 \n\t"
        "addl $24, %%ebx \n\t"
        "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
        "psraw $7, %%mm4 \n\t"

        "movq %%mm0, %%mm1 \n\t"
        "punpckldq %%mm4, %%mm0 \n\t"
        "punpckhdq %%mm4, %%mm1 \n\t"
        "packsswb %%mm1, %%mm0 \n\t"
        "paddb bgr2UVOffset, %%mm0 \n\t"

        "movd %%mm0, (%2, %%eax) \n\t"
        "punpckhdq %%mm0, %%mm0 \n\t"
        "movd %%mm0, (%3, %%eax) \n\t"
        "addl $4, %%eax \n\t"
        " js 1b \n\t"
        : : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width)
        : "%eax", "%ebx"
    );
#else
    /* average a 2x2 block (2 pixels per line) per output sample */
    int i;
    for(i=0; i<width; i++)
    {
        int b= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
        int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
        int r= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];

        dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
        dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
    }
#endif
}
  1761. static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width)
  1762. {
  1763. int i;
  1764. for(i=0; i<width; i++)
  1765. {
  1766. int d= src[i*2] + (src[i*2+1]<<8);
  1767. int b= d&0x1F;
  1768. int g= (d>>5)&0x3F;
  1769. int r= (d>>11)&0x1F;
  1770. dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
  1771. }
  1772. }
/*
 * Convert two adjacent BGR16 (RGB565) lines to one line of 2x2-subsampled
 * U and V: each output sample averages two horizontal neighbours from each
 * of the two input lines.
 */
static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
	int i;
	for(i=0; i<width; i++)
	{
#if 1
		/* SWAR (SIMD-within-a-register) variant: one 32-bit load holds two
		 * RGB565 pixels; the masks keep the colour fields far enough apart
		 * that the four per-channel values can be summed without carrying
		 * into the neighbouring field.
		 * NOTE(review): relies on arithmetic shifts of signed ints and on
		 * (dh<<21) wrapping harmlessly — long-standing x86 assumptions. */
		int d0= le2me_32( ((uint32_t*)src1)[i] );
		int d1= le2me_32( ((uint32_t*)src2)[i] );
		int dl= (d0&0x07E0F81F) + (d1&0x07E0F81F);
		int dh= ((d0>>5)&0x07C0F83F) + ((d1>>5)&0x07C0F83F);
		int dh2= (dh>>11) + (dh<<21);
		int d= dh2 + dl;
		int b= d&0x7F;       /* sum of the four 5-bit blue fields  */
		int r= (d>>11)&0x7F; /* sum of the four 5-bit red fields   */
		int g= d>>21;        /* sum of the four 6-bit green fields */
#else
		/* straightforward reference variant: unpack all four pixels and sum
		 * the channels individually (kept for documentation/debugging) */
		int d0= src1[i*4] + (src1[i*4+1]<<8);
		int b0= d0&0x1F;
		int g0= (d0>>5)&0x3F;
		int r0= (d0>>11)&0x1F;
		int d1= src1[i*4+2] + (src1[i*4+3]<<8);
		int b1= d1&0x1F;
		int g1= (d1>>5)&0x3F;
		int r1= (d1>>11)&0x1F;
		int d2= src2[i*4] + (src2[i*4+1]<<8);
		int b2= d2&0x1F;
		int g2= (d2>>5)&0x3F;
		int r2= (d2>>11)&0x1F;
		int d3= src2[i*4+2] + (src2[i*4+3]<<8);
		int b3= d3&0x1F;
		int g3= (d3>>5)&0x3F;
		int r3= (d3>>11)&0x1F;
		int b= b0 + b1 + b2 + b3;
		int g= g0 + g1 + g2 + g3;
		int r= r0 + r1 + r2 + r3;
#endif
		/* shift: +2 divides the 4-pixel sum, -2 compensates the doubled
		 * 5-bit R/B coefficients (G is 6 bit already) */
		dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
		dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
	}
}
  1813. static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width)
  1814. {
  1815. int i;
  1816. for(i=0; i<width; i++)
  1817. {
  1818. int d= src[i*2] + (src[i*2+1]<<8);
  1819. int b= d&0x1F;
  1820. int g= (d>>5)&0x1F;
  1821. int r= (d>>10)&0x1F;
  1822. dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
  1823. }
  1824. }
/*
 * Convert two adjacent BGR15 (RGB555) lines to one line of 2x2-subsampled
 * U and V: each output sample averages two horizontal neighbours from each
 * of the two input lines.
 */
static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
	int i;
	for(i=0; i<width; i++)
	{
#if 1
		/* SWAR variant, same packed-field trick as bgr16ToUV but with the
		 * RGB555 field layout; the masks keep the per-channel sums from
		 * overflowing into the neighbouring field.
		 * NOTE(review): relies on arithmetic shifts of signed ints and on
		 * (dh<<21) wrapping harmlessly — long-standing x86 assumptions. */
		int d0= le2me_32( ((uint32_t*)src1)[i] );
		int d1= le2me_32( ((uint32_t*)src2)[i] );
		int dl= (d0&0x03E07C1F) + (d1&0x03E07C1F);
		int dh= ((d0>>5)&0x03E0F81F) + ((d1>>5)&0x03E0F81F);
		int dh2= (dh>>11) + (dh<<21);
		int d= dh2 + dl;
		int b= d&0x7F;       /* sum of the four 5-bit blue fields  */
		int r= (d>>10)&0x7F; /* sum of the four 5-bit red fields   */
		int g= d>>21;        /* sum of the four 5-bit green fields */
#else
		/* straightforward reference variant: unpack all four pixels and sum
		 * the channels individually (kept for documentation/debugging) */
		int d0= src1[i*4] + (src1[i*4+1]<<8);
		int b0= d0&0x1F;
		int g0= (d0>>5)&0x1F;
		int r0= (d0>>10)&0x1F;
		int d1= src1[i*4+2] + (src1[i*4+3]<<8);
		int b1= d1&0x1F;
		int g1= (d1>>5)&0x1F;
		int r1= (d1>>10)&0x1F;
		int d2= src2[i*4] + (src2[i*4+1]<<8);
		int b2= d2&0x1F;
		int g2= (d2>>5)&0x1F;
		int r2= (d2>>10)&0x1F;
		int d3= src2[i*4+2] + (src2[i*4+3]<<8);
		int b3= d3&0x1F;
		int g3= (d3>>5)&0x1F;
		int r3= (d3>>10)&0x1F;
		int b= b0 + b1 + b2 + b3;
		int g= g0 + g1 + g2 + g3;
		int r= r0 + r1 + r2 + r3;
#endif
		/* shift: +2 divides the 4-pixel sum, -3 rescales the 5-bit channels */
		dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
		dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
	}
}
  1865. static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width)
  1866. {
  1867. int i;
  1868. for(i=0; i<width; i++)
  1869. {
  1870. int r= src[i*4+0];
  1871. int g= src[i*4+1];
  1872. int b= src[i*4+2];
  1873. dst[i]= ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
  1874. }
  1875. }
  1876. static inline void RENAME(rgb32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1877. {
  1878. int i;
  1879. for(i=0; i<width; i++)
  1880. {
  1881. int r= src1[8*i + 0] + src1[8*i + 4] + src2[8*i + 0] + src2[8*i + 4];
  1882. int g= src1[8*i + 1] + src1[8*i + 5] + src2[8*i + 1] + src2[8*i + 5];
  1883. int b= src1[8*i + 2] + src1[8*i + 6] + src2[8*i + 2] + src2[8*i + 6];
  1884. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1885. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1886. }
  1887. }
  1888. static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, int width)
  1889. {
  1890. int i;
  1891. for(i=0; i<width; i++)
  1892. {
  1893. int r= src[i*3+0];
  1894. int g= src[i*3+1];
  1895. int b= src[i*3+2];
  1896. dst[i]= ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
  1897. }
  1898. }
  1899. static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1900. {
  1901. int i;
  1902. for(i=0; i<width; i++)
  1903. {
  1904. int r= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
  1905. int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
  1906. int b= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
  1907. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1908. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1909. }
  1910. }
  1911. // Bilinear / Bicubic scaling
/*
 * Generic horizontal FIR scaler: for every output sample i, multiply
 * filterSize input bytes starting at src[filterPos[i]] with the
 * coefficients filter[i*filterSize..] and store the accumulated,
 * clipped result as a 16-bit sample (value << 7) in dst.
 * NOTE(review): srcW and xInc are unused here — presumably kept so all
 * horizontal scalers share one signature; confirm against callers.
 */
static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
				  int16_t *filter, int16_t *filterPos, int filterSize)
{
#ifdef HAVE_MMX
	if(filterSize==4) // allways true for upscaling, sometimes for down too
	{
		/* counter runs from -2*dstW up to 0 (2 bytes per output sample);
		 * the pointers are pre-biased so the negative counter can be used
		 * as a common index and the loop ends on the carry flag (jnc) */
		int counter= -2*dstW;
		filter-= counter*2;
		filterPos-= counter/2;
		dst-= counter/2;
		asm volatile(
			"pxor %%mm7, %%mm7 \n\t"
			"movq "MANGLE(w02)", %%mm6 \n\t"
			"pushl %%ebp \n\t" // we use 7 regs here ...
			"movl %%eax, %%ebp \n\t"
			".balign 16 \n\t"
			"1: \n\t"
			/* load two filter positions and the two 4-tap coefficient sets */
			"movzwl (%2, %%ebp), %%eax \n\t"
			"movzwl 2(%2, %%ebp), %%ebx \n\t"
			"movq (%1, %%ebp, 4), %%mm1 \n\t"
			"movq 8(%1, %%ebp, 4), %%mm3 \n\t"
			/* fetch 4 source bytes per output sample, widen to words */
			"movd (%3, %%eax), %%mm0 \n\t"
			"movd (%3, %%ebx), %%mm2 \n\t"
			"punpcklbw %%mm7, %%mm0 \n\t"
			"punpcklbw %%mm7, %%mm2 \n\t"
			/* multiply-accumulate, rescale and pack the two results */
			"pmaddwd %%mm1, %%mm0 \n\t"
			"pmaddwd %%mm2, %%mm3 \n\t"
			"psrad $8, %%mm0 \n\t"
			"psrad $8, %%mm3 \n\t"
			"packssdw %%mm3, %%mm0 \n\t"
			"pmaddwd %%mm6, %%mm0 \n\t"
			"packssdw %%mm0, %%mm0 \n\t"
			"movd %%mm0, (%4, %%ebp) \n\t"
			"addl $4, %%ebp \n\t"
			" jnc 1b \n\t"
			"popl %%ebp \n\t"
			: "+a" (counter)
			: "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
			: "%ebx"
		);
	}
	else if(filterSize==8)
	{
		/* same scheme as the 4-tap loop, unrolled for 8 coefficients */
		int counter= -2*dstW;
		filter-= counter*4;
		filterPos-= counter/2;
		dst-= counter/2;
		asm volatile(
			"pxor %%mm7, %%mm7 \n\t"
			"movq "MANGLE(w02)", %%mm6 \n\t"
			"pushl %%ebp \n\t" // we use 7 regs here ...
			"movl %%eax, %%ebp \n\t"
			".balign 16 \n\t"
			"1: \n\t"
			"movzwl (%2, %%ebp), %%eax \n\t"
			"movzwl 2(%2, %%ebp), %%ebx \n\t"
			/* first 4 taps of both output samples */
			"movq (%1, %%ebp, 8), %%mm1 \n\t"
			"movq 16(%1, %%ebp, 8), %%mm3 \n\t"
			"movd (%3, %%eax), %%mm0 \n\t"
			"movd (%3, %%ebx), %%mm2 \n\t"
			"punpcklbw %%mm7, %%mm0 \n\t"
			"punpcklbw %%mm7, %%mm2 \n\t"
			"pmaddwd %%mm1, %%mm0 \n\t"
			"pmaddwd %%mm2, %%mm3 \n\t"
			/* second 4 taps, accumulated into the first partial sums */
			"movq 8(%1, %%ebp, 8), %%mm1 \n\t"
			"movq 24(%1, %%ebp, 8), %%mm5 \n\t"
			"movd 4(%3, %%eax), %%mm4 \n\t"
			"movd 4(%3, %%ebx), %%mm2 \n\t"
			"punpcklbw %%mm7, %%mm4 \n\t"
			"punpcklbw %%mm7, %%mm2 \n\t"
			"pmaddwd %%mm1, %%mm4 \n\t"
			"pmaddwd %%mm2, %%mm5 \n\t"
			"paddd %%mm4, %%mm0 \n\t"
			"paddd %%mm5, %%mm3 \n\t"
			"psrad $8, %%mm0 \n\t"
			"psrad $8, %%mm3 \n\t"
			"packssdw %%mm3, %%mm0 \n\t"
			"pmaddwd %%mm6, %%mm0 \n\t"
			"packssdw %%mm0, %%mm0 \n\t"
			"movd %%mm0, (%4, %%ebp) \n\t"
			"addl $4, %%ebp \n\t"
			" jnc 1b \n\t"
			"popl %%ebp \n\t"
			: "+a" (counter)
			: "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
			: "%ebx"
		);
	}
	else
	{
		/* arbitrary filterSize: inner loop (label 2) walks the taps 4 at a
		 * time; the filter pointer itself is advanced instead of indexed */
		int counter= -2*dstW;
//		filter-= counter*filterSize/2;
		filterPos-= counter/2;
		dst-= counter/2;
		asm volatile(
			"pxor %%mm7, %%mm7 \n\t"
			"movq "MANGLE(w02)", %%mm6 \n\t"
			".balign 16 \n\t"
			"1: \n\t"
			"movl %2, %%ecx \n\t"
			"movzwl (%%ecx, %0), %%eax \n\t"
			"movzwl 2(%%ecx, %0), %%ebx \n\t"
			"movl %5, %%ecx \n\t"
			"pxor %%mm4, %%mm4 \n\t"
			"pxor %%mm5, %%mm5 \n\t"
			"2: \n\t"
			"movq (%1), %%mm1 \n\t"
			"movq (%1, %6), %%mm3 \n\t"
			"movd (%%ecx, %%eax), %%mm0 \n\t"
			"movd (%%ecx, %%ebx), %%mm2 \n\t"
			"punpcklbw %%mm7, %%mm0 \n\t"
			"punpcklbw %%mm7, %%mm2 \n\t"
			"pmaddwd %%mm1, %%mm0 \n\t"
			"pmaddwd %%mm2, %%mm3 \n\t"
			"paddd %%mm3, %%mm5 \n\t"
			"paddd %%mm0, %%mm4 \n\t"
			"addl $8, %1 \n\t"
			"addl $4, %%ecx \n\t"
			"cmpl %4, %%ecx \n\t"
			" jb 2b \n\t"
			"addl %6, %1 \n\t"
			"psrad $8, %%mm4 \n\t"
			"psrad $8, %%mm5 \n\t"
			"packssdw %%mm5, %%mm4 \n\t"
			"pmaddwd %%mm6, %%mm4 \n\t"
			"packssdw %%mm4, %%mm4 \n\t"
			"movl %3, %%eax \n\t"
			"movd %%mm4, (%%eax, %0) \n\t"
			"addl $4, %0 \n\t"
			" jnc 1b \n\t"
			: "+r" (counter), "+r" (filter)
			: "m" (filterPos), "m" (dst), "m"(src+filterSize),
			  "m" (src), "r" (filterSize*2)
			: "%ebx", "%eax", "%ecx"
		);
	}
#else
	/* portable C reference implementation */
	int i;
	for(i=0; i<dstW; i++)
	{
		int j;
		int srcPos= filterPos[i];
		int val=0;
//		printf("filterPos: %d\n", filterPos[i]);
		for(j=0; j<filterSize; j++)
		{
//			printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
			val += ((int)src[srcPos + j])*filter[filterSize*i + j];
		}
//		filter += hFilterSize;
		dst[i] = MIN(MAX(0, val>>7), (1<<15)-1); // the cubic equation does overflow ...
//		dst[i] = val>>7;
	}
#endif
}
  2067. // *** horizontal scale Y line to temp buffer
/*
 * Horizontally scale one luma line: convert packed input formats to 8-bit
 * grey in formatConvBuffer first, then either run the generic FIR scaler
 * (hScale) or one of the fast-bilinear paths (runtime-generated MMX2 code
 * in funnyYCode, plain x86 asm, or portable C).  Output samples in dst are
 * 16-bit, scaled by 128 (value << 7).
 */
static inline void RENAME(hyscale)(uint16_t *dst, int dstWidth, uint8_t *src, int srcW, int xInc,
				   int flags, int canMMX2BeUsed, int16_t *hLumFilter,
				   int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
				   int srcFormat, uint8_t *formatConvBuffer)
{
    /* input format dispatch: anything non-planar is converted to 8-bit
     * luma in formatConvBuffer and src is redirected there */
    if(srcFormat==IMGFMT_YUY2)
    {
	RENAME(yuy2ToY)(formatConvBuffer, src, srcW);
	src= formatConvBuffer;
    }
    else if(srcFormat==IMGFMT_BGR32)
    {
	RENAME(bgr32ToY)(formatConvBuffer, src, srcW);
	src= formatConvBuffer;
    }
    else if(srcFormat==IMGFMT_BGR24)
    {
	RENAME(bgr24ToY)(formatConvBuffer, src, srcW);
	src= formatConvBuffer;
    }
    else if(srcFormat==IMGFMT_BGR16)
    {
	RENAME(bgr16ToY)(formatConvBuffer, src, srcW);
	src= formatConvBuffer;
    }
    else if(srcFormat==IMGFMT_BGR15)
    {
	RENAME(bgr15ToY)(formatConvBuffer, src, srcW);
	src= formatConvBuffer;
    }
    else if(srcFormat==IMGFMT_RGB32)
    {
	RENAME(rgb32ToY)(formatConvBuffer, src, srcW);
	src= formatConvBuffer;
    }
    else if(srcFormat==IMGFMT_RGB24)
    {
	RENAME(rgb24ToY)(formatConvBuffer, src, srcW);
	src= formatConvBuffer;
    }

#ifdef HAVE_MMX
	// use the new MMX scaler if the MMX2 one can't be used (it's faster than the plain x86 asm one)
	if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
#else
	if(!(flags&SWS_FAST_BILINEAR))
#endif
	{
		RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
	}
	else // Fast Bilinear upscale / crap downscale
	{
#ifdef ARCH_X86
#ifdef HAVE_MMX2
	int i;
	if(canMMX2BeUsed)
	{
		/* fast path: call the runtime-generated scaler in funnyYCode;
		 * mm2/mm4 hold the four sub-pixel phases, ebx/edx the fractional
		 * and integer per-4-pixel increments */
		asm volatile(
			"pxor %%mm7, %%mm7 \n\t"
			"pxor %%mm2, %%mm2 \n\t" // 2*xalpha
			"movd %5, %%mm6 \n\t" // xInc&0xFFFF
			"punpcklwd %%mm6, %%mm6 \n\t"
			"punpcklwd %%mm6, %%mm6 \n\t"
			"movq %%mm6, %%mm2 \n\t"
			"psllq $16, %%mm2 \n\t"
			"paddw %%mm6, %%mm2 \n\t"
			"psllq $16, %%mm2 \n\t"
			"paddw %%mm6, %%mm2 \n\t"
			"psllq $16, %%mm2 \n\t" //0,t,2t,3t t=xInc&0xFFFF
			"movq %%mm2, %%mm4 \n\t"
			"movd %4, %%mm6 \n\t" //(xInc*4)&0xFFFF
			"punpcklwd %%mm6, %%mm6 \n\t"
			"punpcklwd %%mm6, %%mm6 \n\t"
			"xorl %%eax, %%eax \n\t" // i
			"movl %0, %%esi \n\t" // src
			"movl %1, %%edi \n\t" // buf1
			"movl %3, %%edx \n\t" // (xInc*4)>>16
			"xorl %%ecx, %%ecx \n\t"
			"xorl %%ebx, %%ebx \n\t"
			"movw %4, %%bx \n\t" // (xInc*4)&0xFFFF

/* prefetch ahead, invoke the generated code, then reset the phase regs */
#define FUNNY_Y_CODE \
			PREFETCH" 1024(%%esi) \n\t"\
			PREFETCH" 1056(%%esi) \n\t"\
			PREFETCH" 1088(%%esi) \n\t"\
			"call *%6 \n\t"\
			"movq %%mm4, %%mm2 \n\t"\
			"xorl %%ecx, %%ecx \n\t"

FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE

			:: "m" (src), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16),
			"m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF), "m" (funnyYCode)
			: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
		);
		/* fix the rightmost samples the generated code may have read past
		 * the end for: replicate the last source pixel (<<7 scale) */
		for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
	}
	else
	{
#endif
	//NO MMX just normal asm ...
	/* two bilinear output samples per iteration; 16.16 fixed-point source
	 * position kept as cx (fraction) + ebx (integer), advanced with adc */
	asm volatile(
		"xorl %%eax, %%eax \n\t" // i
		"xorl %%ebx, %%ebx \n\t" // xx
		"xorl %%ecx, %%ecx \n\t" // 2*xalpha
		".balign 16 \n\t"
		"1: \n\t"
		"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
		"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
		"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
		"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
		"shll $16, %%edi \n\t"
		"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
		"movl %1, %%edi \n\t"
		"shrl $9, %%esi \n\t"
		"movw %%si, (%%edi, %%eax, 2) \n\t"
		"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
		"adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
		"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
		"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
		"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
		"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
		"shll $16, %%edi \n\t"
		"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
		"movl %1, %%edi \n\t"
		"shrl $9, %%esi \n\t"
		"movw %%si, 2(%%edi, %%eax, 2) \n\t"
		"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
		"adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
		"addl $2, %%eax \n\t"
		"cmpl %2, %%eax \n\t"
		" jb 1b \n\t"

		:: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF)
		: "%eax", "%ebx", "%ecx", "%edi", "%esi"
		);
#ifdef HAVE_MMX2
	} //if MMX2 cant be used
#endif
#else
	/* portable C fast-bilinear path */
	int i;
	unsigned int xpos=0;
	for(i=0;i<dstWidth;i++)
	{
		register unsigned int xx=xpos>>16;
		register unsigned int xalpha=(xpos&0xFFFF)>>9;
		dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
		xpos+=xInc;
	}
#endif
	}
}
/*
 * Horizontally scale one pair of chroma lines: convert packed input formats
 * to 8-bit U/V planes in formatConvBuffer (U at offset 0, V at +2048) first,
 * then either run the generic FIR scaler twice or one of the fast-bilinear
 * paths.  Scaled U goes to dst[0..], scaled V to dst[2048..] (the asm uses
 * a byte offset of 4096 for the 16-bit samples).  Grey input has no chroma,
 * so it returns early.
 */
inline static void RENAME(hcscale)(uint16_t *dst, int dstWidth, uint8_t *src1, uint8_t *src2,
				   int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
				   int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
				   int srcFormat, uint8_t *formatConvBuffer)
{
    /* input format dispatch, analogous to hyscale but producing U and V */
    if(srcFormat==IMGFMT_YUY2)
    {
	RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
	src1= formatConvBuffer;
	src2= formatConvBuffer+2048;
    }
    else if(srcFormat==IMGFMT_BGR32)
    {
	RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
	src1= formatConvBuffer;
	src2= formatConvBuffer+2048;
    }
    else if(srcFormat==IMGFMT_BGR24)
    {
	RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
	src1= formatConvBuffer;
	src2= formatConvBuffer+2048;
    }
    else if(srcFormat==IMGFMT_BGR16)
    {
	RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
	src1= formatConvBuffer;
	src2= formatConvBuffer+2048;
    }
    else if(srcFormat==IMGFMT_BGR15)
    {
	RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
	src1= formatConvBuffer;
	src2= formatConvBuffer+2048;
    }
    else if(srcFormat==IMGFMT_RGB32)
    {
	RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
	src1= formatConvBuffer;
	src2= formatConvBuffer+2048;
    }
    else if(srcFormat==IMGFMT_RGB24)
    {
	RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
	src1= formatConvBuffer;
	src2= formatConvBuffer+2048;
    }
    else if(isGray(srcFormat))
    {
	/* grey has no chroma planes to scale */
    	return;
    }

#ifdef HAVE_MMX
	// use the new MMX scaler if the MMX2 one can't be used (it's faster than the plain x86 asm one)
	if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
#else
	if(!(flags&SWS_FAST_BILINEAR))
#endif
	{
		RENAME(hScale)(dst     , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
		RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
	}
	else // Fast Bilinear upscale / crap downscale
	{
#ifdef ARCH_X86
#ifdef HAVE_MMX2
	int i;
	if(canMMX2BeUsed)
	{
		/* fast path: run the runtime-generated scaler in funnyUVCode once
		 * for the U plane (src1 -> dst) and once for V (src2 -> dst+4096
		 * bytes); register setup matches the luma variant in hyscale */
		asm volatile(
			"pxor %%mm7, %%mm7 \n\t"
			"pxor %%mm2, %%mm2 \n\t" // 2*xalpha
			"movd %5, %%mm6 \n\t" // xInc&0xFFFF
			"punpcklwd %%mm6, %%mm6 \n\t"
			"punpcklwd %%mm6, %%mm6 \n\t"
			"movq %%mm6, %%mm2 \n\t"
			"psllq $16, %%mm2 \n\t"
			"paddw %%mm6, %%mm2 \n\t"
			"psllq $16, %%mm2 \n\t"
			"paddw %%mm6, %%mm2 \n\t"
			"psllq $16, %%mm2 \n\t" //0,t,2t,3t t=xInc&0xFFFF
			"movq %%mm2, %%mm4 \n\t"
			"movd %4, %%mm6 \n\t" //(xInc*4)&0xFFFF
			"punpcklwd %%mm6, %%mm6 \n\t"
			"punpcklwd %%mm6, %%mm6 \n\t"
			"xorl %%eax, %%eax \n\t" // i
			"movl %0, %%esi \n\t" // src
			"movl %1, %%edi \n\t" // buf1
			"movl %3, %%edx \n\t" // (xInc*4)>>16
			"xorl %%ecx, %%ecx \n\t"
			"xorl %%ebx, %%ebx \n\t"
			"movw %4, %%bx \n\t" // (xInc*4)&0xFFFF

/* prefetch ahead, invoke the generated code, then reset the phase regs */
#define FUNNYUVCODE \
			PREFETCH" 1024(%%esi) \n\t"\
			PREFETCH" 1056(%%esi) \n\t"\
			PREFETCH" 1088(%%esi) \n\t"\
			"call *%7 \n\t"\
			"movq %%mm4, %%mm2 \n\t"\
			"xorl %%ecx, %%ecx \n\t"

FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE

			/* second pass: V plane from src2 into dst+4096 bytes */
			"xorl %%eax, %%eax \n\t" // i
			"movl %6, %%esi \n\t" // src
			"movl %1, %%edi \n\t" // buf1
			"addl $4096, %%edi \n\t"

FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE

			:: "m" (src1), "m" (dst), "m" (dstWidth), "m" ((xInc*4)>>16),
			  "m" ((xInc*4)&0xFFFF), "m" (xInc&0xFFFF), "m" (src2), "m" (funnyUVCode)
			: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
		);
		/* fix the rightmost samples the generated code may have read past
		 * the end for: replicate the last source pixel (<<7 scale) */
		for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
		{
//			printf("%d %d %d\n", dstWidth, i, srcW);
			dst[i] = src1[srcW-1]*128;
			dst[i+2048] = src2[srcW-1]*128;
		}
	}
	else
	{
#endif
	/* plain x86 asm fast-bilinear: one U and one V sample per iteration;
	 * 16.16 fixed-point source position kept as cx (fraction) + ebx
	 * (integer), advanced with adc */
	asm volatile(
		"xorl %%eax, %%eax \n\t" // i
		"xorl %%ebx, %%ebx \n\t" // xx
		"xorl %%ecx, %%ecx \n\t" // 2*xalpha
		".balign 16 \n\t"
		"1: \n\t"
		"movl %0, %%esi \n\t"
		"movzbl (%%esi, %%ebx), %%edi \n\t" //src[xx]
		"movzbl 1(%%esi, %%ebx), %%esi \n\t" //src[xx+1]
		"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
		"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
		"shll $16, %%edi \n\t"
		"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
		"movl %1, %%edi \n\t"
		"shrl $9, %%esi \n\t"
		"movw %%si, (%%edi, %%eax, 2) \n\t"

		"movzbl (%5, %%ebx), %%edi \n\t" //src[xx]
		"movzbl 1(%5, %%ebx), %%esi \n\t" //src[xx+1]
		"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
		"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
		"shll $16, %%edi \n\t"
		"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
		"movl %1, %%edi \n\t"
		"shrl $9, %%esi \n\t"
		"movw %%si, 4096(%%edi, %%eax, 2)\n\t"

		"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
		"adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
		"addl $1, %%eax \n\t"
		"cmpl %2, %%eax \n\t"
		" jb 1b \n\t"

		:: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF),
		"r" (src2)
		: "%eax", "%ebx", "%ecx", "%edi", "%esi"
		);
#ifdef HAVE_MMX2
	} //if MMX2 cant be used
#endif
#else
	/* portable C fast-bilinear path for both chroma planes */
	int i;
	unsigned int xpos=0;
	for(i=0;i<dstWidth;i++)
	{
		register unsigned int xx=xpos>>16;
		register unsigned int xalpha=(xpos&0xFFFF)>>9;
		dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
		dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
/* slower
	dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
	dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
*/
		xpos+=xInc;
	}
#endif
	}
}
  2409. static void RENAME(swScale)(SwsContext *c, uint8_t* srcParam[], int srcStrideParam[], int srcSliceY,
  2410. int srcSliceH, uint8_t* dstParam[], int dstStrideParam[]){
  2411. /* load a few things into local vars to make the code more readable? and faster */
  2412. const int srcW= c->srcW;
  2413. const int dstW= c->dstW;
  2414. const int dstH= c->dstH;
  2415. const int chrDstW= c->chrDstW;
  2416. const int lumXInc= c->lumXInc;
  2417. const int chrXInc= c->chrXInc;
  2418. const int dstFormat= c->dstFormat;
  2419. const int flags= c->flags;
  2420. const int canMMX2BeUsed= c->canMMX2BeUsed;
  2421. int16_t *vLumFilterPos= c->vLumFilterPos;
  2422. int16_t *vChrFilterPos= c->vChrFilterPos;
  2423. int16_t *hLumFilterPos= c->hLumFilterPos;
  2424. int16_t *hChrFilterPos= c->hChrFilterPos;
  2425. int16_t *vLumFilter= c->vLumFilter;
  2426. int16_t *vChrFilter= c->vChrFilter;
  2427. int16_t *hLumFilter= c->hLumFilter;
  2428. int16_t *hChrFilter= c->hChrFilter;
  2429. int16_t *lumMmxFilter= c->lumMmxFilter;
  2430. int16_t *chrMmxFilter= c->chrMmxFilter;
  2431. const int vLumFilterSize= c->vLumFilterSize;
  2432. const int vChrFilterSize= c->vChrFilterSize;
  2433. const int hLumFilterSize= c->hLumFilterSize;
  2434. const int hChrFilterSize= c->hChrFilterSize;
  2435. int16_t **lumPixBuf= c->lumPixBuf;
  2436. int16_t **chrPixBuf= c->chrPixBuf;
  2437. const int vLumBufSize= c->vLumBufSize;
  2438. const int vChrBufSize= c->vChrBufSize;
  2439. uint8_t *funnyYCode= c->funnyYCode;
  2440. uint8_t *funnyUVCode= c->funnyUVCode;
  2441. uint8_t *formatConvBuffer= c->formatConvBuffer;
  2442. /* vars whch will change and which we need to storw back in the context */
  2443. int dstY= c->dstY;
  2444. int lumBufIndex= c->lumBufIndex;
  2445. int chrBufIndex= c->chrBufIndex;
  2446. int lastInLumBuf= c->lastInLumBuf;
  2447. int lastInChrBuf= c->lastInChrBuf;
  2448. int srcStride[3];
  2449. int dstStride[3];
  2450. uint8_t *src[3];
  2451. uint8_t *dst[3];
  2452. if(c->srcFormat == IMGFMT_I420){
  2453. src[0]= srcParam[0];
  2454. src[1]= srcParam[2];
  2455. src[2]= srcParam[1];
  2456. srcStride[0]= srcStrideParam[0];
  2457. srcStride[1]= srcStrideParam[2];
  2458. srcStride[2]= srcStrideParam[1];
  2459. }
  2460. else if(c->srcFormat==IMGFMT_YV12){
  2461. src[0]= srcParam[0];
  2462. src[1]= srcParam[1];
  2463. src[2]= srcParam[2];
  2464. srcStride[0]= srcStrideParam[0];
  2465. srcStride[1]= srcStrideParam[1];
  2466. srcStride[2]= srcStrideParam[2];
  2467. }
  2468. else if(isPacked(c->srcFormat)){
  2469. src[0]=
  2470. src[1]=
  2471. src[2]= srcParam[0];
  2472. srcStride[0]= srcStrideParam[0];
  2473. srcStride[1]=
  2474. srcStride[2]= srcStrideParam[0]<<1;
  2475. }
  2476. else if(isGray(c->srcFormat)){
  2477. src[0]= srcParam[0];
  2478. src[1]=
  2479. src[2]= NULL;
  2480. srcStride[0]= srcStrideParam[0];
  2481. srcStride[1]=
  2482. srcStride[2]= 0;
  2483. }
  2484. if(dstFormat == IMGFMT_I420){
  2485. dst[0]= dstParam[0];
  2486. dst[1]= dstParam[2];
  2487. dst[2]= dstParam[1];
  2488. dstStride[0]= dstStrideParam[0];
  2489. dstStride[1]= dstStrideParam[2];
  2490. dstStride[2]= dstStrideParam[1];
  2491. }else{
  2492. dst[0]= dstParam[0];
  2493. dst[1]= dstParam[1];
  2494. dst[2]= dstParam[2];
  2495. dstStride[0]= dstStrideParam[0];
  2496. dstStride[1]= dstStrideParam[1];
  2497. dstStride[2]= dstStrideParam[2];
  2498. }
  2499. //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
  2500. //dstStride[0],dstStride[1],dstStride[2]);
  2501. if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
  2502. {
  2503. static int firstTime=1; //FIXME move this into the context perhaps
  2504. if(flags & SWS_PRINT_INFO && firstTime)
  2505. {
  2506. fprintf(stderr, "SwScaler: Warning: dstStride is not aligned!\n"
  2507. "SwScaler: ->cannot do aligned memory acesses anymore\n");
  2508. firstTime=0;
  2509. }
  2510. }
  2511. /* Note the user might start scaling the picture in the middle so this will not get executed
  2512. this is not really intended but works currently, so ppl might do it */
  2513. if(srcSliceY ==0){
  2514. lumBufIndex=0;
  2515. chrBufIndex=0;
  2516. dstY=0;
  2517. lastInLumBuf= -1;
  2518. lastInChrBuf= -1;
  2519. }
  2520. for(;dstY < dstH; dstY++){
  2521. unsigned char *dest =dst[0]+dstStride[0]*dstY;
  2522. unsigned char *uDest=dst[1]+dstStride[1]*(dstY>>1);
  2523. unsigned char *vDest=dst[2]+dstStride[2]*(dstY>>1);
  2524. const int chrDstY= isHalfChrV(dstFormat) ? (dstY>>1) : dstY;
  2525. const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
  2526. const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
  2527. const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
  2528. const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
  2529. //handle holes (FAST_BILINEAR & weird filters)
  2530. if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
  2531. if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
  2532. //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
  2533. ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1)
  2534. ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1)
  2535. // Do we have enough lines in this slice to output the dstY line
  2536. if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < ((srcSliceY + srcSliceH + 1)>>1))
  2537. {
  2538. //Do horizontal scaling
  2539. while(lastInLumBuf < lastLumSrcY)
  2540. {
  2541. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2542. lumBufIndex++;
  2543. // printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
  2544. ASSERT(lumBufIndex < 2*vLumBufSize)
  2545. ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
  2546. ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
  2547. // printf("%d %d\n", lumBufIndex, vLumBufSize);
  2548. RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2549. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2550. funnyYCode, c->srcFormat, formatConvBuffer);
  2551. lastInLumBuf++;
  2552. }
  2553. while(lastInChrBuf < lastChrSrcY)
  2554. {
  2555. uint8_t *src1= src[1]+(lastInChrBuf + 1 - (srcSliceY>>1))*srcStride[1];
  2556. uint8_t *src2= src[2]+(lastInChrBuf + 1 - (srcSliceY>>1))*srcStride[2];
  2557. chrBufIndex++;
  2558. ASSERT(chrBufIndex < 2*vChrBufSize)
  2559. ASSERT(lastInChrBuf + 1 - (srcSliceY>>1) < ((srcSliceH+1)>>1))
  2560. ASSERT(lastInChrBuf + 1 - (srcSliceY>>1) >= 0)
  2561. //FIXME replace parameters through context struct (some at least)
  2562. RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, (srcW+1)>>1, chrXInc,
  2563. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2564. funnyUVCode, c->srcFormat, formatConvBuffer);
  2565. lastInChrBuf++;
  2566. }
  2567. //wrap buf index around to stay inside the ring buffer
  2568. if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
  2569. if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
  2570. }
  2571. else // not enough lines left in this slice -> load the rest in the buffer
  2572. {
  2573. /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
  2574. firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
  2575. lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
  2576. vChrBufSize, vLumBufSize);
  2577. */
  2578. //Do horizontal scaling
  2579. while(lastInLumBuf+1 < srcSliceY + srcSliceH)
  2580. {
  2581. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2582. lumBufIndex++;
  2583. ASSERT(lumBufIndex < 2*vLumBufSize)
  2584. ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
  2585. ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
  2586. RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2587. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2588. funnyYCode, c->srcFormat, formatConvBuffer);
  2589. lastInLumBuf++;
  2590. }
  2591. while(lastInChrBuf+1 < ((srcSliceY + srcSliceH)>>1))
  2592. {
  2593. uint8_t *src1= src[1]+(lastInChrBuf + 1 - (srcSliceY>>1))*srcStride[1];
  2594. uint8_t *src2= src[2]+(lastInChrBuf + 1 - (srcSliceY>>1))*srcStride[2];
  2595. chrBufIndex++;
  2596. ASSERT(chrBufIndex < 2*vChrBufSize)
  2597. ASSERT(lastInChrBuf + 1 - (srcSliceY>>1) < ((srcSliceH+1)>>1))
  2598. ASSERT(lastInChrBuf + 1 - (srcSliceY>>1) >= 0)
  2599. RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, (srcW+1)>>1, chrXInc,
  2600. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2601. funnyUVCode, c->srcFormat, formatConvBuffer);
  2602. lastInChrBuf++;
  2603. }
  2604. //wrap buf index around to stay inside the ring buffer
  2605. if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
  2606. if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
  2607. break; //we cant output a dstY line so lets try with the next slice
  2608. }
  2609. #ifdef HAVE_MMX
  2610. b5Dither= dither8[dstY&1];
  2611. g6Dither= dither4[dstY&1];
  2612. g5Dither= dither8[dstY&1];
  2613. r5Dither= dither8[(dstY+1)&1];
  2614. #endif
  2615. if(dstY < dstH-2)
  2616. {
  2617. if(isPlanarYUV(dstFormat)) //YV12 like
  2618. {
  2619. if(dstY&1) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
  2620. if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12
  2621. {
  2622. int16_t *lumBuf = lumPixBuf[0];
  2623. int16_t *chrBuf= chrPixBuf[0];
  2624. RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW);
  2625. }
  2626. else //General YV12
  2627. {
  2628. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2629. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2630. RENAME(yuv2yuvX)(
  2631. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2632. vChrFilter+(dstY>>1)*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2633. dest, uDest, vDest, dstW,
  2634. lumMmxFilter+dstY*vLumFilterSize*4, chrMmxFilter+(dstY>>1)*vChrFilterSize*4);
  2635. }
  2636. }
  2637. else
  2638. {
  2639. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2640. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2641. ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2642. ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  2643. if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB
  2644. {
  2645. int chrAlpha= vChrFilter[2*dstY+1];
  2646. RENAME(yuv2rgb1)(*lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
  2647. dest, dstW, chrAlpha, dstFormat, flags);
  2648. }
  2649. else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB
  2650. {
  2651. int lumAlpha= vLumFilter[2*dstY+1];
  2652. int chrAlpha= vChrFilter[2*dstY+1];
  2653. RENAME(yuv2rgb2)(*lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
  2654. dest, dstW, lumAlpha, chrAlpha, dstFormat, flags);
  2655. }
  2656. else //General RGB
  2657. {
  2658. RENAME(yuv2rgbX)(
  2659. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2660. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2661. dest, dstW, dstFormat,
  2662. lumMmxFilter+dstY*vLumFilterSize*4, chrMmxFilter+dstY*vChrFilterSize*4);
  2663. }
  2664. }
  2665. }
  2666. else // hmm looks like we cant use MMX here without overwriting this arrays tail
  2667. {
  2668. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2669. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2670. if(isPlanarYUV(dstFormat)) //YV12
  2671. {
  2672. if(dstY&1) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
  2673. yuv2yuvXinC(
  2674. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2675. vChrFilter+(dstY>>1)*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2676. dest, uDest, vDest, dstW);
  2677. }
  2678. else
  2679. {
  2680. ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2681. ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  2682. yuv2rgbXinC(
  2683. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2684. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2685. dest, dstW, dstFormat);
  2686. }
  2687. }
  2688. }
  2689. #ifdef HAVE_MMX
  2690. __asm __volatile(SFENCE:::"memory");
  2691. __asm __volatile(EMMS:::"memory");
  2692. #endif
  2693. /* store the changed local variables back into the context */
  2694. c->dstY= dstY;
  2695. c->lumBufIndex= lumBufIndex;
  2696. c->chrBufIndex= chrBufIndex;
  2697. c->lastInLumBuf= lastInLumBuf;
  2698. c->lastInChrBuf= lastInChrBuf;
  2699. }