/*
Copyright (C) 2001-2002 Michael Niedermayer <michaelni@gmx.at>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#undef MOVNTQ
#undef PAVGB
#undef PREFETCH
#undef PREFETCHW
#undef EMMS
#undef SFENCE
#ifdef HAVE_3DNOW
/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
#define EMMS "femms"
#else
#define EMMS "emms"
#endif
#ifdef HAVE_3DNOW
#define PREFETCH "prefetch"
#define PREFETCHW "prefetchw"
#elif defined ( HAVE_MMX2 )
#define PREFETCH "prefetchnta"
#define PREFETCHW "prefetcht0"
#else
#define PREFETCH "/nop"
#define PREFETCHW "/nop"
#endif
#ifdef HAVE_MMX2
#define SFENCE "sfence"
#else
#define SFENCE "/nop"
#endif
#ifdef HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif defined (HAVE_3DNOW)
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif
#ifdef HAVE_MMX2
#define MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif
#define YSCALEYUV2YV12X(x) \
"xorl %%eax, %%eax \n\t"\
"pxor %%mm3, %%mm3 \n\t"\
"pxor %%mm4, %%mm4 \n\t"\
"movl %0, %%edx \n\t"\
".balign 16 \n\t" /* FIXME Unroll? */\
"1: \n\t"\
"movl (%1, %%edx, 4), %%esi \n\t"\
"movq (%2, %%edx, 8), %%mm0 \n\t" /* filterCoeff */\
"movq " #x "(%%esi, %%eax, 2), %%mm2 \n\t" /* srcData */\
"movq 8+" #x "(%%esi, %%eax, 2), %%mm5 \n\t" /* srcData */\
"pmulhw %%mm0, %%mm2 \n\t"\
"pmulhw %%mm0, %%mm5 \n\t"\
"paddw %%mm2, %%mm3 \n\t"\
"paddw %%mm5, %%mm4 \n\t"\
"addl $1, %%edx \n\t"\
" jnz 1b \n\t"\
"psraw $3, %%mm3 \n\t"\
"psraw $3, %%mm4 \n\t"\
"packuswb %%mm4, %%mm3 \n\t"\
MOVNTQ(%%mm3, (%3, %%eax))\
"addl $8, %%eax \n\t"\
"cmpl %4, %%eax \n\t"\
"pxor %%mm3, %%mm3 \n\t"\
"pxor %%mm4, %%mm4 \n\t"\
"movl %0, %%edx \n\t"\
"jb 1b \n\t"
#define YSCALEYUV2YV121 \
"movl %2, %%eax \n\t"\
".balign 16 \n\t" /* FIXME Unroll? */\
"1: \n\t"\
"movq (%0, %%eax, 2), %%mm0 \n\t"\
"movq 8(%0, %%eax, 2), %%mm1 \n\t"\
"psraw $7, %%mm0 \n\t"\
"psraw $7, %%mm1 \n\t"\
"packuswb %%mm1, %%mm0 \n\t"\
MOVNTQ(%%mm0, (%1, %%eax))\
"addl $8, %%eax \n\t"\
"jnc 1b \n\t"
/*
:: "m" (-lumFilterSize), "m" (-chrFilterSize),
"m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
"r" (dest), "m" (dstW),
"m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
: "%eax", "%ebx", "%ecx", "%edx", "%esi"
*/
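/* Vertically filters the chroma planes (first inner loop) and two groups of
   four luma samples (second inner loop), then converts YUV to RGB with the
   yCoeff/ubCoeff/ugCoeff/vgCoeff/vrCoeff constants. Leaves packed bytes with
   B in mm2, G in mm4, R in mm5 and mm7 zeroed, as the WRITEBGR* macros expect. */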
#define YSCALEYUV2RGBX \
"xorl %%eax, %%eax \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movl %1, %%edx \n\t" /* -chrFilterSize */\
"movl %3, %%ebx \n\t" /* chrMmxFilter+lumFilterSize */\
"movl %7, %%ecx \n\t" /* chrSrc+lumFilterSize */\
"pxor %%mm3, %%mm3 \n\t"\
"pxor %%mm4, %%mm4 \n\t"\
"2: \n\t"\
"movl (%%ecx, %%edx, 4), %%esi \n\t"\
"movq (%%ebx, %%edx, 8), %%mm0 \n\t" /* filterCoeff */\
"movq (%%esi, %%eax), %%mm2 \n\t" /* UsrcData */\
"movq 4096(%%esi, %%eax), %%mm5 \n\t" /* VsrcData */\
"pmulhw %%mm0, %%mm2 \n\t"\
"pmulhw %%mm0, %%mm5 \n\t"\
"paddw %%mm2, %%mm3 \n\t"\
"paddw %%mm5, %%mm4 \n\t"\
"addl $1, %%edx \n\t"\
" jnz 2b \n\t"\
\
"movl %0, %%edx \n\t" /* -lumFilterSize */\
"movl %2, %%ebx \n\t" /* lumMmxFilter+lumFilterSize */\
"movl %6, %%ecx \n\t" /* lumSrc+lumFilterSize */\
"pxor %%mm1, %%mm1 \n\t"\
"pxor %%mm7, %%mm7 \n\t"\
"2: \n\t"\
"movl (%%ecx, %%edx, 4), %%esi \n\t"\
"movq (%%ebx, %%edx, 8), %%mm0 \n\t" /* filterCoeff */\
"movq (%%esi, %%eax, 2), %%mm2 \n\t" /* Y1srcData */\
"movq 8(%%esi, %%eax, 2), %%mm5 \n\t" /* Y2srcData */\
"pmulhw %%mm0, %%mm2 \n\t"\
"pmulhw %%mm0, %%mm5 \n\t"\
"paddw %%mm2, %%mm1 \n\t"\
"paddw %%mm5, %%mm7 \n\t"\
"addl $1, %%edx \n\t"\
" jnz 2b \n\t"\
\
"psubw "MANGLE(w400)", %%mm3 \n\t" /* (U-128)8*/\
"psubw "MANGLE(w400)", %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw "MANGLE(ugCoeff)", %%mm3\n\t"\
"pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"pmulhw "MANGLE(ubCoeff)", %%mm2\n\t"\
"pmulhw "MANGLE(vrCoeff)", %%mm5\n\t"\
"psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
"psubw "MANGLE(w80)", %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
"pmulhw "MANGLE(yCoeff)", %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
#define FULL_YSCALEYUV2RGB \
"pxor %%mm7, %%mm7 \n\t"\
"movd %6, %%mm6 \n\t" /*yalpha1*/\
"punpcklwd %%mm6, %%mm6 \n\t"\
"punpcklwd %%mm6, %%mm6 \n\t"\
"movd %7, %%mm5 \n\t" /*uvalpha1*/\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"xorl %%eax, %%eax \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
"movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
"movq (%2, %%eax,2), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax,2), %%mm3 \n\t" /* uvbuf1[eax]*/\
"psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
"psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
"pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"movq 4096(%2, %%eax,2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
"psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
"paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
"movq 4096(%3, %%eax,2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
"psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
"psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
"psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\
"pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
\
\
"pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"pmulhw "MANGLE(ubCoeff)", %%mm3\n\t"\
"psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
"pmulhw "MANGLE(ugCoeff)", %%mm2\n\t"\
"paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
"psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\
\
\
"movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
"pmulhw "MANGLE(vrCoeff)", %%mm0\n\t"\
"pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
"paddw %%mm1, %%mm3 \n\t" /* B*/\
"paddw %%mm1, %%mm0 \n\t" /* R*/\
"packuswb %%mm3, %%mm3 \n\t"\
\
"packuswb %%mm0, %%mm0 \n\t"\
"paddw %%mm4, %%mm2 \n\t"\
"paddw %%mm2, %%mm1 \n\t" /* G*/\
\
"packuswb %%mm1, %%mm1 \n\t"
#define YSCALEYUV2RGB \
"movd %6, %%mm6 \n\t" /*yalpha1*/\
"punpcklwd %%mm6, %%mm6 \n\t"\
"punpcklwd %%mm6, %%mm6 \n\t"\
"movq %%mm6, "MANGLE(asm_yalpha1)"\n\t"\
"movd %7, %%mm5 \n\t" /*uvalpha1*/\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"movq %%mm5, "MANGLE(asm_uvalpha1)"\n\t"\
"xorl %%eax, %%eax \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
"movq 4096(%2, %%eax), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
"movq 4096(%3, %%eax), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
"psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
"psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
"movq "MANGLE(asm_uvalpha1)", %%mm0\n\t"\
"pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
"pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
"psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
"psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
"paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
"psubw "MANGLE(w400)", %%mm3 \n\t" /* (U-128)8*/\
"psubw "MANGLE(w400)", %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw "MANGLE(ugCoeff)", %%mm3\n\t"\
"pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
"movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
"movq 8(%0, %%eax, 2), %%mm6 \n\t" /*buf0[eax]*/\
"movq 8(%1, %%eax, 2), %%mm7 \n\t" /*buf1[eax]*/\
"psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
"psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
"pmulhw "MANGLE(asm_yalpha1)", %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"pmulhw "MANGLE(asm_yalpha1)", %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
"paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
"pmulhw "MANGLE(ubCoeff)", %%mm2\n\t"\
"pmulhw "MANGLE(vrCoeff)", %%mm5\n\t"\
"psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
"psubw "MANGLE(w80)", %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
"pmulhw "MANGLE(yCoeff)", %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
#define YSCALEYUV2RGB1 \
"xorl %%eax, %%eax \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, %%eax), %%mm3 \n\t" /* uvbuf0[eax]*/\
"movq 4096(%2, %%eax), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
"psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
"psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
"psubw "MANGLE(w400)", %%mm3 \n\t" /* (U-128)8*/\
"psubw "MANGLE(w400)", %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw "MANGLE(ugCoeff)", %%mm3\n\t"\
"pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"movq (%0, %%eax, 2), %%mm1 \n\t" /*buf0[eax]*/\
"movq 8(%0, %%eax, 2), %%mm7 \n\t" /*buf0[eax]*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"pmulhw "MANGLE(ubCoeff)", %%mm2\n\t"\
"pmulhw "MANGLE(vrCoeff)", %%mm5\n\t"\
"psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
"psubw "MANGLE(w80)", %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
"pmulhw "MANGLE(yCoeff)", %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
// do vertical chrominance interpolation
#define YSCALEYUV2RGB1b \
"xorl %%eax, %%eax \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, %%eax), %%mm2 \n\t" /* uvbuf0[eax]*/\
"movq (%3, %%eax), %%mm3 \n\t" /* uvbuf1[eax]*/\
"movq 4096(%2, %%eax), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
"movq 4096(%3, %%eax), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
"paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
"psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
"psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
"psubw "MANGLE(w400)", %%mm3 \n\t" /* (U-128)8*/\
"psubw "MANGLE(w400)", %%mm4 \n\t" /* (V-128)8*/\
"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
"movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
"pmulhw "MANGLE(ugCoeff)", %%mm3\n\t"\
"pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
/* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
"movq (%0, %%eax, 2), %%mm1 \n\t" /*buf0[eax]*/\
"movq 8(%0, %%eax, 2), %%mm7 \n\t" /*buf0[eax]*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
"pmulhw "MANGLE(ubCoeff)", %%mm2\n\t"\
"pmulhw "MANGLE(vrCoeff)", %%mm5\n\t"\
"psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
"psubw "MANGLE(w80)", %%mm7 \n\t" /* 8(Y-16)*/\
"pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
"pmulhw "MANGLE(yCoeff)", %%mm7 \n\t"\
/* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
"paddw %%mm3, %%mm4 \n\t"\
"movq %%mm2, %%mm0 \n\t"\
"movq %%mm5, %%mm6 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
"punpcklwd %%mm2, %%mm2 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm4, %%mm4 \n\t"\
"paddw %%mm1, %%mm2 \n\t"\
"paddw %%mm1, %%mm5 \n\t"\
"paddw %%mm1, %%mm4 \n\t"\
"punpckhwd %%mm0, %%mm0 \n\t"\
"punpckhwd %%mm6, %%mm6 \n\t"\
"punpckhwd %%mm3, %%mm3 \n\t"\
"paddw %%mm7, %%mm0 \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw %%mm7, %%mm3 \n\t"\
/* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
"packuswb %%mm0, %%mm2 \n\t"\
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
#define WRITEBGR32 \
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq %%mm2, %%mm1 \n\t" /* B */\
"movq %%mm5, %%mm6 \n\t" /* R */\
"punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
"punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
"punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
"punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
"movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
"movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
"punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
"punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
"punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
MOVNTQ(%%mm0, (%4, %%eax, 4))\
MOVNTQ(%%mm2, 8(%4, %%eax, 4))\
MOVNTQ(%%mm1, 16(%4, %%eax, 4))\
MOVNTQ(%%mm3, 24(%4, %%eax, 4))\
\
"addl $8, %%eax \n\t"\
"cmpl %5, %%eax \n\t"\
" jb 1b \n\t"
#define WRITEBGR16 \
"pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
"pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
"pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
"psrlq $3, %%mm2 \n\t"\
\
"movq %%mm2, %%mm1 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
\
"punpcklbw %%mm7, %%mm3 \n\t"\
"punpcklbw %%mm5, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm4 \n\t"\
"punpckhbw %%mm5, %%mm1 \n\t"\
\
"psllq $3, %%mm3 \n\t"\
"psllq $3, %%mm4 \n\t"\
\
"por %%mm3, %%mm2 \n\t"\
"por %%mm4, %%mm1 \n\t"\
\
MOVNTQ(%%mm2, (%4, %%eax, 2))\
MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
\
"addl $8, %%eax \n\t"\
"cmpl %5, %%eax \n\t"\
" jb 1b \n\t"
#define WRITEBGR15 \
"pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
"pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
"pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
"psrlq $3, %%mm2 \n\t"\
"psrlq $1, %%mm5 \n\t"\
\
"movq %%mm2, %%mm1 \n\t"\
"movq %%mm4, %%mm3 \n\t"\
\
"punpcklbw %%mm7, %%mm3 \n\t"\
"punpcklbw %%mm5, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm4 \n\t"\
"punpckhbw %%mm5, %%mm1 \n\t"\
\
"psllq $2, %%mm3 \n\t"\
"psllq $2, %%mm4 \n\t"\
\
"por %%mm3, %%mm2 \n\t"\
"por %%mm4, %%mm1 \n\t"\
\
MOVNTQ(%%mm2, (%4, %%eax, 2))\
MOVNTQ(%%mm1, 8(%4, %%eax, 2))\
\
"addl $8, %%eax \n\t"\
"cmpl %5, %%eax \n\t"\
" jb 1b \n\t"
#define WRITEBGR24OLD \
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq %%mm2, %%mm1 \n\t" /* B */\
"movq %%mm5, %%mm6 \n\t" /* R */\
"punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
"punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
"punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
"punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
"movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
"movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
"punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
"punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
"punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
"movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
"psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
"pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 0 */\
"pand "MANGLE(bm11111000)", %%mm0\n\t" /* 00RGB000 0.5 */\
"por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
"movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
"psllq $48, %%mm2 \n\t" /* GB000000 1 */\
"por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
\
"movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
"psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
"por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
"pand "MANGLE(bm00001111)", %%mm2\n\t" /* 0000RGBR 1 */\
"movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
"psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
"pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 2 */\
"pand "MANGLE(bm11111000)", %%mm1\n\t" /* 00RGB000 2.5 */\
"por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
"movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
"psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
"por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
\
"psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
"movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
"psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
"pand "MANGLE(bm00000111)", %%mm5\n\t" /* 00000RGB 3 */\
"pand "MANGLE(bm11111000)", %%mm3\n\t" /* 00RGB000 3.5 */\
"por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
"psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
"por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
\
MOVNTQ(%%mm0, (%%ebx))\
MOVNTQ(%%mm2, 8(%%ebx))\
MOVNTQ(%%mm3, 16(%%ebx))\
"addl $24, %%ebx \n\t"\
\
"addl $8, %%eax \n\t"\
"cmpl %5, %%eax \n\t"\
" jb 1b \n\t"
#define WRITEBGR24MMX \
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq %%mm2, %%mm1 \n\t" /* B */\
"movq %%mm5, %%mm6 \n\t" /* R */\
"punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
"punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
"punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
"punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
"movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
"movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
"punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
"punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
"punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
"punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
\
"movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
"movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
"movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
"movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
\
"psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
"psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
"psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
"psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
\
"punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
"punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
"punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
"punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
\
"psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
"movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
"psllq $40, %%mm2 \n\t" /* GB000000 1 */\
"por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
MOVNTQ(%%mm0, (%%ebx))\
\
"psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
"movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
"psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
"por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
MOVNTQ(%%mm6, 8(%%ebx))\
\
"psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
"psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
"por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
MOVNTQ(%%mm5, 16(%%ebx))\
\
"addl $24, %%ebx \n\t"\
\
"addl $8, %%eax \n\t"\
"cmpl %5, %%eax \n\t"\
" jb 1b \n\t"
#define WRITEBGR24MMX2 \
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq "MANGLE(M24A)", %%mm0 \n\t"\
"movq "MANGLE(M24C)", %%mm7 \n\t"\
"pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
"pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
"pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
\
"pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
"pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
"pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
\
"psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
"por %%mm1, %%mm6 \n\t"\
"por %%mm3, %%mm6 \n\t"\
MOVNTQ(%%mm6, (%%ebx))\
\
"psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
"pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
"pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
"pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
\
"pand "MANGLE(M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
"pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
"pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
\
"por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
"por %%mm3, %%mm6 \n\t"\
MOVNTQ(%%mm6, 8(%%ebx))\
\
"pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
"pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
"pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
\
"pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
"pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
"pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
\
"por %%mm1, %%mm3 \n\t"\
"por %%mm3, %%mm6 \n\t"\
MOVNTQ(%%mm6, 16(%%ebx))\
\
"addl $24, %%ebx \n\t"\
\
"addl $8, %%eax \n\t"\
"cmpl %5, %%eax \n\t"\
" jb 1b \n\t"
#ifdef HAVE_MMX2
#undef WRITEBGR24
#define WRITEBGR24 WRITEBGR24MMX2
#else
#undef WRITEBGR24
#define WRITEBGR24 WRITEBGR24MMX
#endif
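/**
 * vertical scale YV12 to YV12: runs the polyphase filter over the line
 * buffers for luma and, if uDest is non-NULL, both chroma planes
 */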
static inline void RENAME(yuv2yuvX)(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW,
int16_t * lumMmxFilter, int16_t * chrMmxFilter)
{
#ifdef HAVE_MMX
if(uDest != NULL)
{
asm volatile(
YSCALEYUV2YV12X(0)
:: "m" (-chrFilterSize), "r" (chrSrc+chrFilterSize),
"r" (chrMmxFilter+chrFilterSize*4), "r" (uDest), "m" (chrDstW)
: "%eax", "%edx", "%esi"
);
asm volatile(
YSCALEYUV2YV12X(4096)
:: "m" (-chrFilterSize), "r" (chrSrc+chrFilterSize),
"r" (chrMmxFilter+chrFilterSize*4), "r" (vDest), "m" (chrDstW)
: "%eax", "%edx", "%esi"
);
}
asm volatile(
YSCALEYUV2YV12X(0)
:: "m" (-lumFilterSize), "r" (lumSrc+lumFilterSize),
"r" (lumMmxFilter+lumFilterSize*4), "r" (dest), "m" (dstW)
: "%eax", "%edx", "%esi"
);
#else
yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
chrFilter, chrSrc, chrFilterSize,
dest, uDest, vDest, dstW, chrDstW);
#endif
}
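/**
 * 1:1 vertical scale: shifts the 16-bit line buffers down to 8 bit and
 * stores them, clipping on overflow
 */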
static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW, int chrDstW)
{
#ifdef HAVE_MMX
if(uDest != NULL)
{
asm volatile(
YSCALEYUV2YV121
:: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
"g" (-chrDstW)
: "%eax"
);
asm volatile(
YSCALEYUV2YV121
:: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
"g" (-chrDstW)
: "%eax"
);
}
asm volatile(
YSCALEYUV2YV121
:: "r" (lumSrc + dstW), "r" (dest + dstW),
"g" (-dstW)
: "%eax"
);
#else
int i;
for(i=0; i<dstW; i++)
{
int val= lumSrc[i]>>7;
if(val&256){
if(val<0) val=0;
else val=255;
}
dest[i]= val;
}
if(uDest != NULL)
for(i=0; i<chrDstW; i++)
{
int u=chrSrc[i]>>7;
int v=chrSrc[i + 2048]>>7;
if((u|v)&256){
if(u<0) u=0;
else if (u>255) u=255;
if(v<0) v=0;
else if (v>255) v=255;
}
uDest[i]= u;
vDest[i]= v;
}
#endif
}
/**
 * vertical scale YV12 to RGB
 */
static inline void RENAME(yuv2rgbX)(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
uint8_t *dest, int dstW, int dstFormat, int16_t * lumMmxFilter, int16_t * chrMmxFilter)
{
/* if(flags&SWS_FULL_UV_IPOL)
{
//FIXME
}//FULL_UV_IPOL
else*/
{
#ifdef HAVE_MMX
if(dstFormat == IMGFMT_BGR32) //FIXME untested
{
asm volatile(
YSCALEYUV2RGBX
WRITEBGR32
:: "m" (-lumFilterSize), "m" (-chrFilterSize),
"m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
"r" (dest), "m" (dstW),
"m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
: "%eax", "%ebx", "%ecx", "%edx", "%esi"
);
}
else if(dstFormat == IMGFMT_BGR24) //FIXME untested
{
asm volatile(
YSCALEYUV2RGBX
"leal (%%eax, %%eax, 2), %%ebx \n\t" //FIXME optimize
"addl %4, %%ebx \n\t"
WRITEBGR24
:: "m" (-lumFilterSize), "m" (-chrFilterSize),
"m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
"r" (dest), "m" (dstW),
"m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
: "%eax", "%ebx", "%ecx", "%edx", "%esi"
);
}
else if(dstFormat==IMGFMT_BGR15)
{
asm volatile(
YSCALEYUV2RGBX
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
"paddusb "MANGLE(g5Dither)", %%mm4\n\t"
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
WRITEBGR15
:: "m" (-lumFilterSize), "m" (-chrFilterSize),
"m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
"r" (dest), "m" (dstW),
"m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
: "%eax", "%ebx", "%ecx", "%edx", "%esi"
);
}
else if(dstFormat==IMGFMT_BGR16)
{
asm volatile(
YSCALEYUV2RGBX
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
"paddusb "MANGLE(g6Dither)", %%mm4\n\t"
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
WRITEBGR16
:: "m" (-lumFilterSize), "m" (-chrFilterSize),
"m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
"r" (dest), "m" (dstW),
"m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
: "%eax", "%ebx", "%ecx", "%edx", "%esi"
);
}
#else
yuv2rgbXinC(lumFilter, lumSrc, lumFilterSize,
chrFilter, chrSrc, chrFilterSize,
dest, dstW, dstFormat);
#endif
} //!FULL_UV_IPOL
}
/**
 * vertical bilinear scale YV12 to RGB
 */
static inline void RENAME(yuv2rgb2)(uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
uint8_t *dest, int dstW, int yalpha, int uvalpha, int dstFormat, int flags)
{
int yalpha1=yalpha^4095;
int uvalpha1=uvalpha^4095;
if(flags&SWS_FULL_CHR_H_INT)
{
#ifdef HAVE_MMX
if(dstFormat==IMGFMT_BGR32)
{
asm volatile(
FULL_YSCALEYUV2RGB
"punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
"punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
"movq %%mm3, %%mm1 \n\t"
"punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
"punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
MOVNTQ(%%mm3, (%4, %%eax, 4))
MOVNTQ(%%mm1, 8(%4, %%eax, 4))
"addl $4, %%eax \n\t"
"cmpl %5, %%eax \n\t"
" jb 1b \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
: "%eax"
);
}
else if(dstFormat==IMGFMT_BGR24)
{
asm volatile(
FULL_YSCALEYUV2RGB
// lsb ... msb
"punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
"punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
"movq %%mm3, %%mm1 \n\t"
"punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
"punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
"movq %%mm3, %%mm2 \n\t" // BGR0BGR0
"psrlq $8, %%mm3 \n\t" // GR0BGR00
"pand "MANGLE(bm00000111)", %%mm2\n\t" // BGR00000
"pand "MANGLE(bm11111000)", %%mm3\n\t" // 000BGR00
"por %%mm2, %%mm3 \n\t" // BGRBGR00
"movq %%mm1, %%mm2 \n\t"
"psllq $48, %%mm1 \n\t" // 000000BG
"por %%mm1, %%mm3 \n\t" // BGRBGRBG
"movq %%mm2, %%mm1 \n\t" // BGR0BGR0
"psrld $16, %%mm2 \n\t" // R000R000
"psrlq $24, %%mm1 \n\t" // 0BGR0000
"por %%mm2, %%mm1 \n\t" // RBGRR000
"movl %4, %%ebx \n\t"
"addl %%eax, %%ebx \n\t"
#ifdef HAVE_MMX2
//FIXME Alignment
"movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
"movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
#else
"movd %%mm3, (%%ebx, %%eax, 2) \n\t"
"psrlq $32, %%mm3 \n\t"
"movd %%mm3, 4(%%ebx, %%eax, 2) \n\t"
"movd %%mm1, 8(%%ebx, %%eax, 2) \n\t"
#endif
"addl $4, %%eax \n\t"
"cmpl %5, %%eax \n\t"
" jb 1b \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
: "%eax", "%ebx"
);
}
else if(dstFormat==IMGFMT_BGR15)
{
asm volatile(
FULL_YSCALEYUV2RGB
#ifdef DITHER1XBPP
"paddusb "MANGLE(g5Dither)", %%mm1\n\t"
"paddusb "MANGLE(r5Dither)", %%mm0\n\t"
"paddusb "MANGLE(b5Dither)", %%mm3\n\t"
#endif
"punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
"punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
"punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
"psrlw $3, %%mm3 \n\t"
"psllw $2, %%mm1 \n\t"
"psllw $7, %%mm0 \n\t"
"pand "MANGLE(g15Mask)", %%mm1 \n\t"
"pand "MANGLE(r15Mask)", %%mm0 \n\t"
"por %%mm3, %%mm1 \n\t"
"por %%mm1, %%mm0 \n\t"
MOVNTQ(%%mm0, (%4, %%eax, 2))
"addl $4, %%eax \n\t"
"cmpl %5, %%eax \n\t"
" jb 1b \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
: "%eax"
);
}
else if(dstFormat==IMGFMT_BGR16)
{
asm volatile(
FULL_YSCALEYUV2RGB
#ifdef DITHER1XBPP
"paddusb "MANGLE(g6Dither)", %%mm1\n\t"
"paddusb "MANGLE(r5Dither)", %%mm0\n\t"
"paddusb "MANGLE(b5Dither)", %%mm3\n\t"
#endif
"punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
"punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
"punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
"psrlw $3, %%mm3 \n\t"
"psllw $3, %%mm1 \n\t"
"psllw $8, %%mm0 \n\t"
"pand "MANGLE(g16Mask)", %%mm1 \n\t"
"pand "MANGLE(r16Mask)", %%mm0 \n\t"
"por %%mm3, %%mm1 \n\t"
"por %%mm1, %%mm0 \n\t"
MOVNTQ(%%mm0, (%4, %%eax, 2))
"addl $4, %%eax \n\t"
"cmpl %5, %%eax \n\t"
" jb 1b \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
: "%eax"
);
}
#else
if(dstFormat==IMGFMT_BGR32)
{
int i;
#ifdef WORDS_BIGENDIAN
dest++;
#endif
for(i=0;i<dstW;i++){
// vertical linear interpolation && yuv2rgb in a single step:
int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
dest+= 4;
}
}
else if(dstFormat==IMGFMT_BGR24)
{
int i;
for(i=0;i<dstW;i++){
// vertical linear interpolation && yuv2rgb in a single step:
int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
dest+= 3;
}
}
else if(dstFormat==IMGFMT_BGR16)
{
int i;
for(i=0;i<dstW;i++){
// vertical linear interpolation && yuv2rgb in a single step:
int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
((uint16_t*)dest)[i] =
clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
clip_table16r[(Y + yuvtab_3343[V]) >>13];
}
}
else if(dstFormat==IMGFMT_BGR15)
{
int i;
for(i=0;i<dstW;i++){
// vertical linear interpolation && yuv2rgb in a single step:
int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
((uint16_t*)dest)[i] =
clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
clip_table15r[(Y + yuvtab_3343[V]) >>13];
}
}
#endif
}//FULL_UV_IPOL
else
{
#ifdef HAVE_MMX
if(dstFormat==IMGFMT_BGR32)
{
asm volatile(
YSCALEYUV2RGB
WRITEBGR32
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
: "%eax"
);
}
else if(dstFormat==IMGFMT_BGR24)
{
asm volatile(
"movl %4, %%ebx \n\t"
YSCALEYUV2RGB
WRITEBGR24
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
: "%eax", "%ebx"
);
}
else if(dstFormat==IMGFMT_BGR15)
{
asm volatile(
YSCALEYUV2RGB
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
"paddusb "MANGLE(g5Dither)", %%mm4\n\t"
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
WRITEBGR15
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
: "%eax"
);
}
else if(dstFormat==IMGFMT_BGR16)
{
asm volatile(
YSCALEYUV2RGB
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
"paddusb "MANGLE(g6Dither)", %%mm4\n\t"
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
WRITEBGR16
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
: "%eax"
);
}
#else
if(dstFormat==IMGFMT_BGR32)
{
int i;
#ifdef WORDS_BIGENDIAN
dest++;
#endif
for(i=0; i<dstW-1; i+=2){
// vertical linear interpolation && yuv2rgb in a single step:
int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
int Cb= yuvtab_40cf[U];
int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
int Cr= yuvtab_3343[V];
dest[4*i+0]=clip_table[((Y1 + Cb) >>13)];
dest[4*i+1]=clip_table[((Y1 + Cg) >>13)];
dest[4*i+2]=clip_table[((Y1 + Cr) >>13)];
dest[4*i+4]=clip_table[((Y2 + Cb) >>13)];
dest[4*i+5]=clip_table[((Y2 + Cg) >>13)];
dest[4*i+6]=clip_table[((Y2 + Cr) >>13)];
}
}
else if(dstFormat==IMGFMT_BGR24)
{
int i;
for(i=0; i<dstW-1; i+=2){
// vertical linear interpolation && yuv2rgb in a single step:
int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
int Cb= yuvtab_40cf[U];
int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
int Cr= yuvtab_3343[V];
dest[0]=clip_table[((Y1 + Cb) >>13)];
dest[1]=clip_table[((Y1 + Cg) >>13)];
dest[2]=clip_table[((Y1 + Cr) >>13)];
dest[3]=clip_table[((Y2 + Cb) >>13)];
dest[4]=clip_table[((Y2 + Cg) >>13)];
dest[5]=clip_table[((Y2 + Cr) >>13)];
dest+=6;
}
}
else if(dstFormat==IMGFMT_BGR16)
{
int i;
#ifdef DITHER1XBPP
static int ditherb1=1<<14;
static int ditherg1=1<<13;
static int ditherr1=2<<14;
static int ditherb2=3<<14;
static int ditherg2=3<<13;
static int ditherr2=0<<14;
ditherb1 ^= (1^2)<<14;
ditherg1 ^= (1^2)<<13;
ditherr1 ^= (1^2)<<14;
ditherb2 ^= (3^0)<<14;
ditherg2 ^= (3^0)<<13;
ditherr2 ^= (3^0)<<14;
#else
const int ditherb1=0;
const int ditherg1=0;
const int ditherr1=0;
const int ditherb2=0;
const int ditherg2=0;
const int ditherr2=0;
#endif
for(i=0; i<dstW-1; i+=2){
// vertical linear interpolation && yuv2rgb in a single step:
int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
int Cb= yuvtab_40cf[U];
int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
int Cr= yuvtab_3343[V];
((uint16_t*)dest)[i] =
clip_table16b[(Y1 + Cb + ditherb1) >>13] |
clip_table16g[(Y1 + Cg + ditherg1) >>13] |
clip_table16r[(Y1 + Cr + ditherr1) >>13];
((uint16_t*)dest)[i+1] =
clip_table16b[(Y2 + Cb + ditherb2) >>13] |
clip_table16g[(Y2 + Cg + ditherg2) >>13] |
clip_table16r[(Y2 + Cr + ditherr2) >>13];
}
}
else if(dstFormat==IMGFMT_BGR15)
{
int i;
#ifdef DITHER1XBPP
static int ditherb1=1<<14;
static int ditherg1=1<<14;
static int ditherr1=2<<14;
static int ditherb2=3<<14;
static int ditherg2=3<<14;
static int ditherr2=0<<14;
ditherb1 ^= (1^2)<<14;
ditherg1 ^= (1^2)<<14;
ditherr1 ^= (1^2)<<14;
ditherb2 ^= (3^0)<<14;
ditherg2 ^= (3^0)<<14;
ditherr2 ^= (3^0)<<14;
#else
const int ditherb1=0;
const int ditherg1=0;
const int ditherr1=0;
const int ditherb2=0;
const int ditherg2=0;
const int ditherr2=0;
#endif
for(i=0; i<dstW-1; i+=2){
// vertical linear interpolation && yuv2rgb in a single step:
int Y1=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
int Y2=yuvtab_2568[((buf0[i+1]*yalpha1+buf1[i+1]*yalpha)>>19)];
int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
int Cb= yuvtab_40cf[U];
int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
int Cr= yuvtab_3343[V];
((uint16_t*)dest)[i] =
clip_table15b[(Y1 + Cb + ditherb1) >>13] |
clip_table15g[(Y1 + Cg + ditherg1) >>13] |
clip_table15r[(Y1 + Cr + ditherr1) >>13];
((uint16_t*)dest)[i+1] =
clip_table15b[(Y2 + Cb + ditherb2) >>13] |
clip_table15g[(Y2 + Cg + ditherg2) >>13] |
clip_table15r[(Y2 + Cr + ditherr2) >>13];
}
}
#endif
} //!FULL_UV_IPOL
}
/**
 * YV12 to RGB without scaling or interpolating
 */
static inline void RENAME(yuv2rgb1)(uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags)
{
int uvalpha1=uvalpha^4095;
const int yalpha1=0;
if(flags&SWS_FULL_CHR_H_INT)
{
RENAME(yuv2rgb2)(buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, dstFormat, flags);
return;
}
#ifdef HAVE_MMX
if( uvalpha < 2048 ) // note this is not correct (shifts chrominance by 0.5 pixels) but it's a bit faster
{
if(dstFormat==IMGFMT_BGR32)
{
asm volatile(
YSCALEYUV2RGB1
WRITEBGR32
:: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
: "%eax"
);
}
else if(dstFormat==IMGFMT_BGR24)
{
asm volatile(
"movl %4, %%ebx \n\t"
YSCALEYUV2RGB1
WRITEBGR24
:: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
: "%eax", "%ebx"
);
}
else if(dstFormat==IMGFMT_BGR15)
{
asm volatile(
YSCALEYUV2RGB1
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
"paddusb "MANGLE(g5Dither)", %%mm4\n\t"
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
WRITEBGR15
:: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
: "%eax"
);
}
else if(dstFormat==IMGFMT_BGR16)
{
asm volatile(
YSCALEYUV2RGB1
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
"paddusb "MANGLE(g6Dither)", %%mm4\n\t"
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
WRITEBGR16
:: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
: "%eax"
);
}
}
else
{
if(dstFormat==IMGFMT_BGR32)
{
asm volatile(
YSCALEYUV2RGB1b
WRITEBGR32
:: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
: "%eax"
);
}
else if(dstFormat==IMGFMT_BGR24)
{
asm volatile(
"movl %4, %%ebx \n\t"
YSCALEYUV2RGB1b
WRITEBGR24
:: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
: "%eax", "%ebx"
);
}
else if(dstFormat==IMGFMT_BGR15)
{
asm volatile(
YSCALEYUV2RGB1b
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
"paddusb "MANGLE(g5Dither)", %%mm4\n\t"
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
WRITEBGR15
:: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
: "%eax"
);
}
else if(dstFormat==IMGFMT_BGR16)
{
asm volatile(
YSCALEYUV2RGB1b
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
"paddusb "MANGLE(g6Dither)", %%mm4\n\t"
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
WRITEBGR16
:: "r" (buf0), "r" (buf0), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
: "%eax"
);
}
}
#else
//FIXME write 2 versions (for even & odd lines)
if(dstFormat==IMGFMT_BGR32)
{
int i;
#ifdef WORDS_BIGENDIAN
dest++;
#endif
for(i=0; i<dstW-1; i+=2){
// vertical linear interpolation && yuv2rgb in a single step:
int Y1=yuvtab_2568[buf0[i]>>7];
int Y2=yuvtab_2568[buf0[i+1]>>7];
int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
int Cb= yuvtab_40cf[U];
int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
int Cr= yuvtab_3343[V];
dest[4*i+0]=clip_table[((Y1 + Cb) >>13)];
dest[4*i+1]=clip_table[((Y1 + Cg) >>13)];
dest[4*i+2]=clip_table[((Y1 + Cr) >>13)];
dest[4*i+4]=clip_table[((Y2 + Cb) >>13)];
dest[4*i+5]=clip_table[((Y2 + Cg) >>13)];
dest[4*i+6]=clip_table[((Y2 + Cr) >>13)];
}
}
else if(dstFormat==IMGFMT_BGR24)
{
int i;
for(i=0; i<dstW-1; i+=2){
// vertical linear interpolation && yuv2rgb in a single step:
int Y1=yuvtab_2568[buf0[i]>>7];
int Y2=yuvtab_2568[buf0[i+1]>>7];
int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
int Cb= yuvtab_40cf[U];
int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
int Cr= yuvtab_3343[V];
dest[0]=clip_table[((Y1 + Cb) >>13)];
dest[1]=clip_table[((Y1 + Cg) >>13)];
dest[2]=clip_table[((Y1 + Cr) >>13)];
dest[3]=clip_table[((Y2 + Cb) >>13)];
dest[4]=clip_table[((Y2 + Cg) >>13)];
dest[5]=clip_table[((Y2 + Cr) >>13)];
dest+=6;
}
}
else if(dstFormat==IMGFMT_BGR16)
{
int i;
#ifdef DITHER1XBPP
static int ditherb1=1<<14;
static int ditherg1=1<<13;
static int ditherr1=2<<14;
static int ditherb2=3<<14;
static int ditherg2=3<<13;
static int ditherr2=0<<14;
ditherb1 ^= (1^2)<<14;
ditherg1 ^= (1^2)<<13;
ditherr1 ^= (1^2)<<14;
ditherb2 ^= (3^0)<<14;
ditherg2 ^= (3^0)<<13;
ditherr2 ^= (3^0)<<14;
#else
const int ditherb1=0;
const int ditherg1=0;
const int ditherr1=0;
const int ditherb2=0;
const int ditherg2=0;
const int ditherr2=0;
#endif
for(i=0; i<dstW-1; i+=2){
// vertical linear interpolation && yuv2rgb in a single step:
int Y1=yuvtab_2568[buf0[i]>>7];
int Y2=yuvtab_2568[buf0[i+1]>>7];
int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
int Cb= yuvtab_40cf[U];
int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
int Cr= yuvtab_3343[V];
((uint16_t*)dest)[i] =
clip_table16b[(Y1 + Cb + ditherb1) >>13] |
clip_table16g[(Y1 + Cg + ditherg1) >>13] |
clip_table16r[(Y1 + Cr + ditherr1) >>13];
((uint16_t*)dest)[i+1] =
clip_table16b[(Y2 + Cb + ditherb2) >>13] |
clip_table16g[(Y2 + Cg + ditherg2) >>13] |
clip_table16r[(Y2 + Cr + ditherr2) >>13];
}
}
else if(dstFormat==IMGFMT_BGR15)
{
int i;
#ifdef DITHER1XBPP
static int ditherb1=1<<14;
static int ditherg1=1<<14;
static int ditherr1=2<<14;
static int ditherb2=3<<14;
static int ditherg2=3<<14;
static int ditherr2=0<<14;
ditherb1 ^= (1^2)<<14;
ditherg1 ^= (1^2)<<14;
ditherr1 ^= (1^2)<<14;
ditherb2 ^= (3^0)<<14;
ditherg2 ^= (3^0)<<14;
ditherr2 ^= (3^0)<<14;
#else
const int ditherb1=0;
const int ditherg1=0;
const int ditherr1=0;
const int ditherb2=0;
const int ditherg2=0;
const int ditherr2=0;
#endif
for(i=0; i<dstW-1; i+=2){
// vertical linear interpolation && yuv2rgb in a single step:
int Y1=yuvtab_2568[buf0[i]>>7];
int Y2=yuvtab_2568[buf0[i+1]>>7];
int U=((uvbuf0[i>>1]*uvalpha1+uvbuf1[i>>1]*uvalpha)>>19);
int V=((uvbuf0[(i>>1)+2048]*uvalpha1+uvbuf1[(i>>1)+2048]*uvalpha)>>19);
int Cb= yuvtab_40cf[U];
int Cg= yuvtab_1a1e[V] + yuvtab_0c92[U];
int Cr= yuvtab_3343[V];
((uint16_t*)dest)[i] =
clip_table15b[(Y1 + Cb + ditherb1) >>13] |
clip_table15g[(Y1 + Cg + ditherg1) >>13] |
clip_table15r[(Y1 + Cr + ditherr1) >>13];
((uint16_t*)dest)[i+1] =
clip_table15b[(Y2 + Cb + ditherb2) >>13] |
clip_table15g[(Y2 + Cg + ditherg2) >>13] |
clip_table15r[(Y2 + Cr + ditherr2) >>13];
}
}
#endif
}
//FIXME yuy2* can read up to 7 samples too many
  1440. static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, int width)
  1441. {
  1442. #ifdef HAVE_MMX
  1443. asm volatile(
  1444. "movq "MANGLE(bm01010101)", %%mm2\n\t"
  1445. "movl %0, %%eax \n\t"
  1446. "1: \n\t"
  1447. "movq (%1, %%eax,2), %%mm0 \n\t"
  1448. "movq 8(%1, %%eax,2), %%mm1 \n\t"
  1449. "pand %%mm2, %%mm0 \n\t"
  1450. "pand %%mm2, %%mm1 \n\t"
  1451. "packuswb %%mm1, %%mm0 \n\t"
  1452. "movq %%mm0, (%2, %%eax) \n\t"
  1453. "addl $8, %%eax \n\t"
  1454. " js 1b \n\t"
  1455. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1456. : "%eax"
  1457. );
  1458. #else
  1459. int i;
  1460. for(i=0; i<width; i++)
  1461. dst[i]= src[2*i];
  1462. #endif
  1463. }
  1464. static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1465. {
  1466. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1467. asm volatile(
  1468. "movq "MANGLE(bm01010101)", %%mm4\n\t"
  1469. "movl %0, %%eax \n\t"
  1470. "1: \n\t"
  1471. "movq (%1, %%eax,4), %%mm0 \n\t"
  1472. "movq 8(%1, %%eax,4), %%mm1 \n\t"
  1473. "movq (%2, %%eax,4), %%mm2 \n\t"
  1474. "movq 8(%2, %%eax,4), %%mm3 \n\t"
  1475. PAVGB(%%mm2, %%mm0)
  1476. PAVGB(%%mm3, %%mm1)
  1477. "psrlw $8, %%mm0 \n\t"
  1478. "psrlw $8, %%mm1 \n\t"
  1479. "packuswb %%mm1, %%mm0 \n\t"
  1480. "movq %%mm0, %%mm1 \n\t"
  1481. "psrlw $8, %%mm0 \n\t"
  1482. "pand %%mm4, %%mm1 \n\t"
  1483. "packuswb %%mm0, %%mm0 \n\t"
  1484. "packuswb %%mm1, %%mm1 \n\t"
  1485. "movd %%mm0, (%4, %%eax) \n\t"
  1486. "movd %%mm1, (%3, %%eax) \n\t"
  1487. "addl $4, %%eax \n\t"
  1488. " js 1b \n\t"
  1489. : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
  1490. : "%eax"
  1491. );
  1492. #else
  1493. int i;
  1494. for(i=0; i<width; i++)
  1495. {
  1496. dstU[i]= (src1[4*i + 1] + src2[4*i + 1])>>1;
  1497. dstV[i]= (src1[4*i + 3] + src2[4*i + 3])>>1;
  1498. }
  1499. #endif
  1500. }
static inline void RENAME(bgr32ToY)(uint8_t *dst, uint8_t *src, int width)
{
#ifdef HAVE_MMXFIXME
#else
int i;
for(i=0; i<width; i++)
{
int b= src[i*4+0];
int g= src[i*4+1];
int r= src[i*4+2];
dst[i]= ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
}
#endif
}
static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
#ifdef HAVE_MMXFIXME
#else
int i;
for(i=0; i<width; i++)
{
int b= src1[8*i + 0] + src1[8*i + 4] + src2[8*i + 0] + src2[8*i + 4];
int g= src1[8*i + 1] + src1[8*i + 5] + src2[8*i + 1] + src2[8*i + 5];
int r= src1[8*i + 2] + src1[8*i + 6] + src2[8*i + 2] + src2[8*i + 6];
dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
}
#endif
}
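/* Note: the *ToUV routines sum a 2x2 block (two neighbouring pixels on each
   of two lines), so r/g/b hold the sum of 4 samples and the extra +2 in the
   shift (RGB2YUV_SHIFT+2) divides by 4 again. Since RU+GU+BU and RV+GV+BV
   are (approximately) zero by construction, a uniform block still lands on
   the 128 midpoint. */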
static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, int width)
{
#ifdef HAVE_MMX
asm volatile(
"movl %2, %%eax \n\t"
"movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t"
"movq "MANGLE(w1111)", %%mm5 \n\t"
"pxor %%mm7, %%mm7 \n\t"
"leal (%%eax, %%eax, 2), %%ebx \n\t"
".balign 16 \n\t"
"1: \n\t"
PREFETCH" 64(%0, %%ebx) \n\t"
"movd (%0, %%ebx), %%mm0 \n\t"
"movd 3(%0, %%ebx), %%mm1 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"movd 6(%0, %%ebx), %%mm2 \n\t"
"movd 9(%0, %%ebx), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"pmaddwd %%mm6, %%mm0 \n\t"
"pmaddwd %%mm6, %%mm1 \n\t"
"pmaddwd %%mm6, %%mm2 \n\t"
"pmaddwd %%mm6, %%mm3 \n\t"
#ifndef FAST_BGR2YV12
"psrad $8, %%mm0 \n\t"
"psrad $8, %%mm1 \n\t"
"psrad $8, %%mm2 \n\t"
"psrad $8, %%mm3 \n\t"
#endif
"packssdw %%mm1, %%mm0 \n\t"
"packssdw %%mm3, %%mm2 \n\t"
"pmaddwd %%mm5, %%mm0 \n\t"
"pmaddwd %%mm5, %%mm2 \n\t"
"packssdw %%mm2, %%mm0 \n\t"
"psraw $7, %%mm0 \n\t"
"movd 12(%0, %%ebx), %%mm4 \n\t"
"movd 15(%0, %%ebx), %%mm1 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"movd 18(%0, %%ebx), %%mm2 \n\t"
"movd 21(%0, %%ebx), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"pmaddwd %%mm6, %%mm4 \n\t"
"pmaddwd %%mm6, %%mm1 \n\t"
"pmaddwd %%mm6, %%mm2 \n\t"
"pmaddwd %%mm6, %%mm3 \n\t"
#ifndef FAST_BGR2YV12
"psrad $8, %%mm4 \n\t"
"psrad $8, %%mm1 \n\t"
"psrad $8, %%mm2 \n\t"
"psrad $8, %%mm3 \n\t"
#endif
"packssdw %%mm1, %%mm4 \n\t"
"packssdw %%mm3, %%mm2 \n\t"
"pmaddwd %%mm5, %%mm4 \n\t"
"pmaddwd %%mm5, %%mm2 \n\t"
"addl $24, %%ebx \n\t"
"packssdw %%mm2, %%mm4 \n\t"
"psraw $7, %%mm4 \n\t"
"packuswb %%mm4, %%mm0 \n\t"
"paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t"
"movq %%mm0, (%1, %%eax) \n\t"
"addl $8, %%eax \n\t"
" js 1b \n\t"
: : "r" (src+width*3), "r" (dst+width), "g" (-width)
: "%eax", "%ebx"
);
#else
int i;
for(i=0; i<width; i++)
{
int b= src[i*3+0];
int g= src[i*3+1];
int r= src[i*3+2];
dst[i]= ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
}
#endif
}
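/* Note: the MMX loop above produces 8 Y bytes per iteration: each pixel's
   B,G,R bytes are loaded with movd, widened via punpcklbw, multiplied by the
   packed coefficients in bgr2YCoeff with pmaddwd, reduced to one sum per
   pixel with a second pmaddwd against w1111, shifted down, and finally
   offset with paddusb bgr2YOffset (the +16 of the C fallback). ebx walks the
   source in 3-byte pixels (leal eax*3; addl $24 per 8 pixels). */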
static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
#ifdef HAVE_MMX
asm volatile(
"movl %4, %%eax \n\t"
"movq "MANGLE(w1111)", %%mm5 \n\t"
"movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t"
"pxor %%mm7, %%mm7 \n\t"
"leal (%%eax, %%eax, 2), %%ebx \n\t"
"addl %%ebx, %%ebx \n\t"
".balign 16 \n\t"
"1: \n\t"
PREFETCH" 64(%0, %%ebx) \n\t"
PREFETCH" 64(%1, %%ebx) \n\t"
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
"movq (%0, %%ebx), %%mm0 \n\t"
"movq (%1, %%ebx), %%mm1 \n\t"
"movq 6(%0, %%ebx), %%mm2 \n\t"
"movq 6(%1, %%ebx), %%mm3 \n\t"
PAVGB(%%mm1, %%mm0)
PAVGB(%%mm3, %%mm2)
"movq %%mm0, %%mm1 \n\t"
"movq %%mm2, %%mm3 \n\t"
"psrlq $24, %%mm0 \n\t"
"psrlq $24, %%mm2 \n\t"
PAVGB(%%mm1, %%mm0)
PAVGB(%%mm3, %%mm2)
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
#else
"movd (%0, %%ebx), %%mm0 \n\t"
"movd (%1, %%ebx), %%mm1 \n\t"
"movd 3(%0, %%ebx), %%mm2 \n\t"
"movd 3(%1, %%ebx), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"paddw %%mm1, %%mm0 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm2, %%mm0 \n\t"
"movd 6(%0, %%ebx), %%mm4 \n\t"
"movd 6(%1, %%ebx), %%mm1 \n\t"
"movd 9(%0, %%ebx), %%mm2 \n\t"
"movd 9(%1, %%ebx), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"paddw %%mm1, %%mm4 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm4, %%mm2 \n\t"
"psrlw $2, %%mm0 \n\t"
"psrlw $2, %%mm2 \n\t"
#endif
"movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
"movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
"pmaddwd %%mm0, %%mm1 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
"pmaddwd %%mm6, %%mm0 \n\t"
"pmaddwd %%mm6, %%mm2 \n\t"
#ifndef FAST_BGR2YV12
"psrad $8, %%mm0 \n\t"
"psrad $8, %%mm1 \n\t"
"psrad $8, %%mm2 \n\t"
"psrad $8, %%mm3 \n\t"
#endif
"packssdw %%mm2, %%mm0 \n\t"
"packssdw %%mm3, %%mm1 \n\t"
"pmaddwd %%mm5, %%mm0 \n\t"
"pmaddwd %%mm5, %%mm1 \n\t"
"packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
"psraw $7, %%mm0 \n\t"
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
"movq 12(%0, %%ebx), %%mm4 \n\t"
"movq 12(%1, %%ebx), %%mm1 \n\t"
"movq 18(%0, %%ebx), %%mm2 \n\t"
"movq 18(%1, %%ebx), %%mm3 \n\t"
PAVGB(%%mm1, %%mm4)
PAVGB(%%mm3, %%mm2)
"movq %%mm4, %%mm1 \n\t"
"movq %%mm2, %%mm3 \n\t"
"psrlq $24, %%mm4 \n\t"
"psrlq $24, %%mm2 \n\t"
PAVGB(%%mm1, %%mm4)
PAVGB(%%mm3, %%mm2)
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
#else
"movd 12(%0, %%ebx), %%mm4 \n\t"
"movd 12(%1, %%ebx), %%mm1 \n\t"
"movd 15(%0, %%ebx), %%mm2 \n\t"
"movd 15(%1, %%ebx), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"paddw %%mm1, %%mm4 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm2, %%mm4 \n\t"
"movd 18(%0, %%ebx), %%mm5 \n\t"
"movd 18(%1, %%ebx), %%mm1 \n\t"
"movd 21(%0, %%ebx), %%mm2 \n\t"
"movd 21(%1, %%ebx), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"paddw %%mm1, %%mm5 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm5, %%mm2 \n\t"
"movq "MANGLE(w1111)", %%mm5 \n\t"
"psrlw $2, %%mm4 \n\t"
"psrlw $2, %%mm2 \n\t"
#endif
"movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
"movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
"pmaddwd %%mm4, %%mm1 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
"pmaddwd %%mm6, %%mm4 \n\t"
"pmaddwd %%mm6, %%mm2 \n\t"
#ifndef FAST_BGR2YV12
"psrad $8, %%mm4 \n\t"
"psrad $8, %%mm1 \n\t"
"psrad $8, %%mm2 \n\t"
"psrad $8, %%mm3 \n\t"
#endif
"packssdw %%mm2, %%mm4 \n\t"
"packssdw %%mm3, %%mm1 \n\t"
"pmaddwd %%mm5, %%mm4 \n\t"
"pmaddwd %%mm5, %%mm1 \n\t"
"addl $24, %%ebx \n\t"
"packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
"psraw $7, %%mm4 \n\t"
"movq %%mm0, %%mm1 \n\t"
"punpckldq %%mm4, %%mm0 \n\t"
"punpckhdq %%mm4, %%mm1 \n\t"
"packsswb %%mm1, %%mm0 \n\t"
"paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t"
"movd %%mm0, (%2, %%eax) \n\t"
"punpckhdq %%mm0, %%mm0 \n\t"
"movd %%mm0, (%3, %%eax) \n\t"
"addl $4, %%eax \n\t"
" js 1b \n\t"
: : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width)
: "%eax", "%ebx"
);
#else
int i;
for(i=0; i<width; i++)
{
int b= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
int r= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
}
#endif
}
static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width)
{
int i;
for(i=0; i<width; i++)
{
int d= src[i*2] + (src[i*2+1]<<8);
int b= d&0x1F;
int g= (d>>5)&0x3F;
int r= (d>>11)&0x1F;
dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
}
}
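/* Note: in RGB565 r and b are 5 bits (~r8/8) and g is 6 bits (~g8/4).
   Doubling the r/b coefficients and shifting by RGB2YUV_SHIFT-2 (i.e. *4)
   gives 2*RY*r5*4 = 8*RY*r5 ~= RY*r8 and GY*g6*4 ~= GY*g8, so the 8-bit
   coefficients can be reused without first expanding the pixel to 8 bits
   per component. */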
static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
int i;
for(i=0; i<width; i++)
{
#if 1
int d0= le2me_32( ((uint32_t*)src1)[i] );
int d1= le2me_32( ((uint32_t*)src2)[i] );
int dl= (d0&0x07E0F81F) + (d1&0x07E0F81F);
int dh= ((d0>>5)&0x07C0F83F) + ((d1>>5)&0x07C0F83F);
int dh2= (dh>>11) + (dh<<21);
int d= dh2 + dl;
int b= d&0x7F;
int r= (d>>11)&0x7F;
int g= d>>21;
#else
int d0= src1[i*4] + (src1[i*4+1]<<8);
int b0= d0&0x1F;
int g0= (d0>>5)&0x3F;
int r0= (d0>>11)&0x1F;
int d1= src1[i*4+2] + (src1[i*4+3]<<8);
int b1= d1&0x1F;
int g1= (d1>>5)&0x3F;
int r1= (d1>>11)&0x1F;
int d2= src2[i*4] + (src2[i*4+1]<<8);
int b2= d2&0x1F;
int g2= (d2>>5)&0x3F;
int r2= (d2>>11)&0x1F;
int d3= src2[i*4+2] + (src2[i*4+3]<<8);
int b3= d3&0x1F;
int g3= (d3>>5)&0x3F;
int r3= (d3>>11)&0x1F;
int b= b0 + b1 + b2 + b3;
int g= g0 + g1 + g2 + g3;
int r= r0 + r1 + r2 + r3;
#endif
dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
}
}
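/* Note: the #if 1 variant is a SIMD-in-a-register trick: the masks
   0x07E0F81F / 0x07C0F83F spread the 5/6-bit fields of two RGB565 pixels
   apart with enough zero padding that four samples can be accumulated in a
   single 32-bit int without the fields bleeding into each other; dh2+dl
   merges the two accumulators and b, r, g (each now up to 7 bits) are
   extracted from the combined value d. The #else branch is the equivalent
   scalar unpack-and-sum, kept for reference. */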
static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width)
{
int i;
for(i=0; i<width; i++)
{
int d= src[i*2] + (src[i*2+1]<<8);
int b= d&0x1F;
int g= (d>>5)&0x1F;
int r= (d>>10)&0x1F;
dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
}
}
static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
int i;
for(i=0; i<width; i++)
{
#if 1
int d0= le2me_32( ((uint32_t*)src1)[i] );
int d1= le2me_32( ((uint32_t*)src2)[i] );
int dl= (d0&0x03E07C1F) + (d1&0x03E07C1F);
int dh= ((d0>>5)&0x03E0F81F) + ((d1>>5)&0x03E0F81F);
int dh2= (dh>>11) + (dh<<21);
int d= dh2 + dl;
int b= d&0x7F;
int r= (d>>10)&0x7F;
int g= d>>21;
#else
int d0= src1[i*4] + (src1[i*4+1]<<8);
int b0= d0&0x1F;
int g0= (d0>>5)&0x1F;
int r0= (d0>>10)&0x1F;
int d1= src1[i*4+2] + (src1[i*4+3]<<8);
int b1= d1&0x1F;
int g1= (d1>>5)&0x1F;
int r1= (d1>>10)&0x1F;
int d2= src2[i*4] + (src2[i*4+1]<<8);
int b2= d2&0x1F;
int g2= (d2>>5)&0x1F;
int r2= (d2>>10)&0x1F;
int d3= src2[i*4+2] + (src2[i*4+3]<<8);
int b3= d3&0x1F;
int g3= (d3>>5)&0x1F;
int r3= (d3>>10)&0x1F;
int b= b0 + b1 + b2 + b3;
int g= g0 + g1 + g2 + g3;
int r= r0 + r1 + r2 + r3;
#endif
dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
}
}
static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width)
{
int i;
for(i=0; i<width; i++)
{
int r= src[i*4+0];
int g= src[i*4+1];
int b= src[i*4+2];
dst[i]= ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
}
}
static inline void RENAME(rgb32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
int i;
for(i=0; i<width; i++)
{
int r= src1[8*i + 0] + src1[8*i + 4] + src2[8*i + 0] + src2[8*i + 4];
int g= src1[8*i + 1] + src1[8*i + 5] + src2[8*i + 1] + src2[8*i + 5];
int b= src1[8*i + 2] + src1[8*i + 6] + src2[8*i + 2] + src2[8*i + 6];
dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
}
}
static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, int width)
{
int i;
for(i=0; i<width; i++)
{
int r= src[i*3+0];
int g= src[i*3+1];
int b= src[i*3+2];
dst[i]= ((RY*r + GY*g + BY*b)>>RGB2YUV_SHIFT) + 16;
}
}
static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
{
int i;
for(i=0; i<width; i++)
{
int r= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
int b= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
}
}
// Bilinear / Bicubic scaling
static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
int16_t *filter, int16_t *filterPos, int filterSize)
{
#ifdef HAVE_MMX
if(filterSize==4) // always true for upscaling, sometimes for downscaling too
{
int counter= -2*dstW;
filter-= counter*2;
filterPos-= counter/2;
dst-= counter/2;
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"movq "MANGLE(w02)", %%mm6 \n\t"
"pushl %%ebp \n\t" // we use 7 regs here ...
"movl %%eax, %%ebp \n\t"
".balign 16 \n\t"
"1: \n\t"
"movzwl (%2, %%ebp), %%eax \n\t"
"movzwl 2(%2, %%ebp), %%ebx \n\t"
"movq (%1, %%ebp, 4), %%mm1 \n\t"
"movq 8(%1, %%ebp, 4), %%mm3 \n\t"
"movd (%3, %%eax), %%mm0 \n\t"
"movd (%3, %%ebx), %%mm2 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"pmaddwd %%mm1, %%mm0 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
"psrad $8, %%mm0 \n\t"
"psrad $8, %%mm3 \n\t"
"packssdw %%mm3, %%mm0 \n\t"
"pmaddwd %%mm6, %%mm0 \n\t"
"packssdw %%mm0, %%mm0 \n\t"
"movd %%mm0, (%4, %%ebp) \n\t"
"addl $4, %%ebp \n\t"
" jnc 1b \n\t"
"popl %%ebp \n\t"
: "+a" (counter)
: "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
: "%ebx"
);
}
else if(filterSize==8)
{
int counter= -2*dstW;
filter-= counter*4;
filterPos-= counter/2;
dst-= counter/2;
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"movq "MANGLE(w02)", %%mm6 \n\t"
"pushl %%ebp \n\t" // we use 7 regs here ...
"movl %%eax, %%ebp \n\t"
".balign 16 \n\t"
"1: \n\t"
"movzwl (%2, %%ebp), %%eax \n\t"
"movzwl 2(%2, %%ebp), %%ebx \n\t"
"movq (%1, %%ebp, 8), %%mm1 \n\t"
"movq 16(%1, %%ebp, 8), %%mm3 \n\t"
"movd (%3, %%eax), %%mm0 \n\t"
"movd (%3, %%ebx), %%mm2 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"pmaddwd %%mm1, %%mm0 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
"movq 8(%1, %%ebp, 8), %%mm1 \n\t"
"movq 24(%1, %%ebp, 8), %%mm5 \n\t"
"movd 4(%3, %%eax), %%mm4 \n\t"
"movd 4(%3, %%ebx), %%mm2 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"pmaddwd %%mm1, %%mm4 \n\t"
"pmaddwd %%mm2, %%mm5 \n\t"
"paddd %%mm4, %%mm0 \n\t"
"paddd %%mm5, %%mm3 \n\t"
"psrad $8, %%mm0 \n\t"
"psrad $8, %%mm3 \n\t"
"packssdw %%mm3, %%mm0 \n\t"
"pmaddwd %%mm6, %%mm0 \n\t"
"packssdw %%mm0, %%mm0 \n\t"
"movd %%mm0, (%4, %%ebp) \n\t"
"addl $4, %%ebp \n\t"
" jnc 1b \n\t"
"popl %%ebp \n\t"
: "+a" (counter)
: "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
: "%ebx"
);
}
else
{
int counter= -2*dstW;
// filter-= counter*filterSize/2;
filterPos-= counter/2;
dst-= counter/2;
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"movq "MANGLE(w02)", %%mm6 \n\t"
".balign 16 \n\t"
"1: \n\t"
"movl %2, %%ecx \n\t"
"movzwl (%%ecx, %0), %%eax \n\t"
"movzwl 2(%%ecx, %0), %%ebx \n\t"
"movl %5, %%ecx \n\t"
"pxor %%mm4, %%mm4 \n\t"
"pxor %%mm5, %%mm5 \n\t"
"2: \n\t"
"movq (%1), %%mm1 \n\t"
"movq (%1, %6), %%mm3 \n\t"
"movd (%%ecx, %%eax), %%mm0 \n\t"
"movd (%%ecx, %%ebx), %%mm2 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"pmaddwd %%mm1, %%mm0 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
"paddd %%mm3, %%mm5 \n\t"
"paddd %%mm0, %%mm4 \n\t"
"addl $8, %1 \n\t"
"addl $4, %%ecx \n\t"
"cmpl %4, %%ecx \n\t"
" jb 2b \n\t"
"addl %6, %1 \n\t"
"psrad $8, %%mm4 \n\t"
"psrad $8, %%mm5 \n\t"
"packssdw %%mm5, %%mm4 \n\t"
"pmaddwd %%mm6, %%mm4 \n\t"
"packssdw %%mm4, %%mm4 \n\t"
"movl %3, %%eax \n\t"
"movd %%mm4, (%%eax, %0) \n\t"
"addl $4, %0 \n\t"
" jnc 1b \n\t"
: "+r" (counter), "+r" (filter)
: "m" (filterPos), "m" (dst), "m"(src+filterSize),
"m" (src), "r" (filterSize*2)
: "%ebx", "%eax", "%ecx"
);
}
#else
int i;
for(i=0; i<dstW; i++)
{
int j;
int srcPos= filterPos[i];
int val=0;
// printf("filterPos: %d\n", filterPos[i]);
for(j=0; j<filterSize; j++)
{
// printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
val += ((int)src[srcPos + j])*filter[filterSize*i + j];
}
// filter += hFilterSize;
dst[i] = MIN(MAX(0, val>>7), (1<<15)-1); // the cubic equation does overflow ...
// dst[i] = val>>7;
}
#endif
}
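/* Note: all three MMX variants above compute the same FIR sum as the C
   fallback, just two output pixels per iteration. As a sketch (assuming w02
   is the word constant 0x0002 repeated, as its name suggests):

       val = 0;
       for(j=0; j<filterSize; j++)
           val += src[filterPos[i] + j] * filter[filterSize*i + j];
       dst[i] = MIN(MAX(0, val>>7), (1<<15)-1);

   The asm shifts the dword products right by 8 and then doubles them via
   the pmaddwd against w02, which nets out to the same >>7, with packssdw
   providing the saturation instead of MIN/MAX. Output samples are thus
   stored as pixel<<7 in 15 bits. */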
// *** horizontal scale Y line to temp buffer
static inline void RENAME(hyscale)(uint16_t *dst, int dstWidth, uint8_t *src, int srcW, int xInc,
int flags, int canMMX2BeUsed, int16_t *hLumFilter,
int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
int32_t *mmx2FilterPos)
{
if(srcFormat==IMGFMT_YUY2)
{
RENAME(yuy2ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_BGR32)
{
RENAME(bgr32ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_BGR24)
{
RENAME(bgr24ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_BGR16)
{
RENAME(bgr16ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_BGR15)
{
RENAME(bgr15ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_RGB32)
{
RENAME(rgb32ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
else if(srcFormat==IMGFMT_RGB24)
{
RENAME(rgb24ToY)(formatConvBuffer, src, srcW);
src= formatConvBuffer;
}
#ifdef HAVE_MMX
// use the new MMX scaler if the MMX2 one can't be used (it's faster than the plain x86 asm one)
if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
#else
if(!(flags&SWS_FAST_BILINEAR))
#endif
{
RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
}
else // Fast Bilinear upscale / crap downscale
{
#ifdef ARCH_X86
#ifdef HAVE_MMX2
int i;
if(canMMX2BeUsed)
{
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"movl %0, %%ecx \n\t"
"movl %1, %%edi \n\t"
"movl %2, %%edx \n\t"
"movl %3, %%ebx \n\t"
"xorl %%eax, %%eax \n\t" // i
PREFETCH" (%%ecx) \n\t"
PREFETCH" 32(%%ecx) \n\t"
PREFETCH" 64(%%ecx) \n\t"
#define FUNNY_Y_CODE \
"movl (%%ebx), %%esi \n\t"\
"call *%4 \n\t"\
"addl (%%ebx, %%eax), %%ecx \n\t"\
"addl %%eax, %%edi \n\t"\
"xorl %%eax, %%eax \n\t"\

FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
FUNNY_Y_CODE
:: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
"m" (funnyYCode)
: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
);
for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
}
else
{
#endif
//NO MMX just normal asm ...
asm volatile(
"xorl %%eax, %%eax \n\t" // i
"xorl %%ebx, %%ebx \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
".balign 16 \n\t"
"1: \n\t"
"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"movl %1, %%edi \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, (%%edi, %%eax, 2) \n\t"
  2182. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2183. "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
  2184. "movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
  2185. "movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
  2186. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2187. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2188. "shll $16, %%edi \n\t"
  2189. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2190. "movl %1, %%edi \n\t"
  2191. "shrl $9, %%esi \n\t"
  2192. "movw %%si, 2(%%edi, %%eax, 2) \n\t"
  2193. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2194. "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
  2195. "addl $2, %%eax \n\t"
  2196. "cmpl %2, %%eax \n\t"
  2197. " jb 1b \n\t"
  2198. :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF)
  2199. : "%eax", "%ebx", "%ecx", "%edi", "%esi"
  2200. );
  2201. #ifdef HAVE_MMX2
} //if MMX2 can't be used
#endif
#else
int i;
unsigned int xpos=0;
for(i=0;i<dstWidth;i++)
{
register unsigned int xx=xpos>>16;
register unsigned int xalpha=(xpos&0xFFFF)>>9;
dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
xpos+=xInc;
}
#endif
}
}
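/* Note: the fast-bilinear paths run a 16.16 fixed-point DDA: xpos>>16 is the
   source index and the top 7 bits of the fraction ((xpos&0xFFFF)>>9) the
   blend weight, i.e. dst[i]= (src[xx]<<7) + (src[xx+1]-src[xx])*xalpha,
   matching hScale's pixel<<7 output scale. The x86 version keeps the
   fraction in cx and lets the carry of "addw xInc&0xFFFF" (via adcl)
   advance the integer index, so xpos itself is never shifted. */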
inline static void RENAME(hcscale)(uint16_t *dst, int dstWidth, uint8_t *src1, uint8_t *src2,
int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
int32_t *mmx2FilterPos)
{
if(srcFormat==IMGFMT_YUY2)
{
RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_BGR32)
{
RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_BGR24)
{
RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_BGR16)
{
RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_BGR15)
{
RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_RGB32)
{
RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(srcFormat==IMGFMT_RGB24)
{
RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
src1= formatConvBuffer;
src2= formatConvBuffer+2048;
}
else if(isGray(srcFormat))
{
return;
}
#ifdef HAVE_MMX
// use the new MMX scaler if the MMX2 one can't be used (it's faster than the plain x86 asm one)
if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
#else
if(!(flags&SWS_FAST_BILINEAR))
#endif
{
RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
}
else // Fast Bilinear upscale / crap downscale
{
#ifdef ARCH_X86
#ifdef HAVE_MMX2
int i;
if(canMMX2BeUsed)
{
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"movl %0, %%ecx \n\t"
"movl %1, %%edi \n\t"
"movl %2, %%edx \n\t"
"movl %3, %%ebx \n\t"
"xorl %%eax, %%eax \n\t" // i
PREFETCH" (%%ecx) \n\t"
PREFETCH" 32(%%ecx) \n\t"
PREFETCH" 64(%%ecx) \n\t"
#define FUNNY_UV_CODE \
"movl (%%ebx), %%esi \n\t"\
"call *%4 \n\t"\
"addl (%%ebx, %%eax), %%ecx \n\t"\
"addl %%eax, %%edi \n\t"\
"xorl %%eax, %%eax \n\t"\

FUNNY_UV_CODE
FUNNY_UV_CODE
FUNNY_UV_CODE
FUNNY_UV_CODE
"xorl %%eax, %%eax \n\t" // i
"movl %5, %%ecx \n\t" // src
"movl %1, %%edi \n\t" // buf1
"addl $4096, %%edi \n\t"
PREFETCH" (%%ecx) \n\t"
PREFETCH" 32(%%ecx) \n\t"
PREFETCH" 64(%%ecx) \n\t"
FUNNY_UV_CODE
FUNNY_UV_CODE
FUNNY_UV_CODE
FUNNY_UV_CODE
:: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
"m" (funnyUVCode), "m" (src2)
: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
);
for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
{
// printf("%d %d %d\n", dstWidth, i, srcW);
dst[i] = src1[srcW-1]*128;
dst[i+2048] = src2[srcW-1]*128;
}
}
else
{
#endif
asm volatile(
"xorl %%eax, %%eax \n\t" // i
"xorl %%ebx, %%ebx \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
".balign 16 \n\t"
"1: \n\t"
"movl %0, %%esi \n\t"
"movzbl (%%esi, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%%esi, %%ebx), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"movl %1, %%edi \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, (%%edi, %%eax, 2) \n\t"
"movzbl (%5, %%ebx), %%edi \n\t" //src[xx]
"movzbl 1(%5, %%ebx), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
"movl %1, %%edi \n\t"
"shrl $9, %%esi \n\t"
"movw %%si, 4096(%%edi, %%eax, 2)\n\t"
  2356. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2357. "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
  2358. "addl $1, %%eax \n\t"
  2359. "cmpl %2, %%eax \n\t"
  2360. " jb 1b \n\t"
  2361. :: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF),
  2362. "r" (src2)
  2363. : "%eax", "%ebx", "%ecx", "%edi", "%esi"
  2364. );
  2365. #ifdef HAVE_MMX2
  2366. } //if MMX2 cant be used
  2367. #endif
  2368. #else
  2369. int i;
  2370. unsigned int xpos=0;
  2371. for(i=0;i<dstWidth;i++)
  2372. {
  2373. register unsigned int xx=xpos>>16;
  2374. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2375. dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
  2376. dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
  2377. /* slower
  2378. dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
  2379. dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
  2380. */
  2381. xpos+=xInc;
  2382. }
  2383. #endif
  2384. }
  2385. }
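/* Note: hcscale writes both chroma planes into a single temp buffer, U at
   dst[0..dstWidth-1] and V at dst+2048 (the same 2048 offset used for
   formatConvBuffer); since dst is uint16_t, that is the $4096 byte offset
   the MMX2 path adds to edi and the 4096(%edi,...) addressing in the plain
   asm version. */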
static void RENAME(swScale)(SwsContext *c, uint8_t* srcParam[], int srcStrideParam[], int srcSliceY,
int srcSliceH, uint8_t* dstParam[], int dstStrideParam[]){
/* load a few things into local vars to make the code more readable and faster */
const int srcW= c->srcW;
const int dstW= c->dstW;
const int dstH= c->dstH;
const int chrDstW= c->chrDstW;
const int chrSrcW= c->chrSrcW;
const int lumXInc= c->lumXInc;
const int chrXInc= c->chrXInc;
const int dstFormat= c->dstFormat;
const int srcFormat= c->srcFormat;
const int flags= c->flags;
const int canMMX2BeUsed= c->canMMX2BeUsed;
int16_t *vLumFilterPos= c->vLumFilterPos;
int16_t *vChrFilterPos= c->vChrFilterPos;
int16_t *hLumFilterPos= c->hLumFilterPos;
int16_t *hChrFilterPos= c->hChrFilterPos;
int16_t *vLumFilter= c->vLumFilter;
int16_t *vChrFilter= c->vChrFilter;
int16_t *hLumFilter= c->hLumFilter;
int16_t *hChrFilter= c->hChrFilter;
int16_t *lumMmxFilter= c->lumMmxFilter;
int16_t *chrMmxFilter= c->chrMmxFilter;
const int vLumFilterSize= c->vLumFilterSize;
const int vChrFilterSize= c->vChrFilterSize;
const int hLumFilterSize= c->hLumFilterSize;
const int hChrFilterSize= c->hChrFilterSize;
int16_t **lumPixBuf= c->lumPixBuf;
int16_t **chrPixBuf= c->chrPixBuf;
const int vLumBufSize= c->vLumBufSize;
const int vChrBufSize= c->vChrBufSize;
uint8_t *funnyYCode= c->funnyYCode;
uint8_t *funnyUVCode= c->funnyUVCode;
uint8_t *formatConvBuffer= c->formatConvBuffer;
const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
/* vars which will change and which we need to store back in the context */
int dstY= c->dstY;
int lumBufIndex= c->lumBufIndex;
int chrBufIndex= c->chrBufIndex;
int lastInLumBuf= c->lastInLumBuf;
int lastInChrBuf= c->lastInChrBuf;
int srcStride[3];
int dstStride[3];
uint8_t *src[3];
uint8_t *dst[3];
if(c->srcFormat == IMGFMT_I420){
src[0]= srcParam[0];
src[1]= srcParam[2];
src[2]= srcParam[1];
srcStride[0]= srcStrideParam[0];
srcStride[1]= srcStrideParam[2];
srcStride[2]= srcStrideParam[1];
}
else if(c->srcFormat==IMGFMT_YV12 || c->srcFormat==IMGFMT_YVU9){
src[0]= srcParam[0];
src[1]= srcParam[1];
src[2]= srcParam[2];
srcStride[0]= srcStrideParam[0];
srcStride[1]= srcStrideParam[1];
srcStride[2]= srcStrideParam[2];
}
else if(isPacked(c->srcFormat)){
src[0]=
src[1]=
src[2]= srcParam[0];
srcStride[0]= srcStrideParam[0];
srcStride[1]=
srcStride[2]= srcStrideParam[0]<<1;
}
else if(isGray(c->srcFormat)){
src[0]= srcParam[0];
src[1]=
src[2]= NULL;
srcStride[0]= srcStrideParam[0];
srcStride[1]=
srcStride[2]= 0;
}
if(dstFormat == IMGFMT_I420){
dst[0]= dstParam[0];
dst[1]= dstParam[2];
dst[2]= dstParam[1];
dstStride[0]= dstStrideParam[0];
dstStride[1]= dstStrideParam[2];
dstStride[2]= dstStrideParam[1];
}else{
dst[0]= dstParam[0];
dst[1]= dstParam[1];
dst[2]= dstParam[2];
dstStride[0]= dstStrideParam[0];
dstStride[1]= dstStrideParam[1];
dstStride[2]= dstStrideParam[2];
}
// printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
// (int)dst[0], (int)dst[1], (int)dst[2]);
#if 0 //self test FIXME move to a vfilter or something
{
static volatile int i=0;
i++;
if(srcFormat==IMGFMT_YV12 && i==1 && srcSliceH>= c->srcH)
selfTest(src, srcStride, c->srcW, c->srcH);
i--;
}
#endif
//printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
//dstStride[0],dstStride[1],dstStride[2]);
if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
{
static int firstTime=1; //FIXME move this into the context perhaps
if(flags & SWS_PRINT_INFO && firstTime)
{
mp_msg(MSGT_SWS,MSGL_WARN,"SwScaler: Warning: dstStride is not aligned!\n"
"SwScaler: ->cannot do aligned memory accesses anymore\n");
firstTime=0;
}
}
/* Note: the user might start scaling the picture in the middle, so this will not get executed;
this is not really intended but it works currently, so people might do it */
if(srcSliceY ==0){
lumBufIndex=0;
chrBufIndex=0;
dstY=0;
lastInLumBuf= -1;
lastInChrBuf= -1;
}
for(;dstY < dstH; dstY++){
unsigned char *dest =dst[0]+dstStride[0]*dstY;
const int chrDstY= dstY>>c->chrDstVSubSample;
unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
//handle holes (FAST_BILINEAR & weird filters)
if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
//printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1)
ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1)
// Do we have enough lines in this slice to output the dstY line
if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
{
//Do horizontal scaling
while(lastInLumBuf < lastLumSrcY)
{
uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
lumBufIndex++;
// printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
ASSERT(lumBufIndex < 2*vLumBufSize)
ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
// printf("%d %d\n", lumBufIndex, vLumBufSize);
RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
funnyYCode, c->srcFormat, formatConvBuffer,
c->lumMmx2Filter, c->lumMmx2FilterPos);
lastInLumBuf++;
}
while(lastInChrBuf < lastChrSrcY)
{
uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
chrBufIndex++;
ASSERT(chrBufIndex < 2*vChrBufSize)
ASSERT(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH))
ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
//FIXME replace parameters through context struct (some at least)
if(!(isGray(srcFormat) || isGray(dstFormat)))
RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
funnyUVCode, c->srcFormat, formatConvBuffer,
c->chrMmx2Filter, c->chrMmx2FilterPos);
lastInChrBuf++;
}
//wrap buf index around to stay inside the ring buffer
if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
}
else // not enough lines left in this slice -> load the rest in the buffer
{
/* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
vChrBufSize, vLumBufSize);*/
//Do horizontal scaling
while(lastInLumBuf+1 < srcSliceY + srcSliceH)
{
uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
lumBufIndex++;
ASSERT(lumBufIndex < 2*vLumBufSize)
ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
funnyYCode, c->srcFormat, formatConvBuffer,
c->lumMmx2Filter, c->lumMmx2FilterPos);
lastInLumBuf++;
}
while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
{
uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
chrBufIndex++;
ASSERT(chrBufIndex < 2*vChrBufSize)
ASSERT(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH)
ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
if(!(isGray(srcFormat) || isGray(dstFormat)))
RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
funnyUVCode, c->srcFormat, formatConvBuffer,
c->chrMmx2Filter, c->chrMmx2FilterPos);
lastInChrBuf++;
}
//wrap buf index around to stay inside the ring buffer
if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
break; //we can't output a dstY line so let's try with the next slice
}
#ifdef HAVE_MMX
b5Dither= dither8[dstY&1];
g6Dither= dither4[dstY&1];
g5Dither= dither8[dstY&1];
r5Dither= dither8[(dstY+1)&1];
#endif
if(dstY < dstH-2)
{
if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 like
{
if((dstY&1) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12
{
int16_t *lumBuf = lumPixBuf[0];
int16_t *chrBuf= chrPixBuf[0];
RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
}
else //General YV12
{
int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
RENAME(yuv2yuvX)(
vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
dest, uDest, vDest, dstW, chrDstW,
lumMmxFilter+dstY*vLumFilterSize*4, chrMmxFilter+chrDstY*vChrFilterSize*4);
}
}
else
{
int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB
{
int chrAlpha= vChrFilter[2*dstY+1];
RENAME(yuv2rgb1)(*lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
dest, dstW, chrAlpha, dstFormat, flags);
}
else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB
{
int lumAlpha= vLumFilter[2*dstY+1];
int chrAlpha= vChrFilter[2*dstY+1];
RENAME(yuv2rgb2)(*lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
dest, dstW, lumAlpha, chrAlpha, dstFormat, flags);
}
else //General RGB
{
RENAME(yuv2rgbX)(
vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
dest, dstW, dstFormat,
lumMmxFilter+dstY*vLumFilterSize*4, chrMmxFilter+dstY*vChrFilterSize*4);
}
}
}
else // hmm, looks like we can't use MMX here without overwriting this array's tail
{
int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
if(isPlanarYUV(dstFormat)) //YV12
{
if(dstY&1) uDest=vDest= NULL;
yuv2yuvXinC(c,
vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
dest, uDest, vDest);
}
else
{
ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
yuv2rgbXinC(
vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
dest, dstW, dstFormat);
}
}
}
#ifdef HAVE_MMX
__asm __volatile(SFENCE:::"memory");
__asm __volatile(EMMS:::"memory");
#endif
/* store changed local vars back in the context */
c->dstY= dstY;
c->lumBufIndex= lumBufIndex;
c->chrBufIndex= chrBufIndex;
c->lastInLumBuf= lastInLumBuf;
c->lastInChrBuf= lastInChrBuf;
}
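/* Note on the slice logic above: horizontally scaled lines live in the
   lumPixBuf/chrPixBuf ring buffers (vLumBufSize/vChrBufSize entries each),
   lastInLumBuf/lastInChrBuf track the newest source line already scaled, and
   each output line reads vLumFilterSize/vChrFilterSize consecutive ring
   entries. If the current slice does not reach far enough down, the loop
   breaks and the call resumes from c->dstY with the next slice, which is why
   these counters are written back into the context. */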