  1. /*
  2. Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
  3. This program is free software; you can redistribute it and/or modify
  4. it under the terms of the GNU General Public License as published by
  5. the Free Software Foundation; either version 2 of the License, or
  6. (at your option) any later version.
  7. This program is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU General Public License for more details.
  11. You should have received a copy of the GNU General Public License
  12. along with this program; if not, write to the Free Software
  13. Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  14. */
  15. #include "asmalign.h"
  16. #undef REAL_MOVNTQ
  17. #undef MOVNTQ
  18. #undef PAVGB
  19. #undef PREFETCH
  20. #undef PREFETCHW
  21. #undef EMMS
  22. #undef SFENCE
  23. #ifdef HAVE_3DNOW
  24. /* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
  25. #define EMMS "femms"
  26. #else
  27. #define EMMS "emms"
  28. #endif
  29. #ifdef HAVE_3DNOW
  30. #define PREFETCH "prefetch"
  31. #define PREFETCHW "prefetchw"
  32. #elif defined ( HAVE_MMX2 )
  33. #define PREFETCH "prefetchnta"
  34. #define PREFETCHW "prefetcht0"
  35. #else
  36. #define PREFETCH "/nop"
  37. #define PREFETCHW "/nop"
  38. #endif
  39. #ifdef HAVE_MMX2
  40. #define SFENCE "sfence"
  41. #else
  42. #define SFENCE "/nop"
  43. #endif
  44. #ifdef HAVE_MMX2
  45. #define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
  46. #elif defined (HAVE_3DNOW)
  47. #define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
  48. #endif
  49. #ifdef HAVE_MMX2
  50. #define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
  51. #else
  52. #define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
  53. #endif
  54. #define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
  55. #ifdef HAVE_ALTIVEC
  56. #include "swscale_altivec_template.c"
  57. #endif
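/* The YSCALEYUV2YV12X macro below runs the vertical multi-tap filter for one
   plane: each coefficient/sample product is taken with pmulhw (i.e. already
   shifted right by 16), summed together with a rounder loaded from
   VROUNDER_OFFSET, shifted right by a further 3 and packed to saturated bytes,
   8 output pixels per loop iteration. A rough scalar sketch of one output byte
   follows; the helper name is hypothetical and the block is illustrative only,
   assuming the same 1<<18 rounder and total >>19 shift as the C fallback
   yuv2yuvXinC. */
#if 0
static inline uint8_t vfilter_pixel_sketch(int16_t **src, int16_t *coeff,
                                            int filterSize, int i)
{
    int j, val = 1 << 18;                       /* rounding term (assumption) */
    for (j = 0; j < filterSize; j++)
        val += src[j][i] * coeff[j];            /* the MMX code adds each product already >>16 */
    val >>= 19;                                 /* pmulhw (>>16) plus psraw $3 */
    return val < 0 ? 0 : val > 255 ? 255 : val; /* packuswb saturates to 0..255 */
}
#endif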
  58. #define YSCALEYUV2YV12X(x, offset) \
  59. "xor %%"REG_a", %%"REG_a" \n\t"\
  60. "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
  61. "movq %%mm3, %%mm4 \n\t"\
  62. "lea " offset "(%0), %%"REG_d" \n\t"\
  63. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  64. ASMALIGN16 /* FIXME Unroll? */\
  65. "1: \n\t"\
  66. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  67. "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
  68. "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm5\n\t" /* srcData */\
  69. "add $16, %%"REG_d" \n\t"\
  70. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  71. "test %%"REG_S", %%"REG_S" \n\t"\
  72. "pmulhw %%mm0, %%mm2 \n\t"\
  73. "pmulhw %%mm0, %%mm5 \n\t"\
  74. "paddw %%mm2, %%mm3 \n\t"\
  75. "paddw %%mm5, %%mm4 \n\t"\
  76. " jnz 1b \n\t"\
  77. "psraw $3, %%mm3 \n\t"\
  78. "psraw $3, %%mm4 \n\t"\
  79. "packuswb %%mm4, %%mm3 \n\t"\
  80. MOVNTQ(%%mm3, (%1, %%REGa))\
  81. "add $8, %%"REG_a" \n\t"\
  82. "cmp %2, %%"REG_a" \n\t"\
  83. "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
  84. "movq %%mm3, %%mm4 \n\t"\
  85. "lea " offset "(%0), %%"REG_d" \n\t"\
  86. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  87. "jb 1b \n\t"
  88. #define YSCALEYUV2YV121 \
  89. "mov %2, %%"REG_a" \n\t"\
  90. ASMALIGN16 /* FIXME Unroll? */\
  91. "1: \n\t"\
  92. "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
  93. "movq 8(%0, %%"REG_a", 2), %%mm1\n\t"\
  94. "psraw $7, %%mm0 \n\t"\
  95. "psraw $7, %%mm1 \n\t"\
  96. "packuswb %%mm1, %%mm0 \n\t"\
  97. MOVNTQ(%%mm0, (%1, %%REGa))\
  98. "add $8, %%"REG_a" \n\t"\
  99. "jnc 1b \n\t"
  100. /*
  101. :: "m" (-lumFilterSize), "m" (-chrFilterSize),
  102. "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
  103. "r" (dest), "m" (dstW),
  104. "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
  105. : "%eax", "%ebx", "%ecx", "%edx", "%esi"
  106. */
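/* YSCALEYUV2PACKEDX does the same vertical filtering for packed output: the
   first inner loop accumulates the chroma taps (U sums in %%mm3, V sums in
   %%mm4, the V samples sitting 4096 bytes after the U samples), the second
   loop accumulates the luma taps (Y1 in %%mm1, Y2 in %%mm7). Everything is
   still 16-bit fixed point; the macros built on top of it do the packing. */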
  107. #define YSCALEYUV2PACKEDX \
  108. "xor %%"REG_a", %%"REG_a" \n\t"\
  109. ASMALIGN16\
  110. "nop \n\t"\
  111. "1: \n\t"\
  112. "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
  113. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  114. "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
  115. "movq %%mm3, %%mm4 \n\t"\
  116. ASMALIGN16\
  117. "2: \n\t"\
  118. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  119. "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
  120. "movq 4096(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
  121. "add $16, %%"REG_d" \n\t"\
  122. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  123. "pmulhw %%mm0, %%mm2 \n\t"\
  124. "pmulhw %%mm0, %%mm5 \n\t"\
  125. "paddw %%mm2, %%mm3 \n\t"\
  126. "paddw %%mm5, %%mm4 \n\t"\
  127. "test %%"REG_S", %%"REG_S" \n\t"\
  128. " jnz 2b \n\t"\
  129. \
  130. "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
  131. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  132. "movq "VROUNDER_OFFSET"(%0), %%mm1\n\t"\
  133. "movq %%mm1, %%mm7 \n\t"\
  134. ASMALIGN16\
  135. "2: \n\t"\
  136. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  137. "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\
  138. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
  139. "add $16, %%"REG_d" \n\t"\
  140. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  141. "pmulhw %%mm0, %%mm2 \n\t"\
  142. "pmulhw %%mm0, %%mm5 \n\t"\
  143. "paddw %%mm2, %%mm1 \n\t"\
  144. "paddw %%mm5, %%mm7 \n\t"\
  145. "test %%"REG_S", %%"REG_S" \n\t"\
  146. " jnz 2b \n\t"\
  147. #define YSCALEYUV2RGBX \
  148. YSCALEYUV2PACKEDX\
  149. "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
  150. "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
  151. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  152. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  153. "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
  154. "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
  155. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  156. "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
  157. "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
  158. "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
  159. "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
  160. "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
  161. "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
  162. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  163. "paddw %%mm3, %%mm4 \n\t"\
  164. "movq %%mm2, %%mm0 \n\t"\
  165. "movq %%mm5, %%mm6 \n\t"\
  166. "movq %%mm4, %%mm3 \n\t"\
  167. "punpcklwd %%mm2, %%mm2 \n\t"\
  168. "punpcklwd %%mm5, %%mm5 \n\t"\
  169. "punpcklwd %%mm4, %%mm4 \n\t"\
  170. "paddw %%mm1, %%mm2 \n\t"\
  171. "paddw %%mm1, %%mm5 \n\t"\
  172. "paddw %%mm1, %%mm4 \n\t"\
  173. "punpckhwd %%mm0, %%mm0 \n\t"\
  174. "punpckhwd %%mm6, %%mm6 \n\t"\
  175. "punpckhwd %%mm3, %%mm3 \n\t"\
  176. "paddw %%mm7, %%mm0 \n\t"\
  177. "paddw %%mm7, %%mm6 \n\t"\
  178. "paddw %%mm7, %%mm3 \n\t"\
  179. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  180. "packuswb %%mm0, %%mm2 \n\t"\
  181. "packuswb %%mm6, %%mm5 \n\t"\
  182. "packuswb %%mm3, %%mm4 \n\t"\
  183. "pxor %%mm7, %%mm7 \n\t"
  184. #if 0
  185. #define FULL_YSCALEYUV2RGB \
  186. "pxor %%mm7, %%mm7 \n\t"\
  187. "movd %6, %%mm6 \n\t" /*yalpha1*/\
  188. "punpcklwd %%mm6, %%mm6 \n\t"\
  189. "punpcklwd %%mm6, %%mm6 \n\t"\
  190. "movd %7, %%mm5 \n\t" /*uvalpha1*/\
  191. "punpcklwd %%mm5, %%mm5 \n\t"\
  192. "punpcklwd %%mm5, %%mm5 \n\t"\
  193. "xor %%"REG_a", %%"REG_a" \n\t"\
  194. ASMALIGN16\
  195. "1: \n\t"\
  196. "movq (%0, %%"REG_a", 2), %%mm0 \n\t" /*buf0[eax]*/\
  197. "movq (%1, %%"REG_a", 2), %%mm1 \n\t" /*buf1[eax]*/\
  198. "movq (%2, %%"REG_a",2), %%mm2 \n\t" /* uvbuf0[eax]*/\
  199. "movq (%3, %%"REG_a",2), %%mm3 \n\t" /* uvbuf1[eax]*/\
  200. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  201. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  202. "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  203. "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  204. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  205. "movq 4096(%2, %%"REG_a",2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  206. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  207. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  208. "movq 4096(%3, %%"REG_a",2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
  209. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  210. "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  211. "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
  212. "psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\
  213. "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
  214. \
  215. \
  216. "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  217. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  218. "pmulhw "MANGLE(ubCoeff)", %%mm3\n\t"\
  219. "psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  220. "pmulhw "MANGLE(ugCoeff)", %%mm2\n\t"\
  221. "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  222. "psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\
  223. \
  224. \
  225. "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
  226. "pmulhw "MANGLE(vrCoeff)", %%mm0\n\t"\
  227. "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
  228. "paddw %%mm1, %%mm3 \n\t" /* B*/\
  229. "paddw %%mm1, %%mm0 \n\t" /* R*/\
  230. "packuswb %%mm3, %%mm3 \n\t"\
  231. \
  232. "packuswb %%mm0, %%mm0 \n\t"\
  233. "paddw %%mm4, %%mm2 \n\t"\
  234. "paddw %%mm2, %%mm1 \n\t" /* G*/\
  235. \
  236. "packuswb %%mm1, %%mm1 \n\t"
  237. #endif
  238. #define REAL_YSCALEYUV2PACKED(index, c) \
  239. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
  240. "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1\n\t"\
  241. "psraw $3, %%mm0 \n\t"\
  242. "psraw $3, %%mm1 \n\t"\
  243. "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c")\n\t"\
  244. "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c")\n\t"\
  245. "xor "#index", "#index" \n\t"\
  246. ASMALIGN16\
  247. "1: \n\t"\
  248. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  249. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  250. "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  251. "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  252. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  253. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  254. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
  255. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  256. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  257. "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  258. "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  259. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  260. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  261. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  262. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  263. "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
  264. "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
  265. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  266. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  267. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  268. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  269. "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  270. "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  271. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  272. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  273. #define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
  274. #define REAL_YSCALEYUV2RGB(index, c) \
  275. "xor "#index", "#index" \n\t"\
  276. ASMALIGN16\
  277. "1: \n\t"\
  278. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  279. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  280. "movq 4096(%2, "#index"), %%mm5\n\t" /* uvbuf0[eax+2048]*/\
  281. "movq 4096(%3, "#index"), %%mm4\n\t" /* uvbuf1[eax+2048]*/\
  282. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  283. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  284. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
  285. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  286. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  287. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  288. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  289. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  290. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  291. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  292. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  293. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  294. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  295. "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
  296. "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
  297. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  298. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  299. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  300. "movq 8(%0, "#index", 2), %%mm6\n\t" /*buf0[eax]*/\
  301. "movq 8(%1, "#index", 2), %%mm7\n\t" /*buf1[eax]*/\
  302. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  303. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  304. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  305. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  306. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  307. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  308. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  309. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  310. "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
  311. "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
  312. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  313. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  314. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  315. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  316. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  317. "paddw %%mm3, %%mm4 \n\t"\
  318. "movq %%mm2, %%mm0 \n\t"\
  319. "movq %%mm5, %%mm6 \n\t"\
  320. "movq %%mm4, %%mm3 \n\t"\
  321. "punpcklwd %%mm2, %%mm2 \n\t"\
  322. "punpcklwd %%mm5, %%mm5 \n\t"\
  323. "punpcklwd %%mm4, %%mm4 \n\t"\
  324. "paddw %%mm1, %%mm2 \n\t"\
  325. "paddw %%mm1, %%mm5 \n\t"\
  326. "paddw %%mm1, %%mm4 \n\t"\
  327. "punpckhwd %%mm0, %%mm0 \n\t"\
  328. "punpckhwd %%mm6, %%mm6 \n\t"\
  329. "punpckhwd %%mm3, %%mm3 \n\t"\
  330. "paddw %%mm7, %%mm0 \n\t"\
  331. "paddw %%mm7, %%mm6 \n\t"\
  332. "paddw %%mm7, %%mm3 \n\t"\
  333. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  334. "packuswb %%mm0, %%mm2 \n\t"\
  335. "packuswb %%mm6, %%mm5 \n\t"\
  336. "packuswb %%mm3, %%mm4 \n\t"\
  337. "pxor %%mm7, %%mm7 \n\t"
  338. #define YSCALEYUV2RGB(index, c) REAL_YSCALEYUV2RGB(index, c)
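/* The *1 variants below read from a single source line only (buf0/uvbuf0, no
   vertical interpolation); the *1b variants additionally average the two
   chroma lines uvbuf0 and uvbuf1. yuv2packed1() further down selects between
   them based on uvalpha. */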
  339. #define REAL_YSCALEYUV2PACKED1(index, c) \
  340. "xor "#index", "#index" \n\t"\
  341. ASMALIGN16\
  342. "1: \n\t"\
  343. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  344. "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  345. "psraw $7, %%mm3 \n\t" \
  346. "psraw $7, %%mm4 \n\t" \
  347. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  348. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  349. "psraw $7, %%mm1 \n\t" \
  350. "psraw $7, %%mm7 \n\t" \
  351. #define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
  352. #define REAL_YSCALEYUV2RGB1(index, c) \
  353. "xor "#index", "#index" \n\t"\
  354. ASMALIGN16\
  355. "1: \n\t"\
  356. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  357. "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  358. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  359. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  360. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  361. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  362. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  363. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  364. "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
  365. "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
  366. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  367. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  368. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  369. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  370. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  371. "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
  372. "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
  373. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  374. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  375. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  376. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  377. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  378. "paddw %%mm3, %%mm4 \n\t"\
  379. "movq %%mm2, %%mm0 \n\t"\
  380. "movq %%mm5, %%mm6 \n\t"\
  381. "movq %%mm4, %%mm3 \n\t"\
  382. "punpcklwd %%mm2, %%mm2 \n\t"\
  383. "punpcklwd %%mm5, %%mm5 \n\t"\
  384. "punpcklwd %%mm4, %%mm4 \n\t"\
  385. "paddw %%mm1, %%mm2 \n\t"\
  386. "paddw %%mm1, %%mm5 \n\t"\
  387. "paddw %%mm1, %%mm4 \n\t"\
  388. "punpckhwd %%mm0, %%mm0 \n\t"\
  389. "punpckhwd %%mm6, %%mm6 \n\t"\
  390. "punpckhwd %%mm3, %%mm3 \n\t"\
  391. "paddw %%mm7, %%mm0 \n\t"\
  392. "paddw %%mm7, %%mm6 \n\t"\
  393. "paddw %%mm7, %%mm3 \n\t"\
  394. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  395. "packuswb %%mm0, %%mm2 \n\t"\
  396. "packuswb %%mm6, %%mm5 \n\t"\
  397. "packuswb %%mm3, %%mm4 \n\t"\
  398. "pxor %%mm7, %%mm7 \n\t"
  399. #define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
  400. #define REAL_YSCALEYUV2PACKED1b(index, c) \
  401. "xor "#index", "#index" \n\t"\
  402. ASMALIGN16\
  403. "1: \n\t"\
  404. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  405. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  406. "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  407. "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  408. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  409. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  410. "psrlw $8, %%mm3 \n\t" \
  411. "psrlw $8, %%mm4 \n\t" \
  412. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  413. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  414. "psraw $7, %%mm1 \n\t" \
  415. "psraw $7, %%mm7 \n\t"
  416. #define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
  417. // do vertical chrominance interpolation
  418. #define REAL_YSCALEYUV2RGB1b(index, c) \
  419. "xor "#index", "#index" \n\t"\
  420. ASMALIGN16\
  421. "1: \n\t"\
  422. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  423. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  424. "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  425. "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  426. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  427. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  428. "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
  429. "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
  430. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  431. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  432. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  433. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  434. "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
  435. "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
  436. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  437. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  438. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  439. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  440. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  441. "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
  442. "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
  443. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  444. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  445. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  446. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  447. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  448. "paddw %%mm3, %%mm4 \n\t"\
  449. "movq %%mm2, %%mm0 \n\t"\
  450. "movq %%mm5, %%mm6 \n\t"\
  451. "movq %%mm4, %%mm3 \n\t"\
  452. "punpcklwd %%mm2, %%mm2 \n\t"\
  453. "punpcklwd %%mm5, %%mm5 \n\t"\
  454. "punpcklwd %%mm4, %%mm4 \n\t"\
  455. "paddw %%mm1, %%mm2 \n\t"\
  456. "paddw %%mm1, %%mm5 \n\t"\
  457. "paddw %%mm1, %%mm4 \n\t"\
  458. "punpckhwd %%mm0, %%mm0 \n\t"\
  459. "punpckhwd %%mm6, %%mm6 \n\t"\
  460. "punpckhwd %%mm3, %%mm3 \n\t"\
  461. "paddw %%mm7, %%mm0 \n\t"\
  462. "paddw %%mm7, %%mm6 \n\t"\
  463. "paddw %%mm7, %%mm3 \n\t"\
  464. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  465. "packuswb %%mm0, %%mm2 \n\t"\
  466. "packuswb %%mm6, %%mm5 \n\t"\
  467. "packuswb %%mm3, %%mm4 \n\t"\
  468. "pxor %%mm7, %%mm7 \n\t"
  469. #define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
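/* The WRITE* macros below consume the bytes left by the conversion macros
   (B in %%mm2, G in %%mm4, R in %%mm5, zero in %%mm7) and store 8 pixels per
   iteration in the requested layout. For the 15/16 bpp paths the per-pixel
   packing is, in effect, the following; the helper names are hypothetical and
   the block is an illustrative sketch only: */
#if 0
static inline uint16_t pack_bgr16_sketch(int r, int g, int b)
{
    return ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);  /* 5-6-5 */
}
static inline uint16_t pack_bgr15_sketch(int r, int g, int b)
{
    return ((r >> 3) << 10) | ((g >> 3) << 5) | (b >> 3);  /* 5-5-5 */
}
#endif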
  470. #define REAL_WRITEBGR32(dst, dstw, index) \
  471. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  472. "movq %%mm2, %%mm1 \n\t" /* B */\
  473. "movq %%mm5, %%mm6 \n\t" /* R */\
  474. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  475. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  476. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  477. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  478. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  479. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  480. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  481. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  482. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  483. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  484. \
  485. MOVNTQ(%%mm0, (dst, index, 4))\
  486. MOVNTQ(%%mm2, 8(dst, index, 4))\
  487. MOVNTQ(%%mm1, 16(dst, index, 4))\
  488. MOVNTQ(%%mm3, 24(dst, index, 4))\
  489. \
  490. "add $8, "#index" \n\t"\
  491. "cmp "#dstw", "#index" \n\t"\
  492. " jb 1b \n\t"
  493. #define WRITEBGR32(dst, dstw, index) REAL_WRITEBGR32(dst, dstw, index)
  494. #define REAL_WRITEBGR16(dst, dstw, index) \
  495. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  496. "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
  497. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  498. "psrlq $3, %%mm2 \n\t"\
  499. \
  500. "movq %%mm2, %%mm1 \n\t"\
  501. "movq %%mm4, %%mm3 \n\t"\
  502. \
  503. "punpcklbw %%mm7, %%mm3 \n\t"\
  504. "punpcklbw %%mm5, %%mm2 \n\t"\
  505. "punpckhbw %%mm7, %%mm4 \n\t"\
  506. "punpckhbw %%mm5, %%mm1 \n\t"\
  507. \
  508. "psllq $3, %%mm3 \n\t"\
  509. "psllq $3, %%mm4 \n\t"\
  510. \
  511. "por %%mm3, %%mm2 \n\t"\
  512. "por %%mm4, %%mm1 \n\t"\
  513. \
  514. MOVNTQ(%%mm2, (dst, index, 2))\
  515. MOVNTQ(%%mm1, 8(dst, index, 2))\
  516. \
  517. "add $8, "#index" \n\t"\
  518. "cmp "#dstw", "#index" \n\t"\
  519. " jb 1b \n\t"
  520. #define WRITEBGR16(dst, dstw, index) REAL_WRITEBGR16(dst, dstw, index)
  521. #define REAL_WRITEBGR15(dst, dstw, index) \
  522. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  523. "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
  524. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  525. "psrlq $3, %%mm2 \n\t"\
  526. "psrlq $1, %%mm5 \n\t"\
  527. \
  528. "movq %%mm2, %%mm1 \n\t"\
  529. "movq %%mm4, %%mm3 \n\t"\
  530. \
  531. "punpcklbw %%mm7, %%mm3 \n\t"\
  532. "punpcklbw %%mm5, %%mm2 \n\t"\
  533. "punpckhbw %%mm7, %%mm4 \n\t"\
  534. "punpckhbw %%mm5, %%mm1 \n\t"\
  535. \
  536. "psllq $2, %%mm3 \n\t"\
  537. "psllq $2, %%mm4 \n\t"\
  538. \
  539. "por %%mm3, %%mm2 \n\t"\
  540. "por %%mm4, %%mm1 \n\t"\
  541. \
  542. MOVNTQ(%%mm2, (dst, index, 2))\
  543. MOVNTQ(%%mm1, 8(dst, index, 2))\
  544. \
  545. "add $8, "#index" \n\t"\
  546. "cmp "#dstw", "#index" \n\t"\
  547. " jb 1b \n\t"
  548. #define WRITEBGR15(dst, dstw, index) REAL_WRITEBGR15(dst, dstw, index)
  549. #define WRITEBGR24OLD(dst, dstw, index) \
  550. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  551. "movq %%mm2, %%mm1 \n\t" /* B */\
  552. "movq %%mm5, %%mm6 \n\t" /* R */\
  553. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  554. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  555. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  556. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  557. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  558. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  559. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  560. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  561. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  562. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  563. \
  564. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  565. "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
  566. "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 0 */\
  567. "pand "MANGLE(bm11111000)", %%mm0\n\t" /* 00RGB000 0.5 */\
  568. "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
  569. "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
  570. "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
  571. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  572. \
  573. "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  574. "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
  575. "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
  576. "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
  577. "pand "MANGLE(bm00001111)", %%mm2\n\t" /* 0000RGBR 1 */\
  578. "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
  579. "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
  580. "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 2 */\
  581. "pand "MANGLE(bm11111000)", %%mm1\n\t" /* 00RGB000 2.5 */\
  582. "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
  583. "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
  584. "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
  585. "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
  586. \
  587. "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
  588. "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
  589. "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
  590. "pand "MANGLE(bm00000111)", %%mm5\n\t" /* 00000RGB 3 */\
  591. "pand "MANGLE(bm11111000)", %%mm3\n\t" /* 00RGB000 3.5 */\
  592. "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
  593. "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
  594. "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
  595. \
  596. MOVNTQ(%%mm0, (dst))\
  597. MOVNTQ(%%mm2, 8(dst))\
  598. MOVNTQ(%%mm3, 16(dst))\
  599. "add $24, "#dst" \n\t"\
  600. \
  601. "add $8, "#index" \n\t"\
  602. "cmp "#dstw", "#index" \n\t"\
  603. " jb 1b \n\t"
  604. #define WRITEBGR24MMX(dst, dstw, index) \
  605. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  606. "movq %%mm2, %%mm1 \n\t" /* B */\
  607. "movq %%mm5, %%mm6 \n\t" /* R */\
  608. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  609. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  610. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  611. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  612. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  613. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  614. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  615. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  616. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  617. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  618. \
  619. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  620. "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
  621. "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
  622. "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
  623. \
  624. "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
  625. "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
  626. "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
  627. "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
  628. \
  629. "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
  630. "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
  631. "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
  632. "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
  633. \
  634. "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
  635. "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
  636. "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
  637. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  638. MOVNTQ(%%mm0, (dst))\
  639. \
  640. "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
  641. "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
  642. "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
  643. "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
  644. MOVNTQ(%%mm6, 8(dst))\
  645. \
  646. "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
  647. "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
  648. "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
  649. MOVNTQ(%%mm5, 16(dst))\
  650. \
  651. "add $24, "#dst" \n\t"\
  652. \
  653. "add $8, "#index" \n\t"\
  654. "cmp "#dstw", "#index" \n\t"\
  655. " jb 1b \n\t"
  656. #define WRITEBGR24MMX2(dst, dstw, index) \
  657. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  658. "movq "MANGLE(M24A)", %%mm0 \n\t"\
  659. "movq "MANGLE(M24C)", %%mm7 \n\t"\
  660. "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
  661. "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
  662. "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
  663. \
  664. "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
  665. "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
  666. "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
  667. \
  668. "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
  669. "por %%mm1, %%mm6 \n\t"\
  670. "por %%mm3, %%mm6 \n\t"\
  671. MOVNTQ(%%mm6, (dst))\
  672. \
  673. "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
  674. "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
  675. "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
  676. "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
  677. \
  678. "pand "MANGLE(M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
  679. "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
  680. "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
  681. \
  682. "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
  683. "por %%mm3, %%mm6 \n\t"\
  684. MOVNTQ(%%mm6, 8(dst))\
  685. \
  686. "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
  687. "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
  688. "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
  689. \
  690. "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
  691. "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
  692. "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
  693. \
  694. "por %%mm1, %%mm3 \n\t"\
  695. "por %%mm3, %%mm6 \n\t"\
  696. MOVNTQ(%%mm6, 16(dst))\
  697. \
  698. "add $24, "#dst" \n\t"\
  699. \
  700. "add $8, "#index" \n\t"\
  701. "cmp "#dstw", "#index" \n\t"\
  702. " jb 1b \n\t"
  703. #ifdef HAVE_MMX2
  704. #undef WRITEBGR24
  705. #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
  706. #else
  707. #undef WRITEBGR24
  708. #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
  709. #endif
  710. #define REAL_WRITEYUY2(dst, dstw, index) \
  711. "packuswb %%mm3, %%mm3 \n\t"\
  712. "packuswb %%mm4, %%mm4 \n\t"\
  713. "packuswb %%mm7, %%mm1 \n\t"\
  714. "punpcklbw %%mm4, %%mm3 \n\t"\
  715. "movq %%mm1, %%mm7 \n\t"\
  716. "punpcklbw %%mm3, %%mm1 \n\t"\
  717. "punpckhbw %%mm3, %%mm7 \n\t"\
  718. \
  719. MOVNTQ(%%mm1, (dst, index, 2))\
  720. MOVNTQ(%%mm7, 8(dst, index, 2))\
  721. \
  722. "add $8, "#index" \n\t"\
  723. "cmp "#dstw", "#index" \n\t"\
  724. " jb 1b \n\t"
  725. #define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
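/* Operand convention for the asm blocks in the functions below: "%0" (or the
   "c" argument of the macros) is &c->redDither, and the other SwsContext
   fields -- filters, coefficients, offsets, dither -- are reached through the
   *_OFFSET displacements relative to that pointer; the clobber lists, where
   present, name the loop and index registers the macros above use. */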
  726. static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  727. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  728. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
  729. {
  730. #ifdef HAVE_MMX
  731. if(uDest != NULL)
  732. {
  733. asm volatile(
  734. YSCALEYUV2YV12X(0, CHR_MMX_FILTER_OFFSET)
  735. :: "r" (&c->redDither),
  736. "r" (uDest), "p" (chrDstW)
  737. : "%"REG_a, "%"REG_d, "%"REG_S
  738. );
  739. asm volatile(
  740. YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET)
  741. :: "r" (&c->redDither),
  742. "r" (vDest), "p" (chrDstW)
  743. : "%"REG_a, "%"REG_d, "%"REG_S
  744. );
  745. }
  746. asm volatile(
  747. YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET)
  748. :: "r" (&c->redDither),
  749. "r" (dest), "p" (dstW)
  750. : "%"REG_a, "%"REG_d, "%"REG_S
  751. );
  752. #else
  753. #ifdef HAVE_ALTIVEC
  754. yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
  755. chrFilter, chrSrc, chrFilterSize,
  756. dest, uDest, vDest, dstW, chrDstW);
  757. #else //HAVE_ALTIVEC
  758. yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
  759. chrFilter, chrSrc, chrFilterSize,
  760. dest, uDest, vDest, dstW, chrDstW);
  761. #endif //!HAVE_ALTIVEC
  762. #endif
  763. }
  764. static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  765. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  766. uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
  767. {
  768. yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
  769. chrFilter, chrSrc, chrFilterSize,
  770. dest, uDest, dstW, chrDstW, dstFormat);
  771. }
  772. static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
  773. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
  774. {
  775. #ifdef HAVE_MMX
  776. if(uDest != NULL)
  777. {
  778. asm volatile(
  779. YSCALEYUV2YV121
  780. :: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
  781. "g" (-chrDstW)
  782. : "%"REG_a
  783. );
  784. asm volatile(
  785. YSCALEYUV2YV121
  786. :: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
  787. "g" (-chrDstW)
  788. : "%"REG_a
  789. );
  790. }
  791. asm volatile(
  792. YSCALEYUV2YV121
  793. :: "r" (lumSrc + dstW), "r" (dest + dstW),
  794. "g" (-dstW)
  795. : "%"REG_a
  796. );
  797. #else
  798. int i;
  799. for(i=0; i<dstW; i++)
  800. {
  801. int val= lumSrc[i]>>7;
  802. if(val&256){
  803. if(val<0) val=0;
  804. else val=255;
  805. }
  806. dest[i]= val;
  807. }
  808. if(uDest != NULL)
  809. for(i=0; i<chrDstW; i++)
  810. {
  811. int u=chrSrc[i]>>7;
  812. int v=chrSrc[i + 2048]>>7;
  813. if((u|v)&256){
  814. if(u<0) u=0;
  815. else if (u>255) u=255;
  816. if(v<0) v=0;
  817. else if (v>255) v=255;
  818. }
  819. uDest[i]= u;
  820. vDest[i]= v;
  821. }
  822. #endif
  823. }
  824. /**
  825. * vertical scale YV12 to RGB
  826. */
  827. static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  828. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  829. uint8_t *dest, long dstW, long dstY)
  830. {
  831. long dummy=0;
  832. switch(c->dstFormat)
  833. {
  834. #ifdef HAVE_MMX
  835. case IMGFMT_BGR32:
  836. {
  837. asm volatile(
  838. YSCALEYUV2RGBX
  839. WRITEBGR32(%4, %5, %%REGa)
  840. :: "r" (&c->redDither),
  841. "m" (dummy), "m" (dummy), "m" (dummy),
  842. "r" (dest), "m" (dstW)
  843. : "%"REG_a, "%"REG_d, "%"REG_S
  844. );
  845. }
  846. break;
  847. case IMGFMT_BGR24:
  848. {
  849. asm volatile(
  850. YSCALEYUV2RGBX
  851. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t" //FIXME optimize
  852. "add %4, %%"REG_b" \n\t"
  853. WRITEBGR24(%%REGb, %5, %%REGa)
  854. :: "r" (&c->redDither),
  855. "m" (dummy), "m" (dummy), "m" (dummy),
  856. "r" (dest), "m" (dstW)
  857. : "%"REG_a, "%"REG_b, "%"REG_d, "%"REG_S //FIXME ebx
  858. );
  859. }
  860. break;
  861. case IMGFMT_BGR15:
  862. {
  863. asm volatile(
  864. YSCALEYUV2RGBX
  865. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  866. #ifdef DITHER1XBPP
  867. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  868. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  869. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  870. #endif
  871. WRITEBGR15(%4, %5, %%REGa)
  872. :: "r" (&c->redDither),
  873. "m" (dummy), "m" (dummy), "m" (dummy),
  874. "r" (dest), "m" (dstW)
  875. : "%"REG_a, "%"REG_d, "%"REG_S
  876. );
  877. }
  878. break;
  879. case IMGFMT_BGR16:
  880. {
  881. asm volatile(
  882. YSCALEYUV2RGBX
  883. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  884. #ifdef DITHER1XBPP
  885. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  886. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  887. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  888. #endif
  889. WRITEBGR16(%4, %5, %%REGa)
  890. :: "r" (&c->redDither),
  891. "m" (dummy), "m" (dummy), "m" (dummy),
  892. "r" (dest), "m" (dstW)
  893. : "%"REG_a, "%"REG_d, "%"REG_S
  894. );
  895. }
  896. break;
  897. case IMGFMT_YUY2:
  898. {
  899. asm volatile(
  900. YSCALEYUV2PACKEDX
  901. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  902. "psraw $3, %%mm3 \n\t"
  903. "psraw $3, %%mm4 \n\t"
  904. "psraw $3, %%mm1 \n\t"
  905. "psraw $3, %%mm7 \n\t"
  906. WRITEYUY2(%4, %5, %%REGa)
  907. :: "r" (&c->redDither),
  908. "m" (dummy), "m" (dummy), "m" (dummy),
  909. "r" (dest), "m" (dstW)
  910. : "%"REG_a, "%"REG_d, "%"REG_S
  911. );
  912. }
  913. break;
  914. #endif
  915. default:
  916. #ifdef HAVE_ALTIVEC
  917. /* The following list of supported dstFormat values should
  918. match what's found in the body of altivec_yuv2packedX() */
  919. if(c->dstFormat==IMGFMT_ABGR || c->dstFormat==IMGFMT_BGRA ||
  920. c->dstFormat==IMGFMT_BGR24 || c->dstFormat==IMGFMT_RGB24 ||
  921. c->dstFormat==IMGFMT_RGBA || c->dstFormat==IMGFMT_ARGB)
  922. altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize,
  923. chrFilter, chrSrc, chrFilterSize,
  924. dest, dstW, dstY);
  925. else
  926. #endif
  927. yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
  928. chrFilter, chrSrc, chrFilterSize,
  929. dest, dstW, dstY);
  930. break;
  931. }
  932. }
  933. /**
  934. * vertical bilinear scale YV12 to RGB
  935. */
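/* The two source lines are blended with 12-bit weights: yalpha/uvalpha weight
   buf1/uvbuf1 and yalpha1 = yalpha^4095 (i.e. 4095-yalpha) weights
   buf0/uvbuf0, so the C code in this function computes e.g.
   Y = (buf0[i]*yalpha1 + buf1[i]*yalpha) >> 19. */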
  936. static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
  937. uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
  938. {
  939. int yalpha1=yalpha^4095;
  940. int uvalpha1=uvalpha^4095;
  941. int i;
  942. #if 0 //isn't used
  943. if(flags&SWS_FULL_CHR_H_INT)
  944. {
  945. switch(dstFormat)
  946. {
  947. #ifdef HAVE_MMX
  948. case IMGFMT_BGR32:
  949. asm volatile(
  950. FULL_YSCALEYUV2RGB
  951. "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
  952. "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
  953. "movq %%mm3, %%mm1 \n\t"
  954. "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
  955. "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
  956. MOVNTQ(%%mm3, (%4, %%REGa, 4))
  957. MOVNTQ(%%mm1, 8(%4, %%REGa, 4))
  958. "add $4, %%"REG_a" \n\t"
  959. "cmp %5, %%"REG_a" \n\t"
  960. " jb 1b \n\t"
  961. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" ((long)dstW),
  962. "m" (yalpha1), "m" (uvalpha1)
  963. : "%"REG_a
  964. );
  965. break;
  966. case IMGFMT_BGR24:
  967. asm volatile(
  968. FULL_YSCALEYUV2RGB
  969. // lsb ... msb
  970. "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
  971. "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
  972. "movq %%mm3, %%mm1 \n\t"
  973. "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
  974. "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
  975. "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
  976. "psrlq $8, %%mm3 \n\t" // GR0BGR00
  977. "pand "MANGLE(bm00000111)", %%mm2\n\t" // BGR00000
  978. "pand "MANGLE(bm11111000)", %%mm3\n\t" // 000BGR00
  979. "por %%mm2, %%mm3 \n\t" // BGRBGR00
  980. "movq %%mm1, %%mm2 \n\t"
  981. "psllq $48, %%mm1 \n\t" // 000000BG
  982. "por %%mm1, %%mm3 \n\t" // BGRBGRBG
  983. "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
  984. "psrld $16, %%mm2 \n\t" // R000R000
  985. "psrlq $24, %%mm1 \n\t" // 0BGR0000
  986. "por %%mm2, %%mm1 \n\t" // RBGRR000
  987. "mov %4, %%"REG_b" \n\t"
  988. "add %%"REG_a", %%"REG_b" \n\t"
  989. #ifdef HAVE_MMX2
  990. //FIXME Alignment
  991. "movntq %%mm3, (%%"REG_b", %%"REG_a", 2)\n\t"
  992. "movntq %%mm1, 8(%%"REG_b", %%"REG_a", 2)\n\t"
  993. #else
  994. "movd %%mm3, (%%"REG_b", %%"REG_a", 2) \n\t"
  995. "psrlq $32, %%mm3 \n\t"
  996. "movd %%mm3, 4(%%"REG_b", %%"REG_a", 2) \n\t"
  997. "movd %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t"
  998. #endif
  999. "add $4, %%"REG_a" \n\t"
  1000. "cmp %5, %%"REG_a" \n\t"
  1001. " jb 1b \n\t"
  1002. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
  1003. "m" (yalpha1), "m" (uvalpha1)
  1004. : "%"REG_a, "%"REG_b
  1005. );
  1006. break;
  1007. case IMGFMT_BGR15:
  1008. asm volatile(
  1009. FULL_YSCALEYUV2RGB
  1010. #ifdef DITHER1XBPP
  1011. "paddusb "MANGLE(g5Dither)", %%mm1\n\t"
  1012. "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
  1013. "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
  1014. #endif
  1015. "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
  1016. "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
  1017. "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
  1018. "psrlw $3, %%mm3 \n\t"
  1019. "psllw $2, %%mm1 \n\t"
  1020. "psllw $7, %%mm0 \n\t"
  1021. "pand "MANGLE(g15Mask)", %%mm1 \n\t"
  1022. "pand "MANGLE(r15Mask)", %%mm0 \n\t"
  1023. "por %%mm3, %%mm1 \n\t"
  1024. "por %%mm1, %%mm0 \n\t"
  1025. MOVNTQ(%%mm0, (%4, %%REGa, 2))
  1026. "add $4, %%"REG_a" \n\t"
  1027. "cmp %5, %%"REG_a" \n\t"
  1028. " jb 1b \n\t"
  1029. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1030. "m" (yalpha1), "m" (uvalpha1)
  1031. : "%"REG_a
  1032. );
  1033. break;
  1034. case IMGFMT_BGR16:
  1035. asm volatile(
  1036. FULL_YSCALEYUV2RGB
  1037. #ifdef DITHER1XBPP
  1038. "paddusb "MANGLE(g6Dither)", %%mm1\n\t"
  1039. "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
  1040. "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
  1041. #endif
  1042. "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
  1043. "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
  1044. "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
  1045. "psrlw $3, %%mm3 \n\t"
  1046. "psllw $3, %%mm1 \n\t"
  1047. "psllw $8, %%mm0 \n\t"
  1048. "pand "MANGLE(g16Mask)", %%mm1 \n\t"
  1049. "pand "MANGLE(r16Mask)", %%mm0 \n\t"
  1050. "por %%mm3, %%mm1 \n\t"
  1051. "por %%mm1, %%mm0 \n\t"
  1052. MOVNTQ(%%mm0, (%4, %%REGa, 2))
  1053. "add $4, %%"REG_a" \n\t"
  1054. "cmp %5, %%"REG_a" \n\t"
  1055. " jb 1b \n\t"
  1056. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1057. "m" (yalpha1), "m" (uvalpha1)
  1058. : "%"REG_a
  1059. );
  1060. break;
  1061. #endif
  1062. case IMGFMT_RGB32:
  1063. #ifndef HAVE_MMX
  1064. case IMGFMT_BGR32:
  1065. #endif
  1066. if(dstFormat==IMGFMT_BGR32)
  1067. {
  1068. int i;
  1069. #ifdef WORDS_BIGENDIAN
  1070. dest++;
  1071. #endif
  1072. for(i=0;i<dstW;i++){
  1073. // vertical linear interpolation && yuv2rgb in a single step:
  1074. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1075. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1076. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1077. dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
  1078. dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
  1079. dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
  1080. dest+= 4;
  1081. }
  1082. }
  1083. else if(dstFormat==IMGFMT_BGR24)
  1084. {
  1085. int i;
  1086. for(i=0;i<dstW;i++){
  1087. // vertical linear interpolation && yuv2rgb in a single step:
  1088. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1089. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1090. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1091. dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
  1092. dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
  1093. dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
  1094. dest+= 3;
  1095. }
  1096. }
  1097. else if(dstFormat==IMGFMT_BGR16)
  1098. {
  1099. int i;
  1100. for(i=0;i<dstW;i++){
  1101. // vertical linear interpolation && yuv2rgb in a single step:
  1102. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1103. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1104. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1105. ((uint16_t*)dest)[i] =
  1106. clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
  1107. clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
  1108. clip_table16r[(Y + yuvtab_3343[V]) >>13];
  1109. }
  1110. }
  1111. else if(dstFormat==IMGFMT_BGR15)
  1112. {
  1113. int i;
  1114. for(i=0;i<dstW;i++){
  1115. // vertical linear interpolation && yuv2rgb in a single step:
  1116. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1117. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1118. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1119. ((uint16_t*)dest)[i] =
  1120. clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
  1121. clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
  1122. clip_table15r[(Y + yuvtab_3343[V]) >>13];
  1123. }
  1124. }
  1125. }//FULL_UV_IPOL
  1126. else
  1127. {
  1128. #endif // if 0
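/* Each MMX case below stashes %REG_b in the context at ESP_OFFSET and pushes
   %REG_BP so that both are free inside the macro (REG_BP as the pixel index,
   REG_b as the destination pointer), restores them afterwards and returns
   directly instead of breaking out of the switch. */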
  1129. #ifdef HAVE_MMX
  1130. switch(c->dstFormat)
  1131. {
  1132. //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
  1133. case IMGFMT_BGR32:
  1134. asm volatile(
  1135. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1136. "mov %4, %%"REG_b" \n\t"
  1137. "push %%"REG_BP" \n\t"
  1138. YSCALEYUV2RGB(%%REGBP, %5)
  1139. WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
  1140. "pop %%"REG_BP" \n\t"
  1141. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1142. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1143. "a" (&c->redDither)
  1144. );
  1145. return;
  1146. case IMGFMT_BGR24:
  1147. asm volatile(
  1148. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1149. "mov %4, %%"REG_b" \n\t"
  1150. "push %%"REG_BP" \n\t"
  1151. YSCALEYUV2RGB(%%REGBP, %5)
  1152. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1153. "pop %%"REG_BP" \n\t"
  1154. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1155. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1156. "a" (&c->redDither)
  1157. );
  1158. return;
  1159. case IMGFMT_BGR15:
  1160. asm volatile(
  1161. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1162. "mov %4, %%"REG_b" \n\t"
  1163. "push %%"REG_BP" \n\t"
  1164. YSCALEYUV2RGB(%%REGBP, %5)
  1165. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1166. #ifdef DITHER1XBPP
  1167. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1168. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1169. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1170. #endif
  1171. WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
  1172. "pop %%"REG_BP" \n\t"
  1173. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1174. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1175. "a" (&c->redDither)
  1176. );
  1177. return;
  1178. case IMGFMT_BGR16:
  1179. asm volatile(
  1180. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1181. "mov %4, %%"REG_b" \n\t"
  1182. "push %%"REG_BP" \n\t"
  1183. YSCALEYUV2RGB(%%REGBP, %5)
  1184. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1185. #ifdef DITHER1XBPP
  1186. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1187. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1188. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1189. #endif
  1190. WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
  1191. "pop %%"REG_BP" \n\t"
  1192. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1193. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1194. "a" (&c->redDither)
  1195. );
  1196. return;
  1197. case IMGFMT_YUY2:
  1198. asm volatile(
  1199. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1200. "mov %4, %%"REG_b" \n\t"
  1201. "push %%"REG_BP" \n\t"
  1202. YSCALEYUV2PACKED(%%REGBP, %5)
  1203. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1204. "pop %%"REG_BP" \n\t"
  1205. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1206. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1207. "a" (&c->redDither)
  1208. );
  1209. return;
  1210. default: break;
  1211. }
  1212. #endif //HAVE_MMX
  1213. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C)
  1214. }
  1215. /**
  1216. * YV12 to RGB without scaling or interpolating
  1217. */
  1218. static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
  1219. uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
  1220. {
  1221. const int yalpha1=0;
  1222. int i;
  1223. uint16_t *buf1= buf0; //FIXME needed for the rgb1/bgr1
  1224. const int yalpha= 4096; //FIXME ...
  1225. if(flags&SWS_FULL_CHR_H_INT)
  1226. {
  1227. RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
  1228. return;
  1229. }
  1230. #ifdef HAVE_MMX
  1231. if( uvalpha < 2048 ) // note this is not correct (shifts chrominance by 0.5 pixels) but it's a bit faster
  1232. {
  1233. switch(dstFormat)
  1234. {
  1235. case IMGFMT_BGR32:
  1236. asm volatile(
  1237. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1238. "mov %4, %%"REG_b" \n\t"
  1239. "push %%"REG_BP" \n\t"
  1240. YSCALEYUV2RGB1(%%REGBP, %5)
  1241. WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
  1242. "pop %%"REG_BP" \n\t"
  1243. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1244. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1245. "a" (&c->redDither)
  1246. );
  1247. return;
  1248. case IMGFMT_BGR24:
  1249. asm volatile(
  1250. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1251. "mov %4, %%"REG_b" \n\t"
  1252. "push %%"REG_BP" \n\t"
  1253. YSCALEYUV2RGB1(%%REGBP, %5)
  1254. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1255. "pop %%"REG_BP" \n\t"
  1256. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1257. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1258. "a" (&c->redDither)
  1259. );
  1260. return;
  1261. case IMGFMT_BGR15:
  1262. asm volatile(
  1263. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1264. "mov %4, %%"REG_b" \n\t"
  1265. "push %%"REG_BP" \n\t"
  1266. YSCALEYUV2RGB1(%%REGBP, %5)
  1267. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1268. #ifdef DITHER1XBPP
  1269. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1270. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1271. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1272. #endif
  1273. WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
  1274. "pop %%"REG_BP" \n\t"
  1275. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1276. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1277. "a" (&c->redDither)
  1278. );
  1279. return;
  1280. case IMGFMT_BGR16:
  1281. asm volatile(
  1282. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1283. "mov %4, %%"REG_b" \n\t"
  1284. "push %%"REG_BP" \n\t"
  1285. YSCALEYUV2RGB1(%%REGBP, %5)
  1286. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1287. #ifdef DITHER1XBPP
  1288. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1289. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1290. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1291. #endif
  1292. WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
  1293. "pop %%"REG_BP" \n\t"
  1294. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1295. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1296. "a" (&c->redDither)
  1297. );
  1298. return;
  1299. case IMGFMT_YUY2:
  1300. asm volatile(
  1301. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1302. "mov %4, %%"REG_b" \n\t"
  1303. "push %%"REG_BP" \n\t"
  1304. YSCALEYUV2PACKED1(%%REGBP, %5)
  1305. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1306. "pop %%"REG_BP" \n\t"
  1307. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1308. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1309. "a" (&c->redDither)
  1310. );
  1311. return;
  1312. }
  1313. }
  1314. else
  1315. {
  1316. switch(dstFormat)
  1317. {
  1318. case IMGFMT_BGR32:
  1319. asm volatile(
  1320. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1321. "mov %4, %%"REG_b" \n\t"
  1322. "push %%"REG_BP" \n\t"
  1323. YSCALEYUV2RGB1b(%%REGBP, %5)
  1324. WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
  1325. "pop %%"REG_BP" \n\t"
  1326. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1327. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1328. "a" (&c->redDither)
  1329. );
  1330. return;
  1331. case IMGFMT_BGR24:
  1332. asm volatile(
  1333. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1334. "mov %4, %%"REG_b" \n\t"
  1335. "push %%"REG_BP" \n\t"
  1336. YSCALEYUV2RGB1b(%%REGBP, %5)
  1337. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1338. "pop %%"REG_BP" \n\t"
  1339. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1340. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1341. "a" (&c->redDither)
  1342. );
  1343. return;
  1344. case IMGFMT_BGR15:
  1345. asm volatile(
  1346. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1347. "mov %4, %%"REG_b" \n\t"
  1348. "push %%"REG_BP" \n\t"
  1349. YSCALEYUV2RGB1b(%%REGBP, %5)
  1350. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1351. #ifdef DITHER1XBPP
  1352. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1353. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1354. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1355. #endif
  1356. WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
  1357. "pop %%"REG_BP" \n\t"
  1358. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1359. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1360. "a" (&c->redDither)
  1361. );
  1362. return;
  1363. case IMGFMT_BGR16:
  1364. asm volatile(
  1365. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1366. "mov %4, %%"REG_b" \n\t"
  1367. "push %%"REG_BP" \n\t"
  1368. YSCALEYUV2RGB1b(%%REGBP, %5)
  1369. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1370. #ifdef DITHER1XBPP
  1371. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1372. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1373. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1374. #endif
  1375. WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
  1376. "pop %%"REG_BP" \n\t"
  1377. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1378. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1379. "a" (&c->redDither)
  1380. );
  1381. return;
  1382. case IMGFMT_YUY2:
  1383. asm volatile(
  1384. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1385. "mov %4, %%"REG_b" \n\t"
  1386. "push %%"REG_BP" \n\t"
  1387. YSCALEYUV2PACKED1b(%%REGBP, %5)
  1388. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1389. "pop %%"REG_BP" \n\t"
  1390. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1391. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1392. "a" (&c->redDither)
  1393. );
  1394. return;
  1395. }
  1396. }
  1397. #endif
  1398. if( uvalpha < 2048 )
  1399. {
  1400. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C)
  1401. }else{
  1402. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C)
  1403. }
  1404. }
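/* The *ToY / *ToUV helpers that follow convert packed input formats to the
   planar 8-bit samples the horizontal scaler works on: the Y routines pick out
   every other byte (via the bm01010101 mask or a >>8), and the YUY2/UYVY UV
   routines additionally average the two source lines (PAVGB, or plain C) and
   split U from V. */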
  1405. //FIXME the yuy2* functions can read up to 7 samples too many
  1406. static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width)
  1407. {
  1408. #ifdef HAVE_MMX
  1409. asm volatile(
  1410. "movq "MANGLE(bm01010101)", %%mm2\n\t"
  1411. "mov %0, %%"REG_a" \n\t"
  1412. "1: \n\t"
  1413. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1414. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1415. "pand %%mm2, %%mm0 \n\t"
  1416. "pand %%mm2, %%mm1 \n\t"
  1417. "packuswb %%mm1, %%mm0 \n\t"
  1418. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1419. "add $8, %%"REG_a" \n\t"
  1420. " js 1b \n\t"
  1421. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1422. : "%"REG_a
  1423. );
  1424. #else
  1425. int i;
  1426. for(i=0; i<width; i++)
  1427. dst[i]= src[2*i];
  1428. #endif
  1429. }
  1430. static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
  1431. {
  1432. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1433. asm volatile(
  1434. "movq "MANGLE(bm01010101)", %%mm4\n\t"
  1435. "mov %0, %%"REG_a" \n\t"
  1436. "1: \n\t"
  1437. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1438. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1439. "movq (%2, %%"REG_a",4), %%mm2 \n\t"
  1440. "movq 8(%2, %%"REG_a",4), %%mm3 \n\t"
  1441. PAVGB(%%mm2, %%mm0)
  1442. PAVGB(%%mm3, %%mm1)
  1443. "psrlw $8, %%mm0 \n\t"
  1444. "psrlw $8, %%mm1 \n\t"
  1445. "packuswb %%mm1, %%mm0 \n\t"
  1446. "movq %%mm0, %%mm1 \n\t"
  1447. "psrlw $8, %%mm0 \n\t"
  1448. "pand %%mm4, %%mm1 \n\t"
  1449. "packuswb %%mm0, %%mm0 \n\t"
  1450. "packuswb %%mm1, %%mm1 \n\t"
  1451. "movd %%mm0, (%4, %%"REG_a") \n\t"
  1452. "movd %%mm1, (%3, %%"REG_a") \n\t"
  1453. "add $4, %%"REG_a" \n\t"
  1454. " js 1b \n\t"
  1455. : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
  1456. : "%"REG_a
  1457. );
  1458. #else
  1459. int i;
  1460. for(i=0; i<width; i++)
  1461. {
  1462. dstU[i]= (src1[4*i + 1] + src2[4*i + 1])>>1;
  1463. dstV[i]= (src1[4*i + 3] + src2[4*i + 3])>>1;
  1464. }
  1465. #endif
  1466. }
  1467. //this is almost identical to the previous one, and exists only because yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses
  1468. static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width)
  1469. {
  1470. #ifdef HAVE_MMX
  1471. asm volatile(
  1472. "mov %0, %%"REG_a" \n\t"
  1473. "1: \n\t"
  1474. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1475. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1476. "psrlw $8, %%mm0 \n\t"
  1477. "psrlw $8, %%mm1 \n\t"
  1478. "packuswb %%mm1, %%mm0 \n\t"
  1479. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1480. "add $8, %%"REG_a" \n\t"
  1481. " js 1b \n\t"
  1482. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1483. : "%"REG_a
  1484. );
  1485. #else
  1486. int i;
  1487. for(i=0; i<width; i++)
  1488. dst[i]= src[2*i+1];
  1489. #endif
  1490. }
  1491. static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
  1492. {
  1493. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1494. asm volatile(
  1495. "movq "MANGLE(bm01010101)", %%mm4\n\t"
  1496. "mov %0, %%"REG_a" \n\t"
  1497. "1: \n\t"
  1498. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1499. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1500. "movq (%2, %%"REG_a",4), %%mm2 \n\t"
  1501. "movq 8(%2, %%"REG_a",4), %%mm3 \n\t"
  1502. PAVGB(%%mm2, %%mm0)
  1503. PAVGB(%%mm3, %%mm1)
  1504. "pand %%mm4, %%mm0 \n\t"
  1505. "pand %%mm4, %%mm1 \n\t"
  1506. "packuswb %%mm1, %%mm0 \n\t"
  1507. "movq %%mm0, %%mm1 \n\t"
  1508. "psrlw $8, %%mm0 \n\t"
  1509. "pand %%mm4, %%mm1 \n\t"
  1510. "packuswb %%mm0, %%mm0 \n\t"
  1511. "packuswb %%mm1, %%mm1 \n\t"
  1512. "movd %%mm0, (%4, %%"REG_a") \n\t"
  1513. "movd %%mm1, (%3, %%"REG_a") \n\t"
  1514. "add $4, %%"REG_a" \n\t"
  1515. " js 1b \n\t"
  1516. : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
  1517. : "%"REG_a
  1518. );
  1519. #else
  1520. int i;
  1521. for(i=0; i<width; i++)
  1522. {
  1523. dstU[i]= (src1[4*i + 0] + src2[4*i + 0])>>1;
  1524. dstV[i]= (src1[4*i + 2] + src2[4*i + 2])>>1;
  1525. }
  1526. #endif
  1527. }
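/* Packed RGB/BGR -> planar YUV input converters follow. Each *ToY routine
   emits one 8-bit luma line; each *ToUV routine averages a 2x2 block taken
   from two source lines into one subsampled chroma sample. The rounding
   constant (33<<(RGB2YUV_SHIFT-1)) equals 16.5 in this fixed-point scale,
   i.e. the +16 luma offset plus 0.5 for rounding. */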
  1528. static inline void RENAME(bgr32ToY)(uint8_t *dst, uint8_t *src, int width)
  1529. {
  1530. int i;
  1531. for(i=0; i<width; i++)
  1532. {
  1533. int b= ((uint32_t*)src)[i]&0xFF;
  1534. int g= (((uint32_t*)src)[i]>>8)&0xFF;
  1535. int r= (((uint32_t*)src)[i]>>16)&0xFF;
  1536. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  1537. }
  1538. }
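/* bgr32ToUV: the 0xFF00FF / 0x00FF00 masks let the four B and four R values
   of a 2x2 block accumulate in separate bit ranges of l (and the G values in
   h) without carries spilling between fields; the division by 4 is folded
   into the >>(RGB2YUV_SHIFT+2). */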
  1539. static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1540. {
  1541. int i;
  1542. for(i=0; i<width; i++)
  1543. {
  1544. const int a= ((uint32_t*)src1)[2*i+0];
  1545. const int e= ((uint32_t*)src1)[2*i+1];
  1546. const int c= ((uint32_t*)src2)[2*i+0];
  1547. const int d= ((uint32_t*)src2)[2*i+1];
  1548. const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF);
  1549. const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00);
  1550. const int b= l&0x3FF;
  1551. const int g= h>>8;
  1552. const int r= l>>16;
  1553. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1554. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1555. }
  1556. }
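/* bgr24ToY: the MMX loop handles 8 pixels (24 bytes) per iteration, using
   pmaddwd with bgr2YCoeff and adding bgr2YOffset; the C fallback mirrors
   bgr32ToY on the 3-byte-per-pixel layout. */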
  1557. static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, long width)
  1558. {
  1559. #ifdef HAVE_MMX
  1560. asm volatile(
  1561. "mov %2, %%"REG_a" \n\t"
  1562. "movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t"
  1563. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1564. "pxor %%mm7, %%mm7 \n\t"
  1565. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t"
  1566. ASMALIGN16
  1567. "1: \n\t"
  1568. PREFETCH" 64(%0, %%"REG_b") \n\t"
  1569. "movd (%0, %%"REG_b"), %%mm0 \n\t"
  1570. "movd 3(%0, %%"REG_b"), %%mm1 \n\t"
  1571. "punpcklbw %%mm7, %%mm0 \n\t"
  1572. "punpcklbw %%mm7, %%mm1 \n\t"
  1573. "movd 6(%0, %%"REG_b"), %%mm2 \n\t"
  1574. "movd 9(%0, %%"REG_b"), %%mm3 \n\t"
  1575. "punpcklbw %%mm7, %%mm2 \n\t"
  1576. "punpcklbw %%mm7, %%mm3 \n\t"
  1577. "pmaddwd %%mm6, %%mm0 \n\t"
  1578. "pmaddwd %%mm6, %%mm1 \n\t"
  1579. "pmaddwd %%mm6, %%mm2 \n\t"
  1580. "pmaddwd %%mm6, %%mm3 \n\t"
  1581. #ifndef FAST_BGR2YV12
  1582. "psrad $8, %%mm0 \n\t"
  1583. "psrad $8, %%mm1 \n\t"
  1584. "psrad $8, %%mm2 \n\t"
  1585. "psrad $8, %%mm3 \n\t"
  1586. #endif
  1587. "packssdw %%mm1, %%mm0 \n\t"
  1588. "packssdw %%mm3, %%mm2 \n\t"
  1589. "pmaddwd %%mm5, %%mm0 \n\t"
  1590. "pmaddwd %%mm5, %%mm2 \n\t"
  1591. "packssdw %%mm2, %%mm0 \n\t"
  1592. "psraw $7, %%mm0 \n\t"
  1593. "movd 12(%0, %%"REG_b"), %%mm4 \n\t"
  1594. "movd 15(%0, %%"REG_b"), %%mm1 \n\t"
  1595. "punpcklbw %%mm7, %%mm4 \n\t"
  1596. "punpcklbw %%mm7, %%mm1 \n\t"
  1597. "movd 18(%0, %%"REG_b"), %%mm2 \n\t"
  1598. "movd 21(%0, %%"REG_b"), %%mm3 \n\t"
  1599. "punpcklbw %%mm7, %%mm2 \n\t"
  1600. "punpcklbw %%mm7, %%mm3 \n\t"
  1601. "pmaddwd %%mm6, %%mm4 \n\t"
  1602. "pmaddwd %%mm6, %%mm1 \n\t"
  1603. "pmaddwd %%mm6, %%mm2 \n\t"
  1604. "pmaddwd %%mm6, %%mm3 \n\t"
  1605. #ifndef FAST_BGR2YV12
  1606. "psrad $8, %%mm4 \n\t"
  1607. "psrad $8, %%mm1 \n\t"
  1608. "psrad $8, %%mm2 \n\t"
  1609. "psrad $8, %%mm3 \n\t"
  1610. #endif
  1611. "packssdw %%mm1, %%mm4 \n\t"
  1612. "packssdw %%mm3, %%mm2 \n\t"
  1613. "pmaddwd %%mm5, %%mm4 \n\t"
  1614. "pmaddwd %%mm5, %%mm2 \n\t"
  1615. "add $24, %%"REG_b" \n\t"
  1616. "packssdw %%mm2, %%mm4 \n\t"
  1617. "psraw $7, %%mm4 \n\t"
  1618. "packuswb %%mm4, %%mm0 \n\t"
  1619. "paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t"
  1620. "movq %%mm0, (%1, %%"REG_a") \n\t"
  1621. "add $8, %%"REG_a" \n\t"
  1622. " js 1b \n\t"
  1623. : : "r" (src+width*3), "r" (dst+width), "g" (-width)
  1624. : "%"REG_a, "%"REG_b
  1625. );
  1626. #else
  1627. int i;
  1628. for(i=0; i<width; i++)
  1629. {
  1630. int b= src[i*3+0];
  1631. int g= src[i*3+1];
  1632. int r= src[i*3+2];
  1633. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  1634. }
  1635. #endif
  1636. }
  1637. static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
  1638. {
  1639. #ifdef HAVE_MMX
  1640. asm volatile(
  1641. "mov %4, %%"REG_a" \n\t"
  1642. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1643. "movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t"
  1644. "pxor %%mm7, %%mm7 \n\t"
  1645. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b" \n\t"
  1646. "add %%"REG_b", %%"REG_b" \n\t"
  1647. ASMALIGN16
  1648. "1: \n\t"
  1649. PREFETCH" 64(%0, %%"REG_b") \n\t"
  1650. PREFETCH" 64(%1, %%"REG_b") \n\t"
  1651. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1652. "movq (%0, %%"REG_b"), %%mm0 \n\t"
  1653. "movq (%1, %%"REG_b"), %%mm1 \n\t"
  1654. "movq 6(%0, %%"REG_b"), %%mm2 \n\t"
  1655. "movq 6(%1, %%"REG_b"), %%mm3 \n\t"
  1656. PAVGB(%%mm1, %%mm0)
  1657. PAVGB(%%mm3, %%mm2)
  1658. "movq %%mm0, %%mm1 \n\t"
  1659. "movq %%mm2, %%mm3 \n\t"
  1660. "psrlq $24, %%mm0 \n\t"
  1661. "psrlq $24, %%mm2 \n\t"
  1662. PAVGB(%%mm1, %%mm0)
  1663. PAVGB(%%mm3, %%mm2)
  1664. "punpcklbw %%mm7, %%mm0 \n\t"
  1665. "punpcklbw %%mm7, %%mm2 \n\t"
  1666. #else
  1667. "movd (%0, %%"REG_b"), %%mm0 \n\t"
  1668. "movd (%1, %%"REG_b"), %%mm1 \n\t"
  1669. "movd 3(%0, %%"REG_b"), %%mm2 \n\t"
  1670. "movd 3(%1, %%"REG_b"), %%mm3 \n\t"
  1671. "punpcklbw %%mm7, %%mm0 \n\t"
  1672. "punpcklbw %%mm7, %%mm1 \n\t"
  1673. "punpcklbw %%mm7, %%mm2 \n\t"
  1674. "punpcklbw %%mm7, %%mm3 \n\t"
  1675. "paddw %%mm1, %%mm0 \n\t"
  1676. "paddw %%mm3, %%mm2 \n\t"
  1677. "paddw %%mm2, %%mm0 \n\t"
  1678. "movd 6(%0, %%"REG_b"), %%mm4 \n\t"
  1679. "movd 6(%1, %%"REG_b"), %%mm1 \n\t"
  1680. "movd 9(%0, %%"REG_b"), %%mm2 \n\t"
  1681. "movd 9(%1, %%"REG_b"), %%mm3 \n\t"
  1682. "punpcklbw %%mm7, %%mm4 \n\t"
  1683. "punpcklbw %%mm7, %%mm1 \n\t"
  1684. "punpcklbw %%mm7, %%mm2 \n\t"
  1685. "punpcklbw %%mm7, %%mm3 \n\t"
  1686. "paddw %%mm1, %%mm4 \n\t"
  1687. "paddw %%mm3, %%mm2 \n\t"
  1688. "paddw %%mm4, %%mm2 \n\t"
  1689. "psrlw $2, %%mm0 \n\t"
  1690. "psrlw $2, %%mm2 \n\t"
  1691. #endif
  1692. "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
  1693. "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
  1694. "pmaddwd %%mm0, %%mm1 \n\t"
  1695. "pmaddwd %%mm2, %%mm3 \n\t"
  1696. "pmaddwd %%mm6, %%mm0 \n\t"
  1697. "pmaddwd %%mm6, %%mm2 \n\t"
  1698. #ifndef FAST_BGR2YV12
  1699. "psrad $8, %%mm0 \n\t"
  1700. "psrad $8, %%mm1 \n\t"
  1701. "psrad $8, %%mm2 \n\t"
  1702. "psrad $8, %%mm3 \n\t"
  1703. #endif
  1704. "packssdw %%mm2, %%mm0 \n\t"
  1705. "packssdw %%mm3, %%mm1 \n\t"
  1706. "pmaddwd %%mm5, %%mm0 \n\t"
  1707. "pmaddwd %%mm5, %%mm1 \n\t"
  1708. "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
  1709. "psraw $7, %%mm0 \n\t"
  1710. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1711. "movq 12(%0, %%"REG_b"), %%mm4 \n\t"
  1712. "movq 12(%1, %%"REG_b"), %%mm1 \n\t"
  1713. "movq 18(%0, %%"REG_b"), %%mm2 \n\t"
  1714. "movq 18(%1, %%"REG_b"), %%mm3 \n\t"
  1715. PAVGB(%%mm1, %%mm4)
  1716. PAVGB(%%mm3, %%mm2)
  1717. "movq %%mm4, %%mm1 \n\t"
  1718. "movq %%mm2, %%mm3 \n\t"
  1719. "psrlq $24, %%mm4 \n\t"
  1720. "psrlq $24, %%mm2 \n\t"
  1721. PAVGB(%%mm1, %%mm4)
  1722. PAVGB(%%mm3, %%mm2)
  1723. "punpcklbw %%mm7, %%mm4 \n\t"
  1724. "punpcklbw %%mm7, %%mm2 \n\t"
  1725. #else
  1726. "movd 12(%0, %%"REG_b"), %%mm4 \n\t"
  1727. "movd 12(%1, %%"REG_b"), %%mm1 \n\t"
  1728. "movd 15(%0, %%"REG_b"), %%mm2 \n\t"
  1729. "movd 15(%1, %%"REG_b"), %%mm3 \n\t"
  1730. "punpcklbw %%mm7, %%mm4 \n\t"
  1731. "punpcklbw %%mm7, %%mm1 \n\t"
  1732. "punpcklbw %%mm7, %%mm2 \n\t"
  1733. "punpcklbw %%mm7, %%mm3 \n\t"
  1734. "paddw %%mm1, %%mm4 \n\t"
  1735. "paddw %%mm3, %%mm2 \n\t"
  1736. "paddw %%mm2, %%mm4 \n\t"
  1737. "movd 18(%0, %%"REG_b"), %%mm5 \n\t"
  1738. "movd 18(%1, %%"REG_b"), %%mm1 \n\t"
  1739. "movd 21(%0, %%"REG_b"), %%mm2 \n\t"
  1740. "movd 21(%1, %%"REG_b"), %%mm3 \n\t"
  1741. "punpcklbw %%mm7, %%mm5 \n\t"
  1742. "punpcklbw %%mm7, %%mm1 \n\t"
  1743. "punpcklbw %%mm7, %%mm2 \n\t"
  1744. "punpcklbw %%mm7, %%mm3 \n\t"
  1745. "paddw %%mm1, %%mm5 \n\t"
  1746. "paddw %%mm3, %%mm2 \n\t"
  1747. "paddw %%mm5, %%mm2 \n\t"
  1748. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1749. "psrlw $2, %%mm4 \n\t"
  1750. "psrlw $2, %%mm2 \n\t"
  1751. #endif
  1752. "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
  1753. "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
  1754. "pmaddwd %%mm4, %%mm1 \n\t"
  1755. "pmaddwd %%mm2, %%mm3 \n\t"
  1756. "pmaddwd %%mm6, %%mm4 \n\t"
  1757. "pmaddwd %%mm6, %%mm2 \n\t"
  1758. #ifndef FAST_BGR2YV12
  1759. "psrad $8, %%mm4 \n\t"
  1760. "psrad $8, %%mm1 \n\t"
  1761. "psrad $8, %%mm2 \n\t"
  1762. "psrad $8, %%mm3 \n\t"
  1763. #endif
  1764. "packssdw %%mm2, %%mm4 \n\t"
  1765. "packssdw %%mm3, %%mm1 \n\t"
  1766. "pmaddwd %%mm5, %%mm4 \n\t"
  1767. "pmaddwd %%mm5, %%mm1 \n\t"
  1768. "add $24, %%"REG_b" \n\t"
  1769. "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
  1770. "psraw $7, %%mm4 \n\t"
  1771. "movq %%mm0, %%mm1 \n\t"
  1772. "punpckldq %%mm4, %%mm0 \n\t"
  1773. "punpckhdq %%mm4, %%mm1 \n\t"
  1774. "packsswb %%mm1, %%mm0 \n\t"
  1775. "paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t"
  1776. "movd %%mm0, (%2, %%"REG_a") \n\t"
  1777. "punpckhdq %%mm0, %%mm0 \n\t"
  1778. "movd %%mm0, (%3, %%"REG_a") \n\t"
  1779. "add $4, %%"REG_a" \n\t"
  1780. " js 1b \n\t"
  1781. : : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width)
  1782. : "%"REG_a, "%"REG_b
  1783. );
  1784. #else
  1785. int i;
  1786. for(i=0; i<width; i++)
  1787. {
  1788. int b= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
  1789. int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
  1790. int r= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
  1791. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1792. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1793. }
  1794. #endif
  1795. }
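/* bgr16ToY / bgr15ToY: unpack the 5-6-5 / 5-5-5 fields; the coefficient
   scaling and the reduced shift (RGB2YUV_SHIFT-2 / -3) bring the narrow
   components back to roughly 8-bit range before the usual luma weighting. */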
  1796. static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width)
  1797. {
  1798. int i;
  1799. for(i=0; i<width; i++)
  1800. {
  1801. int d= ((uint16_t*)src)[i];
  1802. int b= d&0x1F;
  1803. int g= (d>>5)&0x3F;
  1804. int r= (d>>11)&0x1F;
  1805. dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
  1806. }
  1807. }
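/* bgr16ToUV / bgr15ToUV: two 16bpp pixels are read as one 32-bit word; the
   bit masks are chosen so that the fields of two pixels and two lines can be
   added without carries crossing component boundaries, giving per-component
   sums of four samples which the final shift then averages and scales. */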
  1808. static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1809. {
  1810. int i;
  1811. for(i=0; i<width; i++)
  1812. {
  1813. int d0= ((uint32_t*)src1)[i];
  1814. int d1= ((uint32_t*)src2)[i];
  1815. int dl= (d0&0x07E0F81F) + (d1&0x07E0F81F);
  1816. int dh= ((d0>>5)&0x07C0F83F) + ((d1>>5)&0x07C0F83F);
  1817. int dh2= (dh>>11) + (dh<<21);
  1818. int d= dh2 + dl;
  1819. int b= d&0x7F;
  1820. int r= (d>>11)&0x7F;
  1821. int g= d>>21;
  1822. dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
  1823. dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
  1824. }
  1825. }
  1826. static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width)
  1827. {
  1828. int i;
  1829. for(i=0; i<width; i++)
  1830. {
  1831. int d= ((uint16_t*)src)[i];
  1832. int b= d&0x1F;
  1833. int g= (d>>5)&0x1F;
  1834. int r= (d>>10)&0x1F;
  1835. dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
  1836. }
  1837. }
  1838. static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1839. {
  1840. int i;
  1841. for(i=0; i<width; i++)
  1842. {
  1843. int d0= ((uint32_t*)src1)[i];
  1844. int d1= ((uint32_t*)src2)[i];
  1845. int dl= (d0&0x03E07C1F) + (d1&0x03E07C1F);
  1846. int dh= ((d0>>5)&0x03E0F81F) + ((d1>>5)&0x03E0F81F);
  1847. int dh2= (dh>>11) + (dh<<21);
  1848. int d= dh2 + dl;
  1849. int b= d&0x7F;
  1850. int r= (d>>10)&0x7F;
  1851. int g= d>>21;
  1852. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
  1853. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
  1854. }
  1855. }
  1856. static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width)
  1857. {
  1858. int i;
  1859. for(i=0; i<width; i++)
  1860. {
  1861. int r= ((uint32_t*)src)[i]&0xFF;
  1862. int g= (((uint32_t*)src)[i]>>8)&0xFF;
  1863. int b= (((uint32_t*)src)[i]>>16)&0xFF;
  1864. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  1865. }
  1866. }
  1867. static inline void RENAME(rgb32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1868. {
  1869. int i;
  1870. for(i=0; i<width; i++)
  1871. {
  1872. const int a= ((uint32_t*)src1)[2*i+0];
  1873. const int e= ((uint32_t*)src1)[2*i+1];
  1874. const int c= ((uint32_t*)src2)[2*i+0];
  1875. const int d= ((uint32_t*)src2)[2*i+1];
  1876. const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF);
  1877. const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00);
  1878. const int r= l&0x3FF;
  1879. const int g= h>>8;
  1880. const int b= l>>16;
  1881. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1882. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1883. }
  1884. }
  1885. static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, int width)
  1886. {
  1887. int i;
  1888. for(i=0; i<width; i++)
  1889. {
  1890. int r= src[i*3+0];
  1891. int g= src[i*3+1];
  1892. int b= src[i*3+2];
  1893. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  1894. }
  1895. }
  1896. static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1897. {
  1898. int i;
  1899. for(i=0; i<width; i++)
  1900. {
  1901. int r= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
  1902. int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
  1903. int b= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
  1904. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1905. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1906. }
  1907. }
  1908. // Bilinear / Bicubic scaling
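/* hScale: generic horizontal scaler. For each output sample
     dst[i] = clip( sum_j src[filterPos[i]+j] * filter[i*filterSize+j] >> 7, 0, 32767 )
   The MMX code special-cases filterSize 4 and 8 and otherwise falls back to
   a generic loop that computes two output samples per outer iteration. */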
  1909. static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
  1910. int16_t *filter, int16_t *filterPos, long filterSize)
  1911. {
  1912. #ifdef HAVE_MMX
  1913. assert(filterSize % 4 == 0 && filterSize>0);
1914. if(filterSize==4) // always true for upscaling, sometimes for downscaling too
  1915. {
  1916. long counter= -2*dstW;
  1917. filter-= counter*2;
  1918. filterPos-= counter/2;
  1919. dst-= counter/2;
  1920. asm volatile(
  1921. "pxor %%mm7, %%mm7 \n\t"
  1922. "movq "MANGLE(w02)", %%mm6 \n\t"
  1923. "push %%"REG_BP" \n\t" // we use 7 regs here ...
  1924. "mov %%"REG_a", %%"REG_BP" \n\t"
  1925. ASMALIGN16
  1926. "1: \n\t"
  1927. "movzwl (%2, %%"REG_BP"), %%eax \n\t"
  1928. "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
  1929. "movq (%1, %%"REG_BP", 4), %%mm1\n\t"
  1930. "movq 8(%1, %%"REG_BP", 4), %%mm3\n\t"
  1931. "movd (%3, %%"REG_a"), %%mm0 \n\t"
  1932. "movd (%3, %%"REG_b"), %%mm2 \n\t"
  1933. "punpcklbw %%mm7, %%mm0 \n\t"
  1934. "punpcklbw %%mm7, %%mm2 \n\t"
  1935. "pmaddwd %%mm1, %%mm0 \n\t"
  1936. "pmaddwd %%mm2, %%mm3 \n\t"
  1937. "psrad $8, %%mm0 \n\t"
  1938. "psrad $8, %%mm3 \n\t"
  1939. "packssdw %%mm3, %%mm0 \n\t"
  1940. "pmaddwd %%mm6, %%mm0 \n\t"
  1941. "packssdw %%mm0, %%mm0 \n\t"
  1942. "movd %%mm0, (%4, %%"REG_BP") \n\t"
  1943. "add $4, %%"REG_BP" \n\t"
  1944. " jnc 1b \n\t"
  1945. "pop %%"REG_BP" \n\t"
  1946. : "+a" (counter)
  1947. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  1948. : "%"REG_b
  1949. );
  1950. }
  1951. else if(filterSize==8)
  1952. {
  1953. long counter= -2*dstW;
  1954. filter-= counter*4;
  1955. filterPos-= counter/2;
  1956. dst-= counter/2;
  1957. asm volatile(
  1958. "pxor %%mm7, %%mm7 \n\t"
  1959. "movq "MANGLE(w02)", %%mm6 \n\t"
  1960. "push %%"REG_BP" \n\t" // we use 7 regs here ...
  1961. "mov %%"REG_a", %%"REG_BP" \n\t"
  1962. ASMALIGN16
  1963. "1: \n\t"
  1964. "movzwl (%2, %%"REG_BP"), %%eax \n\t"
  1965. "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
  1966. "movq (%1, %%"REG_BP", 8), %%mm1\n\t"
  1967. "movq 16(%1, %%"REG_BP", 8), %%mm3\n\t"
  1968. "movd (%3, %%"REG_a"), %%mm0 \n\t"
  1969. "movd (%3, %%"REG_b"), %%mm2 \n\t"
  1970. "punpcklbw %%mm7, %%mm0 \n\t"
  1971. "punpcklbw %%mm7, %%mm2 \n\t"
  1972. "pmaddwd %%mm1, %%mm0 \n\t"
  1973. "pmaddwd %%mm2, %%mm3 \n\t"
  1974. "movq 8(%1, %%"REG_BP", 8), %%mm1\n\t"
  1975. "movq 24(%1, %%"REG_BP", 8), %%mm5\n\t"
  1976. "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
  1977. "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
  1978. "punpcklbw %%mm7, %%mm4 \n\t"
  1979. "punpcklbw %%mm7, %%mm2 \n\t"
  1980. "pmaddwd %%mm1, %%mm4 \n\t"
  1981. "pmaddwd %%mm2, %%mm5 \n\t"
  1982. "paddd %%mm4, %%mm0 \n\t"
  1983. "paddd %%mm5, %%mm3 \n\t"
  1984. "psrad $8, %%mm0 \n\t"
  1985. "psrad $8, %%mm3 \n\t"
  1986. "packssdw %%mm3, %%mm0 \n\t"
  1987. "pmaddwd %%mm6, %%mm0 \n\t"
  1988. "packssdw %%mm0, %%mm0 \n\t"
  1989. "movd %%mm0, (%4, %%"REG_BP") \n\t"
  1990. "add $4, %%"REG_BP" \n\t"
  1991. " jnc 1b \n\t"
  1992. "pop %%"REG_BP" \n\t"
  1993. : "+a" (counter)
  1994. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  1995. : "%"REG_b
  1996. );
  1997. }
  1998. else
  1999. {
  2000. uint8_t *offset = src+filterSize;
  2001. long counter= -2*dstW;
  2002. // filter-= counter*filterSize/2;
  2003. filterPos-= counter/2;
  2004. dst-= counter/2;
  2005. asm volatile(
  2006. "pxor %%mm7, %%mm7 \n\t"
  2007. "movq "MANGLE(w02)", %%mm6 \n\t"
  2008. ASMALIGN16
  2009. "1: \n\t"
  2010. "mov %2, %%"REG_c" \n\t"
  2011. "movzwl (%%"REG_c", %0), %%eax \n\t"
  2012. "movzwl 2(%%"REG_c", %0), %%ebx \n\t"
  2013. "mov %5, %%"REG_c" \n\t"
  2014. "pxor %%mm4, %%mm4 \n\t"
  2015. "pxor %%mm5, %%mm5 \n\t"
  2016. "2: \n\t"
  2017. "movq (%1), %%mm1 \n\t"
  2018. "movq (%1, %6), %%mm3 \n\t"
  2019. "movd (%%"REG_c", %%"REG_a"), %%mm0\n\t"
  2020. "movd (%%"REG_c", %%"REG_b"), %%mm2\n\t"
  2021. "punpcklbw %%mm7, %%mm0 \n\t"
  2022. "punpcklbw %%mm7, %%mm2 \n\t"
  2023. "pmaddwd %%mm1, %%mm0 \n\t"
  2024. "pmaddwd %%mm2, %%mm3 \n\t"
  2025. "paddd %%mm3, %%mm5 \n\t"
  2026. "paddd %%mm0, %%mm4 \n\t"
  2027. "add $8, %1 \n\t"
  2028. "add $4, %%"REG_c" \n\t"
  2029. "cmp %4, %%"REG_c" \n\t"
  2030. " jb 2b \n\t"
  2031. "add %6, %1 \n\t"
  2032. "psrad $8, %%mm4 \n\t"
  2033. "psrad $8, %%mm5 \n\t"
  2034. "packssdw %%mm5, %%mm4 \n\t"
  2035. "pmaddwd %%mm6, %%mm4 \n\t"
  2036. "packssdw %%mm4, %%mm4 \n\t"
  2037. "mov %3, %%"REG_a" \n\t"
  2038. "movd %%mm4, (%%"REG_a", %0) \n\t"
  2039. "add $4, %0 \n\t"
  2040. " jnc 1b \n\t"
  2041. : "+r" (counter), "+r" (filter)
  2042. : "m" (filterPos), "m" (dst), "m"(offset),
  2043. "m" (src), "r" (filterSize*2)
  2044. : "%"REG_b, "%"REG_a, "%"REG_c
  2045. );
  2046. }
  2047. #else
  2048. #ifdef HAVE_ALTIVEC
  2049. hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
  2050. #else
  2051. int i;
  2052. for(i=0; i<dstW; i++)
  2053. {
  2054. int j;
  2055. int srcPos= filterPos[i];
  2056. int val=0;
  2057. // printf("filterPos: %d\n", filterPos[i]);
  2058. for(j=0; j<filterSize; j++)
  2059. {
  2060. // printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
  2061. val += ((int)src[srcPos + j])*filter[filterSize*i + j];
  2062. }
  2063. // filter += hFilterSize;
  2064. dst[i] = MIN(MAX(0, val>>7), (1<<15)-1); // the cubic equation does overflow ...
  2065. // dst[i] = val>>7;
  2066. }
  2067. #endif
  2068. #endif
  2069. }
  2070. // *** horizontal scale Y line to temp buffer
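/* hyscale: convert one input line to 8-bit luma if necessary (packed YUV and
   RGB/BGR sources go through formatConvBuffer), then scale it horizontally
   into dst. Without SWS_FAST_BILINEAR, or when MMX2 cannot be used, this
   calls hScale; otherwise a fast bilinear path runs, either the pre-built
   MMX2 "funny" code or the plain x86 loop below. */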
  2071. static inline void RENAME(hyscale)(uint16_t *dst, long dstWidth, uint8_t *src, int srcW, int xInc,
  2072. int flags, int canMMX2BeUsed, int16_t *hLumFilter,
  2073. int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
  2074. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  2075. int32_t *mmx2FilterPos)
  2076. {
  2077. if(srcFormat==IMGFMT_YUY2)
  2078. {
  2079. RENAME(yuy2ToY)(formatConvBuffer, src, srcW);
  2080. src= formatConvBuffer;
  2081. }
  2082. else if(srcFormat==IMGFMT_UYVY)
  2083. {
  2084. RENAME(uyvyToY)(formatConvBuffer, src, srcW);
  2085. src= formatConvBuffer;
  2086. }
  2087. else if(srcFormat==IMGFMT_BGR32)
  2088. {
  2089. RENAME(bgr32ToY)(formatConvBuffer, src, srcW);
  2090. src= formatConvBuffer;
  2091. }
  2092. else if(srcFormat==IMGFMT_BGR24)
  2093. {
  2094. RENAME(bgr24ToY)(formatConvBuffer, src, srcW);
  2095. src= formatConvBuffer;
  2096. }
  2097. else if(srcFormat==IMGFMT_BGR16)
  2098. {
  2099. RENAME(bgr16ToY)(formatConvBuffer, src, srcW);
  2100. src= formatConvBuffer;
  2101. }
  2102. else if(srcFormat==IMGFMT_BGR15)
  2103. {
  2104. RENAME(bgr15ToY)(formatConvBuffer, src, srcW);
  2105. src= formatConvBuffer;
  2106. }
  2107. else if(srcFormat==IMGFMT_RGB32)
  2108. {
  2109. RENAME(rgb32ToY)(formatConvBuffer, src, srcW);
  2110. src= formatConvBuffer;
  2111. }
  2112. else if(srcFormat==IMGFMT_RGB24)
  2113. {
  2114. RENAME(rgb24ToY)(formatConvBuffer, src, srcW);
  2115. src= formatConvBuffer;
  2116. }
  2117. #ifdef HAVE_MMX
2118. // use the new MMX scaler if MMX2 can't be used (it's faster than the x86 asm one)
  2119. if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2120. #else
  2121. if(!(flags&SWS_FAST_BILINEAR))
  2122. #endif
  2123. {
  2124. RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
  2125. }
  2126. else // Fast Bilinear upscale / crap downscale
  2127. {
  2128. #if defined(ARCH_X86) || defined(ARCH_X86_64)
  2129. #ifdef HAVE_MMX2
  2130. int i;
  2131. if(canMMX2BeUsed)
  2132. {
  2133. asm volatile(
  2134. "pxor %%mm7, %%mm7 \n\t"
  2135. "mov %0, %%"REG_c" \n\t"
  2136. "mov %1, %%"REG_D" \n\t"
  2137. "mov %2, %%"REG_d" \n\t"
  2138. "mov %3, %%"REG_b" \n\t"
  2139. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2140. PREFETCH" (%%"REG_c") \n\t"
  2141. PREFETCH" 32(%%"REG_c") \n\t"
  2142. PREFETCH" 64(%%"REG_c") \n\t"
  2143. #ifdef ARCH_X86_64
  2144. #define FUNNY_Y_CODE \
  2145. "movl (%%"REG_b"), %%esi \n\t"\
  2146. "call *%4 \n\t"\
  2147. "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
  2148. "add %%"REG_S", %%"REG_c" \n\t"\
  2149. "add %%"REG_a", %%"REG_D" \n\t"\
  2150. "xor %%"REG_a", %%"REG_a" \n\t"\
  2151. #else
  2152. #define FUNNY_Y_CODE \
  2153. "movl (%%"REG_b"), %%esi \n\t"\
  2154. "call *%4 \n\t"\
  2155. "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
  2156. "add %%"REG_a", %%"REG_D" \n\t"\
  2157. "xor %%"REG_a", %%"REG_a" \n\t"\
  2158. #endif
  2159. FUNNY_Y_CODE
  2160. FUNNY_Y_CODE
  2161. FUNNY_Y_CODE
  2162. FUNNY_Y_CODE
  2163. FUNNY_Y_CODE
  2164. FUNNY_Y_CODE
  2165. FUNNY_Y_CODE
  2166. FUNNY_Y_CODE
  2167. :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2168. "m" (funnyYCode)
  2169. : "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
  2170. );
  2171. for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
  2172. }
  2173. else
  2174. {
  2175. #endif
  2176. long xInc_shr16 = xInc >> 16;
  2177. uint16_t xInc_mask = xInc & 0xffff;
  2178. //NO MMX just normal asm ...
  2179. asm volatile(
  2180. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2181. "xor %%"REG_b", %%"REG_b" \n\t" // xx
  2182. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2183. ASMALIGN16
  2184. "1: \n\t"
  2185. "movzbl (%0, %%"REG_b"), %%edi \n\t" //src[xx]
  2186. "movzbl 1(%0, %%"REG_b"), %%esi \n\t" //src[xx+1]
  2187. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2188. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2189. "shll $16, %%edi \n\t"
  2190. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2191. "mov %1, %%"REG_D" \n\t"
  2192. "shrl $9, %%esi \n\t"
  2193. "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
  2194. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2195. "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry
  2196. "movzbl (%0, %%"REG_b"), %%edi \n\t" //src[xx]
  2197. "movzbl 1(%0, %%"REG_b"), %%esi \n\t" //src[xx+1]
  2198. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2199. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2200. "shll $16, %%edi \n\t"
  2201. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2202. "mov %1, %%"REG_D" \n\t"
  2203. "shrl $9, %%esi \n\t"
  2204. "movw %%si, 2(%%"REG_D", %%"REG_a", 2)\n\t"
  2205. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2206. "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry
  2207. "add $2, %%"REG_a" \n\t"
  2208. "cmp %2, %%"REG_a" \n\t"
  2209. " jb 1b \n\t"
  2210. :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
  2211. : "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi"
  2212. );
  2213. #ifdef HAVE_MMX2
  2214. } //if MMX2 can't be used
  2215. #endif
  2216. #else
  2217. int i;
  2218. unsigned int xpos=0;
  2219. for(i=0;i<dstWidth;i++)
  2220. {
  2221. register unsigned int xx=xpos>>16;
  2222. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2223. dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
  2224. xpos+=xInc;
  2225. }
  2226. #endif
  2227. }
  2228. }
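/* hcscale: chroma counterpart of hyscale; the scaled U line is written to
   dst and the V line to dst+2048 (the second half of the chroma buffer). */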
  2229. inline static void RENAME(hcscale)(uint16_t *dst, long dstWidth, uint8_t *src1, uint8_t *src2,
  2230. int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
  2231. int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
  2232. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  2233. int32_t *mmx2FilterPos)
  2234. {
  2235. if(srcFormat==IMGFMT_YUY2)
  2236. {
  2237. RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2238. src1= formatConvBuffer;
  2239. src2= formatConvBuffer+2048;
  2240. }
  2241. else if(srcFormat==IMGFMT_UYVY)
  2242. {
  2243. RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2244. src1= formatConvBuffer;
  2245. src2= formatConvBuffer+2048;
  2246. }
  2247. else if(srcFormat==IMGFMT_BGR32)
  2248. {
  2249. RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2250. src1= formatConvBuffer;
  2251. src2= formatConvBuffer+2048;
  2252. }
  2253. else if(srcFormat==IMGFMT_BGR24)
  2254. {
  2255. RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2256. src1= formatConvBuffer;
  2257. src2= formatConvBuffer+2048;
  2258. }
  2259. else if(srcFormat==IMGFMT_BGR16)
  2260. {
  2261. RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2262. src1= formatConvBuffer;
  2263. src2= formatConvBuffer+2048;
  2264. }
  2265. else if(srcFormat==IMGFMT_BGR15)
  2266. {
  2267. RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2268. src1= formatConvBuffer;
  2269. src2= formatConvBuffer+2048;
  2270. }
  2271. else if(srcFormat==IMGFMT_RGB32)
  2272. {
  2273. RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2274. src1= formatConvBuffer;
  2275. src2= formatConvBuffer+2048;
  2276. }
  2277. else if(srcFormat==IMGFMT_RGB24)
  2278. {
  2279. RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2280. src1= formatConvBuffer;
  2281. src2= formatConvBuffer+2048;
  2282. }
  2283. else if(isGray(srcFormat))
  2284. {
  2285. return;
  2286. }
  2287. #ifdef HAVE_MMX
2288. // use the new MMX scaler if MMX2 can't be used (it's faster than the x86 asm one)
  2289. if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2290. #else
  2291. if(!(flags&SWS_FAST_BILINEAR))
  2292. #endif
  2293. {
  2294. RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2295. RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2296. }
  2297. else // Fast Bilinear upscale / crap downscale
  2298. {
  2299. #if defined(ARCH_X86) || defined(ARCH_X86_64)
  2300. #ifdef HAVE_MMX2
  2301. int i;
  2302. if(canMMX2BeUsed)
  2303. {
  2304. asm volatile(
  2305. "pxor %%mm7, %%mm7 \n\t"
  2306. "mov %0, %%"REG_c" \n\t"
  2307. "mov %1, %%"REG_D" \n\t"
  2308. "mov %2, %%"REG_d" \n\t"
  2309. "mov %3, %%"REG_b" \n\t"
  2310. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2311. PREFETCH" (%%"REG_c") \n\t"
  2312. PREFETCH" 32(%%"REG_c") \n\t"
  2313. PREFETCH" 64(%%"REG_c") \n\t"
  2314. #ifdef ARCH_X86_64
  2315. #define FUNNY_UV_CODE \
  2316. "movl (%%"REG_b"), %%esi \n\t"\
  2317. "call *%4 \n\t"\
  2318. "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
  2319. "add %%"REG_S", %%"REG_c" \n\t"\
  2320. "add %%"REG_a", %%"REG_D" \n\t"\
  2321. "xor %%"REG_a", %%"REG_a" \n\t"\
  2322. #else
  2323. #define FUNNY_UV_CODE \
  2324. "movl (%%"REG_b"), %%esi \n\t"\
  2325. "call *%4 \n\t"\
  2326. "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
  2327. "add %%"REG_a", %%"REG_D" \n\t"\
  2328. "xor %%"REG_a", %%"REG_a" \n\t"\
  2329. #endif
  2330. FUNNY_UV_CODE
  2331. FUNNY_UV_CODE
  2332. FUNNY_UV_CODE
  2333. FUNNY_UV_CODE
  2334. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2335. "mov %5, %%"REG_c" \n\t" // src
  2336. "mov %1, %%"REG_D" \n\t" // buf1
  2337. "add $4096, %%"REG_D" \n\t"
  2338. PREFETCH" (%%"REG_c") \n\t"
  2339. PREFETCH" 32(%%"REG_c") \n\t"
  2340. PREFETCH" 64(%%"REG_c") \n\t"
  2341. FUNNY_UV_CODE
  2342. FUNNY_UV_CODE
  2343. FUNNY_UV_CODE
  2344. FUNNY_UV_CODE
  2345. :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2346. "m" (funnyUVCode), "m" (src2)
  2347. : "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
  2348. );
  2349. for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
  2350. {
  2351. // printf("%d %d %d\n", dstWidth, i, srcW);
  2352. dst[i] = src1[srcW-1]*128;
  2353. dst[i+2048] = src2[srcW-1]*128;
  2354. }
  2355. }
  2356. else
  2357. {
  2358. #endif
  2359. long xInc_shr16 = (long) (xInc >> 16);
  2360. uint16_t xInc_mask = xInc & 0xffff;
  2361. asm volatile(
  2362. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2363. "xor %%"REG_b", %%"REG_b" \n\t" // xx
  2364. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2365. ASMALIGN16
  2366. "1: \n\t"
  2367. "mov %0, %%"REG_S" \n\t"
  2368. "movzbl (%%"REG_S", %%"REG_b"), %%edi \n\t" //src[xx]
  2369. "movzbl 1(%%"REG_S", %%"REG_b"), %%esi \n\t" //src[xx+1]
  2370. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2371. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2372. "shll $16, %%edi \n\t"
  2373. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2374. "mov %1, %%"REG_D" \n\t"
  2375. "shrl $9, %%esi \n\t"
  2376. "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
  2377. "movzbl (%5, %%"REG_b"), %%edi \n\t" //src[xx]
  2378. "movzbl 1(%5, %%"REG_b"), %%esi \n\t" //src[xx+1]
  2379. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2380. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2381. "shll $16, %%edi \n\t"
  2382. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2383. "mov %1, %%"REG_D" \n\t"
  2384. "shrl $9, %%esi \n\t"
  2385. "movw %%si, 4096(%%"REG_D", %%"REG_a", 2)\n\t"
  2386. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2387. "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry
  2388. "add $1, %%"REG_a" \n\t"
  2389. "cmp %2, %%"REG_a" \n\t"
  2390. " jb 1b \n\t"
  2391. /* GCC-3.3 makes MPlayer crash on IA-32 machines when using "g" operand here,
  2392. which is needed to support GCC-4.0 */
  2393. #if defined(ARCH_X86_64) && ((__GNUC__ > 3) || ( __GNUC__ == 3 && __GNUC_MINOR__ >= 4))
  2394. :: "m" (src1), "m" (dst), "g" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
  2395. #else
  2396. :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
  2397. #endif
  2398. "r" (src2)
  2399. : "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi"
  2400. );
  2401. #ifdef HAVE_MMX2
  2402. } //if MMX2 can't be used
  2403. #endif
  2404. #else
  2405. int i;
  2406. unsigned int xpos=0;
  2407. for(i=0;i<dstWidth;i++)
  2408. {
  2409. register unsigned int xx=xpos>>16;
  2410. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2411. dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
  2412. dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
  2413. /* slower
  2414. dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
  2415. dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
  2416. */
  2417. xpos+=xInc;
  2418. }
  2419. #endif
  2420. }
  2421. }
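/* swScale: main slice-based scaling loop. For each output line it determines
   which source lines are needed, horizontally scales any missing ones into
   the lumPixBuf/chrPixBuf ring buffers, and then vertically scales/converts
   the buffered lines to the destination format. The changed state (dstY,
   buffer indices) is written back into the context so the next slice can
   continue where this one stopped. */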
  2422. static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
  2423. int srcSliceH, uint8_t* dst[], int dstStride[]){
  2424. /* load a few things into local vars to make the code more readable? and faster */
  2425. const int srcW= c->srcW;
  2426. const int dstW= c->dstW;
  2427. const int dstH= c->dstH;
  2428. const int chrDstW= c->chrDstW;
  2429. const int chrSrcW= c->chrSrcW;
  2430. const int lumXInc= c->lumXInc;
  2431. const int chrXInc= c->chrXInc;
  2432. const int dstFormat= c->dstFormat;
  2433. const int srcFormat= c->srcFormat;
  2434. const int flags= c->flags;
  2435. const int canMMX2BeUsed= c->canMMX2BeUsed;
  2436. int16_t *vLumFilterPos= c->vLumFilterPos;
  2437. int16_t *vChrFilterPos= c->vChrFilterPos;
  2438. int16_t *hLumFilterPos= c->hLumFilterPos;
  2439. int16_t *hChrFilterPos= c->hChrFilterPos;
  2440. int16_t *vLumFilter= c->vLumFilter;
  2441. int16_t *vChrFilter= c->vChrFilter;
  2442. int16_t *hLumFilter= c->hLumFilter;
  2443. int16_t *hChrFilter= c->hChrFilter;
  2444. int32_t *lumMmxFilter= c->lumMmxFilter;
  2445. int32_t *chrMmxFilter= c->chrMmxFilter;
  2446. const int vLumFilterSize= c->vLumFilterSize;
  2447. const int vChrFilterSize= c->vChrFilterSize;
  2448. const int hLumFilterSize= c->hLumFilterSize;
  2449. const int hChrFilterSize= c->hChrFilterSize;
  2450. int16_t **lumPixBuf= c->lumPixBuf;
  2451. int16_t **chrPixBuf= c->chrPixBuf;
  2452. const int vLumBufSize= c->vLumBufSize;
  2453. const int vChrBufSize= c->vChrBufSize;
  2454. uint8_t *funnyYCode= c->funnyYCode;
  2455. uint8_t *funnyUVCode= c->funnyUVCode;
  2456. uint8_t *formatConvBuffer= c->formatConvBuffer;
  2457. const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
  2458. const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
  2459. int lastDstY;
2460. /* vars which will change and which we need to store back in the context */
  2461. int dstY= c->dstY;
  2462. int lumBufIndex= c->lumBufIndex;
  2463. int chrBufIndex= c->chrBufIndex;
  2464. int lastInLumBuf= c->lastInLumBuf;
  2465. int lastInChrBuf= c->lastInChrBuf;
  2466. if(isPacked(c->srcFormat)){
  2467. src[0]=
  2468. src[1]=
  2469. src[2]= src[0];
  2470. srcStride[0]=
  2471. srcStride[1]=
  2472. srcStride[2]= srcStride[0];
  2473. }
  2474. srcStride[1]<<= c->vChrDrop;
  2475. srcStride[2]<<= c->vChrDrop;
  2476. // printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
  2477. // (int)dst[0], (int)dst[1], (int)dst[2]);
  2478. #if 0 //self test FIXME move to a vfilter or something
  2479. {
  2480. static volatile int i=0;
  2481. i++;
  2482. if(srcFormat==IMGFMT_YV12 && i==1 && srcSliceH>= c->srcH)
  2483. selfTest(src, srcStride, c->srcW, c->srcH);
  2484. i--;
  2485. }
  2486. #endif
  2487. //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
  2488. //dstStride[0],dstStride[1],dstStride[2]);
  2489. if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
  2490. {
  2491. static int firstTime=1; //FIXME move this into the context perhaps
  2492. if(flags & SWS_PRINT_INFO && firstTime)
  2493. {
  2494. MSG_WARN("SwScaler: Warning: dstStride is not aligned!\n"
  2495. "SwScaler: ->cannot do aligned memory acesses anymore\n");
  2496. firstTime=0;
  2497. }
  2498. }
2499. /* Note: the user might start scaling the picture in the middle, so this will not get executed;
2500. this is not really intended but works currently, so people might do it */
  2501. if(srcSliceY ==0){
  2502. lumBufIndex=0;
  2503. chrBufIndex=0;
  2504. dstY=0;
  2505. lastInLumBuf= -1;
  2506. lastInChrBuf= -1;
  2507. }
  2508. lastDstY= dstY;
  2509. for(;dstY < dstH; dstY++){
  2510. unsigned char *dest =dst[0]+dstStride[0]*dstY;
  2511. const int chrDstY= dstY>>c->chrDstVSubSample;
  2512. unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
  2513. unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
  2514. const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
  2515. const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
  2516. const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
  2517. const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
  2518. //printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
  2519. // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample);
  2520. //handle holes (FAST_BILINEAR & weird filters)
  2521. if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
  2522. if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
  2523. //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
  2524. ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1)
  2525. ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1)
  2526. // Do we have enough lines in this slice to output the dstY line
  2527. if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
  2528. {
  2529. //Do horizontal scaling
  2530. while(lastInLumBuf < lastLumSrcY)
  2531. {
  2532. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2533. lumBufIndex++;
  2534. // printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
  2535. ASSERT(lumBufIndex < 2*vLumBufSize)
  2536. ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
  2537. ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
  2538. // printf("%d %d\n", lumBufIndex, vLumBufSize);
  2539. RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2540. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2541. funnyYCode, c->srcFormat, formatConvBuffer,
  2542. c->lumMmx2Filter, c->lumMmx2FilterPos);
  2543. lastInLumBuf++;
  2544. }
  2545. while(lastInChrBuf < lastChrSrcY)
  2546. {
  2547. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2548. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2549. chrBufIndex++;
  2550. ASSERT(chrBufIndex < 2*vChrBufSize)
  2551. ASSERT(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH))
  2552. ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
  2553. //FIXME replace parameters through context struct (some at least)
  2554. if(!(isGray(srcFormat) || isGray(dstFormat)))
  2555. RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2556. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2557. funnyUVCode, c->srcFormat, formatConvBuffer,
  2558. c->chrMmx2Filter, c->chrMmx2FilterPos);
  2559. lastInChrBuf++;
  2560. }
  2561. //wrap buf index around to stay inside the ring buffer
  2562. if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
  2563. if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
  2564. }
  2565. else // not enough lines left in this slice -> load the rest in the buffer
  2566. {
  2567. /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
  2568. firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
  2569. lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
  2570. vChrBufSize, vLumBufSize);*/
  2571. //Do horizontal scaling
  2572. while(lastInLumBuf+1 < srcSliceY + srcSliceH)
  2573. {
  2574. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2575. lumBufIndex++;
  2576. ASSERT(lumBufIndex < 2*vLumBufSize)
  2577. ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
  2578. ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
  2579. RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2580. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2581. funnyYCode, c->srcFormat, formatConvBuffer,
  2582. c->lumMmx2Filter, c->lumMmx2FilterPos);
  2583. lastInLumBuf++;
  2584. }
  2585. while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
  2586. {
  2587. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2588. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2589. chrBufIndex++;
  2590. ASSERT(chrBufIndex < 2*vChrBufSize)
  2591. ASSERT(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH)
  2592. ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
  2593. if(!(isGray(srcFormat) || isGray(dstFormat)))
  2594. RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2595. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2596. funnyUVCode, c->srcFormat, formatConvBuffer,
  2597. c->chrMmx2Filter, c->chrMmx2FilterPos);
  2598. lastInChrBuf++;
  2599. }
  2600. //wrap buf index around to stay inside the ring buffer
  2601. if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
  2602. if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
  2603. break; //we can't output a dstY line so let's try with the next slice
  2604. }
  2605. #ifdef HAVE_MMX
  2606. b5Dither= dither8[dstY&1];
  2607. g6Dither= dither4[dstY&1];
  2608. g5Dither= dither8[dstY&1];
  2609. r5Dither= dither8[(dstY+1)&1];
  2610. #endif
  2611. if(dstY < dstH-2)
  2612. {
  2613. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2614. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2615. #ifdef HAVE_MMX
  2616. int i;
  2617. for(i=0; i<vLumFilterSize; i++)
  2618. {
  2619. lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
  2620. lumMmxFilter[4*i+2]=
  2621. lumMmxFilter[4*i+3]=
  2622. ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
  2623. }
  2624. for(i=0; i<vChrFilterSize; i++)
  2625. {
  2626. chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
  2627. chrMmxFilter[4*i+2]=
  2628. chrMmxFilter[4*i+3]=
  2629. ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
  2630. }
  2631. #endif
  2632. if(dstFormat == IMGFMT_NV12 || dstFormat == IMGFMT_NV21){
  2633. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2634. if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
  2635. RENAME(yuv2nv12X)(c,
  2636. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2637. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2638. dest, uDest, dstW, chrDstW, dstFormat);
  2639. }
  2640. else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 like
  2641. {
  2642. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2643. if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
  2644. if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12
  2645. {
  2646. int16_t *lumBuf = lumPixBuf[0];
  2647. int16_t *chrBuf= chrPixBuf[0];
  2648. RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
  2649. }
  2650. else //General YV12
  2651. {
  2652. RENAME(yuv2yuvX)(c,
  2653. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2654. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2655. dest, uDest, vDest, dstW, chrDstW);
  2656. }
  2657. }
  2658. else
  2659. {
  2660. ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2661. ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  2662. if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB
  2663. {
  2664. int chrAlpha= vChrFilter[2*dstY+1];
  2665. RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
  2666. dest, dstW, chrAlpha, dstFormat, flags, dstY);
  2667. }
  2668. else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB
  2669. {
  2670. int lumAlpha= vLumFilter[2*dstY+1];
  2671. int chrAlpha= vChrFilter[2*dstY+1];
  2672. RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
  2673. dest, dstW, lumAlpha, chrAlpha, dstY);
  2674. }
  2675. else //General RGB
  2676. {
  2677. RENAME(yuv2packedX)(c,
  2678. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2679. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2680. dest, dstW, dstY);
  2681. }
  2682. }
  2683. }
  2684. else // hmm looks like we can't use MMX here without overwriting this array's tail
  2685. {
  2686. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2687. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2688. if(dstFormat == IMGFMT_NV12 || dstFormat == IMGFMT_NV21){
  2689. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2690. if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
  2691. yuv2nv12XinC(
  2692. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2693. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2694. dest, uDest, dstW, chrDstW, dstFormat);
  2695. }
  2696. else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12
  2697. {
  2698. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2699. if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
  2700. yuv2yuvXinC(
  2701. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2702. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2703. dest, uDest, vDest, dstW, chrDstW);
  2704. }
  2705. else
  2706. {
  2707. ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2708. ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  2709. yuv2packedXinC(c,
  2710. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2711. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2712. dest, dstW, dstY);
  2713. }
  2714. }
  2715. }
  2716. #ifdef HAVE_MMX
  2717. __asm __volatile(SFENCE:::"memory");
  2718. __asm __volatile(EMMS:::"memory");
  2719. #endif
  2720. /* store changed local vars back in the context */
  2721. c->dstY= dstY;
  2722. c->lumBufIndex= lumBufIndex;
  2723. c->chrBufIndex= chrBufIndex;
  2724. c->lastInLumBuf= lastInLumBuf;
  2725. c->lastInChrBuf= lastInChrBuf;
  2726. return dstY - lastDstY;
  2727. }