  1. /*
  2. Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
  3. This program is free software; you can redistribute it and/or modify
  4. it under the terms of the GNU General Public License as published by
  5. the Free Software Foundation; either version 2 of the License, or
  6. (at your option) any later version.
  7. This program is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU General Public License for more details.
  11. You should have received a copy of the GNU General Public License
  12. along with this program; if not, write to the Free Software
  13. Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  14. */
  15. #undef REAL_MOVNTQ
  16. #undef MOVNTQ
  17. #undef PAVGB
  18. #undef PREFETCH
  19. #undef PREFETCHW
  20. #undef EMMS
  21. #undef SFENCE
  22. #ifdef HAVE_3DNOW
  23. /* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
  24. #define EMMS "femms"
  25. #else
  26. #define EMMS "emms"
  27. #endif
  28. #ifdef HAVE_3DNOW
  29. #define PREFETCH "prefetch"
  30. #define PREFETCHW "prefetchw"
  31. #elif defined ( HAVE_MMX2 )
  32. #define PREFETCH "prefetchnta"
  33. #define PREFETCHW "prefetcht0"
  34. #else
  35. #define PREFETCH "/nop"
  36. #define PREFETCHW "/nop"
  37. #endif
  38. #ifdef HAVE_MMX2
  39. #define SFENCE "sfence"
  40. #else
  41. #define SFENCE "/nop"
  42. #endif
  43. #ifdef HAVE_MMX2
  44. #define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
  45. #elif defined (HAVE_3DNOW)
  46. #define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
  47. #endif
  48. #ifdef HAVE_MMX2
  49. #define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
  50. #else
  51. #define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
  52. #endif
  53. #define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
  54. #ifdef HAVE_ALTIVEC
  55. #include "swscale_altivec_template.c"
  56. #endif
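/*
 * YSCALEYUV2YV12X: vertical scaling loop for one plane. For each group of 8
 * output pixels it walks the MMX filter table (source-line pointer plus
 * coefficient pairs, terminated by a NULL pointer), multiplies each source
 * line by its coefficient with pmulhw, accumulates into mm3/mm4 (which start
 * from the rounding constant at VROUNDER_OFFSET), then shifts the sums right
 * by 3, packs them to unsigned bytes and stores them with MOVNTQ.
 */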
  57. #define YSCALEYUV2YV12X(x, offset) \
  58. "xor %%"REG_a", %%"REG_a" \n\t"\
  59. "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
  60. "movq %%mm3, %%mm4 \n\t"\
  61. "lea " offset "(%0), %%"REG_d" \n\t"\
  62. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  63. ".balign 16 \n\t" /* FIXME Unroll? */\
  64. "1: \n\t"\
  65. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  66. "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
  67. "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm5\n\t" /* srcData */\
  68. "add $16, %%"REG_d" \n\t"\
  69. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  70. "test %%"REG_S", %%"REG_S" \n\t"\
  71. "pmulhw %%mm0, %%mm2 \n\t"\
  72. "pmulhw %%mm0, %%mm5 \n\t"\
  73. "paddw %%mm2, %%mm3 \n\t"\
  74. "paddw %%mm5, %%mm4 \n\t"\
  75. " jnz 1b \n\t"\
  76. "psraw $3, %%mm3 \n\t"\
  77. "psraw $3, %%mm4 \n\t"\
  78. "packuswb %%mm4, %%mm3 \n\t"\
  79. MOVNTQ(%%mm3, (%1, %%REGa))\
  80. "add $8, %%"REG_a" \n\t"\
  81. "cmp %2, %%"REG_a" \n\t"\
  82. "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
  83. "movq %%mm3, %%mm4 \n\t"\
  84. "lea " offset "(%0), %%"REG_d" \n\t"\
  85. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  86. "jb 1b \n\t"
  87. #define YSCALEYUV2YV121 \
  88. "mov %2, %%"REG_a" \n\t"\
  89. ".balign 16 \n\t" /* FIXME Unroll? */\
  90. "1: \n\t"\
  91. "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
  92. "movq 8(%0, %%"REG_a", 2), %%mm1\n\t"\
  93. "psraw $7, %%mm0 \n\t"\
  94. "psraw $7, %%mm1 \n\t"\
  95. "packuswb %%mm1, %%mm0 \n\t"\
  96. MOVNTQ(%%mm0, (%1, %%REGa))\
  97. "add $8, %%"REG_a" \n\t"\
  98. "jnc 1b \n\t"
  99. /*
  100. :: "m" (-lumFilterSize), "m" (-chrFilterSize),
  101. "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
  102. "r" (dest), "m" (dstW),
  103. "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
  104. : "%eax", "%ebx", "%ecx", "%edx", "%esi"
  105. */
  106. #define YSCALEYUV2PACKEDX \
  107. "xor %%"REG_a", %%"REG_a" \n\t"\
  108. ".balign 16 \n\t"\
  109. "nop \n\t"\
  110. "1: \n\t"\
  111. "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
  112. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  113. "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
  114. "movq %%mm3, %%mm4 \n\t"\
  115. ".balign 16 \n\t"\
  116. "2: \n\t"\
  117. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  118. "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
  119. "movq 4096(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
  120. "add $16, %%"REG_d" \n\t"\
  121. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  122. "pmulhw %%mm0, %%mm2 \n\t"\
  123. "pmulhw %%mm0, %%mm5 \n\t"\
  124. "paddw %%mm2, %%mm3 \n\t"\
  125. "paddw %%mm5, %%mm4 \n\t"\
  126. "test %%"REG_S", %%"REG_S" \n\t"\
  127. " jnz 2b \n\t"\
  128. \
  129. "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
  130. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  131. "movq "VROUNDER_OFFSET"(%0), %%mm1\n\t"\
  132. "movq %%mm1, %%mm7 \n\t"\
  133. ".balign 16 \n\t"\
  134. "2: \n\t"\
  135. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  136. "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\
  137. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
  138. "add $16, %%"REG_d" \n\t"\
  139. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  140. "pmulhw %%mm0, %%mm2 \n\t"\
  141. "pmulhw %%mm0, %%mm5 \n\t"\
  142. "paddw %%mm2, %%mm1 \n\t"\
  143. "paddw %%mm5, %%mm7 \n\t"\
  144. "test %%"REG_S", %%"REG_S" \n\t"\
  145. " jnz 2b \n\t"\
  146. #define YSCALEYUV2RGBX \
  147. YSCALEYUV2PACKEDX\
  148. "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
  149. "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
  150. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  151. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  152. "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
  153. "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
  154. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  155. "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
  156. "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
  157. "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
  158. "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
  159. "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
  160. "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
  161. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  162. "paddw %%mm3, %%mm4 \n\t"\
  163. "movq %%mm2, %%mm0 \n\t"\
  164. "movq %%mm5, %%mm6 \n\t"\
  165. "movq %%mm4, %%mm3 \n\t"\
  166. "punpcklwd %%mm2, %%mm2 \n\t"\
  167. "punpcklwd %%mm5, %%mm5 \n\t"\
  168. "punpcklwd %%mm4, %%mm4 \n\t"\
  169. "paddw %%mm1, %%mm2 \n\t"\
  170. "paddw %%mm1, %%mm5 \n\t"\
  171. "paddw %%mm1, %%mm4 \n\t"\
  172. "punpckhwd %%mm0, %%mm0 \n\t"\
  173. "punpckhwd %%mm6, %%mm6 \n\t"\
  174. "punpckhwd %%mm3, %%mm3 \n\t"\
  175. "paddw %%mm7, %%mm0 \n\t"\
  176. "paddw %%mm7, %%mm6 \n\t"\
  177. "paddw %%mm7, %%mm3 \n\t"\
  178. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  179. "packuswb %%mm0, %%mm2 \n\t"\
  180. "packuswb %%mm6, %%mm5 \n\t"\
  181. "packuswb %%mm3, %%mm4 \n\t"\
  182. "pxor %%mm7, %%mm7 \n\t"
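/*
 * YSCALEYUV2RGBX extends YSCALEYUV2PACKEDX with the YUV-to-RGB matrix: the
 * accumulated U/V and Y sums are offset by U_OFFSET/V_OFFSET/Y_OFFSET and
 * multiplied by the per-context coefficients, so that on exit mm2 holds 8 B
 * bytes, mm4 8 G bytes, mm5 8 R bytes and mm7 is zeroed, which is the layout
 * the WRITEBGR macros below expect.
 */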
  183. #if 0
  184. #define FULL_YSCALEYUV2RGB \
  185. "pxor %%mm7, %%mm7 \n\t"\
  186. "movd %6, %%mm6 \n\t" /*yalpha1*/\
  187. "punpcklwd %%mm6, %%mm6 \n\t"\
  188. "punpcklwd %%mm6, %%mm6 \n\t"\
  189. "movd %7, %%mm5 \n\t" /*uvalpha1*/\
  190. "punpcklwd %%mm5, %%mm5 \n\t"\
  191. "punpcklwd %%mm5, %%mm5 \n\t"\
  192. "xor %%"REG_a", %%"REG_a" \n\t"\
  193. ".balign 16 \n\t"\
  194. "1: \n\t"\
  195. "movq (%0, %%"REG_a", 2), %%mm0 \n\t" /*buf0[eax]*/\
  196. "movq (%1, %%"REG_a", 2), %%mm1 \n\t" /*buf1[eax]*/\
  197. "movq (%2, %%"REG_a",2), %%mm2 \n\t" /* uvbuf0[eax]*/\
  198. "movq (%3, %%"REG_a",2), %%mm3 \n\t" /* uvbuf1[eax]*/\
  199. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  200. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  201. "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  202. "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  203. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  204. "movq 4096(%2, %%"REG_a",2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  205. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  206. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  207. "movq 4096(%3, %%"REG_a",2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
  208. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  209. "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  210. "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
  211. "psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\
  212. "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
  213. \
  214. \
  215. "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  216. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  217. "pmulhw "MANGLE(ubCoeff)", %%mm3\n\t"\
  218. "psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  219. "pmulhw "MANGLE(ugCoeff)", %%mm2\n\t"\
  220. "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  221. "psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\
  222. \
  223. \
  224. "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
  225. "pmulhw "MANGLE(vrCoeff)", %%mm0\n\t"\
  226. "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
  227. "paddw %%mm1, %%mm3 \n\t" /* B*/\
  228. "paddw %%mm1, %%mm0 \n\t" /* R*/\
  229. "packuswb %%mm3, %%mm3 \n\t"\
  230. \
  231. "packuswb %%mm0, %%mm0 \n\t"\
  232. "paddw %%mm4, %%mm2 \n\t"\
  233. "paddw %%mm2, %%mm1 \n\t" /* G*/\
  234. \
  235. "packuswb %%mm1, %%mm1 \n\t"
  236. #endif
  237. #define REAL_YSCALEYUV2PACKED(index, c) \
  238. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
  239. "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1\n\t"\
  240. "psraw $3, %%mm0 \n\t"\
  241. "psraw $3, %%mm1 \n\t"\
  242. "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c")\n\t"\
  243. "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c")\n\t"\
  244. "xor "#index", "#index" \n\t"\
  245. ".balign 16 \n\t"\
  246. "1: \n\t"\
  247. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  248. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  249. "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  250. "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  251. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  252. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  253. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
  254. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  255. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  256. "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  257. "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  258. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  259. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  260. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  261. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  262. "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
  263. "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
  264. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  265. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  266. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  267. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  268. "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  269. "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  270. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  271. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  272. #define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
  273. #define REAL_YSCALEYUV2RGB(index, c) \
  274. "xor "#index", "#index" \n\t"\
  275. ".balign 16 \n\t"\
  276. "1: \n\t"\
  277. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  278. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  279. "movq 4096(%2, "#index"), %%mm5\n\t" /* uvbuf0[eax+2048]*/\
  280. "movq 4096(%3, "#index"), %%mm4\n\t" /* uvbuf1[eax+2048]*/\
  281. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  282. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  283. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
  284. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  285. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  286. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  287. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  288. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  289. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  290. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  291. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  292. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  293. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  294. "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
  295. "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
  296. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  297. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  298. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  299. "movq 8(%0, "#index", 2), %%mm6\n\t" /*buf0[eax]*/\
  300. "movq 8(%1, "#index", 2), %%mm7\n\t" /*buf1[eax]*/\
  301. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  302. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  303. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  304. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  305. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  306. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  307. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  308. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  309. "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
  310. "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
  311. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  312. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  313. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  314. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  315. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  316. "paddw %%mm3, %%mm4 \n\t"\
  317. "movq %%mm2, %%mm0 \n\t"\
  318. "movq %%mm5, %%mm6 \n\t"\
  319. "movq %%mm4, %%mm3 \n\t"\
  320. "punpcklwd %%mm2, %%mm2 \n\t"\
  321. "punpcklwd %%mm5, %%mm5 \n\t"\
  322. "punpcklwd %%mm4, %%mm4 \n\t"\
  323. "paddw %%mm1, %%mm2 \n\t"\
  324. "paddw %%mm1, %%mm5 \n\t"\
  325. "paddw %%mm1, %%mm4 \n\t"\
  326. "punpckhwd %%mm0, %%mm0 \n\t"\
  327. "punpckhwd %%mm6, %%mm6 \n\t"\
  328. "punpckhwd %%mm3, %%mm3 \n\t"\
  329. "paddw %%mm7, %%mm0 \n\t"\
  330. "paddw %%mm7, %%mm6 \n\t"\
  331. "paddw %%mm7, %%mm3 \n\t"\
  332. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  333. "packuswb %%mm0, %%mm2 \n\t"\
  334. "packuswb %%mm6, %%mm5 \n\t"\
  335. "packuswb %%mm3, %%mm4 \n\t"\
  336. "pxor %%mm7, %%mm7 \n\t"
  337. #define YSCALEYUV2RGB(index, c) REAL_YSCALEYUV2RGB(index, c)
  338. #define REAL_YSCALEYUV2PACKED1(index, c) \
  339. "xor "#index", "#index" \n\t"\
  340. ".balign 16 \n\t"\
  341. "1: \n\t"\
  342. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  343. "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  344. "psraw $7, %%mm3 \n\t" \
  345. "psraw $7, %%mm4 \n\t" \
  346. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  347. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  348. "psraw $7, %%mm1 \n\t" \
  349. "psraw $7, %%mm7 \n\t" \
  350. #define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
  351. #define REAL_YSCALEYUV2RGB1(index, c) \
  352. "xor "#index", "#index" \n\t"\
  353. ".balign 16 \n\t"\
  354. "1: \n\t"\
  355. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  356. "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  357. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  358. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  359. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  360. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  361. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  362. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  363. "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
  364. "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
  365. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  366. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  367. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  368. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  369. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  370. "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
  371. "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
  372. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  373. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  374. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  375. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  376. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  377. "paddw %%mm3, %%mm4 \n\t"\
  378. "movq %%mm2, %%mm0 \n\t"\
  379. "movq %%mm5, %%mm6 \n\t"\
  380. "movq %%mm4, %%mm3 \n\t"\
  381. "punpcklwd %%mm2, %%mm2 \n\t"\
  382. "punpcklwd %%mm5, %%mm5 \n\t"\
  383. "punpcklwd %%mm4, %%mm4 \n\t"\
  384. "paddw %%mm1, %%mm2 \n\t"\
  385. "paddw %%mm1, %%mm5 \n\t"\
  386. "paddw %%mm1, %%mm4 \n\t"\
  387. "punpckhwd %%mm0, %%mm0 \n\t"\
  388. "punpckhwd %%mm6, %%mm6 \n\t"\
  389. "punpckhwd %%mm3, %%mm3 \n\t"\
  390. "paddw %%mm7, %%mm0 \n\t"\
  391. "paddw %%mm7, %%mm6 \n\t"\
  392. "paddw %%mm7, %%mm3 \n\t"\
  393. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  394. "packuswb %%mm0, %%mm2 \n\t"\
  395. "packuswb %%mm6, %%mm5 \n\t"\
  396. "packuswb %%mm3, %%mm4 \n\t"\
  397. "pxor %%mm7, %%mm7 \n\t"
  398. #define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
  399. #define REAL_YSCALEYUV2PACKED1b(index, c) \
  400. "xor "#index", "#index" \n\t"\
  401. ".balign 16 \n\t"\
  402. "1: \n\t"\
  403. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  404. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  405. "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  406. "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  407. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  408. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  409. "psrlw $8, %%mm3 \n\t" \
  410. "psrlw $8, %%mm4 \n\t" \
  411. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  412. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  413. "psraw $7, %%mm1 \n\t" \
  414. "psraw $7, %%mm7 \n\t"
  415. #define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
  416. // do vertical chrominance interpolation
  417. #define REAL_YSCALEYUV2RGB1b(index, c) \
  418. "xor "#index", "#index" \n\t"\
  419. ".balign 16 \n\t"\
  420. "1: \n\t"\
  421. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  422. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  423. "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  424. "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  425. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  426. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  427. "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
  428. "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
  429. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  430. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  431. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  432. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  433. "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
  434. "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
  435. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  436. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  437. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  438. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  439. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  440. "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
  441. "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
  442. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  443. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  444. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  445. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  446. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  447. "paddw %%mm3, %%mm4 \n\t"\
  448. "movq %%mm2, %%mm0 \n\t"\
  449. "movq %%mm5, %%mm6 \n\t"\
  450. "movq %%mm4, %%mm3 \n\t"\
  451. "punpcklwd %%mm2, %%mm2 \n\t"\
  452. "punpcklwd %%mm5, %%mm5 \n\t"\
  453. "punpcklwd %%mm4, %%mm4 \n\t"\
  454. "paddw %%mm1, %%mm2 \n\t"\
  455. "paddw %%mm1, %%mm5 \n\t"\
  456. "paddw %%mm1, %%mm4 \n\t"\
  457. "punpckhwd %%mm0, %%mm0 \n\t"\
  458. "punpckhwd %%mm6, %%mm6 \n\t"\
  459. "punpckhwd %%mm3, %%mm3 \n\t"\
  460. "paddw %%mm7, %%mm0 \n\t"\
  461. "paddw %%mm7, %%mm6 \n\t"\
  462. "paddw %%mm7, %%mm3 \n\t"\
  463. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  464. "packuswb %%mm0, %%mm2 \n\t"\
  465. "packuswb %%mm6, %%mm5 \n\t"\
  466. "packuswb %%mm3, %%mm4 \n\t"\
  467. "pxor %%mm7, %%mm7 \n\t"
  468. #define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
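/*
 * The WRITE* macros below consume the packed components left behind by the
 * YSCALEYUV2* macros (for the RGB variants: B in mm2, G in mm4, R in mm5,
 * mm7 = 0) and interleave them into the destination pixel format, writing
 * 8 pixels per iteration with MOVNTQ and looping until index reaches dstw.
 */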
  469. #define REAL_WRITEBGR32(dst, dstw, index) \
  470. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  471. "movq %%mm2, %%mm1 \n\t" /* B */\
  472. "movq %%mm5, %%mm6 \n\t" /* R */\
  473. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  474. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  475. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  476. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  477. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  478. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  479. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  480. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  481. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  482. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  483. \
  484. MOVNTQ(%%mm0, (dst, index, 4))\
  485. MOVNTQ(%%mm2, 8(dst, index, 4))\
  486. MOVNTQ(%%mm1, 16(dst, index, 4))\
  487. MOVNTQ(%%mm3, 24(dst, index, 4))\
  488. \
  489. "add $8, "#index" \n\t"\
  490. "cmp "#dstw", "#index" \n\t"\
  491. " jb 1b \n\t"
  492. #define WRITEBGR32(dst, dstw, index) REAL_WRITEBGR32(dst, dstw, index)
  493. #define REAL_WRITEBGR16(dst, dstw, index) \
  494. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  495. "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
  496. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  497. "psrlq $3, %%mm2 \n\t"\
  498. \
  499. "movq %%mm2, %%mm1 \n\t"\
  500. "movq %%mm4, %%mm3 \n\t"\
  501. \
  502. "punpcklbw %%mm7, %%mm3 \n\t"\
  503. "punpcklbw %%mm5, %%mm2 \n\t"\
  504. "punpckhbw %%mm7, %%mm4 \n\t"\
  505. "punpckhbw %%mm5, %%mm1 \n\t"\
  506. \
  507. "psllq $3, %%mm3 \n\t"\
  508. "psllq $3, %%mm4 \n\t"\
  509. \
  510. "por %%mm3, %%mm2 \n\t"\
  511. "por %%mm4, %%mm1 \n\t"\
  512. \
  513. MOVNTQ(%%mm2, (dst, index, 2))\
  514. MOVNTQ(%%mm1, 8(dst, index, 2))\
  515. \
  516. "add $8, "#index" \n\t"\
  517. "cmp "#dstw", "#index" \n\t"\
  518. " jb 1b \n\t"
  519. #define WRITEBGR16(dst, dstw, index) REAL_WRITEBGR16(dst, dstw, index)
  520. #define REAL_WRITEBGR15(dst, dstw, index) \
  521. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  522. "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
  523. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  524. "psrlq $3, %%mm2 \n\t"\
  525. "psrlq $1, %%mm5 \n\t"\
  526. \
  527. "movq %%mm2, %%mm1 \n\t"\
  528. "movq %%mm4, %%mm3 \n\t"\
  529. \
  530. "punpcklbw %%mm7, %%mm3 \n\t"\
  531. "punpcklbw %%mm5, %%mm2 \n\t"\
  532. "punpckhbw %%mm7, %%mm4 \n\t"\
  533. "punpckhbw %%mm5, %%mm1 \n\t"\
  534. \
  535. "psllq $2, %%mm3 \n\t"\
  536. "psllq $2, %%mm4 \n\t"\
  537. \
  538. "por %%mm3, %%mm2 \n\t"\
  539. "por %%mm4, %%mm1 \n\t"\
  540. \
  541. MOVNTQ(%%mm2, (dst, index, 2))\
  542. MOVNTQ(%%mm1, 8(dst, index, 2))\
  543. \
  544. "add $8, "#index" \n\t"\
  545. "cmp "#dstw", "#index" \n\t"\
  546. " jb 1b \n\t"
  547. #define WRITEBGR15(dst, dstw, index) REAL_WRITEBGR15(dst, dstw, index)
  548. #define WRITEBGR24OLD(dst, dstw, index) \
  549. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  550. "movq %%mm2, %%mm1 \n\t" /* B */\
  551. "movq %%mm5, %%mm6 \n\t" /* R */\
  552. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  553. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  554. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  555. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  556. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  557. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  558. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  559. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  560. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  561. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  562. \
  563. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  564. "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
  565. "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 0 */\
  566. "pand "MANGLE(bm11111000)", %%mm0\n\t" /* 00RGB000 0.5 */\
  567. "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
  568. "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
  569. "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
  570. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  571. \
  572. "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  573. "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
  574. "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
  575. "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
  576. "pand "MANGLE(bm00001111)", %%mm2\n\t" /* 0000RGBR 1 */\
  577. "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
  578. "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
  579. "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 2 */\
  580. "pand "MANGLE(bm11111000)", %%mm1\n\t" /* 00RGB000 2.5 */\
  581. "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
  582. "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
  583. "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
  584. "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
  585. \
  586. "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
  587. "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
  588. "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
  589. "pand "MANGLE(bm00000111)", %%mm5\n\t" /* 00000RGB 3 */\
  590. "pand "MANGLE(bm11111000)", %%mm3\n\t" /* 00RGB000 3.5 */\
  591. "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
  592. "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
  593. "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
  594. \
  595. MOVNTQ(%%mm0, (dst))\
  596. MOVNTQ(%%mm2, 8(dst))\
  597. MOVNTQ(%%mm3, 16(dst))\
  598. "add $24, "#dst" \n\t"\
  599. \
  600. "add $8, "#index" \n\t"\
  601. "cmp "#dstw", "#index" \n\t"\
  602. " jb 1b \n\t"
  603. #define WRITEBGR24MMX(dst, dstw, index) \
  604. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  605. "movq %%mm2, %%mm1 \n\t" /* B */\
  606. "movq %%mm5, %%mm6 \n\t" /* R */\
  607. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  608. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  609. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  610. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  611. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  612. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  613. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  614. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  615. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  616. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  617. \
  618. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  619. "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
  620. "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
  621. "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
  622. \
  623. "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
  624. "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
  625. "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
  626. "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
  627. \
  628. "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
  629. "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
  630. "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
  631. "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
  632. \
  633. "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
  634. "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
  635. "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
  636. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  637. MOVNTQ(%%mm0, (dst))\
  638. \
  639. "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
  640. "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
  641. "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
  642. "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
  643. MOVNTQ(%%mm6, 8(dst))\
  644. \
  645. "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
  646. "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
  647. "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
  648. MOVNTQ(%%mm5, 16(dst))\
  649. \
  650. "add $24, "#dst" \n\t"\
  651. \
  652. "add $8, "#index" \n\t"\
  653. "cmp "#dstw", "#index" \n\t"\
  654. " jb 1b \n\t"
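/*
 * MMX2 variant of the 24-bit writer: instead of the shift/or shuffling done
 * by WRITEBGR24MMX it uses pshufw together with the precomputed M24A/M24B/M24C
 * byte masks to gather B, G and R bytes directly into the three 8-byte groups
 * of a 24bpp pixel run.
 */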
  655. #define WRITEBGR24MMX2(dst, dstw, index) \
  656. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  657. "movq "MANGLE(M24A)", %%mm0 \n\t"\
  658. "movq "MANGLE(M24C)", %%mm7 \n\t"\
  659. "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
  660. "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
  661. "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
  662. \
  663. "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
  664. "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
  665. "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
  666. \
  667. "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
  668. "por %%mm1, %%mm6 \n\t"\
  669. "por %%mm3, %%mm6 \n\t"\
  670. MOVNTQ(%%mm6, (dst))\
  671. \
  672. "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
  673. "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
  674. "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
  675. "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
  676. \
  677. "pand "MANGLE(M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
  678. "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
  679. "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
  680. \
  681. "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
  682. "por %%mm3, %%mm6 \n\t"\
  683. MOVNTQ(%%mm6, 8(dst))\
  684. \
  685. "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
  686. "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
  687. "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
  688. \
  689. "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
  690. "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
  691. "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
  692. \
  693. "por %%mm1, %%mm3 \n\t"\
  694. "por %%mm3, %%mm6 \n\t"\
  695. MOVNTQ(%%mm6, 16(dst))\
  696. \
  697. "add $24, "#dst" \n\t"\
  698. \
  699. "add $8, "#index" \n\t"\
  700. "cmp "#dstw", "#index" \n\t"\
  701. " jb 1b \n\t"
  702. #ifdef HAVE_MMX2
  703. #undef WRITEBGR24
  704. #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
  705. #else
  706. #undef WRITEBGR24
  707. #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
  708. #endif
  709. #define REAL_WRITEYUY2(dst, dstw, index) \
  710. "packuswb %%mm3, %%mm3 \n\t"\
  711. "packuswb %%mm4, %%mm4 \n\t"\
  712. "packuswb %%mm7, %%mm1 \n\t"\
  713. "punpcklbw %%mm4, %%mm3 \n\t"\
  714. "movq %%mm1, %%mm7 \n\t"\
  715. "punpcklbw %%mm3, %%mm1 \n\t"\
  716. "punpckhbw %%mm3, %%mm7 \n\t"\
  717. \
  718. MOVNTQ(%%mm1, (dst, index, 2))\
  719. MOVNTQ(%%mm7, 8(dst, index, 2))\
  720. \
  721. "add $8, "#index" \n\t"\
  722. "cmp "#dstw", "#index" \n\t"\
  723. " jb 1b \n\t"
  724. #define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
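/*
 * yuv2yuvX: vertical scaling to planar YV12. With MMX the YSCALEYUV2YV12X
 * loop above is run once per output plane (U, V and Y); otherwise the work
 * is handed to the AltiVec or plain C implementation.
 */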
  725. static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  726. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  727. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
  728. {
  729. #ifdef HAVE_MMX
  730. if(uDest != NULL)
  731. {
  732. asm volatile(
  733. YSCALEYUV2YV12X(0, CHR_MMX_FILTER_OFFSET)
  734. :: "r" (&c->redDither),
  735. "r" (uDest), "p" (chrDstW)
  736. : "%"REG_a, "%"REG_d, "%"REG_S
  737. );
  738. asm volatile(
  739. YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET)
  740. :: "r" (&c->redDither),
  741. "r" (vDest), "p" (chrDstW)
  742. : "%"REG_a, "%"REG_d, "%"REG_S
  743. );
  744. }
  745. asm volatile(
  746. YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET)
  747. :: "r" (&c->redDither),
  748. "r" (dest), "p" (dstW)
  749. : "%"REG_a, "%"REG_d, "%"REG_S
  750. );
  751. #else
  752. #ifdef HAVE_ALTIVEC
  753. yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
  754. chrFilter, chrSrc, chrFilterSize,
  755. dest, uDest, vDest, dstW, chrDstW);
  756. #else //HAVE_ALTIVEC
  757. yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
  758. chrFilter, chrSrc, chrFilterSize,
  759. dest, uDest, vDest, dstW, chrDstW);
  760. #endif //!HAVE_ALTIVEC
  761. #endif
  762. }
  763. static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  764. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  765. uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
  766. {
  767. yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
  768. chrFilter, chrSrc, chrFilterSize,
  769. dest, uDest, dstW, chrDstW, dstFormat);
  770. }
  771. static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
  772. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
  773. {
  774. #ifdef HAVE_MMX
  775. if(uDest != NULL)
  776. {
  777. asm volatile(
  778. YSCALEYUV2YV121
  779. :: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
  780. "g" (-chrDstW)
  781. : "%"REG_a
  782. );
  783. asm volatile(
  784. YSCALEYUV2YV121
  785. :: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
  786. "g" (-chrDstW)
  787. : "%"REG_a
  788. );
  789. }
  790. asm volatile(
  791. YSCALEYUV2YV121
  792. :: "r" (lumSrc + dstW), "r" (dest + dstW),
  793. "g" (-dstW)
  794. : "%"REG_a
  795. );
  796. #else
  797. int i;
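// Plain C fallback: shift the intermediate samples down by 7 bits and clamp
// to 0..255; the &256 test cheaply flags values outside that range for the
// magnitudes the horizontal scaler produces.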
  798. for(i=0; i<dstW; i++)
  799. {
  800. int val= lumSrc[i]>>7;
  801. if(val&256){
  802. if(val<0) val=0;
  803. else val=255;
  804. }
  805. dest[i]= val;
  806. }
  807. if(uDest != NULL)
  808. for(i=0; i<chrDstW; i++)
  809. {
  810. int u=chrSrc[i]>>7;
  811. int v=chrSrc[i + 2048]>>7;
  812. if((u|v)&256){
  813. if(u<0) u=0;
  814. else if (u>255) u=255;
  815. if(v<0) v=0;
  816. else if (v>255) v=255;
  817. }
  818. uDest[i]= u;
  819. vDest[i]= v;
  820. }
  821. #endif
  822. }
  823. /**
  824. * vertical scale YV12 to RGB
  825. */
  826. static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  827. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  828. uint8_t *dest, int dstW, int dstY)
  829. {
  830. int dummy=0;
  831. switch(c->dstFormat)
  832. {
  833. #ifdef HAVE_MMX
  834. case IMGFMT_BGR32:
  835. {
  836. asm volatile(
  837. YSCALEYUV2RGBX
  838. WRITEBGR32(%4, %5, %%REGa)
  839. :: "r" (&c->redDither),
  840. "m" (dummy), "m" (dummy), "m" (dummy),
  841. "r" (dest), "m" (dstW)
  842. : "%"REG_a, "%"REG_d, "%"REG_S
  843. );
  844. }
  845. break;
  846. case IMGFMT_BGR24:
  847. {
  848. asm volatile(
  849. YSCALEYUV2RGBX
  850. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t" //FIXME optimize
  851. "add %4, %%"REG_b" \n\t"
  852. WRITEBGR24(%%REGb, %5, %%REGa)
  853. :: "r" (&c->redDither),
  854. "m" (dummy), "m" (dummy), "m" (dummy),
  855. "r" (dest), "m" (dstW)
  856. : "%"REG_a, "%"REG_b, "%"REG_d, "%"REG_S //FIXME ebx
  857. );
  858. }
  859. break;
  860. case IMGFMT_BGR15:
  861. {
  862. asm volatile(
  863. YSCALEYUV2RGBX
  864. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  865. #ifdef DITHER1XBPP
  866. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  867. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  868. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  869. #endif
  870. WRITEBGR15(%4, %5, %%REGa)
  871. :: "r" (&c->redDither),
  872. "m" (dummy), "m" (dummy), "m" (dummy),
  873. "r" (dest), "m" (dstW)
  874. : "%"REG_a, "%"REG_d, "%"REG_S
  875. );
  876. }
  877. break;
  878. case IMGFMT_BGR16:
  879. {
  880. asm volatile(
  881. YSCALEYUV2RGBX
  882. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  883. #ifdef DITHER1XBPP
  884. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  885. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  886. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  887. #endif
  888. WRITEBGR16(%4, %5, %%REGa)
  889. :: "r" (&c->redDither),
  890. "m" (dummy), "m" (dummy), "m" (dummy),
  891. "r" (dest), "m" (dstW)
  892. : "%"REG_a, "%"REG_d, "%"REG_S
  893. );
  894. }
  895. break;
  896. case IMGFMT_YUY2:
  897. {
  898. asm volatile(
  899. YSCALEYUV2PACKEDX
  900. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  901. "psraw $3, %%mm3 \n\t"
  902. "psraw $3, %%mm4 \n\t"
  903. "psraw $3, %%mm1 \n\t"
  904. "psraw $3, %%mm7 \n\t"
  905. WRITEYUY2(%4, %5, %%REGa)
  906. :: "r" (&c->redDither),
  907. "m" (dummy), "m" (dummy), "m" (dummy),
  908. "r" (dest), "m" (dstW)
  909. : "%"REG_a, "%"REG_d, "%"REG_S
  910. );
  911. }
  912. break;
  913. #endif
  914. default:
  915. #ifdef HAVE_ALTIVEC
  916. /* The following list of supported dstFormat values should
  917. match what's found in the body of altivec_yuv2packedX() */
  918. if(c->dstFormat==IMGFMT_ABGR || c->dstFormat==IMGFMT_BGRA ||
  919. c->dstFormat==IMGFMT_BGR24 || c->dstFormat==IMGFMT_RGB24 ||
  920. c->dstFormat==IMGFMT_RGBA || c->dstFormat==IMGFMT_ARGB)
  921. altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize,
  922. chrFilter, chrSrc, chrFilterSize,
  923. dest, dstW, dstY);
  924. else
  925. #endif
  926. yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
  927. chrFilter, chrSrc, chrFilterSize,
  928. dest, dstW, dstY);
  929. break;
  930. }
  931. }
  932. /**
  933. * vertical bilinear scale YV12 to RGB
  934. */
  935. static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
  936. uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
  937. {
  938. int yalpha1=yalpha^4095;
  939. int uvalpha1=uvalpha^4095;
  940. int i;
  941. #if 0 //isn't used
  942. if(flags&SWS_FULL_CHR_H_INT)
  943. {
  944. switch(dstFormat)
  945. {
  946. #ifdef HAVE_MMX
  947. case IMGFMT_BGR32:
  948. asm volatile(
  949. FULL_YSCALEYUV2RGB
  950. "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
  951. "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
  952. "movq %%mm3, %%mm1 \n\t"
  953. "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
  954. "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
  955. MOVNTQ(%%mm3, (%4, %%REGa, 4))
  956. MOVNTQ(%%mm1, 8(%4, %%REGa, 4))
  957. "add $4, %%"REG_a" \n\t"
  958. "cmp %5, %%"REG_a" \n\t"
  959. " jb 1b \n\t"
  960. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" ((long)dstW),
  961. "m" (yalpha1), "m" (uvalpha1)
  962. : "%"REG_a
  963. );
  964. break;
  965. case IMGFMT_BGR24:
  966. asm volatile(
  967. FULL_YSCALEYUV2RGB
  968. // lsb ... msb
  969. "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
  970. "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
  971. "movq %%mm3, %%mm1 \n\t"
  972. "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
  973. "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
  974. "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
  975. "psrlq $8, %%mm3 \n\t" // GR0BGR00
  976. "pand "MANGLE(bm00000111)", %%mm2\n\t" // BGR00000
  977. "pand "MANGLE(bm11111000)", %%mm3\n\t" // 000BGR00
  978. "por %%mm2, %%mm3 \n\t" // BGRBGR00
  979. "movq %%mm1, %%mm2 \n\t"
  980. "psllq $48, %%mm1 \n\t" // 000000BG
  981. "por %%mm1, %%mm3 \n\t" // BGRBGRBG
  982. "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
  983. "psrld $16, %%mm2 \n\t" // R000R000
  984. "psrlq $24, %%mm1 \n\t" // 0BGR0000
  985. "por %%mm2, %%mm1 \n\t" // RBGRR000
  986. "mov %4, %%"REG_b" \n\t"
  987. "add %%"REG_a", %%"REG_b" \n\t"
  988. #ifdef HAVE_MMX2
  989. //FIXME Alignment
  990. "movntq %%mm3, (%%"REG_b", %%"REG_a", 2)\n\t"
  991. "movntq %%mm1, 8(%%"REG_b", %%"REG_a", 2)\n\t"
  992. #else
  993. "movd %%mm3, (%%"REG_b", %%"REG_a", 2) \n\t"
  994. "psrlq $32, %%mm3 \n\t"
  995. "movd %%mm3, 4(%%"REG_b", %%"REG_a", 2) \n\t"
  996. "movd %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t"
  997. #endif
  998. "add $4, %%"REG_a" \n\t"
  999. "cmp %5, %%"REG_a" \n\t"
  1000. " jb 1b \n\t"
  1001. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
  1002. "m" (yalpha1), "m" (uvalpha1)
  1003. : "%"REG_a, "%"REG_b
  1004. );
  1005. break;
  1006. case IMGFMT_BGR15:
  1007. asm volatile(
  1008. FULL_YSCALEYUV2RGB
  1009. #ifdef DITHER1XBPP
  1010. "paddusb "MANGLE(g5Dither)", %%mm1\n\t"
  1011. "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
  1012. "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
  1013. #endif
  1014. "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
  1015. "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
  1016. "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
  1017. "psrlw $3, %%mm3 \n\t"
  1018. "psllw $2, %%mm1 \n\t"
  1019. "psllw $7, %%mm0 \n\t"
  1020. "pand "MANGLE(g15Mask)", %%mm1 \n\t"
  1021. "pand "MANGLE(r15Mask)", %%mm0 \n\t"
  1022. "por %%mm3, %%mm1 \n\t"
  1023. "por %%mm1, %%mm0 \n\t"
  1024. MOVNTQ(%%mm0, (%4, %%REGa, 2))
  1025. "add $4, %%"REG_a" \n\t"
  1026. "cmp %5, %%"REG_a" \n\t"
  1027. " jb 1b \n\t"
  1028. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1029. "m" (yalpha1), "m" (uvalpha1)
  1030. : "%"REG_a
  1031. );
  1032. break;
  1033. case IMGFMT_BGR16:
  1034. asm volatile(
  1035. FULL_YSCALEYUV2RGB
  1036. #ifdef DITHER1XBPP
  1037. "paddusb "MANGLE(g6Dither)", %%mm1\n\t"
  1038. "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
  1039. "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
  1040. #endif
  1041. "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
  1042. "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
  1043. "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
  1044. "psrlw $3, %%mm3 \n\t"
  1045. "psllw $3, %%mm1 \n\t"
  1046. "psllw $8, %%mm0 \n\t"
  1047. "pand "MANGLE(g16Mask)", %%mm1 \n\t"
  1048. "pand "MANGLE(r16Mask)", %%mm0 \n\t"
  1049. "por %%mm3, %%mm1 \n\t"
  1050. "por %%mm1, %%mm0 \n\t"
  1051. MOVNTQ(%%mm0, (%4, %%REGa, 2))
  1052. "add $4, %%"REG_a" \n\t"
  1053. "cmp %5, %%"REG_a" \n\t"
  1054. " jb 1b \n\t"
  1055. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1056. "m" (yalpha1), "m" (uvalpha1)
  1057. : "%"REG_a
  1058. );
  1059. break;
  1060. #endif
  1061. case IMGFMT_RGB32:
  1062. #ifndef HAVE_MMX
  1063. case IMGFMT_BGR32:
  1064. #endif
  1065. if(dstFormat==IMGFMT_BGR32)
  1066. {
  1067. int i;
  1068. #ifdef WORDS_BIGENDIAN
  1069. dest++;
  1070. #endif
  1071. for(i=0;i<dstW;i++){
  1072. // vertical linear interpolation && yuv2rgb in a single step:
  1073. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1074. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1075. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1076. dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
  1077. dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
  1078. dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
  1079. dest+= 4;
  1080. }
  1081. }
  1082. else if(dstFormat==IMGFMT_BGR24)
  1083. {
  1084. int i;
  1085. for(i=0;i<dstW;i++){
  1086. // vertical linear interpolation && yuv2rgb in a single step:
  1087. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1088. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1089. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1090. dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
  1091. dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
  1092. dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
  1093. dest+= 3;
  1094. }
  1095. }
  1096. else if(dstFormat==IMGFMT_BGR16)
  1097. {
  1098. int i;
  1099. for(i=0;i<dstW;i++){
  1100. // vertical linear interpolation && yuv2rgb in a single step:
  1101. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1102. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1103. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1104. ((uint16_t*)dest)[i] =
  1105. clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
  1106. clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
  1107. clip_table16r[(Y + yuvtab_3343[V]) >>13];
  1108. }
  1109. }
  1110. else if(dstFormat==IMGFMT_BGR15)
  1111. {
  1112. int i;
  1113. for(i=0;i<dstW;i++){
  1114. // vertical linear interpolation && yuv2rgb in a single step:
  1115. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1116. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1117. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1118. ((uint16_t*)dest)[i] =
  1119. clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
  1120. clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
  1121. clip_table15r[(Y + yuvtab_3343[V]) >>13];
  1122. }
  1123. }
  1124. }//FULL_UV_IPOL
  1125. else
  1126. {
  1127. #endif // if 0
  1128. #ifdef HAVE_MMX
  1129. switch(c->dstFormat)
  1130. {
  1131. //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
  1132. case IMGFMT_BGR32:
  1133. asm volatile(
  1134. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1135. "mov %4, %%"REG_SP" \n\t"
  1136. YSCALEYUV2RGB(%%REGa, %5)
  1137. WRITEBGR32(%%REGSP, 8280(%5), %%REGa)
  1138. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1139. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1140. "r" (&c->redDither)
  1141. : "%"REG_a
  1142. );
  1143. return;
  1144. case IMGFMT_BGR24:
  1145. asm volatile(
  1146. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1147. "mov %4, %%"REG_SP" \n\t"
  1148. YSCALEYUV2RGB(%%REGa, %5)
  1149. WRITEBGR24(%%REGSP, 8280(%5), %%REGa)
  1150. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1151. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1152. "r" (&c->redDither)
  1153. : "%"REG_a
  1154. );
  1155. return;
  1156. case IMGFMT_BGR15:
  1157. asm volatile(
  1158. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1159. "mov %4, %%"REG_SP" \n\t"
  1160. YSCALEYUV2RGB(%%REGa, %5)
  1161. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1162. #ifdef DITHER1XBPP
  1163. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1164. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1165. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1166. #endif
  1167. WRITEBGR15(%%REGSP, 8280(%5), %%REGa)
  1168. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1169. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1170. "r" (&c->redDither)
  1171. : "%"REG_a
  1172. );
  1173. return;
  1174. case IMGFMT_BGR16:
  1175. asm volatile(
  1176. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1177. "mov %4, %%"REG_SP" \n\t"
  1178. YSCALEYUV2RGB(%%REGa, %5)
  1179. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1180. #ifdef DITHER1XBPP
  1181. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1182. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1183. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1184. #endif
  1185. WRITEBGR16(%%REGSP, 8280(%5), %%REGa)
  1186. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1187. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1188. "r" (&c->redDither)
  1189. : "%"REG_a
  1190. );
  1191. return;
  1192. case IMGFMT_YUY2:
  1193. asm volatile(
  1194. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1195. "mov %4, %%"REG_SP" \n\t"
  1196. YSCALEYUV2PACKED(%%REGa, %5)
  1197. WRITEYUY2(%%REGSP, 8280(%5), %%REGa)
  1198. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1199. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1200. "r" (&c->redDither)
  1201. : "%"REG_a
  1202. );
  1203. return;
  1204. default: break;
  1205. }
  1206. #endif //HAVE_MMX
  1207. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C)
  1208. }
  1209. /**
  1210. * YV12 to RGB without scaling or interpolating
  1211. */
  1212. static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
  1213. uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
  1214. {
  1215. const int yalpha1=0;
  1216. int i;
  1217. uint16_t *buf1= buf0; //FIXME needed for the rgb1/bgr1
  1218. const int yalpha= 4096; //FIXME ...
  1219. if(flags&SWS_FULL_CHR_H_INT)
  1220. {
  1221. RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
  1222. return;
  1223. }
  1224. #ifdef HAVE_MMX
  1225. if( uvalpha < 2048 ) // note this is not correct (shifts chrominance by 0.5 pixels) but it's a bit faster
  1226. {
  1227. switch(dstFormat)
  1228. {
  1229. case IMGFMT_BGR32:
  1230. asm volatile(
  1231. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1232. "mov %4, %%"REG_SP" \n\t"
  1233. YSCALEYUV2RGB1(%%REGa, %5)
  1234. WRITEBGR32(%%REGSP, 8280(%5), %%REGa)
  1235. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1236. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1237. "r" (&c->redDither)
  1238. : "%"REG_a
  1239. );
  1240. return;
  1241. case IMGFMT_BGR24:
  1242. asm volatile(
  1243. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1244. "mov %4, %%"REG_SP" \n\t"
  1245. YSCALEYUV2RGB1(%%REGa, %5)
  1246. WRITEBGR24(%%REGSP, 8280(%5), %%REGa)
  1247. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1248. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1249. "r" (&c->redDither)
  1250. : "%"REG_a
  1251. );
  1252. return;
  1253. case IMGFMT_BGR15:
  1254. asm volatile(
  1255. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1256. "mov %4, %%"REG_SP" \n\t"
  1257. YSCALEYUV2RGB1(%%REGa, %5)
  1258. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1259. #ifdef DITHER1XBPP
  1260. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1261. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1262. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1263. #endif
  1264. WRITEBGR15(%%REGSP, 8280(%5), %%REGa)
  1265. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1266. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1267. "r" (&c->redDither)
  1268. : "%"REG_a
  1269. );
  1270. return;
  1271. case IMGFMT_BGR16:
  1272. asm volatile(
  1273. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1274. "mov %4, %%"REG_SP" \n\t"
  1275. YSCALEYUV2RGB1(%%REGa, %5)
  1276. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1277. #ifdef DITHER1XBPP
  1278. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1279. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1280. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1281. #endif
  1282. WRITEBGR16(%%REGSP, 8280(%5), %%REGa)
  1283. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1284. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1285. "r" (&c->redDither)
  1286. : "%"REG_a
  1287. );
  1288. return;
  1289. case IMGFMT_YUY2:
  1290. asm volatile(
  1291. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1292. "mov %4, %%"REG_SP" \n\t"
  1293. YSCALEYUV2PACKED1(%%REGa, %5)
  1294. WRITEYUY2(%%REGSP, 8280(%5), %%REGa)
  1295. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1296. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1297. "r" (&c->redDither)
  1298. : "%"REG_a
  1299. );
  1300. return;
  1301. }
  1302. }
  1303. else
  1304. {
  1305. switch(dstFormat)
  1306. {
  1307. case IMGFMT_BGR32:
  1308. asm volatile(
  1309. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1310. "mov %4, %%"REG_SP" \n\t"
  1311. YSCALEYUV2RGB1b(%%REGa, %5)
  1312. WRITEBGR32(%%REGSP, 8280(%5), %%REGa)
  1313. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1314. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1315. "r" (&c->redDither)
  1316. : "%"REG_a
  1317. );
  1318. return;
  1319. case IMGFMT_BGR24:
  1320. asm volatile(
  1321. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1322. "mov %4, %%"REG_SP" \n\t"
  1323. YSCALEYUV2RGB1b(%%REGa, %5)
  1324. WRITEBGR24(%%REGSP, 8280(%5), %%REGa)
  1325. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1326. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1327. "r" (&c->redDither)
  1328. : "%"REG_a
  1329. );
  1330. return;
  1331. case IMGFMT_BGR15:
  1332. asm volatile(
  1333. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1334. "mov %4, %%"REG_SP" \n\t"
  1335. YSCALEYUV2RGB1b(%%REGa, %5)
  1336. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1337. #ifdef DITHER1XBPP
  1338. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1339. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1340. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1341. #endif
  1342. WRITEBGR15(%%REGSP, 8280(%5), %%REGa)
  1343. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1344. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1345. "r" (&c->redDither)
  1346. : "%"REG_a
  1347. );
  1348. return;
  1349. case IMGFMT_BGR16:
  1350. asm volatile(
  1351. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1352. "mov %4, %%"REG_SP" \n\t"
  1353. YSCALEYUV2RGB1b(%%REGa, %5)
  1354. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1355. #ifdef DITHER1XBPP
  1356. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1357. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1358. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1359. #endif
  1360. WRITEBGR16(%%REGSP, 8280(%5), %%REGa)
  1361. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1362. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1363. "r" (&c->redDither)
  1364. : "%"REG_a
  1365. );
  1366. return;
  1367. case IMGFMT_YUY2:
  1368. asm volatile(
  1369. "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
  1370. "mov %4, %%"REG_SP" \n\t"
  1371. YSCALEYUV2PACKED1b(%%REGa, %5)
  1372. WRITEYUY2(%%REGSP, 8280(%5), %%REGa)
  1373. "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
  1374. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
  1375. "r" (&c->redDither)
  1376. : "%"REG_a
  1377. );
  1378. return;
  1379. }
  1380. }
  1381. #endif
  1382. if( uvalpha < 2048 )
  1383. {
  1384. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C)
  1385. }else{
  1386. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C)
  1387. }
  1388. }
  1389. //FIXME yuy2* can read up to 7 samples too many
  1390. static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width)
  1391. {
  1392. #ifdef HAVE_MMX
  1393. asm volatile(
  1394. "movq "MANGLE(bm01010101)", %%mm2\n\t"
  1395. "mov %0, %%"REG_a" \n\t"
  1396. "1: \n\t"
  1397. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1398. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1399. "pand %%mm2, %%mm0 \n\t"
  1400. "pand %%mm2, %%mm1 \n\t"
  1401. "packuswb %%mm1, %%mm0 \n\t"
  1402. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1403. "add $8, %%"REG_a" \n\t"
  1404. " js 1b \n\t"
  1405. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1406. : "%"REG_a
  1407. );
  1408. #else
  1409. int i;
  1410. for(i=0; i<width; i++)
  1411. dst[i]= src[2*i];
  1412. #endif
  1413. }
  1414. static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
  1415. {
  1416. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1417. asm volatile(
  1418. "movq "MANGLE(bm01010101)", %%mm4\n\t"
  1419. "mov %0, %%"REG_a" \n\t"
  1420. "1: \n\t"
  1421. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1422. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1423. "movq (%2, %%"REG_a",4), %%mm2 \n\t"
  1424. "movq 8(%2, %%"REG_a",4), %%mm3 \n\t"
  1425. PAVGB(%%mm2, %%mm0)
  1426. PAVGB(%%mm3, %%mm1)
  1427. "psrlw $8, %%mm0 \n\t"
  1428. "psrlw $8, %%mm1 \n\t"
  1429. "packuswb %%mm1, %%mm0 \n\t"
  1430. "movq %%mm0, %%mm1 \n\t"
  1431. "psrlw $8, %%mm0 \n\t"
  1432. "pand %%mm4, %%mm1 \n\t"
  1433. "packuswb %%mm0, %%mm0 \n\t"
  1434. "packuswb %%mm1, %%mm1 \n\t"
  1435. "movd %%mm0, (%4, %%"REG_a") \n\t"
  1436. "movd %%mm1, (%3, %%"REG_a") \n\t"
  1437. "add $4, %%"REG_a" \n\t"
  1438. " js 1b \n\t"
  1439. : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
  1440. : "%"REG_a
  1441. );
  1442. #else
  1443. int i;
  1444. for(i=0; i<width; i++)
  1445. {
  1446. dstU[i]= (src1[4*i + 1] + src2[4*i + 1])>>1;
  1447. dstV[i]= (src1[4*i + 3] + src2[4*i + 3])>>1;
  1448. }
  1449. #endif
  1450. }
  1451. // this is almost identical to the previous one, and exists only because yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses
  1452. static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width)
  1453. {
  1454. #ifdef HAVE_MMX
  1455. asm volatile(
  1456. "mov %0, %%"REG_a" \n\t"
  1457. "1: \n\t"
  1458. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1459. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1460. "psrlw $8, %%mm0 \n\t"
  1461. "psrlw $8, %%mm1 \n\t"
  1462. "packuswb %%mm1, %%mm0 \n\t"
  1463. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1464. "add $8, %%"REG_a" \n\t"
  1465. " js 1b \n\t"
  1466. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1467. : "%"REG_a
  1468. );
  1469. #else
  1470. int i;
  1471. for(i=0; i<width; i++)
  1472. dst[i]= src[2*i+1];
  1473. #endif
  1474. }
  1475. static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
  1476. {
  1477. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1478. asm volatile(
  1479. "movq "MANGLE(bm01010101)", %%mm4\n\t"
  1480. "mov %0, %%"REG_a" \n\t"
  1481. "1: \n\t"
  1482. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1483. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1484. "movq (%2, %%"REG_a",4), %%mm2 \n\t"
  1485. "movq 8(%2, %%"REG_a",4), %%mm3 \n\t"
  1486. PAVGB(%%mm2, %%mm0)
  1487. PAVGB(%%mm3, %%mm1)
  1488. "pand %%mm4, %%mm0 \n\t"
  1489. "pand %%mm4, %%mm1 \n\t"
  1490. "packuswb %%mm1, %%mm0 \n\t"
  1491. "movq %%mm0, %%mm1 \n\t"
  1492. "psrlw $8, %%mm0 \n\t"
  1493. "pand %%mm4, %%mm1 \n\t"
  1494. "packuswb %%mm0, %%mm0 \n\t"
  1495. "packuswb %%mm1, %%mm1 \n\t"
  1496. "movd %%mm0, (%4, %%"REG_a") \n\t"
  1497. "movd %%mm1, (%3, %%"REG_a") \n\t"
  1498. "add $4, %%"REG_a" \n\t"
  1499. " js 1b \n\t"
  1500. : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
  1501. : "%"REG_a
  1502. );
  1503. #else
  1504. int i;
  1505. for(i=0; i<width; i++)
  1506. {
  1507. dstU[i]= (src1[4*i + 0] + src2[4*i + 0])>>1;
  1508. dstV[i]= (src1[4*i + 2] + src2[4*i + 2])>>1;
  1509. }
  1510. #endif
  1511. }
  1512. static inline void RENAME(bgr32ToY)(uint8_t *dst, uint8_t *src, int width)
  1513. {
  1514. int i;
  1515. for(i=0; i<width; i++)
  1516. {
  1517. int b= ((uint32_t*)src)[i]&0xFF;
  1518. int g= (((uint32_t*)src)[i]>>8)&0xFF;
  1519. int r= (((uint32_t*)src)[i]>>16)&0xFF;
  1520. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  1521. }
  1522. }
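/* The rounding constant in the RGB/BGR -> Y fallbacks can be read as
 *   33<<(RGB2YUV_SHIFT-1) == (16<<RGB2YUV_SHIFT) + (1<<(RGB2YUV_SHIFT-1))
 * i.e. (assuming RY/GY/BY are the luma coefficients scaled by
 * 1<<RGB2YUV_SHIFT, as defined elsewhere in swscale) it adds the +16
 * offset of limited-range luma plus 0.5 for rounding before the final
 * shift.  For example, with RGB2YUV_SHIFT==15: 33<<14 == 16*32768 + 16384
 * == 540672. */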
  1523. static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1524. {
  1525. int i;
  1526. for(i=0; i<width; i++)
  1527. {
  1528. const int a= ((uint32_t*)src1)[2*i+0];
  1529. const int e= ((uint32_t*)src1)[2*i+1];
  1530. const int c= ((uint32_t*)src2)[2*i+0];
  1531. const int d= ((uint32_t*)src2)[2*i+1];
  1532. const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF);
  1533. const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00);
  1534. const int b= l&0x3FF;
  1535. const int g= h>>8;
  1536. const int r= l>>16;
  1537. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1538. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1539. }
  1540. }
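/* How the packed sums above stay overflow-free: a, e, c and d are four
 * 0xAARRGGBB pixels of a 2x2 block, and (x & 0xFF00FF) keeps B in bits
 * 0..7 and R in bits 16..23.  Summing four such values keeps each field
 * within 10 bits (4*255 == 1020 == 0x3FC), so the B sum never carries
 * into the R field:
 *   l & 0x3FF   ->  B0+B1+B2+B3
 *   l >> 16     ->  R0+R1+R2+R3
 *   h >> 8      ->  G0+G1+G2+G3        (from the 0x00FF00 fields)
 * The ">>(RGB2YUV_SHIFT+2)" afterwards divides the sums by 4 to turn them
 * back into averages. */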
  1541. static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, long width)
  1542. {
  1543. #ifdef HAVE_MMX
  1544. asm volatile(
  1545. "mov %2, %%"REG_a" \n\t"
  1546. "movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t"
  1547. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1548. "pxor %%mm7, %%mm7 \n\t"
  1549. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t"
  1550. ".balign 16 \n\t"
  1551. "1: \n\t"
  1552. PREFETCH" 64(%0, %%"REG_b") \n\t"
  1553. "movd (%0, %%"REG_b"), %%mm0 \n\t"
  1554. "movd 3(%0, %%"REG_b"), %%mm1 \n\t"
  1555. "punpcklbw %%mm7, %%mm0 \n\t"
  1556. "punpcklbw %%mm7, %%mm1 \n\t"
  1557. "movd 6(%0, %%"REG_b"), %%mm2 \n\t"
  1558. "movd 9(%0, %%"REG_b"), %%mm3 \n\t"
  1559. "punpcklbw %%mm7, %%mm2 \n\t"
  1560. "punpcklbw %%mm7, %%mm3 \n\t"
  1561. "pmaddwd %%mm6, %%mm0 \n\t"
  1562. "pmaddwd %%mm6, %%mm1 \n\t"
  1563. "pmaddwd %%mm6, %%mm2 \n\t"
  1564. "pmaddwd %%mm6, %%mm3 \n\t"
  1565. #ifndef FAST_BGR2YV12
  1566. "psrad $8, %%mm0 \n\t"
  1567. "psrad $8, %%mm1 \n\t"
  1568. "psrad $8, %%mm2 \n\t"
  1569. "psrad $8, %%mm3 \n\t"
  1570. #endif
  1571. "packssdw %%mm1, %%mm0 \n\t"
  1572. "packssdw %%mm3, %%mm2 \n\t"
  1573. "pmaddwd %%mm5, %%mm0 \n\t"
  1574. "pmaddwd %%mm5, %%mm2 \n\t"
  1575. "packssdw %%mm2, %%mm0 \n\t"
  1576. "psraw $7, %%mm0 \n\t"
  1577. "movd 12(%0, %%"REG_b"), %%mm4 \n\t"
  1578. "movd 15(%0, %%"REG_b"), %%mm1 \n\t"
  1579. "punpcklbw %%mm7, %%mm4 \n\t"
  1580. "punpcklbw %%mm7, %%mm1 \n\t"
  1581. "movd 18(%0, %%"REG_b"), %%mm2 \n\t"
  1582. "movd 21(%0, %%"REG_b"), %%mm3 \n\t"
  1583. "punpcklbw %%mm7, %%mm2 \n\t"
  1584. "punpcklbw %%mm7, %%mm3 \n\t"
  1585. "pmaddwd %%mm6, %%mm4 \n\t"
  1586. "pmaddwd %%mm6, %%mm1 \n\t"
  1587. "pmaddwd %%mm6, %%mm2 \n\t"
  1588. "pmaddwd %%mm6, %%mm3 \n\t"
  1589. #ifndef FAST_BGR2YV12
  1590. "psrad $8, %%mm4 \n\t"
  1591. "psrad $8, %%mm1 \n\t"
  1592. "psrad $8, %%mm2 \n\t"
  1593. "psrad $8, %%mm3 \n\t"
  1594. #endif
  1595. "packssdw %%mm1, %%mm4 \n\t"
  1596. "packssdw %%mm3, %%mm2 \n\t"
  1597. "pmaddwd %%mm5, %%mm4 \n\t"
  1598. "pmaddwd %%mm5, %%mm2 \n\t"
  1599. "add $24, %%"REG_b" \n\t"
  1600. "packssdw %%mm2, %%mm4 \n\t"
  1601. "psraw $7, %%mm4 \n\t"
  1602. "packuswb %%mm4, %%mm0 \n\t"
  1603. "paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t"
  1604. "movq %%mm0, (%1, %%"REG_a") \n\t"
  1605. "add $8, %%"REG_a" \n\t"
  1606. " js 1b \n\t"
  1607. : : "r" (src+width*3), "r" (dst+width), "g" (-width)
  1608. : "%"REG_a, "%"REG_b
  1609. );
  1610. #else
  1611. int i;
  1612. for(i=0; i<width; i++)
  1613. {
  1614. int b= src[i*3+0];
  1615. int g= src[i*3+1];
  1616. int r= src[i*3+2];
  1617. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  1618. }
  1619. #endif
  1620. }
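/* Addressing note for the MMX loop above: %REG_a counts output pixels
 * (starting at -width, as elsewhere in this file) and %REG_b is kept at
 * 3*%REG_a by the lea, so it walks the 24bpp BGR input while %REG_a walks
 * the 8bpp luma output.  Each iteration converts 8 pixels: 24 input bytes
 * ("add $24") and one movq of 8 Y bytes. */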
  1621. static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
  1622. {
  1623. #ifdef HAVE_MMX
  1624. asm volatile(
  1625. "mov %4, %%"REG_a" \n\t"
  1626. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1627. "movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t"
  1628. "pxor %%mm7, %%mm7 \n\t"
  1629. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b" \n\t"
  1630. "add %%"REG_b", %%"REG_b" \n\t"
  1631. ".balign 16 \n\t"
  1632. "1: \n\t"
  1633. PREFETCH" 64(%0, %%"REG_b") \n\t"
  1634. PREFETCH" 64(%1, %%"REG_b") \n\t"
  1635. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1636. "movq (%0, %%"REG_b"), %%mm0 \n\t"
  1637. "movq (%1, %%"REG_b"), %%mm1 \n\t"
  1638. "movq 6(%0, %%"REG_b"), %%mm2 \n\t"
  1639. "movq 6(%1, %%"REG_b"), %%mm3 \n\t"
  1640. PAVGB(%%mm1, %%mm0)
  1641. PAVGB(%%mm3, %%mm2)
  1642. "movq %%mm0, %%mm1 \n\t"
  1643. "movq %%mm2, %%mm3 \n\t"
  1644. "psrlq $24, %%mm0 \n\t"
  1645. "psrlq $24, %%mm2 \n\t"
  1646. PAVGB(%%mm1, %%mm0)
  1647. PAVGB(%%mm3, %%mm2)
  1648. "punpcklbw %%mm7, %%mm0 \n\t"
  1649. "punpcklbw %%mm7, %%mm2 \n\t"
  1650. #else
  1651. "movd (%0, %%"REG_b"), %%mm0 \n\t"
  1652. "movd (%1, %%"REG_b"), %%mm1 \n\t"
  1653. "movd 3(%0, %%"REG_b"), %%mm2 \n\t"
  1654. "movd 3(%1, %%"REG_b"), %%mm3 \n\t"
  1655. "punpcklbw %%mm7, %%mm0 \n\t"
  1656. "punpcklbw %%mm7, %%mm1 \n\t"
  1657. "punpcklbw %%mm7, %%mm2 \n\t"
  1658. "punpcklbw %%mm7, %%mm3 \n\t"
  1659. "paddw %%mm1, %%mm0 \n\t"
  1660. "paddw %%mm3, %%mm2 \n\t"
  1661. "paddw %%mm2, %%mm0 \n\t"
  1662. "movd 6(%0, %%"REG_b"), %%mm4 \n\t"
  1663. "movd 6(%1, %%"REG_b"), %%mm1 \n\t"
  1664. "movd 9(%0, %%"REG_b"), %%mm2 \n\t"
  1665. "movd 9(%1, %%"REG_b"), %%mm3 \n\t"
  1666. "punpcklbw %%mm7, %%mm4 \n\t"
  1667. "punpcklbw %%mm7, %%mm1 \n\t"
  1668. "punpcklbw %%mm7, %%mm2 \n\t"
  1669. "punpcklbw %%mm7, %%mm3 \n\t"
  1670. "paddw %%mm1, %%mm4 \n\t"
  1671. "paddw %%mm3, %%mm2 \n\t"
  1672. "paddw %%mm4, %%mm2 \n\t"
  1673. "psrlw $2, %%mm0 \n\t"
  1674. "psrlw $2, %%mm2 \n\t"
  1675. #endif
  1676. "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
  1677. "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
  1678. "pmaddwd %%mm0, %%mm1 \n\t"
  1679. "pmaddwd %%mm2, %%mm3 \n\t"
  1680. "pmaddwd %%mm6, %%mm0 \n\t"
  1681. "pmaddwd %%mm6, %%mm2 \n\t"
  1682. #ifndef FAST_BGR2YV12
  1683. "psrad $8, %%mm0 \n\t"
  1684. "psrad $8, %%mm1 \n\t"
  1685. "psrad $8, %%mm2 \n\t"
  1686. "psrad $8, %%mm3 \n\t"
  1687. #endif
  1688. "packssdw %%mm2, %%mm0 \n\t"
  1689. "packssdw %%mm3, %%mm1 \n\t"
  1690. "pmaddwd %%mm5, %%mm0 \n\t"
  1691. "pmaddwd %%mm5, %%mm1 \n\t"
  1692. "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
  1693. "psraw $7, %%mm0 \n\t"
  1694. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1695. "movq 12(%0, %%"REG_b"), %%mm4 \n\t"
  1696. "movq 12(%1, %%"REG_b"), %%mm1 \n\t"
  1697. "movq 18(%0, %%"REG_b"), %%mm2 \n\t"
  1698. "movq 18(%1, %%"REG_b"), %%mm3 \n\t"
  1699. PAVGB(%%mm1, %%mm4)
  1700. PAVGB(%%mm3, %%mm2)
  1701. "movq %%mm4, %%mm1 \n\t"
  1702. "movq %%mm2, %%mm3 \n\t"
  1703. "psrlq $24, %%mm4 \n\t"
  1704. "psrlq $24, %%mm2 \n\t"
  1705. PAVGB(%%mm1, %%mm4)
  1706. PAVGB(%%mm3, %%mm2)
  1707. "punpcklbw %%mm7, %%mm4 \n\t"
  1708. "punpcklbw %%mm7, %%mm2 \n\t"
  1709. #else
  1710. "movd 12(%0, %%"REG_b"), %%mm4 \n\t"
  1711. "movd 12(%1, %%"REG_b"), %%mm1 \n\t"
  1712. "movd 15(%0, %%"REG_b"), %%mm2 \n\t"
  1713. "movd 15(%1, %%"REG_b"), %%mm3 \n\t"
  1714. "punpcklbw %%mm7, %%mm4 \n\t"
  1715. "punpcklbw %%mm7, %%mm1 \n\t"
  1716. "punpcklbw %%mm7, %%mm2 \n\t"
  1717. "punpcklbw %%mm7, %%mm3 \n\t"
  1718. "paddw %%mm1, %%mm4 \n\t"
  1719. "paddw %%mm3, %%mm2 \n\t"
  1720. "paddw %%mm2, %%mm4 \n\t"
  1721. "movd 18(%0, %%"REG_b"), %%mm5 \n\t"
  1722. "movd 18(%1, %%"REG_b"), %%mm1 \n\t"
  1723. "movd 21(%0, %%"REG_b"), %%mm2 \n\t"
  1724. "movd 21(%1, %%"REG_b"), %%mm3 \n\t"
  1725. "punpcklbw %%mm7, %%mm5 \n\t"
  1726. "punpcklbw %%mm7, %%mm1 \n\t"
  1727. "punpcklbw %%mm7, %%mm2 \n\t"
  1728. "punpcklbw %%mm7, %%mm3 \n\t"
  1729. "paddw %%mm1, %%mm5 \n\t"
  1730. "paddw %%mm3, %%mm2 \n\t"
  1731. "paddw %%mm5, %%mm2 \n\t"
  1732. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1733. "psrlw $2, %%mm4 \n\t"
  1734. "psrlw $2, %%mm2 \n\t"
  1735. #endif
  1736. "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
  1737. "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
  1738. "pmaddwd %%mm4, %%mm1 \n\t"
  1739. "pmaddwd %%mm2, %%mm3 \n\t"
  1740. "pmaddwd %%mm6, %%mm4 \n\t"
  1741. "pmaddwd %%mm6, %%mm2 \n\t"
  1742. #ifndef FAST_BGR2YV12
  1743. "psrad $8, %%mm4 \n\t"
  1744. "psrad $8, %%mm1 \n\t"
  1745. "psrad $8, %%mm2 \n\t"
  1746. "psrad $8, %%mm3 \n\t"
  1747. #endif
  1748. "packssdw %%mm2, %%mm4 \n\t"
  1749. "packssdw %%mm3, %%mm1 \n\t"
  1750. "pmaddwd %%mm5, %%mm4 \n\t"
  1751. "pmaddwd %%mm5, %%mm1 \n\t"
  1752. "add $24, %%"REG_b" \n\t"
  1753. "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
  1754. "psraw $7, %%mm4 \n\t"
  1755. "movq %%mm0, %%mm1 \n\t"
  1756. "punpckldq %%mm4, %%mm0 \n\t"
  1757. "punpckhdq %%mm4, %%mm1 \n\t"
  1758. "packsswb %%mm1, %%mm0 \n\t"
  1759. "paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t"
  1760. "movd %%mm0, (%2, %%"REG_a") \n\t"
  1761. "punpckhdq %%mm0, %%mm0 \n\t"
  1762. "movd %%mm0, (%3, %%"REG_a") \n\t"
  1763. "add $4, %%"REG_a" \n\t"
  1764. " js 1b \n\t"
  1765. : : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width)
  1766. : "%"REG_a, "%"REG_b
  1767. );
  1768. #else
  1769. int i;
  1770. for(i=0; i<width; i++)
  1771. {
  1772. int b= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
  1773. int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
  1774. int r= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
  1775. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1776. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1777. }
  1778. #endif
  1779. }
  1780. static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width)
  1781. {
  1782. int i;
  1783. for(i=0; i<width; i++)
  1784. {
  1785. int d= ((uint16_t*)src)[i];
  1786. int b= d&0x1F;
  1787. int g= (d>>5)&0x3F;
  1788. int r= (d>>11)&0x1F;
  1789. dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
  1790. }
  1791. }
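/* bgr16ToY scaling, spelled out: in RGB565 r and b are 5 bits and g is 6
 * bits, so
 *   (2*RY*r + GY*g + 2*BY*b) >> (RGB2YUV_SHIFT-2)
 * equals (RY*(8*r) + GY*(4*g) + BY*(8*b)) >> RGB2YUV_SHIFT, i.e. the
 * components are first expanded to an approximate 0..255 range
 * (8*31 == 248, 4*63 == 252) and then weighted as usual; +16 is the luma
 * offset.  The RGB555 functions below use >>(RGB2YUV_SHIFT-3) to expand
 * all three 5-bit components by 8 instead. */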
  1792. static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1793. {
  1794. int i;
  1795. for(i=0; i<width; i++)
  1796. {
  1797. int d0= ((uint32_t*)src1)[i];
  1798. int d1= ((uint32_t*)src2)[i];
  1799. int dl= (d0&0x07E0F81F) + (d1&0x07E0F81F);
  1800. int dh= ((d0>>5)&0x07C0F83F) + ((d1>>5)&0x07C0F83F);
  1801. int dh2= (dh>>11) + (dh<<21);
  1802. int d= dh2 + dl;
  1803. int b= d&0x7F;
  1804. int r= (d>>11)&0x7F;
  1805. int g= d>>21;
  1806. dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
  1807. dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
  1808. }
  1809. }
  1810. static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width)
  1811. {
  1812. int i;
  1813. for(i=0; i<width; i++)
  1814. {
  1815. int d= ((uint16_t*)src)[i];
  1816. int b= d&0x1F;
  1817. int g= (d>>5)&0x1F;
  1818. int r= (d>>10)&0x1F;
  1819. dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
  1820. }
  1821. }
  1822. static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1823. {
  1824. int i;
  1825. for(i=0; i<width; i++)
  1826. {
  1827. int d0= ((uint32_t*)src1)[i];
  1828. int d1= ((uint32_t*)src2)[i];
  1829. int dl= (d0&0x03E07C1F) + (d1&0x03E07C1F);
  1830. int dh= ((d0>>5)&0x03E0F81F) + ((d1>>5)&0x03E0F81F);
  1831. int dh2= (dh>>11) + (dh<<21);
  1832. int d= dh2 + dl;
  1833. int b= d&0x7F;
  1834. int r= (d>>10)&0x7F;
  1835. int g= d>>21;
  1836. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
  1837. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
  1838. }
  1839. }
  1840. static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width)
  1841. {
  1842. int i;
  1843. for(i=0; i<width; i++)
  1844. {
  1845. int r= ((uint32_t*)src)[i]&0xFF;
  1846. int g= (((uint32_t*)src)[i]>>8)&0xFF;
  1847. int b= (((uint32_t*)src)[i]>>16)&0xFF;
  1848. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  1849. }
  1850. }
  1851. static inline void RENAME(rgb32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1852. {
  1853. int i;
  1854. for(i=0; i<width; i++)
  1855. {
  1856. const int a= ((uint32_t*)src1)[2*i+0];
  1857. const int e= ((uint32_t*)src1)[2*i+1];
  1858. const int c= ((uint32_t*)src2)[2*i+0];
  1859. const int d= ((uint32_t*)src2)[2*i+1];
  1860. const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF);
  1861. const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00);
  1862. const int r= l&0x3FF;
  1863. const int g= h>>8;
  1864. const int b= l>>16;
  1865. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1866. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1867. }
  1868. }
  1869. static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, int width)
  1870. {
  1871. int i;
  1872. for(i=0; i<width; i++)
  1873. {
  1874. int r= src[i*3+0];
  1875. int g= src[i*3+1];
  1876. int b= src[i*3+2];
  1877. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  1878. }
  1879. }
  1880. static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1881. {
  1882. int i;
  1883. for(i=0; i<width; i++)
  1884. {
  1885. int r= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
  1886. int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
  1887. int b= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
  1888. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1889. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1890. }
  1891. }
  1892. // Bilinear / Bicubic scaling
  1893. static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
  1894. int16_t *filter, int16_t *filterPos, long filterSize)
  1895. {
  1896. #ifdef HAVE_MMX
  1897. assert(filterSize % 4 == 0 && filterSize>0);
  1898. if(filterSize==4) // always true for upscaling, sometimes for downscaling too
  1899. {
  1900. long counter= -2*dstW;
  1901. filter-= counter*2;
  1902. filterPos-= counter/2;
  1903. dst-= counter/2;
  1904. asm volatile(
  1905. "pxor %%mm7, %%mm7 \n\t"
  1906. "movq "MANGLE(w02)", %%mm6 \n\t"
  1907. "push %%"REG_BP" \n\t" // we use 7 regs here ...
  1908. "mov %%"REG_a", %%"REG_BP" \n\t"
  1909. ".balign 16 \n\t"
  1910. "1: \n\t"
  1911. "movzwl (%2, %%"REG_BP"), %%eax \n\t"
  1912. "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
  1913. "movq (%1, %%"REG_BP", 4), %%mm1\n\t"
  1914. "movq 8(%1, %%"REG_BP", 4), %%mm3\n\t"
  1915. "movd (%3, %%"REG_a"), %%mm0 \n\t"
  1916. "movd (%3, %%"REG_b"), %%mm2 \n\t"
  1917. "punpcklbw %%mm7, %%mm0 \n\t"
  1918. "punpcklbw %%mm7, %%mm2 \n\t"
  1919. "pmaddwd %%mm1, %%mm0 \n\t"
  1920. "pmaddwd %%mm2, %%mm3 \n\t"
  1921. "psrad $8, %%mm0 \n\t"
  1922. "psrad $8, %%mm3 \n\t"
  1923. "packssdw %%mm3, %%mm0 \n\t"
  1924. "pmaddwd %%mm6, %%mm0 \n\t"
  1925. "packssdw %%mm0, %%mm0 \n\t"
  1926. "movd %%mm0, (%4, %%"REG_BP") \n\t"
  1927. "add $4, %%"REG_BP" \n\t"
  1928. " jnc 1b \n\t"
  1929. "pop %%"REG_BP" \n\t"
  1930. : "+a" (counter)
  1931. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  1932. : "%"REG_b
  1933. );
  1934. }
  1935. else if(filterSize==8)
  1936. {
  1937. long counter= -2*dstW;
  1938. filter-= counter*4;
  1939. filterPos-= counter/2;
  1940. dst-= counter/2;
  1941. asm volatile(
  1942. "pxor %%mm7, %%mm7 \n\t"
  1943. "movq "MANGLE(w02)", %%mm6 \n\t"
  1944. "push %%"REG_BP" \n\t" // we use 7 regs here ...
  1945. "mov %%"REG_a", %%"REG_BP" \n\t"
  1946. ".balign 16 \n\t"
  1947. "1: \n\t"
  1948. "movzwl (%2, %%"REG_BP"), %%eax \n\t"
  1949. "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
  1950. "movq (%1, %%"REG_BP", 8), %%mm1\n\t"
  1951. "movq 16(%1, %%"REG_BP", 8), %%mm3\n\t"
  1952. "movd (%3, %%"REG_a"), %%mm0 \n\t"
  1953. "movd (%3, %%"REG_b"), %%mm2 \n\t"
  1954. "punpcklbw %%mm7, %%mm0 \n\t"
  1955. "punpcklbw %%mm7, %%mm2 \n\t"
  1956. "pmaddwd %%mm1, %%mm0 \n\t"
  1957. "pmaddwd %%mm2, %%mm3 \n\t"
  1958. "movq 8(%1, %%"REG_BP", 8), %%mm1\n\t"
  1959. "movq 24(%1, %%"REG_BP", 8), %%mm5\n\t"
  1960. "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
  1961. "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
  1962. "punpcklbw %%mm7, %%mm4 \n\t"
  1963. "punpcklbw %%mm7, %%mm2 \n\t"
  1964. "pmaddwd %%mm1, %%mm4 \n\t"
  1965. "pmaddwd %%mm2, %%mm5 \n\t"
  1966. "paddd %%mm4, %%mm0 \n\t"
  1967. "paddd %%mm5, %%mm3 \n\t"
  1968. "psrad $8, %%mm0 \n\t"
  1969. "psrad $8, %%mm3 \n\t"
  1970. "packssdw %%mm3, %%mm0 \n\t"
  1971. "pmaddwd %%mm6, %%mm0 \n\t"
  1972. "packssdw %%mm0, %%mm0 \n\t"
  1973. "movd %%mm0, (%4, %%"REG_BP") \n\t"
  1974. "add $4, %%"REG_BP" \n\t"
  1975. " jnc 1b \n\t"
  1976. "pop %%"REG_BP" \n\t"
  1977. : "+a" (counter)
  1978. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  1979. : "%"REG_b
  1980. );
  1981. }
  1982. else
  1983. {
  1984. uint8_t *offset = src+filterSize;
  1985. long counter= -2*dstW;
  1986. // filter-= counter*filterSize/2;
  1987. filterPos-= counter/2;
  1988. dst-= counter/2;
  1989. asm volatile(
  1990. "pxor %%mm7, %%mm7 \n\t"
  1991. "movq "MANGLE(w02)", %%mm6 \n\t"
  1992. ".balign 16 \n\t"
  1993. "1: \n\t"
  1994. "mov %2, %%"REG_c" \n\t"
  1995. "movzwl (%%"REG_c", %0), %%eax \n\t"
  1996. "movzwl 2(%%"REG_c", %0), %%ebx \n\t"
  1997. "mov %5, %%"REG_c" \n\t"
  1998. "pxor %%mm4, %%mm4 \n\t"
  1999. "pxor %%mm5, %%mm5 \n\t"
  2000. "2: \n\t"
  2001. "movq (%1), %%mm1 \n\t"
  2002. "movq (%1, %6), %%mm3 \n\t"
  2003. "movd (%%"REG_c", %%"REG_a"), %%mm0\n\t"
  2004. "movd (%%"REG_c", %%"REG_b"), %%mm2\n\t"
  2005. "punpcklbw %%mm7, %%mm0 \n\t"
  2006. "punpcklbw %%mm7, %%mm2 \n\t"
  2007. "pmaddwd %%mm1, %%mm0 \n\t"
  2008. "pmaddwd %%mm2, %%mm3 \n\t"
  2009. "paddd %%mm3, %%mm5 \n\t"
  2010. "paddd %%mm0, %%mm4 \n\t"
  2011. "add $8, %1 \n\t"
  2012. "add $4, %%"REG_c" \n\t"
  2013. "cmp %4, %%"REG_c" \n\t"
  2014. " jb 2b \n\t"
  2015. "add %6, %1 \n\t"
  2016. "psrad $8, %%mm4 \n\t"
  2017. "psrad $8, %%mm5 \n\t"
  2018. "packssdw %%mm5, %%mm4 \n\t"
  2019. "pmaddwd %%mm6, %%mm4 \n\t"
  2020. "packssdw %%mm4, %%mm4 \n\t"
  2021. "mov %3, %%"REG_a" \n\t"
  2022. "movd %%mm4, (%%"REG_a", %0) \n\t"
  2023. "add $4, %0 \n\t"
  2024. " jnc 1b \n\t"
  2025. : "+r" (counter), "+r" (filter)
  2026. : "m" (filterPos), "m" (dst), "m"(offset),
  2027. "m" (src), "r" (filterSize*2)
  2028. : "%"REG_b, "%"REG_a, "%"REG_c
  2029. );
  2030. }
  2031. #else
  2032. #ifdef HAVE_ALTIVEC
  2033. hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
  2034. #else
  2035. int i;
  2036. for(i=0; i<dstW; i++)
  2037. {
  2038. int j;
  2039. int srcPos= filterPos[i];
  2040. int val=0;
  2041. // printf("filterPos: %d\n", filterPos[i]);
  2042. for(j=0; j<filterSize; j++)
  2043. {
  2044. // printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
  2045. val += ((int)src[srcPos + j])*filter[filterSize*i + j];
  2046. }
  2047. // filter += hFilterSize;
  2048. dst[i] = MIN(MAX(0, val>>7), (1<<15)-1); // the cubic equation does overflow ...
  2049. // dst[i] = val>>7;
  2050. }
  2051. #endif
  2052. #endif
  2053. }
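/* hScale computes, for every output sample,
 *   dst[i] = clip( (sum_j src[filterPos[i]+j] * filter[i*filterSize+j]) >> 7,
 *                  0, (1<<15)-1 )
 * exactly as the scalar fallback above does.  The MMX variants are
 * specialized for filterSize 4 and 8 and emit two output samples per loop
 * iteration, which is why the counter starts at -2*dstW and dst, filterPos
 * and filter are biased beforehand so the loop can terminate on "jnc". */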
  2054. // *** horizontal scale Y line to temp buffer
  2055. static inline void RENAME(hyscale)(uint16_t *dst, int dstWidth, uint8_t *src, int srcW, int xInc,
  2056. int flags, int canMMX2BeUsed, int16_t *hLumFilter,
  2057. int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
  2058. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  2059. int32_t *mmx2FilterPos)
  2060. {
  2061. if(srcFormat==IMGFMT_YUY2)
  2062. {
  2063. RENAME(yuy2ToY)(formatConvBuffer, src, srcW);
  2064. src= formatConvBuffer;
  2065. }
  2066. else if(srcFormat==IMGFMT_UYVY)
  2067. {
  2068. RENAME(uyvyToY)(formatConvBuffer, src, srcW);
  2069. src= formatConvBuffer;
  2070. }
  2071. else if(srcFormat==IMGFMT_BGR32)
  2072. {
  2073. RENAME(bgr32ToY)(formatConvBuffer, src, srcW);
  2074. src= formatConvBuffer;
  2075. }
  2076. else if(srcFormat==IMGFMT_BGR24)
  2077. {
  2078. RENAME(bgr24ToY)(formatConvBuffer, src, srcW);
  2079. src= formatConvBuffer;
  2080. }
  2081. else if(srcFormat==IMGFMT_BGR16)
  2082. {
  2083. RENAME(bgr16ToY)(formatConvBuffer, src, srcW);
  2084. src= formatConvBuffer;
  2085. }
  2086. else if(srcFormat==IMGFMT_BGR15)
  2087. {
  2088. RENAME(bgr15ToY)(formatConvBuffer, src, srcW);
  2089. src= formatConvBuffer;
  2090. }
  2091. else if(srcFormat==IMGFMT_RGB32)
  2092. {
  2093. RENAME(rgb32ToY)(formatConvBuffer, src, srcW);
  2094. src= formatConvBuffer;
  2095. }
  2096. else if(srcFormat==IMGFMT_RGB24)
  2097. {
  2098. RENAME(rgb24ToY)(formatConvBuffer, src, srcW);
  2099. src= formatConvBuffer;
  2100. }
  2101. #ifdef HAVE_MMX
  2102. // use the new MMX scaler if the MMX2 one can't be used (it's faster than the plain x86 asm one)
  2103. if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2104. #else
  2105. if(!(flags&SWS_FAST_BILINEAR))
  2106. #endif
  2107. {
  2108. RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
  2109. }
  2110. else // Fast Bilinear upscale / crap downscale
  2111. {
  2112. #if defined(ARCH_X86) || defined(ARCH_X86_64)
  2113. #ifdef HAVE_MMX2
  2114. int i;
  2115. if(canMMX2BeUsed)
  2116. {
  2117. asm volatile(
  2118. "pxor %%mm7, %%mm7 \n\t"
  2119. "mov %0, %%"REG_c" \n\t"
  2120. "mov %1, %%"REG_D" \n\t"
  2121. "mov %2, %%"REG_d" \n\t"
  2122. "mov %3, %%"REG_b" \n\t"
  2123. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2124. PREFETCH" (%%"REG_c") \n\t"
  2125. PREFETCH" 32(%%"REG_c") \n\t"
  2126. PREFETCH" 64(%%"REG_c") \n\t"
  2127. #ifdef ARCH_X86_64
  2128. #define FUNNY_Y_CODE \
  2129. "movl (%%"REG_b"), %%esi \n\t"\
  2130. "call *%4 \n\t"\
  2131. "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
  2132. "add %%"REG_S", %%"REG_c" \n\t"\
  2133. "add %%"REG_a", %%"REG_D" \n\t"\
  2134. "xor %%"REG_a", %%"REG_a" \n\t"\
  2135. #else
  2136. #define FUNNY_Y_CODE \
  2137. "movl (%%"REG_b"), %%esi \n\t"\
  2138. "call *%4 \n\t"\
  2139. "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
  2140. "add %%"REG_a", %%"REG_D" \n\t"\
  2141. "xor %%"REG_a", %%"REG_a" \n\t"\
  2142. #endif
  2143. FUNNY_Y_CODE
  2144. FUNNY_Y_CODE
  2145. FUNNY_Y_CODE
  2146. FUNNY_Y_CODE
  2147. FUNNY_Y_CODE
  2148. FUNNY_Y_CODE
  2149. FUNNY_Y_CODE
  2150. FUNNY_Y_CODE
  2151. :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2152. "m" (funnyYCode)
  2153. : "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
  2154. );
  2155. for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
  2156. }
  2157. else
  2158. {
  2159. #endif
  2160. int xInc_shr16 = xInc >> 16;
  2161. int xInc_mask = xInc & 0xffff;
  2162. //NO MMX just normal asm ...
  2163. asm volatile(
  2164. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2165. "xor %%"REG_b", %%"REG_b" \n\t" // xx
  2166. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2167. ".balign 16 \n\t"
  2168. "1: \n\t"
  2169. "movzbl (%0, %%"REG_b"), %%edi \n\t" //src[xx]
  2170. "movzbl 1(%0, %%"REG_b"), %%esi \n\t" //src[xx+1]
  2171. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2172. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2173. "shll $16, %%edi \n\t"
  2174. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2175. "mov %1, %%"REG_D" \n\t"
  2176. "shrl $9, %%esi \n\t"
  2177. "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
  2178. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2179. "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry
  2180. "movzbl (%0, %%"REG_b"), %%edi \n\t" //src[xx]
  2181. "movzbl 1(%0, %%"REG_b"), %%esi \n\t" //src[xx+1]
  2182. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2183. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2184. "shll $16, %%edi \n\t"
  2185. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2186. "mov %1, %%"REG_D" \n\t"
  2187. "shrl $9, %%esi \n\t"
  2188. "movw %%si, 2(%%"REG_D", %%"REG_a", 2)\n\t"
  2189. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2190. "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry
  2191. "add $2, %%"REG_a" \n\t"
  2192. "cmp %2, %%"REG_a" \n\t"
  2193. " jb 1b \n\t"
  2194. :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
  2195. : "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi"
  2196. );
  2197. #ifdef HAVE_MMX2
  2198. } //if MMX2 can't be used
  2199. #endif
  2200. #else
  2201. int i;
  2202. unsigned int xpos=0;
  2203. for(i=0;i<dstWidth;i++)
  2204. {
  2205. register unsigned int xx=xpos>>16;
  2206. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2207. dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
  2208. xpos+=xInc;
  2209. }
  2210. #endif
  2211. }
  2212. }
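/* Fast-bilinear math used above (asm and C fallback alike): xpos is a
 * 16.16 fixed-point source position, xx its integer part and xalpha the
 * top 7 bits of the fraction, so
 *   dst[i] = (src[xx]<<7) + (src[xx+1]-src[xx])*xalpha
 *          = src[xx]*(128-xalpha) + src[xx+1]*xalpha
 * i.e. a linear blend of the two neighbouring samples, scaled by 128 to
 * keep extra precision for the vertical scaler. */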
  2213. inline static void RENAME(hcscale)(uint16_t *dst, long dstWidth, uint8_t *src1, uint8_t *src2,
  2214. int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
  2215. int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
  2216. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  2217. int32_t *mmx2FilterPos)
  2218. {
  2219. if(srcFormat==IMGFMT_YUY2)
  2220. {
  2221. RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2222. src1= formatConvBuffer;
  2223. src2= formatConvBuffer+2048;
  2224. }
  2225. else if(srcFormat==IMGFMT_UYVY)
  2226. {
  2227. RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2228. src1= formatConvBuffer;
  2229. src2= formatConvBuffer+2048;
  2230. }
  2231. else if(srcFormat==IMGFMT_BGR32)
  2232. {
  2233. RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2234. src1= formatConvBuffer;
  2235. src2= formatConvBuffer+2048;
  2236. }
  2237. else if(srcFormat==IMGFMT_BGR24)
  2238. {
  2239. RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2240. src1= formatConvBuffer;
  2241. src2= formatConvBuffer+2048;
  2242. }
  2243. else if(srcFormat==IMGFMT_BGR16)
  2244. {
  2245. RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2246. src1= formatConvBuffer;
  2247. src2= formatConvBuffer+2048;
  2248. }
  2249. else if(srcFormat==IMGFMT_BGR15)
  2250. {
  2251. RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2252. src1= formatConvBuffer;
  2253. src2= formatConvBuffer+2048;
  2254. }
  2255. else if(srcFormat==IMGFMT_RGB32)
  2256. {
  2257. RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2258. src1= formatConvBuffer;
  2259. src2= formatConvBuffer+2048;
  2260. }
  2261. else if(srcFormat==IMGFMT_RGB24)
  2262. {
  2263. RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2264. src1= formatConvBuffer;
  2265. src2= formatConvBuffer+2048;
  2266. }
  2267. else if(isGray(srcFormat))
  2268. {
  2269. return;
  2270. }
  2271. #ifdef HAVE_MMX
  2272. // use the new MMX scaler if the MMX2 one can't be used (it's faster than the plain x86 asm one)
  2273. if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2274. #else
  2275. if(!(flags&SWS_FAST_BILINEAR))
  2276. #endif
  2277. {
  2278. RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2279. RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2280. }
  2281. else // Fast Bilinear upscale / crap downscale
  2282. {
  2283. #if defined(ARCH_X86) || defined(ARCH_X86_64)
  2284. #ifdef HAVE_MMX2
  2285. int i;
  2286. if(canMMX2BeUsed)
  2287. {
  2288. asm volatile(
  2289. "pxor %%mm7, %%mm7 \n\t"
  2290. "mov %0, %%"REG_c" \n\t"
  2291. "mov %1, %%"REG_D" \n\t"
  2292. "mov %2, %%"REG_d" \n\t"
  2293. "mov %3, %%"REG_b" \n\t"
  2294. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2295. PREFETCH" (%%"REG_c") \n\t"
  2296. PREFETCH" 32(%%"REG_c") \n\t"
  2297. PREFETCH" 64(%%"REG_c") \n\t"
  2298. #ifdef ARCH_X86_64
  2299. #define FUNNY_UV_CODE \
  2300. "movl (%%"REG_b"), %%esi \n\t"\
  2301. "call *%4 \n\t"\
  2302. "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
  2303. "add %%"REG_S", %%"REG_c" \n\t"\
  2304. "add %%"REG_a", %%"REG_D" \n\t"\
  2305. "xor %%"REG_a", %%"REG_a" \n\t"\
  2306. #else
  2307. #define FUNNY_UV_CODE \
  2308. "movl (%%"REG_b"), %%esi \n\t"\
  2309. "call *%4 \n\t"\
  2310. "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
  2311. "add %%"REG_a", %%"REG_D" \n\t"\
  2312. "xor %%"REG_a", %%"REG_a" \n\t"\
  2313. #endif
  2314. FUNNY_UV_CODE
  2315. FUNNY_UV_CODE
  2316. FUNNY_UV_CODE
  2317. FUNNY_UV_CODE
  2318. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2319. "mov %5, %%"REG_c" \n\t" // src
  2320. "mov %1, %%"REG_D" \n\t" // buf1
  2321. "add $4096, %%"REG_D" \n\t"
  2322. PREFETCH" (%%"REG_c") \n\t"
  2323. PREFETCH" 32(%%"REG_c") \n\t"
  2324. PREFETCH" 64(%%"REG_c") \n\t"
  2325. FUNNY_UV_CODE
  2326. FUNNY_UV_CODE
  2327. FUNNY_UV_CODE
  2328. FUNNY_UV_CODE
  2329. :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2330. "m" (funnyUVCode), "m" (src2)
  2331. : "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
  2332. );
  2333. for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
  2334. {
  2335. // printf("%d %d %d\n", dstWidth, i, srcW);
  2336. dst[i] = src1[srcW-1]*128;
  2337. dst[i+2048] = src2[srcW-1]*128;
  2338. }
  2339. }
  2340. else
  2341. {
  2342. #endif
  2343. long xInc_shr16 = (long) (xInc >> 16);
  2344. int xInc_mask = xInc & 0xffff;
  2345. asm volatile(
  2346. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2347. "xor %%"REG_b", %%"REG_b" \n\t" // xx
  2348. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2349. ".balign 16 \n\t"
  2350. "1: \n\t"
  2351. "mov %0, %%"REG_S" \n\t"
  2352. "movzbl (%%"REG_S", %%"REG_b"), %%edi \n\t" //src[xx]
  2353. "movzbl 1(%%"REG_S", %%"REG_b"), %%esi \n\t" //src[xx+1]
  2354. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2355. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2356. "shll $16, %%edi \n\t"
  2357. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2358. "mov %1, %%"REG_D" \n\t"
  2359. "shrl $9, %%esi \n\t"
  2360. "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
  2361. "movzbl (%5, %%"REG_b"), %%edi \n\t" //src[xx]
  2362. "movzbl 1(%5, %%"REG_b"), %%esi \n\t" //src[xx+1]
  2363. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2364. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2365. "shll $16, %%edi \n\t"
  2366. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2367. "mov %1, %%"REG_D" \n\t"
  2368. "shrl $9, %%esi \n\t"
  2369. "movw %%si, 4096(%%"REG_D", %%"REG_a", 2)\n\t"
  2370. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2371. "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry
  2372. "add $1, %%"REG_a" \n\t"
  2373. "cmp %2, %%"REG_a" \n\t"
  2374. " jb 1b \n\t"
  2375. /* GCC-3.3 makes MPlayer crash on IA-32 machines when using "g" operand here,
  2376. which is needed to support GCC-4.0 */
  2377. #if defined(ARCH_X86_64) && ((__GNUC__ > 3) || ( __GNUC__ == 3 && __GNUC_MINOR__ >= 4))
  2378. :: "m" (src1), "m" (dst), "g" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
  2379. #else
  2380. :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
  2381. #endif
  2382. "r" (src2)
  2383. : "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi"
  2384. );
  2385. #ifdef HAVE_MMX2
  2386. } //if MMX2 can't be used
  2387. #endif
  2388. #else
  2389. int i;
  2390. unsigned int xpos=0;
  2391. for(i=0;i<dstWidth;i++)
  2392. {
  2393. register unsigned int xx=xpos>>16;
  2394. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2395. dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
  2396. dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
  2397. /* slower
  2398. dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
  2399. dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
  2400. */
  2401. xpos+=xInc;
  2402. }
  2403. #endif
  2404. }
  2405. }
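/* hcscale writes both chroma planes into one temporary buffer: U goes to
 * dst[0..dstWidth-1] and V to dst[2048..], which is the 4096-byte offset
 * seen in the asm above (dst is uint16_t).  Note that the C fallback
 * weights with (xalpha^127), i.e. 127-xalpha for a 7-bit alpha, so its
 * weights sum to 127 rather than 128; the commented-out "slower" variant
 * is the exact (128-xalpha) form used for luma. */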
  2406. static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
  2407. int srcSliceH, uint8_t* dst[], int dstStride[]){
  2408. /* load a few things into local vars to make the code more readable and faster */
  2409. const int srcW= c->srcW;
  2410. const int dstW= c->dstW;
  2411. const int dstH= c->dstH;
  2412. const int chrDstW= c->chrDstW;
  2413. const int chrSrcW= c->chrSrcW;
  2414. const int lumXInc= c->lumXInc;
  2415. const int chrXInc= c->chrXInc;
  2416. const int dstFormat= c->dstFormat;
  2417. const int srcFormat= c->srcFormat;
  2418. const int flags= c->flags;
  2419. const int canMMX2BeUsed= c->canMMX2BeUsed;
  2420. int16_t *vLumFilterPos= c->vLumFilterPos;
  2421. int16_t *vChrFilterPos= c->vChrFilterPos;
  2422. int16_t *hLumFilterPos= c->hLumFilterPos;
  2423. int16_t *hChrFilterPos= c->hChrFilterPos;
  2424. int16_t *vLumFilter= c->vLumFilter;
  2425. int16_t *vChrFilter= c->vChrFilter;
  2426. int16_t *hLumFilter= c->hLumFilter;
  2427. int16_t *hChrFilter= c->hChrFilter;
  2428. int32_t *lumMmxFilter= c->lumMmxFilter;
  2429. int32_t *chrMmxFilter= c->chrMmxFilter;
  2430. const int vLumFilterSize= c->vLumFilterSize;
  2431. const int vChrFilterSize= c->vChrFilterSize;
  2432. const int hLumFilterSize= c->hLumFilterSize;
  2433. const int hChrFilterSize= c->hChrFilterSize;
  2434. int16_t **lumPixBuf= c->lumPixBuf;
  2435. int16_t **chrPixBuf= c->chrPixBuf;
  2436. const int vLumBufSize= c->vLumBufSize;
  2437. const int vChrBufSize= c->vChrBufSize;
  2438. uint8_t *funnyYCode= c->funnyYCode;
  2439. uint8_t *funnyUVCode= c->funnyUVCode;
  2440. uint8_t *formatConvBuffer= c->formatConvBuffer;
  2441. const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
  2442. const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
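/* -((-srcSliceH)>>sub) is ceil(srcSliceH/(1<<sub)) rather than floor, so a
   slice with an odd number of luma lines still covers its last chroma line */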
  2443. int lastDstY;
  2444. /* vars which will change and which we need to store back into the context */
  2445. int dstY= c->dstY;
  2446. int lumBufIndex= c->lumBufIndex;
  2447. int chrBufIndex= c->chrBufIndex;
  2448. int lastInLumBuf= c->lastInLumBuf;
  2449. int lastInChrBuf= c->lastInChrBuf;
  2450. if(isPacked(c->srcFormat)){
  2451. src[0]=
  2452. src[1]=
  2453. src[2]= src[0];
  2454. srcStride[0]=
  2455. srcStride[1]=
  2456. srcStride[2]= srcStride[0];
  2457. }
  2458. srcStride[1]<<= c->vChrDrop;
  2459. srcStride[2]<<= c->vChrDrop;
  2460. // printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
  2461. // (int)dst[0], (int)dst[1], (int)dst[2]);
  2462. #if 0 //self test FIXME move to a vfilter or something
  2463. {
  2464. static volatile int i=0;
  2465. i++;
  2466. if(srcFormat==IMGFMT_YV12 && i==1 && srcSliceH>= c->srcH)
  2467. selfTest(src, srcStride, c->srcW, c->srcH);
  2468. i--;
  2469. }
  2470. #endif
  2471. //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
  2472. //dstStride[0],dstStride[1],dstStride[2]);
  2473. if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
  2474. {
  2475. static int firstTime=1; //FIXME move this into the context perhaps
  2476. if(flags & SWS_PRINT_INFO && firstTime)
  2477. {
  2478. MSG_WARN("SwScaler: Warning: dstStride is not aligned!\n"
  2479. "SwScaler: ->cannot do aligned memory acesses anymore\n");
  2480. firstTime=0;
  2481. }
  2482. }
  2483. /* Note: the user might start scaling in the middle of the picture, so this will not get executed.
  2484. This is not really intended, but it currently works, so people might rely on it. */
  2485. if(srcSliceY ==0){
  2486. lumBufIndex=0;
  2487. chrBufIndex=0;
  2488. dstY=0;
  2489. lastInLumBuf= -1;
  2490. lastInChrBuf= -1;
  2491. }
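/* Structure of the loop below: for every output line dstY we first make
 * sure that all source lines it needs (vLumFilterPos[dstY] ...
 * vLumFilterPos[dstY]+vLumFilterSize-1, plus the chroma equivalents) have
 * been horizontally scaled into the lumPixBuf/chrPixBuf ring buffers, then
 * run the vertical scaler for that line.  lastInLumBuf/lastInChrBuf track
 * the highest source line already present, so slices can arrive
 * incrementally. */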
  2492. lastDstY= dstY;
  2493. for(;dstY < dstH; dstY++){
  2494. unsigned char *dest =dst[0]+dstStride[0]*dstY;
  2495. const int chrDstY= dstY>>c->chrDstVSubSample;
  2496. unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
  2497. unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
  2498. const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
  2499. const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
  2500. const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
  2501. const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
  2502. //printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
  2503. // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample);
  2504. //handle holes (FAST_BILINEAR & weird filters)
  2505. if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
  2506. if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
  2507. //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
  2508. ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1)
  2509. ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1)
  2510. // Do we have enough lines in this slice to output the dstY line?
  2511. if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
  2512. {
  2513. //Do horizontal scaling
  2514. while(lastInLumBuf < lastLumSrcY)
  2515. {
  2516. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2517. lumBufIndex++;
  2518. // printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
  2519. ASSERT(lumBufIndex < 2*vLumBufSize)
  2520. ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
  2521. ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
  2522. // printf("%d %d\n", lumBufIndex, vLumBufSize);
  2523. RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2524. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2525. funnyYCode, c->srcFormat, formatConvBuffer,
  2526. c->lumMmx2Filter, c->lumMmx2FilterPos);
  2527. lastInLumBuf++;
  2528. }
  2529. while(lastInChrBuf < lastChrSrcY)
  2530. {
  2531. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2532. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2533. chrBufIndex++;
  2534. ASSERT(chrBufIndex < 2*vChrBufSize)
  2535. ASSERT(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH))
  2536. ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
  2537. //FIXME replace parameters through context struct (some at least)
  2538. if(!(isGray(srcFormat) || isGray(dstFormat)))
  2539. RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2540. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2541. funnyUVCode, c->srcFormat, formatConvBuffer,
  2542. c->chrMmx2Filter, c->chrMmx2FilterPos);
  2543. lastInChrBuf++;
  2544. }
  2545. //wrap buf index around to stay inside the ring buffer
  2546. if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
  2547. if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
  2548. }
  2549. else // not enough lines left in this slice -> load the rest in the buffer
  2550. {
  2551. /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
  2552. firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
  2553. lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
  2554. vChrBufSize, vLumBufSize);*/
  2555. //Do horizontal scaling
  2556. while(lastInLumBuf+1 < srcSliceY + srcSliceH)
  2557. {
  2558. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2559. lumBufIndex++;
  2560. ASSERT(lumBufIndex < 2*vLumBufSize)
  2561. ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
  2562. ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
  2563. RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2564. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2565. funnyYCode, c->srcFormat, formatConvBuffer,
  2566. c->lumMmx2Filter, c->lumMmx2FilterPos);
  2567. lastInLumBuf++;
  2568. }
  2569. while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
  2570. {
  2571. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2572. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2573. chrBufIndex++;
  2574. ASSERT(chrBufIndex < 2*vChrBufSize)
  2575. ASSERT(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH)
  2576. ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
  2577. if(!(isGray(srcFormat) || isGray(dstFormat)))
  2578. RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2579. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2580. funnyUVCode, c->srcFormat, formatConvBuffer,
  2581. c->chrMmx2Filter, c->chrMmx2FilterPos);
  2582. lastInChrBuf++;
  2583. }
  2584. //wrap buf index around to stay inside the ring buffer
  2585. if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
  2586. if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
  2587. break; //we can't output a dstY line so let's try with the next slice
  2588. }
  2589. #ifdef HAVE_MMX
  2590. b5Dither= dither8[dstY&1];
  2591. g6Dither= dither4[dstY&1];
  2592. g5Dither= dither8[dstY&1];
  2593. r5Dither= dither8[(dstY+1)&1];
  2594. #endif
  2595. if(dstY < dstH-2)
  2596. {
  2597. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2598. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2599. #ifdef HAVE_MMX
  2600. int i;
  2601. for(i=0; i<vLumFilterSize; i++)
  2602. {
  2603. lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
  2604. lumMmxFilter[4*i+2]=
  2605. lumMmxFilter[4*i+3]=
  2606. ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
  2607. }
  2608. for(i=0; i<vChrFilterSize; i++)
  2609. {
  2610. chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
  2611. chrMmxFilter[4*i+2]=
  2612. chrMmxFilter[4*i+3]=
  2613. ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
  2614. }
  2615. #endif
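/* Layout of the per-line MMX filter records built above: slot 0 holds the
 * source-line pointer (slot 1 is not written here) and slots 2 and 3 hold
 * the 16-bit vertical coefficient replicated into both halves of a dword,
 * since for a uint16_t v, v*0x10001 == v | (v<<16).  These records are
 * presumably consumed by the vertical scaling asm elsewhere in this file. */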
  2616. if(dstFormat == IMGFMT_NV12 || dstFormat == IMGFMT_NV21){
  2617. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2618. if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
  2619. RENAME(yuv2nv12X)(c,
  2620. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2621. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2622. dest, uDest, dstW, chrDstW, dstFormat);
  2623. }
  2624. else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 like
  2625. {
  2626. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2627. if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
  2628. if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12
  2629. {
  2630. int16_t *lumBuf = lumPixBuf[0];
  2631. int16_t *chrBuf= chrPixBuf[0];
  2632. RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
  2633. }
  2634. else //General YV12
  2635. {
  2636. RENAME(yuv2yuvX)(c,
  2637. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2638. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2639. dest, uDest, vDest, dstW, chrDstW);
  2640. }
  2641. }
  2642. else
  2643. {
  2644. ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2645. ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  2646. if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB
  2647. {
  2648. int chrAlpha= vChrFilter[2*dstY+1];
  2649. RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
  2650. dest, dstW, chrAlpha, dstFormat, flags, dstY);
  2651. }
  2652. else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB
  2653. {
  2654. int lumAlpha= vLumFilter[2*dstY+1];
  2655. int chrAlpha= vChrFilter[2*dstY+1];
  2656. RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
  2657. dest, dstW, lumAlpha, chrAlpha, dstY);
  2658. }
  2659. else //General RGB
  2660. {
  2661. RENAME(yuv2packedX)(c,
  2662. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2663. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2664. dest, dstW, dstY);
  2665. }
  2666. }
  2667. }
  2668. else // hmm looks like we can't use MMX here without overwriting this array's tail
  2669. {
  2670. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2671. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2672. if(dstFormat == IMGFMT_NV12 || dstFormat == IMGFMT_NV21){
  2673. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2674. if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
  2675. yuv2nv12XinC(
  2676. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2677. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2678. dest, uDest, dstW, chrDstW, dstFormat);
  2679. }
  2680. else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12
  2681. {
  2682. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2683. if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
  2684. yuv2yuvXinC(
  2685. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2686. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2687. dest, uDest, vDest, dstW, chrDstW);
  2688. }
  2689. else
  2690. {
  2691. ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2692. ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  2693. yuv2packedXinC(c,
  2694. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2695. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2696. dest, dstW, dstY);
  2697. }
  2698. }
  2699. }
  2700. #ifdef HAVE_MMX
  2701. __asm __volatile(SFENCE:::"memory");
  2702. __asm __volatile(EMMS:::"memory");
  2703. #endif
  2704. /* store changed local vars back in the context */
  2705. c->dstY= dstY;
  2706. c->lumBufIndex= lumBufIndex;
  2707. c->chrBufIndex= chrBufIndex;
  2708. c->lastInLumBuf= lastInLumBuf;
  2709. c->lastInChrBuf= lastInChrBuf;
  2710. return dstY - lastDstY;
  2711. }