  1. /*
  2. Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
  3. This program is free software; you can redistribute it and/or modify
  4. it under the terms of the GNU General Public License as published by
  5. the Free Software Foundation; either version 2 of the License, or
  6. (at your option) any later version.
  7. This program is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU General Public License for more details.
  11. You should have received a copy of the GNU General Public License
  12. along with this program; if not, write to the Free Software
  13. Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  14. */
  15. #include "asmalign.h"
  16. #undef REAL_MOVNTQ
  17. #undef MOVNTQ
  18. #undef PAVGB
  19. #undef PREFETCH
  20. #undef PREFETCHW
  21. #undef EMMS
  22. #undef SFENCE
  23. #ifdef HAVE_3DNOW
/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
  25. #define EMMS "femms"
  26. #else
  27. #define EMMS "emms"
  28. #endif
  29. #ifdef HAVE_3DNOW
  30. #define PREFETCH "prefetch"
  31. #define PREFETCHW "prefetchw"
  32. #elif defined ( HAVE_MMX2 )
  33. #define PREFETCH "prefetchnta"
  34. #define PREFETCHW "prefetcht0"
  35. #else
  36. #define PREFETCH "/nop"
  37. #define PREFETCHW "/nop"
  38. #endif
  39. #ifdef HAVE_MMX2
  40. #define SFENCE "sfence"
  41. #else
  42. #define SFENCE "/nop"
  43. #endif
  44. #ifdef HAVE_MMX2
  45. #define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
  46. #elif defined (HAVE_3DNOW)
  47. #define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
  48. #endif
  49. #ifdef HAVE_MMX2
  50. #define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
  51. #else
  52. #define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
  53. #endif
  54. #define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
  55. #ifdef HAVE_ALTIVEC
  56. #include "swscale_altivec_template.c"
  57. #endif
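/*
 * YSCALEYUV2YV12X: MMX vertical scaling of one plane.  It walks the
 * (source pointer, coefficient) list stored at `offset` inside the context,
 * accumulates the pmulhw products into 16-bit sums seeded with the rounder,
 * then shifts, packs and stores 8 output pixels per outer iteration.
 * A rough scalar sketch of the same computation (names illustrative only):
 *
 *     for (i = 0; i < width; i++) {
 *         int val = 1 << 18;                      // rounding term
 *         for (j = 0; j < filterSize; j++)
 *             val += src[j][i] * filter[j];
 *         val >>= 19;
 *         dest[i] = val < 0 ? 0 : (val > 255 ? 255 : val);
 *     }
 */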
  58. #define YSCALEYUV2YV12X(x, offset, dest, width) \
  59. asm volatile(\
  60. "xor %%"REG_a", %%"REG_a" \n\t"\
  61. "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
  62. "movq %%mm3, %%mm4 \n\t"\
  63. "lea " offset "(%0), %%"REG_d" \n\t"\
  64. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  65. ASMALIGN16 /* FIXME Unroll? */\
  66. "1: \n\t"\
  67. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  68. "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
  69. "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm5\n\t" /* srcData */\
  70. "add $16, %%"REG_d" \n\t"\
  71. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  72. "test %%"REG_S", %%"REG_S" \n\t"\
  73. "pmulhw %%mm0, %%mm2 \n\t"\
  74. "pmulhw %%mm0, %%mm5 \n\t"\
  75. "paddw %%mm2, %%mm3 \n\t"\
  76. "paddw %%mm5, %%mm4 \n\t"\
  77. " jnz 1b \n\t"\
  78. "psraw $3, %%mm3 \n\t"\
  79. "psraw $3, %%mm4 \n\t"\
  80. "packuswb %%mm4, %%mm3 \n\t"\
  81. MOVNTQ(%%mm3, (%1, %%REGa))\
  82. "add $8, %%"REG_a" \n\t"\
  83. "cmp %2, %%"REG_a" \n\t"\
  84. "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
  85. "movq %%mm3, %%mm4 \n\t"\
  86. "lea " offset "(%0), %%"REG_d" \n\t"\
  87. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  88. "jb 1b \n\t"\
  89. :: "r" (&c->redDither),\
  90. "r" (dest), "p" (width)\
  91. : "%"REG_a, "%"REG_d, "%"REG_S\
  92. );
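/*
 * YSCALEYUV2YV12X_ACCURATE: same job as above, but source words from two
 * lines are interleaved (punpcklwd/punpckhwd) and multiplied with pmaddwd so
 * the products accumulate in 32-bit precision; the rounder is added only
 * after packing back to 16 bits.  Selected when SWS_ACCURATE_RND is set.
 */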
  93. #define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
  94. asm volatile(\
  95. "lea " offset "(%0), %%"REG_d" \n\t"\
  96. "xor %%"REG_a", %%"REG_a" \n\t"\
  97. "pxor %%mm4, %%mm4 \n\t"\
  98. "pxor %%mm5, %%mm5 \n\t"\
  99. "pxor %%mm6, %%mm6 \n\t"\
  100. "pxor %%mm7, %%mm7 \n\t"\
  101. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  102. ASMALIGN16 \
  103. "1: \n\t"\
  104. "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm0\n\t" /* srcData */\
  105. "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
  106. "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
  107. "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm1\n\t" /* srcData */\
  108. "movq %%mm0, %%mm3 \n\t"\
  109. "punpcklwd %%mm1, %%mm0 \n\t"\
  110. "punpckhwd %%mm1, %%mm3 \n\t"\
  111. "movq 8(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
  112. "pmaddwd %%mm1, %%mm0 \n\t"\
  113. "pmaddwd %%mm1, %%mm3 \n\t"\
  114. "paddd %%mm0, %%mm4 \n\t"\
  115. "paddd %%mm3, %%mm5 \n\t"\
  116. "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm3\n\t" /* srcData */\
  117. "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
  118. "add $16, %%"REG_d" \n\t"\
  119. "test %%"REG_S", %%"REG_S" \n\t"\
  120. "movq %%mm2, %%mm0 \n\t"\
  121. "punpcklwd %%mm3, %%mm2 \n\t"\
  122. "punpckhwd %%mm3, %%mm0 \n\t"\
  123. "pmaddwd %%mm1, %%mm2 \n\t"\
  124. "pmaddwd %%mm1, %%mm0 \n\t"\
  125. "paddd %%mm2, %%mm6 \n\t"\
  126. "paddd %%mm0, %%mm7 \n\t"\
  127. " jnz 1b \n\t"\
  128. "psrad $16, %%mm4 \n\t"\
  129. "psrad $16, %%mm5 \n\t"\
  130. "psrad $16, %%mm6 \n\t"\
  131. "psrad $16, %%mm7 \n\t"\
  132. "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
  133. "packssdw %%mm5, %%mm4 \n\t"\
  134. "packssdw %%mm7, %%mm6 \n\t"\
  135. "paddw %%mm0, %%mm4 \n\t"\
  136. "paddw %%mm0, %%mm6 \n\t"\
  137. "psraw $3, %%mm4 \n\t"\
  138. "psraw $3, %%mm6 \n\t"\
  139. "packuswb %%mm6, %%mm4 \n\t"\
  140. MOVNTQ(%%mm4, (%1, %%REGa))\
  141. "add $8, %%"REG_a" \n\t"\
  142. "cmp %2, %%"REG_a" \n\t"\
  143. "lea " offset "(%0), %%"REG_d" \n\t"\
  144. "pxor %%mm4, %%mm4 \n\t"\
  145. "pxor %%mm5, %%mm5 \n\t"\
  146. "pxor %%mm6, %%mm6 \n\t"\
  147. "pxor %%mm7, %%mm7 \n\t"\
  148. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  149. "jb 1b \n\t"\
  150. :: "r" (&c->redDither),\
  151. "r" (dest), "p" (width)\
  152. : "%"REG_a, "%"REG_d, "%"REG_S\
  153. );
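/*
 * YSCALEYUV2YV121: trivial vertical case — a single source line, no filter.
 * The 16-bit intermediates carry 7 fractional bits, so they are simply
 * shifted down, packed with saturation and stored 8 pixels at a time.
 */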
  154. #define YSCALEYUV2YV121 \
  155. "mov %2, %%"REG_a" \n\t"\
  156. ASMALIGN16 /* FIXME Unroll? */\
  157. "1: \n\t"\
  158. "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
  159. "movq 8(%0, %%"REG_a", 2), %%mm1\n\t"\
  160. "psraw $7, %%mm0 \n\t"\
  161. "psraw $7, %%mm1 \n\t"\
  162. "packuswb %%mm1, %%mm0 \n\t"\
  163. MOVNTQ(%%mm0, (%1, %%REGa))\
  164. "add $8, %%"REG_a" \n\t"\
  165. "jnc 1b \n\t"
  166. /*
  167. :: "m" (-lumFilterSize), "m" (-chrFilterSize),
  168. "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
  169. "r" (dest), "m" (dstW),
  170. "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
  171. : "%eax", "%ebx", "%ecx", "%edx", "%esi"
  172. */
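/*
 * YSCALEYUV2PACKEDX: vertical scaling for packed output.  The first inner
 * loop runs the chroma filter and leaves U in %%mm3 and V in %%mm4, the
 * second runs the luma filter and leaves Y1 in %%mm1 and Y2 in %%mm7; one of
 * the YSCALEYUV2RGBX / WRITE* blocks below is pasted after it to convert and
 * store the pixels, and YSCALEYUV2PACKEDX_END closes the asm statement.
 */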
  173. #define YSCALEYUV2PACKEDX \
  174. asm volatile(\
  175. "xor %%"REG_a", %%"REG_a" \n\t"\
  176. ASMALIGN16\
  177. "nop \n\t"\
  178. "1: \n\t"\
  179. "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
  180. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  181. "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
  182. "movq %%mm3, %%mm4 \n\t"\
  183. ASMALIGN16\
  184. "2: \n\t"\
  185. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  186. "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
  187. "movq 4096(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
  188. "add $16, %%"REG_d" \n\t"\
  189. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  190. "pmulhw %%mm0, %%mm2 \n\t"\
  191. "pmulhw %%mm0, %%mm5 \n\t"\
  192. "paddw %%mm2, %%mm3 \n\t"\
  193. "paddw %%mm5, %%mm4 \n\t"\
  194. "test %%"REG_S", %%"REG_S" \n\t"\
  195. " jnz 2b \n\t"\
  196. \
  197. "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
  198. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  199. "movq "VROUNDER_OFFSET"(%0), %%mm1\n\t"\
  200. "movq %%mm1, %%mm7 \n\t"\
  201. ASMALIGN16\
  202. "2: \n\t"\
  203. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  204. "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\
  205. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
  206. "add $16, %%"REG_d" \n\t"\
  207. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  208. "pmulhw %%mm0, %%mm2 \n\t"\
  209. "pmulhw %%mm0, %%mm5 \n\t"\
  210. "paddw %%mm2, %%mm1 \n\t"\
  211. "paddw %%mm5, %%mm7 \n\t"\
  212. "test %%"REG_S", %%"REG_S" \n\t"\
  213. " jnz 2b \n\t"\
  214. #define YSCALEYUV2PACKEDX_END\
  215. :: "r" (&c->redDither), \
  216. "m" (dummy), "m" (dummy), "m" (dummy),\
  217. "r" (dest), "m" (dstW)\
  218. : "%"REG_a, "%"REG_d, "%"REG_S\
  219. );
  220. #define YSCALEYUV2PACKEDX_ACCURATE \
  221. asm volatile(\
  222. "xor %%"REG_a", %%"REG_a" \n\t"\
  223. ASMALIGN16\
  224. "nop \n\t"\
  225. "1: \n\t"\
  226. "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
  227. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  228. "pxor %%mm4, %%mm4 \n\t"\
  229. "pxor %%mm5, %%mm5 \n\t"\
  230. "pxor %%mm6, %%mm6 \n\t"\
  231. "pxor %%mm7, %%mm7 \n\t"\
  232. ASMALIGN16\
  233. "2: \n\t"\
  234. "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\
  235. "movq 4096(%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
  236. "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
  237. "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\
  238. "movq %%mm0, %%mm3 \n\t"\
  239. "punpcklwd %%mm1, %%mm0 \n\t"\
  240. "punpckhwd %%mm1, %%mm3 \n\t"\
  241. "movq 8(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
  242. "pmaddwd %%mm1, %%mm0 \n\t"\
  243. "pmaddwd %%mm1, %%mm3 \n\t"\
  244. "paddd %%mm0, %%mm4 \n\t"\
  245. "paddd %%mm3, %%mm5 \n\t"\
  246. "movq 4096(%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
  247. "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
  248. "add $16, %%"REG_d" \n\t"\
  249. "test %%"REG_S", %%"REG_S" \n\t"\
  250. "movq %%mm2, %%mm0 \n\t"\
  251. "punpcklwd %%mm3, %%mm2 \n\t"\
  252. "punpckhwd %%mm3, %%mm0 \n\t"\
  253. "pmaddwd %%mm1, %%mm2 \n\t"\
  254. "pmaddwd %%mm1, %%mm0 \n\t"\
  255. "paddd %%mm2, %%mm6 \n\t"\
  256. "paddd %%mm0, %%mm7 \n\t"\
  257. " jnz 2b \n\t"\
  258. "psrad $16, %%mm4 \n\t"\
  259. "psrad $16, %%mm5 \n\t"\
  260. "psrad $16, %%mm6 \n\t"\
  261. "psrad $16, %%mm7 \n\t"\
  262. "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
  263. "packssdw %%mm5, %%mm4 \n\t"\
  264. "packssdw %%mm7, %%mm6 \n\t"\
  265. "paddw %%mm0, %%mm4 \n\t"\
  266. "paddw %%mm0, %%mm6 \n\t"\
  267. "movq %%mm4, "U_TEMP"(%0) \n\t"\
  268. "movq %%mm6, "V_TEMP"(%0) \n\t"\
  269. \
  270. "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
  271. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  272. "pxor %%mm1, %%mm1 \n\t"\
  273. "pxor %%mm5, %%mm5 \n\t"\
  274. "pxor %%mm7, %%mm7 \n\t"\
  275. "pxor %%mm6, %%mm6 \n\t"\
  276. ASMALIGN16\
  277. "2: \n\t"\
  278. "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
  279. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
  280. "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
  281. "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
  282. "movq %%mm0, %%mm3 \n\t"\
  283. "punpcklwd %%mm4, %%mm0 \n\t"\
  284. "punpckhwd %%mm4, %%mm3 \n\t"\
  285. "movq 8(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\
  286. "pmaddwd %%mm4, %%mm0 \n\t"\
  287. "pmaddwd %%mm4, %%mm3 \n\t"\
  288. "paddd %%mm0, %%mm1 \n\t"\
  289. "paddd %%mm3, %%mm5 \n\t"\
  290. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
  291. "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
  292. "add $16, %%"REG_d" \n\t"\
  293. "test %%"REG_S", %%"REG_S" \n\t"\
  294. "movq %%mm2, %%mm0 \n\t"\
  295. "punpcklwd %%mm3, %%mm2 \n\t"\
  296. "punpckhwd %%mm3, %%mm0 \n\t"\
  297. "pmaddwd %%mm4, %%mm2 \n\t"\
  298. "pmaddwd %%mm4, %%mm0 \n\t"\
  299. "paddd %%mm2, %%mm7 \n\t"\
  300. "paddd %%mm0, %%mm6 \n\t"\
  301. " jnz 2b \n\t"\
  302. "psrad $16, %%mm1 \n\t"\
  303. "psrad $16, %%mm5 \n\t"\
  304. "psrad $16, %%mm7 \n\t"\
  305. "psrad $16, %%mm6 \n\t"\
  306. "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
  307. "packssdw %%mm5, %%mm1 \n\t"\
  308. "packssdw %%mm6, %%mm7 \n\t"\
  309. "paddw %%mm0, %%mm1 \n\t"\
  310. "paddw %%mm0, %%mm7 \n\t"\
  311. "movq "U_TEMP"(%0), %%mm3 \n\t"\
  312. "movq "V_TEMP"(%0), %%mm4 \n\t"\
  313. #define YSCALEYUV2RGBX \
  314. "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
  315. "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
  316. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  317. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  318. "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
  319. "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
  320. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  321. "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
  322. "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
  323. "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
  324. "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
  325. "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
  326. "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
  327. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  328. "paddw %%mm3, %%mm4 \n\t"\
  329. "movq %%mm2, %%mm0 \n\t"\
  330. "movq %%mm5, %%mm6 \n\t"\
  331. "movq %%mm4, %%mm3 \n\t"\
  332. "punpcklwd %%mm2, %%mm2 \n\t"\
  333. "punpcklwd %%mm5, %%mm5 \n\t"\
  334. "punpcklwd %%mm4, %%mm4 \n\t"\
  335. "paddw %%mm1, %%mm2 \n\t"\
  336. "paddw %%mm1, %%mm5 \n\t"\
  337. "paddw %%mm1, %%mm4 \n\t"\
  338. "punpckhwd %%mm0, %%mm0 \n\t"\
  339. "punpckhwd %%mm6, %%mm6 \n\t"\
  340. "punpckhwd %%mm3, %%mm3 \n\t"\
  341. "paddw %%mm7, %%mm0 \n\t"\
  342. "paddw %%mm7, %%mm6 \n\t"\
  343. "paddw %%mm7, %%mm3 \n\t"\
  344. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  345. "packuswb %%mm0, %%mm2 \n\t"\
  346. "packuswb %%mm6, %%mm5 \n\t"\
  347. "packuswb %%mm3, %%mm4 \n\t"\
  348. "pxor %%mm7, %%mm7 \n\t"
  349. #if 0
  350. #define FULL_YSCALEYUV2RGB \
  351. "pxor %%mm7, %%mm7 \n\t"\
  352. "movd %6, %%mm6 \n\t" /*yalpha1*/\
  353. "punpcklwd %%mm6, %%mm6 \n\t"\
  354. "punpcklwd %%mm6, %%mm6 \n\t"\
  355. "movd %7, %%mm5 \n\t" /*uvalpha1*/\
  356. "punpcklwd %%mm5, %%mm5 \n\t"\
  357. "punpcklwd %%mm5, %%mm5 \n\t"\
  358. "xor %%"REG_a", %%"REG_a" \n\t"\
  359. ASMALIGN16\
  360. "1: \n\t"\
  361. "movq (%0, %%"REG_a", 2), %%mm0 \n\t" /*buf0[eax]*/\
  362. "movq (%1, %%"REG_a", 2), %%mm1 \n\t" /*buf1[eax]*/\
  363. "movq (%2, %%"REG_a",2), %%mm2 \n\t" /* uvbuf0[eax]*/\
  364. "movq (%3, %%"REG_a",2), %%mm3 \n\t" /* uvbuf1[eax]*/\
  365. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  366. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  367. "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  368. "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  369. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  370. "movq 4096(%2, %%"REG_a",2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  371. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  372. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  373. "movq 4096(%3, %%"REG_a",2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
  374. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  375. "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  376. "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
  377. "psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\
  378. "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
  379. \
  380. \
  381. "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  382. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  383. "pmulhw "MANGLE(ubCoeff)", %%mm3\n\t"\
  384. "psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  385. "pmulhw "MANGLE(ugCoeff)", %%mm2\n\t"\
  386. "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  387. "psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\
  388. \
  389. \
  390. "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
  391. "pmulhw "MANGLE(vrCoeff)", %%mm0\n\t"\
  392. "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
  393. "paddw %%mm1, %%mm3 \n\t" /* B*/\
  394. "paddw %%mm1, %%mm0 \n\t" /* R*/\
  395. "packuswb %%mm3, %%mm3 \n\t"\
  396. \
  397. "packuswb %%mm0, %%mm0 \n\t"\
  398. "paddw %%mm4, %%mm2 \n\t"\
  399. "paddw %%mm2, %%mm1 \n\t" /* G*/\
  400. \
  401. "packuswb %%mm1, %%mm1 \n\t"
  402. #endif
  403. #define REAL_YSCALEYUV2PACKED(index, c) \
  404. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
  405. "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1\n\t"\
  406. "psraw $3, %%mm0 \n\t"\
  407. "psraw $3, %%mm1 \n\t"\
  408. "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c")\n\t"\
  409. "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c")\n\t"\
  410. "xor "#index", "#index" \n\t"\
  411. ASMALIGN16\
  412. "1: \n\t"\
  413. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  414. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  415. "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  416. "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  417. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  418. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  419. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
  420. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  421. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  422. "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  423. "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  424. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  425. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  426. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  427. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  428. "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
  429. "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
  430. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  431. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  432. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  433. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  434. "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  435. "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  436. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  437. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  438. #define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
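/*
 * REAL_YSCALEYUV2RGB: two-line (bilinear) vertical blend.  buf0/buf1 and
 * uvbuf0/uvbuf1 are mixed with the weights kept in the context's filter
 * slots, then the same YUV->RGB arithmetic as YSCALEYUV2RGBX is applied
 * inline, ending with the usual B/G/R register layout for the WRITE* macros.
 */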
  439. #define REAL_YSCALEYUV2RGB(index, c) \
  440. "xor "#index", "#index" \n\t"\
  441. ASMALIGN16\
  442. "1: \n\t"\
  443. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  444. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  445. "movq 4096(%2, "#index"), %%mm5\n\t" /* uvbuf0[eax+2048]*/\
  446. "movq 4096(%3, "#index"), %%mm4\n\t" /* uvbuf1[eax+2048]*/\
  447. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  448. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  449. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
  450. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  451. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  452. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  453. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  454. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  455. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  456. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  457. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  458. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  459. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  460. "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
  461. "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
  462. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  463. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  464. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  465. "movq 8(%0, "#index", 2), %%mm6\n\t" /*buf0[eax]*/\
  466. "movq 8(%1, "#index", 2), %%mm7\n\t" /*buf1[eax]*/\
  467. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  468. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  469. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  470. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  471. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  472. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  473. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  474. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  475. "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
  476. "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
  477. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  478. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  479. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  480. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  481. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  482. "paddw %%mm3, %%mm4 \n\t"\
  483. "movq %%mm2, %%mm0 \n\t"\
  484. "movq %%mm5, %%mm6 \n\t"\
  485. "movq %%mm4, %%mm3 \n\t"\
  486. "punpcklwd %%mm2, %%mm2 \n\t"\
  487. "punpcklwd %%mm5, %%mm5 \n\t"\
  488. "punpcklwd %%mm4, %%mm4 \n\t"\
  489. "paddw %%mm1, %%mm2 \n\t"\
  490. "paddw %%mm1, %%mm5 \n\t"\
  491. "paddw %%mm1, %%mm4 \n\t"\
  492. "punpckhwd %%mm0, %%mm0 \n\t"\
  493. "punpckhwd %%mm6, %%mm6 \n\t"\
  494. "punpckhwd %%mm3, %%mm3 \n\t"\
  495. "paddw %%mm7, %%mm0 \n\t"\
  496. "paddw %%mm7, %%mm6 \n\t"\
  497. "paddw %%mm7, %%mm3 \n\t"\
  498. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  499. "packuswb %%mm0, %%mm2 \n\t"\
  500. "packuswb %%mm6, %%mm5 \n\t"\
  501. "packuswb %%mm3, %%mm4 \n\t"\
  502. "pxor %%mm7, %%mm7 \n\t"
  503. #define YSCALEYUV2RGB(index, c) REAL_YSCALEYUV2RGB(index, c)
  504. #define REAL_YSCALEYUV2PACKED1(index, c) \
  505. "xor "#index", "#index" \n\t"\
  506. ASMALIGN16\
  507. "1: \n\t"\
  508. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  509. "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  510. "psraw $7, %%mm3 \n\t" \
  511. "psraw $7, %%mm4 \n\t" \
  512. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  513. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  514. "psraw $7, %%mm1 \n\t" \
  515. "psraw $7, %%mm7 \n\t" \
  516. #define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
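/*
 * The *1 variants read a single line per plane (no vertical blend); they are
 * used from yuv2packed1() when uvalpha is small enough that the second chroma
 * line can be ignored.  The *1b variants further down average the two chroma
 * lines instead of dropping one.
 */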
  517. #define REAL_YSCALEYUV2RGB1(index, c) \
  518. "xor "#index", "#index" \n\t"\
  519. ASMALIGN16\
  520. "1: \n\t"\
  521. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  522. "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  523. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  524. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  525. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  526. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  527. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  528. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  529. "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
  530. "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
  531. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  532. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  533. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  534. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  535. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  536. "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
  537. "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
  538. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  539. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  540. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  541. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  542. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  543. "paddw %%mm3, %%mm4 \n\t"\
  544. "movq %%mm2, %%mm0 \n\t"\
  545. "movq %%mm5, %%mm6 \n\t"\
  546. "movq %%mm4, %%mm3 \n\t"\
  547. "punpcklwd %%mm2, %%mm2 \n\t"\
  548. "punpcklwd %%mm5, %%mm5 \n\t"\
  549. "punpcklwd %%mm4, %%mm4 \n\t"\
  550. "paddw %%mm1, %%mm2 \n\t"\
  551. "paddw %%mm1, %%mm5 \n\t"\
  552. "paddw %%mm1, %%mm4 \n\t"\
  553. "punpckhwd %%mm0, %%mm0 \n\t"\
  554. "punpckhwd %%mm6, %%mm6 \n\t"\
  555. "punpckhwd %%mm3, %%mm3 \n\t"\
  556. "paddw %%mm7, %%mm0 \n\t"\
  557. "paddw %%mm7, %%mm6 \n\t"\
  558. "paddw %%mm7, %%mm3 \n\t"\
  559. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  560. "packuswb %%mm0, %%mm2 \n\t"\
  561. "packuswb %%mm6, %%mm5 \n\t"\
  562. "packuswb %%mm3, %%mm4 \n\t"\
  563. "pxor %%mm7, %%mm7 \n\t"
  564. #define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
  565. #define REAL_YSCALEYUV2PACKED1b(index, c) \
  566. "xor "#index", "#index" \n\t"\
  567. ASMALIGN16\
  568. "1: \n\t"\
  569. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  570. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  571. "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  572. "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  573. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  574. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  575. "psrlw $8, %%mm3 \n\t" \
  576. "psrlw $8, %%mm4 \n\t" \
  577. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  578. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  579. "psraw $7, %%mm1 \n\t" \
  580. "psraw $7, %%mm7 \n\t"
  581. #define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
  582. // do vertical chrominance interpolation
  583. #define REAL_YSCALEYUV2RGB1b(index, c) \
  584. "xor "#index", "#index" \n\t"\
  585. ASMALIGN16\
  586. "1: \n\t"\
  587. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  588. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  589. "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  590. "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  591. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  592. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  593. "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
  594. "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
  595. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  596. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  597. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  598. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  599. "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
  600. "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
  601. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  602. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  603. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  604. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  605. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  606. "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
  607. "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
  608. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  609. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  610. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  611. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  612. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  613. "paddw %%mm3, %%mm4 \n\t"\
  614. "movq %%mm2, %%mm0 \n\t"\
  615. "movq %%mm5, %%mm6 \n\t"\
  616. "movq %%mm4, %%mm3 \n\t"\
  617. "punpcklwd %%mm2, %%mm2 \n\t"\
  618. "punpcklwd %%mm5, %%mm5 \n\t"\
  619. "punpcklwd %%mm4, %%mm4 \n\t"\
  620. "paddw %%mm1, %%mm2 \n\t"\
  621. "paddw %%mm1, %%mm5 \n\t"\
  622. "paddw %%mm1, %%mm4 \n\t"\
  623. "punpckhwd %%mm0, %%mm0 \n\t"\
  624. "punpckhwd %%mm6, %%mm6 \n\t"\
  625. "punpckhwd %%mm3, %%mm3 \n\t"\
  626. "paddw %%mm7, %%mm0 \n\t"\
  627. "paddw %%mm7, %%mm6 \n\t"\
  628. "paddw %%mm7, %%mm3 \n\t"\
  629. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  630. "packuswb %%mm0, %%mm2 \n\t"\
  631. "packuswb %%mm6, %%mm5 \n\t"\
  632. "packuswb %%mm3, %%mm4 \n\t"\
  633. "pxor %%mm7, %%mm7 \n\t"
  634. #define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
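/*
 * The WRITEBGR* macros below take packed B (%%mm2), G (%%mm4), R (%%mm5) and
 * a zeroed %%mm7 and interleave them into the destination pixel layout,
 * storing with MOVNTQ and looping back to label 1 until `dstw` pixels have
 * been written; WRITEYUY2 does the same but packs the Y/U/V words directly.
 */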
  635. #define REAL_WRITEBGR32(dst, dstw, index) \
  636. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  637. "movq %%mm2, %%mm1 \n\t" /* B */\
  638. "movq %%mm5, %%mm6 \n\t" /* R */\
  639. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  640. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  641. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  642. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  643. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  644. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  645. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  646. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  647. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  648. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  649. \
  650. MOVNTQ(%%mm0, (dst, index, 4))\
  651. MOVNTQ(%%mm2, 8(dst, index, 4))\
  652. MOVNTQ(%%mm1, 16(dst, index, 4))\
  653. MOVNTQ(%%mm3, 24(dst, index, 4))\
  654. \
  655. "add $8, "#index" \n\t"\
  656. "cmp "#dstw", "#index" \n\t"\
  657. " jb 1b \n\t"
  658. #define WRITEBGR32(dst, dstw, index) REAL_WRITEBGR32(dst, dstw, index)
  659. #define REAL_WRITEBGR16(dst, dstw, index) \
  660. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  661. "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
  662. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  663. "psrlq $3, %%mm2 \n\t"\
  664. \
  665. "movq %%mm2, %%mm1 \n\t"\
  666. "movq %%mm4, %%mm3 \n\t"\
  667. \
  668. "punpcklbw %%mm7, %%mm3 \n\t"\
  669. "punpcklbw %%mm5, %%mm2 \n\t"\
  670. "punpckhbw %%mm7, %%mm4 \n\t"\
  671. "punpckhbw %%mm5, %%mm1 \n\t"\
  672. \
  673. "psllq $3, %%mm3 \n\t"\
  674. "psllq $3, %%mm4 \n\t"\
  675. \
  676. "por %%mm3, %%mm2 \n\t"\
  677. "por %%mm4, %%mm1 \n\t"\
  678. \
  679. MOVNTQ(%%mm2, (dst, index, 2))\
  680. MOVNTQ(%%mm1, 8(dst, index, 2))\
  681. \
  682. "add $8, "#index" \n\t"\
  683. "cmp "#dstw", "#index" \n\t"\
  684. " jb 1b \n\t"
  685. #define WRITEBGR16(dst, dstw, index) REAL_WRITEBGR16(dst, dstw, index)
  686. #define REAL_WRITEBGR15(dst, dstw, index) \
  687. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  688. "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
  689. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  690. "psrlq $3, %%mm2 \n\t"\
  691. "psrlq $1, %%mm5 \n\t"\
  692. \
  693. "movq %%mm2, %%mm1 \n\t"\
  694. "movq %%mm4, %%mm3 \n\t"\
  695. \
  696. "punpcklbw %%mm7, %%mm3 \n\t"\
  697. "punpcklbw %%mm5, %%mm2 \n\t"\
  698. "punpckhbw %%mm7, %%mm4 \n\t"\
  699. "punpckhbw %%mm5, %%mm1 \n\t"\
  700. \
  701. "psllq $2, %%mm3 \n\t"\
  702. "psllq $2, %%mm4 \n\t"\
  703. \
  704. "por %%mm3, %%mm2 \n\t"\
  705. "por %%mm4, %%mm1 \n\t"\
  706. \
  707. MOVNTQ(%%mm2, (dst, index, 2))\
  708. MOVNTQ(%%mm1, 8(dst, index, 2))\
  709. \
  710. "add $8, "#index" \n\t"\
  711. "cmp "#dstw", "#index" \n\t"\
  712. " jb 1b \n\t"
  713. #define WRITEBGR15(dst, dstw, index) REAL_WRITEBGR15(dst, dstw, index)
  714. #define WRITEBGR24OLD(dst, dstw, index) \
  715. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  716. "movq %%mm2, %%mm1 \n\t" /* B */\
  717. "movq %%mm5, %%mm6 \n\t" /* R */\
  718. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  719. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  720. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  721. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  722. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  723. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  724. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  725. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  726. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  727. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  728. \
  729. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  730. "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
  731. "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 0 */\
  732. "pand "MANGLE(bm11111000)", %%mm0\n\t" /* 00RGB000 0.5 */\
  733. "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
  734. "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
  735. "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
  736. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  737. \
  738. "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  739. "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
  740. "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
  741. "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
  742. "pand "MANGLE(bm00001111)", %%mm2\n\t" /* 0000RGBR 1 */\
  743. "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
  744. "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
  745. "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 2 */\
  746. "pand "MANGLE(bm11111000)", %%mm1\n\t" /* 00RGB000 2.5 */\
  747. "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
  748. "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
  749. "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
  750. "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
  751. \
  752. "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
  753. "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
  754. "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
  755. "pand "MANGLE(bm00000111)", %%mm5\n\t" /* 00000RGB 3 */\
  756. "pand "MANGLE(bm11111000)", %%mm3\n\t" /* 00RGB000 3.5 */\
  757. "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
  758. "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
  759. "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
  760. \
  761. MOVNTQ(%%mm0, (dst))\
  762. MOVNTQ(%%mm2, 8(dst))\
  763. MOVNTQ(%%mm3, 16(dst))\
  764. "add $24, "#dst" \n\t"\
  765. \
  766. "add $8, "#index" \n\t"\
  767. "cmp "#dstw", "#index" \n\t"\
  768. " jb 1b \n\t"
  769. #define WRITEBGR24MMX(dst, dstw, index) \
  770. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  771. "movq %%mm2, %%mm1 \n\t" /* B */\
  772. "movq %%mm5, %%mm6 \n\t" /* R */\
  773. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  774. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  775. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  776. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  777. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  778. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  779. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  780. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  781. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  782. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  783. \
  784. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  785. "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
  786. "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
  787. "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
  788. \
  789. "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
  790. "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
  791. "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
  792. "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
  793. \
  794. "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
  795. "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
  796. "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
  797. "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
  798. \
  799. "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
  800. "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
  801. "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
  802. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  803. MOVNTQ(%%mm0, (dst))\
  804. \
  805. "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
  806. "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
  807. "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
  808. "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
  809. MOVNTQ(%%mm6, 8(dst))\
  810. \
  811. "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
  812. "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
  813. "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
  814. MOVNTQ(%%mm5, 16(dst))\
  815. \
  816. "add $24, "#dst" \n\t"\
  817. \
  818. "add $8, "#index" \n\t"\
  819. "cmp "#dstw", "#index" \n\t"\
  820. " jb 1b \n\t"
  821. #define WRITEBGR24MMX2(dst, dstw, index) \
  822. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  823. "movq "MANGLE(M24A)", %%mm0 \n\t"\
  824. "movq "MANGLE(M24C)", %%mm7 \n\t"\
  825. "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
  826. "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
  827. "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
  828. \
  829. "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
  830. "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
  831. "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
  832. \
  833. "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
  834. "por %%mm1, %%mm6 \n\t"\
  835. "por %%mm3, %%mm6 \n\t"\
  836. MOVNTQ(%%mm6, (dst))\
  837. \
  838. "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
  839. "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
  840. "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
  841. "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
  842. \
  843. "pand "MANGLE(M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
  844. "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
  845. "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
  846. \
  847. "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
  848. "por %%mm3, %%mm6 \n\t"\
  849. MOVNTQ(%%mm6, 8(dst))\
  850. \
  851. "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
  852. "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
  853. "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
  854. \
  855. "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
  856. "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
  857. "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
  858. \
  859. "por %%mm1, %%mm3 \n\t"\
  860. "por %%mm3, %%mm6 \n\t"\
  861. MOVNTQ(%%mm6, 16(dst))\
  862. \
  863. "add $24, "#dst" \n\t"\
  864. \
  865. "add $8, "#index" \n\t"\
  866. "cmp "#dstw", "#index" \n\t"\
  867. " jb 1b \n\t"
  868. #ifdef HAVE_MMX2
  869. #undef WRITEBGR24
  870. #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
  871. #else
  872. #undef WRITEBGR24
  873. #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
  874. #endif
  875. #define REAL_WRITEYUY2(dst, dstw, index) \
  876. "packuswb %%mm3, %%mm3 \n\t"\
  877. "packuswb %%mm4, %%mm4 \n\t"\
  878. "packuswb %%mm7, %%mm1 \n\t"\
  879. "punpcklbw %%mm4, %%mm3 \n\t"\
  880. "movq %%mm1, %%mm7 \n\t"\
  881. "punpcklbw %%mm3, %%mm1 \n\t"\
  882. "punpckhbw %%mm3, %%mm7 \n\t"\
  883. \
  884. MOVNTQ(%%mm1, (dst, index, 2))\
  885. MOVNTQ(%%mm7, 8(dst, index, 2))\
  886. \
  887. "add $8, "#index" \n\t"\
  888. "cmp "#dstw", "#index" \n\t"\
  889. " jb 1b \n\t"
  890. #define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
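/**
 * vertical scale YV12 to YV12 (planar output); dispatches to the MMX, AltiVec
 * or C implementation depending on the build and on SWS_ACCURATE_RND.
 */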
  891. static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  892. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  893. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
  894. {
  895. #ifdef HAVE_MMX
  896. if(c->flags & SWS_ACCURATE_RND){
  897. if(uDest){
  898. YSCALEYUV2YV12X_ACCURATE( 0, CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
  899. YSCALEYUV2YV12X_ACCURATE(4096, CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
  900. }
  901. YSCALEYUV2YV12X_ACCURATE(0, LUM_MMX_FILTER_OFFSET, dest, dstW)
  902. }else{
  903. if(uDest){
  904. YSCALEYUV2YV12X( 0, CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
  905. YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
  906. }
  907. YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET, dest, dstW)
  908. }
  909. #else
  910. #ifdef HAVE_ALTIVEC
  911. yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
  912. chrFilter, chrSrc, chrFilterSize,
  913. dest, uDest, vDest, dstW, chrDstW);
  914. #else //HAVE_ALTIVEC
  915. yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
  916. chrFilter, chrSrc, chrFilterSize,
  917. dest, uDest, vDest, dstW, chrDstW);
  918. #endif //!HAVE_ALTIVEC
  919. #endif
  920. }
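/**
 * vertical scale YV12 to a semi-planar (NV12-style) destination; simply
 * forwards to the C implementation.
 */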
  921. static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  922. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  923. uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
  924. {
  925. yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
  926. chrFilter, chrSrc, chrFilterSize,
  927. dest, uDest, dstW, chrDstW, dstFormat);
  928. }
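/**
 * unscaled vertical path: the 16-bit intermediates are shifted down to 8 bits
 * and written out directly (the MMX path relies on packuswb saturation, the C
 * fallback clips explicitly).
 */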
  929. static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
  930. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
  931. {
  932. #ifdef HAVE_MMX
  933. if(uDest != NULL)
  934. {
  935. asm volatile(
  936. YSCALEYUV2YV121
  937. :: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
  938. "g" (-chrDstW)
  939. : "%"REG_a
  940. );
  941. asm volatile(
  942. YSCALEYUV2YV121
  943. :: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
  944. "g" (-chrDstW)
  945. : "%"REG_a
  946. );
  947. }
  948. asm volatile(
  949. YSCALEYUV2YV121
  950. :: "r" (lumSrc + dstW), "r" (dest + dstW),
  951. "g" (-dstW)
  952. : "%"REG_a
  953. );
  954. #else
  955. int i;
  956. for(i=0; i<dstW; i++)
  957. {
  958. int val= lumSrc[i]>>7;
  959. if(val&256){
  960. if(val<0) val=0;
  961. else val=255;
  962. }
  963. dest[i]= val;
  964. }
  965. if(uDest != NULL)
  966. for(i=0; i<chrDstW; i++)
  967. {
  968. int u=chrSrc[i]>>7;
  969. int v=chrSrc[i + 2048]>>7;
  970. if((u|v)&256){
  971. if(u<0) u=0;
  972. else if (u>255) u=255;
  973. if(v<0) v=0;
  974. else if (v>255) v=255;
  975. }
  976. uDest[i]= u;
  977. vDest[i]= v;
  978. }
  979. #endif
  980. }
  981. /**
  982. * vertical scale YV12 to RGB
  983. */
  984. static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  985. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  986. uint8_t *dest, long dstW, long dstY)
  987. {
  988. long dummy=0;
  989. #ifdef HAVE_MMX
  990. if(c->flags & SWS_ACCURATE_RND){
  991. switch(c->dstFormat){
  992. case IMGFMT_BGR32:
  993. YSCALEYUV2PACKEDX_ACCURATE
  994. YSCALEYUV2RGBX
  995. WRITEBGR32(%4, %5, %%REGa)
  996. YSCALEYUV2PACKEDX_END
  997. return;
  998. case IMGFMT_BGR24:
  999. YSCALEYUV2PACKEDX_ACCURATE
  1000. YSCALEYUV2RGBX
  1001. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t" //FIXME optimize
  1002. "add %4, %%"REG_b" \n\t"
  1003. WRITEBGR24(%%REGb, %5, %%REGa)
  1004. :: "r" (&c->redDither),
  1005. "m" (dummy), "m" (dummy), "m" (dummy),
  1006. "r" (dest), "m" (dstW)
  1007. : "%"REG_a, "%"REG_b, "%"REG_d, "%"REG_S //FIXME ebx
  1008. );
  1009. return;
  1010. case IMGFMT_BGR15:
  1011. YSCALEYUV2PACKEDX_ACCURATE
  1012. YSCALEYUV2RGBX
  1013. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1014. #ifdef DITHER1XBPP
  1015. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1016. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1017. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1018. #endif
  1019. WRITEBGR15(%4, %5, %%REGa)
  1020. YSCALEYUV2PACKEDX_END
  1021. return;
  1022. case IMGFMT_BGR16:
  1023. YSCALEYUV2PACKEDX_ACCURATE
  1024. YSCALEYUV2RGBX
  1025. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1026. #ifdef DITHER1XBPP
  1027. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1028. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1029. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1030. #endif
  1031. WRITEBGR16(%4, %5, %%REGa)
  1032. YSCALEYUV2PACKEDX_END
  1033. return;
  1034. case IMGFMT_YUY2:
  1035. YSCALEYUV2PACKEDX_ACCURATE
  1036. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1037. "psraw $3, %%mm3 \n\t"
  1038. "psraw $3, %%mm4 \n\t"
  1039. "psraw $3, %%mm1 \n\t"
  1040. "psraw $3, %%mm7 \n\t"
  1041. WRITEYUY2(%4, %5, %%REGa)
  1042. YSCALEYUV2PACKEDX_END
  1043. return;
  1044. }
  1045. }else{
  1046. switch(c->dstFormat)
  1047. {
  1048. case IMGFMT_BGR32:
  1049. YSCALEYUV2PACKEDX
  1050. YSCALEYUV2RGBX
  1051. WRITEBGR32(%4, %5, %%REGa)
  1052. YSCALEYUV2PACKEDX_END
  1053. return;
  1054. case IMGFMT_BGR24:
  1055. YSCALEYUV2PACKEDX
  1056. YSCALEYUV2RGBX
  1057. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t" //FIXME optimize
  1058. "add %4, %%"REG_b" \n\t"
  1059. WRITEBGR24(%%REGb, %5, %%REGa)
  1060. :: "r" (&c->redDither),
  1061. "m" (dummy), "m" (dummy), "m" (dummy),
  1062. "r" (dest), "m" (dstW)
  1063. : "%"REG_a, "%"REG_b, "%"REG_d, "%"REG_S //FIXME ebx
  1064. );
  1065. return;
  1066. case IMGFMT_BGR15:
  1067. YSCALEYUV2PACKEDX
  1068. YSCALEYUV2RGBX
  1069. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1070. #ifdef DITHER1XBPP
  1071. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1072. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1073. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1074. #endif
  1075. WRITEBGR15(%4, %5, %%REGa)
  1076. YSCALEYUV2PACKEDX_END
  1077. return;
  1078. case IMGFMT_BGR16:
  1079. YSCALEYUV2PACKEDX
  1080. YSCALEYUV2RGBX
  1081. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1082. #ifdef DITHER1XBPP
  1083. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1084. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1085. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1086. #endif
  1087. WRITEBGR16(%4, %5, %%REGa)
  1088. YSCALEYUV2PACKEDX_END
  1089. return;
  1090. case IMGFMT_YUY2:
  1091. YSCALEYUV2PACKEDX
  1092. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1093. "psraw $3, %%mm3 \n\t"
  1094. "psraw $3, %%mm4 \n\t"
  1095. "psraw $3, %%mm1 \n\t"
  1096. "psraw $3, %%mm7 \n\t"
  1097. WRITEYUY2(%4, %5, %%REGa)
  1098. YSCALEYUV2PACKEDX_END
  1099. return;
  1100. }
  1101. }
  1102. #endif
  1103. #ifdef HAVE_ALTIVEC
  1104. /* The following list of supported dstFormat values should
  1105. match what's found in the body of altivec_yuv2packedX() */
  1106. if(c->dstFormat==IMGFMT_ABGR || c->dstFormat==IMGFMT_BGRA ||
  1107. c->dstFormat==IMGFMT_BGR24 || c->dstFormat==IMGFMT_RGB24 ||
  1108. c->dstFormat==IMGFMT_RGBA || c->dstFormat==IMGFMT_ARGB)
  1109. altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize,
  1110. chrFilter, chrSrc, chrFilterSize,
  1111. dest, dstW, dstY);
  1112. else
  1113. #endif
  1114. yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
  1115. chrFilter, chrSrc, chrFilterSize,
  1116. dest, dstW, dstY);
  1117. }
  1118. /**
  1119. * vertical bilinear scale YV12 to RGB
  1120. */
  1121. static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
  1122. uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
  1123. {
  1124. int yalpha1=yalpha^4095;
  1125. int uvalpha1=uvalpha^4095;
  1126. int i;
  1127. #if 0 //isn't used
  1128. if(flags&SWS_FULL_CHR_H_INT)
  1129. {
  1130. switch(dstFormat)
  1131. {
  1132. #ifdef HAVE_MMX
  1133. case IMGFMT_BGR32:
  1134. asm volatile(
  1135. FULL_YSCALEYUV2RGB
  1136. "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
  1137. "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
  1138. "movq %%mm3, %%mm1 \n\t"
  1139. "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
  1140. "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
  1141. MOVNTQ(%%mm3, (%4, %%REGa, 4))
  1142. MOVNTQ(%%mm1, 8(%4, %%REGa, 4))
  1143. "add $4, %%"REG_a" \n\t"
  1144. "cmp %5, %%"REG_a" \n\t"
  1145. " jb 1b \n\t"
  1146. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" ((long)dstW),
  1147. "m" (yalpha1), "m" (uvalpha1)
  1148. : "%"REG_a
  1149. );
  1150. break;
  1151. case IMGFMT_BGR24:
  1152. asm volatile(
  1153. FULL_YSCALEYUV2RGB
  1154. // lsb ... msb
  1155. "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
  1156. "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
  1157. "movq %%mm3, %%mm1 \n\t"
  1158. "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
  1159. "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
  1160. "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
  1161. "psrlq $8, %%mm3 \n\t" // GR0BGR00
  1162. "pand "MANGLE(bm00000111)", %%mm2\n\t" // BGR00000
  1163. "pand "MANGLE(bm11111000)", %%mm3\n\t" // 000BGR00
  1164. "por %%mm2, %%mm3 \n\t" // BGRBGR00
  1165. "movq %%mm1, %%mm2 \n\t"
  1166. "psllq $48, %%mm1 \n\t" // 000000BG
  1167. "por %%mm1, %%mm3 \n\t" // BGRBGRBG
  1168. "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
  1169. "psrld $16, %%mm2 \n\t" // R000R000
  1170. "psrlq $24, %%mm1 \n\t" // 0BGR0000
  1171. "por %%mm2, %%mm1 \n\t" // RBGRR000
  1172. "mov %4, %%"REG_b" \n\t"
  1173. "add %%"REG_a", %%"REG_b" \n\t"
  1174. #ifdef HAVE_MMX2
  1175. //FIXME Alignment
  1176. "movntq %%mm3, (%%"REG_b", %%"REG_a", 2)\n\t"
  1177. "movntq %%mm1, 8(%%"REG_b", %%"REG_a", 2)\n\t"
  1178. #else
  1179. "movd %%mm3, (%%"REG_b", %%"REG_a", 2) \n\t"
  1180. "psrlq $32, %%mm3 \n\t"
  1181. "movd %%mm3, 4(%%"REG_b", %%"REG_a", 2) \n\t"
  1182. "movd %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t"
  1183. #endif
  1184. "add $4, %%"REG_a" \n\t"
  1185. "cmp %5, %%"REG_a" \n\t"
  1186. " jb 1b \n\t"
  1187. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
  1188. "m" (yalpha1), "m" (uvalpha1)
  1189. : "%"REG_a, "%"REG_b
  1190. );
  1191. break;
  1192. case IMGFMT_BGR15:
  1193. asm volatile(
  1194. FULL_YSCALEYUV2RGB
  1195. #ifdef DITHER1XBPP
  1196. "paddusb "MANGLE(g5Dither)", %%mm1\n\t"
  1197. "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
  1198. "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
  1199. #endif
  1200. "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
  1201. "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
  1202. "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
  1203. "psrlw $3, %%mm3 \n\t"
  1204. "psllw $2, %%mm1 \n\t"
  1205. "psllw $7, %%mm0 \n\t"
  1206. "pand "MANGLE(g15Mask)", %%mm1 \n\t"
  1207. "pand "MANGLE(r15Mask)", %%mm0 \n\t"
  1208. "por %%mm3, %%mm1 \n\t"
  1209. "por %%mm1, %%mm0 \n\t"
  1210. MOVNTQ(%%mm0, (%4, %%REGa, 2))
  1211. "add $4, %%"REG_a" \n\t"
  1212. "cmp %5, %%"REG_a" \n\t"
  1213. " jb 1b \n\t"
  1214. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1215. "m" (yalpha1), "m" (uvalpha1)
  1216. : "%"REG_a
  1217. );
  1218. break;
  1219. case IMGFMT_BGR16:
  1220. asm volatile(
  1221. FULL_YSCALEYUV2RGB
  1222. #ifdef DITHER1XBPP
  1223. "paddusb "MANGLE(g6Dither)", %%mm1\n\t"
  1224. "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
  1225. "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
  1226. #endif
  1227. "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
  1228. "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
  1229. "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
  1230. "psrlw $3, %%mm3 \n\t"
  1231. "psllw $3, %%mm1 \n\t"
  1232. "psllw $8, %%mm0 \n\t"
  1233. "pand "MANGLE(g16Mask)", %%mm1 \n\t"
  1234. "pand "MANGLE(r16Mask)", %%mm0 \n\t"
  1235. "por %%mm3, %%mm1 \n\t"
  1236. "por %%mm1, %%mm0 \n\t"
  1237. MOVNTQ(%%mm0, (%4, %%REGa, 2))
  1238. "add $4, %%"REG_a" \n\t"
  1239. "cmp %5, %%"REG_a" \n\t"
  1240. " jb 1b \n\t"
  1241. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1242. "m" (yalpha1), "m" (uvalpha1)
  1243. : "%"REG_a
  1244. );
  1245. break;
  1246. #endif
  1247. case IMGFMT_RGB32:
  1248. #ifndef HAVE_MMX
  1249. case IMGFMT_BGR32:
  1250. #endif
  1251. if(dstFormat==IMGFMT_BGR32)
  1252. {
  1253. int i;
  1254. #ifdef WORDS_BIGENDIAN
  1255. dest++;
  1256. #endif
  1257. for(i=0;i<dstW;i++){
  1258. // vertical linear interpolation && yuv2rgb in a single step:
  1259. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1260. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1261. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1262. dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
  1263. dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
  1264. dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
  1265. dest+= 4;
  1266. }
  1267. }
  1268. else if(dstFormat==IMGFMT_BGR24)
  1269. {
  1270. int i;
  1271. for(i=0;i<dstW;i++){
  1272. // vertical linear interpolation && yuv2rgb in a single step:
  1273. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1274. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1275. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1276. dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
  1277. dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
  1278. dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
  1279. dest+= 3;
  1280. }
  1281. }
  1282. else if(dstFormat==IMGFMT_BGR16)
  1283. {
  1284. int i;
  1285. for(i=0;i<dstW;i++){
  1286. // vertical linear interpolation && yuv2rgb in a single step:
  1287. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1288. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1289. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1290. ((uint16_t*)dest)[i] =
  1291. clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
  1292. clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
  1293. clip_table16r[(Y + yuvtab_3343[V]) >>13];
  1294. }
  1295. }
  1296. else if(dstFormat==IMGFMT_BGR15)
  1297. {
  1298. int i;
  1299. for(i=0;i<dstW;i++){
  1300. // vertical linear interpolation && yuv2rgb in a single step:
  1301. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1302. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1303. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1304. ((uint16_t*)dest)[i] =
  1305. clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
  1306. clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
  1307. clip_table15r[(Y + yuvtab_3343[V]) >>13];
  1308. }
  1309. }
  1310. }//FULL_UV_IPOL
  1311. else
  1312. {
  1313. #endif // if 0
  1314. #ifdef HAVE_MMX
  1315. switch(c->dstFormat)
  1316. {
  1317. //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
  1318. case IMGFMT_BGR32:
  1319. asm volatile(
  1320. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1321. "mov %4, %%"REG_b" \n\t"
  1322. "push %%"REG_BP" \n\t"
  1323. YSCALEYUV2RGB(%%REGBP, %5)
  1324. WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
  1325. "pop %%"REG_BP" \n\t"
  1326. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1327. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1328. "a" (&c->redDither)
  1329. );
  1330. return;
  1331. case IMGFMT_BGR24:
  1332. asm volatile(
  1333. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1334. "mov %4, %%"REG_b" \n\t"
  1335. "push %%"REG_BP" \n\t"
  1336. YSCALEYUV2RGB(%%REGBP, %5)
  1337. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1338. "pop %%"REG_BP" \n\t"
  1339. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1340. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1341. "a" (&c->redDither)
  1342. );
  1343. return;
  1344. case IMGFMT_BGR15:
  1345. asm volatile(
  1346. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1347. "mov %4, %%"REG_b" \n\t"
  1348. "push %%"REG_BP" \n\t"
  1349. YSCALEYUV2RGB(%%REGBP, %5)
  1350. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1351. #ifdef DITHER1XBPP
  1352. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1353. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1354. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1355. #endif
  1356. WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
  1357. "pop %%"REG_BP" \n\t"
  1358. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1359. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1360. "a" (&c->redDither)
  1361. );
  1362. return;
  1363. case IMGFMT_BGR16:
  1364. asm volatile(
  1365. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1366. "mov %4, %%"REG_b" \n\t"
  1367. "push %%"REG_BP" \n\t"
  1368. YSCALEYUV2RGB(%%REGBP, %5)
  1369. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1370. #ifdef DITHER1XBPP
  1371. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1372. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1373. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1374. #endif
  1375. WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
  1376. "pop %%"REG_BP" \n\t"
  1377. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1378. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1379. "a" (&c->redDither)
  1380. );
  1381. return;
  1382. case IMGFMT_YUY2:
  1383. asm volatile(
  1384. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1385. "mov %4, %%"REG_b" \n\t"
  1386. "push %%"REG_BP" \n\t"
  1387. YSCALEYUV2PACKED(%%REGBP, %5)
  1388. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1389. "pop %%"REG_BP" \n\t"
  1390. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1391. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1392. "a" (&c->redDither)
  1393. );
  1394. return;
  1395. default: break;
  1396. }
  1397. #endif //HAVE_MMX
  1398. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C)
  1399. }
  1400. /**
  1401. * YV12 to RGB without scaling or interpolating
  1402. */
  1403. static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
  1404. uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
  1405. {
  1406. const int yalpha1=0;
  1407. int i;
  1408. uint16_t *buf1= buf0; //FIXME needed for the rgb1/bgr1
  1409. const int yalpha= 4096; //FIXME ...
  1410. if(flags&SWS_FULL_CHR_H_INT)
  1411. {
  1412. RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
  1413. return;
  1414. }
  1415. #ifdef HAVE_MMX
if( uvalpha < 2048 ) // note this is not correct (shifts chrominance by 0.5 pixels) but it's a bit faster
  1417. {
  1418. switch(dstFormat)
  1419. {
  1420. case IMGFMT_BGR32:
  1421. asm volatile(
  1422. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1423. "mov %4, %%"REG_b" \n\t"
  1424. "push %%"REG_BP" \n\t"
  1425. YSCALEYUV2RGB1(%%REGBP, %5)
  1426. WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
  1427. "pop %%"REG_BP" \n\t"
  1428. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1429. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1430. "a" (&c->redDither)
  1431. );
  1432. return;
  1433. case IMGFMT_BGR24:
  1434. asm volatile(
  1435. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1436. "mov %4, %%"REG_b" \n\t"
  1437. "push %%"REG_BP" \n\t"
  1438. YSCALEYUV2RGB1(%%REGBP, %5)
  1439. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1440. "pop %%"REG_BP" \n\t"
  1441. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1442. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1443. "a" (&c->redDither)
  1444. );
  1445. return;
  1446. case IMGFMT_BGR15:
  1447. asm volatile(
  1448. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1449. "mov %4, %%"REG_b" \n\t"
  1450. "push %%"REG_BP" \n\t"
  1451. YSCALEYUV2RGB1(%%REGBP, %5)
  1452. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1453. #ifdef DITHER1XBPP
  1454. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1455. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1456. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1457. #endif
  1458. WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
  1459. "pop %%"REG_BP" \n\t"
  1460. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1461. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1462. "a" (&c->redDither)
  1463. );
  1464. return;
  1465. case IMGFMT_BGR16:
  1466. asm volatile(
  1467. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1468. "mov %4, %%"REG_b" \n\t"
  1469. "push %%"REG_BP" \n\t"
  1470. YSCALEYUV2RGB1(%%REGBP, %5)
  1471. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1472. #ifdef DITHER1XBPP
  1473. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1474. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1475. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1476. #endif
  1477. WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
  1478. "pop %%"REG_BP" \n\t"
  1479. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1480. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1481. "a" (&c->redDither)
  1482. );
  1483. return;
  1484. case IMGFMT_YUY2:
  1485. asm volatile(
  1486. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1487. "mov %4, %%"REG_b" \n\t"
  1488. "push %%"REG_BP" \n\t"
  1489. YSCALEYUV2PACKED1(%%REGBP, %5)
  1490. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1491. "pop %%"REG_BP" \n\t"
  1492. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1493. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1494. "a" (&c->redDither)
  1495. );
  1496. return;
  1497. }
  1498. }
  1499. else
  1500. {
  1501. switch(dstFormat)
  1502. {
  1503. case IMGFMT_BGR32:
  1504. asm volatile(
  1505. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1506. "mov %4, %%"REG_b" \n\t"
  1507. "push %%"REG_BP" \n\t"
  1508. YSCALEYUV2RGB1b(%%REGBP, %5)
  1509. WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
  1510. "pop %%"REG_BP" \n\t"
  1511. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1512. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1513. "a" (&c->redDither)
  1514. );
  1515. return;
  1516. case IMGFMT_BGR24:
  1517. asm volatile(
  1518. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1519. "mov %4, %%"REG_b" \n\t"
  1520. "push %%"REG_BP" \n\t"
  1521. YSCALEYUV2RGB1b(%%REGBP, %5)
  1522. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1523. "pop %%"REG_BP" \n\t"
  1524. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1525. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1526. "a" (&c->redDither)
  1527. );
  1528. return;
  1529. case IMGFMT_BGR15:
  1530. asm volatile(
  1531. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1532. "mov %4, %%"REG_b" \n\t"
  1533. "push %%"REG_BP" \n\t"
  1534. YSCALEYUV2RGB1b(%%REGBP, %5)
  1535. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1536. #ifdef DITHER1XBPP
  1537. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1538. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1539. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1540. #endif
  1541. WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
  1542. "pop %%"REG_BP" \n\t"
  1543. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1544. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1545. "a" (&c->redDither)
  1546. );
  1547. return;
  1548. case IMGFMT_BGR16:
  1549. asm volatile(
  1550. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1551. "mov %4, %%"REG_b" \n\t"
  1552. "push %%"REG_BP" \n\t"
  1553. YSCALEYUV2RGB1b(%%REGBP, %5)
  1554. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1555. #ifdef DITHER1XBPP
  1556. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1557. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1558. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1559. #endif
  1560. WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
  1561. "pop %%"REG_BP" \n\t"
  1562. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1563. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1564. "a" (&c->redDither)
  1565. );
  1566. return;
  1567. case IMGFMT_YUY2:
  1568. asm volatile(
  1569. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1570. "mov %4, %%"REG_b" \n\t"
  1571. "push %%"REG_BP" \n\t"
  1572. YSCALEYUV2PACKED1b(%%REGBP, %5)
  1573. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1574. "pop %%"REG_BP" \n\t"
  1575. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1576. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1577. "a" (&c->redDither)
  1578. );
  1579. return;
  1580. }
  1581. }
  1582. #endif
  1583. if( uvalpha < 2048 )
  1584. {
  1585. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C)
  1586. }else{
  1587. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C)
  1588. }
  1589. }
1590. //FIXME yuy2* can read up to 7 samples too much
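/* YUY2 stores pixels as Y0 U Y1 V, so luma is every even byte: the scalar fallback simply
 * copies src[2*i], and the MMX path masks with bm01010101 and packs two 8-byte loads into
 * 8 luma samples per iteration. */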
  1591. static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width)
  1592. {
  1593. #ifdef HAVE_MMX
  1594. asm volatile(
  1595. "movq "MANGLE(bm01010101)", %%mm2\n\t"
  1596. "mov %0, %%"REG_a" \n\t"
  1597. "1: \n\t"
  1598. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1599. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1600. "pand %%mm2, %%mm0 \n\t"
  1601. "pand %%mm2, %%mm1 \n\t"
  1602. "packuswb %%mm1, %%mm0 \n\t"
  1603. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1604. "add $8, %%"REG_a" \n\t"
  1605. " js 1b \n\t"
  1606. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1607. : "%"REG_a
  1608. );
  1609. #else
  1610. int i;
  1611. for(i=0; i<width; i++)
  1612. dst[i]= src[2*i];
  1613. #endif
  1614. }
  1615. static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
  1616. {
  1617. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1618. asm volatile(
  1619. "movq "MANGLE(bm01010101)", %%mm4\n\t"
  1620. "mov %0, %%"REG_a" \n\t"
  1621. "1: \n\t"
  1622. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1623. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1624. "movq (%2, %%"REG_a",4), %%mm2 \n\t"
  1625. "movq 8(%2, %%"REG_a",4), %%mm3 \n\t"
  1626. PAVGB(%%mm2, %%mm0)
  1627. PAVGB(%%mm3, %%mm1)
  1628. "psrlw $8, %%mm0 \n\t"
  1629. "psrlw $8, %%mm1 \n\t"
  1630. "packuswb %%mm1, %%mm0 \n\t"
  1631. "movq %%mm0, %%mm1 \n\t"
  1632. "psrlw $8, %%mm0 \n\t"
  1633. "pand %%mm4, %%mm1 \n\t"
  1634. "packuswb %%mm0, %%mm0 \n\t"
  1635. "packuswb %%mm1, %%mm1 \n\t"
  1636. "movd %%mm0, (%4, %%"REG_a") \n\t"
  1637. "movd %%mm1, (%3, %%"REG_a") \n\t"
  1638. "add $4, %%"REG_a" \n\t"
  1639. " js 1b \n\t"
  1640. : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
  1641. : "%"REG_a
  1642. );
  1643. #else
  1644. int i;
  1645. for(i=0; i<width; i++)
  1646. {
  1647. dstU[i]= (src1[4*i + 1] + src2[4*i + 1])>>1;
  1648. dstV[i]= (src1[4*i + 3] + src2[4*i + 3])>>1;
  1649. }
  1650. #endif
  1651. }
1652. //this is almost identical to the previous one, and exists only because yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses
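/* UYVY is the byte-swapped layout (U Y0 V Y1): luma sits at src[2*i+1] and chroma at the even
 * offsets, which is exactly what the fallbacks below read. */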
  1653. static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width)
  1654. {
  1655. #ifdef HAVE_MMX
  1656. asm volatile(
  1657. "mov %0, %%"REG_a" \n\t"
  1658. "1: \n\t"
  1659. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1660. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1661. "psrlw $8, %%mm0 \n\t"
  1662. "psrlw $8, %%mm1 \n\t"
  1663. "packuswb %%mm1, %%mm0 \n\t"
  1664. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1665. "add $8, %%"REG_a" \n\t"
  1666. " js 1b \n\t"
  1667. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1668. : "%"REG_a
  1669. );
  1670. #else
  1671. int i;
  1672. for(i=0; i<width; i++)
  1673. dst[i]= src[2*i+1];
  1674. #endif
  1675. }
  1676. static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
  1677. {
  1678. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1679. asm volatile(
  1680. "movq "MANGLE(bm01010101)", %%mm4\n\t"
  1681. "mov %0, %%"REG_a" \n\t"
  1682. "1: \n\t"
  1683. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1684. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1685. "movq (%2, %%"REG_a",4), %%mm2 \n\t"
  1686. "movq 8(%2, %%"REG_a",4), %%mm3 \n\t"
  1687. PAVGB(%%mm2, %%mm0)
  1688. PAVGB(%%mm3, %%mm1)
  1689. "pand %%mm4, %%mm0 \n\t"
  1690. "pand %%mm4, %%mm1 \n\t"
  1691. "packuswb %%mm1, %%mm0 \n\t"
  1692. "movq %%mm0, %%mm1 \n\t"
  1693. "psrlw $8, %%mm0 \n\t"
  1694. "pand %%mm4, %%mm1 \n\t"
  1695. "packuswb %%mm0, %%mm0 \n\t"
  1696. "packuswb %%mm1, %%mm1 \n\t"
  1697. "movd %%mm0, (%4, %%"REG_a") \n\t"
  1698. "movd %%mm1, (%3, %%"REG_a") \n\t"
  1699. "add $4, %%"REG_a" \n\t"
  1700. " js 1b \n\t"
  1701. : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
  1702. : "%"REG_a
  1703. );
  1704. #else
  1705. int i;
  1706. for(i=0; i<width; i++)
  1707. {
  1708. dstU[i]= (src1[4*i + 0] + src2[4*i + 0])>>1;
  1709. dstV[i]= (src1[4*i + 2] + src2[4*i + 2])>>1;
  1710. }
  1711. #endif
  1712. }
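/* The bgr/rgb ToY converters below all evaluate the same fixed-point expression, shown here
 * for reference:
 *
 *     Y = (RY*r + GY*g + BY*b + (33 << (RGB2YUV_SHIFT-1))) >> RGB2YUV_SHIFT
 *
 * The constant 33 << (RGB2YUV_SHIFT-1) equals (16 << RGB2YUV_SHIFT) + (1 << (RGB2YUV_SHIFT-1)),
 * so the result is the weighted sum rounded to nearest plus the usual +16 luma offset. */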
  1713. static inline void RENAME(bgr32ToY)(uint8_t *dst, uint8_t *src, int width)
  1714. {
  1715. int i;
  1716. for(i=0; i<width; i++)
  1717. {
  1718. int b= ((uint32_t*)src)[i]&0xFF;
  1719. int g= (((uint32_t*)src)[i]>>8)&0xFF;
  1720. int r= (((uint32_t*)src)[i]>>16)&0xFF;
  1721. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  1722. }
  1723. }
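/* bgr32ToUV averages a 2x2 block (two pixels from each of the two source lines) before the
 * U/V transform. The four BGRA values are summed SWAR-style: the 0xFF00FF mask keeps the B and
 * R bytes so both accumulate in one int, 0x00FF00 keeps G; a sum of four 8-bit samples needs at
 * most 10 bits, hence the &0x3FF extraction and the extra +2 in the final shift. */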
  1724. static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1725. {
  1726. int i;
  1727. for(i=0; i<width; i++)
  1728. {
  1729. const int a= ((uint32_t*)src1)[2*i+0];
  1730. const int e= ((uint32_t*)src1)[2*i+1];
  1731. const int c= ((uint32_t*)src2)[2*i+0];
  1732. const int d= ((uint32_t*)src2)[2*i+1];
  1733. const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF);
  1734. const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00);
  1735. const int b= l&0x3FF;
  1736. const int g= h>>8;
  1737. const int r= l>>16;
  1738. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1739. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1740. }
  1741. }
  1742. static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, long width)
  1743. {
  1744. #ifdef HAVE_MMX
  1745. asm volatile(
  1746. "mov %2, %%"REG_a" \n\t"
  1747. "movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t"
  1748. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1749. "pxor %%mm7, %%mm7 \n\t"
  1750. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t"
  1751. ASMALIGN16
  1752. "1: \n\t"
  1753. PREFETCH" 64(%0, %%"REG_b") \n\t"
  1754. "movd (%0, %%"REG_b"), %%mm0 \n\t"
  1755. "movd 3(%0, %%"REG_b"), %%mm1 \n\t"
  1756. "punpcklbw %%mm7, %%mm0 \n\t"
  1757. "punpcklbw %%mm7, %%mm1 \n\t"
  1758. "movd 6(%0, %%"REG_b"), %%mm2 \n\t"
  1759. "movd 9(%0, %%"REG_b"), %%mm3 \n\t"
  1760. "punpcklbw %%mm7, %%mm2 \n\t"
  1761. "punpcklbw %%mm7, %%mm3 \n\t"
  1762. "pmaddwd %%mm6, %%mm0 \n\t"
  1763. "pmaddwd %%mm6, %%mm1 \n\t"
  1764. "pmaddwd %%mm6, %%mm2 \n\t"
  1765. "pmaddwd %%mm6, %%mm3 \n\t"
  1766. #ifndef FAST_BGR2YV12
  1767. "psrad $8, %%mm0 \n\t"
  1768. "psrad $8, %%mm1 \n\t"
  1769. "psrad $8, %%mm2 \n\t"
  1770. "psrad $8, %%mm3 \n\t"
  1771. #endif
  1772. "packssdw %%mm1, %%mm0 \n\t"
  1773. "packssdw %%mm3, %%mm2 \n\t"
  1774. "pmaddwd %%mm5, %%mm0 \n\t"
  1775. "pmaddwd %%mm5, %%mm2 \n\t"
  1776. "packssdw %%mm2, %%mm0 \n\t"
  1777. "psraw $7, %%mm0 \n\t"
  1778. "movd 12(%0, %%"REG_b"), %%mm4 \n\t"
  1779. "movd 15(%0, %%"REG_b"), %%mm1 \n\t"
  1780. "punpcklbw %%mm7, %%mm4 \n\t"
  1781. "punpcklbw %%mm7, %%mm1 \n\t"
  1782. "movd 18(%0, %%"REG_b"), %%mm2 \n\t"
  1783. "movd 21(%0, %%"REG_b"), %%mm3 \n\t"
  1784. "punpcklbw %%mm7, %%mm2 \n\t"
  1785. "punpcklbw %%mm7, %%mm3 \n\t"
  1786. "pmaddwd %%mm6, %%mm4 \n\t"
  1787. "pmaddwd %%mm6, %%mm1 \n\t"
  1788. "pmaddwd %%mm6, %%mm2 \n\t"
  1789. "pmaddwd %%mm6, %%mm3 \n\t"
  1790. #ifndef FAST_BGR2YV12
  1791. "psrad $8, %%mm4 \n\t"
  1792. "psrad $8, %%mm1 \n\t"
  1793. "psrad $8, %%mm2 \n\t"
  1794. "psrad $8, %%mm3 \n\t"
  1795. #endif
  1796. "packssdw %%mm1, %%mm4 \n\t"
  1797. "packssdw %%mm3, %%mm2 \n\t"
  1798. "pmaddwd %%mm5, %%mm4 \n\t"
  1799. "pmaddwd %%mm5, %%mm2 \n\t"
  1800. "add $24, %%"REG_b" \n\t"
  1801. "packssdw %%mm2, %%mm4 \n\t"
  1802. "psraw $7, %%mm4 \n\t"
  1803. "packuswb %%mm4, %%mm0 \n\t"
  1804. "paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t"
  1805. "movq %%mm0, (%1, %%"REG_a") \n\t"
  1806. "add $8, %%"REG_a" \n\t"
  1807. " js 1b \n\t"
  1808. : : "r" (src+width*3), "r" (dst+width), "g" (-width)
  1809. : "%"REG_a, "%"REG_b
  1810. );
  1811. #else
  1812. int i;
  1813. for(i=0; i<width; i++)
  1814. {
  1815. int b= src[i*3+0];
  1816. int g= src[i*3+1];
  1817. int r= src[i*3+2];
  1818. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  1819. }
  1820. #endif
  1821. }
  1822. static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
  1823. {
  1824. #ifdef HAVE_MMX
  1825. asm volatile(
  1826. "mov %4, %%"REG_a" \n\t"
  1827. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1828. "movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t"
  1829. "pxor %%mm7, %%mm7 \n\t"
  1830. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b" \n\t"
  1831. "add %%"REG_b", %%"REG_b" \n\t"
  1832. ASMALIGN16
  1833. "1: \n\t"
  1834. PREFETCH" 64(%0, %%"REG_b") \n\t"
  1835. PREFETCH" 64(%1, %%"REG_b") \n\t"
  1836. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1837. "movq (%0, %%"REG_b"), %%mm0 \n\t"
  1838. "movq (%1, %%"REG_b"), %%mm1 \n\t"
  1839. "movq 6(%0, %%"REG_b"), %%mm2 \n\t"
  1840. "movq 6(%1, %%"REG_b"), %%mm3 \n\t"
  1841. PAVGB(%%mm1, %%mm0)
  1842. PAVGB(%%mm3, %%mm2)
  1843. "movq %%mm0, %%mm1 \n\t"
  1844. "movq %%mm2, %%mm3 \n\t"
  1845. "psrlq $24, %%mm0 \n\t"
  1846. "psrlq $24, %%mm2 \n\t"
  1847. PAVGB(%%mm1, %%mm0)
  1848. PAVGB(%%mm3, %%mm2)
  1849. "punpcklbw %%mm7, %%mm0 \n\t"
  1850. "punpcklbw %%mm7, %%mm2 \n\t"
  1851. #else
  1852. "movd (%0, %%"REG_b"), %%mm0 \n\t"
  1853. "movd (%1, %%"REG_b"), %%mm1 \n\t"
  1854. "movd 3(%0, %%"REG_b"), %%mm2 \n\t"
  1855. "movd 3(%1, %%"REG_b"), %%mm3 \n\t"
  1856. "punpcklbw %%mm7, %%mm0 \n\t"
  1857. "punpcklbw %%mm7, %%mm1 \n\t"
  1858. "punpcklbw %%mm7, %%mm2 \n\t"
  1859. "punpcklbw %%mm7, %%mm3 \n\t"
  1860. "paddw %%mm1, %%mm0 \n\t"
  1861. "paddw %%mm3, %%mm2 \n\t"
  1862. "paddw %%mm2, %%mm0 \n\t"
  1863. "movd 6(%0, %%"REG_b"), %%mm4 \n\t"
  1864. "movd 6(%1, %%"REG_b"), %%mm1 \n\t"
  1865. "movd 9(%0, %%"REG_b"), %%mm2 \n\t"
  1866. "movd 9(%1, %%"REG_b"), %%mm3 \n\t"
  1867. "punpcklbw %%mm7, %%mm4 \n\t"
  1868. "punpcklbw %%mm7, %%mm1 \n\t"
  1869. "punpcklbw %%mm7, %%mm2 \n\t"
  1870. "punpcklbw %%mm7, %%mm3 \n\t"
  1871. "paddw %%mm1, %%mm4 \n\t"
  1872. "paddw %%mm3, %%mm2 \n\t"
  1873. "paddw %%mm4, %%mm2 \n\t"
  1874. "psrlw $2, %%mm0 \n\t"
  1875. "psrlw $2, %%mm2 \n\t"
  1876. #endif
  1877. "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
  1878. "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
  1879. "pmaddwd %%mm0, %%mm1 \n\t"
  1880. "pmaddwd %%mm2, %%mm3 \n\t"
  1881. "pmaddwd %%mm6, %%mm0 \n\t"
  1882. "pmaddwd %%mm6, %%mm2 \n\t"
  1883. #ifndef FAST_BGR2YV12
  1884. "psrad $8, %%mm0 \n\t"
  1885. "psrad $8, %%mm1 \n\t"
  1886. "psrad $8, %%mm2 \n\t"
  1887. "psrad $8, %%mm3 \n\t"
  1888. #endif
  1889. "packssdw %%mm2, %%mm0 \n\t"
  1890. "packssdw %%mm3, %%mm1 \n\t"
  1891. "pmaddwd %%mm5, %%mm0 \n\t"
  1892. "pmaddwd %%mm5, %%mm1 \n\t"
  1893. "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
  1894. "psraw $7, %%mm0 \n\t"
  1895. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1896. "movq 12(%0, %%"REG_b"), %%mm4 \n\t"
  1897. "movq 12(%1, %%"REG_b"), %%mm1 \n\t"
  1898. "movq 18(%0, %%"REG_b"), %%mm2 \n\t"
  1899. "movq 18(%1, %%"REG_b"), %%mm3 \n\t"
  1900. PAVGB(%%mm1, %%mm4)
  1901. PAVGB(%%mm3, %%mm2)
  1902. "movq %%mm4, %%mm1 \n\t"
  1903. "movq %%mm2, %%mm3 \n\t"
  1904. "psrlq $24, %%mm4 \n\t"
  1905. "psrlq $24, %%mm2 \n\t"
  1906. PAVGB(%%mm1, %%mm4)
  1907. PAVGB(%%mm3, %%mm2)
  1908. "punpcklbw %%mm7, %%mm4 \n\t"
  1909. "punpcklbw %%mm7, %%mm2 \n\t"
  1910. #else
  1911. "movd 12(%0, %%"REG_b"), %%mm4 \n\t"
  1912. "movd 12(%1, %%"REG_b"), %%mm1 \n\t"
  1913. "movd 15(%0, %%"REG_b"), %%mm2 \n\t"
  1914. "movd 15(%1, %%"REG_b"), %%mm3 \n\t"
  1915. "punpcklbw %%mm7, %%mm4 \n\t"
  1916. "punpcklbw %%mm7, %%mm1 \n\t"
  1917. "punpcklbw %%mm7, %%mm2 \n\t"
  1918. "punpcklbw %%mm7, %%mm3 \n\t"
  1919. "paddw %%mm1, %%mm4 \n\t"
  1920. "paddw %%mm3, %%mm2 \n\t"
  1921. "paddw %%mm2, %%mm4 \n\t"
  1922. "movd 18(%0, %%"REG_b"), %%mm5 \n\t"
  1923. "movd 18(%1, %%"REG_b"), %%mm1 \n\t"
  1924. "movd 21(%0, %%"REG_b"), %%mm2 \n\t"
  1925. "movd 21(%1, %%"REG_b"), %%mm3 \n\t"
  1926. "punpcklbw %%mm7, %%mm5 \n\t"
  1927. "punpcklbw %%mm7, %%mm1 \n\t"
  1928. "punpcklbw %%mm7, %%mm2 \n\t"
  1929. "punpcklbw %%mm7, %%mm3 \n\t"
  1930. "paddw %%mm1, %%mm5 \n\t"
  1931. "paddw %%mm3, %%mm2 \n\t"
  1932. "paddw %%mm5, %%mm2 \n\t"
  1933. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1934. "psrlw $2, %%mm4 \n\t"
  1935. "psrlw $2, %%mm2 \n\t"
  1936. #endif
  1937. "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
  1938. "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
  1939. "pmaddwd %%mm4, %%mm1 \n\t"
  1940. "pmaddwd %%mm2, %%mm3 \n\t"
  1941. "pmaddwd %%mm6, %%mm4 \n\t"
  1942. "pmaddwd %%mm6, %%mm2 \n\t"
  1943. #ifndef FAST_BGR2YV12
  1944. "psrad $8, %%mm4 \n\t"
  1945. "psrad $8, %%mm1 \n\t"
  1946. "psrad $8, %%mm2 \n\t"
  1947. "psrad $8, %%mm3 \n\t"
  1948. #endif
  1949. "packssdw %%mm2, %%mm4 \n\t"
  1950. "packssdw %%mm3, %%mm1 \n\t"
  1951. "pmaddwd %%mm5, %%mm4 \n\t"
  1952. "pmaddwd %%mm5, %%mm1 \n\t"
  1953. "add $24, %%"REG_b" \n\t"
  1954. "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
  1955. "psraw $7, %%mm4 \n\t"
  1956. "movq %%mm0, %%mm1 \n\t"
  1957. "punpckldq %%mm4, %%mm0 \n\t"
  1958. "punpckhdq %%mm4, %%mm1 \n\t"
  1959. "packsswb %%mm1, %%mm0 \n\t"
  1960. "paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t"
  1961. "movd %%mm0, (%2, %%"REG_a") \n\t"
  1962. "punpckhdq %%mm0, %%mm0 \n\t"
  1963. "movd %%mm0, (%3, %%"REG_a") \n\t"
  1964. "add $4, %%"REG_a" \n\t"
  1965. " js 1b \n\t"
  1966. : : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width)
  1967. : "%"REG_a, "%"REG_b
  1968. );
  1969. #else
  1970. int i;
  1971. for(i=0; i<width; i++)
  1972. {
  1973. int b= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
  1974. int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
  1975. int r= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
  1976. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1977. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1978. }
  1979. #endif
  1980. }
  1981. static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width)
  1982. {
  1983. int i;
  1984. for(i=0; i<width; i++)
  1985. {
  1986. int d= ((uint16_t*)src)[i];
  1987. int b= d&0x1F;
  1988. int g= (d>>5)&0x3F;
  1989. int r= (d>>11)&0x1F;
  1990. dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
  1991. }
  1992. }
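/* bgr16ToUV (and bgr15ToUV below) read two packed pixels per 32-bit load and, judging by the
 * masks, sum the colour fields of the 2x2 block in-register: the masks leave spare bits between
 * the fields so the four-sample per-channel sums cannot carry into a neighbouring field before
 * they are unpacked again and fed to the U/V transform. */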
  1993. static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1994. {
  1995. int i;
  1996. for(i=0; i<width; i++)
  1997. {
  1998. int d0= ((uint32_t*)src1)[i];
  1999. int d1= ((uint32_t*)src2)[i];
  2000. int dl= (d0&0x07E0F81F) + (d1&0x07E0F81F);
  2001. int dh= ((d0>>5)&0x07C0F83F) + ((d1>>5)&0x07C0F83F);
  2002. int dh2= (dh>>11) + (dh<<21);
  2003. int d= dh2 + dl;
  2004. int b= d&0x7F;
  2005. int r= (d>>11)&0x7F;
  2006. int g= d>>21;
  2007. dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
  2008. dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
  2009. }
  2010. }
  2011. static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width)
  2012. {
  2013. int i;
  2014. for(i=0; i<width; i++)
  2015. {
  2016. int d= ((uint16_t*)src)[i];
  2017. int b= d&0x1F;
  2018. int g= (d>>5)&0x1F;
  2019. int r= (d>>10)&0x1F;
  2020. dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
  2021. }
  2022. }
  2023. static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  2024. {
  2025. int i;
  2026. for(i=0; i<width; i++)
  2027. {
  2028. int d0= ((uint32_t*)src1)[i];
  2029. int d1= ((uint32_t*)src2)[i];
  2030. int dl= (d0&0x03E07C1F) + (d1&0x03E07C1F);
  2031. int dh= ((d0>>5)&0x03E0F81F) + ((d1>>5)&0x03E0F81F);
  2032. int dh2= (dh>>11) + (dh<<21);
  2033. int d= dh2 + dl;
  2034. int b= d&0x7F;
  2035. int r= (d>>10)&0x7F;
  2036. int g= d>>21;
  2037. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
  2038. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
  2039. }
  2040. }
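/* The rgb32/rgb24 converters below are identical to the bgr ones above except that the R and B
 * channel positions are swapped. */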
  2041. static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width)
  2042. {
  2043. int i;
  2044. for(i=0; i<width; i++)
  2045. {
  2046. int r= ((uint32_t*)src)[i]&0xFF;
  2047. int g= (((uint32_t*)src)[i]>>8)&0xFF;
  2048. int b= (((uint32_t*)src)[i]>>16)&0xFF;
  2049. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  2050. }
  2051. }
  2052. static inline void RENAME(rgb32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  2053. {
  2054. int i;
  2055. for(i=0; i<width; i++)
  2056. {
  2057. const int a= ((uint32_t*)src1)[2*i+0];
  2058. const int e= ((uint32_t*)src1)[2*i+1];
  2059. const int c= ((uint32_t*)src2)[2*i+0];
  2060. const int d= ((uint32_t*)src2)[2*i+1];
  2061. const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF);
  2062. const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00);
  2063. const int r= l&0x3FF;
  2064. const int g= h>>8;
  2065. const int b= l>>16;
  2066. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  2067. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  2068. }
  2069. }
  2070. static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, int width)
  2071. {
  2072. int i;
  2073. for(i=0; i<width; i++)
  2074. {
  2075. int r= src[i*3+0];
  2076. int g= src[i*3+1];
  2077. int b= src[i*3+2];
  2078. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  2079. }
  2080. }
  2081. static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  2082. {
  2083. int i;
  2084. for(i=0; i<width; i++)
  2085. {
  2086. int r= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
  2087. int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
  2088. int b= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
  2089. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  2090. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  2091. }
  2092. }
  2093. // Bilinear / Bicubic scaling
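/* Reference semantics (this matches the scalar fallback at the bottom of hScale): for each
 * output sample i,
 *
 *     val    = sum over j of src[filterPos[i] + j] * filter[filterSize*i + j]
 *     dst[i] = clip(val >> 7, 0, (1<<15)-1)
 *
 * i.e. an FIR filter over the 8-bit input producing a ~15-bit intermediate line; the MMX paths
 * below are unrolled versions of this for filterSize 4, 8 and the general case. */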
  2094. static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
  2095. int16_t *filter, int16_t *filterPos, long filterSize)
  2096. {
  2097. #ifdef HAVE_MMX
  2098. assert(filterSize % 4 == 0 && filterSize>0);
2099. if(filterSize==4) // always true for upscaling, sometimes for downscaling too
  2100. {
  2101. long counter= -2*dstW;
  2102. filter-= counter*2;
  2103. filterPos-= counter/2;
  2104. dst-= counter/2;
  2105. asm volatile(
  2106. "pxor %%mm7, %%mm7 \n\t"
  2107. "movq "MANGLE(w02)", %%mm6 \n\t"
  2108. "push %%"REG_BP" \n\t" // we use 7 regs here ...
  2109. "mov %%"REG_a", %%"REG_BP" \n\t"
  2110. ASMALIGN16
  2111. "1: \n\t"
  2112. "movzwl (%2, %%"REG_BP"), %%eax \n\t"
  2113. "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
  2114. "movq (%1, %%"REG_BP", 4), %%mm1\n\t"
  2115. "movq 8(%1, %%"REG_BP", 4), %%mm3\n\t"
  2116. "movd (%3, %%"REG_a"), %%mm0 \n\t"
  2117. "movd (%3, %%"REG_b"), %%mm2 \n\t"
  2118. "punpcklbw %%mm7, %%mm0 \n\t"
  2119. "punpcklbw %%mm7, %%mm2 \n\t"
  2120. "pmaddwd %%mm1, %%mm0 \n\t"
  2121. "pmaddwd %%mm2, %%mm3 \n\t"
  2122. "psrad $8, %%mm0 \n\t"
  2123. "psrad $8, %%mm3 \n\t"
  2124. "packssdw %%mm3, %%mm0 \n\t"
  2125. "pmaddwd %%mm6, %%mm0 \n\t"
  2126. "packssdw %%mm0, %%mm0 \n\t"
  2127. "movd %%mm0, (%4, %%"REG_BP") \n\t"
  2128. "add $4, %%"REG_BP" \n\t"
  2129. " jnc 1b \n\t"
  2130. "pop %%"REG_BP" \n\t"
  2131. : "+a" (counter)
  2132. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  2133. : "%"REG_b
  2134. );
  2135. }
  2136. else if(filterSize==8)
  2137. {
  2138. long counter= -2*dstW;
  2139. filter-= counter*4;
  2140. filterPos-= counter/2;
  2141. dst-= counter/2;
  2142. asm volatile(
  2143. "pxor %%mm7, %%mm7 \n\t"
  2144. "movq "MANGLE(w02)", %%mm6 \n\t"
  2145. "push %%"REG_BP" \n\t" // we use 7 regs here ...
  2146. "mov %%"REG_a", %%"REG_BP" \n\t"
  2147. ASMALIGN16
  2148. "1: \n\t"
  2149. "movzwl (%2, %%"REG_BP"), %%eax \n\t"
  2150. "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
  2151. "movq (%1, %%"REG_BP", 8), %%mm1\n\t"
  2152. "movq 16(%1, %%"REG_BP", 8), %%mm3\n\t"
  2153. "movd (%3, %%"REG_a"), %%mm0 \n\t"
  2154. "movd (%3, %%"REG_b"), %%mm2 \n\t"
  2155. "punpcklbw %%mm7, %%mm0 \n\t"
  2156. "punpcklbw %%mm7, %%mm2 \n\t"
  2157. "pmaddwd %%mm1, %%mm0 \n\t"
  2158. "pmaddwd %%mm2, %%mm3 \n\t"
  2159. "movq 8(%1, %%"REG_BP", 8), %%mm1\n\t"
  2160. "movq 24(%1, %%"REG_BP", 8), %%mm5\n\t"
  2161. "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
  2162. "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
  2163. "punpcklbw %%mm7, %%mm4 \n\t"
  2164. "punpcklbw %%mm7, %%mm2 \n\t"
  2165. "pmaddwd %%mm1, %%mm4 \n\t"
  2166. "pmaddwd %%mm2, %%mm5 \n\t"
  2167. "paddd %%mm4, %%mm0 \n\t"
  2168. "paddd %%mm5, %%mm3 \n\t"
  2169. "psrad $8, %%mm0 \n\t"
  2170. "psrad $8, %%mm3 \n\t"
  2171. "packssdw %%mm3, %%mm0 \n\t"
  2172. "pmaddwd %%mm6, %%mm0 \n\t"
  2173. "packssdw %%mm0, %%mm0 \n\t"
  2174. "movd %%mm0, (%4, %%"REG_BP") \n\t"
  2175. "add $4, %%"REG_BP" \n\t"
  2176. " jnc 1b \n\t"
  2177. "pop %%"REG_BP" \n\t"
  2178. : "+a" (counter)
  2179. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  2180. : "%"REG_b
  2181. );
  2182. }
  2183. else
  2184. {
  2185. uint8_t *offset = src+filterSize;
  2186. long counter= -2*dstW;
  2187. // filter-= counter*filterSize/2;
  2188. filterPos-= counter/2;
  2189. dst-= counter/2;
  2190. asm volatile(
  2191. "pxor %%mm7, %%mm7 \n\t"
  2192. "movq "MANGLE(w02)", %%mm6 \n\t"
  2193. ASMALIGN16
  2194. "1: \n\t"
  2195. "mov %2, %%"REG_c" \n\t"
  2196. "movzwl (%%"REG_c", %0), %%eax \n\t"
  2197. "movzwl 2(%%"REG_c", %0), %%ebx \n\t"
  2198. "mov %5, %%"REG_c" \n\t"
  2199. "pxor %%mm4, %%mm4 \n\t"
  2200. "pxor %%mm5, %%mm5 \n\t"
  2201. "2: \n\t"
  2202. "movq (%1), %%mm1 \n\t"
  2203. "movq (%1, %6), %%mm3 \n\t"
  2204. "movd (%%"REG_c", %%"REG_a"), %%mm0\n\t"
  2205. "movd (%%"REG_c", %%"REG_b"), %%mm2\n\t"
  2206. "punpcklbw %%mm7, %%mm0 \n\t"
  2207. "punpcklbw %%mm7, %%mm2 \n\t"
  2208. "pmaddwd %%mm1, %%mm0 \n\t"
  2209. "pmaddwd %%mm2, %%mm3 \n\t"
  2210. "paddd %%mm3, %%mm5 \n\t"
  2211. "paddd %%mm0, %%mm4 \n\t"
  2212. "add $8, %1 \n\t"
  2213. "add $4, %%"REG_c" \n\t"
  2214. "cmp %4, %%"REG_c" \n\t"
  2215. " jb 2b \n\t"
  2216. "add %6, %1 \n\t"
  2217. "psrad $8, %%mm4 \n\t"
  2218. "psrad $8, %%mm5 \n\t"
  2219. "packssdw %%mm5, %%mm4 \n\t"
  2220. "pmaddwd %%mm6, %%mm4 \n\t"
  2221. "packssdw %%mm4, %%mm4 \n\t"
  2222. "mov %3, %%"REG_a" \n\t"
  2223. "movd %%mm4, (%%"REG_a", %0) \n\t"
  2224. "add $4, %0 \n\t"
  2225. " jnc 1b \n\t"
  2226. : "+r" (counter), "+r" (filter)
  2227. : "m" (filterPos), "m" (dst), "m"(offset),
  2228. "m" (src), "r" (filterSize*2)
  2229. : "%"REG_b, "%"REG_a, "%"REG_c
  2230. );
  2231. }
  2232. #else
  2233. #ifdef HAVE_ALTIVEC
  2234. hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
  2235. #else
  2236. int i;
  2237. for(i=0; i<dstW; i++)
  2238. {
  2239. int j;
  2240. int srcPos= filterPos[i];
  2241. int val=0;
  2242. // printf("filterPos: %d\n", filterPos[i]);
  2243. for(j=0; j<filterSize; j++)
  2244. {
  2245. // printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
  2246. val += ((int)src[srcPos + j])*filter[filterSize*i + j];
  2247. }
  2248. // filter += hFilterSize;
  2249. dst[i] = FFMIN(FFMAX(0, val>>7), (1<<15)-1); // the cubic equation does overflow ...
  2250. // dst[i] = val>>7;
  2251. }
  2252. #endif
  2253. #endif
  2254. }
  2255. // *** horizontal scale Y line to temp buffer
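/* hyscale: if the source is a packed-YUV or RGB format the line is first converted to plain
 * 8-bit luma in formatConvBuffer, then either the generic hScale() filter runs or, with
 * SWS_FAST_BILINEAR, the fixed-point bilinear loop / generated MMX2 ("funny") code is used. */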
  2256. static inline void RENAME(hyscale)(uint16_t *dst, long dstWidth, uint8_t *src, int srcW, int xInc,
  2257. int flags, int canMMX2BeUsed, int16_t *hLumFilter,
  2258. int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
  2259. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  2260. int32_t *mmx2FilterPos)
  2261. {
  2262. if(srcFormat==IMGFMT_YUY2)
  2263. {
  2264. RENAME(yuy2ToY)(formatConvBuffer, src, srcW);
  2265. src= formatConvBuffer;
  2266. }
  2267. else if(srcFormat==IMGFMT_UYVY)
  2268. {
  2269. RENAME(uyvyToY)(formatConvBuffer, src, srcW);
  2270. src= formatConvBuffer;
  2271. }
  2272. else if(srcFormat==IMGFMT_BGR32)
  2273. {
  2274. RENAME(bgr32ToY)(formatConvBuffer, src, srcW);
  2275. src= formatConvBuffer;
  2276. }
  2277. else if(srcFormat==IMGFMT_BGR24)
  2278. {
  2279. RENAME(bgr24ToY)(formatConvBuffer, src, srcW);
  2280. src= formatConvBuffer;
  2281. }
  2282. else if(srcFormat==IMGFMT_BGR16)
  2283. {
  2284. RENAME(bgr16ToY)(formatConvBuffer, src, srcW);
  2285. src= formatConvBuffer;
  2286. }
  2287. else if(srcFormat==IMGFMT_BGR15)
  2288. {
  2289. RENAME(bgr15ToY)(formatConvBuffer, src, srcW);
  2290. src= formatConvBuffer;
  2291. }
  2292. else if(srcFormat==IMGFMT_RGB32)
  2293. {
  2294. RENAME(rgb32ToY)(formatConvBuffer, src, srcW);
  2295. src= formatConvBuffer;
  2296. }
  2297. else if(srcFormat==IMGFMT_RGB24)
  2298. {
  2299. RENAME(rgb24ToY)(formatConvBuffer, src, srcW);
  2300. src= formatConvBuffer;
  2301. }
  2302. #ifdef HAVE_MMX
2303. // use the new MMX scaler if MMX2 can't be used (it is faster than the x86 asm one)
  2304. if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2305. #else
  2306. if(!(flags&SWS_FAST_BILINEAR))
  2307. #endif
  2308. {
  2309. RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
  2310. }
  2311. else // Fast Bilinear upscale / crap downscale
  2312. {
  2313. #if defined(ARCH_X86) || defined(ARCH_X86_64)
  2314. #ifdef HAVE_MMX2
  2315. int i;
  2316. if(canMMX2BeUsed)
  2317. {
  2318. asm volatile(
  2319. "pxor %%mm7, %%mm7 \n\t"
  2320. "mov %0, %%"REG_c" \n\t"
  2321. "mov %1, %%"REG_D" \n\t"
  2322. "mov %2, %%"REG_d" \n\t"
  2323. "mov %3, %%"REG_b" \n\t"
  2324. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2325. PREFETCH" (%%"REG_c") \n\t"
  2326. PREFETCH" 32(%%"REG_c") \n\t"
  2327. PREFETCH" 64(%%"REG_c") \n\t"
  2328. #ifdef ARCH_X86_64
  2329. #define FUNNY_Y_CODE \
  2330. "movl (%%"REG_b"), %%esi \n\t"\
  2331. "call *%4 \n\t"\
  2332. "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
  2333. "add %%"REG_S", %%"REG_c" \n\t"\
  2334. "add %%"REG_a", %%"REG_D" \n\t"\
  2335. "xor %%"REG_a", %%"REG_a" \n\t"\
  2336. #else
  2337. #define FUNNY_Y_CODE \
  2338. "movl (%%"REG_b"), %%esi \n\t"\
  2339. "call *%4 \n\t"\
  2340. "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
  2341. "add %%"REG_a", %%"REG_D" \n\t"\
  2342. "xor %%"REG_a", %%"REG_a" \n\t"\
  2343. #endif
  2344. FUNNY_Y_CODE
  2345. FUNNY_Y_CODE
  2346. FUNNY_Y_CODE
  2347. FUNNY_Y_CODE
  2348. FUNNY_Y_CODE
  2349. FUNNY_Y_CODE
  2350. FUNNY_Y_CODE
  2351. FUNNY_Y_CODE
  2352. :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2353. "m" (funnyYCode)
  2354. : "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
  2355. );
  2356. for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
  2357. }
  2358. else
  2359. {
  2360. #endif
  2361. long xInc_shr16 = xInc >> 16;
  2362. uint16_t xInc_mask = xInc & 0xffff;
  2363. //NO MMX just normal asm ...
  2364. asm volatile(
  2365. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2366. "xor %%"REG_b", %%"REG_b" \n\t" // xx
  2367. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2368. ASMALIGN16
  2369. "1: \n\t"
  2370. "movzbl (%0, %%"REG_b"), %%edi \n\t" //src[xx]
  2371. "movzbl 1(%0, %%"REG_b"), %%esi \n\t" //src[xx+1]
  2372. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2373. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2374. "shll $16, %%edi \n\t"
  2375. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2376. "mov %1, %%"REG_D" \n\t"
  2377. "shrl $9, %%esi \n\t"
  2378. "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
2379. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFFFF
2380. "adc %3, %%"REG_b" \n\t" //xx+= xInc>>16 + carry
  2381. "movzbl (%0, %%"REG_b"), %%edi \n\t" //src[xx]
  2382. "movzbl 1(%0, %%"REG_b"), %%esi \n\t" //src[xx+1]
  2383. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2384. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2385. "shll $16, %%edi \n\t"
  2386. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2387. "mov %1, %%"REG_D" \n\t"
  2388. "shrl $9, %%esi \n\t"
  2389. "movw %%si, 2(%%"REG_D", %%"REG_a", 2)\n\t"
2390. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFFFF
2391. "adc %3, %%"REG_b" \n\t" //xx+= xInc>>16 + carry
  2392. "add $2, %%"REG_a" \n\t"
  2393. "cmp %2, %%"REG_a" \n\t"
  2394. " jb 1b \n\t"
  2395. :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
  2396. : "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi"
  2397. );
  2398. #ifdef HAVE_MMX2
  2399. } //if MMX2 can't be used
  2400. #endif
  2401. #else
  2402. int i;
  2403. unsigned int xpos=0;
  2404. for(i=0;i<dstWidth;i++)
  2405. {
  2406. register unsigned int xx=xpos>>16;
  2407. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2408. dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
  2409. xpos+=xInc;
  2410. }
  2411. #endif
  2412. }
  2413. }
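// hcscale does the same as hyscale but for chroma: both planes are handled in one call, the
// second one at element offset 2048 (dst+2048, formatConvBuffer+2048).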
  2414. inline static void RENAME(hcscale)(uint16_t *dst, long dstWidth, uint8_t *src1, uint8_t *src2,
  2415. int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
  2416. int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
  2417. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  2418. int32_t *mmx2FilterPos)
  2419. {
  2420. if(srcFormat==IMGFMT_YUY2)
  2421. {
  2422. RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2423. src1= formatConvBuffer;
  2424. src2= formatConvBuffer+2048;
  2425. }
  2426. else if(srcFormat==IMGFMT_UYVY)
  2427. {
  2428. RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2429. src1= formatConvBuffer;
  2430. src2= formatConvBuffer+2048;
  2431. }
  2432. else if(srcFormat==IMGFMT_BGR32)
  2433. {
  2434. RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2435. src1= formatConvBuffer;
  2436. src2= formatConvBuffer+2048;
  2437. }
  2438. else if(srcFormat==IMGFMT_BGR24)
  2439. {
  2440. RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2441. src1= formatConvBuffer;
  2442. src2= formatConvBuffer+2048;
  2443. }
  2444. else if(srcFormat==IMGFMT_BGR16)
  2445. {
  2446. RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2447. src1= formatConvBuffer;
  2448. src2= formatConvBuffer+2048;
  2449. }
  2450. else if(srcFormat==IMGFMT_BGR15)
  2451. {
  2452. RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2453. src1= formatConvBuffer;
  2454. src2= formatConvBuffer+2048;
  2455. }
  2456. else if(srcFormat==IMGFMT_RGB32)
  2457. {
  2458. RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2459. src1= formatConvBuffer;
  2460. src2= formatConvBuffer+2048;
  2461. }
  2462. else if(srcFormat==IMGFMT_RGB24)
  2463. {
  2464. RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2465. src1= formatConvBuffer;
  2466. src2= formatConvBuffer+2048;
  2467. }
  2468. else if(isGray(srcFormat))
  2469. {
  2470. return;
  2471. }
  2472. #ifdef HAVE_MMX
2473. // use the new MMX scaler if MMX2 can't be used (it is faster than the x86 asm one)
  2474. if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2475. #else
  2476. if(!(flags&SWS_FAST_BILINEAR))
  2477. #endif
  2478. {
  2479. RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2480. RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2481. }
  2482. else // Fast Bilinear upscale / crap downscale
  2483. {
  2484. #if defined(ARCH_X86) || defined(ARCH_X86_64)
  2485. #ifdef HAVE_MMX2
  2486. int i;
  2487. if(canMMX2BeUsed)
  2488. {
  2489. asm volatile(
  2490. "pxor %%mm7, %%mm7 \n\t"
  2491. "mov %0, %%"REG_c" \n\t"
  2492. "mov %1, %%"REG_D" \n\t"
  2493. "mov %2, %%"REG_d" \n\t"
  2494. "mov %3, %%"REG_b" \n\t"
  2495. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2496. PREFETCH" (%%"REG_c") \n\t"
  2497. PREFETCH" 32(%%"REG_c") \n\t"
  2498. PREFETCH" 64(%%"REG_c") \n\t"
  2499. #ifdef ARCH_X86_64
  2500. #define FUNNY_UV_CODE \
  2501. "movl (%%"REG_b"), %%esi \n\t"\
  2502. "call *%4 \n\t"\
  2503. "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
  2504. "add %%"REG_S", %%"REG_c" \n\t"\
  2505. "add %%"REG_a", %%"REG_D" \n\t"\
  2506. "xor %%"REG_a", %%"REG_a" \n\t"\
  2507. #else
  2508. #define FUNNY_UV_CODE \
  2509. "movl (%%"REG_b"), %%esi \n\t"\
  2510. "call *%4 \n\t"\
  2511. "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
  2512. "add %%"REG_a", %%"REG_D" \n\t"\
  2513. "xor %%"REG_a", %%"REG_a" \n\t"\
  2514. #endif
  2515. FUNNY_UV_CODE
  2516. FUNNY_UV_CODE
  2517. FUNNY_UV_CODE
  2518. FUNNY_UV_CODE
  2519. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2520. "mov %5, %%"REG_c" \n\t" // src
  2521. "mov %1, %%"REG_D" \n\t" // buf1
  2522. "add $4096, %%"REG_D" \n\t"
  2523. PREFETCH" (%%"REG_c") \n\t"
  2524. PREFETCH" 32(%%"REG_c") \n\t"
  2525. PREFETCH" 64(%%"REG_c") \n\t"
  2526. FUNNY_UV_CODE
  2527. FUNNY_UV_CODE
  2528. FUNNY_UV_CODE
  2529. FUNNY_UV_CODE
  2530. :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2531. "m" (funnyUVCode), "m" (src2)
  2532. : "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
  2533. );
  2534. for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
  2535. {
  2536. // printf("%d %d %d\n", dstWidth, i, srcW);
  2537. dst[i] = src1[srcW-1]*128;
  2538. dst[i+2048] = src2[srcW-1]*128;
  2539. }
  2540. }
  2541. else
  2542. {
  2543. #endif
  2544. long xInc_shr16 = (long) (xInc >> 16);
  2545. uint16_t xInc_mask = xInc & 0xffff;
  2546. asm volatile(
  2547. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2548. "xor %%"REG_b", %%"REG_b" \n\t" // xx
  2549. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2550. ASMALIGN16
  2551. "1: \n\t"
  2552. "mov %0, %%"REG_S" \n\t"
  2553. "movzbl (%%"REG_S", %%"REG_b"), %%edi \n\t" //src[xx]
  2554. "movzbl 1(%%"REG_S", %%"REG_b"), %%esi \n\t" //src[xx+1]
  2555. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2556. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2557. "shll $16, %%edi \n\t"
  2558. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2559. "mov %1, %%"REG_D" \n\t"
  2560. "shrl $9, %%esi \n\t"
  2561. "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
  2562. "movzbl (%5, %%"REG_b"), %%edi \n\t" //src[xx]
  2563. "movzbl 1(%5, %%"REG_b"), %%esi \n\t" //src[xx+1]
  2564. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2565. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2566. "shll $16, %%edi \n\t"
  2567. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2568. "mov %1, %%"REG_D" \n\t"
  2569. "shrl $9, %%esi \n\t"
  2570. "movw %%si, 4096(%%"REG_D", %%"REG_a", 2)\n\t"
2571. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFFFF
2572. "adc %3, %%"REG_b" \n\t" //xx+= xInc>>16 + carry
  2573. "add $1, %%"REG_a" \n\t"
  2574. "cmp %2, %%"REG_a" \n\t"
  2575. " jb 1b \n\t"
2576. /* GCC 3.3 makes MPlayer crash on IA-32 machines when the "g" operand is used here,
2577. but that operand is needed to support GCC 4.0 */
  2578. #if defined(ARCH_X86_64) && ((__GNUC__ > 3) || ( __GNUC__ == 3 && __GNUC_MINOR__ >= 4))
  2579. :: "m" (src1), "m" (dst), "g" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
  2580. #else
  2581. :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
  2582. #endif
  2583. "r" (src2)
  2584. : "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi"
  2585. );
  2586. #ifdef HAVE_MMX2
  2587. } //if MMX2 can't be used
  2588. #endif
  2589. #else
  2590. int i;
  2591. unsigned int xpos=0;
  2592. for(i=0;i<dstWidth;i++)
  2593. {
  2594. register unsigned int xx=xpos>>16;
  2595. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2596. dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
  2597. dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
  2598. /* slower
  2599. dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
  2600. dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
  2601. */
  2602. xpos+=xInc;
  2603. }
  2604. #endif
  2605. }
  2606. }
  2607. static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
  2608. int srcSliceH, uint8_t* dst[], int dstStride[]){
2609. /* load a few things into local vars to make the code more readable and faster */
  2610. const int srcW= c->srcW;
  2611. const int dstW= c->dstW;
  2612. const int dstH= c->dstH;
  2613. const int chrDstW= c->chrDstW;
  2614. const int chrSrcW= c->chrSrcW;
  2615. const int lumXInc= c->lumXInc;
  2616. const int chrXInc= c->chrXInc;
  2617. const int dstFormat= c->dstFormat;
  2618. const int srcFormat= c->srcFormat;
  2619. const int flags= c->flags;
  2620. const int canMMX2BeUsed= c->canMMX2BeUsed;
  2621. int16_t *vLumFilterPos= c->vLumFilterPos;
  2622. int16_t *vChrFilterPos= c->vChrFilterPos;
  2623. int16_t *hLumFilterPos= c->hLumFilterPos;
  2624. int16_t *hChrFilterPos= c->hChrFilterPos;
  2625. int16_t *vLumFilter= c->vLumFilter;
  2626. int16_t *vChrFilter= c->vChrFilter;
  2627. int16_t *hLumFilter= c->hLumFilter;
  2628. int16_t *hChrFilter= c->hChrFilter;
  2629. int32_t *lumMmxFilter= c->lumMmxFilter;
  2630. int32_t *chrMmxFilter= c->chrMmxFilter;
  2631. const int vLumFilterSize= c->vLumFilterSize;
  2632. const int vChrFilterSize= c->vChrFilterSize;
  2633. const int hLumFilterSize= c->hLumFilterSize;
  2634. const int hChrFilterSize= c->hChrFilterSize;
  2635. int16_t **lumPixBuf= c->lumPixBuf;
  2636. int16_t **chrPixBuf= c->chrPixBuf;
  2637. const int vLumBufSize= c->vLumBufSize;
  2638. const int vChrBufSize= c->vChrBufSize;
  2639. uint8_t *funnyYCode= c->funnyYCode;
  2640. uint8_t *funnyUVCode= c->funnyUVCode;
  2641. uint8_t *formatConvBuffer= c->formatConvBuffer;
  2642. const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
  2643. const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
  2644. int lastDstY;
2645. /* vars which will change and which we need to store back in the context */
  2646. int dstY= c->dstY;
  2647. int lumBufIndex= c->lumBufIndex;
  2648. int chrBufIndex= c->chrBufIndex;
  2649. int lastInLumBuf= c->lastInLumBuf;
  2650. int lastInChrBuf= c->lastInChrBuf;
  2651. if(isPacked(c->srcFormat)){
  2652. src[0]=
  2653. src[1]=
  2654. src[2]= src[0];
  2655. srcStride[0]=
  2656. srcStride[1]=
  2657. srcStride[2]= srcStride[0];
  2658. }
  2659. srcStride[1]<<= c->vChrDrop;
  2660. srcStride[2]<<= c->vChrDrop;
  2661. // printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
  2662. // (int)dst[0], (int)dst[1], (int)dst[2]);
  2663. #if 0 //self test FIXME move to a vfilter or something
  2664. {
  2665. static volatile int i=0;
  2666. i++;
  2667. if(srcFormat==IMGFMT_YV12 && i==1 && srcSliceH>= c->srcH)
  2668. selfTest(src, srcStride, c->srcW, c->srcH);
  2669. i--;
  2670. }
  2671. #endif
  2672. //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
  2673. //dstStride[0],dstStride[1],dstStride[2]);
  2674. if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
  2675. {
  2676. static int firstTime=1; //FIXME move this into the context perhaps
  2677. if(flags & SWS_PRINT_INFO && firstTime)
  2678. {
  2679. MSG_WARN("SwScaler: Warning: dstStride is not aligned!\n"
2680. "SwScaler: ->cannot do aligned memory accesses anymore\n");
  2681. firstTime=0;
  2682. }
  2683. }
2684. /* Note: the user might start scaling the picture in the middle, so this will not get executed.
2685. This is not really intended but it works currently, so people might do it. */
  2686. if(srcSliceY ==0){
  2687. lumBufIndex=0;
  2688. chrBufIndex=0;
  2689. dstY=0;
  2690. lastInLumBuf= -1;
  2691. lastInChrBuf= -1;
  2692. }
  2693. lastDstY= dstY;
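/* Main loop: for every output line, horizontally scale exactly the source lines the vertical
 * filters need into the lumPixBuf/chrPixBuf ring buffers, then vertically filter them into
 * dest/uDest/vDest. If the current slice does not yet contain enough lines, buffer what is
 * available and leave the rest for the next call. */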
  2694. for(;dstY < dstH; dstY++){
  2695. unsigned char *dest =dst[0]+dstStride[0]*dstY;
  2696. const int chrDstY= dstY>>c->chrDstVSubSample;
  2697. unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
  2698. unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
  2699. const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
  2700. const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
  2701. const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
  2702. const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
  2703. //printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
  2704. // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample);
  2705. //handle holes (FAST_BILINEAR & weird filters)
  2706. if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
  2707. if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
  2708. //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
  2709. ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1)
  2710. ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1)
  2711. // Do we have enough lines in this slice to output the dstY line
  2712. if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
  2713. {
  2714. //Do horizontal scaling
  2715. while(lastInLumBuf < lastLumSrcY)
  2716. {
  2717. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2718. lumBufIndex++;
  2719. // printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
  2720. ASSERT(lumBufIndex < 2*vLumBufSize)
  2721. ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
  2722. ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
  2723. // printf("%d %d\n", lumBufIndex, vLumBufSize);
  2724. RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2725. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2726. funnyYCode, c->srcFormat, formatConvBuffer,
  2727. c->lumMmx2Filter, c->lumMmx2FilterPos);
  2728. lastInLumBuf++;
  2729. }
  2730. while(lastInChrBuf < lastChrSrcY)
  2731. {
  2732. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2733. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2734. chrBufIndex++;
  2735. ASSERT(chrBufIndex < 2*vChrBufSize)
  2736. ASSERT(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH))
  2737. ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
  2738. //FIXME replace parameters through context struct (some at least)
  2739. if(!(isGray(srcFormat) || isGray(dstFormat)))
  2740. RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2741. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2742. funnyUVCode, c->srcFormat, formatConvBuffer,
  2743. c->chrMmx2Filter, c->chrMmx2FilterPos);
  2744. lastInChrBuf++;
  2745. }
  2746. //wrap buf index around to stay inside the ring buffer
  2747. if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
  2748. if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
  2749. }
  2750. else // not enough lines left in this slice -> load the rest in the buffer
  2751. {
  2752. /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
  2753. firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
  2754. lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
  2755. vChrBufSize, vLumBufSize);*/
  2756. //Do horizontal scaling
  2757. while(lastInLumBuf+1 < srcSliceY + srcSliceH)
  2758. {
  2759. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2760. lumBufIndex++;
  2761. ASSERT(lumBufIndex < 2*vLumBufSize)
  2762. ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
  2763. ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
  2764. RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2765. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2766. funnyYCode, c->srcFormat, formatConvBuffer,
  2767. c->lumMmx2Filter, c->lumMmx2FilterPos);
  2768. lastInLumBuf++;
  2769. }
  2770. while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
  2771. {
  2772. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2773. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2774. chrBufIndex++;
  2775. ASSERT(chrBufIndex < 2*vChrBufSize)
  2776. ASSERT(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH)
  2777. ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
  2778. if(!(isGray(srcFormat) || isGray(dstFormat)))
  2779. RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2780. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2781. funnyUVCode, c->srcFormat, formatConvBuffer,
  2782. c->chrMmx2Filter, c->chrMmx2FilterPos);
  2783. lastInChrBuf++;
  2784. }
  2785. //wrap buf index around to stay inside the ring buffer
  2786. if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
  2787. if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
  2788. break; //we can't output a dstY line so let's try with the next slice
  2789. }
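// select the ordered-dither rows for this output line; they are used by the 15/16 bpp output
// paths (see the DITHER1XBPP blocks earlier in this file)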
  2790. #ifdef HAVE_MMX
  2791. b5Dither= dither8[dstY&1];
  2792. g6Dither= dither4[dstY&1];
  2793. g5Dither= dither8[dstY&1];
  2794. r5Dither= dither8[(dstY+1)&1];
  2795. #endif
  2796. if(dstY < dstH-2)
  2797. {
  2798. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2799. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2800. #ifdef HAVE_MMX
  2801. int i;
  2802. if(flags & SWS_ACCURATE_RND){
  2803. for(i=0; i<vLumFilterSize; i+=2){
  2804. lumMmxFilter[2*i+0]= lumSrcPtr[i ];
  2805. lumMmxFilter[2*i+1]= lumSrcPtr[i+(vLumFilterSize>1)];
  2806. lumMmxFilter[2*i+2]=
  2807. lumMmxFilter[2*i+3]= vLumFilter[dstY*vLumFilterSize + i ]
  2808. + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
  2809. }
  2810. for(i=0; i<vChrFilterSize; i+=2){
  2811. chrMmxFilter[2*i+0]= chrSrcPtr[i ];
  2812. chrMmxFilter[2*i+1]= chrSrcPtr[i+(vChrFilterSize>1)];
  2813. chrMmxFilter[2*i+2]=
  2814. chrMmxFilter[2*i+3]= vChrFilter[chrDstY*vChrFilterSize + i ]
  2815. + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
  2816. }
  2817. }else{
  2818. for(i=0; i<vLumFilterSize; i++)
  2819. {
  2820. lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
  2821. lumMmxFilter[4*i+2]=
  2822. lumMmxFilter[4*i+3]=
  2823. ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
  2824. }
  2825. for(i=0; i<vChrFilterSize; i++)
  2826. {
  2827. chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
  2828. chrMmxFilter[4*i+2]=
  2829. chrMmxFilter[4*i+3]=
  2830. ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
  2831. }
  2832. }
  2833. #endif
  2834. if(dstFormat == IMGFMT_NV12 || dstFormat == IMGFMT_NV21){
  2835. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2836. if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
  2837. RENAME(yuv2nv12X)(c,
  2838. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2839. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2840. dest, uDest, dstW, chrDstW, dstFormat);
  2841. }
  2842. else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 like
  2843. {
  2844. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2845. if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
  2846. if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12
  2847. {
  2848. int16_t *lumBuf = lumPixBuf[0];
  2849. int16_t *chrBuf= chrPixBuf[0];
  2850. RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
  2851. }
  2852. else //General YV12
  2853. {
  2854. RENAME(yuv2yuvX)(c,
  2855. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2856. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2857. dest, uDest, vDest, dstW, chrDstW);
  2858. }
  2859. }
  2860. else
  2861. {
  2862. ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2863. ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  2864. if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB
  2865. {
  2866. int chrAlpha= vChrFilter[2*dstY+1];
  2867. RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
  2868. dest, dstW, chrAlpha, dstFormat, flags, dstY);
  2869. }
  2870. else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB
  2871. {
  2872. int lumAlpha= vLumFilter[2*dstY+1];
  2873. int chrAlpha= vChrFilter[2*dstY+1];
  2874. lumMmxFilter[2]=
  2875. lumMmxFilter[3]= vLumFilter[2*dstY ]*0x10001;
  2876. chrMmxFilter[2]=
  2877. chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001;
  2878. RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
  2879. dest, dstW, lumAlpha, chrAlpha, dstY);
  2880. }
  2881. else //General RGB
  2882. {
  2883. RENAME(yuv2packedX)(c,
  2884. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2885. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2886. dest, dstW, dstY);
  2887. }
  2888. }
  2889. }
  2890. else // hmm looks like we can't use MMX here without overwriting this array's tail
  2891. {
  2892. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2893. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2894. if(dstFormat == IMGFMT_NV12 || dstFormat == IMGFMT_NV21){
  2895. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2896. if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
  2897. yuv2nv12XinC(
  2898. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2899. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2900. dest, uDest, dstW, chrDstW, dstFormat);
  2901. }
  2902. else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12
  2903. {
  2904. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2905. if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
  2906. yuv2yuvXinC(
  2907. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2908. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2909. dest, uDest, vDest, dstW, chrDstW);
  2910. }
  2911. else
  2912. {
  2913. ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2914. ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  2915. yuv2packedXinC(c,
  2916. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2917. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2918. dest, dstW, dstY);
  2919. }
  2920. }
  2921. }
  2922. #ifdef HAVE_MMX
  2923. __asm __volatile(SFENCE:::"memory");
  2924. __asm __volatile(EMMS:::"memory");
  2925. #endif
  2926. /* store changed local vars back in the context */
  2927. c->dstY= dstY;
  2928. c->lumBufIndex= lumBufIndex;
  2929. c->chrBufIndex= chrBufIndex;
  2930. c->lastInLumBuf= lastInLumBuf;
  2931. c->lastInChrBuf= lastInChrBuf;
  2932. return dstY - lastDstY;
  2933. }