  1. /*
  2. Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
  3. This program is free software; you can redistribute it and/or modify
  4. it under the terms of the GNU General Public License as published by
  5. the Free Software Foundation; either version 2 of the License, or
  6. (at your option) any later version.
  7. This program is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU General Public License for more details.
  11. You should have received a copy of the GNU General Public License
  12. along with this program; if not, write to the Free Software
  13. Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  14. */
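/* The block below picks per-CPU instruction variants: EMMS becomes "femms" on
   3DNow! CPUs, the prefetch hints come from 3DNow! or MMX2, SFENCE and PAVGB
   need MMX2 (PAVGB falls back to "pavgusb" with 3DNow!), and MOVNTQ() uses the
   non-temporal "movntq" only with MMX2, a plain "movq" otherwise. */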
  15. #undef REAL_MOVNTQ
  16. #undef MOVNTQ
  17. #undef PAVGB
  18. #undef PREFETCH
  19. #undef PREFETCHW
  20. #undef EMMS
  21. #undef SFENCE
  22. #ifdef HAVE_3DNOW
  23. /* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
  24. #define EMMS "femms"
  25. #else
  26. #define EMMS "emms"
  27. #endif
  28. #ifdef HAVE_3DNOW
  29. #define PREFETCH "prefetch"
  30. #define PREFETCHW "prefetchw"
  31. #elif defined ( HAVE_MMX2 )
  32. #define PREFETCH "prefetchnta"
  33. #define PREFETCHW "prefetcht0"
  34. #else
  35. #define PREFETCH "/nop"
  36. #define PREFETCHW "/nop"
  37. #endif
  38. #ifdef HAVE_MMX2
  39. #define SFENCE "sfence"
  40. #else
  41. #define SFENCE "/nop"
  42. #endif
  43. #ifdef HAVE_MMX2
  44. #define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
  45. #elif defined (HAVE_3DNOW)
  46. #define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
  47. #endif
  48. #ifdef HAVE_MMX2
  49. #define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
  50. #else
  51. #define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
  52. #endif
  53. #define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
  54. #ifdef HAVE_ALTIVEC
  55. #include "swscale_altivec_template.c"
  56. #endif
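/* YSCALEYUV2YV12X: vertical scaling pass for planar (YV12) output. It walks the
   MMX filter list at 'offset' within the context, multiply-accumulates each
   source line with its coefficient (pmulhw/paddw) on top of the rounder loaded
   from VROUNDER_OFFSET, then shifts right by 3 and packs to unsigned bytes
   before the non-temporal store to 'dest'. The _ACCURATE variant below does the
   same with 32-bit pmaddwd accumulation for better rounding. */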
  57. #define YSCALEYUV2YV12X(x, offset, dest, width) \
  58. asm volatile(\
  59. "xor %%"REG_a", %%"REG_a" \n\t"\
  60. "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
  61. "movq %%mm3, %%mm4 \n\t"\
  62. "lea " offset "(%0), %%"REG_d" \n\t"\
  63. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  64. ASMALIGN(4) /* FIXME Unroll? */\
  65. "1: \n\t"\
  66. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  67. "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
  68. "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm5\n\t" /* srcData */\
  69. "add $16, %%"REG_d" \n\t"\
  70. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  71. "test %%"REG_S", %%"REG_S" \n\t"\
  72. "pmulhw %%mm0, %%mm2 \n\t"\
  73. "pmulhw %%mm0, %%mm5 \n\t"\
  74. "paddw %%mm2, %%mm3 \n\t"\
  75. "paddw %%mm5, %%mm4 \n\t"\
  76. " jnz 1b \n\t"\
  77. "psraw $3, %%mm3 \n\t"\
  78. "psraw $3, %%mm4 \n\t"\
  79. "packuswb %%mm4, %%mm3 \n\t"\
  80. MOVNTQ(%%mm3, (%1, %%REGa))\
  81. "add $8, %%"REG_a" \n\t"\
  82. "cmp %2, %%"REG_a" \n\t"\
  83. "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
  84. "movq %%mm3, %%mm4 \n\t"\
  85. "lea " offset "(%0), %%"REG_d" \n\t"\
  86. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  87. "jb 1b \n\t"\
  88. :: "r" (&c->redDither),\
  89. "r" (dest), "p" (width)\
  90. : "%"REG_a, "%"REG_d, "%"REG_S\
  91. );
  92. #define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
  93. asm volatile(\
  94. "lea " offset "(%0), %%"REG_d" \n\t"\
  95. "xor %%"REG_a", %%"REG_a" \n\t"\
  96. "pxor %%mm4, %%mm4 \n\t"\
  97. "pxor %%mm5, %%mm5 \n\t"\
  98. "pxor %%mm6, %%mm6 \n\t"\
  99. "pxor %%mm7, %%mm7 \n\t"\
  100. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  101. ASMALIGN(4) \
  102. "1: \n\t"\
  103. "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm0\n\t" /* srcData */\
  104. "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
  105. "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
  106. "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm1\n\t" /* srcData */\
  107. "movq %%mm0, %%mm3 \n\t"\
  108. "punpcklwd %%mm1, %%mm0 \n\t"\
  109. "punpckhwd %%mm1, %%mm3 \n\t"\
  110. "movq 8(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
  111. "pmaddwd %%mm1, %%mm0 \n\t"\
  112. "pmaddwd %%mm1, %%mm3 \n\t"\
  113. "paddd %%mm0, %%mm4 \n\t"\
  114. "paddd %%mm3, %%mm5 \n\t"\
  115. "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm3\n\t" /* srcData */\
  116. "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
  117. "add $16, %%"REG_d" \n\t"\
  118. "test %%"REG_S", %%"REG_S" \n\t"\
  119. "movq %%mm2, %%mm0 \n\t"\
  120. "punpcklwd %%mm3, %%mm2 \n\t"\
  121. "punpckhwd %%mm3, %%mm0 \n\t"\
  122. "pmaddwd %%mm1, %%mm2 \n\t"\
  123. "pmaddwd %%mm1, %%mm0 \n\t"\
  124. "paddd %%mm2, %%mm6 \n\t"\
  125. "paddd %%mm0, %%mm7 \n\t"\
  126. " jnz 1b \n\t"\
  127. "psrad $16, %%mm4 \n\t"\
  128. "psrad $16, %%mm5 \n\t"\
  129. "psrad $16, %%mm6 \n\t"\
  130. "psrad $16, %%mm7 \n\t"\
  131. "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
  132. "packssdw %%mm5, %%mm4 \n\t"\
  133. "packssdw %%mm7, %%mm6 \n\t"\
  134. "paddw %%mm0, %%mm4 \n\t"\
  135. "paddw %%mm0, %%mm6 \n\t"\
  136. "psraw $3, %%mm4 \n\t"\
  137. "psraw $3, %%mm6 \n\t"\
  138. "packuswb %%mm6, %%mm4 \n\t"\
  139. MOVNTQ(%%mm4, (%1, %%REGa))\
  140. "add $8, %%"REG_a" \n\t"\
  141. "cmp %2, %%"REG_a" \n\t"\
  142. "lea " offset "(%0), %%"REG_d" \n\t"\
  143. "pxor %%mm4, %%mm4 \n\t"\
  144. "pxor %%mm5, %%mm5 \n\t"\
  145. "pxor %%mm6, %%mm6 \n\t"\
  146. "pxor %%mm7, %%mm7 \n\t"\
  147. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  148. "jb 1b \n\t"\
  149. :: "r" (&c->redDither),\
  150. "r" (dest), "p" (width)\
  151. : "%"REG_a, "%"REG_d, "%"REG_S\
  152. );
  153. #define YSCALEYUV2YV121 \
  154. "mov %2, %%"REG_a" \n\t"\
  155. ASMALIGN(4) /* FIXME Unroll? */\
  156. "1: \n\t"\
  157. "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
  158. "movq 8(%0, %%"REG_a", 2), %%mm1\n\t"\
  159. "psraw $7, %%mm0 \n\t"\
  160. "psraw $7, %%mm1 \n\t"\
  161. "packuswb %%mm1, %%mm0 \n\t"\
  162. MOVNTQ(%%mm0, (%1, %%REGa))\
  163. "add $8, %%"REG_a" \n\t"\
  164. "jnc 1b \n\t"
  165. /*
  166. :: "m" (-lumFilterSize), "m" (-chrFilterSize),
  167. "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
  168. "r" (dest), "m" (dstW),
  169. "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
  170. : "%eax", "%ebx", "%ecx", "%edx", "%esi"
  171. */
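/* YSCALEYUV2PACKEDX: vertical scaling for packed output formats. The first
   inner loop accumulates the chroma filter taps into mm3/mm4 (U/V), the second
   accumulates the luma taps into mm1/mm7 (Y1/Y2); the result is then either
   converted by YSCALEYUV2RGBX or written out directly (e.g. for YUY2). The
   _ACCURATE variant again uses pmaddwd accumulation for higher precision. */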
  172. #define YSCALEYUV2PACKEDX \
  173. asm volatile(\
  174. "xor %%"REG_a", %%"REG_a" \n\t"\
  175. ASMALIGN(4)\
  176. "nop \n\t"\
  177. "1: \n\t"\
  178. "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
  179. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  180. "movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
  181. "movq %%mm3, %%mm4 \n\t"\
  182. ASMALIGN(4)\
  183. "2: \n\t"\
  184. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  185. "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
  186. "movq 4096(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
  187. "add $16, %%"REG_d" \n\t"\
  188. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  189. "pmulhw %%mm0, %%mm2 \n\t"\
  190. "pmulhw %%mm0, %%mm5 \n\t"\
  191. "paddw %%mm2, %%mm3 \n\t"\
  192. "paddw %%mm5, %%mm4 \n\t"\
  193. "test %%"REG_S", %%"REG_S" \n\t"\
  194. " jnz 2b \n\t"\
  195. \
  196. "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
  197. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  198. "movq "VROUNDER_OFFSET"(%0), %%mm1\n\t"\
  199. "movq %%mm1, %%mm7 \n\t"\
  200. ASMALIGN(4)\
  201. "2: \n\t"\
  202. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  203. "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\
  204. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
  205. "add $16, %%"REG_d" \n\t"\
  206. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  207. "pmulhw %%mm0, %%mm2 \n\t"\
  208. "pmulhw %%mm0, %%mm5 \n\t"\
  209. "paddw %%mm2, %%mm1 \n\t"\
  210. "paddw %%mm5, %%mm7 \n\t"\
  211. "test %%"REG_S", %%"REG_S" \n\t"\
  212. " jnz 2b \n\t"\
  213. #define YSCALEYUV2PACKEDX_END\
  214. :: "r" (&c->redDither), \
  215. "m" (dummy), "m" (dummy), "m" (dummy),\
  216. "r" (dest), "m" (dstW)\
  217. : "%"REG_a, "%"REG_d, "%"REG_S\
  218. );
  219. #define YSCALEYUV2PACKEDX_ACCURATE \
  220. asm volatile(\
  221. "xor %%"REG_a", %%"REG_a" \n\t"\
  222. ASMALIGN(4)\
  223. "nop \n\t"\
  224. "1: \n\t"\
  225. "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
  226. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  227. "pxor %%mm4, %%mm4 \n\t"\
  228. "pxor %%mm5, %%mm5 \n\t"\
  229. "pxor %%mm6, %%mm6 \n\t"\
  230. "pxor %%mm7, %%mm7 \n\t"\
  231. ASMALIGN(4)\
  232. "2: \n\t"\
  233. "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\
  234. "movq 4096(%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
  235. "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
  236. "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\
  237. "movq %%mm0, %%mm3 \n\t"\
  238. "punpcklwd %%mm1, %%mm0 \n\t"\
  239. "punpckhwd %%mm1, %%mm3 \n\t"\
  240. "movq 8(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
  241. "pmaddwd %%mm1, %%mm0 \n\t"\
  242. "pmaddwd %%mm1, %%mm3 \n\t"\
  243. "paddd %%mm0, %%mm4 \n\t"\
  244. "paddd %%mm3, %%mm5 \n\t"\
  245. "movq 4096(%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
  246. "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
  247. "add $16, %%"REG_d" \n\t"\
  248. "test %%"REG_S", %%"REG_S" \n\t"\
  249. "movq %%mm2, %%mm0 \n\t"\
  250. "punpcklwd %%mm3, %%mm2 \n\t"\
  251. "punpckhwd %%mm3, %%mm0 \n\t"\
  252. "pmaddwd %%mm1, %%mm2 \n\t"\
  253. "pmaddwd %%mm1, %%mm0 \n\t"\
  254. "paddd %%mm2, %%mm6 \n\t"\
  255. "paddd %%mm0, %%mm7 \n\t"\
  256. " jnz 2b \n\t"\
  257. "psrad $16, %%mm4 \n\t"\
  258. "psrad $16, %%mm5 \n\t"\
  259. "psrad $16, %%mm6 \n\t"\
  260. "psrad $16, %%mm7 \n\t"\
  261. "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
  262. "packssdw %%mm5, %%mm4 \n\t"\
  263. "packssdw %%mm7, %%mm6 \n\t"\
  264. "paddw %%mm0, %%mm4 \n\t"\
  265. "paddw %%mm0, %%mm6 \n\t"\
  266. "movq %%mm4, "U_TEMP"(%0) \n\t"\
  267. "movq %%mm6, "V_TEMP"(%0) \n\t"\
  268. \
  269. "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
  270. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  271. "pxor %%mm1, %%mm1 \n\t"\
  272. "pxor %%mm5, %%mm5 \n\t"\
  273. "pxor %%mm7, %%mm7 \n\t"\
  274. "pxor %%mm6, %%mm6 \n\t"\
  275. ASMALIGN(4)\
  276. "2: \n\t"\
  277. "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
  278. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
  279. "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
  280. "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
  281. "movq %%mm0, %%mm3 \n\t"\
  282. "punpcklwd %%mm4, %%mm0 \n\t"\
  283. "punpckhwd %%mm4, %%mm3 \n\t"\
  284. "movq 8(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\
  285. "pmaddwd %%mm4, %%mm0 \n\t"\
  286. "pmaddwd %%mm4, %%mm3 \n\t"\
  287. "paddd %%mm0, %%mm1 \n\t"\
  288. "paddd %%mm3, %%mm5 \n\t"\
  289. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
  290. "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
  291. "add $16, %%"REG_d" \n\t"\
  292. "test %%"REG_S", %%"REG_S" \n\t"\
  293. "movq %%mm2, %%mm0 \n\t"\
  294. "punpcklwd %%mm3, %%mm2 \n\t"\
  295. "punpckhwd %%mm3, %%mm0 \n\t"\
  296. "pmaddwd %%mm4, %%mm2 \n\t"\
  297. "pmaddwd %%mm4, %%mm0 \n\t"\
  298. "paddd %%mm2, %%mm7 \n\t"\
  299. "paddd %%mm0, %%mm6 \n\t"\
  300. " jnz 2b \n\t"\
  301. "psrad $16, %%mm1 \n\t"\
  302. "psrad $16, %%mm5 \n\t"\
  303. "psrad $16, %%mm7 \n\t"\
  304. "psrad $16, %%mm6 \n\t"\
  305. "movq "VROUNDER_OFFSET"(%0), %%mm0\n\t"\
  306. "packssdw %%mm5, %%mm1 \n\t"\
  307. "packssdw %%mm6, %%mm7 \n\t"\
  308. "paddw %%mm0, %%mm1 \n\t"\
  309. "paddw %%mm0, %%mm7 \n\t"\
  310. "movq "U_TEMP"(%0), %%mm3 \n\t"\
  311. "movq "V_TEMP"(%0), %%mm4 \n\t"\
  312. #define YSCALEYUV2RGBX \
  313. "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
  314. "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
  315. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  316. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  317. "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
  318. "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
  319. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  320. "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
  321. "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
  322. "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
  323. "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
  324. "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
  325. "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
  326. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  327. "paddw %%mm3, %%mm4 \n\t"\
  328. "movq %%mm2, %%mm0 \n\t"\
  329. "movq %%mm5, %%mm6 \n\t"\
  330. "movq %%mm4, %%mm3 \n\t"\
  331. "punpcklwd %%mm2, %%mm2 \n\t"\
  332. "punpcklwd %%mm5, %%mm5 \n\t"\
  333. "punpcklwd %%mm4, %%mm4 \n\t"\
  334. "paddw %%mm1, %%mm2 \n\t"\
  335. "paddw %%mm1, %%mm5 \n\t"\
  336. "paddw %%mm1, %%mm4 \n\t"\
  337. "punpckhwd %%mm0, %%mm0 \n\t"\
  338. "punpckhwd %%mm6, %%mm6 \n\t"\
  339. "punpckhwd %%mm3, %%mm3 \n\t"\
  340. "paddw %%mm7, %%mm0 \n\t"\
  341. "paddw %%mm7, %%mm6 \n\t"\
  342. "paddw %%mm7, %%mm3 \n\t"\
  343. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  344. "packuswb %%mm0, %%mm2 \n\t"\
  345. "packuswb %%mm6, %%mm5 \n\t"\
  346. "packuswb %%mm3, %%mm4 \n\t"\
  347. "pxor %%mm7, %%mm7 \n\t"
  348. #if 0
  349. #define FULL_YSCALEYUV2RGB \
  350. "pxor %%mm7, %%mm7 \n\t"\
  351. "movd %6, %%mm6 \n\t" /*yalpha1*/\
  352. "punpcklwd %%mm6, %%mm6 \n\t"\
  353. "punpcklwd %%mm6, %%mm6 \n\t"\
  354. "movd %7, %%mm5 \n\t" /*uvalpha1*/\
  355. "punpcklwd %%mm5, %%mm5 \n\t"\
  356. "punpcklwd %%mm5, %%mm5 \n\t"\
  357. "xor %%"REG_a", %%"REG_a" \n\t"\
  358. ASMALIGN(4)\
  359. "1: \n\t"\
  360. "movq (%0, %%"REG_a", 2), %%mm0 \n\t" /*buf0[eax]*/\
  361. "movq (%1, %%"REG_a", 2), %%mm1 \n\t" /*buf1[eax]*/\
  362. "movq (%2, %%"REG_a",2), %%mm2 \n\t" /* uvbuf0[eax]*/\
  363. "movq (%3, %%"REG_a",2), %%mm3 \n\t" /* uvbuf1[eax]*/\
  364. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  365. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  366. "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  367. "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  368. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  369. "movq 4096(%2, %%"REG_a",2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  370. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  371. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  372. "movq 4096(%3, %%"REG_a",2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
  373. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  374. "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  375. "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
  376. "psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\
  377. "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
  378. \
  379. \
  380. "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  381. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  382. "pmulhw "MANGLE(ubCoeff)", %%mm3\n\t"\
  383. "psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  384. "pmulhw "MANGLE(ugCoeff)", %%mm2\n\t"\
  385. "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  386. "psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\
  387. \
  388. \
  389. "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
  390. "pmulhw "MANGLE(vrCoeff)", %%mm0\n\t"\
  391. "pmulhw "MANGLE(vgCoeff)", %%mm4\n\t"\
  392. "paddw %%mm1, %%mm3 \n\t" /* B*/\
  393. "paddw %%mm1, %%mm0 \n\t" /* R*/\
  394. "packuswb %%mm3, %%mm3 \n\t"\
  395. \
  396. "packuswb %%mm0, %%mm0 \n\t"\
  397. "paddw %%mm4, %%mm2 \n\t"\
  398. "paddw %%mm2, %%mm1 \n\t" /* G*/\
  399. \
  400. "packuswb %%mm1, %%mm1 \n\t"
  401. #endif
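/* The macros below handle the two-line case: YSCALEYUV2PACKED/YSCALEYUV2RGB
   interpolate vertically between buf0/buf1 (luma) and uvbuf0/uvbuf1 (chroma)
   using the per-line weights stored in the LUM/CHR MMX filter slots. The "...1"
   variants read a single input line only, and the "...1b" variants average the
   two chroma lines instead of interpolating. */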
  402. #define REAL_YSCALEYUV2PACKED(index, c) \
  403. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
  404. "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1\n\t"\
  405. "psraw $3, %%mm0 \n\t"\
  406. "psraw $3, %%mm1 \n\t"\
  407. "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c")\n\t"\
  408. "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c")\n\t"\
  409. "xor "#index", "#index" \n\t"\
  410. ASMALIGN(4)\
  411. "1: \n\t"\
  412. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  413. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  414. "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  415. "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  416. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  417. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  418. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
  419. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  420. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  421. "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  422. "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  423. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  424. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  425. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  426. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  427. "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
  428. "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
  429. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  430. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  431. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  432. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  433. "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  434. "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  435. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  436. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  437. #define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
  438. #define REAL_YSCALEYUV2RGB(index, c) \
  439. "xor "#index", "#index" \n\t"\
  440. ASMALIGN(4)\
  441. "1: \n\t"\
  442. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  443. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  444. "movq 4096(%2, "#index"), %%mm5\n\t" /* uvbuf0[eax+2048]*/\
  445. "movq 4096(%3, "#index"), %%mm4\n\t" /* uvbuf1[eax+2048]*/\
  446. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  447. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  448. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
  449. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  450. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  451. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  452. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  453. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  454. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  455. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  456. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  457. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  458. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  459. "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
  460. "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
  461. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  462. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  463. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  464. "movq 8(%0, "#index", 2), %%mm6\n\t" /*buf0[eax]*/\
  465. "movq 8(%1, "#index", 2), %%mm7\n\t" /*buf1[eax]*/\
  466. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  467. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  468. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  469. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6\n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  470. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  471. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  472. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  473. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  474. "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
  475. "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
  476. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  477. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  478. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  479. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  480. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  481. "paddw %%mm3, %%mm4 \n\t"\
  482. "movq %%mm2, %%mm0 \n\t"\
  483. "movq %%mm5, %%mm6 \n\t"\
  484. "movq %%mm4, %%mm3 \n\t"\
  485. "punpcklwd %%mm2, %%mm2 \n\t"\
  486. "punpcklwd %%mm5, %%mm5 \n\t"\
  487. "punpcklwd %%mm4, %%mm4 \n\t"\
  488. "paddw %%mm1, %%mm2 \n\t"\
  489. "paddw %%mm1, %%mm5 \n\t"\
  490. "paddw %%mm1, %%mm4 \n\t"\
  491. "punpckhwd %%mm0, %%mm0 \n\t"\
  492. "punpckhwd %%mm6, %%mm6 \n\t"\
  493. "punpckhwd %%mm3, %%mm3 \n\t"\
  494. "paddw %%mm7, %%mm0 \n\t"\
  495. "paddw %%mm7, %%mm6 \n\t"\
  496. "paddw %%mm7, %%mm3 \n\t"\
  497. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  498. "packuswb %%mm0, %%mm2 \n\t"\
  499. "packuswb %%mm6, %%mm5 \n\t"\
  500. "packuswb %%mm3, %%mm4 \n\t"\
  501. "pxor %%mm7, %%mm7 \n\t"
  502. #define YSCALEYUV2RGB(index, c) REAL_YSCALEYUV2RGB(index, c)
  503. #define REAL_YSCALEYUV2PACKED1(index, c) \
  504. "xor "#index", "#index" \n\t"\
  505. ASMALIGN(4)\
  506. "1: \n\t"\
  507. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  508. "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  509. "psraw $7, %%mm3 \n\t" \
  510. "psraw $7, %%mm4 \n\t" \
  511. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  512. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  513. "psraw $7, %%mm1 \n\t" \
  514. "psraw $7, %%mm7 \n\t" \
  515. #define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
  516. #define REAL_YSCALEYUV2RGB1(index, c) \
  517. "xor "#index", "#index" \n\t"\
  518. ASMALIGN(4)\
  519. "1: \n\t"\
  520. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  521. "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  522. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  523. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  524. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  525. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  526. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  527. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  528. "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
  529. "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
  530. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  531. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  532. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  533. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  534. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  535. "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
  536. "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
  537. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  538. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  539. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  540. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  541. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  542. "paddw %%mm3, %%mm4 \n\t"\
  543. "movq %%mm2, %%mm0 \n\t"\
  544. "movq %%mm5, %%mm6 \n\t"\
  545. "movq %%mm4, %%mm3 \n\t"\
  546. "punpcklwd %%mm2, %%mm2 \n\t"\
  547. "punpcklwd %%mm5, %%mm5 \n\t"\
  548. "punpcklwd %%mm4, %%mm4 \n\t"\
  549. "paddw %%mm1, %%mm2 \n\t"\
  550. "paddw %%mm1, %%mm5 \n\t"\
  551. "paddw %%mm1, %%mm4 \n\t"\
  552. "punpckhwd %%mm0, %%mm0 \n\t"\
  553. "punpckhwd %%mm6, %%mm6 \n\t"\
  554. "punpckhwd %%mm3, %%mm3 \n\t"\
  555. "paddw %%mm7, %%mm0 \n\t"\
  556. "paddw %%mm7, %%mm6 \n\t"\
  557. "paddw %%mm7, %%mm3 \n\t"\
  558. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  559. "packuswb %%mm0, %%mm2 \n\t"\
  560. "packuswb %%mm6, %%mm5 \n\t"\
  561. "packuswb %%mm3, %%mm4 \n\t"\
  562. "pxor %%mm7, %%mm7 \n\t"
  563. #define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
  564. #define REAL_YSCALEYUV2PACKED1b(index, c) \
  565. "xor "#index", "#index" \n\t"\
  566. ASMALIGN(4)\
  567. "1: \n\t"\
  568. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  569. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  570. "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  571. "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  572. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  573. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  574. "psrlw $8, %%mm3 \n\t" \
  575. "psrlw $8, %%mm4 \n\t" \
  576. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  577. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  578. "psraw $7, %%mm1 \n\t" \
  579. "psraw $7, %%mm7 \n\t"
  580. #define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
  581. // do vertical chrominance interpolation
  582. #define REAL_YSCALEYUV2RGB1b(index, c) \
  583. "xor "#index", "#index" \n\t"\
  584. ASMALIGN(4)\
  585. "1: \n\t"\
  586. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  587. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  588. "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  589. "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  590. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  591. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  592. "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
  593. "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
  594. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  595. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  596. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  597. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  598. "pmulhw "UG_COEFF"("#c"), %%mm3\n\t"\
  599. "pmulhw "VG_COEFF"("#c"), %%mm4\n\t"\
  600. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  601. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  602. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  603. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  604. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  605. "pmulhw "UB_COEFF"("#c"), %%mm2\n\t"\
  606. "pmulhw "VR_COEFF"("#c"), %%mm5\n\t"\
  607. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  608. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  609. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  610. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  611. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  612. "paddw %%mm3, %%mm4 \n\t"\
  613. "movq %%mm2, %%mm0 \n\t"\
  614. "movq %%mm5, %%mm6 \n\t"\
  615. "movq %%mm4, %%mm3 \n\t"\
  616. "punpcklwd %%mm2, %%mm2 \n\t"\
  617. "punpcklwd %%mm5, %%mm5 \n\t"\
  618. "punpcklwd %%mm4, %%mm4 \n\t"\
  619. "paddw %%mm1, %%mm2 \n\t"\
  620. "paddw %%mm1, %%mm5 \n\t"\
  621. "paddw %%mm1, %%mm4 \n\t"\
  622. "punpckhwd %%mm0, %%mm0 \n\t"\
  623. "punpckhwd %%mm6, %%mm6 \n\t"\
  624. "punpckhwd %%mm3, %%mm3 \n\t"\
  625. "paddw %%mm7, %%mm0 \n\t"\
  626. "paddw %%mm7, %%mm6 \n\t"\
  627. "paddw %%mm7, %%mm3 \n\t"\
  628. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  629. "packuswb %%mm0, %%mm2 \n\t"\
  630. "packuswb %%mm6, %%mm5 \n\t"\
  631. "packuswb %%mm3, %%mm4 \n\t"\
  632. "pxor %%mm7, %%mm7 \n\t"
  633. #define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
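/* Output writers: the WRITEBGR* macros take B in mm2, G in mm4, R in mm5 (with
   mm7 = 0) and interleave them into 32/24/16/15 bpp pixels, 8 pixels per
   iteration, looping on 'index' until 'dstw' is reached; WRITEYUY2 packs Y
   (mm1/mm7) with U/V (mm3/mm4) into YUYV instead. */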
  634. #define REAL_WRITEBGR32(dst, dstw, index) \
  635. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  636. "movq %%mm2, %%mm1 \n\t" /* B */\
  637. "movq %%mm5, %%mm6 \n\t" /* R */\
  638. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  639. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  640. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  641. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  642. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  643. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  644. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  645. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  646. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  647. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  648. \
  649. MOVNTQ(%%mm0, (dst, index, 4))\
  650. MOVNTQ(%%mm2, 8(dst, index, 4))\
  651. MOVNTQ(%%mm1, 16(dst, index, 4))\
  652. MOVNTQ(%%mm3, 24(dst, index, 4))\
  653. \
  654. "add $8, "#index" \n\t"\
  655. "cmp "#dstw", "#index" \n\t"\
  656. " jb 1b \n\t"
  657. #define WRITEBGR32(dst, dstw, index) REAL_WRITEBGR32(dst, dstw, index)
  658. #define REAL_WRITEBGR16(dst, dstw, index) \
  659. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  660. "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
  661. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  662. "psrlq $3, %%mm2 \n\t"\
  663. \
  664. "movq %%mm2, %%mm1 \n\t"\
  665. "movq %%mm4, %%mm3 \n\t"\
  666. \
  667. "punpcklbw %%mm7, %%mm3 \n\t"\
  668. "punpcklbw %%mm5, %%mm2 \n\t"\
  669. "punpckhbw %%mm7, %%mm4 \n\t"\
  670. "punpckhbw %%mm5, %%mm1 \n\t"\
  671. \
  672. "psllq $3, %%mm3 \n\t"\
  673. "psllq $3, %%mm4 \n\t"\
  674. \
  675. "por %%mm3, %%mm2 \n\t"\
  676. "por %%mm4, %%mm1 \n\t"\
  677. \
  678. MOVNTQ(%%mm2, (dst, index, 2))\
  679. MOVNTQ(%%mm1, 8(dst, index, 2))\
  680. \
  681. "add $8, "#index" \n\t"\
  682. "cmp "#dstw", "#index" \n\t"\
  683. " jb 1b \n\t"
  684. #define WRITEBGR16(dst, dstw, index) REAL_WRITEBGR16(dst, dstw, index)
  685. #define REAL_WRITEBGR15(dst, dstw, index) \
  686. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  687. "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
  688. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  689. "psrlq $3, %%mm2 \n\t"\
  690. "psrlq $1, %%mm5 \n\t"\
  691. \
  692. "movq %%mm2, %%mm1 \n\t"\
  693. "movq %%mm4, %%mm3 \n\t"\
  694. \
  695. "punpcklbw %%mm7, %%mm3 \n\t"\
  696. "punpcklbw %%mm5, %%mm2 \n\t"\
  697. "punpckhbw %%mm7, %%mm4 \n\t"\
  698. "punpckhbw %%mm5, %%mm1 \n\t"\
  699. \
  700. "psllq $2, %%mm3 \n\t"\
  701. "psllq $2, %%mm4 \n\t"\
  702. \
  703. "por %%mm3, %%mm2 \n\t"\
  704. "por %%mm4, %%mm1 \n\t"\
  705. \
  706. MOVNTQ(%%mm2, (dst, index, 2))\
  707. MOVNTQ(%%mm1, 8(dst, index, 2))\
  708. \
  709. "add $8, "#index" \n\t"\
  710. "cmp "#dstw", "#index" \n\t"\
  711. " jb 1b \n\t"
  712. #define WRITEBGR15(dst, dstw, index) REAL_WRITEBGR15(dst, dstw, index)
  713. #define WRITEBGR24OLD(dst, dstw, index) \
  714. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  715. "movq %%mm2, %%mm1 \n\t" /* B */\
  716. "movq %%mm5, %%mm6 \n\t" /* R */\
  717. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  718. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  719. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  720. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  721. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  722. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  723. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  724. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  725. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  726. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  727. \
  728. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  729. "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
  730. "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 0 */\
  731. "pand "MANGLE(bm11111000)", %%mm0\n\t" /* 00RGB000 0.5 */\
  732. "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
  733. "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
  734. "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
  735. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  736. \
  737. "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  738. "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
  739. "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
  740. "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
  741. "pand "MANGLE(bm00001111)", %%mm2\n\t" /* 0000RGBR 1 */\
  742. "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
  743. "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
  744. "pand "MANGLE(bm00000111)", %%mm4\n\t" /* 00000RGB 2 */\
  745. "pand "MANGLE(bm11111000)", %%mm1\n\t" /* 00RGB000 2.5 */\
  746. "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
  747. "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
  748. "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
  749. "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
  750. \
  751. "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
  752. "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
  753. "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
  754. "pand "MANGLE(bm00000111)", %%mm5\n\t" /* 00000RGB 3 */\
  755. "pand "MANGLE(bm11111000)", %%mm3\n\t" /* 00RGB000 3.5 */\
  756. "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
  757. "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
  758. "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
  759. \
  760. MOVNTQ(%%mm0, (dst))\
  761. MOVNTQ(%%mm2, 8(dst))\
  762. MOVNTQ(%%mm3, 16(dst))\
  763. "add $24, "#dst" \n\t"\
  764. \
  765. "add $8, "#index" \n\t"\
  766. "cmp "#dstw", "#index" \n\t"\
  767. " jb 1b \n\t"
  768. #define WRITEBGR24MMX(dst, dstw, index) \
  769. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  770. "movq %%mm2, %%mm1 \n\t" /* B */\
  771. "movq %%mm5, %%mm6 \n\t" /* R */\
  772. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  773. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  774. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  775. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  776. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  777. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  778. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  779. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  780. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  781. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  782. \
  783. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  784. "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
  785. "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
  786. "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
  787. \
  788. "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
  789. "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
  790. "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
  791. "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
  792. \
  793. "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
  794. "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
  795. "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
  796. "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
  797. \
  798. "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
  799. "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
  800. "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
  801. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  802. MOVNTQ(%%mm0, (dst))\
  803. \
  804. "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
  805. "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
  806. "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
  807. "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
  808. MOVNTQ(%%mm6, 8(dst))\
  809. \
  810. "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
  811. "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
  812. "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
  813. MOVNTQ(%%mm5, 16(dst))\
  814. \
  815. "add $24, "#dst" \n\t"\
  816. \
  817. "add $8, "#index" \n\t"\
  818. "cmp "#dstw", "#index" \n\t"\
  819. " jb 1b \n\t"
  820. #define WRITEBGR24MMX2(dst, dstw, index) \
  821. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  822. "movq "MANGLE(M24A)", %%mm0 \n\t"\
  823. "movq "MANGLE(M24C)", %%mm7 \n\t"\
  824. "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
  825. "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
  826. "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
  827. \
  828. "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
  829. "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
  830. "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
  831. \
  832. "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
  833. "por %%mm1, %%mm6 \n\t"\
  834. "por %%mm3, %%mm6 \n\t"\
  835. MOVNTQ(%%mm6, (dst))\
  836. \
  837. "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
  838. "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
  839. "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
  840. "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
  841. \
  842. "pand "MANGLE(M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
  843. "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
  844. "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
  845. \
  846. "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
  847. "por %%mm3, %%mm6 \n\t"\
  848. MOVNTQ(%%mm6, 8(dst))\
  849. \
  850. "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
  851. "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
  852. "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
  853. \
  854. "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
  855. "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
  856. "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
  857. \
  858. "por %%mm1, %%mm3 \n\t"\
  859. "por %%mm3, %%mm6 \n\t"\
  860. MOVNTQ(%%mm6, 16(dst))\
  861. \
  862. "add $24, "#dst" \n\t"\
  863. \
  864. "add $8, "#index" \n\t"\
  865. "cmp "#dstw", "#index" \n\t"\
  866. " jb 1b \n\t"
  867. #ifdef HAVE_MMX2
  868. #undef WRITEBGR24
  869. #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
  870. #else
  871. #undef WRITEBGR24
  872. #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
  873. #endif
  874. #define REAL_WRITEYUY2(dst, dstw, index) \
  875. "packuswb %%mm3, %%mm3 \n\t"\
  876. "packuswb %%mm4, %%mm4 \n\t"\
  877. "packuswb %%mm7, %%mm1 \n\t"\
  878. "punpcklbw %%mm4, %%mm3 \n\t"\
  879. "movq %%mm1, %%mm7 \n\t"\
  880. "punpcklbw %%mm3, %%mm1 \n\t"\
  881. "punpckhbw %%mm3, %%mm7 \n\t"\
  882. \
  883. MOVNTQ(%%mm1, (dst, index, 2))\
  884. MOVNTQ(%%mm7, 8(dst, index, 2))\
  885. \
  886. "add $8, "#index" \n\t"\
  887. "cmp "#dstw", "#index" \n\t"\
  888. " jb 1b \n\t"
  889. #define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
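/* yuv2yuvX: vertical scaling to planar YV12. Uses the MMX macros above when
   HAVE_MMX is defined (taking the _ACCURATE path for SWS_ACCURATE_RND),
   otherwise falls back to the AltiVec or plain C implementation. */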
  890. static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  891. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  892. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
  893. {
  894. #ifdef HAVE_MMX
  895. if(c->flags & SWS_ACCURATE_RND){
  896. if(uDest){
  897. YSCALEYUV2YV12X_ACCURATE( 0, CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
  898. YSCALEYUV2YV12X_ACCURATE(4096, CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
  899. }
  900. YSCALEYUV2YV12X_ACCURATE(0, LUM_MMX_FILTER_OFFSET, dest, dstW)
  901. }else{
  902. if(uDest){
  903. YSCALEYUV2YV12X( 0, CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
  904. YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
  905. }
  906. YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET, dest, dstW)
  907. }
  908. #else
  909. #ifdef HAVE_ALTIVEC
  910. yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
  911. chrFilter, chrSrc, chrFilterSize,
  912. dest, uDest, vDest, dstW, chrDstW);
  913. #else //HAVE_ALTIVEC
  914. yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
  915. chrFilter, chrSrc, chrFilterSize,
  916. dest, uDest, vDest, dstW, chrDstW);
  917. #endif //!HAVE_ALTIVEC
  918. #endif
  919. }
  920. static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  921. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  922. uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
  923. {
  924. yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
  925. chrFilter, chrSrc, chrFilterSize,
  926. dest, uDest, dstW, chrDstW, dstFormat);
  927. }
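/* yuv2yuv1: unscaled vertical pass (one source line per output line). The MMX
   path (YSCALEYUV2YV121) shifts the 16-bit intermediate samples right by 7 and
   packs them to bytes; the C fallback does the same shift with explicit
   clipping to 0..255. */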
  928. static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
  929. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
  930. {
  931. #ifdef HAVE_MMX
  932. if(uDest != NULL)
  933. {
  934. asm volatile(
  935. YSCALEYUV2YV121
  936. :: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
  937. "g" (-chrDstW)
  938. : "%"REG_a
  939. );
  940. asm volatile(
  941. YSCALEYUV2YV121
  942. :: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
  943. "g" (-chrDstW)
  944. : "%"REG_a
  945. );
  946. }
  947. asm volatile(
  948. YSCALEYUV2YV121
  949. :: "r" (lumSrc + dstW), "r" (dest + dstW),
  950. "g" (-dstW)
  951. : "%"REG_a
  952. );
  953. #else
  954. int i;
  955. for(i=0; i<dstW; i++)
  956. {
  957. int val= lumSrc[i]>>7;
  958. if(val&256){
  959. if(val<0) val=0;
  960. else val=255;
  961. }
  962. dest[i]= val;
  963. }
  964. if(uDest != NULL)
  965. for(i=0; i<chrDstW; i++)
  966. {
  967. int u=chrSrc[i]>>7;
  968. int v=chrSrc[i + 2048]>>7;
  969. if((u|v)&256){
  970. if(u<0) u=0;
  971. else if (u>255) u=255;
  972. if(v<0) v=0;
  973. else if (v>255) v=255;
  974. }
  975. uDest[i]= u;
  976. vDest[i]= v;
  977. }
  978. #endif
  979. }
  980. /**
  981. * vertical scale YV12 to RGB
  982. */
  983. static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  984. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  985. uint8_t *dest, long dstW, long dstY)
  986. {
  987. long dummy=0;
  988. #ifdef HAVE_MMX
  989. if(c->flags & SWS_ACCURATE_RND){
  990. switch(c->dstFormat){
  991. case IMGFMT_BGR32:
  992. YSCALEYUV2PACKEDX_ACCURATE
  993. YSCALEYUV2RGBX
  994. WRITEBGR32(%4, %5, %%REGa)
  995. YSCALEYUV2PACKEDX_END
  996. return;
  997. case IMGFMT_BGR24:
  998. YSCALEYUV2PACKEDX_ACCURATE
  999. YSCALEYUV2RGBX
  1000. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
  1001. "add %4, %%"REG_c" \n\t"
  1002. WRITEBGR24(%%REGc, %5, %%REGa)
  1003. :: "r" (&c->redDither),
  1004. "m" (dummy), "m" (dummy), "m" (dummy),
  1005. "r" (dest), "m" (dstW)
  1006. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
  1007. );
  1008. return;
  1009. case IMGFMT_BGR15:
  1010. YSCALEYUV2PACKEDX_ACCURATE
  1011. YSCALEYUV2RGBX
  1012. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1013. #ifdef DITHER1XBPP
  1014. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1015. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1016. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1017. #endif
  1018. WRITEBGR15(%4, %5, %%REGa)
  1019. YSCALEYUV2PACKEDX_END
  1020. return;
  1021. case IMGFMT_BGR16:
  1022. YSCALEYUV2PACKEDX_ACCURATE
  1023. YSCALEYUV2RGBX
  1024. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1025. #ifdef DITHER1XBPP
  1026. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1027. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1028. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1029. #endif
  1030. WRITEBGR16(%4, %5, %%REGa)
  1031. YSCALEYUV2PACKEDX_END
  1032. return;
  1033. case IMGFMT_YUY2:
  1034. YSCALEYUV2PACKEDX_ACCURATE
  1035. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1036. "psraw $3, %%mm3 \n\t"
  1037. "psraw $3, %%mm4 \n\t"
  1038. "psraw $3, %%mm1 \n\t"
  1039. "psraw $3, %%mm7 \n\t"
  1040. WRITEYUY2(%4, %5, %%REGa)
  1041. YSCALEYUV2PACKEDX_END
  1042. return;
  1043. }
  1044. }else{
  1045. switch(c->dstFormat)
  1046. {
  1047. case IMGFMT_BGR32:
  1048. YSCALEYUV2PACKEDX
  1049. YSCALEYUV2RGBX
  1050. WRITEBGR32(%4, %5, %%REGa)
  1051. YSCALEYUV2PACKEDX_END
  1052. return;
  1053. case IMGFMT_BGR24:
  1054. YSCALEYUV2PACKEDX
  1055. YSCALEYUV2RGBX
  1056. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
  1057. "add %4, %%"REG_c" \n\t"
  1058. WRITEBGR24(%%REGc, %5, %%REGa)
  1059. :: "r" (&c->redDither),
  1060. "m" (dummy), "m" (dummy), "m" (dummy),
  1061. "r" (dest), "m" (dstW)
  1062. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
  1063. );
  1064. return;
  1065. case IMGFMT_BGR15:
  1066. YSCALEYUV2PACKEDX
  1067. YSCALEYUV2RGBX
  1068. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1069. #ifdef DITHER1XBPP
  1070. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1071. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1072. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1073. #endif
  1074. WRITEBGR15(%4, %5, %%REGa)
  1075. YSCALEYUV2PACKEDX_END
  1076. return;
  1077. case IMGFMT_BGR16:
  1078. YSCALEYUV2PACKEDX
  1079. YSCALEYUV2RGBX
  1080. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1081. #ifdef DITHER1XBPP
  1082. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1083. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1084. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1085. #endif
  1086. WRITEBGR16(%4, %5, %%REGa)
  1087. YSCALEYUV2PACKEDX_END
  1088. return;
  1089. case IMGFMT_YUY2:
  1090. YSCALEYUV2PACKEDX
  1091. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1092. "psraw $3, %%mm3 \n\t"
  1093. "psraw $3, %%mm4 \n\t"
  1094. "psraw $3, %%mm1 \n\t"
  1095. "psraw $3, %%mm7 \n\t"
  1096. WRITEYUY2(%4, %5, %%REGa)
  1097. YSCALEYUV2PACKEDX_END
  1098. return;
  1099. }
  1100. }
  1101. #endif
  1102. #ifdef HAVE_ALTIVEC
  1103. /* The following list of supported dstFormat values should
  1104. match what's found in the body of altivec_yuv2packedX() */
  1105. if(c->dstFormat==IMGFMT_ABGR || c->dstFormat==IMGFMT_BGRA ||
  1106. c->dstFormat==IMGFMT_BGR24 || c->dstFormat==IMGFMT_RGB24 ||
  1107. c->dstFormat==IMGFMT_RGBA || c->dstFormat==IMGFMT_ARGB)
  1108. altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize,
  1109. chrFilter, chrSrc, chrFilterSize,
  1110. dest, dstW, dstY);
  1111. else
  1112. #endif
  1113. yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
  1114. chrFilter, chrSrc, chrFilterSize,
  1115. dest, dstW, dstY);
  1116. }
  1117. /**
  1118. * vertical bilinear scale YV12 to RGB
  1119. */
  1120. static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
  1121. uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
  1122. {
  1123. int yalpha1=yalpha^4095;
  1124. int uvalpha1=uvalpha^4095;
  1125. int i;
  1126. #if 0 //isn't used
  1127. if(flags&SWS_FULL_CHR_H_INT)
  1128. {
  1129. switch(dstFormat)
  1130. {
  1131. #ifdef HAVE_MMX
  1132. case IMGFMT_BGR32:
  1133. asm volatile(
  1134. FULL_YSCALEYUV2RGB
  1135. "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
  1136. "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
  1137. "movq %%mm3, %%mm1 \n\t"
  1138. "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
  1139. "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
  1140. MOVNTQ(%%mm3, (%4, %%REGa, 4))
  1141. MOVNTQ(%%mm1, 8(%4, %%REGa, 4))
  1142. "add $4, %%"REG_a" \n\t"
  1143. "cmp %5, %%"REG_a" \n\t"
  1144. " jb 1b \n\t"
  1145. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" ((long)dstW),
  1146. "m" (yalpha1), "m" (uvalpha1)
  1147. : "%"REG_a
  1148. );
  1149. break;
  1150. case IMGFMT_BGR24:
  1151. asm volatile(
  1152. FULL_YSCALEYUV2RGB
  1153. // lsb ... msb
  1154. "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
  1155. "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
  1156. "movq %%mm3, %%mm1 \n\t"
  1157. "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
  1158. "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
  1159. "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
  1160. "psrlq $8, %%mm3 \n\t" // GR0BGR00
  1161. "pand "MANGLE(bm00000111)", %%mm2\n\t" // BGR00000
  1162. "pand "MANGLE(bm11111000)", %%mm3\n\t" // 000BGR00
  1163. "por %%mm2, %%mm3 \n\t" // BGRBGR00
  1164. "movq %%mm1, %%mm2 \n\t"
  1165. "psllq $48, %%mm1 \n\t" // 000000BG
  1166. "por %%mm1, %%mm3 \n\t" // BGRBGRBG
  1167. "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
  1168. "psrld $16, %%mm2 \n\t" // R000R000
  1169. "psrlq $24, %%mm1 \n\t" // 0BGR0000
  1170. "por %%mm2, %%mm1 \n\t" // RBGRR000
  1171. "mov %4, %%"REG_b" \n\t"
  1172. "add %%"REG_a", %%"REG_b" \n\t"
  1173. #ifdef HAVE_MMX2
  1174. //FIXME Alignment
  1175. "movntq %%mm3, (%%"REG_b", %%"REG_a", 2)\n\t"
  1176. "movntq %%mm1, 8(%%"REG_b", %%"REG_a", 2)\n\t"
  1177. #else
  1178. "movd %%mm3, (%%"REG_b", %%"REG_a", 2) \n\t"
  1179. "psrlq $32, %%mm3 \n\t"
  1180. "movd %%mm3, 4(%%"REG_b", %%"REG_a", 2) \n\t"
  1181. "movd %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t"
  1182. #endif
  1183. "add $4, %%"REG_a" \n\t"
  1184. "cmp %5, %%"REG_a" \n\t"
  1185. " jb 1b \n\t"
  1186. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
  1187. "m" (yalpha1), "m" (uvalpha1)
  1188. : "%"REG_a, "%"REG_b
  1189. );
  1190. break;
  1191. case IMGFMT_BGR15:
  1192. asm volatile(
  1193. FULL_YSCALEYUV2RGB
  1194. #ifdef DITHER1XBPP
  1195. "paddusb "MANGLE(g5Dither)", %%mm1\n\t"
  1196. "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
  1197. "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
  1198. #endif
  1199. "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
  1200. "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
  1201. "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
  1202. "psrlw $3, %%mm3 \n\t"
  1203. "psllw $2, %%mm1 \n\t"
  1204. "psllw $7, %%mm0 \n\t"
  1205. "pand "MANGLE(g15Mask)", %%mm1 \n\t"
  1206. "pand "MANGLE(r15Mask)", %%mm0 \n\t"
  1207. "por %%mm3, %%mm1 \n\t"
  1208. "por %%mm1, %%mm0 \n\t"
  1209. MOVNTQ(%%mm0, (%4, %%REGa, 2))
  1210. "add $4, %%"REG_a" \n\t"
  1211. "cmp %5, %%"REG_a" \n\t"
  1212. " jb 1b \n\t"
  1213. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1214. "m" (yalpha1), "m" (uvalpha1)
  1215. : "%"REG_a
  1216. );
  1217. break;
  1218. case IMGFMT_BGR16:
  1219. asm volatile(
  1220. FULL_YSCALEYUV2RGB
  1221. #ifdef DITHER1XBPP
  1222. "paddusb "MANGLE(g6Dither)", %%mm1\n\t"
  1223. "paddusb "MANGLE(r5Dither)", %%mm0\n\t"
  1224. "paddusb "MANGLE(b5Dither)", %%mm3\n\t"
  1225. #endif
  1226. "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
  1227. "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
  1228. "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
  1229. "psrlw $3, %%mm3 \n\t"
  1230. "psllw $3, %%mm1 \n\t"
  1231. "psllw $8, %%mm0 \n\t"
  1232. "pand "MANGLE(g16Mask)", %%mm1 \n\t"
  1233. "pand "MANGLE(r16Mask)", %%mm0 \n\t"
  1234. "por %%mm3, %%mm1 \n\t"
  1235. "por %%mm1, %%mm0 \n\t"
  1236. MOVNTQ(%%mm0, (%4, %%REGa, 2))
  1237. "add $4, %%"REG_a" \n\t"
  1238. "cmp %5, %%"REG_a" \n\t"
  1239. " jb 1b \n\t"
  1240. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1241. "m" (yalpha1), "m" (uvalpha1)
  1242. : "%"REG_a
  1243. );
  1244. break;
  1245. #endif
  1246. case IMGFMT_RGB32:
  1247. #ifndef HAVE_MMX
  1248. case IMGFMT_BGR32:
  1249. #endif
  1250. if(dstFormat==IMGFMT_BGR32)
  1251. {
  1252. int i;
  1253. #ifdef WORDS_BIGENDIAN
  1254. dest++;
  1255. #endif
  1256. for(i=0;i<dstW;i++){
  1257. // vertical linear interpolation && yuv2rgb in a single step:
  1258. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1259. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1260. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1261. dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
  1262. dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
  1263. dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
  1264. dest+= 4;
  1265. }
  1266. }
  1267. else if(dstFormat==IMGFMT_BGR24)
  1268. {
  1269. int i;
  1270. for(i=0;i<dstW;i++){
  1271. // vertical linear interpolation && yuv2rgb in a single step:
  1272. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1273. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1274. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1275. dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
  1276. dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
  1277. dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
  1278. dest+= 3;
  1279. }
  1280. }
  1281. else if(dstFormat==IMGFMT_BGR16)
  1282. {
  1283. int i;
  1284. for(i=0;i<dstW;i++){
  1285. // vertical linear interpolation && yuv2rgb in a single step:
  1286. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1287. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1288. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1289. ((uint16_t*)dest)[i] =
  1290. clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
  1291. clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
  1292. clip_table16r[(Y + yuvtab_3343[V]) >>13];
  1293. }
  1294. }
  1295. else if(dstFormat==IMGFMT_BGR15)
  1296. {
  1297. int i;
  1298. for(i=0;i<dstW;i++){
  1299. // vertical linear interpolation && yuv2rgb in a single step:
  1300. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1301. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1302. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
  1303. ((uint16_t*)dest)[i] =
  1304. clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
  1305. clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
  1306. clip_table15r[(Y + yuvtab_3343[V]) >>13];
  1307. }
  1308. }
  1309. }//FULL_UV_IPOL
  1310. else
  1311. {
  1312. #endif // if 0
  1313. #ifdef HAVE_MMX
  1314. switch(c->dstFormat)
  1315. {
  1316. //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
  1317. case IMGFMT_BGR32:
  1318. asm volatile(
  1319. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1320. "mov %4, %%"REG_b" \n\t"
  1321. "push %%"REG_BP" \n\t"
  1322. YSCALEYUV2RGB(%%REGBP, %5)
  1323. WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
  1324. "pop %%"REG_BP" \n\t"
  1325. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1326. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1327. "a" (&c->redDither)
  1328. );
  1329. return;
  1330. case IMGFMT_BGR24:
  1331. asm volatile(
  1332. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1333. "mov %4, %%"REG_b" \n\t"
  1334. "push %%"REG_BP" \n\t"
  1335. YSCALEYUV2RGB(%%REGBP, %5)
  1336. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1337. "pop %%"REG_BP" \n\t"
  1338. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1339. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1340. "a" (&c->redDither)
  1341. );
  1342. return;
  1343. case IMGFMT_BGR15:
  1344. asm volatile(
  1345. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1346. "mov %4, %%"REG_b" \n\t"
  1347. "push %%"REG_BP" \n\t"
  1348. YSCALEYUV2RGB(%%REGBP, %5)
  1349. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1350. #ifdef DITHER1XBPP
  1351. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1352. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1353. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1354. #endif
  1355. WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
  1356. "pop %%"REG_BP" \n\t"
  1357. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1358. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1359. "a" (&c->redDither)
  1360. );
  1361. return;
  1362. case IMGFMT_BGR16:
  1363. asm volatile(
  1364. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1365. "mov %4, %%"REG_b" \n\t"
  1366. "push %%"REG_BP" \n\t"
  1367. YSCALEYUV2RGB(%%REGBP, %5)
  1368. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1369. #ifdef DITHER1XBPP
  1370. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1371. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1372. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1373. #endif
  1374. WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
  1375. "pop %%"REG_BP" \n\t"
  1376. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1377. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1378. "a" (&c->redDither)
  1379. );
  1380. return;
  1381. case IMGFMT_YUY2:
  1382. asm volatile(
  1383. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1384. "mov %4, %%"REG_b" \n\t"
  1385. "push %%"REG_BP" \n\t"
  1386. YSCALEYUV2PACKED(%%REGBP, %5)
  1387. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1388. "pop %%"REG_BP" \n\t"
  1389. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1390. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1391. "a" (&c->redDither)
  1392. );
  1393. return;
  1394. default: break;
  1395. }
  1396. #endif //HAVE_MMX
  1397. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C)
  1398. }
  1399. /**
  1400. * YV12 to RGB without scaling or interpolating
  1401. */
  1402. static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
  1403. uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
  1404. {
  1405. const int yalpha1=0;
  1406. int i;
  1407. uint16_t *buf1= buf0; //FIXME needed for the rgb1/bgr1
  1408. const int yalpha= 4096; //FIXME ...
  1409. if(flags&SWS_FULL_CHR_H_INT)
  1410. {
  1411. RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
  1412. return;
  1413. }
  1414. #ifdef HAVE_MMX
  1415. if( uvalpha < 2048 ) // note this is not correct (it shifts chrominance by 0.5 pixels) but it's a bit faster
  1416. {
  1417. switch(dstFormat)
  1418. {
  1419. case IMGFMT_BGR32:
  1420. asm volatile(
  1421. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1422. "mov %4, %%"REG_b" \n\t"
  1423. "push %%"REG_BP" \n\t"
  1424. YSCALEYUV2RGB1(%%REGBP, %5)
  1425. WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
  1426. "pop %%"REG_BP" \n\t"
  1427. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1428. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1429. "a" (&c->redDither)
  1430. );
  1431. return;
  1432. case IMGFMT_BGR24:
  1433. asm volatile(
  1434. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1435. "mov %4, %%"REG_b" \n\t"
  1436. "push %%"REG_BP" \n\t"
  1437. YSCALEYUV2RGB1(%%REGBP, %5)
  1438. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1439. "pop %%"REG_BP" \n\t"
  1440. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1441. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1442. "a" (&c->redDither)
  1443. );
  1444. return;
  1445. case IMGFMT_BGR15:
  1446. asm volatile(
  1447. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1448. "mov %4, %%"REG_b" \n\t"
  1449. "push %%"REG_BP" \n\t"
  1450. YSCALEYUV2RGB1(%%REGBP, %5)
  1451. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1452. #ifdef DITHER1XBPP
  1453. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1454. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1455. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1456. #endif
  1457. WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
  1458. "pop %%"REG_BP" \n\t"
  1459. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1460. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1461. "a" (&c->redDither)
  1462. );
  1463. return;
  1464. case IMGFMT_BGR16:
  1465. asm volatile(
  1466. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1467. "mov %4, %%"REG_b" \n\t"
  1468. "push %%"REG_BP" \n\t"
  1469. YSCALEYUV2RGB1(%%REGBP, %5)
  1470. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1471. #ifdef DITHER1XBPP
  1472. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1473. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1474. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1475. #endif
  1476. WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
  1477. "pop %%"REG_BP" \n\t"
  1478. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1479. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1480. "a" (&c->redDither)
  1481. );
  1482. return;
  1483. case IMGFMT_YUY2:
  1484. asm volatile(
  1485. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1486. "mov %4, %%"REG_b" \n\t"
  1487. "push %%"REG_BP" \n\t"
  1488. YSCALEYUV2PACKED1(%%REGBP, %5)
  1489. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1490. "pop %%"REG_BP" \n\t"
  1491. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1492. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1493. "a" (&c->redDither)
  1494. );
  1495. return;
  1496. }
  1497. }
  1498. else
  1499. {
  1500. switch(dstFormat)
  1501. {
  1502. case IMGFMT_BGR32:
  1503. asm volatile(
  1504. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1505. "mov %4, %%"REG_b" \n\t"
  1506. "push %%"REG_BP" \n\t"
  1507. YSCALEYUV2RGB1b(%%REGBP, %5)
  1508. WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
  1509. "pop %%"REG_BP" \n\t"
  1510. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1511. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1512. "a" (&c->redDither)
  1513. );
  1514. return;
  1515. case IMGFMT_BGR24:
  1516. asm volatile(
  1517. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1518. "mov %4, %%"REG_b" \n\t"
  1519. "push %%"REG_BP" \n\t"
  1520. YSCALEYUV2RGB1b(%%REGBP, %5)
  1521. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1522. "pop %%"REG_BP" \n\t"
  1523. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1524. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1525. "a" (&c->redDither)
  1526. );
  1527. return;
  1528. case IMGFMT_BGR15:
  1529. asm volatile(
  1530. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1531. "mov %4, %%"REG_b" \n\t"
  1532. "push %%"REG_BP" \n\t"
  1533. YSCALEYUV2RGB1b(%%REGBP, %5)
  1534. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1535. #ifdef DITHER1XBPP
  1536. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1537. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1538. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1539. #endif
  1540. WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
  1541. "pop %%"REG_BP" \n\t"
  1542. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1543. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1544. "a" (&c->redDither)
  1545. );
  1546. return;
  1547. case IMGFMT_BGR16:
  1548. asm volatile(
  1549. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1550. "mov %4, %%"REG_b" \n\t"
  1551. "push %%"REG_BP" \n\t"
  1552. YSCALEYUV2RGB1b(%%REGBP, %5)
  1553. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1554. #ifdef DITHER1XBPP
  1555. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1556. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1557. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1558. #endif
  1559. WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
  1560. "pop %%"REG_BP" \n\t"
  1561. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1562. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1563. "a" (&c->redDither)
  1564. );
  1565. return;
  1566. case IMGFMT_YUY2:
  1567. asm volatile(
  1568. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1569. "mov %4, %%"REG_b" \n\t"
  1570. "push %%"REG_BP" \n\t"
  1571. YSCALEYUV2PACKED1b(%%REGBP, %5)
  1572. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1573. "pop %%"REG_BP" \n\t"
  1574. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1575. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1576. "a" (&c->redDither)
  1577. );
  1578. return;
  1579. }
  1580. }
  1581. #endif
  1582. if( uvalpha < 2048 )
  1583. {
  1584. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C)
  1585. }else{
  1586. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C)
  1587. }
  1588. }
1589. //FIXME the yuy2* functions can read up to 7 samples too many
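/* extract the luma plane from packed YUY2 (Y0 U Y1 V ...) input: luma occupies every even
   byte, so the C fallback copies src[2*i]; the MMX path masks with bm01010101 and packs,
   producing 8 output samples per iteration. */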
  1590. static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width)
  1591. {
  1592. #ifdef HAVE_MMX
  1593. asm volatile(
  1594. "movq "MANGLE(bm01010101)", %%mm2\n\t"
  1595. "mov %0, %%"REG_a" \n\t"
  1596. "1: \n\t"
  1597. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1598. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1599. "pand %%mm2, %%mm0 \n\t"
  1600. "pand %%mm2, %%mm1 \n\t"
  1601. "packuswb %%mm1, %%mm0 \n\t"
  1602. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1603. "add $8, %%"REG_a" \n\t"
  1604. " js 1b \n\t"
  1605. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1606. : "%"REG_a
  1607. );
  1608. #else
  1609. int i;
  1610. for(i=0; i<width; i++)
  1611. dst[i]= src[2*i];
  1612. #endif
  1613. }
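/* extract and deinterleave the chroma of two adjacent YUY2 lines: corresponding U and V
   samples of src1 and src2 are averaged (PAVGB in the SIMD path, (a+b)>>1 in C) and written
   to the separate dstU / dstV planes. */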
  1614. static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
  1615. {
  1616. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1617. asm volatile(
  1618. "movq "MANGLE(bm01010101)", %%mm4\n\t"
  1619. "mov %0, %%"REG_a" \n\t"
  1620. "1: \n\t"
  1621. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1622. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1623. "movq (%2, %%"REG_a",4), %%mm2 \n\t"
  1624. "movq 8(%2, %%"REG_a",4), %%mm3 \n\t"
  1625. PAVGB(%%mm2, %%mm0)
  1626. PAVGB(%%mm3, %%mm1)
  1627. "psrlw $8, %%mm0 \n\t"
  1628. "psrlw $8, %%mm1 \n\t"
  1629. "packuswb %%mm1, %%mm0 \n\t"
  1630. "movq %%mm0, %%mm1 \n\t"
  1631. "psrlw $8, %%mm0 \n\t"
  1632. "pand %%mm4, %%mm1 \n\t"
  1633. "packuswb %%mm0, %%mm0 \n\t"
  1634. "packuswb %%mm1, %%mm1 \n\t"
  1635. "movd %%mm0, (%4, %%"REG_a") \n\t"
  1636. "movd %%mm1, (%3, %%"REG_a") \n\t"
  1637. "add $4, %%"REG_a" \n\t"
  1638. " js 1b \n\t"
  1639. : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
  1640. : "%"REG_a
  1641. );
  1642. #else
  1643. int i;
  1644. for(i=0; i<width; i++)
  1645. {
  1646. dstU[i]= (src1[4*i + 1] + src2[4*i + 1])>>1;
  1647. dstV[i]= (src1[4*i + 3] + src2[4*i + 3])>>1;
  1648. }
  1649. #endif
  1650. }
1651. //this is almost identical to the previous one and exists only because yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses
  1652. static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width)
  1653. {
  1654. #ifdef HAVE_MMX
  1655. asm volatile(
  1656. "mov %0, %%"REG_a" \n\t"
  1657. "1: \n\t"
  1658. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1659. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1660. "psrlw $8, %%mm0 \n\t"
  1661. "psrlw $8, %%mm1 \n\t"
  1662. "packuswb %%mm1, %%mm0 \n\t"
  1663. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1664. "add $8, %%"REG_a" \n\t"
  1665. " js 1b \n\t"
  1666. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1667. : "%"REG_a
  1668. );
  1669. #else
  1670. int i;
  1671. for(i=0; i<width; i++)
  1672. dst[i]= src[2*i+1];
  1673. #endif
  1674. }
  1675. static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
  1676. {
  1677. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1678. asm volatile(
  1679. "movq "MANGLE(bm01010101)", %%mm4\n\t"
  1680. "mov %0, %%"REG_a" \n\t"
  1681. "1: \n\t"
  1682. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1683. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1684. "movq (%2, %%"REG_a",4), %%mm2 \n\t"
  1685. "movq 8(%2, %%"REG_a",4), %%mm3 \n\t"
  1686. PAVGB(%%mm2, %%mm0)
  1687. PAVGB(%%mm3, %%mm1)
  1688. "pand %%mm4, %%mm0 \n\t"
  1689. "pand %%mm4, %%mm1 \n\t"
  1690. "packuswb %%mm1, %%mm0 \n\t"
  1691. "movq %%mm0, %%mm1 \n\t"
  1692. "psrlw $8, %%mm0 \n\t"
  1693. "pand %%mm4, %%mm1 \n\t"
  1694. "packuswb %%mm0, %%mm0 \n\t"
  1695. "packuswb %%mm1, %%mm1 \n\t"
  1696. "movd %%mm0, (%4, %%"REG_a") \n\t"
  1697. "movd %%mm1, (%3, %%"REG_a") \n\t"
  1698. "add $4, %%"REG_a" \n\t"
  1699. " js 1b \n\t"
  1700. : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
  1701. : "%"REG_a
  1702. );
  1703. #else
  1704. int i;
  1705. for(i=0; i<width; i++)
  1706. {
  1707. dstU[i]= (src1[4*i + 0] + src2[4*i + 0])>>1;
  1708. dstV[i]= (src1[4*i + 2] + src2[4*i + 2])>>1;
  1709. }
  1710. #endif
  1711. }
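/* packed 32-bit BGR to luma: a fixed-point dot product of B, G, R with the BY/GY/RY
   coefficients; the (33<<(RGB2YUV_SHIFT-1)) constant folds the +16 luma offset and the
   +0.5 rounding into a single addend. */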
  1712. static inline void RENAME(bgr32ToY)(uint8_t *dst, uint8_t *src, int width)
  1713. {
  1714. int i;
  1715. for(i=0; i<width; i++)
  1716. {
  1717. int b= ((uint32_t*)src)[i]&0xFF;
  1718. int g= (((uint32_t*)src)[i]>>8)&0xFF;
  1719. int r= (((uint32_t*)src)[i]>>16)&0xFF;
  1720. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  1721. }
  1722. }
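/* chroma from 32-bit BGR: the four pixels of a 2x2 block are summed with two masked adds,
   0xFF00FF keeping B and R in one accumulator and 0x00FF00 keeping G in the other; each
   four-sample sum fits in 10 bits, so the fields cannot spill into each other, and the
   >>(RGB2YUV_SHIFT+2) divides the result back by 4. */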
  1723. static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1724. {
  1725. int i;
  1726. for(i=0; i<width; i++)
  1727. {
  1728. const int a= ((uint32_t*)src1)[2*i+0];
  1729. const int e= ((uint32_t*)src1)[2*i+1];
  1730. const int c= ((uint32_t*)src2)[2*i+0];
  1731. const int d= ((uint32_t*)src2)[2*i+1];
  1732. const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF);
  1733. const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00);
  1734. const int b= l&0x3FF;
  1735. const int g= h>>8;
  1736. const int r= l>>16;
  1737. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1738. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1739. }
  1740. }
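/* 24-bit BGR to luma. The MMX path handles 8 pixels per iteration: each movd picks up one
   pixel's B,G,R plus the following pixel's B, pmaddwd with bgr2YCoeff does the weighted sum
   (the fourth coefficient is presumably 0 so the stray byte drops out), and bgr2YOffset adds
   the +16 luma offset at the end. */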
  1741. static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, long width)
  1742. {
  1743. #ifdef HAVE_MMX
  1744. asm volatile(
  1745. "mov %2, %%"REG_a" \n\t"
  1746. "movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t"
  1747. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1748. "pxor %%mm7, %%mm7 \n\t"
  1749. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d"\n\t"
  1750. ASMALIGN(4)
  1751. "1: \n\t"
  1752. PREFETCH" 64(%0, %%"REG_d") \n\t"
  1753. "movd (%0, %%"REG_d"), %%mm0 \n\t"
  1754. "movd 3(%0, %%"REG_d"), %%mm1 \n\t"
  1755. "punpcklbw %%mm7, %%mm0 \n\t"
  1756. "punpcklbw %%mm7, %%mm1 \n\t"
  1757. "movd 6(%0, %%"REG_d"), %%mm2 \n\t"
  1758. "movd 9(%0, %%"REG_d"), %%mm3 \n\t"
  1759. "punpcklbw %%mm7, %%mm2 \n\t"
  1760. "punpcklbw %%mm7, %%mm3 \n\t"
  1761. "pmaddwd %%mm6, %%mm0 \n\t"
  1762. "pmaddwd %%mm6, %%mm1 \n\t"
  1763. "pmaddwd %%mm6, %%mm2 \n\t"
  1764. "pmaddwd %%mm6, %%mm3 \n\t"
  1765. #ifndef FAST_BGR2YV12
  1766. "psrad $8, %%mm0 \n\t"
  1767. "psrad $8, %%mm1 \n\t"
  1768. "psrad $8, %%mm2 \n\t"
  1769. "psrad $8, %%mm3 \n\t"
  1770. #endif
  1771. "packssdw %%mm1, %%mm0 \n\t"
  1772. "packssdw %%mm3, %%mm2 \n\t"
  1773. "pmaddwd %%mm5, %%mm0 \n\t"
  1774. "pmaddwd %%mm5, %%mm2 \n\t"
  1775. "packssdw %%mm2, %%mm0 \n\t"
  1776. "psraw $7, %%mm0 \n\t"
  1777. "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
  1778. "movd 15(%0, %%"REG_d"), %%mm1 \n\t"
  1779. "punpcklbw %%mm7, %%mm4 \n\t"
  1780. "punpcklbw %%mm7, %%mm1 \n\t"
  1781. "movd 18(%0, %%"REG_d"), %%mm2 \n\t"
  1782. "movd 21(%0, %%"REG_d"), %%mm3 \n\t"
  1783. "punpcklbw %%mm7, %%mm2 \n\t"
  1784. "punpcklbw %%mm7, %%mm3 \n\t"
  1785. "pmaddwd %%mm6, %%mm4 \n\t"
  1786. "pmaddwd %%mm6, %%mm1 \n\t"
  1787. "pmaddwd %%mm6, %%mm2 \n\t"
  1788. "pmaddwd %%mm6, %%mm3 \n\t"
  1789. #ifndef FAST_BGR2YV12
  1790. "psrad $8, %%mm4 \n\t"
  1791. "psrad $8, %%mm1 \n\t"
  1792. "psrad $8, %%mm2 \n\t"
  1793. "psrad $8, %%mm3 \n\t"
  1794. #endif
  1795. "packssdw %%mm1, %%mm4 \n\t"
  1796. "packssdw %%mm3, %%mm2 \n\t"
  1797. "pmaddwd %%mm5, %%mm4 \n\t"
  1798. "pmaddwd %%mm5, %%mm2 \n\t"
  1799. "add $24, %%"REG_d" \n\t"
  1800. "packssdw %%mm2, %%mm4 \n\t"
  1801. "psraw $7, %%mm4 \n\t"
  1802. "packuswb %%mm4, %%mm0 \n\t"
  1803. "paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t"
  1804. "movq %%mm0, (%1, %%"REG_a") \n\t"
  1805. "add $8, %%"REG_a" \n\t"
  1806. " js 1b \n\t"
  1807. : : "r" (src+width*3), "r" (dst+width), "g" (-width)
  1808. : "%"REG_a, "%"REG_d
  1809. );
  1810. #else
  1811. int i;
  1812. for(i=0; i<width; i++)
  1813. {
  1814. int b= src[i*3+0];
  1815. int g= src[i*3+1];
  1816. int r= src[i*3+2];
  1817. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  1818. }
  1819. #endif
  1820. }
  1821. static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
  1822. {
  1823. #ifdef HAVE_MMX
  1824. asm volatile(
  1825. "mov %4, %%"REG_a" \n\t"
  1826. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1827. "movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t"
  1828. "pxor %%mm7, %%mm7 \n\t"
  1829. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
  1830. "add %%"REG_d", %%"REG_d" \n\t"
  1831. ASMALIGN(4)
  1832. "1: \n\t"
  1833. PREFETCH" 64(%0, %%"REG_d") \n\t"
  1834. PREFETCH" 64(%1, %%"REG_d") \n\t"
  1835. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1836. "movq (%0, %%"REG_d"), %%mm0 \n\t"
  1837. "movq (%1, %%"REG_d"), %%mm1 \n\t"
  1838. "movq 6(%0, %%"REG_d"), %%mm2 \n\t"
  1839. "movq 6(%1, %%"REG_d"), %%mm3 \n\t"
  1840. PAVGB(%%mm1, %%mm0)
  1841. PAVGB(%%mm3, %%mm2)
  1842. "movq %%mm0, %%mm1 \n\t"
  1843. "movq %%mm2, %%mm3 \n\t"
  1844. "psrlq $24, %%mm0 \n\t"
  1845. "psrlq $24, %%mm2 \n\t"
  1846. PAVGB(%%mm1, %%mm0)
  1847. PAVGB(%%mm3, %%mm2)
  1848. "punpcklbw %%mm7, %%mm0 \n\t"
  1849. "punpcklbw %%mm7, %%mm2 \n\t"
  1850. #else
  1851. "movd (%0, %%"REG_d"), %%mm0 \n\t"
  1852. "movd (%1, %%"REG_d"), %%mm1 \n\t"
  1853. "movd 3(%0, %%"REG_d"), %%mm2 \n\t"
  1854. "movd 3(%1, %%"REG_d"), %%mm3 \n\t"
  1855. "punpcklbw %%mm7, %%mm0 \n\t"
  1856. "punpcklbw %%mm7, %%mm1 \n\t"
  1857. "punpcklbw %%mm7, %%mm2 \n\t"
  1858. "punpcklbw %%mm7, %%mm3 \n\t"
  1859. "paddw %%mm1, %%mm0 \n\t"
  1860. "paddw %%mm3, %%mm2 \n\t"
  1861. "paddw %%mm2, %%mm0 \n\t"
  1862. "movd 6(%0, %%"REG_d"), %%mm4 \n\t"
  1863. "movd 6(%1, %%"REG_d"), %%mm1 \n\t"
  1864. "movd 9(%0, %%"REG_d"), %%mm2 \n\t"
  1865. "movd 9(%1, %%"REG_d"), %%mm3 \n\t"
  1866. "punpcklbw %%mm7, %%mm4 \n\t"
  1867. "punpcklbw %%mm7, %%mm1 \n\t"
  1868. "punpcklbw %%mm7, %%mm2 \n\t"
  1869. "punpcklbw %%mm7, %%mm3 \n\t"
  1870. "paddw %%mm1, %%mm4 \n\t"
  1871. "paddw %%mm3, %%mm2 \n\t"
  1872. "paddw %%mm4, %%mm2 \n\t"
  1873. "psrlw $2, %%mm0 \n\t"
  1874. "psrlw $2, %%mm2 \n\t"
  1875. #endif
  1876. "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
  1877. "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
  1878. "pmaddwd %%mm0, %%mm1 \n\t"
  1879. "pmaddwd %%mm2, %%mm3 \n\t"
  1880. "pmaddwd %%mm6, %%mm0 \n\t"
  1881. "pmaddwd %%mm6, %%mm2 \n\t"
  1882. #ifndef FAST_BGR2YV12
  1883. "psrad $8, %%mm0 \n\t"
  1884. "psrad $8, %%mm1 \n\t"
  1885. "psrad $8, %%mm2 \n\t"
  1886. "psrad $8, %%mm3 \n\t"
  1887. #endif
  1888. "packssdw %%mm2, %%mm0 \n\t"
  1889. "packssdw %%mm3, %%mm1 \n\t"
  1890. "pmaddwd %%mm5, %%mm0 \n\t"
  1891. "pmaddwd %%mm5, %%mm1 \n\t"
  1892. "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
  1893. "psraw $7, %%mm0 \n\t"
  1894. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1895. "movq 12(%0, %%"REG_d"), %%mm4 \n\t"
  1896. "movq 12(%1, %%"REG_d"), %%mm1 \n\t"
  1897. "movq 18(%0, %%"REG_d"), %%mm2 \n\t"
  1898. "movq 18(%1, %%"REG_d"), %%mm3 \n\t"
  1899. PAVGB(%%mm1, %%mm4)
  1900. PAVGB(%%mm3, %%mm2)
  1901. "movq %%mm4, %%mm1 \n\t"
  1902. "movq %%mm2, %%mm3 \n\t"
  1903. "psrlq $24, %%mm4 \n\t"
  1904. "psrlq $24, %%mm2 \n\t"
  1905. PAVGB(%%mm1, %%mm4)
  1906. PAVGB(%%mm3, %%mm2)
  1907. "punpcklbw %%mm7, %%mm4 \n\t"
  1908. "punpcklbw %%mm7, %%mm2 \n\t"
  1909. #else
  1910. "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
  1911. "movd 12(%1, %%"REG_d"), %%mm1 \n\t"
  1912. "movd 15(%0, %%"REG_d"), %%mm2 \n\t"
  1913. "movd 15(%1, %%"REG_d"), %%mm3 \n\t"
  1914. "punpcklbw %%mm7, %%mm4 \n\t"
  1915. "punpcklbw %%mm7, %%mm1 \n\t"
  1916. "punpcklbw %%mm7, %%mm2 \n\t"
  1917. "punpcklbw %%mm7, %%mm3 \n\t"
  1918. "paddw %%mm1, %%mm4 \n\t"
  1919. "paddw %%mm3, %%mm2 \n\t"
  1920. "paddw %%mm2, %%mm4 \n\t"
  1921. "movd 18(%0, %%"REG_d"), %%mm5 \n\t"
  1922. "movd 18(%1, %%"REG_d"), %%mm1 \n\t"
  1923. "movd 21(%0, %%"REG_d"), %%mm2 \n\t"
  1924. "movd 21(%1, %%"REG_d"), %%mm3 \n\t"
  1925. "punpcklbw %%mm7, %%mm5 \n\t"
  1926. "punpcklbw %%mm7, %%mm1 \n\t"
  1927. "punpcklbw %%mm7, %%mm2 \n\t"
  1928. "punpcklbw %%mm7, %%mm3 \n\t"
  1929. "paddw %%mm1, %%mm5 \n\t"
  1930. "paddw %%mm3, %%mm2 \n\t"
  1931. "paddw %%mm5, %%mm2 \n\t"
  1932. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1933. "psrlw $2, %%mm4 \n\t"
  1934. "psrlw $2, %%mm2 \n\t"
  1935. #endif
  1936. "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
  1937. "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
  1938. "pmaddwd %%mm4, %%mm1 \n\t"
  1939. "pmaddwd %%mm2, %%mm3 \n\t"
  1940. "pmaddwd %%mm6, %%mm4 \n\t"
  1941. "pmaddwd %%mm6, %%mm2 \n\t"
  1942. #ifndef FAST_BGR2YV12
  1943. "psrad $8, %%mm4 \n\t"
  1944. "psrad $8, %%mm1 \n\t"
  1945. "psrad $8, %%mm2 \n\t"
  1946. "psrad $8, %%mm3 \n\t"
  1947. #endif
  1948. "packssdw %%mm2, %%mm4 \n\t"
  1949. "packssdw %%mm3, %%mm1 \n\t"
  1950. "pmaddwd %%mm5, %%mm4 \n\t"
  1951. "pmaddwd %%mm5, %%mm1 \n\t"
  1952. "add $24, %%"REG_d" \n\t"
  1953. "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
  1954. "psraw $7, %%mm4 \n\t"
  1955. "movq %%mm0, %%mm1 \n\t"
  1956. "punpckldq %%mm4, %%mm0 \n\t"
  1957. "punpckhdq %%mm4, %%mm1 \n\t"
  1958. "packsswb %%mm1, %%mm0 \n\t"
  1959. "paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t"
  1960. "movd %%mm0, (%2, %%"REG_a") \n\t"
  1961. "punpckhdq %%mm0, %%mm0 \n\t"
  1962. "movd %%mm0, (%3, %%"REG_a") \n\t"
  1963. "add $4, %%"REG_a" \n\t"
  1964. " js 1b \n\t"
  1965. : : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width)
  1966. : "%"REG_a, "%"REG_d
  1967. );
  1968. #else
  1969. int i;
  1970. for(i=0; i<width; i++)
  1971. {
  1972. int b= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
  1973. int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
  1974. int r= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
  1975. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1976. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  1977. }
  1978. #endif
  1979. }
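/* 16-bit BGR (5-6-5) to luma: r and b are 5-bit, g is 6-bit, so the 2* factors together with
   the >>(RGB2YUV_SHIFT-2) bring all three channels to roughly 8-bit weight before the usual
   coefficients are applied; +16 is the luma offset. */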
  1980. static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width)
  1981. {
  1982. int i;
  1983. for(i=0; i<width; i++)
  1984. {
  1985. int d= ((uint16_t*)src)[i];
  1986. int b= d&0x1F;
  1987. int g= (d>>5)&0x3F;
  1988. int r= (d>>11)&0x1F;
  1989. dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
  1990. }
  1991. }
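/* chroma from 16-bit BGR: two horizontally adjacent pixels from each of the two input lines
   are summed entirely inside 32-bit integers; the masks and shifts keep every 5/6-bit field
   far enough apart that the four-sample sums (at most 7 bits) never collide, and only then
   are the coefficients applied. bgr15ToUV below uses the same trick with 5-5-5 masks. */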
  1992. static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1993. {
  1994. int i;
  1995. for(i=0; i<width; i++)
  1996. {
  1997. int d0= ((uint32_t*)src1)[i];
  1998. int d1= ((uint32_t*)src2)[i];
  1999. int dl= (d0&0x07E0F81F) + (d1&0x07E0F81F);
  2000. int dh= ((d0>>5)&0x07C0F83F) + ((d1>>5)&0x07C0F83F);
  2001. int dh2= (dh>>11) + (dh<<21);
  2002. int d= dh2 + dl;
  2003. int b= d&0x7F;
  2004. int r= (d>>11)&0x7F;
  2005. int g= d>>21;
  2006. dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
  2007. dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+2-2)) + 128;
  2008. }
  2009. }
  2010. static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width)
  2011. {
  2012. int i;
  2013. for(i=0; i<width; i++)
  2014. {
  2015. int d= ((uint16_t*)src)[i];
  2016. int b= d&0x1F;
  2017. int g= (d>>5)&0x1F;
  2018. int r= (d>>10)&0x1F;
  2019. dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
  2020. }
  2021. }
  2022. static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  2023. {
  2024. int i;
  2025. for(i=0; i<width; i++)
  2026. {
  2027. int d0= ((uint32_t*)src1)[i];
  2028. int d1= ((uint32_t*)src2)[i];
  2029. int dl= (d0&0x03E07C1F) + (d1&0x03E07C1F);
  2030. int dh= ((d0>>5)&0x03E0F81F) + ((d1>>5)&0x03E0F81F);
  2031. int dh2= (dh>>11) + (dh<<21);
  2032. int d= dh2 + dl;
  2033. int b= d&0x7F;
  2034. int r= (d>>10)&0x7F;
  2035. int g= d>>21;
  2036. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
  2037. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2-3)) + 128;
  2038. }
  2039. }
  2040. static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width)
  2041. {
  2042. int i;
  2043. for(i=0; i<width; i++)
  2044. {
  2045. int r= ((uint32_t*)src)[i]&0xFF;
  2046. int g= (((uint32_t*)src)[i]>>8)&0xFF;
  2047. int b= (((uint32_t*)src)[i]>>16)&0xFF;
  2048. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  2049. }
  2050. }
  2051. static inline void RENAME(rgb32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  2052. {
  2053. int i;
  2054. for(i=0; i<width; i++)
  2055. {
  2056. const int a= ((uint32_t*)src1)[2*i+0];
  2057. const int e= ((uint32_t*)src1)[2*i+1];
  2058. const int c= ((uint32_t*)src2)[2*i+0];
  2059. const int d= ((uint32_t*)src2)[2*i+1];
  2060. const int l= (a&0xFF00FF) + (e&0xFF00FF) + (c&0xFF00FF) + (d&0xFF00FF);
  2061. const int h= (a&0x00FF00) + (e&0x00FF00) + (c&0x00FF00) + (d&0x00FF00);
  2062. const int r= l&0x3FF;
  2063. const int g= h>>8;
  2064. const int b= l>>16;
  2065. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  2066. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  2067. }
  2068. }
  2069. static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, int width)
  2070. {
  2071. int i;
  2072. for(i=0; i<width; i++)
  2073. {
  2074. int r= src[i*3+0];
  2075. int g= src[i*3+1];
  2076. int b= src[i*3+2];
  2077. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  2078. }
  2079. }
  2080. static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  2081. {
  2082. int i;
  2083. for(i=0; i<width; i++)
  2084. {
  2085. int r= src1[6*i + 0] + src1[6*i + 3] + src2[6*i + 0] + src2[6*i + 3];
  2086. int g= src1[6*i + 1] + src1[6*i + 4] + src2[6*i + 1] + src2[6*i + 4];
  2087. int b= src1[6*i + 2] + src1[6*i + 5] + src2[6*i + 2] + src2[6*i + 5];
  2088. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+2)) + 128;
  2089. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+2)) + 128;
  2090. }
  2091. }
  2092. // Bilinear / Bicubic scaling
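/* generic horizontal scaler: for every output sample an FIR filter of filterSize taps is
   applied starting at src[filterPos[i]], i.e.
      dst[i] = clip( (sum_j src[filterPos[i]+j] * filter[filterSize*i + j]) >> 7, 0, (1<<15)-1 )
   which yields 15-bit intermediate samples; the MMX code below special-cases filterSize 4 and 8. */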
  2093. static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
  2094. int16_t *filter, int16_t *filterPos, long filterSize)
  2095. {
  2096. #ifdef HAVE_MMX
  2097. assert(filterSize % 4 == 0 && filterSize>0);
2098. if(filterSize==4) // always true for upscaling, sometimes for downscaling too
  2099. {
  2100. long counter= -2*dstW;
  2101. filter-= counter*2;
  2102. filterPos-= counter/2;
  2103. dst-= counter/2;
  2104. asm volatile(
  2105. #if defined(PIC)
  2106. "push %%"REG_b" \n\t"
  2107. #endif
  2108. "pxor %%mm7, %%mm7 \n\t"
  2109. "movq "MANGLE(w02)", %%mm6 \n\t"
  2110. "push %%"REG_BP" \n\t" // we use 7 regs here ...
  2111. "mov %%"REG_a", %%"REG_BP" \n\t"
  2112. ASMALIGN(4)
  2113. "1: \n\t"
  2114. "movzwl (%2, %%"REG_BP"), %%eax \n\t"
  2115. "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
  2116. "movq (%1, %%"REG_BP", 4), %%mm1\n\t"
  2117. "movq 8(%1, %%"REG_BP", 4), %%mm3\n\t"
  2118. "movd (%3, %%"REG_a"), %%mm0 \n\t"
  2119. "movd (%3, %%"REG_b"), %%mm2 \n\t"
  2120. "punpcklbw %%mm7, %%mm0 \n\t"
  2121. "punpcklbw %%mm7, %%mm2 \n\t"
  2122. "pmaddwd %%mm1, %%mm0 \n\t"
  2123. "pmaddwd %%mm2, %%mm3 \n\t"
  2124. "psrad $8, %%mm0 \n\t"
  2125. "psrad $8, %%mm3 \n\t"
  2126. "packssdw %%mm3, %%mm0 \n\t"
  2127. "pmaddwd %%mm6, %%mm0 \n\t"
  2128. "packssdw %%mm0, %%mm0 \n\t"
  2129. "movd %%mm0, (%4, %%"REG_BP") \n\t"
  2130. "add $4, %%"REG_BP" \n\t"
  2131. " jnc 1b \n\t"
  2132. "pop %%"REG_BP" \n\t"
  2133. #if defined(PIC)
  2134. "pop %%"REG_b" \n\t"
  2135. #endif
  2136. : "+a" (counter)
  2137. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  2138. #if !defined(PIC)
  2139. : "%"REG_b
  2140. #endif
  2141. );
  2142. }
  2143. else if(filterSize==8)
  2144. {
  2145. long counter= -2*dstW;
  2146. filter-= counter*4;
  2147. filterPos-= counter/2;
  2148. dst-= counter/2;
  2149. asm volatile(
  2150. #if defined(PIC)
  2151. "push %%"REG_b" \n\t"
  2152. #endif
  2153. "pxor %%mm7, %%mm7 \n\t"
  2154. "movq "MANGLE(w02)", %%mm6 \n\t"
  2155. "push %%"REG_BP" \n\t" // we use 7 regs here ...
  2156. "mov %%"REG_a", %%"REG_BP" \n\t"
  2157. ASMALIGN(4)
  2158. "1: \n\t"
  2159. "movzwl (%2, %%"REG_BP"), %%eax \n\t"
  2160. "movzwl 2(%2, %%"REG_BP"), %%ebx\n\t"
  2161. "movq (%1, %%"REG_BP", 8), %%mm1\n\t"
  2162. "movq 16(%1, %%"REG_BP", 8), %%mm3\n\t"
  2163. "movd (%3, %%"REG_a"), %%mm0 \n\t"
  2164. "movd (%3, %%"REG_b"), %%mm2 \n\t"
  2165. "punpcklbw %%mm7, %%mm0 \n\t"
  2166. "punpcklbw %%mm7, %%mm2 \n\t"
  2167. "pmaddwd %%mm1, %%mm0 \n\t"
  2168. "pmaddwd %%mm2, %%mm3 \n\t"
  2169. "movq 8(%1, %%"REG_BP", 8), %%mm1\n\t"
  2170. "movq 24(%1, %%"REG_BP", 8), %%mm5\n\t"
  2171. "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
  2172. "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
  2173. "punpcklbw %%mm7, %%mm4 \n\t"
  2174. "punpcklbw %%mm7, %%mm2 \n\t"
  2175. "pmaddwd %%mm1, %%mm4 \n\t"
  2176. "pmaddwd %%mm2, %%mm5 \n\t"
  2177. "paddd %%mm4, %%mm0 \n\t"
  2178. "paddd %%mm5, %%mm3 \n\t"
  2179. "psrad $8, %%mm0 \n\t"
  2180. "psrad $8, %%mm3 \n\t"
  2181. "packssdw %%mm3, %%mm0 \n\t"
  2182. "pmaddwd %%mm6, %%mm0 \n\t"
  2183. "packssdw %%mm0, %%mm0 \n\t"
  2184. "movd %%mm0, (%4, %%"REG_BP") \n\t"
  2185. "add $4, %%"REG_BP" \n\t"
  2186. " jnc 1b \n\t"
  2187. "pop %%"REG_BP" \n\t"
  2188. #if defined(PIC)
  2189. "pop %%"REG_b" \n\t"
  2190. #endif
  2191. : "+a" (counter)
  2192. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  2193. #if !defined(PIC)
  2194. : "%"REG_b
  2195. #endif
  2196. );
  2197. }
  2198. else
  2199. {
  2200. uint8_t *offset = src+filterSize;
  2201. long counter= -2*dstW;
  2202. // filter-= counter*filterSize/2;
  2203. filterPos-= counter/2;
  2204. dst-= counter/2;
  2205. asm volatile(
  2206. "pxor %%mm7, %%mm7 \n\t"
  2207. "movq "MANGLE(w02)", %%mm6 \n\t"
  2208. ASMALIGN(4)
  2209. "1: \n\t"
  2210. "mov %2, %%"REG_c" \n\t"
  2211. "movzwl (%%"REG_c", %0), %%eax \n\t"
  2212. "movzwl 2(%%"REG_c", %0), %%edx \n\t"
  2213. "mov %5, %%"REG_c" \n\t"
  2214. "pxor %%mm4, %%mm4 \n\t"
  2215. "pxor %%mm5, %%mm5 \n\t"
  2216. "2: \n\t"
  2217. "movq (%1), %%mm1 \n\t"
  2218. "movq (%1, %6), %%mm3 \n\t"
  2219. "movd (%%"REG_c", %%"REG_a"), %%mm0\n\t"
  2220. "movd (%%"REG_c", %%"REG_d"), %%mm2\n\t"
  2221. "punpcklbw %%mm7, %%mm0 \n\t"
  2222. "punpcklbw %%mm7, %%mm2 \n\t"
  2223. "pmaddwd %%mm1, %%mm0 \n\t"
  2224. "pmaddwd %%mm2, %%mm3 \n\t"
  2225. "paddd %%mm3, %%mm5 \n\t"
  2226. "paddd %%mm0, %%mm4 \n\t"
  2227. "add $8, %1 \n\t"
  2228. "add $4, %%"REG_c" \n\t"
  2229. "cmp %4, %%"REG_c" \n\t"
  2230. " jb 2b \n\t"
  2231. "add %6, %1 \n\t"
  2232. "psrad $8, %%mm4 \n\t"
  2233. "psrad $8, %%mm5 \n\t"
  2234. "packssdw %%mm5, %%mm4 \n\t"
  2235. "pmaddwd %%mm6, %%mm4 \n\t"
  2236. "packssdw %%mm4, %%mm4 \n\t"
  2237. "mov %3, %%"REG_a" \n\t"
  2238. "movd %%mm4, (%%"REG_a", %0) \n\t"
  2239. "add $4, %0 \n\t"
  2240. " jnc 1b \n\t"
  2241. : "+r" (counter), "+r" (filter)
  2242. : "m" (filterPos), "m" (dst), "m"(offset),
  2243. "m" (src), "r" (filterSize*2)
  2244. : "%"REG_a, "%"REG_c, "%"REG_d
  2245. );
  2246. }
  2247. #else
  2248. #ifdef HAVE_ALTIVEC
  2249. hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
  2250. #else
  2251. int i;
  2252. for(i=0; i<dstW; i++)
  2253. {
  2254. int j;
  2255. int srcPos= filterPos[i];
  2256. int val=0;
  2257. // printf("filterPos: %d\n", filterPos[i]);
  2258. for(j=0; j<filterSize; j++)
  2259. {
  2260. // printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
  2261. val += ((int)src[srcPos + j])*filter[filterSize*i + j];
  2262. }
  2263. // filter += hFilterSize;
  2264. dst[i] = FFMIN(FFMAX(0, val>>7), (1<<15)-1); // the cubic equation does overflow ...
  2265. // dst[i] = val>>7;
  2266. }
  2267. #endif
  2268. #endif
  2269. }
  2270. // *** horizontal scale Y line to temp buffer
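/* convert the input line to plain 8-bit luma first if necessary (packed YUV / RGB sources go
   through formatConvBuffer), then scale horizontally: either with hScale() above, with the
   MMX2 code pointed to by funnyYCode, or with the fast-bilinear asm/C loop below. */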
  2271. static inline void RENAME(hyscale)(uint16_t *dst, long dstWidth, uint8_t *src, int srcW, int xInc,
  2272. int flags, int canMMX2BeUsed, int16_t *hLumFilter,
  2273. int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
  2274. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  2275. int32_t *mmx2FilterPos)
  2276. {
  2277. if(srcFormat==IMGFMT_YUY2)
  2278. {
  2279. RENAME(yuy2ToY)(formatConvBuffer, src, srcW);
  2280. src= formatConvBuffer;
  2281. }
  2282. else if(srcFormat==IMGFMT_UYVY)
  2283. {
  2284. RENAME(uyvyToY)(formatConvBuffer, src, srcW);
  2285. src= formatConvBuffer;
  2286. }
  2287. else if(srcFormat==IMGFMT_BGR32)
  2288. {
  2289. RENAME(bgr32ToY)(formatConvBuffer, src, srcW);
  2290. src= formatConvBuffer;
  2291. }
  2292. else if(srcFormat==IMGFMT_BGR24)
  2293. {
  2294. RENAME(bgr24ToY)(formatConvBuffer, src, srcW);
  2295. src= formatConvBuffer;
  2296. }
  2297. else if(srcFormat==IMGFMT_BGR16)
  2298. {
  2299. RENAME(bgr16ToY)(formatConvBuffer, src, srcW);
  2300. src= formatConvBuffer;
  2301. }
  2302. else if(srcFormat==IMGFMT_BGR15)
  2303. {
  2304. RENAME(bgr15ToY)(formatConvBuffer, src, srcW);
  2305. src= formatConvBuffer;
  2306. }
  2307. else if(srcFormat==IMGFMT_RGB32)
  2308. {
  2309. RENAME(rgb32ToY)(formatConvBuffer, src, srcW);
  2310. src= formatConvBuffer;
  2311. }
  2312. else if(srcFormat==IMGFMT_RGB24)
  2313. {
  2314. RENAME(rgb24ToY)(formatConvBuffer, src, srcW);
  2315. src= formatConvBuffer;
  2316. }
  2317. #ifdef HAVE_MMX
2318. // use the new MMX scaler if MMX2 can't be used (it is faster than the plain x86 asm one)
  2319. if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2320. #else
  2321. if(!(flags&SWS_FAST_BILINEAR))
  2322. #endif
  2323. {
  2324. RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
  2325. }
  2326. else // Fast Bilinear upscale / crap downscale
  2327. {
  2328. #if defined(ARCH_X86) || defined(ARCH_X86_64)
  2329. #ifdef HAVE_MMX2
  2330. int i;
  2331. #if defined(PIC)
  2332. uint64_t ebxsave __attribute__((aligned(8)));
  2333. #endif
  2334. if(canMMX2BeUsed)
  2335. {
  2336. asm volatile(
  2337. #if defined(PIC)
  2338. "mov %%"REG_b", %5 \n\t"
  2339. #endif
  2340. "pxor %%mm7, %%mm7 \n\t"
  2341. "mov %0, %%"REG_c" \n\t"
  2342. "mov %1, %%"REG_D" \n\t"
  2343. "mov %2, %%"REG_d" \n\t"
  2344. "mov %3, %%"REG_b" \n\t"
  2345. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2346. PREFETCH" (%%"REG_c") \n\t"
  2347. PREFETCH" 32(%%"REG_c") \n\t"
  2348. PREFETCH" 64(%%"REG_c") \n\t"
  2349. #ifdef ARCH_X86_64
  2350. #define FUNNY_Y_CODE \
  2351. "movl (%%"REG_b"), %%esi \n\t"\
  2352. "call *%4 \n\t"\
  2353. "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
  2354. "add %%"REG_S", %%"REG_c" \n\t"\
  2355. "add %%"REG_a", %%"REG_D" \n\t"\
  2356. "xor %%"REG_a", %%"REG_a" \n\t"\
  2357. #else
  2358. #define FUNNY_Y_CODE \
  2359. "movl (%%"REG_b"), %%esi \n\t"\
  2360. "call *%4 \n\t"\
  2361. "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
  2362. "add %%"REG_a", %%"REG_D" \n\t"\
  2363. "xor %%"REG_a", %%"REG_a" \n\t"\
  2364. #endif
  2365. FUNNY_Y_CODE
  2366. FUNNY_Y_CODE
  2367. FUNNY_Y_CODE
  2368. FUNNY_Y_CODE
  2369. FUNNY_Y_CODE
  2370. FUNNY_Y_CODE
  2371. FUNNY_Y_CODE
  2372. FUNNY_Y_CODE
  2373. #if defined(PIC)
  2374. "mov %5, %%"REG_b" \n\t"
  2375. #endif
  2376. :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2377. "m" (funnyYCode)
  2378. #if defined(PIC)
  2379. ,"m" (ebxsave)
  2380. #endif
  2381. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
  2382. #if !defined(PIC)
  2383. ,"%"REG_b
  2384. #endif
  2385. );
  2386. for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
  2387. }
  2388. else
  2389. {
  2390. #endif
  2391. long xInc_shr16 = xInc >> 16;
  2392. uint16_t xInc_mask = xInc & 0xffff;
  2393. //NO MMX just normal asm ...
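/* 16.16 fixed-point source walk: ecx accumulates the fractional position, the adc folds its
   carry into the integer index xx, and each output is src[xx] + (src[xx+1]-src[xx])*frac
   scaled by 128, matching the <<7 of the C fallback further down. */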
  2394. asm volatile(
  2395. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2396. "xor %%"REG_d", %%"REG_d" \n\t" // xx
  2397. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2398. ASMALIGN(4)
  2399. "1: \n\t"
  2400. "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
  2401. "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2402. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2403. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2404. "shll $16, %%edi \n\t"
  2405. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2406. "mov %1, %%"REG_D" \n\t"
  2407. "shrl $9, %%esi \n\t"
  2408. "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
  2409. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2410. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2411. "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
  2412. "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2413. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2414. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2415. "shll $16, %%edi \n\t"
  2416. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2417. "mov %1, %%"REG_D" \n\t"
  2418. "shrl $9, %%esi \n\t"
  2419. "movw %%si, 2(%%"REG_D", %%"REG_a", 2)\n\t"
  2420. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2421. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2422. "add $2, %%"REG_a" \n\t"
  2423. "cmp %2, %%"REG_a" \n\t"
  2424. " jb 1b \n\t"
  2425. :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
  2426. : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
  2427. );
  2428. #ifdef HAVE_MMX2
  2429. } //if MMX2 can't be used
  2430. #endif
  2431. #else
  2432. int i;
  2433. unsigned int xpos=0;
  2434. for(i=0;i<dstWidth;i++)
  2435. {
  2436. register unsigned int xx=xpos>>16;
  2437. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2438. dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
  2439. xpos+=xInc;
  2440. }
  2441. #endif
  2442. }
  2443. }
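/* same as hyscale() above, but for chroma: both chroma planes of the line are converted and
   scaled, the U results going to dst[0..] and the V results to dst+2048 (the fixed chroma
   buffer offset used throughout this file). */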
  2444. inline static void RENAME(hcscale)(uint16_t *dst, long dstWidth, uint8_t *src1, uint8_t *src2,
  2445. int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
  2446. int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
  2447. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  2448. int32_t *mmx2FilterPos)
  2449. {
  2450. if(srcFormat==IMGFMT_YUY2)
  2451. {
  2452. RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2453. src1= formatConvBuffer;
  2454. src2= formatConvBuffer+2048;
  2455. }
  2456. else if(srcFormat==IMGFMT_UYVY)
  2457. {
  2458. RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2459. src1= formatConvBuffer;
  2460. src2= formatConvBuffer+2048;
  2461. }
  2462. else if(srcFormat==IMGFMT_BGR32)
  2463. {
  2464. RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2465. src1= formatConvBuffer;
  2466. src2= formatConvBuffer+2048;
  2467. }
  2468. else if(srcFormat==IMGFMT_BGR24)
  2469. {
  2470. RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2471. src1= formatConvBuffer;
  2472. src2= formatConvBuffer+2048;
  2473. }
  2474. else if(srcFormat==IMGFMT_BGR16)
  2475. {
  2476. RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2477. src1= formatConvBuffer;
  2478. src2= formatConvBuffer+2048;
  2479. }
  2480. else if(srcFormat==IMGFMT_BGR15)
  2481. {
  2482. RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2483. src1= formatConvBuffer;
  2484. src2= formatConvBuffer+2048;
  2485. }
  2486. else if(srcFormat==IMGFMT_RGB32)
  2487. {
  2488. RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2489. src1= formatConvBuffer;
  2490. src2= formatConvBuffer+2048;
  2491. }
  2492. else if(srcFormat==IMGFMT_RGB24)
  2493. {
  2494. RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2495. src1= formatConvBuffer;
  2496. src2= formatConvBuffer+2048;
  2497. }
  2498. else if(isGray(srcFormat))
  2499. {
  2500. return;
  2501. }
  2502. #ifdef HAVE_MMX
2503. // use the new MMX scaler if MMX2 can't be used (it is faster than the plain x86 asm one)
  2504. if(!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2505. #else
  2506. if(!(flags&SWS_FAST_BILINEAR))
  2507. #endif
  2508. {
  2509. RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2510. RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2511. }
  2512. else // Fast Bilinear upscale / crap downscale
  2513. {
  2514. #if defined(ARCH_X86) || defined(ARCH_X86_64)
  2515. #ifdef HAVE_MMX2
  2516. int i;
  2517. #if defined(PIC)
  2518. uint64_t ebxsave __attribute__((aligned(8)));
  2519. #endif
  2520. if(canMMX2BeUsed)
  2521. {
  2522. asm volatile(
  2523. #if defined(PIC)
  2524. "mov %%"REG_b", %6 \n\t"
  2525. #endif
  2526. "pxor %%mm7, %%mm7 \n\t"
  2527. "mov %0, %%"REG_c" \n\t"
  2528. "mov %1, %%"REG_D" \n\t"
  2529. "mov %2, %%"REG_d" \n\t"
  2530. "mov %3, %%"REG_b" \n\t"
  2531. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2532. PREFETCH" (%%"REG_c") \n\t"
  2533. PREFETCH" 32(%%"REG_c") \n\t"
  2534. PREFETCH" 64(%%"REG_c") \n\t"
  2535. #ifdef ARCH_X86_64
  2536. #define FUNNY_UV_CODE \
  2537. "movl (%%"REG_b"), %%esi \n\t"\
  2538. "call *%4 \n\t"\
  2539. "movl (%%"REG_b", %%"REG_a"), %%esi\n\t"\
  2540. "add %%"REG_S", %%"REG_c" \n\t"\
  2541. "add %%"REG_a", %%"REG_D" \n\t"\
  2542. "xor %%"REG_a", %%"REG_a" \n\t"\
  2543. #else
  2544. #define FUNNY_UV_CODE \
  2545. "movl (%%"REG_b"), %%esi \n\t"\
  2546. "call *%4 \n\t"\
  2547. "addl (%%"REG_b", %%"REG_a"), %%"REG_c"\n\t"\
  2548. "add %%"REG_a", %%"REG_D" \n\t"\
  2549. "xor %%"REG_a", %%"REG_a" \n\t"\
  2550. #endif
  2551. FUNNY_UV_CODE
  2552. FUNNY_UV_CODE
  2553. FUNNY_UV_CODE
  2554. FUNNY_UV_CODE
  2555. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2556. "mov %5, %%"REG_c" \n\t" // src
  2557. "mov %1, %%"REG_D" \n\t" // buf1
  2558. "add $4096, %%"REG_D" \n\t"
  2559. PREFETCH" (%%"REG_c") \n\t"
  2560. PREFETCH" 32(%%"REG_c") \n\t"
  2561. PREFETCH" 64(%%"REG_c") \n\t"
  2562. FUNNY_UV_CODE
  2563. FUNNY_UV_CODE
  2564. FUNNY_UV_CODE
  2565. FUNNY_UV_CODE
  2566. #if defined(PIC)
  2567. "mov %6, %%"REG_b" \n\t"
  2568. #endif
  2569. :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2570. "m" (funnyUVCode), "m" (src2)
  2571. #if defined(PIC)
  2572. ,"m" (ebxsave)
  2573. #endif
  2574. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
  2575. #if !defined(PIC)
  2576. ,"%"REG_b
  2577. #endif
  2578. );
  2579. for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
  2580. {
  2581. // printf("%d %d %d\n", dstWidth, i, srcW);
  2582. dst[i] = src1[srcW-1]*128;
  2583. dst[i+2048] = src2[srcW-1]*128;
  2584. }
  2585. }
  2586. else
  2587. {
  2588. #endif
  2589. long xInc_shr16 = (long) (xInc >> 16);
  2590. uint16_t xInc_mask = xInc & 0xffff;
  2591. asm volatile(
  2592. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2593. "xor %%"REG_d", %%"REG_d" \n\t" // xx
  2594. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2595. ASMALIGN(4)
  2596. "1: \n\t"
  2597. "mov %0, %%"REG_S" \n\t"
  2598. "movzbl (%%"REG_S", %%"REG_d"), %%edi \n\t" //src[xx]
  2599. "movzbl 1(%%"REG_S", %%"REG_d"), %%esi \n\t" //src[xx+1]
  2600. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2601. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2602. "shll $16, %%edi \n\t"
  2603. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2604. "mov %1, %%"REG_D" \n\t"
  2605. "shrl $9, %%esi \n\t"
  2606. "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
  2607. "movzbl (%5, %%"REG_d"), %%edi \n\t" //src[xx]
  2608. "movzbl 1(%5, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2609. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2610. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2611. "shll $16, %%edi \n\t"
  2612. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2613. "mov %1, %%"REG_D" \n\t"
  2614. "shrl $9, %%esi \n\t"
  2615. "movw %%si, 4096(%%"REG_D", %%"REG_a", 2)\n\t"
  2616. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2617. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2618. "add $1, %%"REG_a" \n\t"
  2619. "cmp %2, %%"REG_a" \n\t"
  2620. " jb 1b \n\t"
2621. /* GCC-3.3 makes MPlayer crash on IA-32 machines when the "g" operand is used here,
2622. but the "g" operand is needed to support GCC-4.0 */
  2623. #if defined(ARCH_X86_64) && ((__GNUC__ > 3) || ( __GNUC__ == 3 && __GNUC_MINOR__ >= 4))
  2624. :: "m" (src1), "m" (dst), "g" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
  2625. #else
  2626. :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
  2627. #endif
  2628. "r" (src2)
  2629. : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
  2630. );
  2631. #ifdef HAVE_MMX2
  2632. } //if MMX2 can't be used
  2633. #endif
  2634. #else
  2635. int i;
  2636. unsigned int xpos=0;
  2637. for(i=0;i<dstWidth;i++)
  2638. {
  2639. register unsigned int xx=xpos>>16;
  2640. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2641. dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
  2642. dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
  2643. /* slower
  2644. dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
  2645. dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
  2646. */
  2647. xpos+=xInc;
  2648. }
  2649. #endif
  2650. }
  2651. }
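/* main scaling entry point. Horizontally scaled lines are kept in the lumPixBuf / chrPixBuf
   ring buffers; for each output line the vertical filter (vLumFilterSize / vChrFilterSize taps)
   is applied over those lines, and the state (dstY, buffer indices, last input lines) is stored
   back into the context so the image can be fed in slices. */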
  2652. static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
  2653. int srcSliceH, uint8_t* dst[], int dstStride[]){
2654. /* load a few things into local vars to make the code more readable and faster */
  2655. const int srcW= c->srcW;
  2656. const int dstW= c->dstW;
  2657. const int dstH= c->dstH;
  2658. const int chrDstW= c->chrDstW;
  2659. const int chrSrcW= c->chrSrcW;
  2660. const int lumXInc= c->lumXInc;
  2661. const int chrXInc= c->chrXInc;
  2662. const int dstFormat= c->dstFormat;
  2663. const int srcFormat= c->srcFormat;
  2664. const int flags= c->flags;
  2665. const int canMMX2BeUsed= c->canMMX2BeUsed;
  2666. int16_t *vLumFilterPos= c->vLumFilterPos;
  2667. int16_t *vChrFilterPos= c->vChrFilterPos;
  2668. int16_t *hLumFilterPos= c->hLumFilterPos;
  2669. int16_t *hChrFilterPos= c->hChrFilterPos;
  2670. int16_t *vLumFilter= c->vLumFilter;
  2671. int16_t *vChrFilter= c->vChrFilter;
  2672. int16_t *hLumFilter= c->hLumFilter;
  2673. int16_t *hChrFilter= c->hChrFilter;
  2674. int32_t *lumMmxFilter= c->lumMmxFilter;
  2675. int32_t *chrMmxFilter= c->chrMmxFilter;
  2676. const int vLumFilterSize= c->vLumFilterSize;
  2677. const int vChrFilterSize= c->vChrFilterSize;
  2678. const int hLumFilterSize= c->hLumFilterSize;
  2679. const int hChrFilterSize= c->hChrFilterSize;
  2680. int16_t **lumPixBuf= c->lumPixBuf;
  2681. int16_t **chrPixBuf= c->chrPixBuf;
  2682. const int vLumBufSize= c->vLumBufSize;
  2683. const int vChrBufSize= c->vChrBufSize;
  2684. uint8_t *funnyYCode= c->funnyYCode;
  2685. uint8_t *funnyUVCode= c->funnyUVCode;
  2686. uint8_t *formatConvBuffer= c->formatConvBuffer;
  2687. const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
  2688. const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
  2689. int lastDstY;
2690. /* vars which will change and which we need to store back into the context */
  2691. int dstY= c->dstY;
  2692. int lumBufIndex= c->lumBufIndex;
  2693. int chrBufIndex= c->chrBufIndex;
  2694. int lastInLumBuf= c->lastInLumBuf;
  2695. int lastInChrBuf= c->lastInChrBuf;
  2696. if(isPacked(c->srcFormat)){
  2697. src[0]=
  2698. src[1]=
  2699. src[2]= src[0];
  2700. srcStride[0]=
  2701. srcStride[1]=
  2702. srcStride[2]= srcStride[0];
  2703. }
  2704. srcStride[1]<<= c->vChrDrop;
  2705. srcStride[2]<<= c->vChrDrop;
  2706. // printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
  2707. // (int)dst[0], (int)dst[1], (int)dst[2]);
  2708. #if 0 //self test FIXME move to a vfilter or something
  2709. {
  2710. static volatile int i=0;
  2711. i++;
  2712. if(srcFormat==IMGFMT_YV12 && i==1 && srcSliceH>= c->srcH)
  2713. selfTest(src, srcStride, c->srcW, c->srcH);
  2714. i--;
  2715. }
  2716. #endif
  2717. //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
  2718. //dstStride[0],dstStride[1],dstStride[2]);
  2719. if(dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
  2720. {
  2721. static int firstTime=1; //FIXME move this into the context perhaps
  2722. if(flags & SWS_PRINT_INFO && firstTime)
  2723. {
  2724. MSG_WARN("SwScaler: Warning: dstStride is not aligned!\n"
  2725. "SwScaler: ->cannot do aligned memory acesses anymore\n");
  2726. firstTime=0;
  2727. }
  2728. }
2729. /* Note: the user might start scaling in the middle of the picture, so this will not get executed.
2730. This is not really intended, but it currently works, so people might rely on it. */
  2731. if(srcSliceY ==0){
  2732. lumBufIndex=0;
  2733. chrBufIndex=0;
  2734. dstY=0;
  2735. lastInLumBuf= -1;
  2736. lastInChrBuf= -1;
  2737. }
  2738. lastDstY= dstY;
  2739. for(;dstY < dstH; dstY++){
  2740. unsigned char *dest =dst[0]+dstStride[0]*dstY;
  2741. const int chrDstY= dstY>>c->chrDstVSubSample;
  2742. unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
  2743. unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
  2744. const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
  2745. const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
  2746. const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
  2747. const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
  2748. //printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
  2749. // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample);
  2750. //handle holes (FAST_BILINEAR & weird filters)
  2751. if(firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
  2752. if(firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
  2753. //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
  2754. ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1)
  2755. ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1)
2756. // Do we have enough lines in this slice to output the dstY line?
  2757. if(lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
  2758. {
  2759. //Do horizontal scaling
  2760. while(lastInLumBuf < lastLumSrcY)
  2761. {
  2762. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2763. lumBufIndex++;
  2764. // printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
  2765. ASSERT(lumBufIndex < 2*vLumBufSize)
  2766. ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
  2767. ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
  2768. // printf("%d %d\n", lumBufIndex, vLumBufSize);
  2769. RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2770. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2771. funnyYCode, c->srcFormat, formatConvBuffer,
  2772. c->lumMmx2Filter, c->lumMmx2FilterPos);
  2773. lastInLumBuf++;
  2774. }
  2775. while(lastInChrBuf < lastChrSrcY)
  2776. {
  2777. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2778. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2779. chrBufIndex++;
  2780. ASSERT(chrBufIndex < 2*vChrBufSize)
  2781. ASSERT(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH))
  2782. ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
  2783. //FIXME replace parameters through context struct (some at least)
  2784. if(!(isGray(srcFormat) || isGray(dstFormat)))
  2785. RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2786. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2787. funnyUVCode, c->srcFormat, formatConvBuffer,
  2788. c->chrMmx2Filter, c->chrMmx2FilterPos);
  2789. lastInChrBuf++;
  2790. }
  2791. //wrap buf index around to stay inside the ring buffer
  2792. if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
  2793. if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
  2794. }
  2795. else // not enough lines left in this slice -> load the rest in the buffer
  2796. {
  2797. /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
  2798. firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
  2799. lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
  2800. vChrBufSize, vLumBufSize);*/
  2801. //Do horizontal scaling
  2802. while(lastInLumBuf+1 < srcSliceY + srcSliceH)
  2803. {
  2804. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2805. lumBufIndex++;
  2806. ASSERT(lumBufIndex < 2*vLumBufSize)
  2807. ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
  2808. ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
  2809. RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2810. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2811. funnyYCode, c->srcFormat, formatConvBuffer,
  2812. c->lumMmx2Filter, c->lumMmx2FilterPos);
  2813. lastInLumBuf++;
  2814. }
  2815. while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
  2816. {
  2817. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2818. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2819. chrBufIndex++;
  2820. ASSERT(chrBufIndex < 2*vChrBufSize)
  2821. ASSERT(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH)
  2822. ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
  2823. if(!(isGray(srcFormat) || isGray(dstFormat)))
  2824. RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2825. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2826. funnyUVCode, c->srcFormat, formatConvBuffer,
  2827. c->chrMmx2Filter, c->chrMmx2FilterPos);
  2828. lastInChrBuf++;
  2829. }
  2830. //wrap buf index around to stay inside the ring buffer
  2831. if(lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
  2832. if(chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
  2833. break; //we can't output a dstY line so let's try with the next slice
  2834. }
  2835. #ifdef HAVE_MMX
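/* pick the ordered-dither rows for this output line; they back the MANGLE(*Dither) constants
   added in the 15/16 bpp WRITEBGR paths. */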
  2836. b5Dither= dither8[dstY&1];
  2837. g6Dither= dither4[dstY&1];
  2838. g5Dither= dither8[dstY&1];
  2839. r5Dither= dither8[(dstY+1)&1];
  2840. #endif
  2841. if(dstY < dstH-2)
  2842. {
  2843. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2844. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2845. #ifdef HAVE_MMX
  2846. int i;
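/* fill lumMmxFilter/chrMmxFilter with the layout the YSCALEYUV2* asm walks: a source-line
   pointer followed by its 16-bit vertical coefficient replicated across a dword; with
   SWS_ACCURATE_RND two source lines and two coefficients are packed per entry instead. */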
  2847. if(flags & SWS_ACCURATE_RND){
  2848. for(i=0; i<vLumFilterSize; i+=2){
  2849. lumMmxFilter[2*i+0]= lumSrcPtr[i ];
  2850. lumMmxFilter[2*i+1]= lumSrcPtr[i+(vLumFilterSize>1)];
  2851. lumMmxFilter[2*i+2]=
  2852. lumMmxFilter[2*i+3]= vLumFilter[dstY*vLumFilterSize + i ]
  2853. + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
  2854. }
  2855. for(i=0; i<vChrFilterSize; i+=2){
  2856. chrMmxFilter[2*i+0]= chrSrcPtr[i ];
  2857. chrMmxFilter[2*i+1]= chrSrcPtr[i+(vChrFilterSize>1)];
  2858. chrMmxFilter[2*i+2]=
  2859. chrMmxFilter[2*i+3]= vChrFilter[chrDstY*vChrFilterSize + i ]
  2860. + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
  2861. }
  2862. }else{
  2863. for(i=0; i<vLumFilterSize; i++)
  2864. {
  2865. lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
  2866. lumMmxFilter[4*i+2]=
  2867. lumMmxFilter[4*i+3]=
  2868. ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
  2869. }
  2870. for(i=0; i<vChrFilterSize; i++)
  2871. {
  2872. chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
  2873. chrMmxFilter[4*i+2]=
  2874. chrMmxFilter[4*i+3]=
  2875. ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
  2876. }
  2877. }
  2878. #endif
  2879. if(dstFormat == IMGFMT_NV12 || dstFormat == IMGFMT_NV21){
  2880. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2881. if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
  2882. RENAME(yuv2nv12X)(c,
  2883. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2884. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2885. dest, uDest, dstW, chrDstW, dstFormat);
  2886. }
  2887. else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 like
  2888. {
  2889. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2890. if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
  2891. if(vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12
  2892. {
  2893. int16_t *lumBuf = lumPixBuf[0];
  2894. int16_t *chrBuf= chrPixBuf[0];
  2895. RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
  2896. }
  2897. else //General YV12
  2898. {
  2899. RENAME(yuv2yuvX)(c,
  2900. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2901. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2902. dest, uDest, vDest, dstW, chrDstW);
  2903. }
  2904. }
  2905. else
  2906. {
  2907. ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2908. ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  2909. if(vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB
  2910. {
  2911. int chrAlpha= vChrFilter[2*dstY+1];
  2912. RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
  2913. dest, dstW, chrAlpha, dstFormat, flags, dstY);
  2914. }
  2915. else if(vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB
  2916. {
  2917. int lumAlpha= vLumFilter[2*dstY+1];
  2918. int chrAlpha= vChrFilter[2*dstY+1];
  2919. lumMmxFilter[2]=
  2920. lumMmxFilter[3]= vLumFilter[2*dstY ]*0x10001;
  2921. chrMmxFilter[2]=
  2922. chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001;
  2923. RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
  2924. dest, dstW, lumAlpha, chrAlpha, dstY);
  2925. }
  2926. else //General RGB
  2927. {
  2928. RENAME(yuv2packedX)(c,
  2929. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2930. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2931. dest, dstW, dstY);
  2932. }
  2933. }
  2934. }
  2935. else // hmm looks like we can't use MMX here without overwriting this array's tail
  2936. {
  2937. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2938. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2939. if(dstFormat == IMGFMT_NV12 || dstFormat == IMGFMT_NV21){
  2940. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2941. if(dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
  2942. yuv2nv12XinC(
  2943. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2944. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2945. dest, uDest, dstW, chrDstW, dstFormat);
  2946. }
  2947. else if(isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12
  2948. {
  2949. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2950. if((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
  2951. yuv2yuvXinC(
  2952. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2953. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2954. dest, uDest, vDest, dstW, chrDstW);
  2955. }
  2956. else
  2957. {
  2958. ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2959. ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  2960. yuv2packedXinC(c,
  2961. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2962. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2963. dest, dstW, dstY);
  2964. }
  2965. }
  2966. }
  2967. #ifdef HAVE_MMX
  2968. __asm __volatile(SFENCE:::"memory");
  2969. __asm __volatile(EMMS:::"memory");
  2970. #endif
  2971. /* store changed local vars back in the context */
  2972. c->dstY= dstY;
  2973. c->lumBufIndex= lumBufIndex;
  2974. c->chrBufIndex= chrBufIndex;
  2975. c->lastInLumBuf= lastInLumBuf;
  2976. c->lastInChrBuf= lastInChrBuf;
  2977. return dstY - lastDstY;
  2978. }