/*
 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * the C code (not assembly, mmx, ...) of this file can be used
 * under the LGPL license too
 */
#undef REAL_MOVNTQ
#undef MOVNTQ
#undef PAVGB
#undef PREFETCH
#undef PREFETCHW
#undef EMMS
#undef SFENCE

#ifdef HAVE_3DNOW
/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
#define EMMS     "femms"
#else
#define EMMS     "emms"
#endif

#ifdef HAVE_3DNOW
#define PREFETCH  "prefetch"
#define PREFETCHW "prefetchw"
#elif defined (HAVE_MMX2)
#define PREFETCH  "prefetchnta"
#define PREFETCHW "prefetcht0"
#else
#define PREFETCH  " # nop"
#define PREFETCHW " # nop"
#endif

#ifdef HAVE_MMX2
#define SFENCE "sfence"
#else
#define SFENCE " # nop"
#endif

#ifdef HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif defined (HAVE_3DNOW)
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif

#ifdef HAVE_MMX2
#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif
#define MOVNTQ(a,b) REAL_MOVNTQ(a,b)

#ifdef HAVE_ALTIVEC
#include "swscale_altivec_template.c"
#endif
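
/* Editor's note: YSCALEYUV2YV12X walks a list of (source pointer, 16-bit
 * coefficient) filter entries starting at "offset" inside the context; a
 * NULL source pointer ends the list. Products are accumulated with pmulhw
 * on top of the VROUNDER_OFFSET rounding constant, shifted down by 3 and
 * packed to 8 bits with saturation, 8 output pixels per iteration. */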
#define YSCALEYUV2YV12X(x, offset, dest, width) \
    asm volatile(\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
    "movq %%mm3, %%mm4 \n\t"\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    ASMALIGN(4) /* FIXME Unroll? */\
    "1: \n\t"\
    "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
    "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
    "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* srcData */\
    "add $16, %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t"\
    "pmulhw %%mm0, %%mm5 \n\t"\
    "paddw %%mm2, %%mm3 \n\t"\
    "paddw %%mm5, %%mm4 \n\t"\
    " jnz 1b \n\t"\
    "psraw $3, %%mm3 \n\t"\
    "psraw $3, %%mm4 \n\t"\
    "packuswb %%mm4, %%mm3 \n\t"\
    MOVNTQ(%%mm3, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "cmp %2, %%"REG_a" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
    "movq %%mm3, %%mm4 \n\t"\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "jb 1b \n\t"\
    :: "r" (&c->redDither),\
    "r" (dest), "g" (width)\
    : "%"REG_a, "%"REG_d, "%"REG_S\
    );
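
/* Editor's note: same vertical scale as above, but accumulating in 32 bits
 * via pmaddwd/paddd for more accurate rounding; selected at run time by
 * the SWS_ACCURATE_RND flag. */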
#define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
    asm volatile(\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    "pxor %%mm4, %%mm4 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    ASMALIGN(4) \
    "1: \n\t"\
    "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* srcData */\
    "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
    "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
    "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm1 \n\t" /* srcData */\
    "movq %%mm0, %%mm3 \n\t"\
    "punpcklwd %%mm1, %%mm0 \n\t"\
    "punpckhwd %%mm1, %%mm3 \n\t"\
    "movq 8(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm3 \n\t"\
    "paddd %%mm0, %%mm4 \n\t"\
    "paddd %%mm3, %%mm5 \n\t"\
    "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* srcData */\
    "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
    "add $16, %%"REG_d" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "punpcklwd %%mm3, %%mm2 \n\t"\
    "punpckhwd %%mm3, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm2 \n\t"\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "paddd %%mm2, %%mm6 \n\t"\
    "paddd %%mm0, %%mm7 \n\t"\
    " jnz 1b \n\t"\
    "psrad $16, %%mm4 \n\t"\
    "psrad $16, %%mm5 \n\t"\
    "psrad $16, %%mm6 \n\t"\
    "psrad $16, %%mm7 \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
    "packssdw %%mm5, %%mm4 \n\t"\
    "packssdw %%mm7, %%mm6 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm0, %%mm6 \n\t"\
    "psraw $3, %%mm4 \n\t"\
    "psraw $3, %%mm6 \n\t"\
    "packuswb %%mm6, %%mm4 \n\t"\
    MOVNTQ(%%mm4, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "cmp %2, %%"REG_a" \n\t"\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "pxor %%mm4, %%mm4 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "jb 1b \n\t"\
    :: "r" (&c->redDither),\
    "r" (dest), "g" (width)\
    : "%"REG_a, "%"REG_d, "%"REG_S\
    );
#define YSCALEYUV2YV121 \
    "mov %2, %%"REG_a" \n\t"\
    ASMALIGN(4) /* FIXME Unroll? */\
    "1: \n\t"\
    "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
    "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
    "psraw $7, %%mm0 \n\t"\
    "psraw $7, %%mm1 \n\t"\
    "packuswb %%mm1, %%mm0 \n\t"\
    MOVNTQ(%%mm0, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "jnc 1b \n\t"

/*
:: "m" (-lumFilterSize), "m" (-chrFilterSize),
   "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
   "r" (dest), "m" (dstW),
   "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
: "%eax", "%ebx", "%ecx", "%edx", "%esi"
*/
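
/* Editor's note: YSCALEYUV2PACKEDX runs the chroma filter list first
 * (first loop "2:"), then the luma list, leaving the U/V sums in mm3/mm4
 * and the Y sums in mm1/mm7 for the packed writers below.
 * YSCALEYUV2PACKEDX_END closes the asm block with the shared operand
 * constraints. */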
#define YSCALEYUV2PACKEDX \
    asm volatile(\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    ASMALIGN(4)\
    "nop \n\t"\
    "1: \n\t"\
    "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
    "movq %%mm3, %%mm4 \n\t"\
    ASMALIGN(4)\
    "2: \n\t"\
    "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
    "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
    "movq 4096(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
    "add $16, %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t"\
    "pmulhw %%mm0, %%mm5 \n\t"\
    "paddw %%mm2, %%mm3 \n\t"\
    "paddw %%mm5, %%mm4 \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    " jnz 2b \n\t"\
    \
    "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm1 \n\t"\
    "movq %%mm1, %%mm7 \n\t"\
    ASMALIGN(4)\
    "2: \n\t"\
    "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
    "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\
    "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
    "add $16, %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t"\
    "pmulhw %%mm0, %%mm5 \n\t"\
    "paddw %%mm2, %%mm1 \n\t"\
    "paddw %%mm5, %%mm7 \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    " jnz 2b \n\t"\

#define YSCALEYUV2PACKEDX_END \
    :: "r" (&c->redDither), \
    "m" (dummy), "m" (dummy), "m" (dummy),\
    "r" (dest), "m" (dstW) \
    : "%"REG_a, "%"REG_d, "%"REG_S \
    );
#define YSCALEYUV2PACKEDX_ACCURATE \
    asm volatile(\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    ASMALIGN(4)\
    "nop \n\t"\
    "1: \n\t"\
    "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pxor %%mm4, %%mm4 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    ASMALIGN(4)\
    "2: \n\t"\
    "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\
    "movq 4096(%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
    "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
    "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\
    "movq %%mm0, %%mm3 \n\t"\
    "punpcklwd %%mm1, %%mm0 \n\t"\
    "punpckhwd %%mm1, %%mm3 \n\t"\
    "movq 8(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm3 \n\t"\
    "paddd %%mm0, %%mm4 \n\t"\
    "paddd %%mm3, %%mm5 \n\t"\
    "movq 4096(%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
    "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
    "add $16, %%"REG_d" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "punpcklwd %%mm3, %%mm2 \n\t"\
    "punpckhwd %%mm3, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm2 \n\t"\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "paddd %%mm2, %%mm6 \n\t"\
    "paddd %%mm0, %%mm7 \n\t"\
    " jnz 2b \n\t"\
    "psrad $16, %%mm4 \n\t"\
    "psrad $16, %%mm5 \n\t"\
    "psrad $16, %%mm6 \n\t"\
    "psrad $16, %%mm7 \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
    "packssdw %%mm5, %%mm4 \n\t"\
    "packssdw %%mm7, %%mm6 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm0, %%mm6 \n\t"\
    "movq %%mm4, "U_TEMP"(%0) \n\t"\
    "movq %%mm6, "V_TEMP"(%0) \n\t"\
    \
    "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pxor %%mm1, %%mm1 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    ASMALIGN(4)\
    "2: \n\t"\
    "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
    "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
    "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
    "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
    "movq %%mm0, %%mm3 \n\t"\
    "punpcklwd %%mm4, %%mm0 \n\t"\
    "punpckhwd %%mm4, %%mm3 \n\t"\
    "movq 8(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\
    "pmaddwd %%mm4, %%mm0 \n\t"\
    "pmaddwd %%mm4, %%mm3 \n\t"\
    "paddd %%mm0, %%mm1 \n\t"\
    "paddd %%mm3, %%mm5 \n\t"\
    "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
    "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
    "add $16, %%"REG_d" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "punpcklwd %%mm3, %%mm2 \n\t"\
    "punpckhwd %%mm3, %%mm0 \n\t"\
    "pmaddwd %%mm4, %%mm2 \n\t"\
    "pmaddwd %%mm4, %%mm0 \n\t"\
    "paddd %%mm2, %%mm7 \n\t"\
    "paddd %%mm0, %%mm6 \n\t"\
    " jnz 2b \n\t"\
    "psrad $16, %%mm1 \n\t"\
    "psrad $16, %%mm5 \n\t"\
    "psrad $16, %%mm7 \n\t"\
    "psrad $16, %%mm6 \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
    "packssdw %%mm5, %%mm1 \n\t"\
    "packssdw %%mm6, %%mm7 \n\t"\
    "paddw %%mm0, %%mm1 \n\t"\
    "paddw %%mm0, %%mm7 \n\t"\
    "movq "U_TEMP"(%0), %%mm3 \n\t"\
    "movq "V_TEMP"(%0), %%mm4 \n\t"\
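
/* Editor's note: YSCALEYUV2RGBX converts the accumulated Y (mm1/mm7) and
 * U/V (mm3/mm4) values of 8 pixels to R, G and B bytes, using the offset
 * and coefficient tables stored in the SwsContext (U_OFFSET, Y_COEFF,
 * ...). The result layout (B in mm2, G in mm4, R in mm5, mm7 zeroed) is
 * what the WRITE* macros further down expect. */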
#define YSCALEYUV2RGBX \
    "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
    "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"
#if 0
#define FULL_YSCALEYUV2RGB \
    "pxor %%mm7, %%mm7 \n\t"\
    "movd %6, %%mm6 \n\t" /*yalpha1*/\
    "punpcklwd %%mm6, %%mm6 \n\t"\
    "punpcklwd %%mm6, %%mm6 \n\t"\
    "movd %7, %%mm5 \n\t" /*uvalpha1*/\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%0, %%"REG_a",2), %%mm0 \n\t" /*buf0[eax]*/\
    "movq (%1, %%"REG_a",2), %%mm1 \n\t" /*buf1[eax]*/\
    "movq (%2, %%"REG_a",2), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, %%"REG_a",2), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
    "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
    "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "movq 4096(%2, %%"REG_a",2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
    "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
    "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
    "movq 4096(%3, %%"REG_a",2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
    "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
    "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\
    "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
    \
    \
    "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "pmulhw "MANGLE(ubCoeff)", %%mm3 \n\t"\
    "psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
    "pmulhw "MANGLE(ugCoeff)", %%mm2 \n\t"\
    "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
    "psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\
    \
    \
    "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
    "pmulhw "MANGLE(vrCoeff)", %%mm0 \n\t"\
    "pmulhw "MANGLE(vgCoeff)", %%mm4 \n\t"\
    "paddw %%mm1, %%mm3 \n\t" /* B*/\
    "paddw %%mm1, %%mm0 \n\t" /* R*/\
    "packuswb %%mm3, %%mm3 \n\t"\
    \
    "packuswb %%mm0, %%mm0 \n\t"\
    "paddw %%mm4, %%mm2 \n\t"\
    "paddw %%mm2, %%mm1 \n\t" /* G*/\
    \
    "packuswb %%mm1, %%mm1 \n\t"
#endif
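
/* Editor's note: the REAL_* macros below come in three flavours. The plain
 * versions blend two source rows (buf0/buf1, uvbuf0/uvbuf1) with the
 * per-frame alphas stored in the context (vertical bilinear); the "1"
 * versions read a single row only; and the "1b" versions average the two
 * chroma rows. The plain-name wrappers (YSCALEYUV2PACKED etc.) exist only
 * to force macro expansion of the index/c arguments before the "#"
 * stringification inside the REAL_* bodies. */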
#define REAL_YSCALEYUV2PACKED(index, c) \
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1 \n\t"\
    "psraw $3, %%mm0 \n\t"\
    "psraw $3, %%mm1 \n\t"\
    "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
    "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
    "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
    "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
    "psraw $7, %%mm3 \n\t" /* uvbuf1[eax] >>7*/\
    "psraw $7, %%mm4 \n\t" /* uvbuf1[eax+2048] >>7*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
    "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
    "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
    "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
    "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
    "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
    "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "psraw $7, %%mm1 \n\t" /* buf1[eax] >>7*/\
    "psraw $7, %%mm7 \n\t" /* buf1[eax] >>7*/\
    "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
    "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\

#define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
#define REAL_YSCALEYUV2RGB(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
    "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
    "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
    "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
    "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
    "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
    "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
    "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
    "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
    "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
    "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"

#define YSCALEYUV2RGB(index, c) REAL_YSCALEYUV2RGB(index, c)
#define REAL_YSCALEYUV2PACKED1(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
    "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
    "psraw $7, %%mm3 \n\t" \
    "psraw $7, %%mm4 \n\t" \
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $7, %%mm1 \n\t" \
    "psraw $7, %%mm7 \n\t" \

#define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
#define REAL_YSCALEYUV2RGB1(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
    "movq 4096(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
    "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] >>4*/\
    "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] >>4*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] >>4*/\
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"

#define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
#define REAL_YSCALEYUV2PACKED1b(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
    "psrlw $8, %%mm3 \n\t" \
    "psrlw $8, %%mm4 \n\t" \
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $7, %%mm1 \n\t" \
    "psraw $7, %%mm7 \n\t"

#define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
// do vertical chrominance interpolation
#define REAL_YSCALEYUV2RGB1b(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq 4096(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq 4096(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
    "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
    "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] >>4*/\
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"

#define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
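
/* Editor's note: the WRITE* macros below take the planar bytes produced
 * above (B in mm2, G in mm4, R in mm5, mm7 zeroed), interleave/pack them
 * into the destination pixel format, store 8 pixels per iteration and
 * loop back to label "1:" until "index" reaches "dstw". */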
#define REAL_WRITEBGR32(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq %%mm2, %%mm1 \n\t" /* B */\
    "movq %%mm5, %%mm6 \n\t" /* R */\
    "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
    "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
    "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
    "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
    "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
    "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
    "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
    "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
    "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
    "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
    \
    MOVNTQ(%%mm0, (dst, index, 4))\
    MOVNTQ(%%mm2, 8(dst, index, 4))\
    MOVNTQ(%%mm1, 16(dst, index, 4))\
    MOVNTQ(%%mm3, 24(dst, index, 4))\
    \
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#define WRITEBGR32(dst, dstw, index) REAL_WRITEBGR32(dst, dstw, index)
#define REAL_WRITEBGR16(dst, dstw, index) \
    "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
    "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
    "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
    "psrlq $3, %%mm2 \n\t"\
    \
    "movq %%mm2, %%mm1 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    \
    "punpcklbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm5, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm5, %%mm1 \n\t"\
    \
    "psllq $3, %%mm3 \n\t"\
    "psllq $3, %%mm4 \n\t"\
    \
    "por %%mm3, %%mm2 \n\t"\
    "por %%mm4, %%mm1 \n\t"\
    \
    MOVNTQ(%%mm2, (dst, index, 2))\
    MOVNTQ(%%mm1, 8(dst, index, 2))\
    \
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#define WRITEBGR16(dst, dstw, index) REAL_WRITEBGR16(dst, dstw, index)
#define REAL_WRITEBGR15(dst, dstw, index) \
    "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
    "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
    "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
    "psrlq $3, %%mm2 \n\t"\
    "psrlq $1, %%mm5 \n\t"\
    \
    "movq %%mm2, %%mm1 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    \
    "punpcklbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm5, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm5, %%mm1 \n\t"\
    \
    "psllq $2, %%mm3 \n\t"\
    "psllq $2, %%mm4 \n\t"\
    \
    "por %%mm3, %%mm2 \n\t"\
    "por %%mm4, %%mm1 \n\t"\
    \
    MOVNTQ(%%mm2, (dst, index, 2))\
    MOVNTQ(%%mm1, 8(dst, index, 2))\
    \
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#define WRITEBGR15(dst, dstw, index) REAL_WRITEBGR15(dst, dstw, index)
#define WRITEBGR24OLD(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq %%mm2, %%mm1 \n\t" /* B */\
    "movq %%mm5, %%mm6 \n\t" /* R */\
    "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
    "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
    "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
    "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
    "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
    "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
    "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
    "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
    "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
    "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
    \
    "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
    "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
    "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 0 */\
    "pand "MANGLE(bm11111000)", %%mm0 \n\t" /* 00RGB000 0.5 */\
    "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
    "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
    "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
    "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
    \
    "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
    "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
    "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
    "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
    "pand "MANGLE(bm00001111)", %%mm2 \n\t" /* 0000RGBR 1 */\
    "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
    "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
    "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 2 */\
    "pand "MANGLE(bm11111000)", %%mm1 \n\t" /* 00RGB000 2.5 */\
    "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
    "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
    "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
    "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
    \
    "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
    "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
    "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
    "pand "MANGLE(bm00000111)", %%mm5 \n\t" /* 00000RGB 3 */\
    "pand "MANGLE(bm11111000)", %%mm3 \n\t" /* 00RGB000 3.5 */\
    "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
    "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
    "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
    \
    MOVNTQ(%%mm0, (dst))\
    MOVNTQ(%%mm2, 8(dst))\
    MOVNTQ(%%mm3, 16(dst))\
    "add $24, "#dst" \n\t"\
    \
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITEBGR24MMX(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq %%mm2, %%mm1 \n\t" /* B */\
    "movq %%mm5, %%mm6 \n\t" /* R */\
    "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
    "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
    "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
    "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
    "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
    "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
    "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
    "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
    "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
    "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
    \
    "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
    "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
    "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
    "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
    \
    "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
    "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
    "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
    "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
    \
    "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
    "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
    "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
    "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
    \
    "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
    "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
    "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
    "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
    MOVNTQ(%%mm0, (dst))\
    \
    "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
    "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
    "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
    "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
    MOVNTQ(%%mm6, 8(dst))\
    \
    "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
    "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
    "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
    MOVNTQ(%%mm5, 16(dst))\
    \
    "add $24, "#dst" \n\t"\
    \
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"
#define WRITEBGR24MMX2(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq "MANGLE(M24A)", %%mm0 \n\t"\
    "movq "MANGLE(M24C)", %%mm7 \n\t"\
    "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
    "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
    "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
    \
    "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
    "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
    "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
    \
    "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
    "por %%mm1, %%mm6 \n\t"\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, (dst))\
    \
    "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
    "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
    "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
    "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
    \
    "pand "MANGLE(M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
    "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
    "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
    \
    "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, 8(dst))\
    \
    "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B7 B6 */\
    "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
    "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
    \
    "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
    "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
    "pand "MANGLE(M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
    \
    "por %%mm1, %%mm3 \n\t"\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, 16(dst))\
    \
    "add $24, "#dst" \n\t"\
    \
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#ifdef HAVE_MMX2
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
#else
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
#endif
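
/* Editor's note: WRITEYUY2 packs the Y values (mm1/mm7) and the U/V
 * values (mm3/mm4) into interleaved Y U Y V bytes and stores 16 bytes
 * (8 pixels) per iteration. */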
#define REAL_WRITEYUY2(dst, dstw, index) \
    "packuswb %%mm3, %%mm3 \n\t"\
    "packuswb %%mm4, %%mm4 \n\t"\
    "packuswb %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm4, %%mm3 \n\t"\
    "movq %%mm1, %%mm7 \n\t"\
    "punpcklbw %%mm3, %%mm1 \n\t"\
    "punpckhbw %%mm3, %%mm7 \n\t"\
    \
    MOVNTQ(%%mm1, (dst, index, 2))\
    MOVNTQ(%%mm7, 8(dst, index, 2))\
    \
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
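
/**
 * vertical scale YV12; dispatches to the MMX macros above, the AltiVec
 * implementation, or the plain C version, depending on build flags.
 * (Editor's note, matching the code below.)
 */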
static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
                                    int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
                                    uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
{
#ifdef HAVE_MMX
    if (c->flags & SWS_ACCURATE_RND){
        if (uDest){
            YSCALEYUV2YV12X_ACCURATE(   0, CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
            YSCALEYUV2YV12X_ACCURATE(4096, CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
        }

        YSCALEYUV2YV12X_ACCURATE(0, LUM_MMX_FILTER_OFFSET, dest, dstW)
    }else{
        if (uDest){
            YSCALEYUV2YV12X(   0, CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
            YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
        }

        YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET, dest, dstW)
    }
#else
#ifdef HAVE_ALTIVEC
    yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
                          chrFilter, chrSrc, chrFilterSize,
                          dest, uDest, vDest, dstW, chrDstW);
#else //HAVE_ALTIVEC
    yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
                chrFilter, chrSrc, chrFilterSize,
                dest, uDest, vDest, dstW, chrDstW);
#endif //!HAVE_ALTIVEC
#endif
}
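
/**
 * vertical scale with interleaved chroma output; currently just forwards
 * to the C implementation (editor's note: presumably for NV12-style
 * dstFormat values, which the callee selects on).
 */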
static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
                                     int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
                                     uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
{
    yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
                 chrFilter, chrSrc, chrFilterSize,
                 dest, uDest, dstW, chrDstW, dstFormat);
}
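
/**
 * unscaled vertical copy: each output line is one source line shifted
 * down from the 15-bit intermediate representation (>>7) and clipped to
 * 8 bits. (Editor's note, matching both the MMX and C paths below.)
 */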
static inline void RENAME(yuv2yuv1)(int16_t *lumSrc, int16_t *chrSrc,
                                    uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
{
#ifdef HAVE_MMX
    if (uDest != NULL)
    {
        asm volatile(
            YSCALEYUV2YV121
            :: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
            "g" (-chrDstW)
            : "%"REG_a
        );

        asm volatile(
            YSCALEYUV2YV121
            :: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
            "g" (-chrDstW)
            : "%"REG_a
        );
    }

    asm volatile(
        YSCALEYUV2YV121
        :: "r" (lumSrc + dstW), "r" (dest + dstW),
        "g" (-dstW)
        : "%"REG_a
    );
#else
    int i;
    for (i=0; i<dstW; i++)
    {
        int val= lumSrc[i]>>7;

        if (val&256){
            if (val<0) val=0;
            else       val=255;
        }

        dest[i]= val;
    }

    if (uDest != NULL)
        for (i=0; i<chrDstW; i++)
        {
            int u=chrSrc[i]>>7;
            int v=chrSrc[i + 2048]>>7;

            if ((u|v)&256){
                if (u<0)        u=0;
                else if (u>255) u=255;
                if (v<0)        v=0;
                else if (v>255) v=255;
            }

            uDest[i]= u;
            vDest[i]= v;
        }
#endif
}
/**
 * vertical scale YV12 to RGB
 */
static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
                                       int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
                                       uint8_t *dest, long dstW, long dstY)
{
#ifdef HAVE_MMX
    long dummy=0;
    if (c->flags & SWS_ACCURATE_RND){
        switch(c->dstFormat){
        case PIX_FMT_RGB32:
            YSCALEYUV2PACKEDX_ACCURATE
            YSCALEYUV2RGBX
            WRITEBGR32(%4, %5, %%REGa)
            YSCALEYUV2PACKEDX_END
            return;
        case PIX_FMT_BGR24:
            YSCALEYUV2PACKEDX_ACCURATE
            YSCALEYUV2RGBX
            "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
            "add %4, %%"REG_c" \n\t"
            WRITEBGR24(%%REGc, %5, %%REGa)
            :: "r" (&c->redDither),
            "m" (dummy), "m" (dummy), "m" (dummy),
            "r" (dest), "m" (dstW)
            : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
            );
            return;
        case PIX_FMT_BGR555:
            YSCALEYUV2PACKEDX_ACCURATE
            YSCALEYUV2RGBX
            /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
            "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
            "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
            "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
            WRITEBGR15(%4, %5, %%REGa)
            YSCALEYUV2PACKEDX_END
            return;
        case PIX_FMT_BGR565:
            YSCALEYUV2PACKEDX_ACCURATE
            YSCALEYUV2RGBX
            /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
            "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
            "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
            "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
            WRITEBGR16(%4, %5, %%REGa)
            YSCALEYUV2PACKEDX_END
            return;
        case PIX_FMT_YUYV422:
            YSCALEYUV2PACKEDX_ACCURATE
            /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
            "psraw $3, %%mm3 \n\t"
            "psraw $3, %%mm4 \n\t"
            "psraw $3, %%mm1 \n\t"
            "psraw $3, %%mm7 \n\t"
            WRITEYUY2(%4, %5, %%REGa)
            YSCALEYUV2PACKEDX_END
            return;
        }
    }else{
        switch(c->dstFormat)
        {
        case PIX_FMT_RGB32:
            YSCALEYUV2PACKEDX
            YSCALEYUV2RGBX
            WRITEBGR32(%4, %5, %%REGa)
            YSCALEYUV2PACKEDX_END
            return;
        case PIX_FMT_BGR24:
            YSCALEYUV2PACKEDX
            YSCALEYUV2RGBX
            "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c" \n\t" //FIXME optimize
            "add %4, %%"REG_c" \n\t"
            WRITEBGR24(%%REGc, %5, %%REGa)
            :: "r" (&c->redDither),
            "m" (dummy), "m" (dummy), "m" (dummy),
            "r" (dest), "m" (dstW)
            : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
            );
            return;
        case PIX_FMT_BGR555:
            YSCALEYUV2PACKEDX
            YSCALEYUV2RGBX
            /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
            "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
            "paddusb "MANGLE(g5Dither)", %%mm4 \n\t"
            "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
#endif
            WRITEBGR15(%4, %5, %%REGa)
            YSCALEYUV2PACKEDX_END
            return;
        case PIX_FMT_BGR565:
            YSCALEYUV2PACKEDX
            YSCALEYUV2RGBX
            /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
            "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
            "paddusb "MANGLE(g6Dither)", %%mm4 \n\t"
            "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
#endif
            WRITEBGR16(%4, %5, %%REGa)
            YSCALEYUV2PACKEDX_END
            return;
        case PIX_FMT_YUYV422:
            YSCALEYUV2PACKEDX
            /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
            "psraw $3, %%mm3 \n\t"
            "psraw $3, %%mm4 \n\t"
            "psraw $3, %%mm1 \n\t"
            "psraw $3, %%mm7 \n\t"
            WRITEYUY2(%4, %5, %%REGa)
            YSCALEYUV2PACKEDX_END
            return;
        }
    }
#endif
#ifdef HAVE_ALTIVEC
    /* The following list of supported dstFormat values should
       match what's found in the body of altivec_yuv2packedX() */
    if (c->dstFormat==PIX_FMT_ABGR  || c->dstFormat==PIX_FMT_BGRA  ||
        c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 ||
        c->dstFormat==PIX_FMT_RGBA  || c->dstFormat==PIX_FMT_ARGB)
        altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize,
                             chrFilter, chrSrc, chrFilterSize,
                             dest, dstW, dstY);
    else
#endif
        yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
                       chrFilter, chrSrc, chrFilterSize,
                       dest, dstW, dstY);
}
/**
 * vertical bilinear scale YV12 to RGB
 */
static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
                                       uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
{
    int yalpha1=yalpha^4095;
    int uvalpha1=uvalpha^4095;
    int i;

#if 0 //isn't used
    if (flags&SWS_FULL_CHR_H_INT)
    {
        switch(dstFormat)
        {
#ifdef HAVE_MMX
        case PIX_FMT_RGB32:
            asm volatile(
                FULL_YSCALEYUV2RGB
                "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
                "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
                "movq %%mm3, %%mm1 \n\t"
                "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
                "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
                MOVNTQ(%%mm3, (%4, %%REGa, 4))
                MOVNTQ(%%mm1, 8(%4, %%REGa, 4))
                "add $4, %%"REG_a" \n\t"
                "cmp %5, %%"REG_a" \n\t"
                " jb 1b \n\t"
                :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" ((long)dstW),
                "m" (yalpha1), "m" (uvalpha1)
                : "%"REG_a
            );
            break;
        case PIX_FMT_BGR24:
            asm volatile(
                FULL_YSCALEYUV2RGB
                // lsb ... msb
                "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
                "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
                "movq %%mm3, %%mm1 \n\t"
                "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
                "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
                "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
                "psrlq $8, %%mm3 \n\t" // GR0BGR00
                "pand "MANGLE(bm00000111)", %%mm2 \n\t" // BGR00000
                "pand "MANGLE(bm11111000)", %%mm3 \n\t" // 000BGR00
                "por %%mm2, %%mm3 \n\t" // BGRBGR00
                "movq %%mm1, %%mm2 \n\t"
                "psllq $48, %%mm1 \n\t" // 000000BG
                "por %%mm1, %%mm3 \n\t" // BGRBGRBG
                "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
                "psrld $16, %%mm2 \n\t" // R000R000
                "psrlq $24, %%mm1 \n\t" // 0BGR0000
                "por %%mm2, %%mm1 \n\t" // RBGRR000
                "mov %4, %%"REG_b" \n\t"
                "add %%"REG_a", %%"REG_b" \n\t"
#ifdef HAVE_MMX2
                //FIXME Alignment
                "movntq %%mm3, (%%"REG_b", %%"REG_a", 2) \n\t"
                "movntq %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t"
#else
                "movd %%mm3, (%%"REG_b", %%"REG_a", 2) \n\t"
                "psrlq $32, %%mm3 \n\t"
                "movd %%mm3, 4(%%"REG_b", %%"REG_a", 2) \n\t"
                "movd %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t"
#endif
                "add $4, %%"REG_a" \n\t"
                "cmp %5, %%"REG_a" \n\t"
                " jb 1b \n\t"
                :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
                "m" (yalpha1), "m" (uvalpha1)
                : "%"REG_a, "%"REG_b
            );
            break;
        case PIX_FMT_BGR555:
            asm volatile(
                FULL_YSCALEYUV2RGB
#ifdef DITHER1XBPP
                "paddusb "MANGLE(g5Dither)", %%mm1 \n\t"
                "paddusb "MANGLE(r5Dither)", %%mm0 \n\t"
                "paddusb "MANGLE(b5Dither)", %%mm3 \n\t"
#endif
                "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
                "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
                "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
                "psrlw $3, %%mm3 \n\t"
                "psllw $2, %%mm1 \n\t"
                "psllw $7, %%mm0 \n\t"
                "pand "MANGLE(g15Mask)", %%mm1 \n\t"
                "pand "MANGLE(r15Mask)", %%mm0 \n\t"
                "por %%mm3, %%mm1 \n\t"
                "por %%mm1, %%mm0 \n\t"
                MOVNTQ(%%mm0, (%4, %%REGa, 2))
                "add $4, %%"REG_a" \n\t"
                "cmp %5, %%"REG_a" \n\t"
                " jb 1b \n\t"
                :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
                "m" (yalpha1), "m" (uvalpha1)
                : "%"REG_a
            );
            break;
        case PIX_FMT_BGR565:
            asm volatile(
                FULL_YSCALEYUV2RGB
#ifdef DITHER1XBPP
                "paddusb "MANGLE(g6Dither)", %%mm1 \n\t"
                "paddusb "MANGLE(r5Dither)", %%mm0 \n\t"
                "paddusb "MANGLE(b5Dither)", %%mm3 \n\t"
#endif
                "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
                "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
                "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
                "psrlw $3, %%mm3 \n\t"
                "psllw $3, %%mm1 \n\t"
                "psllw $8, %%mm0 \n\t"
                "pand "MANGLE(g16Mask)", %%mm1 \n\t"
                "pand "MANGLE(r16Mask)", %%mm0 \n\t"
                "por %%mm3, %%mm1 \n\t"
                "por %%mm1, %%mm0 \n\t"
                MOVNTQ(%%mm0, (%4, %%REGa, 2))
                "add $4, %%"REG_a" \n\t"
                "cmp %5, %%"REG_a" \n\t"
                " jb 1b \n\t"
                :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
                "m" (yalpha1), "m" (uvalpha1)
                : "%"REG_a
            );
            break;
#endif
        case PIX_FMT_BGR32:
#ifndef HAVE_MMX
        case PIX_FMT_RGB32:
#endif
            if (dstFormat==PIX_FMT_RGB32)
            {
                int i;
#ifdef WORDS_BIGENDIAN
                dest++;
#endif
                for (i=0;i<dstW;i++){
                    // vertical linear interpolation && yuv2rgb in a single step:
                    int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                    int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
                    int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
                    dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
                    dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
                    dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
                    dest+= 4;
                }
            }
            else if (dstFormat==PIX_FMT_BGR24)
            {
                int i;
                for (i=0;i<dstW;i++){
                    // vertical linear interpolation && yuv2rgb in a single step:
                    int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                    int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
                    int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);
                    dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
                    dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
                    dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
                    dest+= 3;
                }
            }
            else if (dstFormat==PIX_FMT_BGR565)
            {
                int i;
                for (i=0;i<dstW;i++){
                    // vertical linear interpolation && yuv2rgb in a single step:
                    int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                    int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
                    int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);

                    ((uint16_t*)dest)[i] =
                        clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
                        clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
                        clip_table16r[(Y + yuvtab_3343[V]) >>13];
                }
            }
            else if (dstFormat==PIX_FMT_BGR555)
            {
                int i;
                for (i=0;i<dstW;i++){
                    // vertical linear interpolation && yuv2rgb in a single step:
                    int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
                    int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
                    int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>19);

                    ((uint16_t*)dest)[i] =
                        clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
                        clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
                        clip_table15r[(Y + yuvtab_3343[V]) >>13];
                }
            }
    }//FULL_UV_IPOL
    else
    {
#endif // if 0
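
/* Editor's note: in the MMX paths below, REG_BP doubles as the loop
 * counter and REG_b holds the destination pointer, so both are saved and
 * restored around each asm block (push/pop of REG_BP, spill of REG_b into
 * the context at ESP_OFFSET). */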
#ifdef HAVE_MMX
    switch(c->dstFormat)
    {
    //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
    case PIX_FMT_RGB32:
        asm volatile(
            "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"REG_b" \n\t"
            "push %%"REG_BP" \n\t"
            YSCALEYUV2RGB(%%REGBP, %5)
            WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
            "pop %%"REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
            "a" (&c->redDither)
        );
        return;
    case PIX_FMT_BGR24:
        asm volatile(
            "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"REG_b" \n\t"
            "push %%"REG_BP" \n\t"
            YSCALEYUV2RGB(%%REGBP, %5)
            WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
            "pop %%"REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
            "a" (&c->redDither)
        );
        return;
    case PIX_FMT_BGR555:
        asm volatile(
            "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"REG_b" \n\t"
            "push %%"REG_BP" \n\t"
            YSCALEYUV2RGB(%%REGBP, %5)
            /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
            "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
            "paddusb "MANGLE(g5Dither)", %%mm4 \n\t"
            "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
#endif
            WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
            "pop %%"REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
            "a" (&c->redDither)
        );
        return;
    case PIX_FMT_BGR565:
        asm volatile(
            "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"REG_b" \n\t"
            "push %%"REG_BP" \n\t"
            YSCALEYUV2RGB(%%REGBP, %5)
            /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
            "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
            "paddusb "MANGLE(g6Dither)", %%mm4 \n\t"
            "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
#endif
            WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
            "pop %%"REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
            "a" (&c->redDither)
        );
        return;
    case PIX_FMT_YUYV422:
        asm volatile(
            "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
            "mov %4, %%"REG_b" \n\t"
            "push %%"REG_BP" \n\t"
            YSCALEYUV2PACKED(%%REGBP, %5)
            WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
            "pop %%"REG_BP" \n\t"
            "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
            :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
            "a" (&c->redDither)
        );
        return;
    default: break;
    }
#endif //HAVE_MMX
    YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C)
}
  1407. /**
  1408. * YV12 to RGB without scaling or interpolating
  1409. */
  1410. static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
  1411. uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
  1412. {
  1413. const int yalpha1=0;
  1414. int i;
  1415. uint16_t *buf1= buf0; //FIXME needed for the rgb1/bgr1
  1416. const int yalpha= 4096; //FIXME ...
  1417. if (flags&SWS_FULL_CHR_H_INT)
  1418. {
  1419. RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
  1420. return;
  1421. }
  1422. #ifdef HAVE_MMX
  1423. if ( uvalpha < 2048 ) // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
  1424. {
  1425. switch(dstFormat)
  1426. {
  1427. case PIX_FMT_RGB32:
  1428. asm volatile(
  1429. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1430. "mov %4, %%"REG_b" \n\t"
  1431. "push %%"REG_BP" \n\t"
  1432. YSCALEYUV2RGB1(%%REGBP, %5)
  1433. WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
  1434. "pop %%"REG_BP" \n\t"
  1435. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1436. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1437. "a" (&c->redDither)
  1438. );
  1439. return;
  1440. case PIX_FMT_BGR24:
  1441. asm volatile(
  1442. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1443. "mov %4, %%"REG_b" \n\t"
  1444. "push %%"REG_BP" \n\t"
  1445. YSCALEYUV2RGB1(%%REGBP, %5)
  1446. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1447. "pop %%"REG_BP" \n\t"
  1448. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1449. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1450. "a" (&c->redDither)
  1451. );
  1452. return;
  1453. case PIX_FMT_BGR555:
  1454. asm volatile(
  1455. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1456. "mov %4, %%"REG_b" \n\t"
  1457. "push %%"REG_BP" \n\t"
  1458. YSCALEYUV2RGB1(%%REGBP, %5)
  1459. /* %%mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1460. #ifdef DITHER1XBPP
  1461. "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
  1462. "paddusb "MANGLE(g5Dither)", %%mm4 \n\t"
  1463. "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
  1464. #endif
  1465. WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
  1466. "pop %%"REG_BP" \n\t"
  1467. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1468. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1469. "a" (&c->redDither)
  1470. );
  1471. return;
  1472. case PIX_FMT_BGR565:
  1473. asm volatile(
  1474. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1475. "mov %4, %%"REG_b" \n\t"
  1476. "push %%"REG_BP" \n\t"
  1477. YSCALEYUV2RGB1(%%REGBP, %5)
  1478. /* %%mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1479. #ifdef DITHER1XBPP
  1480. "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
  1481. "paddusb "MANGLE(g6Dither)", %%mm4 \n\t"
  1482. "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
  1483. #endif
  1484. WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
  1485. "pop %%"REG_BP" \n\t"
  1486. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1487. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1488. "a" (&c->redDither)
  1489. );
  1490. return;
  1491. case PIX_FMT_YUYV422:
  1492. asm volatile(
  1493. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1494. "mov %4, %%"REG_b" \n\t"
  1495. "push %%"REG_BP" \n\t"
  1496. YSCALEYUV2PACKED1(%%REGBP, %5)
  1497. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1498. "pop %%"REG_BP" \n\t"
  1499. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1500. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1501. "a" (&c->redDither)
  1502. );
  1503. return;
  1504. }
  1505. }
  1506. else
  1507. {
  1508. switch(dstFormat)
  1509. {
  1510. case PIX_FMT_RGB32:
  1511. asm volatile(
  1512. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1513. "mov %4, %%"REG_b" \n\t"
  1514. "push %%"REG_BP" \n\t"
  1515. YSCALEYUV2RGB1b(%%REGBP, %5)
  1516. WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
  1517. "pop %%"REG_BP" \n\t"
  1518. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1519. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1520. "a" (&c->redDither)
  1521. );
  1522. return;
  1523. case PIX_FMT_BGR24:
  1524. asm volatile(
  1525. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1526. "mov %4, %%"REG_b" \n\t"
  1527. "push %%"REG_BP" \n\t"
  1528. YSCALEYUV2RGB1b(%%REGBP, %5)
  1529. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1530. "pop %%"REG_BP" \n\t"
  1531. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1532. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1533. "a" (&c->redDither)
  1534. );
  1535. return;
  1536. case PIX_FMT_BGR555:
  1537. asm volatile(
  1538. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1539. "mov %4, %%"REG_b" \n\t"
  1540. "push %%"REG_BP" \n\t"
  1541. YSCALEYUV2RGB1b(%%REGBP, %5)
  1542. /* %%mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1543. #ifdef DITHER1XBPP
  1544. "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
  1545. "paddusb "MANGLE(g5Dither)", %%mm4 \n\t"
  1546. "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
  1547. #endif
  1548. WRITEBGR15(%%REGb, 8280(%5), %%REGBP)
  1549. "pop %%"REG_BP" \n\t"
  1550. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1551. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1552. "a" (&c->redDither)
  1553. );
  1554. return;
  1555. case PIX_FMT_BGR565:
  1556. asm volatile(
  1557. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1558. "mov %4, %%"REG_b" \n\t"
  1559. "push %%"REG_BP" \n\t"
  1560. YSCALEYUV2RGB1b(%%REGBP, %5)
  1561. /* %%mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1562. #ifdef DITHER1XBPP
  1563. "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
  1564. "paddusb "MANGLE(g6Dither)", %%mm4 \n\t"
  1565. "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
  1566. #endif
  1567. WRITEBGR16(%%REGb, 8280(%5), %%REGBP)
  1568. "pop %%"REG_BP" \n\t"
  1569. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1570. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1571. "a" (&c->redDither)
  1572. );
  1573. return;
  1574. case PIX_FMT_YUYV422:
  1575. asm volatile(
  1576. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1577. "mov %4, %%"REG_b" \n\t"
  1578. "push %%"REG_BP" \n\t"
  1579. YSCALEYUV2PACKED1b(%%REGBP, %5)
  1580. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1581. "pop %%"REG_BP" \n\t"
  1582. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1583. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1584. "a" (&c->redDither)
  1585. );
  1586. return;
  1587. }
  1588. }
  1589. #endif
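    // C fallback: nearest chroma line (uvalpha < 2048) or the average of both, mirroring the MMX paths above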
  1590. if ( uvalpha < 2048 )
  1591. {
  1592. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C)
  1593. }else{
  1594. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C)
  1595. }
  1596. }
  1597. //FIXME yuy2* can read up to 7 samples too much
  1598. static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width)
  1599. {
  1600. #ifdef HAVE_MMX
  1601. asm volatile(
  1602. "movq "MANGLE(bm01010101)", %%mm2 \n\t"
  1603. "mov %0, %%"REG_a" \n\t"
  1604. "1: \n\t"
  1605. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1606. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1607. "pand %%mm2, %%mm0 \n\t"
  1608. "pand %%mm2, %%mm1 \n\t"
  1609. "packuswb %%mm1, %%mm0 \n\t"
  1610. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1611. "add $8, %%"REG_a" \n\t"
  1612. " js 1b \n\t"
  1613. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1614. : "%"REG_a
  1615. );
  1616. #else
  1617. int i;
  1618. for (i=0; i<width; i++)
  1619. dst[i]= src[2*i];
  1620. #endif
  1621. }
  1622. static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
  1623. {
  1624. #ifdef HAVE_MMX
  1625. asm volatile(
  1626. "movq "MANGLE(bm01010101)", %%mm4 \n\t"
  1627. "mov %0, %%"REG_a" \n\t"
  1628. "1: \n\t"
  1629. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1630. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1631. "psrlw $8, %%mm0 \n\t"
  1632. "psrlw $8, %%mm1 \n\t"
  1633. "packuswb %%mm1, %%mm0 \n\t"
  1634. "movq %%mm0, %%mm1 \n\t"
  1635. "psrlw $8, %%mm0 \n\t"
  1636. "pand %%mm4, %%mm1 \n\t"
  1637. "packuswb %%mm0, %%mm0 \n\t"
  1638. "packuswb %%mm1, %%mm1 \n\t"
  1639. "movd %%mm0, (%3, %%"REG_a") \n\t"
  1640. "movd %%mm1, (%2, %%"REG_a") \n\t"
  1641. "add $4, %%"REG_a" \n\t"
  1642. " js 1b \n\t"
  1643. : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
  1644. : "%"REG_a
  1645. );
  1646. #else
  1647. int i;
  1648. for (i=0; i<width; i++)
  1649. {
  1650. dstU[i]= src1[4*i + 1];
  1651. dstV[i]= src1[4*i + 3];
  1652. }
  1653. #endif
  1654. assert(src1 == src2);
  1655. }
  1656. //this is almost identical to the previous function, and exists only because yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses
  1657. static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width)
  1658. {
  1659. #ifdef HAVE_MMX
  1660. asm volatile(
  1661. "mov %0, %%"REG_a" \n\t"
  1662. "1: \n\t"
  1663. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1664. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1665. "psrlw $8, %%mm0 \n\t"
  1666. "psrlw $8, %%mm1 \n\t"
  1667. "packuswb %%mm1, %%mm0 \n\t"
  1668. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1669. "add $8, %%"REG_a" \n\t"
  1670. " js 1b \n\t"
  1671. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1672. : "%"REG_a
  1673. );
  1674. #else
  1675. int i;
  1676. for (i=0; i<width; i++)
  1677. dst[i]= src[2*i+1];
  1678. #endif
  1679. }
  1680. static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
  1681. {
  1682. #ifdef HAVE_MMX
  1683. asm volatile(
  1684. "movq "MANGLE(bm01010101)", %%mm4 \n\t"
  1685. "mov %0, %%"REG_a" \n\t"
  1686. "1: \n\t"
  1687. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1688. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1689. "pand %%mm4, %%mm0 \n\t"
  1690. "pand %%mm4, %%mm1 \n\t"
  1691. "packuswb %%mm1, %%mm0 \n\t"
  1692. "movq %%mm0, %%mm1 \n\t"
  1693. "psrlw $8, %%mm0 \n\t"
  1694. "pand %%mm4, %%mm1 \n\t"
  1695. "packuswb %%mm0, %%mm0 \n\t"
  1696. "packuswb %%mm1, %%mm1 \n\t"
  1697. "movd %%mm0, (%3, %%"REG_a") \n\t"
  1698. "movd %%mm1, (%2, %%"REG_a") \n\t"
  1699. "add $4, %%"REG_a" \n\t"
  1700. " js 1b \n\t"
  1701. : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
  1702. : "%"REG_a
  1703. );
  1704. #else
  1705. int i;
  1706. for (i=0; i<width; i++)
  1707. {
  1708. dstU[i]= src1[4*i + 0];
  1709. dstV[i]= src1[4*i + 2];
  1710. }
  1711. #endif
  1712. assert(src1 == src2);
  1713. }
  1714. static inline void RENAME(bgr32ToY)(uint8_t *dst, uint8_t *src, int width)
  1715. {
  1716. int i;
  1717. for (i=0; i<width; i++)
  1718. {
  1719. int b= ((uint32_t*)src)[i]&0xFF;
  1720. int g= (((uint32_t*)src)[i]>>8)&0xFF;
  1721. int r= (((uint32_t*)src)[i]>>16)&0xFF;
  1722. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  1723. }
  1724. }
  1725. static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1726. {
  1727. int i;
  1728. assert(src1 == src2);
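    /* SWAR: both pixels of the 2x1 chroma block are summed at once; the
     * 0xFF00FF mask keeps B and R in l and the 0x00FF00 mask keeps G in h,
     * so each field extracted below is the sum of two 8-bit samples
     * (hence the >>(RGB2YUV_SHIFT+1) to average). */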
  1729. for (i=0; i<width; i++)
  1730. {
  1731. const int a= ((uint32_t*)src1)[2*i+0];
  1732. const int e= ((uint32_t*)src1)[2*i+1];
  1733. const int l= (a&0xFF00FF) + (e&0xFF00FF);
  1734. const int h= (a&0x00FF00) + (e&0x00FF00);
  1735. const int b= l&0x3FF;
  1736. const int g= h>>8;
  1737. const int r= l>>16;
  1738. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
  1739. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
  1740. }
  1741. }
  1742. static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, long width)
  1743. {
  1744. #ifdef HAVE_MMX
  1745. asm volatile(
  1746. "mov %2, %%"REG_a" \n\t"
  1747. "movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t"
  1748. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1749. "pxor %%mm7, %%mm7 \n\t"
  1750. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
  1751. ASMALIGN(4)
  1752. "1: \n\t"
  1753. PREFETCH" 64(%0, %%"REG_d") \n\t"
  1754. "movd (%0, %%"REG_d"), %%mm0 \n\t"
  1755. "movd 3(%0, %%"REG_d"), %%mm1 \n\t"
  1756. "punpcklbw %%mm7, %%mm0 \n\t"
  1757. "punpcklbw %%mm7, %%mm1 \n\t"
  1758. "movd 6(%0, %%"REG_d"), %%mm2 \n\t"
  1759. "movd 9(%0, %%"REG_d"), %%mm3 \n\t"
  1760. "punpcklbw %%mm7, %%mm2 \n\t"
  1761. "punpcklbw %%mm7, %%mm3 \n\t"
  1762. "pmaddwd %%mm6, %%mm0 \n\t"
  1763. "pmaddwd %%mm6, %%mm1 \n\t"
  1764. "pmaddwd %%mm6, %%mm2 \n\t"
  1765. "pmaddwd %%mm6, %%mm3 \n\t"
  1766. #ifndef FAST_BGR2YV12
  1767. "psrad $8, %%mm0 \n\t"
  1768. "psrad $8, %%mm1 \n\t"
  1769. "psrad $8, %%mm2 \n\t"
  1770. "psrad $8, %%mm3 \n\t"
  1771. #endif
  1772. "packssdw %%mm1, %%mm0 \n\t"
  1773. "packssdw %%mm3, %%mm2 \n\t"
  1774. "pmaddwd %%mm5, %%mm0 \n\t"
  1775. "pmaddwd %%mm5, %%mm2 \n\t"
  1776. "packssdw %%mm2, %%mm0 \n\t"
  1777. "psraw $7, %%mm0 \n\t"
  1778. "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
  1779. "movd 15(%0, %%"REG_d"), %%mm1 \n\t"
  1780. "punpcklbw %%mm7, %%mm4 \n\t"
  1781. "punpcklbw %%mm7, %%mm1 \n\t"
  1782. "movd 18(%0, %%"REG_d"), %%mm2 \n\t"
  1783. "movd 21(%0, %%"REG_d"), %%mm3 \n\t"
  1784. "punpcklbw %%mm7, %%mm2 \n\t"
  1785. "punpcklbw %%mm7, %%mm3 \n\t"
  1786. "pmaddwd %%mm6, %%mm4 \n\t"
  1787. "pmaddwd %%mm6, %%mm1 \n\t"
  1788. "pmaddwd %%mm6, %%mm2 \n\t"
  1789. "pmaddwd %%mm6, %%mm3 \n\t"
  1790. #ifndef FAST_BGR2YV12
  1791. "psrad $8, %%mm4 \n\t"
  1792. "psrad $8, %%mm1 \n\t"
  1793. "psrad $8, %%mm2 \n\t"
  1794. "psrad $8, %%mm3 \n\t"
  1795. #endif
  1796. "packssdw %%mm1, %%mm4 \n\t"
  1797. "packssdw %%mm3, %%mm2 \n\t"
  1798. "pmaddwd %%mm5, %%mm4 \n\t"
  1799. "pmaddwd %%mm5, %%mm2 \n\t"
  1800. "add $24, %%"REG_d" \n\t"
  1801. "packssdw %%mm2, %%mm4 \n\t"
  1802. "psraw $7, %%mm4 \n\t"
  1803. "packuswb %%mm4, %%mm0 \n\t"
  1804. "paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t"
  1805. "movq %%mm0, (%1, %%"REG_a") \n\t"
  1806. "add $8, %%"REG_a" \n\t"
  1807. " js 1b \n\t"
  1808. : : "r" (src+width*3), "r" (dst+width), "g" (-width)
  1809. : "%"REG_a, "%"REG_d
  1810. );
  1811. #else
  1812. int i;
  1813. for (i=0; i<width; i++)
  1814. {
  1815. int b= src[i*3+0];
  1816. int g= src[i*3+1];
  1817. int r= src[i*3+2];
  1818. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  1819. }
  1820. #endif
  1821. }
  1822. static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
  1823. {
  1824. #ifdef HAVE_MMX
  1825. asm volatile(
  1826. "mov %3, %%"REG_a" \n\t"
  1827. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1828. "movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t"
  1829. "pxor %%mm7, %%mm7 \n\t"
  1830. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
  1831. "add %%"REG_d", %%"REG_d" \n\t"
  1832. ASMALIGN(4)
  1833. "1: \n\t"
  1834. PREFETCH" 64(%0, %%"REG_d") \n\t"
  1835. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1836. "movq (%0, %%"REG_d"), %%mm0 \n\t"
  1837. "movq 6(%0, %%"REG_d"), %%mm2 \n\t"
  1838. "movq %%mm0, %%mm1 \n\t"
  1839. "movq %%mm2, %%mm3 \n\t"
  1840. "psrlq $24, %%mm0 \n\t"
  1841. "psrlq $24, %%mm2 \n\t"
  1842. PAVGB(%%mm1, %%mm0)
  1843. PAVGB(%%mm3, %%mm2)
  1844. "punpcklbw %%mm7, %%mm0 \n\t"
  1845. "punpcklbw %%mm7, %%mm2 \n\t"
  1846. #else
  1847. "movd (%0, %%"REG_d"), %%mm0 \n\t"
  1848. "movd 3(%0, %%"REG_d"), %%mm2 \n\t"
  1849. "punpcklbw %%mm7, %%mm0 \n\t"
  1850. "punpcklbw %%mm7, %%mm2 \n\t"
  1851. "paddw %%mm2, %%mm0 \n\t"
  1852. "movd 6(%0, %%"REG_d"), %%mm4 \n\t"
  1853. "movd 9(%0, %%"REG_d"), %%mm2 \n\t"
  1854. "punpcklbw %%mm7, %%mm4 \n\t"
  1855. "punpcklbw %%mm7, %%mm2 \n\t"
  1856. "paddw %%mm4, %%mm2 \n\t"
  1857. "psrlw $1, %%mm0 \n\t"
  1858. "psrlw $1, %%mm2 \n\t"
  1859. #endif
  1860. "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
  1861. "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
  1862. "pmaddwd %%mm0, %%mm1 \n\t"
  1863. "pmaddwd %%mm2, %%mm3 \n\t"
  1864. "pmaddwd %%mm6, %%mm0 \n\t"
  1865. "pmaddwd %%mm6, %%mm2 \n\t"
  1866. #ifndef FAST_BGR2YV12
  1867. "psrad $8, %%mm0 \n\t"
  1868. "psrad $8, %%mm1 \n\t"
  1869. "psrad $8, %%mm2 \n\t"
  1870. "psrad $8, %%mm3 \n\t"
  1871. #endif
  1872. "packssdw %%mm2, %%mm0 \n\t"
  1873. "packssdw %%mm3, %%mm1 \n\t"
  1874. "pmaddwd %%mm5, %%mm0 \n\t"
  1875. "pmaddwd %%mm5, %%mm1 \n\t"
  1876. "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
  1877. "psraw $7, %%mm0 \n\t"
  1878. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1879. "movq 12(%0, %%"REG_d"), %%mm4 \n\t"
  1880. "movq 18(%0, %%"REG_d"), %%mm2 \n\t"
  1881. "movq %%mm4, %%mm1 \n\t"
  1882. "movq %%mm2, %%mm3 \n\t"
  1883. "psrlq $24, %%mm4 \n\t"
  1884. "psrlq $24, %%mm2 \n\t"
  1885. PAVGB(%%mm1, %%mm4)
  1886. PAVGB(%%mm3, %%mm2)
  1887. "punpcklbw %%mm7, %%mm4 \n\t"
  1888. "punpcklbw %%mm7, %%mm2 \n\t"
  1889. #else
  1890. "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
  1891. "movd 15(%0, %%"REG_d"), %%mm2 \n\t"
  1892. "punpcklbw %%mm7, %%mm4 \n\t"
  1893. "punpcklbw %%mm7, %%mm2 \n\t"
  1894. "paddw %%mm2, %%mm4 \n\t"
  1895. "movd 18(%0, %%"REG_d"), %%mm5 \n\t"
  1896. "movd 21(%0, %%"REG_d"), %%mm2 \n\t"
  1897. "punpcklbw %%mm7, %%mm5 \n\t"
  1898. "punpcklbw %%mm7, %%mm2 \n\t"
  1899. "paddw %%mm5, %%mm2 \n\t"
  1900. "movq "MANGLE(w1111)", %%mm5 \n\t"
  1901. "psrlw $2, %%mm4 \n\t"
  1902. "psrlw $2, %%mm2 \n\t"
  1903. #endif
  1904. "movq "MANGLE(bgr2VCoeff)", %%mm1 \n\t"
  1905. "movq "MANGLE(bgr2VCoeff)", %%mm3 \n\t"
  1906. "pmaddwd %%mm4, %%mm1 \n\t"
  1907. "pmaddwd %%mm2, %%mm3 \n\t"
  1908. "pmaddwd %%mm6, %%mm4 \n\t"
  1909. "pmaddwd %%mm6, %%mm2 \n\t"
  1910. #ifndef FAST_BGR2YV12
  1911. "psrad $8, %%mm4 \n\t"
  1912. "psrad $8, %%mm1 \n\t"
  1913. "psrad $8, %%mm2 \n\t"
  1914. "psrad $8, %%mm3 \n\t"
  1915. #endif
  1916. "packssdw %%mm2, %%mm4 \n\t"
  1917. "packssdw %%mm3, %%mm1 \n\t"
  1918. "pmaddwd %%mm5, %%mm4 \n\t"
  1919. "pmaddwd %%mm5, %%mm1 \n\t"
  1920. "add $24, %%"REG_d" \n\t"
  1921. "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
  1922. "psraw $7, %%mm4 \n\t"
  1923. "movq %%mm0, %%mm1 \n\t"
  1924. "punpckldq %%mm4, %%mm0 \n\t"
  1925. "punpckhdq %%mm4, %%mm1 \n\t"
  1926. "packsswb %%mm1, %%mm0 \n\t"
  1927. "paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t"
  1928. "movd %%mm0, (%1, %%"REG_a") \n\t"
  1929. "punpckhdq %%mm0, %%mm0 \n\t"
  1930. "movd %%mm0, (%2, %%"REG_a") \n\t"
  1931. "add $4, %%"REG_a" \n\t"
  1932. " js 1b \n\t"
  1933. : : "r" (src1+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width)
  1934. : "%"REG_a, "%"REG_d
  1935. );
  1936. #else
  1937. int i;
  1938. for (i=0; i<width; i++)
  1939. {
  1940. int b= src1[6*i + 0] + src1[6*i + 3];
  1941. int g= src1[6*i + 1] + src1[6*i + 4];
  1942. int r= src1[6*i + 2] + src1[6*i + 5];
  1943. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
  1944. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
  1945. }
  1946. #endif
  1947. assert(src1 == src2);
  1948. }
  1949. static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width)
  1950. {
  1951. int i;
  1952. for (i=0; i<width; i++)
  1953. {
  1954. int d= ((uint16_t*)src)[i];
  1955. int b= d&0x1F;
  1956. int g= (d>>5)&0x3F;
  1957. int r= (d>>11)&0x1F;
  1958. dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
  1959. }
  1960. }
  1961. static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1962. {
  1963. int i;
  1964. assert(src1==src2);
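    /* two RGB565 pixels at a time: dl holds B0, R0, G1, dh holds G0, B1, R1
     * (pre-shifted by 5); dh2 rotates dh so its fields line up with dl's,
     * and d then contains the per-channel sums of both pixels. */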
  1965. for (i=0; i<width; i++)
  1966. {
  1967. int d0= ((uint32_t*)src1)[i];
  1968. int dl= (d0&0x07E0F81F);
  1969. int dh= ((d0>>5)&0x07C0F83F);
  1970. int dh2= (dh>>11) + (dh<<21);
  1971. int d= dh2 + dl;
  1972. int b= d&0x7F;
  1973. int r= (d>>11)&0x7F;
  1974. int g= d>>21;
  1975. dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
  1976. dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
  1977. }
  1978. }
  1979. static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width)
  1980. {
  1981. int i;
  1982. for (i=0; i<width; i++)
  1983. {
  1984. int d= ((uint16_t*)src)[i];
  1985. int b= d&0x1F;
  1986. int g= (d>>5)&0x1F;
  1987. int r= (d>>10)&0x1F;
  1988. dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
  1989. }
  1990. }
  1991. static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1992. {
  1993. int i;
  1994. assert(src1==src2);
  1995. for (i=0; i<width; i++)
  1996. {
  1997. int d0= ((uint32_t*)src1)[i];
  1998. int dl= (d0&0x03E07C1F);
  1999. int dh= ((d0>>5)&0x03E0F81F);
  2000. int dh2= (dh>>11) + (dh<<21);
  2001. int d= dh2 + dl;
  2002. int b= d&0x7F;
  2003. int r= (d>>10)&0x7F;
  2004. int g= d>>21;
  2005. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1-3)) + 128;
  2006. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1-3)) + 128;
  2007. }
  2008. }
  2009. static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width)
  2010. {
  2011. int i;
  2012. for (i=0; i<width; i++)
  2013. {
  2014. int r= ((uint32_t*)src)[i]&0xFF;
  2015. int g= (((uint32_t*)src)[i]>>8)&0xFF;
  2016. int b= (((uint32_t*)src)[i]>>16)&0xFF;
  2017. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  2018. }
  2019. }
  2020. static inline void RENAME(rgb32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  2021. {
  2022. int i;
  2023. assert(src1==src2);
  2024. for (i=0; i<width; i++)
  2025. {
  2026. const int a= ((uint32_t*)src1)[2*i+0];
  2027. const int e= ((uint32_t*)src1)[2*i+1];
  2028. const int l= (a&0xFF00FF) + (e&0xFF00FF);
  2029. const int h= (a&0x00FF00) + (e&0x00FF00);
  2030. const int r= l&0x3FF;
  2031. const int g= h>>8;
  2032. const int b= l>>16;
  2033. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
  2034. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
  2035. }
  2036. }
  2037. static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, int width)
  2038. {
  2039. int i;
  2040. for (i=0; i<width; i++)
  2041. {
  2042. int r= src[i*3+0];
  2043. int g= src[i*3+1];
  2044. int b= src[i*3+2];
  2045. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)) )>>RGB2YUV_SHIFT);
  2046. }
  2047. }
  2048. static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  2049. {
  2050. int i;
  2051. assert(src1==src2);
  2052. for (i=0; i<width; i++)
  2053. {
  2054. int r= src1[6*i + 0] + src1[6*i + 3];
  2055. int g= src1[6*i + 1] + src1[6*i + 4];
  2056. int b= src1[6*i + 2] + src1[6*i + 5];
  2057. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
  2058. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
  2059. }
  2060. }
  2061. static inline void RENAME(rgb16ToY)(uint8_t *dst, uint8_t *src, int width)
  2062. {
  2063. int i;
  2064. for (i=0; i<width; i++)
  2065. {
  2066. int d= ((uint16_t*)src)[i];
  2067. int r= d&0x1F;
  2068. int g= (d>>5)&0x3F;
  2069. int b= (d>>11)&0x1F;
  2070. dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
  2071. }
  2072. }
  2073. static inline void RENAME(rgb16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  2074. {
  2075. int i;
  2076. assert(src1 == src2);
  2077. for (i=0; i<width; i++)
  2078. {
  2079. int d0= ((uint32_t*)src1)[i];
  2080. int dl= (d0&0x07E0F81F);
  2081. int d= dl + (((d0>>16) + (d0<<16))&0x07E0F81F);
  2082. int r= d&0x3F;
  2083. int b= (d>>11)&0x3F;
  2084. int g= d>>21;
  2085. dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
  2086. dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
  2087. }
  2088. }
  2089. static inline void RENAME(rgb15ToY)(uint8_t *dst, uint8_t *src, int width)
  2090. {
  2091. int i;
  2092. for (i=0; i<width; i++)
  2093. {
  2094. int d= ((uint16_t*)src)[i];
  2095. int r= d&0x1F;
  2096. int g= (d>>5)&0x1F;
  2097. int b= (d>>10)&0x1F;
  2098. dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
  2099. }
  2100. }
  2101. static inline void RENAME(rgb15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  2102. {
  2103. int i;
  2104. assert(src1 == src2);
  2105. for (i=0; i<width; i++)
  2106. {
  2107. int d0= ((uint32_t*)src1)[i];
  2108. int dl= (d0&0x03E07C1F);
  2109. int d= dl + (((d0>>16) + (d0<<16))&0x03E07C1F);
  2110. int r= d&0x3F;
  2111. int b= (d>>10)&0x3F;
  2112. int g= d>>21;
  2113. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1-3)) + 128;
  2114. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1-3)) + 128;
  2115. }
  2116. }
  2117. static inline void RENAME(palToY)(uint8_t *dst, uint8_t *src, int width, uint32_t *pal)
  2118. {
  2119. int i;
  2120. for (i=0; i<width; i++)
  2121. {
  2122. int d= src[i];
  2123. dst[i]= pal[d] & 0xFF;
  2124. }
  2125. }
  2126. static inline void RENAME(palToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width, uint32_t *pal)
  2127. {
  2128. int i;
  2129. assert(src1 == src2);
  2130. for (i=0; i<width; i++)
  2131. {
  2132. int p= pal[src1[i]];
  2133. dstU[i]= p>>8;
  2134. dstV[i]= p>>16;
  2135. }
  2136. }
  2137. // Bilinear / Bicubic scaling
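// Generic horizontal FIR filter: for every output pixel i,
//   dst[i] = clip((sum over j of src[filterPos[i]+j] * filter[i*filterSize+j]) >> 7, 0, 2^15-1)
// exactly as the C fallback at the end of this function spells out.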
  2138. static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
  2139. int16_t *filter, int16_t *filterPos, long filterSize)
  2140. {
  2141. #ifdef HAVE_MMX
  2142. assert(filterSize % 4 == 0 && filterSize>0);
  2143. if (filterSize==4) // Always true for upscaling, sometimes for downscaling, too.
  2144. {
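        /* counter idiom used by these loops: counter runs from -2*dstW up
         * to 0, and filter/filterPos/dst are pre-biased so (base + counter)
         * still addresses element 0; the loop can then terminate on the
         * carry flag ("jnc") without a separate compare. */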
  2145. long counter= -2*dstW;
  2146. filter-= counter*2;
  2147. filterPos-= counter/2;
  2148. dst-= counter/2;
  2149. asm volatile(
  2150. #if defined(PIC)
  2151. "push %%"REG_b" \n\t"
  2152. #endif
  2153. "pxor %%mm7, %%mm7 \n\t"
  2154. "movq "MANGLE(w02)", %%mm6 \n\t"
  2155. "push %%"REG_BP" \n\t" // we use 7 regs here ...
  2156. "mov %%"REG_a", %%"REG_BP" \n\t"
  2157. ASMALIGN(4)
  2158. "1: \n\t"
  2159. "movzwl (%2, %%"REG_BP"), %%eax \n\t"
  2160. "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
  2161. "movq (%1, %%"REG_BP", 4), %%mm1 \n\t"
  2162. "movq 8(%1, %%"REG_BP", 4), %%mm3 \n\t"
  2163. "movd (%3, %%"REG_a"), %%mm0 \n\t"
  2164. "movd (%3, %%"REG_b"), %%mm2 \n\t"
  2165. "punpcklbw %%mm7, %%mm0 \n\t"
  2166. "punpcklbw %%mm7, %%mm2 \n\t"
  2167. "pmaddwd %%mm1, %%mm0 \n\t"
  2168. "pmaddwd %%mm2, %%mm3 \n\t"
  2169. "psrad $8, %%mm0 \n\t"
  2170. "psrad $8, %%mm3 \n\t"
  2171. "packssdw %%mm3, %%mm0 \n\t"
  2172. "pmaddwd %%mm6, %%mm0 \n\t"
  2173. "packssdw %%mm0, %%mm0 \n\t"
  2174. "movd %%mm0, (%4, %%"REG_BP") \n\t"
  2175. "add $4, %%"REG_BP" \n\t"
  2176. " jnc 1b \n\t"
  2177. "pop %%"REG_BP" \n\t"
  2178. #if defined(PIC)
  2179. "pop %%"REG_b" \n\t"
  2180. #endif
  2181. : "+a" (counter)
  2182. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  2183. #if !defined(PIC)
  2184. : "%"REG_b
  2185. #endif
  2186. );
  2187. }
  2188. else if (filterSize==8)
  2189. {
  2190. long counter= -2*dstW;
  2191. filter-= counter*4;
  2192. filterPos-= counter/2;
  2193. dst-= counter/2;
  2194. asm volatile(
  2195. #if defined(PIC)
  2196. "push %%"REG_b" \n\t"
  2197. #endif
  2198. "pxor %%mm7, %%mm7 \n\t"
  2199. "movq "MANGLE(w02)", %%mm6 \n\t"
  2200. "push %%"REG_BP" \n\t" // we use 7 regs here ...
  2201. "mov %%"REG_a", %%"REG_BP" \n\t"
  2202. ASMALIGN(4)
  2203. "1: \n\t"
  2204. "movzwl (%2, %%"REG_BP"), %%eax \n\t"
  2205. "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
  2206. "movq (%1, %%"REG_BP", 8), %%mm1 \n\t"
  2207. "movq 16(%1, %%"REG_BP", 8), %%mm3 \n\t"
  2208. "movd (%3, %%"REG_a"), %%mm0 \n\t"
  2209. "movd (%3, %%"REG_b"), %%mm2 \n\t"
  2210. "punpcklbw %%mm7, %%mm0 \n\t"
  2211. "punpcklbw %%mm7, %%mm2 \n\t"
  2212. "pmaddwd %%mm1, %%mm0 \n\t"
  2213. "pmaddwd %%mm2, %%mm3 \n\t"
  2214. "movq 8(%1, %%"REG_BP", 8), %%mm1 \n\t"
  2215. "movq 24(%1, %%"REG_BP", 8), %%mm5 \n\t"
  2216. "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
  2217. "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
  2218. "punpcklbw %%mm7, %%mm4 \n\t"
  2219. "punpcklbw %%mm7, %%mm2 \n\t"
  2220. "pmaddwd %%mm1, %%mm4 \n\t"
  2221. "pmaddwd %%mm2, %%mm5 \n\t"
  2222. "paddd %%mm4, %%mm0 \n\t"
  2223. "paddd %%mm5, %%mm3 \n\t"
  2224. "psrad $8, %%mm0 \n\t"
  2225. "psrad $8, %%mm3 \n\t"
  2226. "packssdw %%mm3, %%mm0 \n\t"
  2227. "pmaddwd %%mm6, %%mm0 \n\t"
  2228. "packssdw %%mm0, %%mm0 \n\t"
  2229. "movd %%mm0, (%4, %%"REG_BP") \n\t"
  2230. "add $4, %%"REG_BP" \n\t"
  2231. " jnc 1b \n\t"
  2232. "pop %%"REG_BP" \n\t"
  2233. #if defined(PIC)
  2234. "pop %%"REG_b" \n\t"
  2235. #endif
  2236. : "+a" (counter)
  2237. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  2238. #if !defined(PIC)
  2239. : "%"REG_b
  2240. #endif
  2241. );
  2242. }
  2243. else
  2244. {
  2245. uint8_t *offset = src+filterSize;
  2246. long counter= -2*dstW;
  2247. //filter-= counter*filterSize/2;
  2248. filterPos-= counter/2;
  2249. dst-= counter/2;
  2250. asm volatile(
  2251. "pxor %%mm7, %%mm7 \n\t"
  2252. "movq "MANGLE(w02)", %%mm6 \n\t"
  2253. ASMALIGN(4)
  2254. "1: \n\t"
  2255. "mov %2, %%"REG_c" \n\t"
  2256. "movzwl (%%"REG_c", %0), %%eax \n\t"
  2257. "movzwl 2(%%"REG_c", %0), %%edx \n\t"
  2258. "mov %5, %%"REG_c" \n\t"
  2259. "pxor %%mm4, %%mm4 \n\t"
  2260. "pxor %%mm5, %%mm5 \n\t"
  2261. "2: \n\t"
  2262. "movq (%1), %%mm1 \n\t"
  2263. "movq (%1, %6), %%mm3 \n\t"
  2264. "movd (%%"REG_c", %%"REG_a"), %%mm0 \n\t"
  2265. "movd (%%"REG_c", %%"REG_d"), %%mm2 \n\t"
  2266. "punpcklbw %%mm7, %%mm0 \n\t"
  2267. "punpcklbw %%mm7, %%mm2 \n\t"
  2268. "pmaddwd %%mm1, %%mm0 \n\t"
  2269. "pmaddwd %%mm2, %%mm3 \n\t"
  2270. "paddd %%mm3, %%mm5 \n\t"
  2271. "paddd %%mm0, %%mm4 \n\t"
  2272. "add $8, %1 \n\t"
  2273. "add $4, %%"REG_c" \n\t"
  2274. "cmp %4, %%"REG_c" \n\t"
  2275. " jb 2b \n\t"
  2276. "add %6, %1 \n\t"
  2277. "psrad $8, %%mm4 \n\t"
  2278. "psrad $8, %%mm5 \n\t"
  2279. "packssdw %%mm5, %%mm4 \n\t"
  2280. "pmaddwd %%mm6, %%mm4 \n\t"
  2281. "packssdw %%mm4, %%mm4 \n\t"
  2282. "mov %3, %%"REG_a" \n\t"
  2283. "movd %%mm4, (%%"REG_a", %0) \n\t"
  2284. "add $4, %0 \n\t"
  2285. " jnc 1b \n\t"
  2286. : "+r" (counter), "+r" (filter)
  2287. : "m" (filterPos), "m" (dst), "m"(offset),
  2288. "m" (src), "r" (filterSize*2)
  2289. : "%"REG_a, "%"REG_c, "%"REG_d
  2290. );
  2291. }
  2292. #else
  2293. #ifdef HAVE_ALTIVEC
  2294. hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
  2295. #else
  2296. int i;
  2297. for (i=0; i<dstW; i++)
  2298. {
  2299. int j;
  2300. int srcPos= filterPos[i];
  2301. int val=0;
  2302. //printf("filterPos: %d\n", filterPos[i]);
  2303. for (j=0; j<filterSize; j++)
  2304. {
  2305. //printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
  2306. val += ((int)src[srcPos + j])*filter[filterSize*i + j];
  2307. }
  2308. //filter += hFilterSize;
  2309. dst[i] = av_clip(val>>7, 0, (1<<15)-1); // the cubic equation does overflow ...
  2310. //dst[i] = val>>7;
  2311. }
  2312. #endif
  2313. #endif
  2314. }
  2315. // *** horizontal scale Y line to temp buffer
  2316. static inline void RENAME(hyscale)(uint16_t *dst, long dstWidth, uint8_t *src, int srcW, int xInc,
  2317. int flags, int canMMX2BeUsed, int16_t *hLumFilter,
  2318. int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
  2319. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  2320. int32_t *mmx2FilterPos, uint8_t *pal)
  2321. {
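    // anything that is not already planar 8-bit luma gets converted into
    // formatConvBuffer first; GRAY16BE reuses yuy2ToY and GRAY16LE reuses
    // uyvyToY, both of which pick the most significant byte of each sample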
  2322. if (srcFormat==PIX_FMT_YUYV422 || srcFormat==PIX_FMT_GRAY16BE)
  2323. {
  2324. RENAME(yuy2ToY)(formatConvBuffer, src, srcW);
  2325. src= formatConvBuffer;
  2326. }
  2327. else if (srcFormat==PIX_FMT_UYVY422 || srcFormat==PIX_FMT_GRAY16LE)
  2328. {
  2329. RENAME(uyvyToY)(formatConvBuffer, src, srcW);
  2330. src= formatConvBuffer;
  2331. }
  2332. else if (srcFormat==PIX_FMT_RGB32)
  2333. {
  2334. RENAME(bgr32ToY)(formatConvBuffer, src, srcW);
  2335. src= formatConvBuffer;
  2336. }
  2337. else if (srcFormat==PIX_FMT_BGR24)
  2338. {
  2339. RENAME(bgr24ToY)(formatConvBuffer, src, srcW);
  2340. src= formatConvBuffer;
  2341. }
  2342. else if (srcFormat==PIX_FMT_BGR565)
  2343. {
  2344. RENAME(bgr16ToY)(formatConvBuffer, src, srcW);
  2345. src= formatConvBuffer;
  2346. }
  2347. else if (srcFormat==PIX_FMT_BGR555)
  2348. {
  2349. RENAME(bgr15ToY)(formatConvBuffer, src, srcW);
  2350. src= formatConvBuffer;
  2351. }
  2352. else if (srcFormat==PIX_FMT_BGR32)
  2353. {
  2354. RENAME(rgb32ToY)(formatConvBuffer, src, srcW);
  2355. src= formatConvBuffer;
  2356. }
  2357. else if (srcFormat==PIX_FMT_RGB24)
  2358. {
  2359. RENAME(rgb24ToY)(formatConvBuffer, src, srcW);
  2360. src= formatConvBuffer;
  2361. }
  2362. else if (srcFormat==PIX_FMT_RGB565)
  2363. {
  2364. RENAME(rgb16ToY)(formatConvBuffer, src, srcW);
  2365. src= formatConvBuffer;
  2366. }
  2367. else if (srcFormat==PIX_FMT_RGB555)
  2368. {
  2369. RENAME(rgb15ToY)(formatConvBuffer, src, srcW);
  2370. src= formatConvBuffer;
  2371. }
  2372. else if (srcFormat==PIX_FMT_RGB8 || srcFormat==PIX_FMT_BGR8 || srcFormat==PIX_FMT_PAL8 || srcFormat==PIX_FMT_BGR4_BYTE || srcFormat==PIX_FMT_RGB4_BYTE)
  2373. {
  2374. RENAME(palToY)(formatConvBuffer, src, srcW, pal);
  2375. src= formatConvBuffer;
  2376. }
  2377. #ifdef HAVE_MMX
  2378. // use the new MMX scaler if MMX2 can't be used (it is faster than the plain x86 asm one)
  2379. if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2380. #else
  2381. if (!(flags&SWS_FAST_BILINEAR))
  2382. #endif
  2383. {
  2384. RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
  2385. }
  2386. else // Fast Bilinear upscale / crap downscale
  2387. {
  2388. #if defined(ARCH_X86)
  2389. #ifdef HAVE_MMX2
  2390. int i;
  2391. #if defined(PIC)
  2392. uint64_t ebxsave __attribute__((aligned(8)));
  2393. #endif
  2394. if (canMMX2BeUsed)
  2395. {
  2396. asm volatile(
  2397. #if defined(PIC)
  2398. "mov %%"REG_b", %5 \n\t"
  2399. #endif
  2400. "pxor %%mm7, %%mm7 \n\t"
  2401. "mov %0, %%"REG_c" \n\t"
  2402. "mov %1, %%"REG_D" \n\t"
  2403. "mov %2, %%"REG_d" \n\t"
  2404. "mov %3, %%"REG_b" \n\t"
  2405. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2406. PREFETCH" (%%"REG_c") \n\t"
  2407. PREFETCH" 32(%%"REG_c") \n\t"
  2408. PREFETCH" 64(%%"REG_c") \n\t"
  2409. #ifdef ARCH_X86_64
  2410. #define FUNNY_Y_CODE \
  2411. "movl (%%"REG_b"), %%esi \n\t"\
  2412. "call *%4 \n\t"\
  2413. "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
  2414. "add %%"REG_S", %%"REG_c" \n\t"\
  2415. "add %%"REG_a", %%"REG_D" \n\t"\
  2416. "xor %%"REG_a", %%"REG_a" \n\t"\
  2417. #else
  2418. #define FUNNY_Y_CODE \
  2419. "movl (%%"REG_b"), %%esi \n\t"\
  2420. "call *%4 \n\t"\
  2421. "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
  2422. "add %%"REG_a", %%"REG_D" \n\t"\
  2423. "xor %%"REG_a", %%"REG_a" \n\t"\
  2424. #endif
  2425. FUNNY_Y_CODE
  2426. FUNNY_Y_CODE
  2427. FUNNY_Y_CODE
  2428. FUNNY_Y_CODE
  2429. FUNNY_Y_CODE
  2430. FUNNY_Y_CODE
  2431. FUNNY_Y_CODE
  2432. FUNNY_Y_CODE
  2433. #if defined(PIC)
  2434. "mov %5, %%"REG_b" \n\t"
  2435. #endif
  2436. :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2437. "m" (funnyYCode)
  2438. #if defined(PIC)
  2439. ,"m" (ebxsave)
  2440. #endif
  2441. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
  2442. #if !defined(PIC)
  2443. ,"%"REG_b
  2444. #endif
  2445. );
  2446. for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
  2447. }
  2448. else
  2449. {
  2450. #endif
  2451. long xInc_shr16 = xInc >> 16;
  2452. uint16_t xInc_mask = xInc & 0xffff;
  2453. // no MMX, just plain x86 asm ...
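        /* 16.16 fixed-point DDA: the fractional step (xInc_mask) accumulates
         * in %cx, its overflow sets the carry flag, and "adc" folds that
         * carry into the integer source index along with xInc_shr16;
         * two output pixels are produced per iteration. */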
  2454. asm volatile(
  2455. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2456. "xor %%"REG_d", %%"REG_d" \n\t" // xx
  2457. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2458. ASMALIGN(4)
  2459. "1: \n\t"
  2460. "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
  2461. "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2462. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2463. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2464. "shll $16, %%edi \n\t"
  2465. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2466. "mov %1, %%"REG_D" \n\t"
  2467. "shrl $9, %%esi \n\t"
  2468. "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
  2469. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2470. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2471. "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
  2472. "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2473. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2474. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2475. "shll $16, %%edi \n\t"
  2476. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2477. "mov %1, %%"REG_D" \n\t"
  2478. "shrl $9, %%esi \n\t"
  2479. "movw %%si, 2(%%"REG_D", %%"REG_a", 2) \n\t"
  2480. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2481. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2482. "add $2, %%"REG_a" \n\t"
  2483. "cmp %2, %%"REG_a" \n\t"
  2484. " jb 1b \n\t"
  2485. :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
  2486. : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
  2487. );
  2488. #ifdef HAVE_MMX2
  2489. } //if MMX2 can't be used
  2490. #endif
  2491. #else
  2492. int i;
  2493. unsigned int xpos=0;
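    // 16.16 fixed-point source position; the output is scaled by 128 (<<7)
    // so the vertical pass keeps 7 extra bits of precision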
  2494. for (i=0;i<dstWidth;i++)
  2495. {
  2496. register unsigned int xx=xpos>>16;
  2497. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2498. dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
  2499. xpos+=xInc;
  2500. }
  2501. #endif
  2502. }
  2503. }
  2504. inline static void RENAME(hcscale)(uint16_t *dst, long dstWidth, uint8_t *src1, uint8_t *src2,
  2505. int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
  2506. int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
  2507. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  2508. int32_t *mmx2FilterPos, uint8_t *pal)
  2509. {
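    // same conversion dance as in hyscale, but producing two planes:
    // U at formatConvBuffer and V at formatConvBuffer+2048 (the fixed
    // plane offset used by the horizontal chroma buffers)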
  2510. if (srcFormat==PIX_FMT_YUYV422)
  2511. {
  2512. RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2513. src1= formatConvBuffer;
  2514. src2= formatConvBuffer+2048;
  2515. }
  2516. else if (srcFormat==PIX_FMT_UYVY422)
  2517. {
  2518. RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2519. src1= formatConvBuffer;
  2520. src2= formatConvBuffer+2048;
  2521. }
  2522. else if (srcFormat==PIX_FMT_RGB32)
  2523. {
  2524. RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2525. src1= formatConvBuffer;
  2526. src2= formatConvBuffer+2048;
  2527. }
  2528. else if (srcFormat==PIX_FMT_BGR24)
  2529. {
  2530. RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2531. src1= formatConvBuffer;
  2532. src2= formatConvBuffer+2048;
  2533. }
  2534. else if (srcFormat==PIX_FMT_BGR565)
  2535. {
  2536. RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2537. src1= formatConvBuffer;
  2538. src2= formatConvBuffer+2048;
  2539. }
  2540. else if (srcFormat==PIX_FMT_BGR555)
  2541. {
  2542. RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2543. src1= formatConvBuffer;
  2544. src2= formatConvBuffer+2048;
  2545. }
  2546. else if (srcFormat==PIX_FMT_BGR32)
  2547. {
  2548. RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2549. src1= formatConvBuffer;
  2550. src2= formatConvBuffer+2048;
  2551. }
  2552. else if (srcFormat==PIX_FMT_RGB24)
  2553. {
  2554. RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2555. src1= formatConvBuffer;
  2556. src2= formatConvBuffer+2048;
  2557. }
  2558. else if (srcFormat==PIX_FMT_RGB565)
  2559. {
  2560. RENAME(rgb16ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2561. src1= formatConvBuffer;
  2562. src2= formatConvBuffer+2048;
  2563. }
  2564. else if (srcFormat==PIX_FMT_RGB555)
  2565. {
  2566. RENAME(rgb15ToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW);
  2567. src1= formatConvBuffer;
  2568. src2= formatConvBuffer+2048;
  2569. }
  2570. else if (isGray(srcFormat))
  2571. {
  2572. return;
  2573. }
  2574. else if (srcFormat==PIX_FMT_RGB8 || srcFormat==PIX_FMT_BGR8 || srcFormat==PIX_FMT_PAL8 || srcFormat==PIX_FMT_BGR4_BYTE || srcFormat==PIX_FMT_RGB4_BYTE)
  2575. {
  2576. RENAME(palToUV)(formatConvBuffer, formatConvBuffer+2048, src1, src2, srcW, pal);
  2577. src1= formatConvBuffer;
  2578. src2= formatConvBuffer+2048;
  2579. }
  2580. #ifdef HAVE_MMX
  2581. // use the new MMX scaler if MMX2 can't be used (it is faster than the plain x86 asm one)
  2582. if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2583. #else
  2584. if (!(flags&SWS_FAST_BILINEAR))
  2585. #endif
  2586. {
  2587. RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2588. RENAME(hScale)(dst+2048, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2589. }
  2590. else // Fast Bilinear upscale / crap downscale
  2591. {
  2592. #if defined(ARCH_X86)
  2593. #ifdef HAVE_MMX2
  2594. int i;
  2595. #if defined(PIC)
  2596. uint64_t ebxsave __attribute__((aligned(8)));
  2597. #endif
  2598. if (canMMX2BeUsed)
  2599. {
  2600. asm volatile(
  2601. #if defined(PIC)
  2602. "mov %%"REG_b", %6 \n\t"
  2603. #endif
  2604. "pxor %%mm7, %%mm7 \n\t"
  2605. "mov %0, %%"REG_c" \n\t"
  2606. "mov %1, %%"REG_D" \n\t"
  2607. "mov %2, %%"REG_d" \n\t"
  2608. "mov %3, %%"REG_b" \n\t"
  2609. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2610. PREFETCH" (%%"REG_c") \n\t"
  2611. PREFETCH" 32(%%"REG_c") \n\t"
  2612. PREFETCH" 64(%%"REG_c") \n\t"
  2613. #ifdef ARCH_X86_64
  2614. #define FUNNY_UV_CODE \
  2615. "movl (%%"REG_b"), %%esi \n\t"\
  2616. "call *%4 \n\t"\
  2617. "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
  2618. "add %%"REG_S", %%"REG_c" \n\t"\
  2619. "add %%"REG_a", %%"REG_D" \n\t"\
  2620. "xor %%"REG_a", %%"REG_a" \n\t"\
  2621. #else
  2622. #define FUNNY_UV_CODE \
  2623. "movl (%%"REG_b"), %%esi \n\t"\
  2624. "call *%4 \n\t"\
  2625. "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
  2626. "add %%"REG_a", %%"REG_D" \n\t"\
  2627. "xor %%"REG_a", %%"REG_a" \n\t"\
  2628. #endif
  2629. FUNNY_UV_CODE
  2630. FUNNY_UV_CODE
  2631. FUNNY_UV_CODE
  2632. FUNNY_UV_CODE
  2633. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2634. "mov %5, %%"REG_c" \n\t" // src
  2635. "mov %1, %%"REG_D" \n\t" // buf1
  2636. "add $4096, %%"REG_D" \n\t"
  2637. PREFETCH" (%%"REG_c") \n\t"
  2638. PREFETCH" 32(%%"REG_c") \n\t"
  2639. PREFETCH" 64(%%"REG_c") \n\t"
  2640. FUNNY_UV_CODE
  2641. FUNNY_UV_CODE
  2642. FUNNY_UV_CODE
  2643. FUNNY_UV_CODE
  2644. #if defined(PIC)
  2645. "mov %6, %%"REG_b" \n\t"
  2646. #endif
  2647. :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2648. "m" (funnyUVCode), "m" (src2)
  2649. #if defined(PIC)
  2650. ,"m" (ebxsave)
  2651. #endif
  2652. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
  2653. #if !defined(PIC)
  2654. ,"%"REG_b
  2655. #endif
  2656. );
  2657. for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
  2658. {
  2659. //printf("%d %d %d\n", dstWidth, i, srcW);
  2660. dst[i] = src1[srcW-1]*128;
  2661. dst[i+2048] = src2[srcW-1]*128;
  2662. }
  2663. }
  2664. else
  2665. {
  2666. #endif
  2667. long xInc_shr16 = (long) (xInc >> 16);
  2668. uint16_t xInc_mask = xInc & 0xffff;
  2669. asm volatile(
  2670. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2671. "xor %%"REG_d", %%"REG_d" \n\t" // xx
  2672. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2673. ASMALIGN(4)
  2674. "1: \n\t"
  2675. "mov %0, %%"REG_S" \n\t"
  2676. "movzbl (%%"REG_S", %%"REG_d"), %%edi \n\t" //src[xx]
  2677. "movzbl 1(%%"REG_S", %%"REG_d"), %%esi \n\t" //src[xx+1]
  2678. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2679. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2680. "shll $16, %%edi \n\t"
  2681. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2682. "mov %1, %%"REG_D" \n\t"
  2683. "shrl $9, %%esi \n\t"
  2684. "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
  2685. "movzbl (%5, %%"REG_d"), %%edi \n\t" //src[xx]
  2686. "movzbl 1(%5, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2687. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2688. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2689. "shll $16, %%edi \n\t"
  2690. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2691. "mov %1, %%"REG_D" \n\t"
  2692. "shrl $9, %%esi \n\t"
  2693. "movw %%si, 4096(%%"REG_D", %%"REG_a", 2) \n\t"
  2694. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2695. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2696. "add $1, %%"REG_a" \n\t"
  2697. "cmp %2, %%"REG_a" \n\t"
  2698. " jb 1b \n\t"
  2699. /* GCC-3.3 makes MPlayer crash on IA-32 machines when using "g" operand here,
  2700. which is needed to support GCC-4.0 */
  2701. #if defined(ARCH_X86_64) && ((__GNUC__ > 3) || ( __GNUC__ == 3 && __GNUC_MINOR__ >= 4))
  2702. :: "m" (src1), "m" (dst), "g" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
  2703. #else
  2704. :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
  2705. #endif
  2706. "r" (src2)
  2707. : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
  2708. );
  2709. #ifdef HAVE_MMX2
  2710. } //if MMX2 can't be used
  2711. #endif
  2712. #else
  2713. int i;
  2714. unsigned int xpos=0;
  2715. for (i=0;i<dstWidth;i++)
  2716. {
  2717. register unsigned int xx=xpos>>16;
  2718. register unsigned int xalpha=(xpos&0xFFFF)>>9;
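        // xalpha is 7 bits wide, so (xalpha^127) == 127-xalpha: a cheap (1-alpha)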
  2719. dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
  2720. dst[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
  2721. /* slower
  2722. dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
  2723. dst[i+2048]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
  2724. */
  2725. xpos+=xInc;
  2726. }
  2727. #endif
  2728. }
  2729. }
  2730. static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
  2731. int srcSliceH, uint8_t* dst[], int dstStride[]){
  2732. /* load a few things into local vars to make the code more readable and faster */
  2733. const int srcW= c->srcW;
  2734. const int dstW= c->dstW;
  2735. const int dstH= c->dstH;
  2736. const int chrDstW= c->chrDstW;
  2737. const int chrSrcW= c->chrSrcW;
  2738. const int lumXInc= c->lumXInc;
  2739. const int chrXInc= c->chrXInc;
  2740. const int dstFormat= c->dstFormat;
  2741. const int srcFormat= c->srcFormat;
  2742. const int flags= c->flags;
  2743. const int canMMX2BeUsed= c->canMMX2BeUsed;
  2744. int16_t *vLumFilterPos= c->vLumFilterPos;
  2745. int16_t *vChrFilterPos= c->vChrFilterPos;
  2746. int16_t *hLumFilterPos= c->hLumFilterPos;
  2747. int16_t *hChrFilterPos= c->hChrFilterPos;
  2748. int16_t *vLumFilter= c->vLumFilter;
  2749. int16_t *vChrFilter= c->vChrFilter;
  2750. int16_t *hLumFilter= c->hLumFilter;
  2751. int16_t *hChrFilter= c->hChrFilter;
  2752. int32_t *lumMmxFilter= c->lumMmxFilter;
  2753. int32_t *chrMmxFilter= c->chrMmxFilter;
  2754. const int vLumFilterSize= c->vLumFilterSize;
  2755. const int vChrFilterSize= c->vChrFilterSize;
  2756. const int hLumFilterSize= c->hLumFilterSize;
  2757. const int hChrFilterSize= c->hChrFilterSize;
  2758. int16_t **lumPixBuf= c->lumPixBuf;
  2759. int16_t **chrPixBuf= c->chrPixBuf;
  2760. const int vLumBufSize= c->vLumBufSize;
  2761. const int vChrBufSize= c->vChrBufSize;
  2762. uint8_t *funnyYCode= c->funnyYCode;
  2763. uint8_t *funnyUVCode= c->funnyUVCode;
  2764. uint8_t *formatConvBuffer= c->formatConvBuffer;
  2765. const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
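    // -((-x)>>n) rounds upwards: chrSrcSliceH = ceil(srcSliceH / 2^chrSrcVSubSample)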
  2766. const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
  2767. int lastDstY;
  2768. uint8_t *pal=NULL;
  2769. /* vars which will change and which we need to store back in the context */
  2770. int dstY= c->dstY;
  2771. int lumBufIndex= c->lumBufIndex;
  2772. int chrBufIndex= c->chrBufIndex;
  2773. int lastInLumBuf= c->lastInLumBuf;
  2774. int lastInChrBuf= c->lastInChrBuf;
  2775. if (isPacked(c->srcFormat)){
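        // packed formats have a single plane; save the palette (if any)
        // passed in src[1], then alias all three plane pointers and strides
        // to plane 0 so the generic per-plane code below works unchanged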
  2776. pal= src[1];
  2777. src[0]=
  2778. src[1]=
  2779. src[2]= src[0];
  2780. srcStride[0]=
  2781. srcStride[1]=
  2782. srcStride[2]= srcStride[0];
  2783. }
  2784. srcStride[1]<<= c->vChrDrop;
  2785. srcStride[2]<<= c->vChrDrop;
  2786. //printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
  2787. // (int)dst[0], (int)dst[1], (int)dst[2]);
  2788. #if 0 //self test FIXME move to a vfilter or something
  2789. {
  2790. static volatile int i=0;
  2791. i++;
  2792. if (srcFormat==PIX_FMT_YUV420P && i==1 && srcSliceH>= c->srcH)
  2793. selfTest(src, srcStride, c->srcW, c->srcH);
  2794. i--;
  2795. }
  2796. #endif
  2797. //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
  2798. //dstStride[0],dstStride[1],dstStride[2]);
  2799. if (dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
  2800. {
  2801. static int firstTime=1; //FIXME move this into the context perhaps
  2802. if (flags & SWS_PRINT_INFO && firstTime)
  2803. {
  2804. av_log(c, AV_LOG_WARNING, "SwScaler: Warning: dstStride is not aligned!\n"
  2805. "SwScaler: ->cannot do aligned memory acesses anymore\n");
  2806. firstTime=0;
  2807. }
  2808. }
  2809. /* Note: the user might start scaling in the middle of the picture, so this will not get executed;
  2810.    this is not really intended, but it works currently, so people might do it. */
  2811. if (srcSliceY ==0){
  2812. lumBufIndex=0;
  2813. chrBufIndex=0;
  2814. dstY=0;
  2815. lastInLumBuf= -1;
  2816. lastInChrBuf= -1;
  2817. }
  2818. lastDstY= dstY;
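    /* lumPixBuf/chrPixBuf are ring buffers of line pointers laid out twice
     * in a row (2*vLumBufSize entries), so a vertical filter window starting
     * anywhere in the first copy can be read linearly, with no wrap-around
     * checks inside the filter loop. */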
  2819. for (;dstY < dstH; dstY++){
  2820. unsigned char *dest =dst[0]+dstStride[0]*dstY;
  2821. const int chrDstY= dstY>>c->chrDstVSubSample;
  2822. unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
  2823. unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
  2824. const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
  2825. const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
  2826. const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
  2827. const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
  2828. //printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
  2829. // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample);
  2830. //handle holes (FAST_BILINEAR & weird filters)
  2831. if (firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
  2832. if (firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
  2833. //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
  2834. ASSERT(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1)
  2835. ASSERT(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1)
  2836. // Do we have enough lines in this slice to output the dstY line?
  2837. if (lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
  2838. {
  2839. //Do horizontal scaling
  2840. while(lastInLumBuf < lastLumSrcY)
  2841. {
  2842. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2843. lumBufIndex++;
  2844. //printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
  2845. ASSERT(lumBufIndex < 2*vLumBufSize)
  2846. ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
  2847. ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
  2848. //printf("%d %d\n", lumBufIndex, vLumBufSize);
  2849. RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2850. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2851. funnyYCode, c->srcFormat, formatConvBuffer,
  2852. c->lumMmx2Filter, c->lumMmx2FilterPos, pal);
  2853. lastInLumBuf++;
  2854. }
  2855. while(lastInChrBuf < lastChrSrcY)
  2856. {
  2857. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2858. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2859. chrBufIndex++;
  2860. ASSERT(chrBufIndex < 2*vChrBufSize)
  2861. ASSERT(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH))
  2862. ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
  2863. //FIXME replace parameters through context struct (some at least)
  2864. if (!(isGray(srcFormat) || isGray(dstFormat)))
  2865. RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2866. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2867. funnyUVCode, c->srcFormat, formatConvBuffer,
  2868. c->chrMmx2Filter, c->chrMmx2FilterPos, pal);
  2869. lastInChrBuf++;
  2870. }
  2871. //wrap buf index around to stay inside the ring buffer
  2872. if (lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
  2873. if (chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
  2874. }
  2875. else // not enough lines left in this slice -> load the rest in the buffer
  2876. {
  2877. /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
  2878. firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
  2879. lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
  2880. vChrBufSize, vLumBufSize);*/
  2881. //Do horizontal scaling
  2882. while(lastInLumBuf+1 < srcSliceY + srcSliceH)
  2883. {
  2884. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2885. lumBufIndex++;
  2886. ASSERT(lumBufIndex < 2*vLumBufSize)
  2887. ASSERT(lastInLumBuf + 1 - srcSliceY < srcSliceH)
  2888. ASSERT(lastInLumBuf + 1 - srcSliceY >= 0)
  2889. RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2890. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2891. funnyYCode, c->srcFormat, formatConvBuffer,
  2892. c->lumMmx2Filter, c->lumMmx2FilterPos, pal);
  2893. lastInLumBuf++;
  2894. }
  2895. while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
  2896. {
  2897. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2898. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2899. chrBufIndex++;
  2900. ASSERT(chrBufIndex < 2*vChrBufSize)
  2901. ASSERT(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH)
  2902. ASSERT(lastInChrBuf + 1 - chrSrcSliceY >= 0)
  2903. if (!(isGray(srcFormat) || isGray(dstFormat)))
  2904. RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2905. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2906. funnyUVCode, c->srcFormat, formatConvBuffer,
  2907. c->chrMmx2Filter, c->chrMmx2FilterPos, pal);
  2908. lastInChrBuf++;
  2909. }
  2910. //wrap buf index around to stay inside the ring buffer
  2911. if (lumBufIndex >= vLumBufSize ) lumBufIndex-= vLumBufSize;
  2912. if (chrBufIndex >= vChrBufSize ) chrBufIndex-= vChrBufSize;
  2913. break; //we can't output a dstY line so let's try with the next slice
  2914. }
  2915. #ifdef HAVE_MMX
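        // select this line's ordered-dither rows: 4-level tables for the
        // 6-bit green channel, 8-level for the 5-bit channels; red uses the
        // opposite parity to decorrelate the dither patterns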
  2916. b5Dither= dither8[dstY&1];
  2917. g6Dither= dither4[dstY&1];
  2918. g5Dither= dither8[dstY&1];
  2919. r5Dither= dither8[(dstY+1)&1];
  2920. #endif
  2921. if (dstY < dstH-2)
  2922. {
  2923. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2924. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2925. #ifdef HAVE_MMX
  2926. int i;
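            // pack (line pointer, replicated 16-bit coefficient) pairs into
            // lumMmxFilter/chrMmxFilter in the layout the vertical-scale asm
            // expects; SWS_ACCURATE_RND processes two taps per entry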
  2927. if (flags & SWS_ACCURATE_RND){
  2928. for (i=0; i<vLumFilterSize; i+=2){
  2929. lumMmxFilter[2*i+0]= (int32_t)lumSrcPtr[i ];
  2930. lumMmxFilter[2*i+1]= (int32_t)lumSrcPtr[i+(vLumFilterSize>1)];
  2931. lumMmxFilter[2*i+2]=
  2932. lumMmxFilter[2*i+3]= vLumFilter[dstY*vLumFilterSize + i ]
  2933. + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
  2934. }
  2935. for (i=0; i<vChrFilterSize; i+=2){
  2936. chrMmxFilter[2*i+0]= (int32_t)chrSrcPtr[i ];
  2937. chrMmxFilter[2*i+1]= (int32_t)chrSrcPtr[i+(vChrFilterSize>1)];
  2938. chrMmxFilter[2*i+2]=
  2939. chrMmxFilter[2*i+3]= vChrFilter[chrDstY*vChrFilterSize + i ]
  2940. + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
  2941. }
  2942. }else{
  2943. for (i=0; i<vLumFilterSize; i++)
  2944. {
  2945. lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
  2946. lumMmxFilter[4*i+1]= (uint64_t)lumSrcPtr[i] >> 32;
  2947. lumMmxFilter[4*i+2]=
  2948. lumMmxFilter[4*i+3]=
  2949. ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
  2950. }
  2951. for (i=0; i<vChrFilterSize; i++)
  2952. {
  2953. chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
  2954. chrMmxFilter[4*i+1]= (uint64_t)chrSrcPtr[i] >> 32;
  2955. chrMmxFilter[4*i+2]=
  2956. chrMmxFilter[4*i+3]=
  2957. ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
  2958. }
  2959. }
  2960. #endif
  2961. if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
  2962. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2963. if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
  2964. RENAME(yuv2nv12X)(c,
  2965. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2966. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2967. dest, uDest, dstW, chrDstW, dstFormat);
  2968. }
  2969. else if (isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12 like
  2970. {
  2971. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2972. if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
  2973. if (vLumFilterSize == 1 && vChrFilterSize == 1) // Unscaled YV12
  2974. {
  2975. int16_t *lumBuf = lumPixBuf[0];
  2976. int16_t *chrBuf= chrPixBuf[0];
  2977. RENAME(yuv2yuv1)(lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
  2978. }
  2979. else //General YV12
  2980. {
  2981. RENAME(yuv2yuvX)(c,
  2982. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2983. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2984. dest, uDest, vDest, dstW, chrDstW);
  2985. }
  2986. }
  2987. else
  2988. {
  2989. ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2990. ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  2991. if (vLumFilterSize == 1 && vChrFilterSize == 2) //Unscaled RGB
  2992. {
  2993. int chrAlpha= vChrFilter[2*dstY+1];
  2994. RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
  2995. dest, dstW, chrAlpha, dstFormat, flags, dstY);
  2996. }
  2997. else if (vLumFilterSize == 2 && vChrFilterSize == 2) //BiLinear Upscale RGB
  2998. {
  2999. int lumAlpha= vLumFilter[2*dstY+1];
  3000. int chrAlpha= vChrFilter[2*dstY+1];
  3001. lumMmxFilter[2]=
  3002. lumMmxFilter[3]= vLumFilter[2*dstY ]*0x10001;
  3003. chrMmxFilter[2]=
  3004. chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001;
  3005. RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
  3006. dest, dstW, lumAlpha, chrAlpha, dstY);
  3007. }
  3008. else //General RGB
  3009. {
  3010. RENAME(yuv2packedX)(c,
  3011. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  3012. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  3013. dest, dstW, dstY);
  3014. }
  3015. }
  3016. }
  3017. else // hmm looks like we can't use MMX here without overwriting this array's tail
  3018. {
  3019. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  3020. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  3021. if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
  3022. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  3023. if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
  3024. yuv2nv12XinC(
  3025. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  3026. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  3027. dest, uDest, dstW, chrDstW, dstFormat);
  3028. }
  3029. else if (isPlanarYUV(dstFormat) || isGray(dstFormat)) //YV12
  3030. {
  3031. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  3032. if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
  3033. yuv2yuvXinC(
  3034. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  3035. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  3036. dest, uDest, vDest, dstW, chrDstW);
  3037. }
  3038. else
  3039. {
  3040. ASSERT(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  3041. ASSERT(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  3042. yuv2packedXinC(c,
  3043. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  3044. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  3045. dest, dstW, dstY);
  3046. }
  3047. }
  3048. }
  3049. #ifdef HAVE_MMX
  3050. __asm __volatile(SFENCE:::"memory");
  3051. __asm __volatile(EMMS:::"memory");
  3052. #endif
  3053. /* store changed local vars back in the context */
  3054. c->dstY= dstY;
  3055. c->lumBufIndex= lumBufIndex;
  3056. c->chrBufIndex= chrBufIndex;
  3057. c->lastInLumBuf= lastInLumBuf;
  3058. c->lastInChrBuf= lastInChrBuf;
  3059. return dstY - lastDstY;
  3060. }