You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

3318 lines
135KB

  1. /*
  2. * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. *
  20. * The C code (not assembly, MMX, ...) of this file can be used
  21. * under the LGPL license.
  22. */
  23. #undef REAL_MOVNTQ
  24. #undef MOVNTQ
  25. #undef PAVGB
  26. #undef PREFETCH
  27. #undef PREFETCHW
  28. #undef EMMS
  29. #undef SFENCE
  30. #ifdef HAVE_3DNOW
  31. /* On K6 femms is faster than emms. On K7 femms is directly mapped on emms. */
  32. #define EMMS "femms"
  33. #else
  34. #define EMMS "emms"
  35. #endif
  36. #ifdef HAVE_3DNOW
  37. #define PREFETCH "prefetch"
  38. #define PREFETCHW "prefetchw"
  39. #elif defined (HAVE_MMX2)
  40. #define PREFETCH "prefetchnta"
  41. #define PREFETCHW "prefetcht0"
  42. #else
  43. #define PREFETCH " # nop"
  44. #define PREFETCHW " # nop"
  45. #endif
  46. #ifdef HAVE_MMX2
  47. #define SFENCE "sfence"
  48. #else
  49. #define SFENCE " # nop"
  50. #endif
  51. #ifdef HAVE_MMX2
  52. #define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
  53. #elif defined (HAVE_3DNOW)
  54. #define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
  55. #endif
  56. #ifdef HAVE_MMX2
  57. #define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
  58. #else
  59. #define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
  60. #endif
  61. #define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
  62. #ifdef HAVE_ALTIVEC
  63. #include "swscale_altivec_template.c"
  64. #endif
  65. #define YSCALEYUV2YV12X(x, offset, dest, width) \
  66. asm volatile(\
  67. "xor %%"REG_a", %%"REG_a" \n\t"\
  68. "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
  69. "movq %%mm3, %%mm4 \n\t"\
  70. "lea " offset "(%0), %%"REG_d" \n\t"\
  71. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  72. ASMALIGN(4) /* FIXME Unroll? */\
  73. "1: \n\t"\
  74. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  75. "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
  76. "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* srcData */\
  77. "add $16, %%"REG_d" \n\t"\
  78. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  79. "test %%"REG_S", %%"REG_S" \n\t"\
  80. "pmulhw %%mm0, %%mm2 \n\t"\
  81. "pmulhw %%mm0, %%mm5 \n\t"\
  82. "paddw %%mm2, %%mm3 \n\t"\
  83. "paddw %%mm5, %%mm4 \n\t"\
  84. " jnz 1b \n\t"\
  85. "psraw $3, %%mm3 \n\t"\
  86. "psraw $3, %%mm4 \n\t"\
  87. "packuswb %%mm4, %%mm3 \n\t"\
  88. MOVNTQ(%%mm3, (%1, %%REGa))\
  89. "add $8, %%"REG_a" \n\t"\
  90. "cmp %2, %%"REG_a" \n\t"\
  91. "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
  92. "movq %%mm3, %%mm4 \n\t"\
  93. "lea " offset "(%0), %%"REG_d" \n\t"\
  94. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  95. "jb 1b \n\t"\
  96. :: "r" (&c->redDither),\
  97. "r" (dest), "g" (width)\
  98. : "%"REG_a, "%"REG_d, "%"REG_S\
  99. );
  100. #define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
  101. asm volatile(\
  102. "lea " offset "(%0), %%"REG_d" \n\t"\
  103. "xor %%"REG_a", %%"REG_a" \n\t"\
  104. "pxor %%mm4, %%mm4 \n\t"\
  105. "pxor %%mm5, %%mm5 \n\t"\
  106. "pxor %%mm6, %%mm6 \n\t"\
  107. "pxor %%mm7, %%mm7 \n\t"\
  108. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  109. ASMALIGN(4) \
  110. "1: \n\t"\
  111. "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* srcData */\
  112. "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
  113. "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
  114. "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm1 \n\t" /* srcData */\
  115. "movq %%mm0, %%mm3 \n\t"\
  116. "punpcklwd %%mm1, %%mm0 \n\t"\
  117. "punpckhwd %%mm1, %%mm3 \n\t"\
  118. "movq 8(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
  119. "pmaddwd %%mm1, %%mm0 \n\t"\
  120. "pmaddwd %%mm1, %%mm3 \n\t"\
  121. "paddd %%mm0, %%mm4 \n\t"\
  122. "paddd %%mm3, %%mm5 \n\t"\
  123. "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* srcData */\
  124. "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
  125. "add $16, %%"REG_d" \n\t"\
  126. "test %%"REG_S", %%"REG_S" \n\t"\
  127. "movq %%mm2, %%mm0 \n\t"\
  128. "punpcklwd %%mm3, %%mm2 \n\t"\
  129. "punpckhwd %%mm3, %%mm0 \n\t"\
  130. "pmaddwd %%mm1, %%mm2 \n\t"\
  131. "pmaddwd %%mm1, %%mm0 \n\t"\
  132. "paddd %%mm2, %%mm6 \n\t"\
  133. "paddd %%mm0, %%mm7 \n\t"\
  134. " jnz 1b \n\t"\
  135. "psrad $16, %%mm4 \n\t"\
  136. "psrad $16, %%mm5 \n\t"\
  137. "psrad $16, %%mm6 \n\t"\
  138. "psrad $16, %%mm7 \n\t"\
  139. "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
  140. "packssdw %%mm5, %%mm4 \n\t"\
  141. "packssdw %%mm7, %%mm6 \n\t"\
  142. "paddw %%mm0, %%mm4 \n\t"\
  143. "paddw %%mm0, %%mm6 \n\t"\
  144. "psraw $3, %%mm4 \n\t"\
  145. "psraw $3, %%mm6 \n\t"\
  146. "packuswb %%mm6, %%mm4 \n\t"\
  147. MOVNTQ(%%mm4, (%1, %%REGa))\
  148. "add $8, %%"REG_a" \n\t"\
  149. "cmp %2, %%"REG_a" \n\t"\
  150. "lea " offset "(%0), %%"REG_d" \n\t"\
  151. "pxor %%mm4, %%mm4 \n\t"\
  152. "pxor %%mm5, %%mm5 \n\t"\
  153. "pxor %%mm6, %%mm6 \n\t"\
  154. "pxor %%mm7, %%mm7 \n\t"\
  155. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  156. "jb 1b \n\t"\
  157. :: "r" (&c->redDither),\
  158. "r" (dest), "g" (width)\
  159. : "%"REG_a, "%"REG_d, "%"REG_S\
  160. );
  161. #define YSCALEYUV2YV121 \
  162. "mov %2, %%"REG_a" \n\t"\
  163. ASMALIGN(4) /* FIXME Unroll? */\
  164. "1: \n\t"\
  165. "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
  166. "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
  167. "psraw $7, %%mm0 \n\t"\
  168. "psraw $7, %%mm1 \n\t"\
  169. "packuswb %%mm1, %%mm0 \n\t"\
  170. MOVNTQ(%%mm0, (%1, %%REGa))\
  171. "add $8, %%"REG_a" \n\t"\
  172. "jnc 1b \n\t"
  173. #define YSCALEYUV2YV121_ACCURATE \
  174. "mov %2, %%"REG_a" \n\t"\
  175. "pcmpeqw %%mm7, %%mm7 \n\t"\
  176. "psrlw $15, %%mm7 \n\t"\
  177. "psllw $6, %%mm7 \n\t"\
  178. ASMALIGN(4) /* FIXME Unroll? */\
  179. "1: \n\t"\
  180. "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
  181. "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
  182. "paddw %%mm7, %%mm0 \n\t"\
  183. "paddw %%mm7, %%mm1 \n\t"\
  184. "psraw $7, %%mm0 \n\t"\
  185. "psraw $7, %%mm1 \n\t"\
  186. "packuswb %%mm1, %%mm0 \n\t"\
  187. MOVNTQ(%%mm0, (%1, %%REGa))\
  188. "add $8, %%"REG_a" \n\t"\
  189. "jnc 1b \n\t"
  190. /*
  191. :: "m" (-lumFilterSize), "m" (-chrFilterSize),
  192. "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
  193. "r" (dest), "m" (dstW),
  194. "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
  195. : "%eax", "%ebx", "%ecx", "%edx", "%esi"
  196. */
  197. #define YSCALEYUV2PACKEDX \
  198. asm volatile(\
  199. "xor %%"REG_a", %%"REG_a" \n\t"\
  200. ASMALIGN(4)\
  201. "nop \n\t"\
  202. "1: \n\t"\
  203. "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
  204. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  205. "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
  206. "movq %%mm3, %%mm4 \n\t"\
  207. ASMALIGN(4)\
  208. "2: \n\t"\
  209. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  210. "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
  211. "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
  212. "add $16, %%"REG_d" \n\t"\
  213. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  214. "pmulhw %%mm0, %%mm2 \n\t"\
  215. "pmulhw %%mm0, %%mm5 \n\t"\
  216. "paddw %%mm2, %%mm3 \n\t"\
  217. "paddw %%mm5, %%mm4 \n\t"\
  218. "test %%"REG_S", %%"REG_S" \n\t"\
  219. " jnz 2b \n\t"\
  220. \
  221. "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
  222. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  223. "movq "VROUNDER_OFFSET"(%0), %%mm1 \n\t"\
  224. "movq %%mm1, %%mm7 \n\t"\
  225. ASMALIGN(4)\
  226. "2: \n\t"\
  227. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  228. "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\
  229. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
  230. "add $16, %%"REG_d" \n\t"\
  231. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  232. "pmulhw %%mm0, %%mm2 \n\t"\
  233. "pmulhw %%mm0, %%mm5 \n\t"\
  234. "paddw %%mm2, %%mm1 \n\t"\
  235. "paddw %%mm5, %%mm7 \n\t"\
  236. "test %%"REG_S", %%"REG_S" \n\t"\
  237. " jnz 2b \n\t"\
  238. #define YSCALEYUV2PACKEDX_END \
  239. :: "r" (&c->redDither), \
  240. "m" (dummy), "m" (dummy), "m" (dummy),\
  241. "r" (dest), "m" (dstW) \
  242. : "%"REG_a, "%"REG_d, "%"REG_S \
  243. );
  244. #define YSCALEYUV2PACKEDX_ACCURATE \
  245. asm volatile(\
  246. "xor %%"REG_a", %%"REG_a" \n\t"\
  247. ASMALIGN(4)\
  248. "nop \n\t"\
  249. "1: \n\t"\
  250. "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
  251. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  252. "pxor %%mm4, %%mm4 \n\t"\
  253. "pxor %%mm5, %%mm5 \n\t"\
  254. "pxor %%mm6, %%mm6 \n\t"\
  255. "pxor %%mm7, %%mm7 \n\t"\
  256. ASMALIGN(4)\
  257. "2: \n\t"\
  258. "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\
  259. "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
  260. "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
  261. "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\
  262. "movq %%mm0, %%mm3 \n\t"\
  263. "punpcklwd %%mm1, %%mm0 \n\t"\
  264. "punpckhwd %%mm1, %%mm3 \n\t"\
  265. "movq 8(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
  266. "pmaddwd %%mm1, %%mm0 \n\t"\
  267. "pmaddwd %%mm1, %%mm3 \n\t"\
  268. "paddd %%mm0, %%mm4 \n\t"\
  269. "paddd %%mm3, %%mm5 \n\t"\
  270. "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
  271. "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
  272. "add $16, %%"REG_d" \n\t"\
  273. "test %%"REG_S", %%"REG_S" \n\t"\
  274. "movq %%mm2, %%mm0 \n\t"\
  275. "punpcklwd %%mm3, %%mm2 \n\t"\
  276. "punpckhwd %%mm3, %%mm0 \n\t"\
  277. "pmaddwd %%mm1, %%mm2 \n\t"\
  278. "pmaddwd %%mm1, %%mm0 \n\t"\
  279. "paddd %%mm2, %%mm6 \n\t"\
  280. "paddd %%mm0, %%mm7 \n\t"\
  281. " jnz 2b \n\t"\
  282. "psrad $16, %%mm4 \n\t"\
  283. "psrad $16, %%mm5 \n\t"\
  284. "psrad $16, %%mm6 \n\t"\
  285. "psrad $16, %%mm7 \n\t"\
  286. "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
  287. "packssdw %%mm5, %%mm4 \n\t"\
  288. "packssdw %%mm7, %%mm6 \n\t"\
  289. "paddw %%mm0, %%mm4 \n\t"\
  290. "paddw %%mm0, %%mm6 \n\t"\
  291. "movq %%mm4, "U_TEMP"(%0) \n\t"\
  292. "movq %%mm6, "V_TEMP"(%0) \n\t"\
  293. \
  294. "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
  295. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  296. "pxor %%mm1, %%mm1 \n\t"\
  297. "pxor %%mm5, %%mm5 \n\t"\
  298. "pxor %%mm7, %%mm7 \n\t"\
  299. "pxor %%mm6, %%mm6 \n\t"\
  300. ASMALIGN(4)\
  301. "2: \n\t"\
  302. "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
  303. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
  304. "mov 4(%%"REG_d"), %%"REG_S" \n\t"\
  305. "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
  306. "movq %%mm0, %%mm3 \n\t"\
  307. "punpcklwd %%mm4, %%mm0 \n\t"\
  308. "punpckhwd %%mm4, %%mm3 \n\t"\
  309. "movq 8(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\
  310. "pmaddwd %%mm4, %%mm0 \n\t"\
  311. "pmaddwd %%mm4, %%mm3 \n\t"\
  312. "paddd %%mm0, %%mm1 \n\t"\
  313. "paddd %%mm3, %%mm5 \n\t"\
  314. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
  315. "mov 16(%%"REG_d"), %%"REG_S" \n\t"\
  316. "add $16, %%"REG_d" \n\t"\
  317. "test %%"REG_S", %%"REG_S" \n\t"\
  318. "movq %%mm2, %%mm0 \n\t"\
  319. "punpcklwd %%mm3, %%mm2 \n\t"\
  320. "punpckhwd %%mm3, %%mm0 \n\t"\
  321. "pmaddwd %%mm4, %%mm2 \n\t"\
  322. "pmaddwd %%mm4, %%mm0 \n\t"\
  323. "paddd %%mm2, %%mm7 \n\t"\
  324. "paddd %%mm0, %%mm6 \n\t"\
  325. " jnz 2b \n\t"\
  326. "psrad $16, %%mm1 \n\t"\
  327. "psrad $16, %%mm5 \n\t"\
  328. "psrad $16, %%mm7 \n\t"\
  329. "psrad $16, %%mm6 \n\t"\
  330. "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
  331. "packssdw %%mm5, %%mm1 \n\t"\
  332. "packssdw %%mm6, %%mm7 \n\t"\
  333. "paddw %%mm0, %%mm1 \n\t"\
  334. "paddw %%mm0, %%mm7 \n\t"\
  335. "movq "U_TEMP"(%0), %%mm3 \n\t"\
  336. "movq "V_TEMP"(%0), %%mm4 \n\t"\
  337. #define YSCALEYUV2RGBX \
  338. "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
  339. "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
  340. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  341. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  342. "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
  343. "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
  344. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  345. "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
  346. "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
  347. "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
  348. "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
  349. "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
  350. "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
  351. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  352. "paddw %%mm3, %%mm4 \n\t"\
  353. "movq %%mm2, %%mm0 \n\t"\
  354. "movq %%mm5, %%mm6 \n\t"\
  355. "movq %%mm4, %%mm3 \n\t"\
  356. "punpcklwd %%mm2, %%mm2 \n\t"\
  357. "punpcklwd %%mm5, %%mm5 \n\t"\
  358. "punpcklwd %%mm4, %%mm4 \n\t"\
  359. "paddw %%mm1, %%mm2 \n\t"\
  360. "paddw %%mm1, %%mm5 \n\t"\
  361. "paddw %%mm1, %%mm4 \n\t"\
  362. "punpckhwd %%mm0, %%mm0 \n\t"\
  363. "punpckhwd %%mm6, %%mm6 \n\t"\
  364. "punpckhwd %%mm3, %%mm3 \n\t"\
  365. "paddw %%mm7, %%mm0 \n\t"\
  366. "paddw %%mm7, %%mm6 \n\t"\
  367. "paddw %%mm7, %%mm3 \n\t"\
  368. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  369. "packuswb %%mm0, %%mm2 \n\t"\
  370. "packuswb %%mm6, %%mm5 \n\t"\
  371. "packuswb %%mm3, %%mm4 \n\t"\
  372. "pxor %%mm7, %%mm7 \n\t"
  373. #if 0
  374. #define FULL_YSCALEYUV2RGB \
  375. "pxor %%mm7, %%mm7 \n\t"\
  376. "movd %6, %%mm6 \n\t" /*yalpha1*/\
  377. "punpcklwd %%mm6, %%mm6 \n\t"\
  378. "punpcklwd %%mm6, %%mm6 \n\t"\
  379. "movd %7, %%mm5 \n\t" /*uvalpha1*/\
  380. "punpcklwd %%mm5, %%mm5 \n\t"\
  381. "punpcklwd %%mm5, %%mm5 \n\t"\
  382. "xor %%"REG_a", %%"REG_a" \n\t"\
  383. ASMALIGN(4)\
  384. "1: \n\t"\
  385. "movq (%0, %%"REG_a",2), %%mm0 \n\t" /*buf0[eax]*/\
  386. "movq (%1, %%"REG_a",2), %%mm1 \n\t" /*buf1[eax]*/\
  387. "movq (%2, %%"REG_a",2), %%mm2 \n\t" /* uvbuf0[eax]*/\
  388. "movq (%3, %%"REG_a",2), %%mm3 \n\t" /* uvbuf1[eax]*/\
  389. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  390. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  391. "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  392. "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  393. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  394. "movq "AV_STRINGIFY(VOF)"(%2, %%"REG_a",2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  395. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  396. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  397. "movq "AV_STRINGIFY(VOF)"(%3, %%"REG_a",2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
  398. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  399. "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  400. "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
  401. "psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\
  402. "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
  403. \
  404. \
  405. "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  406. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  407. "pmulhw "MANGLE(ubCoeff)", %%mm3 \n\t"\
  408. "psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  409. "pmulhw "MANGLE(ugCoeff)", %%mm2 \n\t"\
  410. "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  411. "psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\
  412. \
  413. \
  414. "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
  415. "pmulhw "MANGLE(vrCoeff)", %%mm0 \n\t"\
  416. "pmulhw "MANGLE(vgCoeff)", %%mm4 \n\t"\
  417. "paddw %%mm1, %%mm3 \n\t" /* B*/\
  418. "paddw %%mm1, %%mm0 \n\t" /* R*/\
  419. "packuswb %%mm3, %%mm3 \n\t"\
  420. \
  421. "packuswb %%mm0, %%mm0 \n\t"\
  422. "paddw %%mm4, %%mm2 \n\t"\
  423. "paddw %%mm2, %%mm1 \n\t" /* G*/\
  424. \
  425. "packuswb %%mm1, %%mm1 \n\t"
  426. #endif
  427. #define REAL_YSCALEYUV2PACKED(index, c) \
  428. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
  429. "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1 \n\t"\
  430. "psraw $3, %%mm0 \n\t"\
  431. "psraw $3, %%mm1 \n\t"\
  432. "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
  433. "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
  434. "xor "#index", "#index" \n\t"\
  435. ASMALIGN(4)\
  436. "1: \n\t"\
  437. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  438. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  439. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  440. "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  441. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  442. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  443. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
  444. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  445. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  446. "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  447. "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  448. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  449. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  450. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  451. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  452. "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
  453. "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
  454. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  455. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  456. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  457. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  458. "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  459. "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  460. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  461. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  462. #define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
  463. #define REAL_YSCALEYUV2RGB(index, c) \
  464. "xor "#index", "#index" \n\t"\
  465. ASMALIGN(4)\
  466. "1: \n\t"\
  467. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  468. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  469. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  470. "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  471. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  472. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  473. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
  474. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  475. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  476. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  477. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  478. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  479. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  480. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  481. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  482. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  483. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  484. "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
  485. "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
  486. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  487. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  488. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  489. "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
  490. "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
  491. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  492. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  493. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  494. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  495. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  496. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  497. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  498. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  499. "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
  500. "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
  501. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  502. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  503. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  504. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  505. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  506. "paddw %%mm3, %%mm4 \n\t"\
  507. "movq %%mm2, %%mm0 \n\t"\
  508. "movq %%mm5, %%mm6 \n\t"\
  509. "movq %%mm4, %%mm3 \n\t"\
  510. "punpcklwd %%mm2, %%mm2 \n\t"\
  511. "punpcklwd %%mm5, %%mm5 \n\t"\
  512. "punpcklwd %%mm4, %%mm4 \n\t"\
  513. "paddw %%mm1, %%mm2 \n\t"\
  514. "paddw %%mm1, %%mm5 \n\t"\
  515. "paddw %%mm1, %%mm4 \n\t"\
  516. "punpckhwd %%mm0, %%mm0 \n\t"\
  517. "punpckhwd %%mm6, %%mm6 \n\t"\
  518. "punpckhwd %%mm3, %%mm3 \n\t"\
  519. "paddw %%mm7, %%mm0 \n\t"\
  520. "paddw %%mm7, %%mm6 \n\t"\
  521. "paddw %%mm7, %%mm3 \n\t"\
  522. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  523. "packuswb %%mm0, %%mm2 \n\t"\
  524. "packuswb %%mm6, %%mm5 \n\t"\
  525. "packuswb %%mm3, %%mm4 \n\t"\
  526. "pxor %%mm7, %%mm7 \n\t"
  527. #define YSCALEYUV2RGB(index, c) REAL_YSCALEYUV2RGB(index, c)
  528. #define REAL_YSCALEYUV2PACKED1(index, c) \
  529. "xor "#index", "#index" \n\t"\
  530. ASMALIGN(4)\
  531. "1: \n\t"\
  532. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  533. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  534. "psraw $7, %%mm3 \n\t" \
  535. "psraw $7, %%mm4 \n\t" \
  536. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  537. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  538. "psraw $7, %%mm1 \n\t" \
  539. "psraw $7, %%mm7 \n\t" \
  540. #define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
  541. #define REAL_YSCALEYUV2RGB1(index, c) \
  542. "xor "#index", "#index" \n\t"\
  543. ASMALIGN(4)\
  544. "1: \n\t"\
  545. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  546. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  547. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  548. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  549. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  550. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  551. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  552. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  553. "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
  554. "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
  555. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  556. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  557. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  558. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  559. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  560. "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
  561. "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
  562. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  563. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  564. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  565. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  566. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  567. "paddw %%mm3, %%mm4 \n\t"\
  568. "movq %%mm2, %%mm0 \n\t"\
  569. "movq %%mm5, %%mm6 \n\t"\
  570. "movq %%mm4, %%mm3 \n\t"\
  571. "punpcklwd %%mm2, %%mm2 \n\t"\
  572. "punpcklwd %%mm5, %%mm5 \n\t"\
  573. "punpcklwd %%mm4, %%mm4 \n\t"\
  574. "paddw %%mm1, %%mm2 \n\t"\
  575. "paddw %%mm1, %%mm5 \n\t"\
  576. "paddw %%mm1, %%mm4 \n\t"\
  577. "punpckhwd %%mm0, %%mm0 \n\t"\
  578. "punpckhwd %%mm6, %%mm6 \n\t"\
  579. "punpckhwd %%mm3, %%mm3 \n\t"\
  580. "paddw %%mm7, %%mm0 \n\t"\
  581. "paddw %%mm7, %%mm6 \n\t"\
  582. "paddw %%mm7, %%mm3 \n\t"\
  583. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  584. "packuswb %%mm0, %%mm2 \n\t"\
  585. "packuswb %%mm6, %%mm5 \n\t"\
  586. "packuswb %%mm3, %%mm4 \n\t"\
  587. "pxor %%mm7, %%mm7 \n\t"
  588. #define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
  589. #define REAL_YSCALEYUV2PACKED1b(index, c) \
  590. "xor "#index", "#index" \n\t"\
  591. ASMALIGN(4)\
  592. "1: \n\t"\
  593. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  594. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  595. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  596. "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  597. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  598. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  599. "psrlw $8, %%mm3 \n\t" \
  600. "psrlw $8, %%mm4 \n\t" \
  601. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  602. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  603. "psraw $7, %%mm1 \n\t" \
  604. "psraw $7, %%mm7 \n\t"
  605. #define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
  606. // do vertical chrominance interpolation
  607. #define REAL_YSCALEYUV2RGB1b(index, c) \
  608. "xor "#index", "#index" \n\t"\
  609. ASMALIGN(4)\
  610. "1: \n\t"\
  611. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  612. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  613. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  614. "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  615. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  616. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  617. "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
  618. "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
  619. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  620. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  621. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  622. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  623. "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
  624. "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
  625. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  626. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  627. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  628. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  629. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  630. "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
  631. "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
  632. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  633. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  634. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  635. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  636. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  637. "paddw %%mm3, %%mm4 \n\t"\
  638. "movq %%mm2, %%mm0 \n\t"\
  639. "movq %%mm5, %%mm6 \n\t"\
  640. "movq %%mm4, %%mm3 \n\t"\
  641. "punpcklwd %%mm2, %%mm2 \n\t"\
  642. "punpcklwd %%mm5, %%mm5 \n\t"\
  643. "punpcklwd %%mm4, %%mm4 \n\t"\
  644. "paddw %%mm1, %%mm2 \n\t"\
  645. "paddw %%mm1, %%mm5 \n\t"\
  646. "paddw %%mm1, %%mm4 \n\t"\
  647. "punpckhwd %%mm0, %%mm0 \n\t"\
  648. "punpckhwd %%mm6, %%mm6 \n\t"\
  649. "punpckhwd %%mm3, %%mm3 \n\t"\
  650. "paddw %%mm7, %%mm0 \n\t"\
  651. "paddw %%mm7, %%mm6 \n\t"\
  652. "paddw %%mm7, %%mm3 \n\t"\
  653. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  654. "packuswb %%mm0, %%mm2 \n\t"\
  655. "packuswb %%mm6, %%mm5 \n\t"\
  656. "packuswb %%mm3, %%mm4 \n\t"\
  657. "pxor %%mm7, %%mm7 \n\t"
  658. #define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
  659. #define REAL_WRITEBGR32(dst, dstw, index) \
  660. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  661. "movq %%mm2, %%mm1 \n\t" /* B */\
  662. "movq %%mm5, %%mm6 \n\t" /* R */\
  663. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  664. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  665. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  666. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  667. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  668. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  669. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  670. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  671. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  672. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  673. \
  674. MOVNTQ(%%mm0, (dst, index, 4))\
  675. MOVNTQ(%%mm2, 8(dst, index, 4))\
  676. MOVNTQ(%%mm1, 16(dst, index, 4))\
  677. MOVNTQ(%%mm3, 24(dst, index, 4))\
  678. \
  679. "add $8, "#index" \n\t"\
  680. "cmp "#dstw", "#index" \n\t"\
  681. " jb 1b \n\t"
  682. #define WRITEBGR32(dst, dstw, index) REAL_WRITEBGR32(dst, dstw, index)
  683. #define REAL_WRITERGB16(dst, dstw, index) \
  684. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  685. "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
  686. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  687. "psrlq $3, %%mm2 \n\t"\
  688. \
  689. "movq %%mm2, %%mm1 \n\t"\
  690. "movq %%mm4, %%mm3 \n\t"\
  691. \
  692. "punpcklbw %%mm7, %%mm3 \n\t"\
  693. "punpcklbw %%mm5, %%mm2 \n\t"\
  694. "punpckhbw %%mm7, %%mm4 \n\t"\
  695. "punpckhbw %%mm5, %%mm1 \n\t"\
  696. \
  697. "psllq $3, %%mm3 \n\t"\
  698. "psllq $3, %%mm4 \n\t"\
  699. \
  700. "por %%mm3, %%mm2 \n\t"\
  701. "por %%mm4, %%mm1 \n\t"\
  702. \
  703. MOVNTQ(%%mm2, (dst, index, 2))\
  704. MOVNTQ(%%mm1, 8(dst, index, 2))\
  705. \
  706. "add $8, "#index" \n\t"\
  707. "cmp "#dstw", "#index" \n\t"\
  708. " jb 1b \n\t"
  709. #define WRITERGB16(dst, dstw, index) REAL_WRITERGB16(dst, dstw, index)
  710. #define REAL_WRITERGB15(dst, dstw, index) \
  711. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  712. "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
  713. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  714. "psrlq $3, %%mm2 \n\t"\
  715. "psrlq $1, %%mm5 \n\t"\
  716. \
  717. "movq %%mm2, %%mm1 \n\t"\
  718. "movq %%mm4, %%mm3 \n\t"\
  719. \
  720. "punpcklbw %%mm7, %%mm3 \n\t"\
  721. "punpcklbw %%mm5, %%mm2 \n\t"\
  722. "punpckhbw %%mm7, %%mm4 \n\t"\
  723. "punpckhbw %%mm5, %%mm1 \n\t"\
  724. \
  725. "psllq $2, %%mm3 \n\t"\
  726. "psllq $2, %%mm4 \n\t"\
  727. \
  728. "por %%mm3, %%mm2 \n\t"\
  729. "por %%mm4, %%mm1 \n\t"\
  730. \
  731. MOVNTQ(%%mm2, (dst, index, 2))\
  732. MOVNTQ(%%mm1, 8(dst, index, 2))\
  733. \
  734. "add $8, "#index" \n\t"\
  735. "cmp "#dstw", "#index" \n\t"\
  736. " jb 1b \n\t"
  737. #define WRITERGB15(dst, dstw, index) REAL_WRITERGB15(dst, dstw, index)
  738. #define WRITEBGR24OLD(dst, dstw, index) \
  739. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  740. "movq %%mm2, %%mm1 \n\t" /* B */\
  741. "movq %%mm5, %%mm6 \n\t" /* R */\
  742. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  743. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  744. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  745. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  746. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  747. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  748. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  749. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  750. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  751. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  752. \
  753. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  754. "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
  755. "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 0 */\
  756. "pand "MANGLE(bm11111000)", %%mm0 \n\t" /* 00RGB000 0.5 */\
  757. "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
  758. "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
  759. "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
  760. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  761. \
  762. "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  763. "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
  764. "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
  765. "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
  766. "pand "MANGLE(bm00001111)", %%mm2 \n\t" /* 0000RGBR 1 */\
  767. "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
  768. "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
  769. "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 2 */\
  770. "pand "MANGLE(bm11111000)", %%mm1 \n\t" /* 00RGB000 2.5 */\
  771. "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
  772. "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
  773. "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
  774. "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
  775. \
  776. "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
  777. "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
  778. "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
  779. "pand "MANGLE(bm00000111)", %%mm5 \n\t" /* 00000RGB 3 */\
  780. "pand "MANGLE(bm11111000)", %%mm3 \n\t" /* 00RGB000 3.5 */\
  781. "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
  782. "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
  783. "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
  784. \
  785. MOVNTQ(%%mm0, (dst))\
  786. MOVNTQ(%%mm2, 8(dst))\
  787. MOVNTQ(%%mm3, 16(dst))\
  788. "add $24, "#dst" \n\t"\
  789. \
  790. "add $8, "#index" \n\t"\
  791. "cmp "#dstw", "#index" \n\t"\
  792. " jb 1b \n\t"
  793. #define WRITEBGR24MMX(dst, dstw, index) \
  794. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  795. "movq %%mm2, %%mm1 \n\t" /* B */\
  796. "movq %%mm5, %%mm6 \n\t" /* R */\
  797. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  798. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  799. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  800. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  801. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  802. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  803. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  804. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  805. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  806. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  807. \
  808. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  809. "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
  810. "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
  811. "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
  812. \
  813. "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
  814. "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
  815. "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
  816. "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
  817. \
  818. "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
  819. "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
  820. "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
  821. "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
  822. \
  823. "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
  824. "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
  825. "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
  826. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  827. MOVNTQ(%%mm0, (dst))\
  828. \
  829. "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
  830. "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
  831. "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
  832. "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
  833. MOVNTQ(%%mm6, 8(dst))\
  834. \
  835. "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
  836. "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
  837. "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
  838. MOVNTQ(%%mm5, 16(dst))\
  839. \
  840. "add $24, "#dst" \n\t"\
  841. \
  842. "add $8, "#index" \n\t"\
  843. "cmp "#dstw", "#index" \n\t"\
  844. " jb 1b \n\t"
  845. #define WRITEBGR24MMX2(dst, dstw, index) \
  846. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  847. "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
  848. "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
  849. "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
  850. "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
  851. "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
  852. \
  853. "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
  854. "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
  855. "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
  856. \
  857. "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
  858. "por %%mm1, %%mm6 \n\t"\
  859. "por %%mm3, %%mm6 \n\t"\
  860. MOVNTQ(%%mm6, (dst))\
  861. \
  862. "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
  863. "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
  864. "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
  865. "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
  866. \
  867. "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
  868. "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
  869. "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
  870. \
  871. "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
  872. "por %%mm3, %%mm6 \n\t"\
  873. MOVNTQ(%%mm6, 8(dst))\
  874. \
  875. "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
  876. "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
  877. "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
  878. \
  879. "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
  880. "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
  881. "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
  882. \
  883. "por %%mm1, %%mm3 \n\t"\
  884. "por %%mm3, %%mm6 \n\t"\
  885. MOVNTQ(%%mm6, 16(dst))\
  886. \
  887. "add $24, "#dst" \n\t"\
  888. \
  889. "add $8, "#index" \n\t"\
  890. "cmp "#dstw", "#index" \n\t"\
  891. " jb 1b \n\t"
  892. #ifdef HAVE_MMX2
  893. #undef WRITEBGR24
  894. #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
  895. #else
  896. #undef WRITEBGR24
  897. #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
  898. #endif
  899. #define REAL_WRITEYUY2(dst, dstw, index) \
  900. "packuswb %%mm3, %%mm3 \n\t"\
  901. "packuswb %%mm4, %%mm4 \n\t"\
  902. "packuswb %%mm7, %%mm1 \n\t"\
  903. "punpcklbw %%mm4, %%mm3 \n\t"\
  904. "movq %%mm1, %%mm7 \n\t"\
  905. "punpcklbw %%mm3, %%mm1 \n\t"\
  906. "punpckhbw %%mm3, %%mm7 \n\t"\
  907. \
  908. MOVNTQ(%%mm1, (dst, index, 2))\
  909. MOVNTQ(%%mm7, 8(dst, index, 2))\
  910. \
  911. "add $8, "#index" \n\t"\
  912. "cmp "#dstw", "#index" \n\t"\
  913. " jb 1b \n\t"
  914. #define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
  915. static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  916. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  917. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
  918. {
  919. #ifdef HAVE_MMX
  920. if (c->flags & SWS_ACCURATE_RND){
  921. if (uDest){
  922. YSCALEYUV2YV12X_ACCURATE( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
  923. YSCALEYUV2YV12X_ACCURATE(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
  924. }
  925. YSCALEYUV2YV12X_ACCURATE("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
  926. }else{
  927. if (uDest){
  928. YSCALEYUV2YV12X( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
  929. YSCALEYUV2YV12X(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
  930. }
  931. YSCALEYUV2YV12X("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
  932. }
  933. #else
  934. #ifdef HAVE_ALTIVEC
  935. yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
  936. chrFilter, chrSrc, chrFilterSize,
  937. dest, uDest, vDest, dstW, chrDstW);
  938. #else //HAVE_ALTIVEC
  939. yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
  940. chrFilter, chrSrc, chrFilterSize,
  941. dest, uDest, vDest, dstW, chrDstW);
  942. #endif //!HAVE_ALTIVEC
  943. #endif /* HAVE_MMX */
  944. }
  945. static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  946. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  947. uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
  948. {
  949. yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
  950. chrFilter, chrSrc, chrFilterSize,
  951. dest, uDest, dstW, chrDstW, dstFormat);
  952. }
  953. static inline void RENAME(yuv2yuv1)(SwsContext *c, int16_t *lumSrc, int16_t *chrSrc,
  954. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
  955. {
  956. #ifdef HAVE_MMX
  957. long p= uDest ? 3 : 1;
  958. uint8_t *src[3]= {lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW};
  959. uint8_t *dst[3]= {dest, uDest, vDest};
  960. long counter[3] = {dstW, chrDstW, chrDstW};
  961. if (c->flags & SWS_ACCURATE_RND){
  962. while(p--){
  963. asm volatile(
  964. YSCALEYUV2YV121_ACCURATE
  965. :: "r" (src[p]), "r" (dst[p] + counter[p]),
  966. "g" (-counter[p])
  967. : "%"REG_a
  968. );
  969. }
  970. }else{
  971. while(p--){
  972. asm volatile(
  973. YSCALEYUV2YV121
  974. :: "r" (src[p]), "r" (dst[p] + counter[p]),
  975. "g" (-counter[p])
  976. : "%"REG_a
  977. );
  978. }
  979. }
  980. #else
  981. int i;
  982. for (i=0; i<dstW; i++)
  983. {
  984. int val= (lumSrc[i]+64)>>7;
  985. if (val&256){
  986. if (val<0) val=0;
  987. else val=255;
  988. }
  989. dest[i]= val;
  990. }
  991. if (uDest)
  992. for (i=0; i<chrDstW; i++)
  993. {
  994. int u=(chrSrc[i ]+64)>>7;
  995. int v=(chrSrc[i + VOFW]+64)>>7;
  996. if ((u|v)&256){
  997. if (u<0) u=0;
  998. else if (u>255) u=255;
  999. if (v<0) v=0;
  1000. else if (v>255) v=255;
  1001. }
  1002. uDest[i]= u;
  1003. vDest[i]= v;
  1004. }
  1005. #endif
  1006. }
  1007. /**
  1008. * vertical scale YV12 to RGB
  1009. */
  1010. static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  1011. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  1012. uint8_t *dest, long dstW, long dstY)
  1013. {
  1014. #ifdef HAVE_MMX
  1015. long dummy=0;
  1016. if (c->flags & SWS_ACCURATE_RND){
  1017. switch(c->dstFormat){
  1018. case PIX_FMT_RGB32:
  1019. YSCALEYUV2PACKEDX_ACCURATE
  1020. YSCALEYUV2RGBX
  1021. WRITEBGR32(%4, %5, %%REGa)
  1022. YSCALEYUV2PACKEDX_END
  1023. return;
  1024. case PIX_FMT_BGR24:
  1025. YSCALEYUV2PACKEDX_ACCURATE
  1026. YSCALEYUV2RGBX
  1027. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
  1028. "add %4, %%"REG_c" \n\t"
  1029. WRITEBGR24(%%REGc, %5, %%REGa)
  1030. :: "r" (&c->redDither),
  1031. "m" (dummy), "m" (dummy), "m" (dummy),
  1032. "r" (dest), "m" (dstW)
  1033. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
  1034. );
  1035. return;
  1036. case PIX_FMT_RGB555:
  1037. YSCALEYUV2PACKEDX_ACCURATE
  1038. YSCALEYUV2RGBX
  1039. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1040. #ifdef DITHER1XBPP
  1041. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1042. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1043. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1044. #endif
  1045. WRITERGB15(%4, %5, %%REGa)
  1046. YSCALEYUV2PACKEDX_END
  1047. return;
  1048. case PIX_FMT_RGB565:
  1049. YSCALEYUV2PACKEDX_ACCURATE
  1050. YSCALEYUV2RGBX
  1051. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1052. #ifdef DITHER1XBPP
  1053. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1054. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1055. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1056. #endif
  1057. WRITERGB16(%4, %5, %%REGa)
  1058. YSCALEYUV2PACKEDX_END
  1059. return;
  1060. case PIX_FMT_YUYV422:
  1061. YSCALEYUV2PACKEDX_ACCURATE
  1062. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1063. "psraw $3, %%mm3 \n\t"
  1064. "psraw $3, %%mm4 \n\t"
  1065. "psraw $3, %%mm1 \n\t"
  1066. "psraw $3, %%mm7 \n\t"
  1067. WRITEYUY2(%4, %5, %%REGa)
  1068. YSCALEYUV2PACKEDX_END
  1069. return;
  1070. }
  1071. }else{
  1072. switch(c->dstFormat)
  1073. {
  1074. case PIX_FMT_RGB32:
  1075. YSCALEYUV2PACKEDX
  1076. YSCALEYUV2RGBX
  1077. WRITEBGR32(%4, %5, %%REGa)
  1078. YSCALEYUV2PACKEDX_END
  1079. return;
  1080. case PIX_FMT_BGR24:
  1081. YSCALEYUV2PACKEDX
  1082. YSCALEYUV2RGBX
  1083. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c" \n\t" //FIXME optimize
  1084. "add %4, %%"REG_c" \n\t"
  1085. WRITEBGR24(%%REGc, %5, %%REGa)
  1086. :: "r" (&c->redDither),
  1087. "m" (dummy), "m" (dummy), "m" (dummy),
  1088. "r" (dest), "m" (dstW)
  1089. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
  1090. );
  1091. return;
  1092. case PIX_FMT_RGB555:
  1093. YSCALEYUV2PACKEDX
  1094. YSCALEYUV2RGBX
  1095. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1096. #ifdef DITHER1XBPP
  1097. "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
  1098. "paddusb "MANGLE(g5Dither)", %%mm4 \n\t"
  1099. "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
  1100. #endif
  1101. WRITERGB15(%4, %5, %%REGa)
  1102. YSCALEYUV2PACKEDX_END
  1103. return;
  1104. case PIX_FMT_RGB565:
  1105. YSCALEYUV2PACKEDX
  1106. YSCALEYUV2RGBX
  1107. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1108. #ifdef DITHER1XBPP
  1109. "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
  1110. "paddusb "MANGLE(g6Dither)", %%mm4 \n\t"
  1111. "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
  1112. #endif
  1113. WRITERGB16(%4, %5, %%REGa)
  1114. YSCALEYUV2PACKEDX_END
  1115. return;
  1116. case PIX_FMT_YUYV422:
  1117. YSCALEYUV2PACKEDX
  1118. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1119. "psraw $3, %%mm3 \n\t"
  1120. "psraw $3, %%mm4 \n\t"
  1121. "psraw $3, %%mm1 \n\t"
  1122. "psraw $3, %%mm7 \n\t"
  1123. WRITEYUY2(%4, %5, %%REGa)
  1124. YSCALEYUV2PACKEDX_END
  1125. return;
  1126. }
  1127. }
  1128. #endif /* HAVE_MMX */
  1129. #ifdef HAVE_ALTIVEC
  1130. /* The following list of supported dstFormat values should
  1131. match what's found in the body of altivec_yuv2packedX() */
  1132. if (c->dstFormat==PIX_FMT_ABGR || c->dstFormat==PIX_FMT_BGRA ||
  1133. c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 ||
  1134. c->dstFormat==PIX_FMT_RGBA || c->dstFormat==PIX_FMT_ARGB)
  1135. altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize,
  1136. chrFilter, chrSrc, chrFilterSize,
  1137. dest, dstW, dstY);
  1138. else
  1139. #endif
  1140. yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
  1141. chrFilter, chrSrc, chrFilterSize,
  1142. dest, dstW, dstY);
  1143. }
  1144. /**
  1145. * vertical bilinear scale YV12 to RGB
  1146. */
  1147. static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
  1148. uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
  1149. {
  1150. int yalpha1=yalpha^4095;
  1151. int uvalpha1=uvalpha^4095;
  1152. int i;
  1153. #if 0 //isn't used
  1154. if (flags&SWS_FULL_CHR_H_INT)
  1155. {
  1156. switch(dstFormat)
  1157. {
  1158. #ifdef HAVE_MMX
  1159. case PIX_FMT_RGB32:
  1160. asm volatile(
  1161. FULL_YSCALEYUV2RGB
  1162. "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
  1163. "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
  1164. "movq %%mm3, %%mm1 \n\t"
  1165. "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
  1166. "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
  1167. MOVNTQ(%%mm3, (%4, %%REGa, 4))
  1168. MOVNTQ(%%mm1, 8(%4, %%REGa, 4))
  1169. "add $4, %%"REG_a" \n\t"
  1170. "cmp %5, %%"REG_a" \n\t"
  1171. " jb 1b \n\t"
  1172. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" ((long)dstW),
  1173. "m" (yalpha1), "m" (uvalpha1)
  1174. : "%"REG_a
  1175. );
  1176. break;
  1177. case PIX_FMT_BGR24:
  1178. asm volatile(
  1179. FULL_YSCALEYUV2RGB
  1180. // lsb ... msb
  1181. "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
  1182. "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
  1183. "movq %%mm3, %%mm1 \n\t"
  1184. "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
  1185. "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
  1186. "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
  1187. "psrlq $8, %%mm3 \n\t" // GR0BGR00
  1188. "pand "MANGLE(bm00000111)", %%mm2 \n\t" // BGR00000
  1189. "pand "MANGLE(bm11111000)", %%mm3 \n\t" // 000BGR00
  1190. "por %%mm2, %%mm3 \n\t" // BGRBGR00
  1191. "movq %%mm1, %%mm2 \n\t"
  1192. "psllq $48, %%mm1 \n\t" // 000000BG
  1193. "por %%mm1, %%mm3 \n\t" // BGRBGRBG
  1194. "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
  1195. "psrld $16, %%mm2 \n\t" // R000R000
  1196. "psrlq $24, %%mm1 \n\t" // 0BGR0000
  1197. "por %%mm2, %%mm1 \n\t" // RBGRR000
  1198. "mov %4, %%"REG_b" \n\t"
  1199. "add %%"REG_a", %%"REG_b" \n\t"
  1200. #ifdef HAVE_MMX2
  1201. //FIXME Alignment
  1202. "movntq %%mm3, (%%"REG_b", %%"REG_a", 2) \n\t"
  1203. "movntq %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t"
  1204. #else
  1205. "movd %%mm3, (%%"REG_b", %%"REG_a", 2) \n\t"
  1206. "psrlq $32, %%mm3 \n\t"
  1207. "movd %%mm3, 4(%%"REG_b", %%"REG_a", 2) \n\t"
  1208. "movd %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t"
  1209. #endif
  1210. "add $4, %%"REG_a" \n\t"
  1211. "cmp %5, %%"REG_a" \n\t"
  1212. " jb 1b \n\t"
  1213. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
  1214. "m" (yalpha1), "m" (uvalpha1)
  1215. : "%"REG_a, "%"REG_b
  1216. );
  1217. break;
  1218. case PIX_FMT_BGR555:
  1219. asm volatile(
  1220. FULL_YSCALEYUV2RGB
  1221. #ifdef DITHER1XBPP
  1222. "paddusb "MANGLE(g5Dither)", %%mm1 \n\t"
  1223. "paddusb "MANGLE(r5Dither)", %%mm0 \n\t"
  1224. "paddusb "MANGLE(b5Dither)", %%mm3 \n\t"
  1225. #endif
  1226. "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
  1227. "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
  1228. "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
  1229. "psrlw $3, %%mm3 \n\t"
  1230. "psllw $2, %%mm1 \n\t"
  1231. "psllw $7, %%mm0 \n\t"
  1232. "pand "MANGLE(g15Mask)", %%mm1 \n\t"
  1233. "pand "MANGLE(r15Mask)", %%mm0 \n\t"
  1234. "por %%mm3, %%mm1 \n\t"
  1235. "por %%mm1, %%mm0 \n\t"
  1236. MOVNTQ(%%mm0, (%4, %%REGa, 2))
  1237. "add $4, %%"REG_a" \n\t"
  1238. "cmp %5, %%"REG_a" \n\t"
  1239. " jb 1b \n\t"
  1240. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1241. "m" (yalpha1), "m" (uvalpha1)
  1242. : "%"REG_a
  1243. );
  1244. break;
  1245. case PIX_FMT_BGR565:
  1246. asm volatile(
  1247. FULL_YSCALEYUV2RGB
  1248. #ifdef DITHER1XBPP
  1249. "paddusb "MANGLE(g6Dither)", %%mm1 \n\t"
  1250. "paddusb "MANGLE(r5Dither)", %%mm0 \n\t"
  1251. "paddusb "MANGLE(b5Dither)", %%mm3 \n\t"
  1252. #endif
  1253. "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
  1254. "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
  1255. "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
  1256. "psrlw $3, %%mm3 \n\t"
  1257. "psllw $3, %%mm1 \n\t"
  1258. "psllw $8, %%mm0 \n\t"
  1259. "pand "MANGLE(g16Mask)", %%mm1 \n\t"
  1260. "pand "MANGLE(r16Mask)", %%mm0 \n\t"
  1261. "por %%mm3, %%mm1 \n\t"
  1262. "por %%mm1, %%mm0 \n\t"
  1263. MOVNTQ(%%mm0, (%4, %%REGa, 2))
  1264. "add $4, %%"REG_a" \n\t"
  1265. "cmp %5, %%"REG_a" \n\t"
  1266. " jb 1b \n\t"
  1267. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1268. "m" (yalpha1), "m" (uvalpha1)
  1269. : "%"REG_a
  1270. );
  1271. break;
  1272. #endif /* HAVE_MMX */
  1273. case PIX_FMT_BGR32:
  1274. #ifndef HAVE_MMX
  1275. case PIX_FMT_RGB32:
  1276. #endif
  1277. if (dstFormat==PIX_FMT_RGB32)
  1278. {
  1279. int i;
  1280. #ifdef WORDS_BIGENDIAN
  1281. dest++;
  1282. #endif
  1283. for (i=0;i<dstW;i++){
  1284. // vertical linear interpolation && yuv2rgb in a single step:
  1285. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1286. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1287. int V=((uvbuf0[i+VOFW]*uvalpha1+uvbuf1[i+VOFW]*uvalpha)>>19);
  1288. dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
  1289. dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
  1290. dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
  1291. dest+= 4;
  1292. }
  1293. }
  1294. else if (dstFormat==PIX_FMT_BGR24)
  1295. {
  1296. int i;
  1297. for (i=0;i<dstW;i++){
  1298. // vertical linear interpolation && yuv2rgb in a single step:
  1299. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1300. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1301. int V=((uvbuf0[i+VOFW]*uvalpha1+uvbuf1[i+VOFW]*uvalpha)>>19);
  1302. dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
  1303. dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
  1304. dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
  1305. dest+= 3;
  1306. }
  1307. }
  1308. else if (dstFormat==PIX_FMT_BGR565)
  1309. {
  1310. int i;
  1311. for (i=0;i<dstW;i++){
  1312. // vertical linear interpolation && yuv2rgb in a single step:
  1313. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1314. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1315. int V=((uvbuf0[i+VOFW]*uvalpha1+uvbuf1[i+VOFW]*uvalpha)>>19);
  1316. ((uint16_t*)dest)[i] =
  1317. clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
  1318. clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
  1319. clip_table16r[(Y + yuvtab_3343[V]) >>13];
  1320. }
  1321. }
  1322. else if (dstFormat==PIX_FMT_BGR555)
  1323. {
  1324. int i;
  1325. for (i=0;i<dstW;i++){
  1326. // vertical linear interpolation && yuv2rgb in a single step:
  1327. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1328. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1329. int V=((uvbuf0[i+VOFW]*uvalpha1+uvbuf1[i+VOFW]*uvalpha)>>19);
  1330. ((uint16_t*)dest)[i] =
  1331. clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
  1332. clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
  1333. clip_table15r[(Y + yuvtab_3343[V]) >>13];
  1334. }
  1335. }
  1336. }//FULL_UV_IPOL
  1337. else
  1338. {
  1339. #endif // if 0
  1340. #ifdef HAVE_MMX
  1341. switch(c->dstFormat)
  1342. {
  1343. //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
  1344. case PIX_FMT_RGB32:
  1345. asm volatile(
  1346. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1347. "mov %4, %%"REG_b" \n\t"
  1348. "push %%"REG_BP" \n\t"
  1349. YSCALEYUV2RGB(%%REGBP, %5)
  1350. WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
  1351. "pop %%"REG_BP" \n\t"
  1352. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1353. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1354. "a" (&c->redDither)
  1355. );
  1356. return;
  1357. case PIX_FMT_BGR24:
  1358. asm volatile(
  1359. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1360. "mov %4, %%"REG_b" \n\t"
  1361. "push %%"REG_BP" \n\t"
  1362. YSCALEYUV2RGB(%%REGBP, %5)
  1363. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1364. "pop %%"REG_BP" \n\t"
  1365. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1366. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1367. "a" (&c->redDither)
  1368. );
  1369. return;
  1370. case PIX_FMT_RGB555:
  1371. asm volatile(
  1372. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1373. "mov %4, %%"REG_b" \n\t"
  1374. "push %%"REG_BP" \n\t"
  1375. YSCALEYUV2RGB(%%REGBP, %5)
  1376. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1377. #ifdef DITHER1XBPP
  1378. "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
  1379. "paddusb "MANGLE(g5Dither)", %%mm4 \n\t"
  1380. "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
  1381. #endif
  1382. WRITERGB15(%%REGb, 8280(%5), %%REGBP)
  1383. "pop %%"REG_BP" \n\t"
  1384. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1385. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1386. "a" (&c->redDither)
  1387. );
  1388. return;
  1389. case PIX_FMT_RGB565:
  1390. asm volatile(
  1391. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1392. "mov %4, %%"REG_b" \n\t"
  1393. "push %%"REG_BP" \n\t"
  1394. YSCALEYUV2RGB(%%REGBP, %5)
  1395. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1396. #ifdef DITHER1XBPP
  1397. "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
  1398. "paddusb "MANGLE(g6Dither)", %%mm4 \n\t"
  1399. "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
  1400. #endif
  1401. WRITERGB16(%%REGb, 8280(%5), %%REGBP)
  1402. "pop %%"REG_BP" \n\t"
  1403. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1404. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1405. "a" (&c->redDither)
  1406. );
  1407. return;
  1408. case PIX_FMT_YUYV422:
  1409. asm volatile(
  1410. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1411. "mov %4, %%"REG_b" \n\t"
  1412. "push %%"REG_BP" \n\t"
  1413. YSCALEYUV2PACKED(%%REGBP, %5)
  1414. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1415. "pop %%"REG_BP" \n\t"
  1416. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1417. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1418. "a" (&c->redDither)
  1419. );
  1420. return;
  1421. default: break;
  1422. }
  1423. #endif //HAVE_MMX
  1424. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C, YSCALE_YUV_2_GRAY16_2_C)
  1425. }
  1426. /**
  1427. * YV12 to RGB without scaling or interpolating
  1428. */
  1429. static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
  1430. uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
  1431. {
  1432. const int yalpha1=0;
  1433. int i;
  1434. uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
  1435. const int yalpha= 4096; //FIXME ...
  1436. if (flags&SWS_FULL_CHR_H_INT)
  1437. {
  1438. RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
  1439. return;
  1440. }
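/* uvalpha is the vertical chroma interpolation weight (0..4096). Below 2048 only one
 * of the two chroma lines is used (the ...1 asm variants and *_1_C macros), otherwise
 * the two chroma lines are averaged (the ...1b variants); see the note below on the
 * half-pixel chroma shift this trades for speed. */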
  1441. #ifdef HAVE_MMX
  1442. if (uvalpha < 2048) // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
  1443. {
  1444. switch(dstFormat)
  1445. {
  1446. case PIX_FMT_RGB32:
  1447. asm volatile(
  1448. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1449. "mov %4, %%"REG_b" \n\t"
  1450. "push %%"REG_BP" \n\t"
  1451. YSCALEYUV2RGB1(%%REGBP, %5)
  1452. WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
  1453. "pop %%"REG_BP" \n\t"
  1454. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1455. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1456. "a" (&c->redDither)
  1457. );
  1458. return;
  1459. case PIX_FMT_BGR24:
  1460. asm volatile(
  1461. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1462. "mov %4, %%"REG_b" \n\t"
  1463. "push %%"REG_BP" \n\t"
  1464. YSCALEYUV2RGB1(%%REGBP, %5)
  1465. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1466. "pop %%"REG_BP" \n\t"
  1467. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1468. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1469. "a" (&c->redDither)
  1470. );
  1471. return;
  1472. case PIX_FMT_RGB555:
  1473. asm volatile(
  1474. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1475. "mov %4, %%"REG_b" \n\t"
  1476. "push %%"REG_BP" \n\t"
  1477. YSCALEYUV2RGB1(%%REGBP, %5)
  1478. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1479. #ifdef DITHER1XBPP
  1480. "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
  1481. "paddusb "MANGLE(g5Dither)", %%mm4 \n\t"
  1482. "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
  1483. #endif
  1484. WRITERGB15(%%REGb, 8280(%5), %%REGBP)
  1485. "pop %%"REG_BP" \n\t"
  1486. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1487. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1488. "a" (&c->redDither)
  1489. );
  1490. return;
  1491. case PIX_FMT_RGB565:
  1492. asm volatile(
  1493. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1494. "mov %4, %%"REG_b" \n\t"
  1495. "push %%"REG_BP" \n\t"
  1496. YSCALEYUV2RGB1(%%REGBP, %5)
  1497. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1498. #ifdef DITHER1XBPP
  1499. "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
  1500. "paddusb "MANGLE(g6Dither)", %%mm4 \n\t"
  1501. "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
  1502. #endif
  1503. WRITERGB16(%%REGb, 8280(%5), %%REGBP)
  1504. "pop %%"REG_BP" \n\t"
  1505. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1506. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1507. "a" (&c->redDither)
  1508. );
  1509. return;
  1510. case PIX_FMT_YUYV422:
  1511. asm volatile(
  1512. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1513. "mov %4, %%"REG_b" \n\t"
  1514. "push %%"REG_BP" \n\t"
  1515. YSCALEYUV2PACKED1(%%REGBP, %5)
  1516. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1517. "pop %%"REG_BP" \n\t"
  1518. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1519. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1520. "a" (&c->redDither)
  1521. );
  1522. return;
  1523. }
  1524. }
  1525. else
  1526. {
  1527. switch(dstFormat)
  1528. {
  1529. case PIX_FMT_RGB32:
  1530. asm volatile(
  1531. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1532. "mov %4, %%"REG_b" \n\t"
  1533. "push %%"REG_BP" \n\t"
  1534. YSCALEYUV2RGB1b(%%REGBP, %5)
  1535. WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
  1536. "pop %%"REG_BP" \n\t"
  1537. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1538. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1539. "a" (&c->redDither)
  1540. );
  1541. return;
  1542. case PIX_FMT_BGR24:
  1543. asm volatile(
  1544. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1545. "mov %4, %%"REG_b" \n\t"
  1546. "push %%"REG_BP" \n\t"
  1547. YSCALEYUV2RGB1b(%%REGBP, %5)
  1548. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1549. "pop %%"REG_BP" \n\t"
  1550. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1551. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1552. "a" (&c->redDither)
  1553. );
  1554. return;
  1555. case PIX_FMT_RGB555:
  1556. asm volatile(
  1557. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1558. "mov %4, %%"REG_b" \n\t"
  1559. "push %%"REG_BP" \n\t"
  1560. YSCALEYUV2RGB1b(%%REGBP, %5)
  1561. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1562. #ifdef DITHER1XBPP
  1563. "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
  1564. "paddusb "MANGLE(g5Dither)", %%mm4 \n\t"
  1565. "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
  1566. #endif
  1567. WRITERGB15(%%REGb, 8280(%5), %%REGBP)
  1568. "pop %%"REG_BP" \n\t"
  1569. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1570. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1571. "a" (&c->redDither)
  1572. );
  1573. return;
  1574. case PIX_FMT_RGB565:
  1575. asm volatile(
  1576. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1577. "mov %4, %%"REG_b" \n\t"
  1578. "push %%"REG_BP" \n\t"
  1579. YSCALEYUV2RGB1b(%%REGBP, %5)
  1580. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1581. #ifdef DITHER1XBPP
  1582. "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
  1583. "paddusb "MANGLE(g6Dither)", %%mm4 \n\t"
  1584. "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
  1585. #endif
  1586. WRITERGB16(%%REGb, 8280(%5), %%REGBP)
  1587. "pop %%"REG_BP" \n\t"
  1588. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1589. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1590. "a" (&c->redDither)
  1591. );
  1592. return;
  1593. case PIX_FMT_YUYV422:
  1594. asm volatile(
  1595. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1596. "mov %4, %%"REG_b" \n\t"
  1597. "push %%"REG_BP" \n\t"
  1598. YSCALEYUV2PACKED1b(%%REGBP, %5)
  1599. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1600. "pop %%"REG_BP" \n\t"
  1601. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1602. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1603. "a" (&c->redDither)
  1604. );
  1605. return;
  1606. }
  1607. }
  1608. #endif /* HAVE_MMX */
  1609. if (uvalpha < 2048)
  1610. {
  1611. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C, YSCALE_YUV_2_GRAY16_1_C)
  1612. }else{
  1613. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C, YSCALE_YUV_2_GRAY16_1_C)
  1614. }
  1615. }
1616. //FIXME yuy2* can read up to 7 samples too many
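/* In YUYV the luma samples sit in the even bytes, so the MMX loop below masks them
 * out with bm01010101, packs two quadwords down to 8 luma bytes per iteration and
 * stores them; the counter runs from -width up to 0 so src/dst are indexed from
 * their ends. */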
  1617. static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width)
  1618. {
  1619. #ifdef HAVE_MMX
  1620. asm volatile(
  1621. "movq "MANGLE(bm01010101)", %%mm2 \n\t"
  1622. "mov %0, %%"REG_a" \n\t"
  1623. "1: \n\t"
  1624. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1625. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1626. "pand %%mm2, %%mm0 \n\t"
  1627. "pand %%mm2, %%mm1 \n\t"
  1628. "packuswb %%mm1, %%mm0 \n\t"
  1629. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1630. "add $8, %%"REG_a" \n\t"
  1631. " js 1b \n\t"
  1632. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1633. : "%"REG_a
  1634. );
  1635. #else
  1636. int i;
  1637. for (i=0; i<width; i++)
  1638. dst[i]= src[2*i];
  1639. #endif
  1640. }
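/* Chroma deinterleave for YUYV: psrlw $8 keeps the odd (U/V) bytes, packuswb merges
 * them into a U0 V0 U1 V1 ... byte stream, and the final mask/shift plus pack split
 * that into 4 U and 4 V samples per iteration. The assert below documents that both
 * source arguments must point at the same line. */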
  1641. static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
  1642. {
  1643. #ifdef HAVE_MMX
  1644. asm volatile(
  1645. "movq "MANGLE(bm01010101)", %%mm4 \n\t"
  1646. "mov %0, %%"REG_a" \n\t"
  1647. "1: \n\t"
  1648. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1649. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1650. "psrlw $8, %%mm0 \n\t"
  1651. "psrlw $8, %%mm1 \n\t"
  1652. "packuswb %%mm1, %%mm0 \n\t"
  1653. "movq %%mm0, %%mm1 \n\t"
  1654. "psrlw $8, %%mm0 \n\t"
  1655. "pand %%mm4, %%mm1 \n\t"
  1656. "packuswb %%mm0, %%mm0 \n\t"
  1657. "packuswb %%mm1, %%mm1 \n\t"
  1658. "movd %%mm0, (%3, %%"REG_a") \n\t"
  1659. "movd %%mm1, (%2, %%"REG_a") \n\t"
  1660. "add $4, %%"REG_a" \n\t"
  1661. " js 1b \n\t"
  1662. : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
  1663. : "%"REG_a
  1664. );
  1665. #else
  1666. int i;
  1667. for (i=0; i<width; i++)
  1668. {
  1669. dstU[i]= src1[4*i + 1];
  1670. dstV[i]= src1[4*i + 3];
  1671. }
  1672. #endif
  1673. assert(src1 == src2);
  1674. }
1675. /* This is almost identical to the previous one, and exists only because
1676. * yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses. */
  1677. static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width)
  1678. {
  1679. #ifdef HAVE_MMX
  1680. asm volatile(
  1681. "mov %0, %%"REG_a" \n\t"
  1682. "1: \n\t"
  1683. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1684. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1685. "psrlw $8, %%mm0 \n\t"
  1686. "psrlw $8, %%mm1 \n\t"
  1687. "packuswb %%mm1, %%mm0 \n\t"
  1688. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1689. "add $8, %%"REG_a" \n\t"
  1690. " js 1b \n\t"
  1691. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1692. : "%"REG_a
  1693. );
  1694. #else
  1695. int i;
  1696. for (i=0; i<width; i++)
  1697. dst[i]= src[2*i+1];
  1698. #endif
  1699. }
  1700. static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
  1701. {
  1702. #ifdef HAVE_MMX
  1703. asm volatile(
  1704. "movq "MANGLE(bm01010101)", %%mm4 \n\t"
  1705. "mov %0, %%"REG_a" \n\t"
  1706. "1: \n\t"
  1707. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1708. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1709. "pand %%mm4, %%mm0 \n\t"
  1710. "pand %%mm4, %%mm1 \n\t"
  1711. "packuswb %%mm1, %%mm0 \n\t"
  1712. "movq %%mm0, %%mm1 \n\t"
  1713. "psrlw $8, %%mm0 \n\t"
  1714. "pand %%mm4, %%mm1 \n\t"
  1715. "packuswb %%mm0, %%mm0 \n\t"
  1716. "packuswb %%mm1, %%mm1 \n\t"
  1717. "movd %%mm0, (%3, %%"REG_a") \n\t"
  1718. "movd %%mm1, (%2, %%"REG_a") \n\t"
  1719. "add $4, %%"REG_a" \n\t"
  1720. " js 1b \n\t"
  1721. : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
  1722. : "%"REG_a
  1723. );
  1724. #else
  1725. int i;
  1726. for (i=0; i<width; i++)
  1727. {
  1728. dstU[i]= src1[4*i + 0];
  1729. dstV[i]= src1[4*i + 2];
  1730. }
  1731. #endif
  1732. assert(src1 == src2);
  1733. }
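/* RGB -> Y, fixed point: Y = (RY*r + GY*g + BY*b + 33<<(RGB2YUV_SHIFT-1)) >> RGB2YUV_SHIFT.
 * The added constant equals 16.5 * 2^RGB2YUV_SHIFT, i.e. it folds the +16 luma offset and
 * a 0.5 rounding term into a single add. Assuming the usual limited-range BT.601
 * coefficients, r=g=b=255 should come out as Y=235. */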
  1734. static inline void RENAME(bgr32ToY)(uint8_t *dst, uint8_t *src, int width)
  1735. {
  1736. int i;
  1737. for (i=0; i<width; i++)
  1738. {
  1739. int b= ((uint32_t*)src)[i]&0xFF;
  1740. int g= (((uint32_t*)src)[i]>>8)&0xFF;
  1741. int r= (((uint32_t*)src)[i]>>16)&0xFF;
  1742. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
  1743. }
  1744. }
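/* RGB -> U/V: chroma is horizontally subsampled 2:1, so two adjacent pixels are summed
 * first. The masked adds keep the B and R sums (l) and the G sum (h) in disjoint bit
 * fields, and the extra +1 in the shift turns the pair sum back into an average before
 * the +128 chroma offset is added. */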
  1745. static inline void RENAME(bgr32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1746. {
  1747. int i;
  1748. assert(src1 == src2);
  1749. for (i=0; i<width; i++)
  1750. {
  1751. const int a= ((uint32_t*)src1)[2*i+0];
  1752. const int e= ((uint32_t*)src1)[2*i+1];
  1753. const int l= (a&0xFF00FF) + (e&0xFF00FF);
  1754. const int h= (a&0x00FF00) + (e&0x00FF00);
  1755. const int b= l&0x3FF;
  1756. const int g= h>>8;
  1757. const int r= l>>16;
  1758. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
  1759. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
  1760. }
  1761. }
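/* The MMX path below converts 8 BGR24 pixels per iteration: four pixels at a time are
 * widened to 16-bit words (movd loads at byte offsets 0/3/6/9), multiply-accumulated
 * against the packed ff_bgr2YCoeff coefficients with pmaddwd, summed across components
 * via ff_w1111, rescaled, and offset by ff_bgr2YOffset before the 8-byte store. */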
  1762. static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, long width)
  1763. {
  1764. #ifdef HAVE_MMX
  1765. asm volatile(
  1766. "mov %2, %%"REG_a" \n\t"
  1767. "movq "MANGLE(ff_bgr2YCoeff)", %%mm6 \n\t"
  1768. "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
  1769. "pxor %%mm7, %%mm7 \n\t"
  1770. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
  1771. ASMALIGN(4)
  1772. "1: \n\t"
  1773. PREFETCH" 64(%0, %%"REG_d") \n\t"
  1774. "movd (%0, %%"REG_d"), %%mm0 \n\t"
  1775. "movd 3(%0, %%"REG_d"), %%mm1 \n\t"
  1776. "punpcklbw %%mm7, %%mm0 \n\t"
  1777. "punpcklbw %%mm7, %%mm1 \n\t"
  1778. "movd 6(%0, %%"REG_d"), %%mm2 \n\t"
  1779. "movd 9(%0, %%"REG_d"), %%mm3 \n\t"
  1780. "punpcklbw %%mm7, %%mm2 \n\t"
  1781. "punpcklbw %%mm7, %%mm3 \n\t"
  1782. "pmaddwd %%mm6, %%mm0 \n\t"
  1783. "pmaddwd %%mm6, %%mm1 \n\t"
  1784. "pmaddwd %%mm6, %%mm2 \n\t"
  1785. "pmaddwd %%mm6, %%mm3 \n\t"
  1786. #ifndef FAST_BGR2YV12
  1787. "psrad $8, %%mm0 \n\t"
  1788. "psrad $8, %%mm1 \n\t"
  1789. "psrad $8, %%mm2 \n\t"
  1790. "psrad $8, %%mm3 \n\t"
  1791. #endif
  1792. "packssdw %%mm1, %%mm0 \n\t"
  1793. "packssdw %%mm3, %%mm2 \n\t"
  1794. "pmaddwd %%mm5, %%mm0 \n\t"
  1795. "pmaddwd %%mm5, %%mm2 \n\t"
  1796. "packssdw %%mm2, %%mm0 \n\t"
  1797. "psraw $7, %%mm0 \n\t"
  1798. "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
  1799. "movd 15(%0, %%"REG_d"), %%mm1 \n\t"
  1800. "punpcklbw %%mm7, %%mm4 \n\t"
  1801. "punpcklbw %%mm7, %%mm1 \n\t"
  1802. "movd 18(%0, %%"REG_d"), %%mm2 \n\t"
  1803. "movd 21(%0, %%"REG_d"), %%mm3 \n\t"
  1804. "punpcklbw %%mm7, %%mm2 \n\t"
  1805. "punpcklbw %%mm7, %%mm3 \n\t"
  1806. "pmaddwd %%mm6, %%mm4 \n\t"
  1807. "pmaddwd %%mm6, %%mm1 \n\t"
  1808. "pmaddwd %%mm6, %%mm2 \n\t"
  1809. "pmaddwd %%mm6, %%mm3 \n\t"
  1810. #ifndef FAST_BGR2YV12
  1811. "psrad $8, %%mm4 \n\t"
  1812. "psrad $8, %%mm1 \n\t"
  1813. "psrad $8, %%mm2 \n\t"
  1814. "psrad $8, %%mm3 \n\t"
  1815. #endif
  1816. "packssdw %%mm1, %%mm4 \n\t"
  1817. "packssdw %%mm3, %%mm2 \n\t"
  1818. "pmaddwd %%mm5, %%mm4 \n\t"
  1819. "pmaddwd %%mm5, %%mm2 \n\t"
  1820. "add $24, %%"REG_d" \n\t"
  1821. "packssdw %%mm2, %%mm4 \n\t"
  1822. "psraw $7, %%mm4 \n\t"
  1823. "packuswb %%mm4, %%mm0 \n\t"
  1824. "paddusb "MANGLE(ff_bgr2YOffset)", %%mm0 \n\t"
  1825. "movq %%mm0, (%1, %%"REG_a") \n\t"
  1826. "add $8, %%"REG_a" \n\t"
  1827. " js 1b \n\t"
  1828. : : "r" (src+width*3), "r" (dst+width), "g" (-width)
  1829. : "%"REG_a, "%"REG_d
  1830. );
  1831. #else
  1832. int i;
  1833. for (i=0; i<width; i++)
  1834. {
  1835. int b= src[i*3+0];
  1836. int g= src[i*3+1];
  1837. int r= src[i*3+2];
  1838. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
  1839. }
  1840. #endif /* HAVE_MMX */
  1841. }
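/* As in the other ToUV routines, two adjacent pixels are averaged because chroma is
 * produced at half horizontal resolution: the MMX2/3DNow path uses PAVGB, the plain MMX
 * path adds neighbouring pixels and shifts them down, and the C fallback sums the pair
 * and uses one extra bit of right shift. */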
  1842. static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width)
  1843. {
  1844. #ifdef HAVE_MMX
  1845. asm volatile(
  1846. "mov %3, %%"REG_a" \n\t"
  1847. "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
  1848. "movq "MANGLE(ff_bgr2UCoeff)", %%mm6 \n\t"
  1849. "pxor %%mm7, %%mm7 \n\t"
  1850. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
  1851. "add %%"REG_d", %%"REG_d" \n\t"
  1852. ASMALIGN(4)
  1853. "1: \n\t"
  1854. PREFETCH" 64(%0, %%"REG_d") \n\t"
  1855. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1856. "movq (%0, %%"REG_d"), %%mm0 \n\t"
  1857. "movq 6(%0, %%"REG_d"), %%mm2 \n\t"
  1858. "movq %%mm0, %%mm1 \n\t"
  1859. "movq %%mm2, %%mm3 \n\t"
  1860. "psrlq $24, %%mm0 \n\t"
  1861. "psrlq $24, %%mm2 \n\t"
  1862. PAVGB(%%mm1, %%mm0)
  1863. PAVGB(%%mm3, %%mm2)
  1864. "punpcklbw %%mm7, %%mm0 \n\t"
  1865. "punpcklbw %%mm7, %%mm2 \n\t"
  1866. #else
  1867. "movd (%0, %%"REG_d"), %%mm0 \n\t"
  1868. "movd 3(%0, %%"REG_d"), %%mm2 \n\t"
  1869. "punpcklbw %%mm7, %%mm0 \n\t"
  1870. "punpcklbw %%mm7, %%mm2 \n\t"
  1871. "paddw %%mm2, %%mm0 \n\t"
  1872. "movd 6(%0, %%"REG_d"), %%mm4 \n\t"
  1873. "movd 9(%0, %%"REG_d"), %%mm2 \n\t"
  1874. "punpcklbw %%mm7, %%mm4 \n\t"
  1875. "punpcklbw %%mm7, %%mm2 \n\t"
  1876. "paddw %%mm4, %%mm2 \n\t"
  1877. "psrlw $1, %%mm0 \n\t"
  1878. "psrlw $1, %%mm2 \n\t"
  1879. #endif
  1880. "movq "MANGLE(ff_bgr2VCoeff)", %%mm1 \n\t"
  1881. "movq "MANGLE(ff_bgr2VCoeff)", %%mm3 \n\t"
  1882. "pmaddwd %%mm0, %%mm1 \n\t"
  1883. "pmaddwd %%mm2, %%mm3 \n\t"
  1884. "pmaddwd %%mm6, %%mm0 \n\t"
  1885. "pmaddwd %%mm6, %%mm2 \n\t"
  1886. #ifndef FAST_BGR2YV12
  1887. "psrad $8, %%mm0 \n\t"
  1888. "psrad $8, %%mm1 \n\t"
  1889. "psrad $8, %%mm2 \n\t"
  1890. "psrad $8, %%mm3 \n\t"
  1891. #endif
  1892. "packssdw %%mm2, %%mm0 \n\t"
  1893. "packssdw %%mm3, %%mm1 \n\t"
  1894. "pmaddwd %%mm5, %%mm0 \n\t"
  1895. "pmaddwd %%mm5, %%mm1 \n\t"
  1896. "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
  1897. "psraw $7, %%mm0 \n\t"
  1898. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1899. "movq 12(%0, %%"REG_d"), %%mm4 \n\t"
  1900. "movq 18(%0, %%"REG_d"), %%mm2 \n\t"
  1901. "movq %%mm4, %%mm1 \n\t"
  1902. "movq %%mm2, %%mm3 \n\t"
  1903. "psrlq $24, %%mm4 \n\t"
  1904. "psrlq $24, %%mm2 \n\t"
  1905. PAVGB(%%mm1, %%mm4)
  1906. PAVGB(%%mm3, %%mm2)
  1907. "punpcklbw %%mm7, %%mm4 \n\t"
  1908. "punpcklbw %%mm7, %%mm2 \n\t"
  1909. #else
  1910. "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
  1911. "movd 15(%0, %%"REG_d"), %%mm2 \n\t"
  1912. "punpcklbw %%mm7, %%mm4 \n\t"
  1913. "punpcklbw %%mm7, %%mm2 \n\t"
  1914. "paddw %%mm2, %%mm4 \n\t"
  1915. "movd 18(%0, %%"REG_d"), %%mm5 \n\t"
  1916. "movd 21(%0, %%"REG_d"), %%mm2 \n\t"
  1917. "punpcklbw %%mm7, %%mm5 \n\t"
  1918. "punpcklbw %%mm7, %%mm2 \n\t"
  1919. "paddw %%mm5, %%mm2 \n\t"
  1920. "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
  1921. "psrlw $2, %%mm4 \n\t"
  1922. "psrlw $2, %%mm2 \n\t"
  1923. #endif
  1924. "movq "MANGLE(ff_bgr2VCoeff)", %%mm1 \n\t"
  1925. "movq "MANGLE(ff_bgr2VCoeff)", %%mm3 \n\t"
  1926. "pmaddwd %%mm4, %%mm1 \n\t"
  1927. "pmaddwd %%mm2, %%mm3 \n\t"
  1928. "pmaddwd %%mm6, %%mm4 \n\t"
  1929. "pmaddwd %%mm6, %%mm2 \n\t"
  1930. #ifndef FAST_BGR2YV12
  1931. "psrad $8, %%mm4 \n\t"
  1932. "psrad $8, %%mm1 \n\t"
  1933. "psrad $8, %%mm2 \n\t"
  1934. "psrad $8, %%mm3 \n\t"
  1935. #endif
  1936. "packssdw %%mm2, %%mm4 \n\t"
  1937. "packssdw %%mm3, %%mm1 \n\t"
  1938. "pmaddwd %%mm5, %%mm4 \n\t"
  1939. "pmaddwd %%mm5, %%mm1 \n\t"
  1940. "add $24, %%"REG_d" \n\t"
  1941. "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
  1942. "psraw $7, %%mm4 \n\t"
  1943. "movq %%mm0, %%mm1 \n\t"
  1944. "punpckldq %%mm4, %%mm0 \n\t"
  1945. "punpckhdq %%mm4, %%mm1 \n\t"
  1946. "packsswb %%mm1, %%mm0 \n\t"
  1947. "paddb "MANGLE(ff_bgr2UVOffset)", %%mm0 \n\t"
  1948. "movd %%mm0, (%1, %%"REG_a") \n\t"
  1949. "punpckhdq %%mm0, %%mm0 \n\t"
  1950. "movd %%mm0, (%2, %%"REG_a") \n\t"
  1951. "add $4, %%"REG_a" \n\t"
  1952. " js 1b \n\t"
  1953. : : "r" (src1+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width)
  1954. : "%"REG_a, "%"REG_d
  1955. );
  1956. #else
  1957. int i;
  1958. for (i=0; i<width; i++)
  1959. {
  1960. int b= src1[6*i + 0] + src1[6*i + 3];
  1961. int g= src1[6*i + 1] + src1[6*i + 4];
  1962. int r= src1[6*i + 2] + src1[6*i + 5];
  1963. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
  1964. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
  1965. }
  1966. #endif /* HAVE_MMX */
  1967. assert(src1 == src2);
  1968. }
  1969. static inline void RENAME(rgb16ToY)(uint8_t *dst, uint8_t *src, int width)
  1970. {
  1971. int i;
  1972. for (i=0; i<width; i++)
  1973. {
  1974. int d= ((uint16_t*)src)[i];
  1975. int b= d&0x1F;
  1976. int g= (d>>5)&0x3F;
  1977. int r= (d>>11)&0x1F;
  1978. dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
  1979. }
  1980. }
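/* rgb16ToUV (and the rgb15/bgr16/bgr15 variants below) sum two adjacent 16-bit pixels
 * into disjoint bit fields with masked adds, so the averaged R, G and B can then be
 * extracted with plain shifts and masks; the doubled coefficients / reduced shifts
 * compensate for the 5- and 6-bit component widths. */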
  1981. static inline void RENAME(rgb16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  1982. {
  1983. int i;
  1984. assert(src1==src2);
  1985. for (i=0; i<width; i++)
  1986. {
  1987. int d0= ((uint32_t*)src1)[i];
  1988. int dl= (d0&0x07E0F81F);
  1989. int dh= ((d0>>5)&0x07C0F83F);
  1990. int dh2= (dh>>11) + (dh<<21);
  1991. int d= dh2 + dl;
  1992. int b= d&0x7F;
  1993. int r= (d>>11)&0x7F;
  1994. int g= d>>21;
  1995. dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
  1996. dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
  1997. }
  1998. }
  1999. static inline void RENAME(rgb15ToY)(uint8_t *dst, uint8_t *src, int width)
  2000. {
  2001. int i;
  2002. for (i=0; i<width; i++)
  2003. {
  2004. int d= ((uint16_t*)src)[i];
  2005. int b= d&0x1F;
  2006. int g= (d>>5)&0x1F;
  2007. int r= (d>>10)&0x1F;
  2008. dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
  2009. }
  2010. }
  2011. static inline void RENAME(rgb15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  2012. {
  2013. int i;
  2014. assert(src1==src2);
  2015. for (i=0; i<width; i++)
  2016. {
  2017. int d0= ((uint32_t*)src1)[i];
  2018. int dl= (d0&0x03E07C1F);
  2019. int dh= ((d0>>5)&0x03E0F81F);
  2020. int dh2= (dh>>11) + (dh<<21);
  2021. int d= dh2 + dl;
  2022. int b= d&0x7F;
  2023. int r= (d>>10)&0x7F;
  2024. int g= d>>21;
  2025. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1-3)) + 128;
  2026. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1-3)) + 128;
  2027. }
  2028. }
  2029. static inline void RENAME(rgb32ToY)(uint8_t *dst, uint8_t *src, int width)
  2030. {
  2031. int i;
  2032. for (i=0; i<width; i++)
  2033. {
  2034. int r= ((uint32_t*)src)[i]&0xFF;
  2035. int g= (((uint32_t*)src)[i]>>8)&0xFF;
  2036. int b= (((uint32_t*)src)[i]>>16)&0xFF;
  2037. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
  2038. }
  2039. }
  2040. static inline void RENAME(rgb32ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  2041. {
  2042. int i;
  2043. assert(src1==src2);
  2044. for (i=0; i<width; i++)
  2045. {
  2046. const int a= ((uint32_t*)src1)[2*i+0];
  2047. const int e= ((uint32_t*)src1)[2*i+1];
  2048. const int l= (a&0xFF00FF) + (e&0xFF00FF);
  2049. const int h= (a&0x00FF00) + (e&0x00FF00);
  2050. const int r= l&0x3FF;
  2051. const int g= h>>8;
  2052. const int b= l>>16;
  2053. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
  2054. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
  2055. }
  2056. }
  2057. static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, int width)
  2058. {
  2059. int i;
  2060. for (i=0; i<width; i++)
  2061. {
  2062. int r= src[i*3+0];
  2063. int g= src[i*3+1];
  2064. int b= src[i*3+2];
  2065. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
  2066. }
  2067. }
  2068. static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  2069. {
  2070. int i;
  2071. assert(src1==src2);
  2072. for (i=0; i<width; i++)
  2073. {
  2074. int r= src1[6*i + 0] + src1[6*i + 3];
  2075. int g= src1[6*i + 1] + src1[6*i + 4];
  2076. int b= src1[6*i + 2] + src1[6*i + 5];
  2077. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1)) + 128;
  2078. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1)) + 128;
  2079. }
  2080. }
  2081. static inline void RENAME(bgr16ToY)(uint8_t *dst, uint8_t *src, int width)
  2082. {
  2083. int i;
  2084. for (i=0; i<width; i++)
  2085. {
  2086. int d= ((uint16_t*)src)[i];
  2087. int r= d&0x1F;
  2088. int g= (d>>5)&0x3F;
  2089. int b= (d>>11)&0x1F;
  2090. dst[i]= ((2*RY*r + GY*g + 2*BY*b)>>(RGB2YUV_SHIFT-2)) + 16;
  2091. }
  2092. }
  2093. static inline void RENAME(bgr16ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  2094. {
  2095. int i;
  2096. assert(src1 == src2);
  2097. for (i=0; i<width; i++)
  2098. {
  2099. int d0= ((uint32_t*)src1)[i];
  2100. int dl= (d0&0x07E0F81F);
  2101. int d= dl + (((d0>>16) + (d0<<16))&0x07E0F81F);
  2102. int r= d&0x3F;
  2103. int b= (d>>11)&0x3F;
  2104. int g= d>>21;
  2105. dstU[i]= ((2*RU*r + GU*g + 2*BU*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
  2106. dstV[i]= ((2*RV*r + GV*g + 2*BV*b)>>(RGB2YUV_SHIFT+1-2)) + 128;
  2107. }
  2108. }
  2109. static inline void RENAME(bgr15ToY)(uint8_t *dst, uint8_t *src, int width)
  2110. {
  2111. int i;
  2112. for (i=0; i<width; i++)
  2113. {
  2114. int d= ((uint16_t*)src)[i];
  2115. int r= d&0x1F;
  2116. int g= (d>>5)&0x1F;
  2117. int b= (d>>10)&0x1F;
  2118. dst[i]= ((RY*r + GY*g + BY*b)>>(RGB2YUV_SHIFT-3)) + 16;
  2119. }
  2120. }
  2121. static inline void RENAME(bgr15ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width)
  2122. {
  2123. int i;
  2124. assert(src1 == src2);
  2125. for (i=0; i<width; i++)
  2126. {
  2127. int d0= ((uint32_t*)src1)[i];
  2128. int dl= (d0&0x03E07C1F);
  2129. int d= dl + (((d0>>16) + (d0<<16))&0x03E07C1F);
  2130. int r= d&0x3F;
  2131. int b= (d>>10)&0x3F;
  2132. int g= d>>21;
  2133. dstU[i]= ((RU*r + GU*g + BU*b)>>(RGB2YUV_SHIFT+1-3)) + 128;
  2134. dstV[i]= ((RV*r + GV*g + BV*b)>>(RGB2YUV_SHIFT+1-3)) + 128;
  2135. }
  2136. }
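/* palToY takes the low byte and palToUV the next two bytes of each palette entry, so
 * pal[] is expected to hold packed YUV (Y | U<<8 | V<<16) rather than the raw RGB
 * palette; presumably the caller converts it before these are reached. */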
  2137. static inline void RENAME(palToY)(uint8_t *dst, uint8_t *src, int width, uint32_t *pal)
  2138. {
  2139. int i;
  2140. for (i=0; i<width; i++)
  2141. {
  2142. int d= src[i];
  2143. dst[i]= pal[d] & 0xFF;
  2144. }
  2145. }
  2146. static inline void RENAME(palToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, int width, uint32_t *pal)
  2147. {
  2148. int i;
  2149. assert(src1 == src2);
  2150. for (i=0; i<width; i++)
  2151. {
  2152. int p= pal[src1[i]];
  2153. dstU[i]= p>>8;
  2154. dstV[i]= p>>16;
  2155. }
  2156. }
  2157. // bilinear / bicubic scaling
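/* Generic horizontal FIR scaler: dst[i] = clip((sum over j of
 * src[filterPos[i] + j] * filter[i*filterSize + j]) >> 7, 0, 2^15 - 1), i.e. the output
 * stays in the 15-bit intermediate format used throughout. The MMX paths special-case
 * filterSize 4 and 8 (two output samples per loop); the generic MMX loop handles any
 * multiple of 4. */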
  2158. static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
  2159. int16_t *filter, int16_t *filterPos, long filterSize)
  2160. {
  2161. #ifdef HAVE_MMX
  2162. assert(filterSize % 4 == 0 && filterSize>0);
2163. if (filterSize==4) // Always true for upscaling, sometimes for downscaling too.
  2164. {
  2165. long counter= -2*dstW;
  2166. filter-= counter*2;
  2167. filterPos-= counter/2;
  2168. dst-= counter/2;
  2169. asm volatile(
  2170. #if defined(PIC)
  2171. "push %%"REG_b" \n\t"
  2172. #endif
  2173. "pxor %%mm7, %%mm7 \n\t"
  2174. "movq "MANGLE(w02)", %%mm6 \n\t"
  2175. "push %%"REG_BP" \n\t" // we use 7 regs here ...
  2176. "mov %%"REG_a", %%"REG_BP" \n\t"
  2177. ASMALIGN(4)
  2178. "1: \n\t"
  2179. "movzwl (%2, %%"REG_BP"), %%eax \n\t"
  2180. "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
  2181. "movq (%1, %%"REG_BP", 4), %%mm1 \n\t"
  2182. "movq 8(%1, %%"REG_BP", 4), %%mm3 \n\t"
  2183. "movd (%3, %%"REG_a"), %%mm0 \n\t"
  2184. "movd (%3, %%"REG_b"), %%mm2 \n\t"
  2185. "punpcklbw %%mm7, %%mm0 \n\t"
  2186. "punpcklbw %%mm7, %%mm2 \n\t"
  2187. "pmaddwd %%mm1, %%mm0 \n\t"
  2188. "pmaddwd %%mm2, %%mm3 \n\t"
  2189. "psrad $8, %%mm0 \n\t"
  2190. "psrad $8, %%mm3 \n\t"
  2191. "packssdw %%mm3, %%mm0 \n\t"
  2192. "pmaddwd %%mm6, %%mm0 \n\t"
  2193. "packssdw %%mm0, %%mm0 \n\t"
  2194. "movd %%mm0, (%4, %%"REG_BP") \n\t"
  2195. "add $4, %%"REG_BP" \n\t"
  2196. " jnc 1b \n\t"
  2197. "pop %%"REG_BP" \n\t"
  2198. #if defined(PIC)
  2199. "pop %%"REG_b" \n\t"
  2200. #endif
  2201. : "+a" (counter)
  2202. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  2203. #if !defined(PIC)
  2204. : "%"REG_b
  2205. #endif
  2206. );
  2207. }
  2208. else if (filterSize==8)
  2209. {
  2210. long counter= -2*dstW;
  2211. filter-= counter*4;
  2212. filterPos-= counter/2;
  2213. dst-= counter/2;
  2214. asm volatile(
  2215. #if defined(PIC)
  2216. "push %%"REG_b" \n\t"
  2217. #endif
  2218. "pxor %%mm7, %%mm7 \n\t"
  2219. "movq "MANGLE(w02)", %%mm6 \n\t"
  2220. "push %%"REG_BP" \n\t" // we use 7 regs here ...
  2221. "mov %%"REG_a", %%"REG_BP" \n\t"
  2222. ASMALIGN(4)
  2223. "1: \n\t"
  2224. "movzwl (%2, %%"REG_BP"), %%eax \n\t"
  2225. "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
  2226. "movq (%1, %%"REG_BP", 8), %%mm1 \n\t"
  2227. "movq 16(%1, %%"REG_BP", 8), %%mm3 \n\t"
  2228. "movd (%3, %%"REG_a"), %%mm0 \n\t"
  2229. "movd (%3, %%"REG_b"), %%mm2 \n\t"
  2230. "punpcklbw %%mm7, %%mm0 \n\t"
  2231. "punpcklbw %%mm7, %%mm2 \n\t"
  2232. "pmaddwd %%mm1, %%mm0 \n\t"
  2233. "pmaddwd %%mm2, %%mm3 \n\t"
  2234. "movq 8(%1, %%"REG_BP", 8), %%mm1 \n\t"
  2235. "movq 24(%1, %%"REG_BP", 8), %%mm5 \n\t"
  2236. "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
  2237. "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
  2238. "punpcklbw %%mm7, %%mm4 \n\t"
  2239. "punpcklbw %%mm7, %%mm2 \n\t"
  2240. "pmaddwd %%mm1, %%mm4 \n\t"
  2241. "pmaddwd %%mm2, %%mm5 \n\t"
  2242. "paddd %%mm4, %%mm0 \n\t"
  2243. "paddd %%mm5, %%mm3 \n\t"
  2244. "psrad $8, %%mm0 \n\t"
  2245. "psrad $8, %%mm3 \n\t"
  2246. "packssdw %%mm3, %%mm0 \n\t"
  2247. "pmaddwd %%mm6, %%mm0 \n\t"
  2248. "packssdw %%mm0, %%mm0 \n\t"
  2249. "movd %%mm0, (%4, %%"REG_BP") \n\t"
  2250. "add $4, %%"REG_BP" \n\t"
  2251. " jnc 1b \n\t"
  2252. "pop %%"REG_BP" \n\t"
  2253. #if defined(PIC)
  2254. "pop %%"REG_b" \n\t"
  2255. #endif
  2256. : "+a" (counter)
  2257. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  2258. #if !defined(PIC)
  2259. : "%"REG_b
  2260. #endif
  2261. );
  2262. }
  2263. else
  2264. {
  2265. uint8_t *offset = src+filterSize;
  2266. long counter= -2*dstW;
  2267. //filter-= counter*filterSize/2;
  2268. filterPos-= counter/2;
  2269. dst-= counter/2;
  2270. asm volatile(
  2271. "pxor %%mm7, %%mm7 \n\t"
  2272. "movq "MANGLE(w02)", %%mm6 \n\t"
  2273. ASMALIGN(4)
  2274. "1: \n\t"
  2275. "mov %2, %%"REG_c" \n\t"
  2276. "movzwl (%%"REG_c", %0), %%eax \n\t"
  2277. "movzwl 2(%%"REG_c", %0), %%edx \n\t"
  2278. "mov %5, %%"REG_c" \n\t"
  2279. "pxor %%mm4, %%mm4 \n\t"
  2280. "pxor %%mm5, %%mm5 \n\t"
  2281. "2: \n\t"
  2282. "movq (%1), %%mm1 \n\t"
  2283. "movq (%1, %6), %%mm3 \n\t"
  2284. "movd (%%"REG_c", %%"REG_a"), %%mm0 \n\t"
  2285. "movd (%%"REG_c", %%"REG_d"), %%mm2 \n\t"
  2286. "punpcklbw %%mm7, %%mm0 \n\t"
  2287. "punpcklbw %%mm7, %%mm2 \n\t"
  2288. "pmaddwd %%mm1, %%mm0 \n\t"
  2289. "pmaddwd %%mm2, %%mm3 \n\t"
  2290. "paddd %%mm3, %%mm5 \n\t"
  2291. "paddd %%mm0, %%mm4 \n\t"
  2292. "add $8, %1 \n\t"
  2293. "add $4, %%"REG_c" \n\t"
  2294. "cmp %4, %%"REG_c" \n\t"
  2295. " jb 2b \n\t"
  2296. "add %6, %1 \n\t"
  2297. "psrad $8, %%mm4 \n\t"
  2298. "psrad $8, %%mm5 \n\t"
  2299. "packssdw %%mm5, %%mm4 \n\t"
  2300. "pmaddwd %%mm6, %%mm4 \n\t"
  2301. "packssdw %%mm4, %%mm4 \n\t"
  2302. "mov %3, %%"REG_a" \n\t"
  2303. "movd %%mm4, (%%"REG_a", %0) \n\t"
  2304. "add $4, %0 \n\t"
  2305. " jnc 1b \n\t"
  2306. : "+r" (counter), "+r" (filter)
  2307. : "m" (filterPos), "m" (dst), "m"(offset),
  2308. "m" (src), "r" (filterSize*2)
  2309. : "%"REG_a, "%"REG_c, "%"REG_d
  2310. );
  2311. }
  2312. #else
  2313. #ifdef HAVE_ALTIVEC
  2314. hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
  2315. #else
  2316. int i;
  2317. for (i=0; i<dstW; i++)
  2318. {
  2319. int j;
  2320. int srcPos= filterPos[i];
  2321. int val=0;
  2322. //printf("filterPos: %d\n", filterPos[i]);
  2323. for (j=0; j<filterSize; j++)
  2324. {
  2325. //printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
  2326. val += ((int)src[srcPos + j])*filter[filterSize*i + j];
  2327. }
  2328. //filter += hFilterSize;
  2329. dst[i] = av_clip(val>>7, 0, (1<<15)-1); // the cubic equation does overflow ...
  2330. //dst[i] = val>>7;
  2331. }
  2332. #endif /* HAVE_ALTIVEC */
  2333. #endif /* HAVE_MMX */
  2334. }
  2335. // *** horizontal scale Y line to temp buffer
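/* hyscale(): first converts packed / RGB / paletted input to an 8-bit luma line in
 * formatConvBuffer, then scales it horizontally to dstWidth samples. Without
 * SWS_FAST_BILINEAR (or, with MMX, when the MMX2 scaler cannot be used) this goes
 * through the hScale() FIR above; otherwise a fast bilinear path is taken, either via
 * the runtime-generated MMX2 code in funnyYCode or via the plain asm/C loops. The
 * <<7 / >>9 arithmetic keeps the result in the same 15-bit scale as hScale(). */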
  2336. static inline void RENAME(hyscale)(uint16_t *dst, long dstWidth, uint8_t *src, int srcW, int xInc,
  2337. int flags, int canMMX2BeUsed, int16_t *hLumFilter,
  2338. int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
  2339. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  2340. int32_t *mmx2FilterPos, uint8_t *pal)
  2341. {
  2342. if (srcFormat==PIX_FMT_YUYV422 || srcFormat==PIX_FMT_GRAY16BE)
  2343. {
  2344. RENAME(yuy2ToY)(formatConvBuffer, src, srcW);
  2345. src= formatConvBuffer;
  2346. }
  2347. else if (srcFormat==PIX_FMT_UYVY422 || srcFormat==PIX_FMT_GRAY16LE)
  2348. {
  2349. RENAME(uyvyToY)(formatConvBuffer, src, srcW);
  2350. src= formatConvBuffer;
  2351. }
  2352. else if (srcFormat==PIX_FMT_RGB32)
  2353. {
  2354. RENAME(bgr32ToY)(formatConvBuffer, src, srcW);
  2355. src= formatConvBuffer;
  2356. }
  2357. else if (srcFormat==PIX_FMT_RGB32_1)
  2358. {
  2359. RENAME(bgr32ToY)(formatConvBuffer, src+ALT32_CORR, srcW);
  2360. src= formatConvBuffer;
  2361. }
  2362. else if (srcFormat==PIX_FMT_BGR24)
  2363. {
  2364. RENAME(bgr24ToY)(formatConvBuffer, src, srcW);
  2365. src= formatConvBuffer;
  2366. }
  2367. else if (srcFormat==PIX_FMT_BGR565)
  2368. {
  2369. RENAME(bgr16ToY)(formatConvBuffer, src, srcW);
  2370. src= formatConvBuffer;
  2371. }
  2372. else if (srcFormat==PIX_FMT_BGR555)
  2373. {
  2374. RENAME(bgr15ToY)(formatConvBuffer, src, srcW);
  2375. src= formatConvBuffer;
  2376. }
  2377. else if (srcFormat==PIX_FMT_BGR32)
  2378. {
  2379. RENAME(rgb32ToY)(formatConvBuffer, src, srcW);
  2380. src= formatConvBuffer;
  2381. }
  2382. else if (srcFormat==PIX_FMT_BGR32_1)
  2383. {
  2384. RENAME(rgb32ToY)(formatConvBuffer, src+ALT32_CORR, srcW);
  2385. src= formatConvBuffer;
  2386. }
  2387. else if (srcFormat==PIX_FMT_RGB24)
  2388. {
  2389. RENAME(rgb24ToY)(formatConvBuffer, src, srcW);
  2390. src= formatConvBuffer;
  2391. }
  2392. else if (srcFormat==PIX_FMT_RGB565)
  2393. {
  2394. RENAME(rgb16ToY)(formatConvBuffer, src, srcW);
  2395. src= formatConvBuffer;
  2396. }
  2397. else if (srcFormat==PIX_FMT_RGB555)
  2398. {
  2399. RENAME(rgb15ToY)(formatConvBuffer, src, srcW);
  2400. src= formatConvBuffer;
  2401. }
  2402. else if (srcFormat==PIX_FMT_RGB8 || srcFormat==PIX_FMT_BGR8 || srcFormat==PIX_FMT_PAL8 || srcFormat==PIX_FMT_BGR4_BYTE || srcFormat==PIX_FMT_RGB4_BYTE)
  2403. {
  2404. RENAME(palToY)(formatConvBuffer, src, srcW, (uint32_t*)pal);
  2405. src= formatConvBuffer;
  2406. }
  2407. #ifdef HAVE_MMX
  2408. // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
  2409. if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2410. #else
  2411. if (!(flags&SWS_FAST_BILINEAR))
  2412. #endif
  2413. {
  2414. RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
  2415. }
  2416. else // fast bilinear upscale / crap downscale
  2417. {
  2418. #if defined(ARCH_X86)
  2419. #ifdef HAVE_MMX2
  2420. int i;
  2421. #if defined(PIC)
  2422. uint64_t ebxsave __attribute__((aligned(8)));
  2423. #endif
  2424. if (canMMX2BeUsed)
  2425. {
  2426. asm volatile(
  2427. #if defined(PIC)
  2428. "mov %%"REG_b", %5 \n\t"
  2429. #endif
  2430. "pxor %%mm7, %%mm7 \n\t"
  2431. "mov %0, %%"REG_c" \n\t"
  2432. "mov %1, %%"REG_D" \n\t"
  2433. "mov %2, %%"REG_d" \n\t"
  2434. "mov %3, %%"REG_b" \n\t"
  2435. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2436. PREFETCH" (%%"REG_c") \n\t"
  2437. PREFETCH" 32(%%"REG_c") \n\t"
  2438. PREFETCH" 64(%%"REG_c") \n\t"
  2439. #ifdef ARCH_X86_64
  2440. #define FUNNY_Y_CODE \
  2441. "movl (%%"REG_b"), %%esi \n\t"\
  2442. "call *%4 \n\t"\
  2443. "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
  2444. "add %%"REG_S", %%"REG_c" \n\t"\
  2445. "add %%"REG_a", %%"REG_D" \n\t"\
  2446. "xor %%"REG_a", %%"REG_a" \n\t"\
  2447. #else
  2448. #define FUNNY_Y_CODE \
  2449. "movl (%%"REG_b"), %%esi \n\t"\
  2450. "call *%4 \n\t"\
  2451. "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
  2452. "add %%"REG_a", %%"REG_D" \n\t"\
  2453. "xor %%"REG_a", %%"REG_a" \n\t"\
  2454. #endif /* ARCH_X86_64 */
  2455. FUNNY_Y_CODE
  2456. FUNNY_Y_CODE
  2457. FUNNY_Y_CODE
  2458. FUNNY_Y_CODE
  2459. FUNNY_Y_CODE
  2460. FUNNY_Y_CODE
  2461. FUNNY_Y_CODE
  2462. FUNNY_Y_CODE
  2463. #if defined(PIC)
  2464. "mov %5, %%"REG_b" \n\t"
  2465. #endif
  2466. :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2467. "m" (funnyYCode)
  2468. #if defined(PIC)
  2469. ,"m" (ebxsave)
  2470. #endif
  2471. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
  2472. #if !defined(PIC)
  2473. ,"%"REG_b
  2474. #endif
  2475. );
  2476. for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
  2477. }
  2478. else
  2479. {
  2480. #endif /* HAVE_MMX2 */
  2481. long xInc_shr16 = xInc >> 16;
  2482. uint16_t xInc_mask = xInc & 0xffff;
  2483. //NO MMX just normal asm ...
  2484. asm volatile(
  2485. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2486. "xor %%"REG_d", %%"REG_d" \n\t" // xx
  2487. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2488. ASMALIGN(4)
  2489. "1: \n\t"
  2490. "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
  2491. "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2492. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2493. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2494. "shll $16, %%edi \n\t"
  2495. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2496. "mov %1, %%"REG_D" \n\t"
  2497. "shrl $9, %%esi \n\t"
  2498. "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
  2499. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2500. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2501. "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
  2502. "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2503. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2504. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2505. "shll $16, %%edi \n\t"
  2506. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2507. "mov %1, %%"REG_D" \n\t"
  2508. "shrl $9, %%esi \n\t"
  2509. "movw %%si, 2(%%"REG_D", %%"REG_a", 2) \n\t"
  2510. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2511. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2512. "add $2, %%"REG_a" \n\t"
  2513. "cmp %2, %%"REG_a" \n\t"
  2514. " jb 1b \n\t"
  2515. :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
  2516. : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
  2517. );
  2518. #ifdef HAVE_MMX2
  2519. } //if MMX2 can't be used
  2520. #endif
  2521. #else
  2522. int i;
  2523. unsigned int xpos=0;
  2524. for (i=0;i<dstWidth;i++)
  2525. {
  2526. register unsigned int xx=xpos>>16;
  2527. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2528. dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
  2529. xpos+=xInc;
  2530. }
  2531. #endif /* defined(ARCH_X86) */
  2532. }
  2533. }
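/* hcscale(): same structure as hyscale() but for both chroma planes at once; the second
 * plane is written VOFW samples further into dst. Note that the portable fast-bilinear
 * fallback weights with (xalpha^127), i.e. 127-xalpha, so the two weights sum to 127
 * rather than 128; the exact (slightly slower) form is kept in the comment next to it. */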
  2534. inline static void RENAME(hcscale)(uint16_t *dst, long dstWidth, uint8_t *src1, uint8_t *src2,
  2535. int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
  2536. int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
  2537. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  2538. int32_t *mmx2FilterPos, uint8_t *pal)
  2539. {
  2540. if (srcFormat==PIX_FMT_YUYV422)
  2541. {
  2542. RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW);
  2543. src1= formatConvBuffer;
  2544. src2= formatConvBuffer+VOFW;
  2545. }
  2546. else if (srcFormat==PIX_FMT_UYVY422)
  2547. {
  2548. RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW);
  2549. src1= formatConvBuffer;
  2550. src2= formatConvBuffer+VOFW;
  2551. }
  2552. else if (srcFormat==PIX_FMT_RGB32)
  2553. {
  2554. RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW);
  2555. src1= formatConvBuffer;
  2556. src2= formatConvBuffer+VOFW;
  2557. }
  2558. else if (srcFormat==PIX_FMT_RGB32_1)
  2559. {
  2560. RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW);
  2561. src1= formatConvBuffer;
  2562. src2= formatConvBuffer+VOFW;
  2563. }
  2564. else if (srcFormat==PIX_FMT_BGR24)
  2565. {
  2566. RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW);
  2567. src1= formatConvBuffer;
  2568. src2= formatConvBuffer+VOFW;
  2569. }
  2570. else if (srcFormat==PIX_FMT_BGR565)
  2571. {
  2572. RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW);
  2573. src1= formatConvBuffer;
  2574. src2= formatConvBuffer+VOFW;
  2575. }
  2576. else if (srcFormat==PIX_FMT_BGR555)
  2577. {
  2578. RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW);
  2579. src1= formatConvBuffer;
  2580. src2= formatConvBuffer+VOFW;
  2581. }
  2582. else if (srcFormat==PIX_FMT_BGR32)
  2583. {
  2584. RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW);
  2585. src1= formatConvBuffer;
  2586. src2= formatConvBuffer+VOFW;
  2587. }
  2588. else if (srcFormat==PIX_FMT_BGR32_1)
  2589. {
  2590. RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW);
  2591. src1= formatConvBuffer;
  2592. src2= formatConvBuffer+VOFW;
  2593. }
  2594. else if (srcFormat==PIX_FMT_RGB24)
  2595. {
  2596. RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW);
  2597. src1= formatConvBuffer;
  2598. src2= formatConvBuffer+VOFW;
  2599. }
  2600. else if (srcFormat==PIX_FMT_RGB565)
  2601. {
  2602. RENAME(rgb16ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW);
  2603. src1= formatConvBuffer;
  2604. src2= formatConvBuffer+VOFW;
  2605. }
  2606. else if (srcFormat==PIX_FMT_RGB555)
  2607. {
  2608. RENAME(rgb15ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW);
  2609. src1= formatConvBuffer;
  2610. src2= formatConvBuffer+VOFW;
  2611. }
  2612. else if (isGray(srcFormat))
  2613. {
  2614. return;
  2615. }
  2616. else if (srcFormat==PIX_FMT_RGB8 || srcFormat==PIX_FMT_BGR8 || srcFormat==PIX_FMT_PAL8 || srcFormat==PIX_FMT_BGR4_BYTE || srcFormat==PIX_FMT_RGB4_BYTE)
  2617. {
  2618. RENAME(palToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, (uint32_t*)pal);
  2619. src1= formatConvBuffer;
  2620. src2= formatConvBuffer+VOFW;
  2621. }
  2622. #ifdef HAVE_MMX
  2623. // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
  2624. if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2625. #else
  2626. if (!(flags&SWS_FAST_BILINEAR))
  2627. #endif
  2628. {
  2629. RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2630. RENAME(hScale)(dst+VOFW, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2631. }
  2632. else // fast bilinear upscale / crap downscale
  2633. {
  2634. #if defined(ARCH_X86)
  2635. #ifdef HAVE_MMX2
  2636. int i;
  2637. #if defined(PIC)
  2638. uint64_t ebxsave __attribute__((aligned(8)));
  2639. #endif
  2640. if (canMMX2BeUsed)
  2641. {
  2642. asm volatile(
  2643. #if defined(PIC)
  2644. "mov %%"REG_b", %6 \n\t"
  2645. #endif
  2646. "pxor %%mm7, %%mm7 \n\t"
  2647. "mov %0, %%"REG_c" \n\t"
  2648. "mov %1, %%"REG_D" \n\t"
  2649. "mov %2, %%"REG_d" \n\t"
  2650. "mov %3, %%"REG_b" \n\t"
  2651. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2652. PREFETCH" (%%"REG_c") \n\t"
  2653. PREFETCH" 32(%%"REG_c") \n\t"
  2654. PREFETCH" 64(%%"REG_c") \n\t"
  2655. #ifdef ARCH_X86_64
  2656. #define FUNNY_UV_CODE \
  2657. "movl (%%"REG_b"), %%esi \n\t"\
  2658. "call *%4 \n\t"\
  2659. "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
  2660. "add %%"REG_S", %%"REG_c" \n\t"\
  2661. "add %%"REG_a", %%"REG_D" \n\t"\
  2662. "xor %%"REG_a", %%"REG_a" \n\t"\
  2663. #else
  2664. #define FUNNY_UV_CODE \
  2665. "movl (%%"REG_b"), %%esi \n\t"\
  2666. "call *%4 \n\t"\
  2667. "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
  2668. "add %%"REG_a", %%"REG_D" \n\t"\
  2669. "xor %%"REG_a", %%"REG_a" \n\t"\
  2670. #endif /* ARCH_X86_64 */
  2671. FUNNY_UV_CODE
  2672. FUNNY_UV_CODE
  2673. FUNNY_UV_CODE
  2674. FUNNY_UV_CODE
  2675. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2676. "mov %5, %%"REG_c" \n\t" // src
  2677. "mov %1, %%"REG_D" \n\t" // buf1
  2678. "add $"AV_STRINGIFY(VOF)", %%"REG_D" \n\t"
  2679. PREFETCH" (%%"REG_c") \n\t"
  2680. PREFETCH" 32(%%"REG_c") \n\t"
  2681. PREFETCH" 64(%%"REG_c") \n\t"
  2682. FUNNY_UV_CODE
  2683. FUNNY_UV_CODE
  2684. FUNNY_UV_CODE
  2685. FUNNY_UV_CODE
  2686. #if defined(PIC)
  2687. "mov %6, %%"REG_b" \n\t"
  2688. #endif
  2689. :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2690. "m" (funnyUVCode), "m" (src2)
  2691. #if defined(PIC)
  2692. ,"m" (ebxsave)
  2693. #endif
  2694. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
  2695. #if !defined(PIC)
  2696. ,"%"REG_b
  2697. #endif
  2698. );
  2699. for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
  2700. {
  2701. //printf("%d %d %d\n", dstWidth, i, srcW);
  2702. dst[i] = src1[srcW-1]*128;
  2703. dst[i+VOFW] = src2[srcW-1]*128;
  2704. }
  2705. }
  2706. else
  2707. {
  2708. #endif /* HAVE_MMX2 */
  2709. long xInc_shr16 = (long) (xInc >> 16);
  2710. uint16_t xInc_mask = xInc & 0xffff;
  2711. asm volatile(
  2712. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2713. "xor %%"REG_d", %%"REG_d" \n\t" // xx
  2714. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2715. ASMALIGN(4)
  2716. "1: \n\t"
  2717. "mov %0, %%"REG_S" \n\t"
  2718. "movzbl (%%"REG_S", %%"REG_d"), %%edi \n\t" //src[xx]
  2719. "movzbl 1(%%"REG_S", %%"REG_d"), %%esi \n\t" //src[xx+1]
  2720. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2721. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2722. "shll $16, %%edi \n\t"
  2723. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2724. "mov %1, %%"REG_D" \n\t"
  2725. "shrl $9, %%esi \n\t"
  2726. "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
  2727. "movzbl (%5, %%"REG_d"), %%edi \n\t" //src[xx]
  2728. "movzbl 1(%5, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2729. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2730. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2731. "shll $16, %%edi \n\t"
  2732. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2733. "mov %1, %%"REG_D" \n\t"
  2734. "shrl $9, %%esi \n\t"
  2735. "movw %%si, "AV_STRINGIFY(VOF)"(%%"REG_D", %%"REG_a", 2) \n\t"
  2736. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2737. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2738. "add $1, %%"REG_a" \n\t"
  2739. "cmp %2, %%"REG_a" \n\t"
  2740. " jb 1b \n\t"
2741. /* The "g" operand is needed to support GCC 4.0, but it makes MPlayer crash on IA-32
2742. machines when compiled with GCC 3.3, hence the conditional below. */
  2743. #if defined(ARCH_X86_64) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
  2744. :: "m" (src1), "m" (dst), "g" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
  2745. #else
  2746. :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
  2747. #endif
  2748. "r" (src2)
  2749. : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
  2750. );
  2751. #ifdef HAVE_MMX2
  2752. } //if MMX2 can't be used
  2753. #endif
  2754. #else
  2755. int i;
  2756. unsigned int xpos=0;
  2757. for (i=0;i<dstWidth;i++)
  2758. {
  2759. register unsigned int xx=xpos>>16;
  2760. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2761. dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
  2762. dst[i+VOFW]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
  2763. /* slower
  2764. dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
  2765. dst[i+VOFW]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
  2766. */
  2767. xpos+=xInc;
  2768. }
  2769. #endif /* defined(ARCH_X86) */
  2770. }
  2771. }
  2772. static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
  2773. int srcSliceH, uint8_t* dst[], int dstStride[]){
2774. /* load a few things into local vars to make the code more readable (and faster) */
  2775. const int srcW= c->srcW;
  2776. const int dstW= c->dstW;
  2777. const int dstH= c->dstH;
  2778. const int chrDstW= c->chrDstW;
  2779. const int chrSrcW= c->chrSrcW;
  2780. const int lumXInc= c->lumXInc;
  2781. const int chrXInc= c->chrXInc;
  2782. const int dstFormat= c->dstFormat;
  2783. const int srcFormat= c->srcFormat;
  2784. const int flags= c->flags;
  2785. const int canMMX2BeUsed= c->canMMX2BeUsed;
  2786. int16_t *vLumFilterPos= c->vLumFilterPos;
  2787. int16_t *vChrFilterPos= c->vChrFilterPos;
  2788. int16_t *hLumFilterPos= c->hLumFilterPos;
  2789. int16_t *hChrFilterPos= c->hChrFilterPos;
  2790. int16_t *vLumFilter= c->vLumFilter;
  2791. int16_t *vChrFilter= c->vChrFilter;
  2792. int16_t *hLumFilter= c->hLumFilter;
  2793. int16_t *hChrFilter= c->hChrFilter;
  2794. int32_t *lumMmxFilter= c->lumMmxFilter;
  2795. int32_t *chrMmxFilter= c->chrMmxFilter;
  2796. const int vLumFilterSize= c->vLumFilterSize;
  2797. const int vChrFilterSize= c->vChrFilterSize;
  2798. const int hLumFilterSize= c->hLumFilterSize;
  2799. const int hChrFilterSize= c->hChrFilterSize;
  2800. int16_t **lumPixBuf= c->lumPixBuf;
  2801. int16_t **chrPixBuf= c->chrPixBuf;
  2802. const int vLumBufSize= c->vLumBufSize;
  2803. const int vChrBufSize= c->vChrBufSize;
  2804. uint8_t *funnyYCode= c->funnyYCode;
  2805. uint8_t *funnyUVCode= c->funnyUVCode;
  2806. uint8_t *formatConvBuffer= c->formatConvBuffer;
  2807. const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
  2808. const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
  2809. int lastDstY;
  2810. uint8_t *pal=NULL;
  2811. /* vars which will change and which we need to store back in the context */
  2812. int dstY= c->dstY;
  2813. int lumBufIndex= c->lumBufIndex;
  2814. int chrBufIndex= c->chrBufIndex;
  2815. int lastInLumBuf= c->lastInLumBuf;
  2816. int lastInChrBuf= c->lastInChrBuf;
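/* For packed (and paletted) input everything lives in plane 0, so the three plane
 * pointers and strides are unified below; src[1] is saved to pal first, since for the
 * paletted formats that is where the palette is passed in (apparently already converted
 * to the packed YUV layout palToY/palToUV expect). */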
  2817. if (isPacked(c->srcFormat)){
  2818. pal= src[1];
  2819. src[0]=
  2820. src[1]=
  2821. src[2]= src[0];
  2822. srcStride[0]=
  2823. srcStride[1]=
  2824. srcStride[2]= srcStride[0];
  2825. }
  2826. srcStride[1]<<= c->vChrDrop;
  2827. srcStride[2]<<= c->vChrDrop;
  2828. //printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
  2829. // (int)dst[0], (int)dst[1], (int)dst[2]);
  2830. #if 0 //self test FIXME move to a vfilter or something
  2831. {
  2832. static volatile int i=0;
  2833. i++;
  2834. if (srcFormat==PIX_FMT_YUV420P && i==1 && srcSliceH>= c->srcH)
  2835. selfTest(src, srcStride, c->srcW, c->srcH);
  2836. i--;
  2837. }
  2838. #endif
  2839. //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
  2840. //dstStride[0],dstStride[1],dstStride[2]);
  2841. if (dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
  2842. {
  2843. static int firstTime=1; //FIXME move this into the context perhaps
  2844. if (flags & SWS_PRINT_INFO && firstTime)
  2845. {
  2846. av_log(c, AV_LOG_WARNING, "Warning: dstStride is not aligned!\n"
  2847. " ->cannot do aligned memory accesses anymore\n");
  2848. firstTime=0;
  2849. }
  2850. }
2851. /* Note: the user might start scaling the picture in the middle, so this
2852. will not get executed. This is not really intended, but it works
2853. currently, so people might do it. */
  2854. if (srcSliceY ==0){
  2855. lumBufIndex=0;
  2856. chrBufIndex=0;
  2857. dstY=0;
  2858. lastInLumBuf= -1;
  2859. lastInChrBuf= -1;
  2860. }
  2861. lastDstY= dstY;
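/* Main loop, one output line per iteration. lumPixBuf/chrPixBuf are ring buffers of
 * vLumBufSize/vChrBufSize horizontally scaled lines; lastInLumBuf/lastInChrBuf remember
 * the last source line already scaled into them and lumBufIndex/chrBufIndex the slot it
 * went to, so a later slice can continue where the previous one stopped. For each dstY
 * the vertical filter then reads vLumFilterSize/vChrFilterSize consecutive lines out of
 * these buffers. */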
  2862. for (;dstY < dstH; dstY++){
  2863. unsigned char *dest =dst[0]+dstStride[0]*dstY;
  2864. const int chrDstY= dstY>>c->chrDstVSubSample;
  2865. unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
  2866. unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
  2867. const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
  2868. const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
  2869. const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
  2870. const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
  2871. //printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
  2872. // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample);
  2873. //handle holes (FAST_BILINEAR & weird filters)
  2874. if (firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
  2875. if (firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
  2876. //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
  2877. assert(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1);
  2878. assert(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1);
2879. // Do we have enough lines in this slice to output the dstY line?
  2880. if (lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
  2881. {
  2882. //Do horizontal scaling
  2883. while(lastInLumBuf < lastLumSrcY)
  2884. {
  2885. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2886. lumBufIndex++;
  2887. //printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
  2888. assert(lumBufIndex < 2*vLumBufSize);
  2889. assert(lastInLumBuf + 1 - srcSliceY < srcSliceH);
  2890. assert(lastInLumBuf + 1 - srcSliceY >= 0);
  2891. //printf("%d %d\n", lumBufIndex, vLumBufSize);
  2892. RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2893. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2894. funnyYCode, c->srcFormat, formatConvBuffer,
  2895. c->lumMmx2Filter, c->lumMmx2FilterPos, pal);
  2896. lastInLumBuf++;
  2897. }
  2898. while(lastInChrBuf < lastChrSrcY)
  2899. {
  2900. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2901. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2902. chrBufIndex++;
  2903. assert(chrBufIndex < 2*vChrBufSize);
  2904. assert(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH));
  2905. assert(lastInChrBuf + 1 - chrSrcSliceY >= 0);
  2906. //FIXME replace parameters through context struct (some at least)
  2907. if (!(isGray(srcFormat) || isGray(dstFormat)))
  2908. RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2909. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2910. funnyUVCode, c->srcFormat, formatConvBuffer,
  2911. c->chrMmx2Filter, c->chrMmx2FilterPos, pal);
  2912. lastInChrBuf++;
  2913. }
  2914. //wrap buf index around to stay inside the ring buffer
  2915. if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
  2916. if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
  2917. }
  2918. else // not enough lines left in this slice -> load the rest in the buffer
  2919. {
  2920. /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
  2921. firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
  2922. lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
  2923. vChrBufSize, vLumBufSize);*/
  2924. //Do horizontal scaling
  2925. while(lastInLumBuf+1 < srcSliceY + srcSliceH)
  2926. {
  2927. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2928. lumBufIndex++;
  2929. assert(lumBufIndex < 2*vLumBufSize);
  2930. assert(lastInLumBuf + 1 - srcSliceY < srcSliceH);
  2931. assert(lastInLumBuf + 1 - srcSliceY >= 0);
  2932. RENAME(hyscale)(lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2933. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2934. funnyYCode, c->srcFormat, formatConvBuffer,
  2935. c->lumMmx2Filter, c->lumMmx2FilterPos, pal);
  2936. lastInLumBuf++;
  2937. }
  2938. while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
  2939. {
  2940. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2941. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2942. chrBufIndex++;
  2943. assert(chrBufIndex < 2*vChrBufSize);
  2944. assert(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH);
  2945. assert(lastInChrBuf + 1 - chrSrcSliceY >= 0);
  2946. if (!(isGray(srcFormat) || isGray(dstFormat)))
  2947. RENAME(hcscale)(chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2948. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2949. funnyUVCode, c->srcFormat, formatConvBuffer,
  2950. c->chrMmx2Filter, c->chrMmx2FilterPos, pal);
  2951. lastInChrBuf++;
  2952. }
  2953. //wrap buf index around to stay inside the ring buffer
  2954. if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
  2955. if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
  2956. break; //we can't output a dstY line so let's try with the next slice
  2957. }
  2958. #ifdef HAVE_MMX
  2959. b5Dither= ff_dither8[dstY&1];
  2960. g6Dither= ff_dither4[dstY&1];
  2961. g5Dither= ff_dither8[dstY&1];
  2962. r5Dither= ff_dither8[(dstY+1)&1];
  2963. #endif
  2964. if (dstY < dstH-2)
  2965. {
  2966. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2967. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
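/* With MMX, per-line coefficient tables are rebuilt here: each step pairs a source-line
 * pointer with its 16-bit vertical coefficient replicated into a 32-bit word
 * (coeff*0x10001), presumably the layout the YSCALEYUV2* / yuv2*X asm walks; the
 * SWS_ACCURATE_RND variant instead packs two taps (two pointers, two coefficients)
 * per step. */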
  2968. #ifdef HAVE_MMX
  2969. int i;
            if (flags & SWS_ACCURATE_RND){
                for (i=0; i<vLumFilterSize; i+=2){
                    lumMmxFilter[2*i+0]= (int32_t)lumSrcPtr[i  ];
                    lumMmxFilter[2*i+1]= (int32_t)lumSrcPtr[i+(vLumFilterSize>1)];
                    lumMmxFilter[2*i+2]=
                    lumMmxFilter[2*i+3]= vLumFilter[dstY*vLumFilterSize + i    ]
                        + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
                }
                for (i=0; i<vChrFilterSize; i+=2){
                    chrMmxFilter[2*i+0]= (int32_t)chrSrcPtr[i  ];
                    chrMmxFilter[2*i+1]= (int32_t)chrSrcPtr[i+(vChrFilterSize>1)];
                    chrMmxFilter[2*i+2]=
                    chrMmxFilter[2*i+3]= vChrFilter[chrDstY*vChrFilterSize + i    ]
                        + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
                }
            }else{
                for (i=0; i<vLumFilterSize; i++)
                {
                    lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
                    lumMmxFilter[4*i+1]= (uint64_t)lumSrcPtr[i] >> 32;
                    lumMmxFilter[4*i+2]=
                    lumMmxFilter[4*i+3]=
                        ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
                }
                for (i=0; i<vChrFilterSize; i++)
                {
                    chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
                    chrMmxFilter[4*i+1]= (uint64_t)chrSrcPtr[i] >> 32;
                    chrMmxFilter[4*i+2]=
                    chrMmxFilter[4*i+3]=
                        ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
                }
            }
#endif
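            //vertically scale the buffered lines and write them out with the output function matching the destination format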
            if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
                const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
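                //chroma is vertically subsampled, so only write a chroma line when this output line actually carries one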
                if (dstY&chrSkipMask) uDest= NULL; //FIXME: split this into separate luma / chroma functions
                RENAME(yuv2nv12X)(c,
                    vLumFilter+dstY*vLumFilterSize   , lumSrcPtr, vLumFilterSize,
                    vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                    dest, uDest, dstW, chrDstW, dstFormat);
            }
            else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) //YV12 like
            {
                const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
                if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME: split this into separate luma / chroma functions
                if (vLumFilterSize == 1 && vChrFilterSize == 1) // unscaled YV12
                {
                    int16_t *lumBuf = lumPixBuf[0];
                    int16_t *chrBuf= chrPixBuf[0];
                    RENAME(yuv2yuv1)(c, lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
                }
                else //General YV12
                {
                    RENAME(yuv2yuvX)(c,
                        vLumFilter+dstY*vLumFilterSize   , lumSrcPtr, vLumFilterSize,
                        vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                        dest, uDest, vDest, dstW, chrDstW);
                }
            }
            else
            {
                assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
                assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
                if (vLumFilterSize == 1 && vChrFilterSize == 2) //unscaled RGB
                {
                    int chrAlpha= vChrFilter[2*dstY+1];
                    RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
                                        dest, dstW, chrAlpha, dstFormat, flags, dstY);
                }
                else if (vLumFilterSize == 2 && vChrFilterSize == 2) //bilinear upscale RGB
                {
                    int lumAlpha= vLumFilter[2*dstY+1];
                    int chrAlpha= vChrFilter[2*dstY+1];
                    lumMmxFilter[2]=
                    lumMmxFilter[3]= vLumFilter[2*dstY   ]*0x10001;
                    chrMmxFilter[2]=
                    chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001;
                    RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
                                        dest, dstW, lumAlpha, chrAlpha, dstY);
                }
                else //general RGB
                {
                    RENAME(yuv2packedX)(c,
                        vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
                        vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                        dest, dstW, dstY);
                }
            }
        }
        else // for the last few lines the MMX output code would write past the end of the array, so use the C output functions
        {
            int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
            int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
            if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
                const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
                if (dstY&chrSkipMask) uDest= NULL; //FIXME: split this into separate luma / chroma functions
                yuv2nv12XinC(
                    vLumFilter+dstY*vLumFilterSize   , lumSrcPtr, vLumFilterSize,
                    vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                    dest, uDest, dstW, chrDstW, dstFormat);
            }
            else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) //YV12
            {
                const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
                if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME: split this into separate luma / chroma functions
                yuv2yuvXinC(
                    vLumFilter+dstY*vLumFilterSize   , lumSrcPtr, vLumFilterSize,
                    vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                    dest, uDest, vDest, dstW, chrDstW);
            }
            else
            {
                assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
                assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
                yuv2packedXinC(c,
                    vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
                    vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
                    dest, dstW, dstY);
            }
        }
    }
#ifdef HAVE_MMX
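    //make the non-temporal (MOVNTQ) stores globally visible and clear the MMX state before returning to C/FPU code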
    asm volatile(SFENCE:::"memory");
    asm volatile(EMMS:::"memory");
#endif
    /* store changed local vars back in the context */
    c->dstY= dstY;
    c->lumBufIndex= lumBufIndex;
    c->chrBufIndex= chrBufIndex;
    c->lastInLumBuf= lastInLumBuf;
    c->lastInChrBuf= lastInChrBuf;
    return dstY - lastDstY;
}