/*
 * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * The C code (not assembly, MMX, ...) of this file can be used
 * under the LGPL license.
 */

#undef REAL_MOVNTQ
#undef MOVNTQ
#undef PAVGB
#undef PREFETCH
#undef PREFETCHW

#if HAVE_AMD3DNOW
#define PREFETCH  "prefetch"
#define PREFETCHW "prefetchw"
#elif HAVE_MMX2
#define PREFETCH  "prefetchnta"
#define PREFETCHW "prefetcht0"
#else
#define PREFETCH  " # nop"
#define PREFETCHW " # nop"
#endif

#if HAVE_MMX2
#define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
#elif HAVE_AMD3DNOW
#define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
#endif

#if HAVE_MMX2
#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
#define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif
#define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
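
/* Note on the macros above: the two-level pattern (REAL_MOVNTQ wrapped by
 * MOVNTQ, and likewise the REAL_YSCALE... and REAL_WRITE... macros below)
 * ensures arguments are macro-expanded before the inner macro stringifies
 * them with '#'. PREFETCH, PAVGB and MOVNTQ are selected once per template
 * instantiation from the HAVE_AMD3DNOW/HAVE_MMX2 flags, so each RENAME()d
 * function in this file targets exactly one CPU feature level. */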
#if HAVE_ALTIVEC
#include "ppc/swscale_altivec_template.c"
#endif

#define YSCALEYUV2YV12X(x, offset, dest, width) \
    __asm__ volatile(\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
    "movq %%mm3, %%mm4 \n\t"\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    ASMALIGN(4) /* FIXME Unroll? */\
    "1: \n\t"\
    "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
    "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
    "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* srcData */\
    "add $16, %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t"\
    "pmulhw %%mm0, %%mm5 \n\t"\
    "paddw %%mm2, %%mm3 \n\t"\
    "paddw %%mm5, %%mm4 \n\t"\
    " jnz 1b \n\t"\
    "psraw $3, %%mm3 \n\t"\
    "psraw $3, %%mm4 \n\t"\
    "packuswb %%mm4, %%mm3 \n\t"\
    MOVNTQ(%%mm3, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "cmp %2, %%"REG_a" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
    "movq %%mm3, %%mm4 \n\t"\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "jb 1b \n\t"\
    :: "r" (&c->redDither),\
       "r" (dest), "g" (width)\
    : "%"REG_a, "%"REG_d, "%"REG_S\
    );
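
/* Rough scalar equivalent of the vertical FIR loop above (the MMX filter
 * list is terminated by a NULL source pointer, hence the test/jnz on REG_S;
 * pmulhw keeps the high 16 bits of each product):
 *
 *     for (i = 0; i < width; i++) {
 *         int val = 1 << 18;                  // VROUNDER
 *         for (j = 0; j < filterSize; j++)
 *             val += src[j][i] * filter[j];
 *         dest[i] = av_clip_uint8(val >> 19); // psraw $3 + packuswb
 *     }
 */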
#define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
    __asm__ volatile(\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    "pxor %%mm4, %%mm4 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    ASMALIGN(4) \
    "1: \n\t"\
    "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* srcData */\
    "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
    "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
    "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm1 \n\t" /* srcData */\
    "movq %%mm0, %%mm3 \n\t"\
    "punpcklwd %%mm1, %%mm0 \n\t"\
    "punpckhwd %%mm1, %%mm3 \n\t"\
    "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm3 \n\t"\
    "paddd %%mm0, %%mm4 \n\t"\
    "paddd %%mm3, %%mm5 \n\t"\
    "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* srcData */\
    "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
    "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "punpcklwd %%mm3, %%mm2 \n\t"\
    "punpckhwd %%mm3, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm2 \n\t"\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "paddd %%mm2, %%mm6 \n\t"\
    "paddd %%mm0, %%mm7 \n\t"\
    " jnz 1b \n\t"\
    "psrad $16, %%mm4 \n\t"\
    "psrad $16, %%mm5 \n\t"\
    "psrad $16, %%mm6 \n\t"\
    "psrad $16, %%mm7 \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
    "packssdw %%mm5, %%mm4 \n\t"\
    "packssdw %%mm7, %%mm6 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm0, %%mm6 \n\t"\
    "psraw $3, %%mm4 \n\t"\
    "psraw $3, %%mm6 \n\t"\
    "packuswb %%mm6, %%mm4 \n\t"\
    MOVNTQ(%%mm4, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "cmp %2, %%"REG_a" \n\t"\
    "lea " offset "(%0), %%"REG_d" \n\t"\
    "pxor %%mm4, %%mm4 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "jb 1b \n\t"\
    :: "r" (&c->redDither),\
       "r" (dest), "g" (width)\
    : "%"REG_a, "%"REG_d, "%"REG_S\
    );
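
/* The _ACCURATE variant consumes two filter taps per iteration (the APCK_*
 * constants describe that packed coefficient layout) and accumulates full
 * 32-bit pmaddwd products instead of pmulhw's truncated high words, trading
 * speed for correct rounding; SWS_ACCURATE_RND selects it below. */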
#define YSCALEYUV2YV121 \
    "mov %2, %%"REG_a" \n\t"\
    ASMALIGN(4) /* FIXME Unroll? */\
    "1: \n\t"\
    "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
    "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
    "psraw $7, %%mm0 \n\t"\
    "psraw $7, %%mm1 \n\t"\
    "packuswb %%mm1, %%mm0 \n\t"\
    MOVNTQ(%%mm0, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "jnc 1b \n\t"

#define YSCALEYUV2YV121_ACCURATE \
    "mov %2, %%"REG_a" \n\t"\
    "pcmpeqw %%mm7, %%mm7 \n\t"\
    "psrlw $15, %%mm7 \n\t"\
    "psllw $6, %%mm7 \n\t"\
    ASMALIGN(4) /* FIXME Unroll? */\
    "1: \n\t"\
    "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
    "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
    "paddsw %%mm7, %%mm0 \n\t"\
    "paddsw %%mm7, %%mm1 \n\t"\
    "psraw $7, %%mm0 \n\t"\
    "psraw $7, %%mm1 \n\t"\
    "packuswb %%mm1, %%mm0 \n\t"\
    MOVNTQ(%%mm0, (%1, %%REGa))\
    "add $8, %%"REG_a" \n\t"\
    "jnc 1b \n\t"
/*
    :: "m" (-lumFilterSize), "m" (-chrFilterSize),
       "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
       "r" (dest), "m" (dstW),
       "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
    : "%eax", "%ebx", "%ecx", "%edx", "%esi"
*/
#define YSCALEYUV2PACKEDX_UV \
    __asm__ volatile(\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    ASMALIGN(4)\
    "nop \n\t"\
    "1: \n\t"\
    "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
    "movq %%mm3, %%mm4 \n\t"\
    ASMALIGN(4)\
    "2: \n\t"\
    "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
    "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
    "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
    "add $16, %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t"\
    "pmulhw %%mm0, %%mm5 \n\t"\
    "paddw %%mm2, %%mm3 \n\t"\
    "paddw %%mm5, %%mm4 \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    " jnz 2b \n\t"\

#define YSCALEYUV2PACKEDX_YA(offset,coeff,src1,src2,dst1,dst2) \
    "lea "offset"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "movq "VROUNDER_OFFSET"(%0), "#dst1" \n\t"\
    "movq "#dst1", "#dst2" \n\t"\
    ASMALIGN(4)\
    "2: \n\t"\
    "movq 8(%%"REG_d"), "#coeff" \n\t" /* filterCoeff */\
    "movq (%%"REG_S", %%"REG_a", 2), "#src1" \n\t" /* Y1srcData */\
    "movq 8(%%"REG_S", %%"REG_a", 2), "#src2" \n\t" /* Y2srcData */\
    "add $16, %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pmulhw "#coeff", "#src1" \n\t"\
    "pmulhw "#coeff", "#src2" \n\t"\
    "paddw "#src1", "#dst1" \n\t"\
    "paddw "#src2", "#dst2" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    " jnz 2b \n\t"\

#define YSCALEYUV2PACKEDX \
    YSCALEYUV2PACKEDX_UV \
    YSCALEYUV2PACKEDX_YA(LUM_MMX_FILTER_OFFSET,%%mm0,%%mm2,%%mm5,%%mm1,%%mm7) \

#define YSCALEYUV2PACKEDX_END \
    :: "r" (&c->redDither), \
       "m" (dummy), "m" (dummy), "m" (dummy),\
       "r" (dest), "m" (dstW) \
    : "%"REG_a, "%"REG_d, "%"REG_S \
    );
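
/* The packed-output macros are deliberately unbalanced asm fragments:
 * YSCALEYUV2PACKEDX_UV opens the statement and filters the chroma planes
 * (the V plane lives VOF words after U, hence the AV_STRINGIFY(VOF)
 * offsets), the _YA part does the same for two luma quadwords, and
 * YSCALEYUV2PACKEDX_END supplies the operand lists and closes the
 * statement; the RGB conversion and writeout macros are spliced in between
 * by the callers further down. */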
#define YSCALEYUV2PACKEDX_ACCURATE_UV \
    __asm__ volatile(\
    "xor %%"REG_a", %%"REG_a" \n\t"\
    ASMALIGN(4)\
    "nop \n\t"\
    "1: \n\t"\
    "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pxor %%mm4, %%mm4 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    ASMALIGN(4)\
    "2: \n\t"\
    "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\
    "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
    "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
    "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\
    "movq %%mm0, %%mm3 \n\t"\
    "punpcklwd %%mm1, %%mm0 \n\t"\
    "punpckhwd %%mm1, %%mm3 \n\t"\
    "movq "STR(APCK_COEF)"(%%"REG_d"),%%mm1 \n\t" /* filterCoeff */\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm3 \n\t"\
    "paddd %%mm0, %%mm4 \n\t"\
    "paddd %%mm3, %%mm5 \n\t"\
    "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
    "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
    "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "punpcklwd %%mm3, %%mm2 \n\t"\
    "punpckhwd %%mm3, %%mm0 \n\t"\
    "pmaddwd %%mm1, %%mm2 \n\t"\
    "pmaddwd %%mm1, %%mm0 \n\t"\
    "paddd %%mm2, %%mm6 \n\t"\
    "paddd %%mm0, %%mm7 \n\t"\
    " jnz 2b \n\t"\
    "psrad $16, %%mm4 \n\t"\
    "psrad $16, %%mm5 \n\t"\
    "psrad $16, %%mm6 \n\t"\
    "psrad $16, %%mm7 \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
    "packssdw %%mm5, %%mm4 \n\t"\
    "packssdw %%mm7, %%mm6 \n\t"\
    "paddw %%mm0, %%mm4 \n\t"\
    "paddw %%mm0, %%mm6 \n\t"\
    "movq %%mm4, "U_TEMP"(%0) \n\t"\
    "movq %%mm6, "V_TEMP"(%0) \n\t"\

#define YSCALEYUV2PACKEDX_ACCURATE_YA(offset) \
    "lea "offset"(%0), %%"REG_d" \n\t"\
    "mov (%%"REG_d"), %%"REG_S" \n\t"\
    "pxor %%mm1, %%mm1 \n\t"\
    "pxor %%mm5, %%mm5 \n\t"\
    "pxor %%mm7, %%mm7 \n\t"\
    "pxor %%mm6, %%mm6 \n\t"\
    ASMALIGN(4)\
    "2: \n\t"\
    "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
    "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
    "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
    "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
    "movq %%mm0, %%mm3 \n\t"\
    "punpcklwd %%mm4, %%mm0 \n\t"\
    "punpckhwd %%mm4, %%mm3 \n\t"\
    "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\
    "pmaddwd %%mm4, %%mm0 \n\t"\
    "pmaddwd %%mm4, %%mm3 \n\t"\
    "paddd %%mm0, %%mm1 \n\t"\
    "paddd %%mm3, %%mm5 \n\t"\
    "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
    "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
    "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
    "test %%"REG_S", %%"REG_S" \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "punpcklwd %%mm3, %%mm2 \n\t"\
    "punpckhwd %%mm3, %%mm0 \n\t"\
    "pmaddwd %%mm4, %%mm2 \n\t"\
    "pmaddwd %%mm4, %%mm0 \n\t"\
    "paddd %%mm2, %%mm7 \n\t"\
    "paddd %%mm0, %%mm6 \n\t"\
    " jnz 2b \n\t"\
    "psrad $16, %%mm1 \n\t"\
    "psrad $16, %%mm5 \n\t"\
    "psrad $16, %%mm7 \n\t"\
    "psrad $16, %%mm6 \n\t"\
    "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
    "packssdw %%mm5, %%mm1 \n\t"\
    "packssdw %%mm6, %%mm7 \n\t"\
    "paddw %%mm0, %%mm1 \n\t"\
    "paddw %%mm0, %%mm7 \n\t"\
    "movq "U_TEMP"(%0), %%mm3 \n\t"\
    "movq "V_TEMP"(%0), %%mm4 \n\t"\

#define YSCALEYUV2PACKEDX_ACCURATE \
    YSCALEYUV2PACKEDX_ACCURATE_UV \
    YSCALEYUV2PACKEDX_ACCURATE_YA(LUM_MMX_FILTER_OFFSET)
#define YSCALEYUV2RGBX \
    "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
    "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"\

#define REAL_YSCALEYUV2PACKED(index, c) \
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1 \n\t"\
    "psraw $3, %%mm0 \n\t"\
    "psraw $3, %%mm1 \n\t"\
    "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
    "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
    "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
    "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
    "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
    "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
    "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
    "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
    "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
    "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
    "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
    "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
    "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\

#define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
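
/* REAL_YSCALEYUV2PACKED blends two source lines per plane, roughly
 * buf1>>7 + ((buf0-buf1)*alpha >> 16), with the per-frame alpha kept at
 * *_MMX_FILTER_OFFSET+8; the alpha is pre-shifted right by 3 up front so
 * the sums come out on the byte scale the YUY2 writer expects. */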
#define REAL_YSCALEYUV2RGB_UV(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
    "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
    "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
    "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
    "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
    "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
    "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\

#define REAL_YSCALEYUV2RGB_YA(index, c, b1, b2) \
    "movq ("#b1", "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
    "movq ("#b2", "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
    "movq 8("#b1", "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
    "movq 8("#b2", "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
    "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
    "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
    "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\

#define REAL_YSCALEYUV2RGB_COEFF(c) \
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"\

#define YSCALEYUV2RGB_YA(index, c, b1, b2) REAL_YSCALEYUV2RGB_YA(index, c, b1, b2)

#define YSCALEYUV2RGB(index, c) \
    REAL_YSCALEYUV2RGB_UV(index, c) \
    REAL_YSCALEYUV2RGB_YA(index, c, %0, %1) \
    REAL_YSCALEYUV2RGB_COEFF(c)
#define REAL_YSCALEYUV2PACKED1(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
    "psraw $7, %%mm3 \n\t" \
    "psraw $7, %%mm4 \n\t" \
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $7, %%mm1 \n\t" \
    "psraw $7, %%mm7 \n\t" \

#define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)

#define REAL_YSCALEYUV2RGB1(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
    "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
    "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"\

#define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)

#define REAL_YSCALEYUV2PACKED1b(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
    "psrlw $8, %%mm3 \n\t" \
    "psrlw $8, %%mm4 \n\t" \
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $7, %%mm1 \n\t" \
    "psraw $7, %%mm7 \n\t"

#define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)

// do vertical chrominance interpolation
#define REAL_YSCALEYUV2RGB1b(index, c) \
    "xor "#index", "#index" \n\t"\
    ASMALIGN(4)\
    "1: \n\t"\
    "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
    "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
    "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
    "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
    "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
    "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
    "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
    "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
    "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
    "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
    "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
    "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
    "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
    "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
    /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
    "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
    "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
    "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
    "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
    "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
    "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
    "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
    "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
    "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
    /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
    "paddw %%mm3, %%mm4 \n\t"\
    "movq %%mm2, %%mm0 \n\t"\
    "movq %%mm5, %%mm6 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    "punpcklwd %%mm2, %%mm2 \n\t"\
    "punpcklwd %%mm5, %%mm5 \n\t"\
    "punpcklwd %%mm4, %%mm4 \n\t"\
    "paddw %%mm1, %%mm2 \n\t"\
    "paddw %%mm1, %%mm5 \n\t"\
    "paddw %%mm1, %%mm4 \n\t"\
    "punpckhwd %%mm0, %%mm0 \n\t"\
    "punpckhwd %%mm6, %%mm6 \n\t"\
    "punpckhwd %%mm3, %%mm3 \n\t"\
    "paddw %%mm7, %%mm0 \n\t"\
    "paddw %%mm7, %%mm6 \n\t"\
    "paddw %%mm7, %%mm3 \n\t"\
    /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
    "packuswb %%mm0, %%mm2 \n\t"\
    "packuswb %%mm6, %%mm5 \n\t"\
    "packuswb %%mm3, %%mm4 \n\t"\

#define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
#define REAL_YSCALEYUV2RGB1_ALPHA(index) \
    "movq (%1, "#index", 2), %%mm7 \n\t" /* abuf0[index ] */\
    "movq 8(%1, "#index", 2), %%mm1 \n\t" /* abuf0[index+4] */\
    "psraw $7, %%mm7 \n\t" /* abuf0[index ] >>7 */\
    "psraw $7, %%mm1 \n\t" /* abuf0[index+4] >>7 */\
    "packuswb %%mm1, %%mm7 \n\t"

#define YSCALEYUV2RGB1_ALPHA(index) REAL_YSCALEYUV2RGB1_ALPHA(index)

#define REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) \
    "movq "#b", "#q2" \n\t" /* B */\
    "movq "#r", "#t" \n\t" /* R */\
    "punpcklbw "#g", "#b" \n\t" /* GBGBGBGB 0 */\
    "punpcklbw "#a", "#r" \n\t" /* ARARARAR 0 */\
    "punpckhbw "#g", "#q2" \n\t" /* GBGBGBGB 2 */\
    "punpckhbw "#a", "#t" \n\t" /* ARARARAR 2 */\
    "movq "#b", "#q0" \n\t" /* GBGBGBGB 0 */\
    "movq "#q2", "#q3" \n\t" /* GBGBGBGB 2 */\
    "punpcklwd "#r", "#q0" \n\t" /* ARGBARGB 0 */\
    "punpckhwd "#r", "#b" \n\t" /* ARGBARGB 1 */\
    "punpcklwd "#t", "#q2" \n\t" /* ARGBARGB 2 */\
    "punpckhwd "#t", "#q3" \n\t" /* ARGBARGB 3 */\
    \
    MOVNTQ( q0, (dst, index, 4))\
    MOVNTQ( b, 8(dst, index, 4))\
    MOVNTQ( q2, 16(dst, index, 4))\
    MOVNTQ( q3, 24(dst, index, 4))\
    \
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#define WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t) REAL_WRITEBGR32(dst, dstw, index, b, g, r, a, q0, q2, q3, t)
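
/* WRITEBGR32 interleaves four byte-packed channel registers (b,g,r,a, each
 * holding 8 pixels) into 8 BGRA dwords with two rounds of punpck, stores
 * 32 bytes via MOVNTQ, and advances the counter; the trailing cmp/jb pair
 * closes the "1:" loop opened by the YSCALE macros. */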
#define REAL_WRITERGB16(dst, dstw, index) \
    "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
    "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
    "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
    "psrlq $3, %%mm2 \n\t"\
    \
    "movq %%mm2, %%mm1 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    \
    "punpcklbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm5, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm5, %%mm1 \n\t"\
    \
    "psllq $3, %%mm3 \n\t"\
    "psllq $3, %%mm4 \n\t"\
    \
    "por %%mm3, %%mm2 \n\t"\
    "por %%mm4, %%mm1 \n\t"\
    \
    MOVNTQ(%%mm2, (dst, index, 2))\
    MOVNTQ(%%mm1, 8(dst, index, 2))\
    \
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#define WRITERGB16(dst, dstw, index) REAL_WRITERGB16(dst, dstw, index)

#define REAL_WRITERGB15(dst, dstw, index) \
    "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
    "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
    "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
    "psrlq $3, %%mm2 \n\t"\
    "psrlq $1, %%mm5 \n\t"\
    \
    "movq %%mm2, %%mm1 \n\t"\
    "movq %%mm4, %%mm3 \n\t"\
    \
    "punpcklbw %%mm7, %%mm3 \n\t"\
    "punpcklbw %%mm5, %%mm2 \n\t"\
    "punpckhbw %%mm7, %%mm4 \n\t"\
    "punpckhbw %%mm5, %%mm1 \n\t"\
    \
    "psllq $2, %%mm3 \n\t"\
    "psllq $2, %%mm4 \n\t"\
    \
    "por %%mm3, %%mm2 \n\t"\
    "por %%mm4, %%mm1 \n\t"\
    \
    MOVNTQ(%%mm2, (dst, index, 2))\
    MOVNTQ(%%mm1, 8(dst, index, 2))\
    \
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#define WRITERGB15(dst, dstw, index) REAL_WRITERGB15(dst, dstw, index)
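
/* WRITERGB15/WRITERGB16 pack to 1-5-5-5 / 5-6-5: each channel is masked to
 * its significant bits (bF8/bFC), shifted into position and OR-merged,
 * again 8 pixels per iteration; the DITHER1XBPP paddusb blocks in the
 * callers pre-bias the channels with per-line dither constants first. */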
#define WRITEBGR24OLD(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq %%mm2, %%mm1 \n\t" /* B */\
    "movq %%mm5, %%mm6 \n\t" /* R */\
    "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
    "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
    "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
    "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
    "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
    "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
    "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
    "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
    "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
    "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
    \
    "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
    "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
    "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 0 */\
    "pand "MANGLE(bm11111000)", %%mm0 \n\t" /* 00RGB000 0.5 */\
    "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
    "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
    "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
    "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
    \
    "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
    "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
    "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
    "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
    "pand "MANGLE(bm00001111)", %%mm2 \n\t" /* 0000RGBR 1 */\
    "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
    "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
    "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 2 */\
    "pand "MANGLE(bm11111000)", %%mm1 \n\t" /* 00RGB000 2.5 */\
    "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
    "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
    "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
    "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
    \
    "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
    "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
    "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
    "pand "MANGLE(bm00000111)", %%mm5 \n\t" /* 00000RGB 3 */\
    "pand "MANGLE(bm11111000)", %%mm3 \n\t" /* 00RGB000 3.5 */\
    "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
    "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
    "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
    \
    MOVNTQ(%%mm0, (dst))\
    MOVNTQ(%%mm2, 8(dst))\
    MOVNTQ(%%mm3, 16(dst))\
    "add $24, "#dst" \n\t"\
    \
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#define WRITEBGR24MMX(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq %%mm2, %%mm1 \n\t" /* B */\
    "movq %%mm5, %%mm6 \n\t" /* R */\
    "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
    "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
    "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
    "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
    "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
    "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
    "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
    "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
    "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
    "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
    \
    "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
    "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
    "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
    "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
    \
    "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
    "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
    "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
    "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
    \
    "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
    "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
    "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
    "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
    \
    "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
    "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
    "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
    "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
    MOVNTQ(%%mm0, (dst))\
    \
    "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
    "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
    "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
    "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
    MOVNTQ(%%mm6, 8(dst))\
    \
    "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
    "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
    "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
    MOVNTQ(%%mm5, 16(dst))\
    \
    "add $24, "#dst" \n\t"\
    \
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#define WRITEBGR24MMX2(dst, dstw, index) \
    /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
    "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
    "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
    "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
    "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
    "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
    \
    "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
    "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
    "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
    \
    "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
    "por %%mm1, %%mm6 \n\t"\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, (dst))\
    \
    "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
    "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
    "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
    "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
    \
    "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
    "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
    "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
    \
    "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, 8(dst))\
    \
    "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
    "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
    "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
    \
    "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
    "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
    "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
    \
    "por %%mm1, %%mm3 \n\t"\
    "por %%mm3, %%mm6 \n\t"\
    MOVNTQ(%%mm6, 16(dst))\
    \
    "add $24, "#dst" \n\t"\
    \
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#if HAVE_MMX2
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
#else
#undef WRITEBGR24
#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
#endif
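
/* Three generations of 24-bit writers: WRITEBGR24OLD shuffles bytes with
 * shift/mask sequences, WRITEBGR24MMX uses pure punpck/psllq/psrlq, and
 * WRITEBGR24MMX2 relies on pshufw plus the ff_M24A/B/C masks; the #if above
 * picks the pshufw variant whenever the template is built with HAVE_MMX2. */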
#define REAL_WRITEYUY2(dst, dstw, index) \
    "packuswb %%mm3, %%mm3 \n\t"\
    "packuswb %%mm4, %%mm4 \n\t"\
    "packuswb %%mm7, %%mm1 \n\t"\
    "punpcklbw %%mm4, %%mm3 \n\t"\
    "movq %%mm1, %%mm7 \n\t"\
    "punpcklbw %%mm3, %%mm1 \n\t"\
    "punpckhbw %%mm3, %%mm7 \n\t"\
    \
    MOVNTQ(%%mm1, (dst, index, 2))\
    MOVNTQ(%%mm7, 8(dst, index, 2))\
    \
    "add $8, "#index" \n\t"\
    "cmp "#dstw", "#index" \n\t"\
    " jb 1b \n\t"

#define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
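
/* yuv2yuvX: vertical scaling to planar output. The MMX path dispatches on
 * SWS_ACCURATE_RND (pmaddwd vs pmulhw macros above) and runs separately
 * over the Y, U, V and optional alpha planes; without MMX it falls back to
 * the AltiVec or generic C implementation. */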
static inline void RENAME(yuv2yuvX)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
                                    const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize, const int16_t **alpSrc,
                                    uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW)
{
#if HAVE_MMX
    if (!(c->flags & SWS_BITEXACT)) {
        if (c->flags & SWS_ACCURATE_RND) {
            if (uDest) {
                YSCALEYUV2YV12X_ACCURATE(   "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
                YSCALEYUV2YV12X_ACCURATE(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
            }
            if (CONFIG_SWSCALE_ALPHA && aDest) {
                YSCALEYUV2YV12X_ACCURATE(   "0", ALP_MMX_FILTER_OFFSET, aDest, dstW)
            }
            YSCALEYUV2YV12X_ACCURATE("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
        } else {
            if (uDest) {
                YSCALEYUV2YV12X(   "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
                YSCALEYUV2YV12X(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
            }
            if (CONFIG_SWSCALE_ALPHA && aDest) {
                YSCALEYUV2YV12X(   "0", ALP_MMX_FILTER_OFFSET, aDest, dstW)
            }
            YSCALEYUV2YV12X("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
        }
        return;
    }
#endif
#if HAVE_ALTIVEC
    yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
                          chrFilter, chrSrc, chrFilterSize,
                          dest, uDest, vDest, dstW, chrDstW);
#else //HAVE_ALTIVEC
    yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
                chrFilter, chrSrc, chrFilterSize,
                alpSrc, dest, uDest, vDest, aDest, dstW, chrDstW);
#endif //!HAVE_ALTIVEC
}
static inline void RENAME(yuv2nv12X)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
                                     const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
                                     uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
{
    yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
                 chrFilter, chrSrc, chrFilterSize,
                 dest, uDest, dstW, chrDstW, dstFormat);
}

static inline void RENAME(yuv2yuv1)(SwsContext *c, const int16_t *lumSrc, const int16_t *chrSrc, const int16_t *alpSrc,
                                    uint8_t *dest, uint8_t *uDest, uint8_t *vDest, uint8_t *aDest, long dstW, long chrDstW)
{
    int i;
#if HAVE_MMX
    if (!(c->flags & SWS_BITEXACT)) {
        long p= 4;
        uint8_t *src[4]= {alpSrc + dstW, lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW};
        uint8_t *dst[4]= {aDest, dest, uDest, vDest};
        x86_reg counter[4]= {dstW, dstW, chrDstW, chrDstW};
        if (c->flags & SWS_ACCURATE_RND) {
            while (p--) {
                if (dst[p]) {
                    __asm__ volatile(
                        YSCALEYUV2YV121_ACCURATE
                        :: "r" (src[p]), "r" (dst[p] + counter[p]),
                           "g" (-counter[p])
                        : "%"REG_a
                    );
                }
            }
        } else {
            while (p--) {
                if (dst[p]) {
                    __asm__ volatile(
                        YSCALEYUV2YV121
                        :: "r" (src[p]), "r" (dst[p] + counter[p]),
                           "g" (-counter[p])
                        : "%"REG_a
                    );
                }
            }
        }
        return;
    }
#endif
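    /* C fallback: after rounding (+64) and >>7 the values are expected to
     * stay within -256..511, so a single test of the 9th bit (val&256)
     * catches both underflow and overflow before clamping to 0..255. */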
    for (i=0; i<dstW; i++)
    {
        int val= (lumSrc[i]+64)>>7;
        if (val&256){
            if (val<0) val=0;
            else val=255;
        }
        dest[i]= val;
    }
    if (uDest)
        for (i=0; i<chrDstW; i++)
        {
            int u=(chrSrc[i ]+64)>>7;
            int v=(chrSrc[i + VOFW]+64)>>7;
            if ((u|v)&256){
                if (u<0) u=0;
                else if (u>255) u=255;
                if (v<0) v=0;
                else if (v>255) v=255;
            }
            uDest[i]= u;
            vDest[i]= v;
        }
    if (CONFIG_SWSCALE_ALPHA && aDest)
        for (i=0; i<dstW; i++){
            int val= (alpSrc[i]+64)>>7;
            aDest[i]= av_clip_uint8(val);
        }
}
/**
 * vertical scale YV12 to RGB
 */
static inline void RENAME(yuv2packedX)(SwsContext *c, const int16_t *lumFilter, const int16_t **lumSrc, int lumFilterSize,
                                       const int16_t *chrFilter, const int16_t **chrSrc, int chrFilterSize,
                                       const int16_t **alpSrc, uint8_t *dest, long dstW, long dstY)
{
#if HAVE_MMX
    x86_reg dummy=0;
    if (!(c->flags & SWS_BITEXACT)) {
        if (c->flags & SWS_ACCURATE_RND) {
            switch(c->dstFormat){
            case PIX_FMT_RGB32:
                if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
                    YSCALEYUV2PACKEDX_ACCURATE
                    YSCALEYUV2RGBX
                    "movq %%mm2, "U_TEMP"(%0) \n\t"
                    "movq %%mm4, "V_TEMP"(%0) \n\t"
                    "movq %%mm5, "Y_TEMP"(%0) \n\t"
                    YSCALEYUV2PACKEDX_ACCURATE_YA(ALP_MMX_FILTER_OFFSET)
                    "movq "Y_TEMP"(%0), %%mm5 \n\t"
                    "psraw $3, %%mm1 \n\t"
                    "psraw $3, %%mm7 \n\t"
                    "packuswb %%mm7, %%mm1 \n\t"
                    WRITEBGR32(%4, %5, %%REGa, %%mm3, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm2, %%mm6)
                    YSCALEYUV2PACKEDX_END
                }else{
                    YSCALEYUV2PACKEDX_ACCURATE
                    YSCALEYUV2RGBX
                    "pcmpeqd %%mm7, %%mm7 \n\t"
                    WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                    YSCALEYUV2PACKEDX_END
                }
                return;
            case PIX_FMT_BGR24:
                YSCALEYUV2PACKEDX_ACCURATE
                YSCALEYUV2RGBX
                "pxor %%mm7, %%mm7 \n\t"
                "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
                "add %4, %%"REG_c" \n\t"
                WRITEBGR24(%%REGc, %5, %%REGa)
                :: "r" (&c->redDither),
                   "m" (dummy), "m" (dummy), "m" (dummy),
                   "r" (dest), "m" (dstW)
                : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
                );
                return;
            case PIX_FMT_RGB555:
                YSCALEYUV2PACKEDX_ACCURATE
                YSCALEYUV2RGBX
                "pxor %%mm7, %%mm7 \n\t"
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
                "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
                "paddusb "RED_DITHER"(%0), %%mm5\n\t"
#endif
                WRITERGB15(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            case PIX_FMT_RGB565:
                YSCALEYUV2PACKEDX_ACCURATE
                YSCALEYUV2RGBX
                "pxor %%mm7, %%mm7 \n\t"
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
                "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
                "paddusb "RED_DITHER"(%0), %%mm5\n\t"
#endif
                WRITERGB16(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            case PIX_FMT_YUYV422:
                YSCALEYUV2PACKEDX_ACCURATE
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
                "psraw $3, %%mm3 \n\t"
                "psraw $3, %%mm4 \n\t"
                "psraw $3, %%mm1 \n\t"
                "psraw $3, %%mm7 \n\t"
                WRITEYUY2(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            }
        }else{
            switch(c->dstFormat)
            {
            case PIX_FMT_RGB32:
                if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
                    YSCALEYUV2PACKEDX
                    YSCALEYUV2RGBX
                    YSCALEYUV2PACKEDX_YA(ALP_MMX_FILTER_OFFSET, %%mm0, %%mm3, %%mm6, %%mm1, %%mm7)
                    "psraw $3, %%mm1 \n\t"
                    "psraw $3, %%mm7 \n\t"
                    "packuswb %%mm7, %%mm1 \n\t"
                    WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
                    YSCALEYUV2PACKEDX_END
                }else{
                    YSCALEYUV2PACKEDX
                    YSCALEYUV2RGBX
                    "pcmpeqd %%mm7, %%mm7 \n\t"
                    WRITEBGR32(%4, %5, %%REGa, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                    YSCALEYUV2PACKEDX_END
                }
                return;
            case PIX_FMT_BGR24:
                YSCALEYUV2PACKEDX
                YSCALEYUV2RGBX
                "pxor %%mm7, %%mm7 \n\t"
                "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c" \n\t" //FIXME optimize
                "add %4, %%"REG_c" \n\t"
                WRITEBGR24(%%REGc, %5, %%REGa)
                :: "r" (&c->redDither),
                   "m" (dummy), "m" (dummy), "m" (dummy),
                   "r" (dest), "m" (dstW)
                : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
                );
                return;
            case PIX_FMT_RGB555:
                YSCALEYUV2PACKEDX
                YSCALEYUV2RGBX
                "pxor %%mm7, %%mm7 \n\t"
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
                "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
                "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
#endif
                WRITERGB15(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            case PIX_FMT_RGB565:
                YSCALEYUV2PACKEDX
                YSCALEYUV2RGBX
                "pxor %%mm7, %%mm7 \n\t"
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
                "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
                "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
#endif
                WRITERGB16(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            case PIX_FMT_YUYV422:
                YSCALEYUV2PACKEDX
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
                "psraw $3, %%mm3 \n\t"
                "psraw $3, %%mm4 \n\t"
                "psraw $3, %%mm1 \n\t"
                "psraw $3, %%mm7 \n\t"
                WRITEYUY2(%4, %5, %%REGa)
                YSCALEYUV2PACKEDX_END
                return;
            }
        }
    }
#endif /* HAVE_MMX */
#if HAVE_ALTIVEC
    /* The following list of supported dstFormat values should
       match what's found in the body of ff_yuv2packedX_altivec() */
    if (!(c->flags & SWS_BITEXACT) && !c->alpPixBuf &&
        (c->dstFormat==PIX_FMT_ABGR || c->dstFormat==PIX_FMT_BGRA ||
         c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 ||
         c->dstFormat==PIX_FMT_RGBA || c->dstFormat==PIX_FMT_ARGB))
        ff_yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize,
                               chrFilter, chrSrc, chrFilterSize,
                               dest, dstW, dstY);
    else
#endif
        yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
                       chrFilter, chrSrc, chrFilterSize,
                       alpSrc, dest, dstW, dstY);
}
/**
 * vertical bilinear scale YV12 to RGB
 */
static inline void RENAME(yuv2packed2)(SwsContext *c, const uint16_t *buf0, const uint16_t *buf1, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
                                       const uint16_t *abuf0, const uint16_t *abuf1, uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
{
    int yalpha1=4095- yalpha;
    int uvalpha1=4095-uvalpha;
    int i;
#if HAVE_MMX
    if (!(c->flags & SWS_BITEXACT)) {
        switch(c->dstFormat)
        {
        //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
        case PIX_FMT_RGB32:
            if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
#if ARCH_X86_64
                __asm__ volatile(
                    YSCALEYUV2RGB(%%REGBP, %5)
                    YSCALEYUV2RGB_YA(%%REGBP, %5, %6, %7)
                    "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
                    "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
                    "packuswb %%mm7, %%mm1 \n\t"
                    WRITEBGR32(%4, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "r" (dest),
                       "a" (&c->redDither)
                       ,"r" (abuf0), "r" (abuf1)
                    : "%"REG_BP
                );
#else
                *(uint16_t **)(&c->u_temp)=abuf0;
                *(uint16_t **)(&c->v_temp)=abuf1;
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB(%%REGBP, %5)
                    "push %0 \n\t"
                    "push %1 \n\t"
                    "mov "U_TEMP"(%5), %0 \n\t"
                    "mov "V_TEMP"(%5), %1 \n\t"
                    YSCALEYUV2RGB_YA(%%REGBP, %5, %0, %1)
                    "psraw $3, %%mm1 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
                    "psraw $3, %%mm7 \n\t" /* abuf0[eax] - abuf1[eax] >>7*/
                    "packuswb %%mm7, %%mm1 \n\t"
                    "pop %1 \n\t"
                    "pop %0 \n\t"
                    WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm1, %%mm0, %%mm7, %%mm3, %%mm6)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
#endif
            }else{
                __asm__ volatile(
                    "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                    "mov %4, %%"REG_b" \n\t"
                    "push %%"REG_BP" \n\t"
                    YSCALEYUV2RGB(%%REGBP, %5)
                    "pcmpeqd %%mm7, %%mm7 \n\t"
                    WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
                    "pop %%"REG_BP" \n\t"
                    "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                    :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                       "a" (&c->redDither)
                );
            }
            return;
        case PIX_FMT_BGR24:
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2RGB(%%REGBP, %5)
                "pxor %%mm7, %%mm7 \n\t"
                WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                   "a" (&c->redDither)
            );
            return;
        case PIX_FMT_RGB555:
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2RGB(%%REGBP, %5)
                "pxor %%mm7, %%mm7 \n\t"
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
                WRITERGB15(%%REGb, 8280(%5), %%REGBP)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                   "a" (&c->redDither)
            );
            return;
        case PIX_FMT_RGB565:
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2RGB(%%REGBP, %5)
                "pxor %%mm7, %%mm7 \n\t"
                /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
                "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
                "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
                "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
                WRITERGB16(%%REGb, 8280(%5), %%REGBP)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                   "a" (&c->redDither)
            );
            return;
        case PIX_FMT_YUYV422:
            __asm__ volatile(
                "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
                "mov %4, %%"REG_b" \n\t"
                "push %%"REG_BP" \n\t"
                YSCALEYUV2PACKED(%%REGBP, %5)
                WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
                "pop %%"REG_BP" \n\t"
                "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
                :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
                   "a" (&c->redDither)
            );
            return;
        default: break;
        }
    }
#endif //HAVE_MMX
    YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C(void,0), YSCALE_YUV_2_GRAY16_2_C, YSCALE_YUV_2_MONO2_C)
}
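
/* Note the register juggling in the packed output paths above and below:
 * EBX/RBX (REG_b) and EBP (REG_BP) are wanted as extra pointers inside the
 * asm, so their original values are parked in the SwsContext (ESP_OFFSET)
 * and on the stack, then restored before each statement ends; that is why
 * these blocks list no clobbers for them. */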
  1289. /**
  1290. * YV12 to RGB without scaling or interpolating
  1291. */
  1292. static inline void RENAME(yuv2packed1)(SwsContext *c, const uint16_t *buf0, const uint16_t *uvbuf0, const uint16_t *uvbuf1,
  1293. const uint16_t *abuf0, uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
  1294. {
  1295. const int yalpha1=0;
  1296. int i;
  1297. const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
  1298. const int yalpha= 4096; //FIXME ...
  1299. if (flags&SWS_FULL_CHR_H_INT)
  1300. {
  1301. c->yuv2packed2(c, buf0, buf0, uvbuf0, uvbuf1, abuf0, abuf0, dest, dstW, 0, uvalpha, y);
  1302. return;
  1303. }
  1304. #if HAVE_MMX
  1305. if(!(flags & SWS_BITEXACT)){
  1306. if (uvalpha < 2048) // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
  1307. {
  1308. switch(dstFormat)
  1309. {
  1310. case PIX_FMT_RGB32:
  1311. if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
  1312. __asm__ volatile(
  1313. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1314. "mov %4, %%"REG_b" \n\t"
  1315. "push %%"REG_BP" \n\t"
  1316. YSCALEYUV2RGB1(%%REGBP, %5)
  1317. YSCALEYUV2RGB1_ALPHA(%%REGBP)
  1318. WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
  1319. "pop %%"REG_BP" \n\t"
  1320. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1321. :: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1322. "a" (&c->redDither)
  1323. );
  1324. }else{
  1325. __asm__ volatile(
  1326. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1327. "mov %4, %%"REG_b" \n\t"
  1328. "push %%"REG_BP" \n\t"
  1329. YSCALEYUV2RGB1(%%REGBP, %5)
  1330. "pcmpeqd %%mm7, %%mm7 \n\t"
  1331. WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
  1332. "pop %%"REG_BP" \n\t"
  1333. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1334. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1335. "a" (&c->redDither)
  1336. );
  1337. }
  1338. return;
  1339. case PIX_FMT_BGR24:
  1340. __asm__ volatile(
  1341. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1342. "mov %4, %%"REG_b" \n\t"
  1343. "push %%"REG_BP" \n\t"
  1344. YSCALEYUV2RGB1(%%REGBP, %5)
  1345. "pxor %%mm7, %%mm7 \n\t"
  1346. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1347. "pop %%"REG_BP" \n\t"
  1348. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1349. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1350. "a" (&c->redDither)
  1351. );
  1352. return;
  1353. case PIX_FMT_RGB555:
  1354. __asm__ volatile(
  1355. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1356. "mov %4, %%"REG_b" \n\t"
  1357. "push %%"REG_BP" \n\t"
  1358. YSCALEYUV2RGB1(%%REGBP, %5)
  1359. "pxor %%mm7, %%mm7 \n\t"
  1360. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1361. #ifdef DITHER1XBPP
  1362. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  1363. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  1364. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  1365. #endif
  1366. WRITERGB15(%%REGb, 8280(%5), %%REGBP)
  1367. "pop %%"REG_BP" \n\t"
  1368. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1369. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1370. "a" (&c->redDither)
  1371. );
  1372. return;
  1373. case PIX_FMT_RGB565:
  1374. __asm__ volatile(
  1375. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1376. "mov %4, %%"REG_b" \n\t"
  1377. "push %%"REG_BP" \n\t"
  1378. YSCALEYUV2RGB1(%%REGBP, %5)
  1379. "pxor %%mm7, %%mm7 \n\t"
  1380. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1381. #ifdef DITHER1XBPP
  1382. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  1383. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  1384. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  1385. #endif
  1386. WRITERGB16(%%REGb, 8280(%5), %%REGBP)
  1387. "pop %%"REG_BP" \n\t"
  1388. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1389. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1390. "a" (&c->redDither)
  1391. );
  1392. return;
  1393. case PIX_FMT_YUYV422:
  1394. __asm__ volatile(
  1395. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1396. "mov %4, %%"REG_b" \n\t"
  1397. "push %%"REG_BP" \n\t"
  1398. YSCALEYUV2PACKED1(%%REGBP, %5)
  1399. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1400. "pop %%"REG_BP" \n\t"
  1401. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1402. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1403. "a" (&c->redDither)
  1404. );
  1405. return;
  1406. }
  1407. }
  1408. else
  1409. {
  1410. switch(dstFormat)
  1411. {
  1412. case PIX_FMT_RGB32:
  1413. if (CONFIG_SWSCALE_ALPHA && c->alpPixBuf){
  1414. __asm__ volatile(
  1415. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1416. "mov %4, %%"REG_b" \n\t"
  1417. "push %%"REG_BP" \n\t"
  1418. YSCALEYUV2RGB1b(%%REGBP, %5)
  1419. YSCALEYUV2RGB1_ALPHA(%%REGBP)
  1420. WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
  1421. "pop %%"REG_BP" \n\t"
  1422. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1423. :: "c" (buf0), "d" (abuf0), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1424. "a" (&c->redDither)
  1425. );
  1426. }else{
  1427. __asm__ volatile(
  1428. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1429. "mov %4, %%"REG_b" \n\t"
  1430. "push %%"REG_BP" \n\t"
  1431. YSCALEYUV2RGB1b(%%REGBP, %5)
  1432. "pcmpeqd %%mm7, %%mm7 \n\t"
  1433. WRITEBGR32(%%REGb, 8280(%5), %%REGBP, %%mm2, %%mm4, %%mm5, %%mm7, %%mm0, %%mm1, %%mm3, %%mm6)
  1434. "pop %%"REG_BP" \n\t"
  1435. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1436. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1437. "a" (&c->redDither)
  1438. );
  1439. }
  1440. return;
  1441. case PIX_FMT_BGR24:
  1442. __asm__ volatile(
  1443. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1444. "mov %4, %%"REG_b" \n\t"
  1445. "push %%"REG_BP" \n\t"
  1446. YSCALEYUV2RGB1b(%%REGBP, %5)
  1447. "pxor %%mm7, %%mm7 \n\t"
  1448. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1449. "pop %%"REG_BP" \n\t"
  1450. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1451. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1452. "a" (&c->redDither)
  1453. );
  1454. return;
  1455. case PIX_FMT_RGB555:
  1456. __asm__ volatile(
  1457. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1458. "mov %4, %%"REG_b" \n\t"
  1459. "push %%"REG_BP" \n\t"
  1460. YSCALEYUV2RGB1b(%%REGBP, %5)
  1461. "pxor %%mm7, %%mm7 \n\t"
1462. /* %%mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1463. #ifdef DITHER1XBPP
  1464. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  1465. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  1466. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  1467. #endif
  1468. WRITERGB15(%%REGb, 8280(%5), %%REGBP)
  1469. "pop %%"REG_BP" \n\t"
  1470. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1471. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1472. "a" (&c->redDither)
  1473. );
  1474. return;
  1475. case PIX_FMT_RGB565:
  1476. __asm__ volatile(
  1477. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1478. "mov %4, %%"REG_b" \n\t"
  1479. "push %%"REG_BP" \n\t"
  1480. YSCALEYUV2RGB1b(%%REGBP, %5)
  1481. "pxor %%mm7, %%mm7 \n\t"
1482. /* %%mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1483. #ifdef DITHER1XBPP
  1484. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  1485. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  1486. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  1487. #endif
  1488. WRITERGB16(%%REGb, 8280(%5), %%REGBP)
  1489. "pop %%"REG_BP" \n\t"
  1490. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1491. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1492. "a" (&c->redDither)
  1493. );
  1494. return;
  1495. case PIX_FMT_YUYV422:
  1496. __asm__ volatile(
  1497. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1498. "mov %4, %%"REG_b" \n\t"
  1499. "push %%"REG_BP" \n\t"
  1500. YSCALEYUV2PACKED1b(%%REGBP, %5)
  1501. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1502. "pop %%"REG_BP" \n\t"
  1503. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1504. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1505. "a" (&c->redDither)
  1506. );
  1507. return;
  1508. }
  1509. }
  1510. }
  1511. #endif /* HAVE_MMX */
  1512. if (uvalpha < 2048)
  1513. {
  1514. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C(void,0), YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C)
  1515. }else{
  1516. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C(void,0), YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C)
  1517. }
  1518. }
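/* In the single-buffer case above, uvalpha is the vertical chroma blend
 * factor in [0,4096). Below the halfway point (2048) the *1 variants simply
 * take the nearest chroma line (uvbuf0 only); otherwise the *1b variants
 * average the two lines, i.e. roughly
 *     chr = uvalpha < 2048 ? uvbuf0[i] : (uvbuf0[i] + uvbuf1[i]) / 2;
 */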
  1519. //FIXME yuy2* can read up to 7 samples too much
  1520. static inline void RENAME(yuy2ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
  1521. {
  1522. #if HAVE_MMX
  1523. __asm__ volatile(
  1524. "movq "MANGLE(bm01010101)", %%mm2 \n\t"
  1525. "mov %0, %%"REG_a" \n\t"
  1526. "1: \n\t"
  1527. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1528. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1529. "pand %%mm2, %%mm0 \n\t"
  1530. "pand %%mm2, %%mm1 \n\t"
  1531. "packuswb %%mm1, %%mm0 \n\t"
  1532. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1533. "add $8, %%"REG_a" \n\t"
  1534. " js 1b \n\t"
  1535. : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
  1536. : "%"REG_a
  1537. );
  1538. #else
  1539. int i;
  1540. for (i=0; i<width; i++)
  1541. dst[i]= src[2*i];
  1542. #endif
  1543. }
  1544. static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
  1545. {
  1546. #if HAVE_MMX
  1547. __asm__ volatile(
  1548. "movq "MANGLE(bm01010101)", %%mm4 \n\t"
  1549. "mov %0, %%"REG_a" \n\t"
  1550. "1: \n\t"
  1551. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1552. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1553. "psrlw $8, %%mm0 \n\t"
  1554. "psrlw $8, %%mm1 \n\t"
  1555. "packuswb %%mm1, %%mm0 \n\t"
  1556. "movq %%mm0, %%mm1 \n\t"
  1557. "psrlw $8, %%mm0 \n\t"
  1558. "pand %%mm4, %%mm1 \n\t"
  1559. "packuswb %%mm0, %%mm0 \n\t"
  1560. "packuswb %%mm1, %%mm1 \n\t"
  1561. "movd %%mm0, (%3, %%"REG_a") \n\t"
  1562. "movd %%mm1, (%2, %%"REG_a") \n\t"
  1563. "add $4, %%"REG_a" \n\t"
  1564. " js 1b \n\t"
  1565. : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
  1566. : "%"REG_a
  1567. );
  1568. #else
  1569. int i;
  1570. for (i=0; i<width; i++)
  1571. {
  1572. dstU[i]= src1[4*i + 1];
  1573. dstV[i]= src1[4*i + 3];
  1574. }
  1575. #endif
  1576. assert(src1 == src2);
  1577. }
  1578. static inline void RENAME(LEToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
  1579. {
  1580. #if HAVE_MMX
  1581. __asm__ volatile(
  1582. "mov %0, %%"REG_a" \n\t"
  1583. "1: \n\t"
  1584. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1585. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1586. "movq (%2, %%"REG_a",2), %%mm2 \n\t"
  1587. "movq 8(%2, %%"REG_a",2), %%mm3 \n\t"
  1588. "psrlw $8, %%mm0 \n\t"
  1589. "psrlw $8, %%mm1 \n\t"
  1590. "psrlw $8, %%mm2 \n\t"
  1591. "psrlw $8, %%mm3 \n\t"
  1592. "packuswb %%mm1, %%mm0 \n\t"
  1593. "packuswb %%mm3, %%mm2 \n\t"
  1594. "movq %%mm0, (%3, %%"REG_a") \n\t"
  1595. "movq %%mm2, (%4, %%"REG_a") \n\t"
  1596. "add $8, %%"REG_a" \n\t"
  1597. " js 1b \n\t"
  1598. : : "g" ((x86_reg)-width), "r" (src1+width*2), "r" (src2+width*2), "r" (dstU+width), "r" (dstV+width)
  1599. : "%"REG_a
  1600. );
  1601. #else
  1602. int i;
  1603. for (i=0; i<width; i++)
  1604. {
  1605. dstU[i]= src1[2*i + 1];
  1606. dstV[i]= src2[2*i + 1];
  1607. }
  1608. #endif
  1609. }
1610. /* This is almost identical to the previous, and exists only because
1611. * yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses. */
  1612. static inline void RENAME(uyvyToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
  1613. {
  1614. #if HAVE_MMX
  1615. __asm__ volatile(
  1616. "mov %0, %%"REG_a" \n\t"
  1617. "1: \n\t"
  1618. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1619. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1620. "psrlw $8, %%mm0 \n\t"
  1621. "psrlw $8, %%mm1 \n\t"
  1622. "packuswb %%mm1, %%mm0 \n\t"
  1623. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1624. "add $8, %%"REG_a" \n\t"
  1625. " js 1b \n\t"
  1626. : : "g" ((x86_reg)-width), "r" (src+width*2), "r" (dst+width)
  1627. : "%"REG_a
  1628. );
  1629. #else
  1630. int i;
  1631. for (i=0; i<width; i++)
  1632. dst[i]= src[2*i+1];
  1633. #endif
  1634. }
  1635. static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
  1636. {
  1637. #if HAVE_MMX
  1638. __asm__ volatile(
  1639. "movq "MANGLE(bm01010101)", %%mm4 \n\t"
  1640. "mov %0, %%"REG_a" \n\t"
  1641. "1: \n\t"
  1642. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1643. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1644. "pand %%mm4, %%mm0 \n\t"
  1645. "pand %%mm4, %%mm1 \n\t"
  1646. "packuswb %%mm1, %%mm0 \n\t"
  1647. "movq %%mm0, %%mm1 \n\t"
  1648. "psrlw $8, %%mm0 \n\t"
  1649. "pand %%mm4, %%mm1 \n\t"
  1650. "packuswb %%mm0, %%mm0 \n\t"
  1651. "packuswb %%mm1, %%mm1 \n\t"
  1652. "movd %%mm0, (%3, %%"REG_a") \n\t"
  1653. "movd %%mm1, (%2, %%"REG_a") \n\t"
  1654. "add $4, %%"REG_a" \n\t"
  1655. " js 1b \n\t"
  1656. : : "g" ((x86_reg)-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
  1657. : "%"REG_a
  1658. );
  1659. #else
  1660. int i;
  1661. for (i=0; i<width; i++)
  1662. {
  1663. dstU[i]= src1[4*i + 0];
  1664. dstV[i]= src1[4*i + 2];
  1665. }
  1666. #endif
  1667. assert(src1 == src2);
  1668. }
  1669. static inline void RENAME(BEToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
  1670. {
  1671. #if HAVE_MMX
  1672. __asm__ volatile(
  1673. "movq "MANGLE(bm01010101)", %%mm4 \n\t"
  1674. "mov %0, %%"REG_a" \n\t"
  1675. "1: \n\t"
  1676. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1677. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1678. "movq (%2, %%"REG_a",2), %%mm2 \n\t"
  1679. "movq 8(%2, %%"REG_a",2), %%mm3 \n\t"
  1680. "pand %%mm4, %%mm0 \n\t"
  1681. "pand %%mm4, %%mm1 \n\t"
  1682. "pand %%mm4, %%mm2 \n\t"
  1683. "pand %%mm4, %%mm3 \n\t"
  1684. "packuswb %%mm1, %%mm0 \n\t"
  1685. "packuswb %%mm3, %%mm2 \n\t"
  1686. "movq %%mm0, (%3, %%"REG_a") \n\t"
  1687. "movq %%mm2, (%4, %%"REG_a") \n\t"
  1688. "add $8, %%"REG_a" \n\t"
  1689. " js 1b \n\t"
  1690. : : "g" ((x86_reg)-width), "r" (src1+width*2), "r" (src2+width*2), "r" (dstU+width), "r" (dstV+width)
  1691. : "%"REG_a
  1692. );
  1693. #else
  1694. int i;
  1695. for (i=0; i<width; i++)
  1696. {
  1697. dstU[i]= src1[2*i];
  1698. dstV[i]= src2[2*i];
  1699. }
  1700. #endif
  1701. }
  1702. #if HAVE_MMX
  1703. static inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, const uint8_t *src, long width, int srcFormat)
  1704. {
  1705. if(srcFormat == PIX_FMT_BGR24){
  1706. __asm__ volatile(
  1707. "movq "MANGLE(ff_bgr24toY1Coeff)", %%mm5 \n\t"
  1708. "movq "MANGLE(ff_bgr24toY2Coeff)", %%mm6 \n\t"
  1709. :
  1710. );
  1711. }else{
  1712. __asm__ volatile(
  1713. "movq "MANGLE(ff_rgb24toY1Coeff)", %%mm5 \n\t"
  1714. "movq "MANGLE(ff_rgb24toY2Coeff)", %%mm6 \n\t"
  1715. :
  1716. );
  1717. }
  1718. __asm__ volatile(
  1719. "movq "MANGLE(ff_bgr24toYOffset)", %%mm4 \n\t"
  1720. "mov %2, %%"REG_a" \n\t"
  1721. "pxor %%mm7, %%mm7 \n\t"
  1722. "1: \n\t"
  1723. PREFETCH" 64(%0) \n\t"
  1724. "movd (%0), %%mm0 \n\t"
  1725. "movd 2(%0), %%mm1 \n\t"
  1726. "movd 6(%0), %%mm2 \n\t"
  1727. "movd 8(%0), %%mm3 \n\t"
  1728. "add $12, %0 \n\t"
  1729. "punpcklbw %%mm7, %%mm0 \n\t"
  1730. "punpcklbw %%mm7, %%mm1 \n\t"
  1731. "punpcklbw %%mm7, %%mm2 \n\t"
  1732. "punpcklbw %%mm7, %%mm3 \n\t"
  1733. "pmaddwd %%mm5, %%mm0 \n\t"
  1734. "pmaddwd %%mm6, %%mm1 \n\t"
  1735. "pmaddwd %%mm5, %%mm2 \n\t"
  1736. "pmaddwd %%mm6, %%mm3 \n\t"
  1737. "paddd %%mm1, %%mm0 \n\t"
  1738. "paddd %%mm3, %%mm2 \n\t"
  1739. "paddd %%mm4, %%mm0 \n\t"
  1740. "paddd %%mm4, %%mm2 \n\t"
  1741. "psrad $15, %%mm0 \n\t"
  1742. "psrad $15, %%mm2 \n\t"
  1743. "packssdw %%mm2, %%mm0 \n\t"
  1744. "packuswb %%mm0, %%mm0 \n\t"
  1745. "movd %%mm0, (%1, %%"REG_a") \n\t"
  1746. "add $4, %%"REG_a" \n\t"
  1747. " js 1b \n\t"
  1748. : "+r" (src)
  1749. : "r" (dst+width), "g" ((x86_reg)-width)
  1750. : "%"REG_a
  1751. );
  1752. }
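/* Layout note for the loop above: each iteration consumes 12 bytes (four
 * 24bpp pixels) and emits four luma bytes. Two coefficient sets are needed
 * because 3-byte pixels straddle the 4-byte movd loads: mm5 multiplies the
 * dwords fetched at offsets 0 and 6, mm6 those at offsets 2 and 8, and each
 * paddd pair reassembles the full per-pixel dot products before packing. */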
  1753. static inline void RENAME(bgr24ToUV_mmx)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src, long width, int srcFormat)
  1754. {
  1755. __asm__ volatile(
  1756. "movq 24+%4, %%mm6 \n\t"
  1757. "mov %3, %%"REG_a" \n\t"
  1758. "pxor %%mm7, %%mm7 \n\t"
  1759. "1: \n\t"
  1760. PREFETCH" 64(%0) \n\t"
  1761. "movd (%0), %%mm0 \n\t"
  1762. "movd 2(%0), %%mm1 \n\t"
  1763. "punpcklbw %%mm7, %%mm0 \n\t"
  1764. "punpcklbw %%mm7, %%mm1 \n\t"
  1765. "movq %%mm0, %%mm2 \n\t"
  1766. "movq %%mm1, %%mm3 \n\t"
  1767. "pmaddwd %4, %%mm0 \n\t"
  1768. "pmaddwd 8+%4, %%mm1 \n\t"
  1769. "pmaddwd 16+%4, %%mm2 \n\t"
  1770. "pmaddwd %%mm6, %%mm3 \n\t"
  1771. "paddd %%mm1, %%mm0 \n\t"
  1772. "paddd %%mm3, %%mm2 \n\t"
  1773. "movd 6(%0), %%mm1 \n\t"
  1774. "movd 8(%0), %%mm3 \n\t"
  1775. "add $12, %0 \n\t"
  1776. "punpcklbw %%mm7, %%mm1 \n\t"
  1777. "punpcklbw %%mm7, %%mm3 \n\t"
  1778. "movq %%mm1, %%mm4 \n\t"
  1779. "movq %%mm3, %%mm5 \n\t"
  1780. "pmaddwd %4, %%mm1 \n\t"
  1781. "pmaddwd 8+%4, %%mm3 \n\t"
  1782. "pmaddwd 16+%4, %%mm4 \n\t"
  1783. "pmaddwd %%mm6, %%mm5 \n\t"
  1784. "paddd %%mm3, %%mm1 \n\t"
  1785. "paddd %%mm5, %%mm4 \n\t"
  1786. "movq "MANGLE(ff_bgr24toUVOffset)", %%mm3 \n\t"
  1787. "paddd %%mm3, %%mm0 \n\t"
  1788. "paddd %%mm3, %%mm2 \n\t"
  1789. "paddd %%mm3, %%mm1 \n\t"
  1790. "paddd %%mm3, %%mm4 \n\t"
  1791. "psrad $15, %%mm0 \n\t"
  1792. "psrad $15, %%mm2 \n\t"
  1793. "psrad $15, %%mm1 \n\t"
  1794. "psrad $15, %%mm4 \n\t"
  1795. "packssdw %%mm1, %%mm0 \n\t"
  1796. "packssdw %%mm4, %%mm2 \n\t"
  1797. "packuswb %%mm0, %%mm0 \n\t"
  1798. "packuswb %%mm2, %%mm2 \n\t"
  1799. "movd %%mm0, (%1, %%"REG_a") \n\t"
  1800. "movd %%mm2, (%2, %%"REG_a") \n\t"
  1801. "add $4, %%"REG_a" \n\t"
  1802. " js 1b \n\t"
  1803. : "+r" (src)
  1804. : "r" (dstU+width), "r" (dstV+width), "g" ((x86_reg)-width), "m"(ff_bgr24toUV[srcFormat == PIX_FMT_RGB24][0])
  1805. : "%"REG_a
  1806. );
  1807. }
  1808. #endif
  1809. static inline void RENAME(bgr24ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
  1810. {
  1811. #if HAVE_MMX
  1812. RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_BGR24);
  1813. #else
  1814. int i;
  1815. for (i=0; i<width; i++)
  1816. {
  1817. int b= src[i*3+0];
  1818. int g= src[i*3+1];
  1819. int r= src[i*3+2];
  1820. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
  1821. }
  1822. #endif /* HAVE_MMX */
  1823. }
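/* The rounding constant above is easier to read as
 *     33 << (RGB2YUV_SHIFT-1)  ==  (16 + 0.5) << RGB2YUV_SHIFT,
 * i.e. the +16 black-level offset of limited-range luma plus one half for
 * round-to-nearest, folded into a single add before the final shift.
 * Assuming RY/GY/BY are the (limited-range-scaled) BT.601 weights in
 * Q(RGB2YUV_SHIFT) fixed point, the scalar loop is just the dot product
 *     dst[i] = (RY*r + GY*g + BY*b) / 2^RGB2YUV_SHIFT + 16, rounded. */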
  1824. static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
  1825. {
  1826. #if HAVE_MMX
  1827. RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_BGR24);
  1828. #else
  1829. int i;
  1830. for (i=0; i<width; i++)
  1831. {
  1832. int b= src1[3*i + 0];
  1833. int g= src1[3*i + 1];
  1834. int r= src1[3*i + 2];
  1835. dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
  1836. dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
  1837. }
  1838. #endif /* HAVE_MMX */
  1839. assert(src1 == src2);
  1840. }
  1841. static inline void RENAME(bgr24ToUV_half)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
  1842. {
  1843. int i;
  1844. for (i=0; i<width; i++)
  1845. {
  1846. int b= src1[6*i + 0] + src1[6*i + 3];
  1847. int g= src1[6*i + 1] + src1[6*i + 4];
  1848. int r= src1[6*i + 2] + src1[6*i + 5];
  1849. dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
  1850. dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
  1851. }
  1852. assert(src1 == src2);
  1853. }
  1854. static inline void RENAME(rgb24ToY)(uint8_t *dst, const uint8_t *src, long width, uint32_t *unused)
  1855. {
  1856. #if HAVE_MMX
  1857. RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_RGB24);
  1858. #else
  1859. int i;
  1860. for (i=0; i<width; i++)
  1861. {
  1862. int r= src[i*3+0];
  1863. int g= src[i*3+1];
  1864. int b= src[i*3+2];
  1865. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
  1866. }
  1867. #endif
  1868. }
  1869. static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
  1870. {
  1871. #if HAVE_MMX
  1872. assert(src1==src2);
  1873. RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_RGB24);
  1874. #else
  1875. int i;
  1876. assert(src1==src2);
  1877. for (i=0; i<width; i++)
  1878. {
  1879. int r= src1[3*i + 0];
  1880. int g= src1[3*i + 1];
  1881. int b= src1[3*i + 2];
  1882. dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
  1883. dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
  1884. }
  1885. #endif
  1886. }
  1887. static inline void RENAME(rgb24ToUV_half)(uint8_t *dstU, uint8_t *dstV, const uint8_t *src1, const uint8_t *src2, long width, uint32_t *unused)
  1888. {
  1889. int i;
  1890. assert(src1==src2);
  1891. for (i=0; i<width; i++)
  1892. {
  1893. int r= src1[6*i + 0] + src1[6*i + 3];
  1894. int g= src1[6*i + 1] + src1[6*i + 4];
  1895. int b= src1[6*i + 2] + src1[6*i + 5];
  1896. dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
  1897. dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
  1898. }
  1899. }
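/* In the *_half variants each of b/g/r is the sum of two horizontally
 * adjacent pixels, so every term carries an extra factor of two. The
 * rounding constant keeps the same reading as in the full-resolution case:
 *     257 << RGB2YUV_SHIFT  ==  (128 + 0.5) << (RGB2YUV_SHIFT+1),
 * i.e. the +128 chroma bias plus a rounding half, pre-scaled by the factor
 * of two that the final >> (RGB2YUV_SHIFT+1) divides back out. */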
  1900. // bilinear / bicubic scaling
  1901. static inline void RENAME(hScale)(int16_t *dst, int dstW, const uint8_t *src, int srcW, int xInc,
  1902. const int16_t *filter, const int16_t *filterPos, long filterSize)
  1903. {
  1904. #if HAVE_MMX
  1905. assert(filterSize % 4 == 0 && filterSize>0);
1906. if (filterSize==4) // Always true for upscaling, sometimes for downscaling, too.
  1907. {
  1908. x86_reg counter= -2*dstW;
  1909. filter-= counter*2;
  1910. filterPos-= counter/2;
  1911. dst-= counter/2;
  1912. __asm__ volatile(
  1913. #if defined(PIC)
  1914. "push %%"REG_b" \n\t"
  1915. #endif
  1916. "pxor %%mm7, %%mm7 \n\t"
  1917. "push %%"REG_BP" \n\t" // we use 7 regs here ...
  1918. "mov %%"REG_a", %%"REG_BP" \n\t"
  1919. ASMALIGN(4)
  1920. "1: \n\t"
  1921. "movzwl (%2, %%"REG_BP"), %%eax \n\t"
  1922. "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
  1923. "movq (%1, %%"REG_BP", 4), %%mm1 \n\t"
  1924. "movq 8(%1, %%"REG_BP", 4), %%mm3 \n\t"
  1925. "movd (%3, %%"REG_a"), %%mm0 \n\t"
  1926. "movd (%3, %%"REG_b"), %%mm2 \n\t"
  1927. "punpcklbw %%mm7, %%mm0 \n\t"
  1928. "punpcklbw %%mm7, %%mm2 \n\t"
  1929. "pmaddwd %%mm1, %%mm0 \n\t"
  1930. "pmaddwd %%mm2, %%mm3 \n\t"
  1931. "movq %%mm0, %%mm4 \n\t"
  1932. "punpckldq %%mm3, %%mm0 \n\t"
  1933. "punpckhdq %%mm3, %%mm4 \n\t"
  1934. "paddd %%mm4, %%mm0 \n\t"
  1935. "psrad $7, %%mm0 \n\t"
  1936. "packssdw %%mm0, %%mm0 \n\t"
  1937. "movd %%mm0, (%4, %%"REG_BP") \n\t"
  1938. "add $4, %%"REG_BP" \n\t"
  1939. " jnc 1b \n\t"
  1940. "pop %%"REG_BP" \n\t"
  1941. #if defined(PIC)
  1942. "pop %%"REG_b" \n\t"
  1943. #endif
  1944. : "+a" (counter)
  1945. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  1946. #if !defined(PIC)
  1947. : "%"REG_b
  1948. #endif
  1949. );
  1950. }
  1951. else if (filterSize==8)
  1952. {
  1953. x86_reg counter= -2*dstW;
  1954. filter-= counter*4;
  1955. filterPos-= counter/2;
  1956. dst-= counter/2;
  1957. __asm__ volatile(
  1958. #if defined(PIC)
  1959. "push %%"REG_b" \n\t"
  1960. #endif
  1961. "pxor %%mm7, %%mm7 \n\t"
  1962. "push %%"REG_BP" \n\t" // we use 7 regs here ...
  1963. "mov %%"REG_a", %%"REG_BP" \n\t"
  1964. ASMALIGN(4)
  1965. "1: \n\t"
  1966. "movzwl (%2, %%"REG_BP"), %%eax \n\t"
  1967. "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
  1968. "movq (%1, %%"REG_BP", 8), %%mm1 \n\t"
  1969. "movq 16(%1, %%"REG_BP", 8), %%mm3 \n\t"
  1970. "movd (%3, %%"REG_a"), %%mm0 \n\t"
  1971. "movd (%3, %%"REG_b"), %%mm2 \n\t"
  1972. "punpcklbw %%mm7, %%mm0 \n\t"
  1973. "punpcklbw %%mm7, %%mm2 \n\t"
  1974. "pmaddwd %%mm1, %%mm0 \n\t"
  1975. "pmaddwd %%mm2, %%mm3 \n\t"
  1976. "movq 8(%1, %%"REG_BP", 8), %%mm1 \n\t"
  1977. "movq 24(%1, %%"REG_BP", 8), %%mm5 \n\t"
  1978. "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
  1979. "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
  1980. "punpcklbw %%mm7, %%mm4 \n\t"
  1981. "punpcklbw %%mm7, %%mm2 \n\t"
  1982. "pmaddwd %%mm1, %%mm4 \n\t"
  1983. "pmaddwd %%mm2, %%mm5 \n\t"
  1984. "paddd %%mm4, %%mm0 \n\t"
  1985. "paddd %%mm5, %%mm3 \n\t"
  1986. "movq %%mm0, %%mm4 \n\t"
  1987. "punpckldq %%mm3, %%mm0 \n\t"
  1988. "punpckhdq %%mm3, %%mm4 \n\t"
  1989. "paddd %%mm4, %%mm0 \n\t"
  1990. "psrad $7, %%mm0 \n\t"
  1991. "packssdw %%mm0, %%mm0 \n\t"
  1992. "movd %%mm0, (%4, %%"REG_BP") \n\t"
  1993. "add $4, %%"REG_BP" \n\t"
  1994. " jnc 1b \n\t"
  1995. "pop %%"REG_BP" \n\t"
  1996. #if defined(PIC)
  1997. "pop %%"REG_b" \n\t"
  1998. #endif
  1999. : "+a" (counter)
  2000. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  2001. #if !defined(PIC)
  2002. : "%"REG_b
  2003. #endif
  2004. );
  2005. }
  2006. else
  2007. {
  2008. uint8_t *offset = src+filterSize;
  2009. x86_reg counter= -2*dstW;
  2010. //filter-= counter*filterSize/2;
  2011. filterPos-= counter/2;
  2012. dst-= counter/2;
  2013. __asm__ volatile(
  2014. "pxor %%mm7, %%mm7 \n\t"
  2015. ASMALIGN(4)
  2016. "1: \n\t"
  2017. "mov %2, %%"REG_c" \n\t"
  2018. "movzwl (%%"REG_c", %0), %%eax \n\t"
  2019. "movzwl 2(%%"REG_c", %0), %%edx \n\t"
  2020. "mov %5, %%"REG_c" \n\t"
  2021. "pxor %%mm4, %%mm4 \n\t"
  2022. "pxor %%mm5, %%mm5 \n\t"
  2023. "2: \n\t"
  2024. "movq (%1), %%mm1 \n\t"
  2025. "movq (%1, %6), %%mm3 \n\t"
  2026. "movd (%%"REG_c", %%"REG_a"), %%mm0 \n\t"
  2027. "movd (%%"REG_c", %%"REG_d"), %%mm2 \n\t"
  2028. "punpcklbw %%mm7, %%mm0 \n\t"
  2029. "punpcklbw %%mm7, %%mm2 \n\t"
  2030. "pmaddwd %%mm1, %%mm0 \n\t"
  2031. "pmaddwd %%mm2, %%mm3 \n\t"
  2032. "paddd %%mm3, %%mm5 \n\t"
  2033. "paddd %%mm0, %%mm4 \n\t"
  2034. "add $8, %1 \n\t"
  2035. "add $4, %%"REG_c" \n\t"
  2036. "cmp %4, %%"REG_c" \n\t"
  2037. " jb 2b \n\t"
  2038. "add %6, %1 \n\t"
  2039. "movq %%mm4, %%mm0 \n\t"
  2040. "punpckldq %%mm5, %%mm4 \n\t"
  2041. "punpckhdq %%mm5, %%mm0 \n\t"
  2042. "paddd %%mm0, %%mm4 \n\t"
  2043. "psrad $7, %%mm4 \n\t"
  2044. "packssdw %%mm4, %%mm4 \n\t"
  2045. "mov %3, %%"REG_a" \n\t"
  2046. "movd %%mm4, (%%"REG_a", %0) \n\t"
  2047. "add $4, %0 \n\t"
  2048. " jnc 1b \n\t"
  2049. : "+r" (counter), "+r" (filter)
  2050. : "m" (filterPos), "m" (dst), "m"(offset),
  2051. "m" (src), "r" ((x86_reg)filterSize*2)
  2052. : "%"REG_a, "%"REG_c, "%"REG_d
  2053. );
  2054. }
  2055. #else
  2056. #if HAVE_ALTIVEC
  2057. hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
  2058. #else
  2059. int i;
  2060. for (i=0; i<dstW; i++)
  2061. {
  2062. int j;
  2063. int srcPos= filterPos[i];
  2064. int val=0;
  2065. //printf("filterPos: %d\n", filterPos[i]);
  2066. for (j=0; j<filterSize; j++)
  2067. {
  2068. //printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
  2069. val += ((int)src[srcPos + j])*filter[filterSize*i + j];
  2070. }
  2071. //filter += hFilterSize;
  2072. dst[i] = FFMIN(val>>7, (1<<15)-1); // the cubic equation does overflow ...
  2073. //dst[i] = val>>7;
  2074. }
  2075. #endif /* HAVE_ALTIVEC */
  2076. #endif /* HAVE_MMX */
  2077. }
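/* Reference semantics of hScale(), independent of the MMX/AltiVec paths:
 * for each output sample i,
 *     dst[i] = FFMIN((sum over j of src[filterPos[i]+j] * filter[filterSize*i+j]) >> 7,
 *                    (1<<15)-1);
 * The taps are signed 16-bit values built so that each output's taps sum to
 * about 1<<14, so an 8-bit source becomes a 15-bit intermediate after the
 * >>7 (8 + 14 - 7 = 15). For example, a 4-tap identity filter would be taps
 * {16384, 0, 0, 0} with filterPos[i] = i, giving dst[i] = src[i] << 7. */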
  2078. static inline void RENAME(hyscale_fast)(SwsContext *c, int16_t *dst,
  2079. int dstWidth, const uint8_t *src, int srcW,
  2080. int xInc)
  2081. {
  2082. int i;
  2083. unsigned int xpos=0;
  2084. for (i=0;i<dstWidth;i++)
  2085. {
  2086. register unsigned int xx=xpos>>16;
  2087. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2088. dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
  2089. xpos+=xInc;
  2090. }
  2091. }
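/* xpos walks the source in 16.16 fixed point: xx is the integer sample and
 * xalpha the top 7 fractional bits, making each output an 8.7 fixed-point
 * linear blend. A worked example, assuming xInc ~= (srcW<<16)/dstW (the
 * setup code computes it with extra rounding): srcW=4, dstW=8 gives
 * xInc=0x8000, so at i=3, xpos=0x18000, xx=1, xalpha=64 and
 *     dst[3] = (src[1]<<7) + (src[2]-src[1])*64,
 * the exact midpoint of the two source samples in 15-bit precision. */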
  2092. // *** horizontal scale Y line to temp buffer
  2093. static inline void RENAME(hyscale)(SwsContext *c, uint16_t *dst, long dstWidth, const uint8_t *src, int srcW, int xInc,
  2094. int flags, const int16_t *hLumFilter,
  2095. const int16_t *hLumFilterPos, int hLumFilterSize,
  2096. int srcFormat, uint8_t *formatConvBuffer,
  2097. uint32_t *pal, int isAlpha)
  2098. {
  2099. int32_t *mmx2FilterPos = c->lumMmx2FilterPos;
  2100. int16_t *mmx2Filter = c->lumMmx2Filter;
  2101. int canMMX2BeUsed = c->canMMX2BeUsed;
  2102. void *funnyYCode = c->funnyYCode;
  2103. void (*internal_func)(uint8_t *, const uint8_t *, long, uint32_t *) = isAlpha ? c->hascale_internal : c->hyscale_internal;
  2104. if (isAlpha) {
  2105. if (srcFormat == PIX_FMT_RGB32 || srcFormat == PIX_FMT_BGR32 )
  2106. src += 3;
  2107. } else {
  2108. if (srcFormat == PIX_FMT_RGB32_1 || srcFormat == PIX_FMT_BGR32_1)
  2109. src += ALT32_CORR;
  2110. }
  2111. if (internal_func) {
  2112. internal_func(formatConvBuffer, src, srcW, pal);
  2113. src= formatConvBuffer;
  2114. }
  2115. #if HAVE_MMX
  2116. // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
  2117. if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2118. #else
  2119. if (!(flags&SWS_FAST_BILINEAR))
  2120. #endif
  2121. {
  2122. c->hScale(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
  2123. }
  2124. else // fast bilinear upscale / crap downscale
  2125. {
  2126. #if ARCH_X86 && CONFIG_GPL
  2127. #if HAVE_MMX2
  2128. int i;
  2129. #if defined(PIC)
  2130. uint64_t ebxsave __attribute__((aligned(8)));
  2131. #endif
  2132. if (canMMX2BeUsed)
  2133. {
  2134. __asm__ volatile(
  2135. #if defined(PIC)
  2136. "mov %%"REG_b", %5 \n\t"
  2137. #endif
  2138. "pxor %%mm7, %%mm7 \n\t"
  2139. "mov %0, %%"REG_c" \n\t"
  2140. "mov %1, %%"REG_D" \n\t"
  2141. "mov %2, %%"REG_d" \n\t"
  2142. "mov %3, %%"REG_b" \n\t"
  2143. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2144. PREFETCH" (%%"REG_c") \n\t"
  2145. PREFETCH" 32(%%"REG_c") \n\t"
  2146. PREFETCH" 64(%%"REG_c") \n\t"
  2147. #if ARCH_X86_64
  2148. #define FUNNY_Y_CODE \
  2149. "movl (%%"REG_b"), %%esi \n\t"\
  2150. "call *%4 \n\t"\
  2151. "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
  2152. "add %%"REG_S", %%"REG_c" \n\t"\
  2153. "add %%"REG_a", %%"REG_D" \n\t"\
  2154. "xor %%"REG_a", %%"REG_a" \n\t"\
  2155. #else
  2156. #define FUNNY_Y_CODE \
  2157. "movl (%%"REG_b"), %%esi \n\t"\
  2158. "call *%4 \n\t"\
  2159. "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
  2160. "add %%"REG_a", %%"REG_D" \n\t"\
  2161. "xor %%"REG_a", %%"REG_a" \n\t"\
  2162. #endif /* ARCH_X86_64 */
  2163. FUNNY_Y_CODE
  2164. FUNNY_Y_CODE
  2165. FUNNY_Y_CODE
  2166. FUNNY_Y_CODE
  2167. FUNNY_Y_CODE
  2168. FUNNY_Y_CODE
  2169. FUNNY_Y_CODE
  2170. FUNNY_Y_CODE
  2171. #if defined(PIC)
  2172. "mov %5, %%"REG_b" \n\t"
  2173. #endif
  2174. :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2175. "m" (funnyYCode)
  2176. #if defined(PIC)
  2177. ,"m" (ebxsave)
  2178. #endif
  2179. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
  2180. #if !defined(PIC)
  2181. ,"%"REG_b
  2182. #endif
  2183. );
  2184. for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
  2185. }
  2186. else
  2187. {
  2188. #endif /* HAVE_MMX2 */
  2189. x86_reg xInc_shr16 = xInc >> 16;
  2190. uint16_t xInc_mask = xInc & 0xffff;
2191. //No MMX, just plain x86 asm ...
  2192. __asm__ volatile(
  2193. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2194. "xor %%"REG_d", %%"REG_d" \n\t" // xx
  2195. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2196. ASMALIGN(4)
  2197. "1: \n\t"
  2198. "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
  2199. "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2200. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2201. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2202. "shll $16, %%edi \n\t"
  2203. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2204. "mov %1, %%"REG_D" \n\t"
  2205. "shrl $9, %%esi \n\t"
  2206. "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
  2207. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2208. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2209. "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
  2210. "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2211. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2212. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2213. "shll $16, %%edi \n\t"
  2214. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2215. "mov %1, %%"REG_D" \n\t"
  2216. "shrl $9, %%esi \n\t"
  2217. "movw %%si, 2(%%"REG_D", %%"REG_a", 2) \n\t"
  2218. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2219. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2220. "add $2, %%"REG_a" \n\t"
  2221. "cmp %2, %%"REG_a" \n\t"
  2222. " jb 1b \n\t"
  2223. :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
  2224. : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
  2225. );
  2226. #if HAVE_MMX2
  2227. } //if MMX2 can't be used
  2228. #endif
  2229. #else
  2230. c->hyscale_fast(c, dst, dstWidth, src, srcW, xInc);
  2231. #endif /* ARCH_X86 */
  2232. }
  2233. if(!isAlpha && c->srcRange != c->dstRange && !(isRGB(c->dstFormat) || isBGR(c->dstFormat))){
  2234. int i;
2235. //FIXME all pal and rgb srcFormats could do this conversion as well
  2236. //FIXME all scalers more complex than bilinear could do half of this transform
  2237. if(c->srcRange){
  2238. for (i=0; i<dstWidth; i++)
  2239. dst[i]= (dst[i]*14071 + 33561947)>>14;
  2240. }else{
  2241. for (i=0; i<dstWidth; i++)
  2242. dst[i]= (FFMIN(dst[i],30189)*19077 - 39057361)>>14;
  2243. }
  2244. }
  2245. }
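/* The two affine transforms above move the 15-bit luma intermediate between
 * full (JPEG) and limited (MPEG) range, in Q14 fixed point: the srcRange
 * branch compresses full to limited,
 *     y' = y*219/255 + (16<<7)    (14071/16384 ~= 219/255,
 *                                  33561947/16384 ~= 16<<7 plus rounding),
 * and the other branch is the inverse expansion (19077/16384 ~= 255/219).
 * The FFMIN(dst[i],30189) clamp keeps the expanded value within 15 bits:
 * (30189*19077 - 39057361) >> 14 == 32767. */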
  2246. static inline void RENAME(hcscale_fast)(SwsContext *c, int16_t *dst,
  2247. int dstWidth, const uint8_t *src1,
  2248. const uint8_t *src2, int srcW, int xInc)
  2249. {
  2250. int i;
  2251. unsigned int xpos=0;
  2252. for (i=0;i<dstWidth;i++)
  2253. {
  2254. register unsigned int xx=xpos>>16;
  2255. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2256. dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
  2257. dst[i+VOFW]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
  2258. /* slower
  2259. dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
  2260. dst[i+VOFW]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
  2261. */
  2262. xpos+=xInc;
  2263. }
  2264. }
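/* Since xalpha is a 7-bit value here, xalpha^127 is simply 127-xalpha, so
 * each chroma output is the two-tap blend
 *     src[xx]*(127-xalpha) + src[xx+1]*xalpha
 * computed without a subtraction. Its weights sum to 127 rather than 128,
 * which is why the commented-out "slower" form is not bit-identical. */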
  2265. inline static void RENAME(hcscale)(SwsContext *c, uint16_t *dst, long dstWidth, const uint8_t *src1, const uint8_t *src2,
  2266. int srcW, int xInc, int flags, const int16_t *hChrFilter,
  2267. const int16_t *hChrFilterPos, int hChrFilterSize,
  2268. int srcFormat, uint8_t *formatConvBuffer,
  2269. uint32_t *pal)
  2270. {
  2271. int32_t *mmx2FilterPos = c->chrMmx2FilterPos;
  2272. int16_t *mmx2Filter = c->chrMmx2Filter;
  2273. int canMMX2BeUsed = c->canMMX2BeUsed;
  2274. void *funnyUVCode = c->funnyUVCode;
  2275. if (isGray(srcFormat) || srcFormat==PIX_FMT_MONOBLACK || srcFormat==PIX_FMT_MONOWHITE)
  2276. return;
  2277. if (srcFormat==PIX_FMT_RGB32_1 || srcFormat==PIX_FMT_BGR32_1) {
  2278. src1 += ALT32_CORR;
  2279. src2 += ALT32_CORR;
  2280. }
  2281. if (c->hcscale_internal) {
  2282. c->hcscale_internal(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2283. src1= formatConvBuffer;
  2284. src2= formatConvBuffer+VOFW;
  2285. }
  2286. #if HAVE_MMX
  2287. // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
  2288. if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2289. #else
  2290. if (!(flags&SWS_FAST_BILINEAR))
  2291. #endif
  2292. {
  2293. c->hScale(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2294. c->hScale(dst+VOFW, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2295. }
  2296. else // fast bilinear upscale / crap downscale
  2297. {
  2298. #if ARCH_X86 && CONFIG_GPL
  2299. #if HAVE_MMX2
  2300. int i;
  2301. #if defined(PIC)
  2302. uint64_t ebxsave __attribute__((aligned(8)));
  2303. #endif
  2304. if (canMMX2BeUsed)
  2305. {
  2306. __asm__ volatile(
  2307. #if defined(PIC)
  2308. "mov %%"REG_b", %6 \n\t"
  2309. #endif
  2310. "pxor %%mm7, %%mm7 \n\t"
  2311. "mov %0, %%"REG_c" \n\t"
  2312. "mov %1, %%"REG_D" \n\t"
  2313. "mov %2, %%"REG_d" \n\t"
  2314. "mov %3, %%"REG_b" \n\t"
  2315. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2316. PREFETCH" (%%"REG_c") \n\t"
  2317. PREFETCH" 32(%%"REG_c") \n\t"
  2318. PREFETCH" 64(%%"REG_c") \n\t"
  2319. #if ARCH_X86_64
  2320. #define FUNNY_UV_CODE \
  2321. "movl (%%"REG_b"), %%esi \n\t"\
  2322. "call *%4 \n\t"\
  2323. "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
  2324. "add %%"REG_S", %%"REG_c" \n\t"\
  2325. "add %%"REG_a", %%"REG_D" \n\t"\
  2326. "xor %%"REG_a", %%"REG_a" \n\t"\
  2327. #else
  2328. #define FUNNY_UV_CODE \
  2329. "movl (%%"REG_b"), %%esi \n\t"\
  2330. "call *%4 \n\t"\
  2331. "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
  2332. "add %%"REG_a", %%"REG_D" \n\t"\
  2333. "xor %%"REG_a", %%"REG_a" \n\t"\
  2334. #endif /* ARCH_X86_64 */
  2335. FUNNY_UV_CODE
  2336. FUNNY_UV_CODE
  2337. FUNNY_UV_CODE
  2338. FUNNY_UV_CODE
  2339. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2340. "mov %5, %%"REG_c" \n\t" // src
  2341. "mov %1, %%"REG_D" \n\t" // buf1
  2342. "add $"AV_STRINGIFY(VOF)", %%"REG_D" \n\t"
  2343. PREFETCH" (%%"REG_c") \n\t"
  2344. PREFETCH" 32(%%"REG_c") \n\t"
  2345. PREFETCH" 64(%%"REG_c") \n\t"
  2346. FUNNY_UV_CODE
  2347. FUNNY_UV_CODE
  2348. FUNNY_UV_CODE
  2349. FUNNY_UV_CODE
  2350. #if defined(PIC)
  2351. "mov %6, %%"REG_b" \n\t"
  2352. #endif
  2353. :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2354. "m" (funnyUVCode), "m" (src2)
  2355. #if defined(PIC)
  2356. ,"m" (ebxsave)
  2357. #endif
  2358. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
  2359. #if !defined(PIC)
  2360. ,"%"REG_b
  2361. #endif
  2362. );
  2363. for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
  2364. {
  2365. //printf("%d %d %d\n", dstWidth, i, srcW);
  2366. dst[i] = src1[srcW-1]*128;
  2367. dst[i+VOFW] = src2[srcW-1]*128;
  2368. }
  2369. }
  2370. else
  2371. {
  2372. #endif /* HAVE_MMX2 */
  2373. x86_reg xInc_shr16 = (x86_reg) (xInc >> 16);
  2374. uint16_t xInc_mask = xInc & 0xffff;
  2375. __asm__ volatile(
  2376. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2377. "xor %%"REG_d", %%"REG_d" \n\t" // xx
  2378. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2379. ASMALIGN(4)
  2380. "1: \n\t"
  2381. "mov %0, %%"REG_S" \n\t"
  2382. "movzbl (%%"REG_S", %%"REG_d"), %%edi \n\t" //src[xx]
  2383. "movzbl 1(%%"REG_S", %%"REG_d"), %%esi \n\t" //src[xx+1]
  2384. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2385. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2386. "shll $16, %%edi \n\t"
  2387. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2388. "mov %1, %%"REG_D" \n\t"
  2389. "shrl $9, %%esi \n\t"
  2390. "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
  2391. "movzbl (%5, %%"REG_d"), %%edi \n\t" //src[xx]
  2392. "movzbl 1(%5, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2393. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2394. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2395. "shll $16, %%edi \n\t"
  2396. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2397. "mov %1, %%"REG_D" \n\t"
  2398. "shrl $9, %%esi \n\t"
  2399. "movw %%si, "AV_STRINGIFY(VOF)"(%%"REG_D", %%"REG_a", 2) \n\t"
  2400. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2401. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2402. "add $1, %%"REG_a" \n\t"
  2403. "cmp %2, %%"REG_a" \n\t"
  2404. " jb 1b \n\t"
  2405. /* GCC 3.3 makes MPlayer crash on IA-32 machines when using "g" operand here,
  2406. which is needed to support GCC 4.0. */
  2407. #if ARCH_X86_64 && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
  2408. :: "m" (src1), "m" (dst), "g" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
  2409. #else
  2410. :: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
  2411. #endif
  2412. "r" (src2)
  2413. : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
  2414. );
  2415. #if HAVE_MMX2
  2416. } //if MMX2 can't be used
  2417. #endif
  2418. #else
  2419. c->hcscale_fast(c, dst, dstWidth, src1, src2, srcW, xInc);
  2420. #endif /* ARCH_X86 */
  2421. }
  2422. if(c->srcRange != c->dstRange && !(isRGB(c->dstFormat) || isBGR(c->dstFormat))){
  2423. int i;
2424. //FIXME all pal and rgb srcFormats could do this conversion as well
  2425. //FIXME all scalers more complex than bilinear could do half of this transform
  2426. if(c->srcRange){
  2427. for (i=0; i<dstWidth; i++){
  2428. dst[i ]= (dst[i ]*1799 + 4081085)>>11; //1469
  2429. dst[i+VOFW]= (dst[i+VOFW]*1799 + 4081085)>>11; //1469
  2430. }
  2431. }else{
  2432. for (i=0; i<dstWidth; i++){
  2433. dst[i ]= (FFMIN(dst[i ],30775)*4663 - 9289992)>>12; //-264
  2434. dst[i+VOFW]= (FFMIN(dst[i+VOFW],30775)*4663 - 9289992)>>12; //-264
  2435. }
  2436. }
  2437. }
  2438. }
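/* Chroma range conversion mirrors the luma case, in Q11/Q12 fixed point:
 * 1799/2048 ~= 224/255 (full -> limited) and 4663/4096 ~= 255/224 (inverse),
 * with the offsets chosen so that neutral chroma stays put, e.g.
 *     (16384*1799 + 4081085) >> 11 == 16384,
 * i.e. 128<<7 maps back to itself. */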
  2439. static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
  2440. int srcSliceH, uint8_t* dst[], int dstStride[]){
2441. /* load a few things into local vars to make the code more readable and faster */
  2442. const int srcW= c->srcW;
  2443. const int dstW= c->dstW;
  2444. const int dstH= c->dstH;
  2445. const int chrDstW= c->chrDstW;
  2446. const int chrSrcW= c->chrSrcW;
  2447. const int lumXInc= c->lumXInc;
  2448. const int chrXInc= c->chrXInc;
  2449. const int dstFormat= c->dstFormat;
  2450. const int srcFormat= c->srcFormat;
  2451. const int flags= c->flags;
  2452. int16_t *vLumFilterPos= c->vLumFilterPos;
  2453. int16_t *vChrFilterPos= c->vChrFilterPos;
  2454. int16_t *hLumFilterPos= c->hLumFilterPos;
  2455. int16_t *hChrFilterPos= c->hChrFilterPos;
  2456. int16_t *vLumFilter= c->vLumFilter;
  2457. int16_t *vChrFilter= c->vChrFilter;
  2458. int16_t *hLumFilter= c->hLumFilter;
  2459. int16_t *hChrFilter= c->hChrFilter;
  2460. int32_t *lumMmxFilter= c->lumMmxFilter;
  2461. int32_t *chrMmxFilter= c->chrMmxFilter;
  2462. int32_t *alpMmxFilter= c->alpMmxFilter;
  2463. const int vLumFilterSize= c->vLumFilterSize;
  2464. const int vChrFilterSize= c->vChrFilterSize;
  2465. const int hLumFilterSize= c->hLumFilterSize;
  2466. const int hChrFilterSize= c->hChrFilterSize;
  2467. int16_t **lumPixBuf= c->lumPixBuf;
  2468. int16_t **chrPixBuf= c->chrPixBuf;
  2469. int16_t **alpPixBuf= c->alpPixBuf;
  2470. const int vLumBufSize= c->vLumBufSize;
  2471. const int vChrBufSize= c->vChrBufSize;
  2472. uint8_t *formatConvBuffer= c->formatConvBuffer;
  2473. const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
  2474. const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
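/* -((-x) >> s) is a ceiling shift, i.e. ceil(x / 2^s): with srcSliceH == 5
 * and one level of vertical chroma subsampling, 5 >> 1 would claim only 2
 * chroma lines where the slice actually covers 3; -((-5) >> 1) == 3. */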
  2475. int lastDstY;
  2476. uint32_t *pal=c->pal_yuv;
  2477. /* vars which will change and which we need to store back in the context */
  2478. int dstY= c->dstY;
  2479. int lumBufIndex= c->lumBufIndex;
  2480. int chrBufIndex= c->chrBufIndex;
  2481. int lastInLumBuf= c->lastInLumBuf;
  2482. int lastInChrBuf= c->lastInChrBuf;
  2483. if (isPacked(c->srcFormat)){
  2484. src[0]=
  2485. src[1]=
  2486. src[2]=
  2487. src[3]= src[0];
  2488. srcStride[0]=
  2489. srcStride[1]=
  2490. srcStride[2]=
  2491. srcStride[3]= srcStride[0];
  2492. }
  2493. srcStride[1]<<= c->vChrDrop;
  2494. srcStride[2]<<= c->vChrDrop;
  2495. //printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
  2496. // (int)dst[0], (int)dst[1], (int)dst[2]);
  2497. #if 0 //self test FIXME move to a vfilter or something
  2498. {
  2499. static volatile int i=0;
  2500. i++;
  2501. if (srcFormat==PIX_FMT_YUV420P && i==1 && srcSliceH>= c->srcH)
  2502. selfTest(src, srcStride, c->srcW, c->srcH);
  2503. i--;
  2504. }
  2505. #endif
  2506. //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
  2507. //dstStride[0],dstStride[1],dstStride[2]);
  2508. if (dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0 || dstStride[3]%8 != 0)
  2509. {
  2510. static int warnedAlready=0; //FIXME move this into the context perhaps
  2511. if (flags & SWS_PRINT_INFO && !warnedAlready)
  2512. {
  2513. av_log(c, AV_LOG_WARNING, "Warning: dstStride is not aligned!\n"
  2514. " ->cannot do aligned memory accesses anymore\n");
  2515. warnedAlready=1;
  2516. }
  2517. }
2518. /* Note: the user might start scaling the picture in the middle, so this
2519. will not get executed. This is not really intended, but it works
2520. currently, so people might do it. */
  2521. if (srcSliceY ==0){
  2522. lumBufIndex=0;
  2523. chrBufIndex=0;
  2524. dstY=0;
  2525. lastInLumBuf= -1;
  2526. lastInChrBuf= -1;
  2527. }
  2528. lastDstY= dstY;
  2529. for (;dstY < dstH; dstY++){
  2530. unsigned char *dest =dst[0]+dstStride[0]*dstY;
  2531. const int chrDstY= dstY>>c->chrDstVSubSample;
  2532. unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
  2533. unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
  2534. unsigned char *aDest=(CONFIG_SWSCALE_ALPHA && alpPixBuf) ? dst[3]+dstStride[3]*dstY : NULL;
  2535. const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
  2536. const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
  2537. const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
  2538. const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
  2539. //printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
  2540. // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample);
  2541. //handle holes (FAST_BILINEAR & weird filters)
  2542. if (firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
  2543. if (firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
  2544. //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
  2545. assert(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1);
  2546. assert(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1);
2547. // Do we have enough lines in this slice to output the dstY line?
  2548. if (lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
  2549. {
  2550. //Do horizontal scaling
  2551. while(lastInLumBuf < lastLumSrcY)
  2552. {
  2553. uint8_t *src1= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2554. uint8_t *src2= src[3]+(lastInLumBuf + 1 - srcSliceY)*srcStride[3];
  2555. lumBufIndex++;
  2556. //printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
  2557. assert(lumBufIndex < 2*vLumBufSize);
  2558. assert(lastInLumBuf + 1 - srcSliceY < srcSliceH);
  2559. assert(lastInLumBuf + 1 - srcSliceY >= 0);
  2560. //printf("%d %d\n", lumBufIndex, vLumBufSize);
  2561. RENAME(hyscale)(c, lumPixBuf[ lumBufIndex ], dstW, src1, srcW, lumXInc,
  2562. flags, hLumFilter, hLumFilterPos, hLumFilterSize,
  2563. c->srcFormat, formatConvBuffer,
  2564. pal, 0);
  2565. if (CONFIG_SWSCALE_ALPHA && alpPixBuf)
  2566. RENAME(hyscale)(c, alpPixBuf[ lumBufIndex ], dstW, src2, srcW, lumXInc,
  2567. flags, hLumFilter, hLumFilterPos, hLumFilterSize,
  2568. c->srcFormat, formatConvBuffer,
  2569. pal, 1);
  2570. lastInLumBuf++;
  2571. }
  2572. while(lastInChrBuf < lastChrSrcY)
  2573. {
  2574. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2575. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2576. chrBufIndex++;
  2577. assert(chrBufIndex < 2*vChrBufSize);
  2578. assert(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH));
  2579. assert(lastInChrBuf + 1 - chrSrcSliceY >= 0);
  2580. //FIXME replace parameters through context struct (some at least)
  2581. if (!(isGray(srcFormat) || isGray(dstFormat)))
  2582. RENAME(hcscale)(c, chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2583. flags, hChrFilter, hChrFilterPos, hChrFilterSize,
  2584. c->srcFormat, formatConvBuffer,
  2585. pal);
  2586. lastInChrBuf++;
  2587. }
  2588. //wrap buf index around to stay inside the ring buffer
  2589. if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
  2590. if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
  2591. }
  2592. else // not enough lines left in this slice -> load the rest in the buffer
  2593. {
  2594. /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
  2595. firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
  2596. lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
  2597. vChrBufSize, vLumBufSize);*/
  2598. //Do horizontal scaling
  2599. while(lastInLumBuf+1 < srcSliceY + srcSliceH)
  2600. {
  2601. uint8_t *src1= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2602. uint8_t *src2= src[3]+(lastInLumBuf + 1 - srcSliceY)*srcStride[3];
  2603. lumBufIndex++;
  2604. assert(lumBufIndex < 2*vLumBufSize);
  2605. assert(lastInLumBuf + 1 - srcSliceY < srcSliceH);
  2606. assert(lastInLumBuf + 1 - srcSliceY >= 0);
  2607. RENAME(hyscale)(c, lumPixBuf[ lumBufIndex ], dstW, src1, srcW, lumXInc,
  2608. flags, hLumFilter, hLumFilterPos, hLumFilterSize,
  2609. c->srcFormat, formatConvBuffer,
  2610. pal, 0);
  2611. if (CONFIG_SWSCALE_ALPHA && alpPixBuf)
  2612. RENAME(hyscale)(c, alpPixBuf[ lumBufIndex ], dstW, src2, srcW, lumXInc,
  2613. flags, hLumFilter, hLumFilterPos, hLumFilterSize,
  2614. c->srcFormat, formatConvBuffer,
  2615. pal, 1);
  2616. lastInLumBuf++;
  2617. }
  2618. while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
  2619. {
  2620. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2621. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2622. chrBufIndex++;
  2623. assert(chrBufIndex < 2*vChrBufSize);
  2624. assert(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH);
  2625. assert(lastInChrBuf + 1 - chrSrcSliceY >= 0);
  2626. if (!(isGray(srcFormat) || isGray(dstFormat)))
  2627. RENAME(hcscale)(c, chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2628. flags, hChrFilter, hChrFilterPos, hChrFilterSize,
  2629. c->srcFormat, formatConvBuffer,
  2630. pal);
  2631. lastInChrBuf++;
  2632. }
  2633. //wrap buf index around to stay inside the ring buffer
  2634. if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
  2635. if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
  2636. break; //we can't output a dstY line so let's try with the next slice
  2637. }
  2638. #if HAVE_MMX
  2639. c->blueDither= ff_dither8[dstY&1];
  2640. if (c->dstFormat == PIX_FMT_RGB555 || c->dstFormat == PIX_FMT_BGR555)
  2641. c->greenDither= ff_dither8[dstY&1];
  2642. else
  2643. c->greenDither= ff_dither4[dstY&1];
  2644. c->redDither= ff_dither8[(dstY+1)&1];
  2645. #endif
  2646. if (dstY < dstH-2)
  2647. {
  2648. const int16_t **lumSrcPtr= (const int16_t **) lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2649. const int16_t **chrSrcPtr= (const int16_t **) chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2650. const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **) alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
  2651. #if HAVE_MMX
  2652. int i;
  2653. if (flags & SWS_ACCURATE_RND){
  2654. int s= APCK_SIZE / 8;
  2655. for (i=0; i<vLumFilterSize; i+=2){
  2656. *(void**)&lumMmxFilter[s*i ]= lumSrcPtr[i ];
  2657. *(void**)&lumMmxFilter[s*i+APCK_PTR2/4 ]= lumSrcPtr[i+(vLumFilterSize>1)];
  2658. lumMmxFilter[s*i+APCK_COEF/4 ]=
  2659. lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i ]
  2660. + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
  2661. if (CONFIG_SWSCALE_ALPHA && alpPixBuf){
  2662. *(void**)&alpMmxFilter[s*i ]= alpSrcPtr[i ];
  2663. *(void**)&alpMmxFilter[s*i+APCK_PTR2/4 ]= alpSrcPtr[i+(vLumFilterSize>1)];
  2664. alpMmxFilter[s*i+APCK_COEF/4 ]=
  2665. alpMmxFilter[s*i+APCK_COEF/4+1]= lumMmxFilter[s*i+APCK_COEF/4 ];
  2666. }
  2667. }
  2668. for (i=0; i<vChrFilterSize; i+=2){
  2669. *(void**)&chrMmxFilter[s*i ]= chrSrcPtr[i ];
  2670. *(void**)&chrMmxFilter[s*i+APCK_PTR2/4 ]= chrSrcPtr[i+(vChrFilterSize>1)];
  2671. chrMmxFilter[s*i+APCK_COEF/4 ]=
  2672. chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i ]
  2673. + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
  2674. }
  2675. }else{
  2676. for (i=0; i<vLumFilterSize; i++)
  2677. {
  2678. lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
  2679. lumMmxFilter[4*i+1]= (uint64_t)lumSrcPtr[i] >> 32;
  2680. lumMmxFilter[4*i+2]=
  2681. lumMmxFilter[4*i+3]=
  2682. ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
  2683. if (CONFIG_SWSCALE_ALPHA && alpPixBuf){
  2684. alpMmxFilter[4*i+0]= (int32_t)alpSrcPtr[i];
  2685. alpMmxFilter[4*i+1]= (uint64_t)alpSrcPtr[i] >> 32;
  2686. alpMmxFilter[4*i+2]=
  2687. alpMmxFilter[4*i+3]= lumMmxFilter[4*i+2];
  2688. }
  2689. }
  2690. for (i=0; i<vChrFilterSize; i++)
  2691. {
  2692. chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
  2693. chrMmxFilter[4*i+1]= (uint64_t)chrSrcPtr[i] >> 32;
  2694. chrMmxFilter[4*i+2]=
  2695. chrMmxFilter[4*i+3]=
  2696. ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
  2697. }
  2698. }
  2699. #endif
  2700. if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
  2701. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2702. if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
  2703. c->yuv2nv12X(c,
  2704. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2705. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2706. dest, uDest, dstW, chrDstW, dstFormat);
  2707. }
  2708. else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) //YV12 like
  2709. {
  2710. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2711. if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
  2712. if (vLumFilterSize == 1 && vChrFilterSize == 1) // unscaled YV12
  2713. {
  2714. int16_t *lumBuf = lumPixBuf[0];
  2715. int16_t *chrBuf= chrPixBuf[0];
  2716. int16_t *alpBuf= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? alpPixBuf[0] : NULL;
  2717. c->yuv2yuv1(c, lumBuf, chrBuf, alpBuf, dest, uDest, vDest, aDest, dstW, chrDstW);
  2718. }
  2719. else //General YV12
  2720. {
  2721. c->yuv2yuvX(c,
  2722. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2723. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2724. alpSrcPtr, dest, uDest, vDest, aDest, dstW, chrDstW);
  2725. }
  2726. }
  2727. else
  2728. {
  2729. assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2730. assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  2731. if (vLumFilterSize == 1 && vChrFilterSize == 2) //unscaled RGB
  2732. {
  2733. int chrAlpha= vChrFilter[2*dstY+1];
  2734. if(flags & SWS_FULL_CHR_H_INT){
  2735. yuv2rgbXinC_full(c, //FIXME write a packed1_full function
  2736. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2737. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2738. alpSrcPtr, dest, dstW, dstY);
  2739. }else{
  2740. c->yuv2packed1(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
  2741. alpPixBuf ? *alpSrcPtr : NULL,
  2742. dest, dstW, chrAlpha, dstFormat, flags, dstY);
  2743. }
  2744. }
  2745. else if (vLumFilterSize == 2 && vChrFilterSize == 2) //bilinear upscale RGB
  2746. {
  2747. int lumAlpha= vLumFilter[2*dstY+1];
  2748. int chrAlpha= vChrFilter[2*dstY+1];
  2749. lumMmxFilter[2]=
  2750. lumMmxFilter[3]= vLumFilter[2*dstY ]*0x10001;
  2751. chrMmxFilter[2]=
  2752. chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001;
  2753. if(flags & SWS_FULL_CHR_H_INT){
  2754. yuv2rgbXinC_full(c, //FIXME write a packed2_full function
  2755. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2756. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2757. alpSrcPtr, dest, dstW, dstY);
  2758. }else{
  2759. c->yuv2packed2(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
  2760. alpPixBuf ? *alpSrcPtr : NULL, alpPixBuf ? *(alpSrcPtr+1) : NULL,
  2761. dest, dstW, lumAlpha, chrAlpha, dstY);
  2762. }
  2763. }
  2764. else //general RGB
  2765. {
  2766. if(flags & SWS_FULL_CHR_H_INT){
  2767. yuv2rgbXinC_full(c,
  2768. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2769. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2770. alpSrcPtr, dest, dstW, dstY);
  2771. }else{
  2772. c->yuv2packedX(c,
  2773. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2774. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2775. alpSrcPtr, dest, dstW, dstY);
  2776. }
  2777. }
  2778. }
  2779. }
2780. else // hmm, looks like we can't use MMX here without overwriting this array's tail
  2781. {
  2782. const int16_t **lumSrcPtr= (const int16_t **)lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2783. const int16_t **chrSrcPtr= (const int16_t **)chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2784. const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **)alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
  2785. if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
  2786. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2787. if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
  2788. yuv2nv12XinC(
  2789. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2790. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2791. dest, uDest, dstW, chrDstW, dstFormat);
  2792. }
  2793. else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) //YV12
  2794. {
  2795. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2796. if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
  2797. yuv2yuvXinC(
  2798. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2799. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2800. alpSrcPtr, dest, uDest, vDest, aDest, dstW, chrDstW);
  2801. }
  2802. else
  2803. {
  2804. assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2805. assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  2806. if(flags & SWS_FULL_CHR_H_INT){
  2807. yuv2rgbXinC_full(c,
  2808. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2809. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2810. alpSrcPtr, dest, dstW, dstY);
  2811. }else{
  2812. yuv2packedXinC(c,
  2813. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2814. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2815. alpSrcPtr, dest, dstW, dstY);
  2816. }
  2817. }
  2818. }
  2819. }
  2820. if ((dstFormat == PIX_FMT_YUVA420P) && !alpPixBuf)
  2821. fillPlane(dst[3], dstStride[3], dstW, dstY-lastDstY, lastDstY, 255);
  2822. #if HAVE_MMX
  2823. if (flags & SWS_CPU_CAPS_MMX2 ) __asm__ volatile("sfence":::"memory");
  2824. /* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
  2825. if (flags & SWS_CPU_CAPS_3DNOW) __asm__ volatile("femms" :::"memory");
  2826. else __asm__ volatile("emms" :::"memory");
  2827. #endif
  2828. /* store changed local vars back in the context */
  2829. c->dstY= dstY;
  2830. c->lumBufIndex= lumBufIndex;
  2831. c->chrBufIndex= chrBufIndex;
  2832. c->lastInLumBuf= lastInLumBuf;
  2833. c->lastInChrBuf= lastInChrBuf;
  2834. return dstY - lastDstY;
  2835. }
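#if 0
/* For reference: the function above is what a public sws_scale() call lands
 * in once sws_getContext() has selected this template. A minimal caller
 * sketch (assumed usage; error handling omitted), feeding the whole image
 * as a single slice: */
#include "swscale.h"
static void example_scale(uint8_t *src[4], int srcStride[4],
                          uint8_t *dst[4], int dstStride[4],
                          int srcW, int srcH, int dstW, int dstH)
{
    struct SwsContext *sws = sws_getContext(srcW, srcH, PIX_FMT_YUV420P,
                                            dstW, dstH, PIX_FMT_RGB24,
                                            SWS_BILINEAR, NULL, NULL, NULL);
    if (!sws)
        return;
    /* srcSliceY=0, srcSliceH=srcH: one full-height slice; the return value
     * is the number of output lines written (dstY - lastDstY above). */
    sws_scale(sws, src, srcStride, 0, srcH, dst, dstStride);
    sws_freeContext(sws);
}
#endif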
  2836. static void RENAME(sws_init_swScale)(SwsContext *c)
  2837. {
  2838. enum PixelFormat srcFormat = c->srcFormat;
  2839. c->yuv2nv12X = RENAME(yuv2nv12X );
  2840. c->yuv2yuv1 = RENAME(yuv2yuv1 );
  2841. c->yuv2yuvX = RENAME(yuv2yuvX );
  2842. c->yuv2packed1 = RENAME(yuv2packed1 );
  2843. c->yuv2packed2 = RENAME(yuv2packed2 );
  2844. c->yuv2packedX = RENAME(yuv2packedX );
  2845. c->hScale = RENAME(hScale );
  2846. c->hyscale_fast = RENAME(hyscale_fast);
  2847. c->hcscale_fast = RENAME(hcscale_fast);
  2848. c->hcscale_internal = NULL;
  2849. switch(srcFormat) {
  2850. case PIX_FMT_YUYV422 : c->hcscale_internal = RENAME(yuy2ToUV); break;
  2851. case PIX_FMT_UYVY422 : c->hcscale_internal = RENAME(uyvyToUV); break;
  2852. case PIX_FMT_RGB8 :
  2853. case PIX_FMT_BGR8 :
  2854. case PIX_FMT_PAL8 :
  2855. case PIX_FMT_BGR4_BYTE:
  2856. case PIX_FMT_RGB4_BYTE: c->hcscale_internal = palToUV; break;
  2857. case PIX_FMT_YUV420PBE:
  2858. case PIX_FMT_YUV422PBE:
  2859. case PIX_FMT_YUV444PBE: c->hcscale_internal = RENAME(BEToUV); break;
  2860. case PIX_FMT_YUV420PLE:
  2861. case PIX_FMT_YUV422PLE:
  2862. case PIX_FMT_YUV444PLE: c->hcscale_internal = RENAME(LEToUV); break;
  2863. }
  2864. if (c->chrSrcHSubSample) {
  2865. switch(srcFormat) {
  2866. case PIX_FMT_RGB32 :
  2867. case PIX_FMT_RGB32_1: c->hcscale_internal = bgr32ToUV_half; break;
  2868. case PIX_FMT_BGR24 : c->hcscale_internal = RENAME(bgr24ToUV_half); break;
  2869. case PIX_FMT_BGR565 : c->hcscale_internal = bgr16ToUV_half; break;
  2870. case PIX_FMT_BGR555 : c->hcscale_internal = bgr15ToUV_half; break;
  2871. case PIX_FMT_BGR32 :
  2872. case PIX_FMT_BGR32_1: c->hcscale_internal = rgb32ToUV_half; break;
  2873. case PIX_FMT_RGB24 : c->hcscale_internal = RENAME(rgb24ToUV_half); break;
  2874. case PIX_FMT_RGB565 : c->hcscale_internal = rgb16ToUV_half; break;
  2875. case PIX_FMT_RGB555 : c->hcscale_internal = rgb15ToUV_half; break;
  2876. }
  2877. } else {
  2878. switch(srcFormat) {
  2879. case PIX_FMT_RGB32 :
  2880. case PIX_FMT_RGB32_1: c->hcscale_internal = bgr32ToUV; break;
  2881. case PIX_FMT_BGR24 : c->hcscale_internal = RENAME(bgr24ToUV); break;
  2882. case PIX_FMT_BGR565 : c->hcscale_internal = bgr16ToUV; break;
  2883. case PIX_FMT_BGR555 : c->hcscale_internal = bgr15ToUV; break;
  2884. case PIX_FMT_BGR32 :
  2885. case PIX_FMT_BGR32_1: c->hcscale_internal = rgb32ToUV; break;
  2886. case PIX_FMT_RGB24 : c->hcscale_internal = RENAME(rgb24ToUV); break;
  2887. case PIX_FMT_RGB565 : c->hcscale_internal = rgb16ToUV; break;
  2888. case PIX_FMT_RGB555 : c->hcscale_internal = rgb15ToUV; break;
  2889. }
  2890. }
  2891. c->hyscale_internal = NULL;
  2892. c->hascale_internal = NULL;
  2893. switch (srcFormat) {
  2894. case PIX_FMT_YUYV422 :
  2895. case PIX_FMT_YUV420PBE:
  2896. case PIX_FMT_YUV422PBE:
  2897. case PIX_FMT_YUV444PBE:
  2898. case PIX_FMT_GRAY16BE : c->hyscale_internal = RENAME(yuy2ToY); break;
  2899. case PIX_FMT_UYVY422 :
  2900. case PIX_FMT_YUV420PLE:
  2901. case PIX_FMT_YUV422PLE:
  2902. case PIX_FMT_YUV444PLE:
  2903. case PIX_FMT_GRAY16LE : c->hyscale_internal = RENAME(uyvyToY); break;
  2904. case PIX_FMT_BGR24 : c->hyscale_internal = RENAME(bgr24ToY); break;
  2905. case PIX_FMT_BGR565 : c->hyscale_internal = bgr16ToY; break;
  2906. case PIX_FMT_BGR555 : c->hyscale_internal = bgr15ToY; break;
  2907. case PIX_FMT_RGB24 : c->hyscale_internal = RENAME(rgb24ToY); break;
  2908. case PIX_FMT_RGB565 : c->hyscale_internal = rgb16ToY; break;
  2909. case PIX_FMT_RGB555 : c->hyscale_internal = rgb15ToY; break;
  2910. case PIX_FMT_RGB8 :
  2911. case PIX_FMT_BGR8 :
  2912. case PIX_FMT_PAL8 :
  2913. case PIX_FMT_BGR4_BYTE:
  2914. case PIX_FMT_RGB4_BYTE: c->hyscale_internal = palToY; break;
  2915. case PIX_FMT_MONOBLACK: c->hyscale_internal = monoblack2Y; break;
  2916. case PIX_FMT_MONOWHITE: c->hyscale_internal = monowhite2Y; break;
  2917. case PIX_FMT_RGB32 :
  2918. case PIX_FMT_RGB32_1: c->hyscale_internal = bgr32ToY; break;
  2919. case PIX_FMT_BGR32 :
  2920. case PIX_FMT_BGR32_1: c->hyscale_internal = rgb32ToY; break;
  2921. }
  2922. if (c->alpPixBuf) {
  2923. switch (srcFormat) {
  2924. case PIX_FMT_RGB32 :
  2925. case PIX_FMT_RGB32_1:
  2926. case PIX_FMT_BGR32 :
  2927. case PIX_FMT_BGR32_1: c->hascale_internal = abgrToA; break;
  2928. }
  2929. }
  2930. }