  1. /*
  2. * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. *
  20. * The C code (not assembly, MMX, ...) of this file can be used
  21. * under the LGPL license.
  22. */
  23. #undef REAL_MOVNTQ
  24. #undef MOVNTQ
  25. #undef PAVGB
  26. #undef PREFETCH
  27. #undef PREFETCHW
  28. #undef EMMS
  29. #undef SFENCE
  30. #ifdef HAVE_3DNOW
  31. /* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
  32. #define EMMS "femms"
  33. #else
  34. #define EMMS "emms"
  35. #endif
  36. #ifdef HAVE_3DNOW
  37. #define PREFETCH "prefetch"
  38. #define PREFETCHW "prefetchw"
  39. #elif defined (HAVE_MMX2)
  40. #define PREFETCH "prefetchnta"
  41. #define PREFETCHW "prefetcht0"
  42. #else
  43. #define PREFETCH " # nop"
  44. #define PREFETCHW " # nop"
  45. #endif
  46. #ifdef HAVE_MMX2
  47. #define SFENCE "sfence"
  48. #else
  49. #define SFENCE " # nop"
  50. #endif
  51. #ifdef HAVE_MMX2
  52. #define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
  53. #elif defined (HAVE_3DNOW)
  54. #define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
  55. #endif
  56. #ifdef HAVE_MMX2
  57. #define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
  58. #else
  59. #define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
  60. #endif
  61. #define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
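/* Note: the REAL_* / wrapper macro pairs (REAL_MOVNTQ/MOVNTQ and the like) exist so
   that macro arguments are fully expanded before they are stringified with '#' and
   pasted into the asm text; stringification would otherwise suppress that expansion. */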
  62. #ifdef HAVE_ALTIVEC
  63. #include "swscale_altivec_template.c"
  64. #endif
  65. #define YSCALEYUV2YV12X(x, offset, dest, width) \
  66. __asm__ volatile(\
  67. "xor %%"REG_a", %%"REG_a" \n\t"\
  68. "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
  69. "movq %%mm3, %%mm4 \n\t"\
  70. "lea " offset "(%0), %%"REG_d" \n\t"\
  71. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  72. ASMALIGN(4) /* FIXME Unroll? */\
  73. "1: \n\t"\
  74. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  75. "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
  76. "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* srcData */\
  77. "add $16, %%"REG_d" \n\t"\
  78. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  79. "test %%"REG_S", %%"REG_S" \n\t"\
  80. "pmulhw %%mm0, %%mm2 \n\t"\
  81. "pmulhw %%mm0, %%mm5 \n\t"\
  82. "paddw %%mm2, %%mm3 \n\t"\
  83. "paddw %%mm5, %%mm4 \n\t"\
  84. " jnz 1b \n\t"\
  85. "psraw $3, %%mm3 \n\t"\
  86. "psraw $3, %%mm4 \n\t"\
  87. "packuswb %%mm4, %%mm3 \n\t"\
  88. MOVNTQ(%%mm3, (%1, %%REGa))\
  89. "add $8, %%"REG_a" \n\t"\
  90. "cmp %2, %%"REG_a" \n\t"\
  91. "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
  92. "movq %%mm3, %%mm4 \n\t"\
  93. "lea " offset "(%0), %%"REG_d" \n\t"\
  94. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  95. "jb 1b \n\t"\
  96. :: "r" (&c->redDither),\
  97. "r" (dest), "g" (width)\
  98. : "%"REG_a, "%"REG_d, "%"REG_S\
  99. );
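/* YSCALEYUV2YV12X_ACCURATE performs the same vertical multiply-accumulate as
   YSCALEYUV2YV12X, but widens the products to 32 bits with pmaddwd/psrad before
   rounding and packing, trading speed for precision (selected by SWS_ACCURATE_RND). */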
  100. #define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
  101. __asm__ volatile(\
  102. "lea " offset "(%0), %%"REG_d" \n\t"\
  103. "xor %%"REG_a", %%"REG_a" \n\t"\
  104. "pxor %%mm4, %%mm4 \n\t"\
  105. "pxor %%mm5, %%mm5 \n\t"\
  106. "pxor %%mm6, %%mm6 \n\t"\
  107. "pxor %%mm7, %%mm7 \n\t"\
  108. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  109. ASMALIGN(4) \
  110. "1: \n\t"\
  111. "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* srcData */\
  112. "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
  113. "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
  114. "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm1 \n\t" /* srcData */\
  115. "movq %%mm0, %%mm3 \n\t"\
  116. "punpcklwd %%mm1, %%mm0 \n\t"\
  117. "punpckhwd %%mm1, %%mm3 \n\t"\
  118. "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
  119. "pmaddwd %%mm1, %%mm0 \n\t"\
  120. "pmaddwd %%mm1, %%mm3 \n\t"\
  121. "paddd %%mm0, %%mm4 \n\t"\
  122. "paddd %%mm3, %%mm5 \n\t"\
  123. "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* srcData */\
  124. "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
  125. "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
  126. "test %%"REG_S", %%"REG_S" \n\t"\
  127. "movq %%mm2, %%mm0 \n\t"\
  128. "punpcklwd %%mm3, %%mm2 \n\t"\
  129. "punpckhwd %%mm3, %%mm0 \n\t"\
  130. "pmaddwd %%mm1, %%mm2 \n\t"\
  131. "pmaddwd %%mm1, %%mm0 \n\t"\
  132. "paddd %%mm2, %%mm6 \n\t"\
  133. "paddd %%mm0, %%mm7 \n\t"\
  134. " jnz 1b \n\t"\
  135. "psrad $16, %%mm4 \n\t"\
  136. "psrad $16, %%mm5 \n\t"\
  137. "psrad $16, %%mm6 \n\t"\
  138. "psrad $16, %%mm7 \n\t"\
  139. "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
  140. "packssdw %%mm5, %%mm4 \n\t"\
  141. "packssdw %%mm7, %%mm6 \n\t"\
  142. "paddw %%mm0, %%mm4 \n\t"\
  143. "paddw %%mm0, %%mm6 \n\t"\
  144. "psraw $3, %%mm4 \n\t"\
  145. "psraw $3, %%mm6 \n\t"\
  146. "packuswb %%mm6, %%mm4 \n\t"\
  147. MOVNTQ(%%mm4, (%1, %%REGa))\
  148. "add $8, %%"REG_a" \n\t"\
  149. "cmp %2, %%"REG_a" \n\t"\
  150. "lea " offset "(%0), %%"REG_d" \n\t"\
  151. "pxor %%mm4, %%mm4 \n\t"\
  152. "pxor %%mm5, %%mm5 \n\t"\
  153. "pxor %%mm6, %%mm6 \n\t"\
  154. "pxor %%mm7, %%mm7 \n\t"\
  155. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  156. "jb 1b \n\t"\
  157. :: "r" (&c->redDither),\
  158. "r" (dest), "g" (width)\
  159. : "%"REG_a, "%"REG_d, "%"REG_S\
  160. );
  161. #define YSCALEYUV2YV121 \
  162. "mov %2, %%"REG_a" \n\t"\
  163. ASMALIGN(4) /* FIXME Unroll? */\
  164. "1: \n\t"\
  165. "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
  166. "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
  167. "psraw $7, %%mm0 \n\t"\
  168. "psraw $7, %%mm1 \n\t"\
  169. "packuswb %%mm1, %%mm0 \n\t"\
  170. MOVNTQ(%%mm0, (%1, %%REGa))\
  171. "add $8, %%"REG_a" \n\t"\
  172. "jnc 1b \n\t"
  173. #define YSCALEYUV2YV121_ACCURATE \
  174. "mov %2, %%"REG_a" \n\t"\
  175. "pcmpeqw %%mm7, %%mm7 \n\t"\
  176. "psrlw $15, %%mm7 \n\t"\
  177. "psllw $6, %%mm7 \n\t"\
  178. ASMALIGN(4) /* FIXME Unroll? */\
  179. "1: \n\t"\
  180. "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
  181. "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
  182. "paddsw %%mm7, %%mm0 \n\t"\
  183. "paddsw %%mm7, %%mm1 \n\t"\
  184. "psraw $7, %%mm0 \n\t"\
  185. "psraw $7, %%mm1 \n\t"\
  186. "packuswb %%mm1, %%mm0 \n\t"\
  187. MOVNTQ(%%mm0, (%1, %%REGa))\
  188. "add $8, %%"REG_a" \n\t"\
  189. "jnc 1b \n\t"
  190. /*
  191. :: "m" (-lumFilterSize), "m" (-chrFilterSize),
  192. "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
  193. "r" (dest), "m" (dstW),
  194. "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
  195. : "%eax", "%ebx", "%ecx", "%edx", "%esi"
  196. */
  197. #define YSCALEYUV2PACKEDX \
  198. __asm__ volatile(\
  199. "xor %%"REG_a", %%"REG_a" \n\t"\
  200. ASMALIGN(4)\
  201. "nop \n\t"\
  202. "1: \n\t"\
  203. "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
  204. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  205. "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
  206. "movq %%mm3, %%mm4 \n\t"\
  207. ASMALIGN(4)\
  208. "2: \n\t"\
  209. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  210. "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
  211. "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
  212. "add $16, %%"REG_d" \n\t"\
  213. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  214. "pmulhw %%mm0, %%mm2 \n\t"\
  215. "pmulhw %%mm0, %%mm5 \n\t"\
  216. "paddw %%mm2, %%mm3 \n\t"\
  217. "paddw %%mm5, %%mm4 \n\t"\
  218. "test %%"REG_S", %%"REG_S" \n\t"\
  219. " jnz 2b \n\t"\
  220. \
  221. "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
  222. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  223. "movq "VROUNDER_OFFSET"(%0), %%mm1 \n\t"\
  224. "movq %%mm1, %%mm7 \n\t"\
  225. ASMALIGN(4)\
  226. "2: \n\t"\
  227. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  228. "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\
  229. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
  230. "add $16, %%"REG_d" \n\t"\
  231. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  232. "pmulhw %%mm0, %%mm2 \n\t"\
  233. "pmulhw %%mm0, %%mm5 \n\t"\
  234. "paddw %%mm2, %%mm1 \n\t"\
  235. "paddw %%mm5, %%mm7 \n\t"\
  236. "test %%"REG_S", %%"REG_S" \n\t"\
  237. " jnz 2b \n\t"\
  238. #define YSCALEYUV2PACKEDX_END \
  239. :: "r" (&c->redDither), \
  240. "m" (dummy), "m" (dummy), "m" (dummy),\
  241. "r" (dest), "m" (dstW) \
  242. : "%"REG_a, "%"REG_d, "%"REG_S \
  243. );
  244. #define YSCALEYUV2PACKEDX_ACCURATE \
  245. __asm__ volatile(\
  246. "xor %%"REG_a", %%"REG_a" \n\t"\
  247. ASMALIGN(4)\
  248. "nop \n\t"\
  249. "1: \n\t"\
  250. "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
  251. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  252. "pxor %%mm4, %%mm4 \n\t"\
  253. "pxor %%mm5, %%mm5 \n\t"\
  254. "pxor %%mm6, %%mm6 \n\t"\
  255. "pxor %%mm7, %%mm7 \n\t"\
  256. ASMALIGN(4)\
  257. "2: \n\t"\
  258. "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\
  259. "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
  260. "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
  261. "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\
  262. "movq %%mm0, %%mm3 \n\t"\
  263. "punpcklwd %%mm1, %%mm0 \n\t"\
  264. "punpckhwd %%mm1, %%mm3 \n\t"\
  265. "movq "STR(APCK_COEF)"(%%"REG_d"),%%mm1 \n\t" /* filterCoeff */\
  266. "pmaddwd %%mm1, %%mm0 \n\t"\
  267. "pmaddwd %%mm1, %%mm3 \n\t"\
  268. "paddd %%mm0, %%mm4 \n\t"\
  269. "paddd %%mm3, %%mm5 \n\t"\
  270. "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
  271. "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
  272. "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
  273. "test %%"REG_S", %%"REG_S" \n\t"\
  274. "movq %%mm2, %%mm0 \n\t"\
  275. "punpcklwd %%mm3, %%mm2 \n\t"\
  276. "punpckhwd %%mm3, %%mm0 \n\t"\
  277. "pmaddwd %%mm1, %%mm2 \n\t"\
  278. "pmaddwd %%mm1, %%mm0 \n\t"\
  279. "paddd %%mm2, %%mm6 \n\t"\
  280. "paddd %%mm0, %%mm7 \n\t"\
  281. " jnz 2b \n\t"\
  282. "psrad $16, %%mm4 \n\t"\
  283. "psrad $16, %%mm5 \n\t"\
  284. "psrad $16, %%mm6 \n\t"\
  285. "psrad $16, %%mm7 \n\t"\
  286. "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
  287. "packssdw %%mm5, %%mm4 \n\t"\
  288. "packssdw %%mm7, %%mm6 \n\t"\
  289. "paddw %%mm0, %%mm4 \n\t"\
  290. "paddw %%mm0, %%mm6 \n\t"\
  291. "movq %%mm4, "U_TEMP"(%0) \n\t"\
  292. "movq %%mm6, "V_TEMP"(%0) \n\t"\
  293. \
  294. "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
  295. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  296. "pxor %%mm1, %%mm1 \n\t"\
  297. "pxor %%mm5, %%mm5 \n\t"\
  298. "pxor %%mm7, %%mm7 \n\t"\
  299. "pxor %%mm6, %%mm6 \n\t"\
  300. ASMALIGN(4)\
  301. "2: \n\t"\
  302. "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
  303. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
  304. "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
  305. "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
  306. "movq %%mm0, %%mm3 \n\t"\
  307. "punpcklwd %%mm4, %%mm0 \n\t"\
  308. "punpckhwd %%mm4, %%mm3 \n\t"\
  309. "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\
  310. "pmaddwd %%mm4, %%mm0 \n\t"\
  311. "pmaddwd %%mm4, %%mm3 \n\t"\
  312. "paddd %%mm0, %%mm1 \n\t"\
  313. "paddd %%mm3, %%mm5 \n\t"\
  314. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
  315. "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
  316. "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
  317. "test %%"REG_S", %%"REG_S" \n\t"\
  318. "movq %%mm2, %%mm0 \n\t"\
  319. "punpcklwd %%mm3, %%mm2 \n\t"\
  320. "punpckhwd %%mm3, %%mm0 \n\t"\
  321. "pmaddwd %%mm4, %%mm2 \n\t"\
  322. "pmaddwd %%mm4, %%mm0 \n\t"\
  323. "paddd %%mm2, %%mm7 \n\t"\
  324. "paddd %%mm0, %%mm6 \n\t"\
  325. " jnz 2b \n\t"\
  326. "psrad $16, %%mm1 \n\t"\
  327. "psrad $16, %%mm5 \n\t"\
  328. "psrad $16, %%mm7 \n\t"\
  329. "psrad $16, %%mm6 \n\t"\
  330. "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
  331. "packssdw %%mm5, %%mm1 \n\t"\
  332. "packssdw %%mm6, %%mm7 \n\t"\
  333. "paddw %%mm0, %%mm1 \n\t"\
  334. "paddw %%mm0, %%mm7 \n\t"\
  335. "movq "U_TEMP"(%0), %%mm3 \n\t"\
  336. "movq "V_TEMP"(%0), %%mm4 \n\t"\
  337. #define YSCALEYUV2RGBX \
  338. "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
  339. "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
  340. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  341. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  342. "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
  343. "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
  344. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  345. "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
  346. "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
  347. "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
  348. "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
  349. "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
  350. "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
  351. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  352. "paddw %%mm3, %%mm4 \n\t"\
  353. "movq %%mm2, %%mm0 \n\t"\
  354. "movq %%mm5, %%mm6 \n\t"\
  355. "movq %%mm4, %%mm3 \n\t"\
  356. "punpcklwd %%mm2, %%mm2 \n\t"\
  357. "punpcklwd %%mm5, %%mm5 \n\t"\
  358. "punpcklwd %%mm4, %%mm4 \n\t"\
  359. "paddw %%mm1, %%mm2 \n\t"\
  360. "paddw %%mm1, %%mm5 \n\t"\
  361. "paddw %%mm1, %%mm4 \n\t"\
  362. "punpckhwd %%mm0, %%mm0 \n\t"\
  363. "punpckhwd %%mm6, %%mm6 \n\t"\
  364. "punpckhwd %%mm3, %%mm3 \n\t"\
  365. "paddw %%mm7, %%mm0 \n\t"\
  366. "paddw %%mm7, %%mm6 \n\t"\
  367. "paddw %%mm7, %%mm3 \n\t"\
  368. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  369. "packuswb %%mm0, %%mm2 \n\t"\
  370. "packuswb %%mm6, %%mm5 \n\t"\
  371. "packuswb %%mm3, %%mm4 \n\t"\
  372. "pxor %%mm7, %%mm7 \n\t"
  373. #define REAL_YSCALEYUV2PACKED(index, c) \
  374. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
  375. "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1 \n\t"\
  376. "psraw $3, %%mm0 \n\t"\
  377. "psraw $3, %%mm1 \n\t"\
  378. "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
  379. "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
  380. "xor "#index", "#index" \n\t"\
  381. ASMALIGN(4)\
  382. "1: \n\t"\
  383. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  384. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  385. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  386. "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  387. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  388. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  389. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
  390. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  391. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  392. "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  393. "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  394. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  395. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  396. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  397. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  398. "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
  399. "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
  400. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  401. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  402. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  403. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  404. "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  405. "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  406. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  407. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  408. #define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
  409. #define REAL_YSCALEYUV2RGB(index, c) \
  410. "xor "#index", "#index" \n\t"\
  411. ASMALIGN(4)\
  412. "1: \n\t"\
  413. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  414. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  415. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  416. "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  417. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  418. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  419. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
  420. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  421. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  422. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  423. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  424. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  425. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  426. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  427. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  428. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  429. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  430. "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
  431. "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
  432. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  433. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  434. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  435. "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
  436. "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
  437. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  438. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  439. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  440. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  441. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  442. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  443. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  444. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  445. "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
  446. "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
  447. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  448. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  449. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  450. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  451. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  452. "paddw %%mm3, %%mm4 \n\t"\
  453. "movq %%mm2, %%mm0 \n\t"\
  454. "movq %%mm5, %%mm6 \n\t"\
  455. "movq %%mm4, %%mm3 \n\t"\
  456. "punpcklwd %%mm2, %%mm2 \n\t"\
  457. "punpcklwd %%mm5, %%mm5 \n\t"\
  458. "punpcklwd %%mm4, %%mm4 \n\t"\
  459. "paddw %%mm1, %%mm2 \n\t"\
  460. "paddw %%mm1, %%mm5 \n\t"\
  461. "paddw %%mm1, %%mm4 \n\t"\
  462. "punpckhwd %%mm0, %%mm0 \n\t"\
  463. "punpckhwd %%mm6, %%mm6 \n\t"\
  464. "punpckhwd %%mm3, %%mm3 \n\t"\
  465. "paddw %%mm7, %%mm0 \n\t"\
  466. "paddw %%mm7, %%mm6 \n\t"\
  467. "paddw %%mm7, %%mm3 \n\t"\
  468. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  469. "packuswb %%mm0, %%mm2 \n\t"\
  470. "packuswb %%mm6, %%mm5 \n\t"\
  471. "packuswb %%mm3, %%mm4 \n\t"\
  472. "pxor %%mm7, %%mm7 \n\t"
  473. #define YSCALEYUV2RGB(index, c) REAL_YSCALEYUV2RGB(index, c)
  474. #define REAL_YSCALEYUV2PACKED1(index, c) \
  475. "xor "#index", "#index" \n\t"\
  476. ASMALIGN(4)\
  477. "1: \n\t"\
  478. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  479. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  480. "psraw $7, %%mm3 \n\t" \
  481. "psraw $7, %%mm4 \n\t" \
  482. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  483. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  484. "psraw $7, %%mm1 \n\t" \
  485. "psraw $7, %%mm7 \n\t" \
  486. #define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
  487. #define REAL_YSCALEYUV2RGB1(index, c) \
  488. "xor "#index", "#index" \n\t"\
  489. ASMALIGN(4)\
  490. "1: \n\t"\
  491. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  492. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  493. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  494. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  495. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  496. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  497. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  498. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  499. "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
  500. "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
  501. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  502. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  503. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  504. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  505. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  506. "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
  507. "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
  508. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  509. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  510. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  511. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  512. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  513. "paddw %%mm3, %%mm4 \n\t"\
  514. "movq %%mm2, %%mm0 \n\t"\
  515. "movq %%mm5, %%mm6 \n\t"\
  516. "movq %%mm4, %%mm3 \n\t"\
  517. "punpcklwd %%mm2, %%mm2 \n\t"\
  518. "punpcklwd %%mm5, %%mm5 \n\t"\
  519. "punpcklwd %%mm4, %%mm4 \n\t"\
  520. "paddw %%mm1, %%mm2 \n\t"\
  521. "paddw %%mm1, %%mm5 \n\t"\
  522. "paddw %%mm1, %%mm4 \n\t"\
  523. "punpckhwd %%mm0, %%mm0 \n\t"\
  524. "punpckhwd %%mm6, %%mm6 \n\t"\
  525. "punpckhwd %%mm3, %%mm3 \n\t"\
  526. "paddw %%mm7, %%mm0 \n\t"\
  527. "paddw %%mm7, %%mm6 \n\t"\
  528. "paddw %%mm7, %%mm3 \n\t"\
  529. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  530. "packuswb %%mm0, %%mm2 \n\t"\
  531. "packuswb %%mm6, %%mm5 \n\t"\
  532. "packuswb %%mm3, %%mm4 \n\t"\
  533. "pxor %%mm7, %%mm7 \n\t"
  534. #define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
  535. #define REAL_YSCALEYUV2PACKED1b(index, c) \
  536. "xor "#index", "#index" \n\t"\
  537. ASMALIGN(4)\
  538. "1: \n\t"\
  539. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  540. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  541. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  542. "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  543. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  544. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  545. "psrlw $8, %%mm3 \n\t" \
  546. "psrlw $8, %%mm4 \n\t" \
  547. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  548. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  549. "psraw $7, %%mm1 \n\t" \
  550. "psraw $7, %%mm7 \n\t"
  551. #define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
  552. // do vertical chrominance interpolation
  553. #define REAL_YSCALEYUV2RGB1b(index, c) \
  554. "xor "#index", "#index" \n\t"\
  555. ASMALIGN(4)\
  556. "1: \n\t"\
  557. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  558. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  559. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  560. "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  561. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  562. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  563. "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
  564. "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
  565. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  566. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  567. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  568. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  569. "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
  570. "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
  571. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  572. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  573. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  574. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  575. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  576. "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
  577. "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
  578. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  579. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  580. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  581. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  582. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  583. "paddw %%mm3, %%mm4 \n\t"\
  584. "movq %%mm2, %%mm0 \n\t"\
  585. "movq %%mm5, %%mm6 \n\t"\
  586. "movq %%mm4, %%mm3 \n\t"\
  587. "punpcklwd %%mm2, %%mm2 \n\t"\
  588. "punpcklwd %%mm5, %%mm5 \n\t"\
  589. "punpcklwd %%mm4, %%mm4 \n\t"\
  590. "paddw %%mm1, %%mm2 \n\t"\
  591. "paddw %%mm1, %%mm5 \n\t"\
  592. "paddw %%mm1, %%mm4 \n\t"\
  593. "punpckhwd %%mm0, %%mm0 \n\t"\
  594. "punpckhwd %%mm6, %%mm6 \n\t"\
  595. "punpckhwd %%mm3, %%mm3 \n\t"\
  596. "paddw %%mm7, %%mm0 \n\t"\
  597. "paddw %%mm7, %%mm6 \n\t"\
  598. "paddw %%mm7, %%mm3 \n\t"\
  599. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  600. "packuswb %%mm0, %%mm2 \n\t"\
  601. "packuswb %%mm6, %%mm5 \n\t"\
  602. "packuswb %%mm3, %%mm4 \n\t"\
  603. "pxor %%mm7, %%mm7 \n\t"
  604. #define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
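/* The WRITE* macros below store the converted pixels in the destination format and
   close the scan-line loop (add the index, compare against dstw, branch back to 1).
   The RGB writers expect packed B/G/R bytes in %%mm2/%%mm4/%%mm5 with %%mm7 zeroed;
   WRITEYUY2 instead interleaves the Y/U/V words prepared by the packed-YUV paths. */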
  605. #define REAL_WRITEBGR32(dst, dstw, index) \
  606. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  607. "movq %%mm2, %%mm1 \n\t" /* B */\
  608. "movq %%mm5, %%mm6 \n\t" /* R */\
  609. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  610. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  611. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  612. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  613. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  614. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  615. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  616. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  617. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  618. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  619. \
  620. MOVNTQ(%%mm0, (dst, index, 4))\
  621. MOVNTQ(%%mm2, 8(dst, index, 4))\
  622. MOVNTQ(%%mm1, 16(dst, index, 4))\
  623. MOVNTQ(%%mm3, 24(dst, index, 4))\
  624. \
  625. "add $8, "#index" \n\t"\
  626. "cmp "#dstw", "#index" \n\t"\
  627. " jb 1b \n\t"
  628. #define WRITEBGR32(dst, dstw, index) REAL_WRITEBGR32(dst, dstw, index)
  629. #define REAL_WRITERGB16(dst, dstw, index) \
  630. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  631. "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
  632. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  633. "psrlq $3, %%mm2 \n\t"\
  634. \
  635. "movq %%mm2, %%mm1 \n\t"\
  636. "movq %%mm4, %%mm3 \n\t"\
  637. \
  638. "punpcklbw %%mm7, %%mm3 \n\t"\
  639. "punpcklbw %%mm5, %%mm2 \n\t"\
  640. "punpckhbw %%mm7, %%mm4 \n\t"\
  641. "punpckhbw %%mm5, %%mm1 \n\t"\
  642. \
  643. "psllq $3, %%mm3 \n\t"\
  644. "psllq $3, %%mm4 \n\t"\
  645. \
  646. "por %%mm3, %%mm2 \n\t"\
  647. "por %%mm4, %%mm1 \n\t"\
  648. \
  649. MOVNTQ(%%mm2, (dst, index, 2))\
  650. MOVNTQ(%%mm1, 8(dst, index, 2))\
  651. \
  652. "add $8, "#index" \n\t"\
  653. "cmp "#dstw", "#index" \n\t"\
  654. " jb 1b \n\t"
  655. #define WRITERGB16(dst, dstw, index) REAL_WRITERGB16(dst, dstw, index)
  656. #define REAL_WRITERGB15(dst, dstw, index) \
  657. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  658. "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
  659. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  660. "psrlq $3, %%mm2 \n\t"\
  661. "psrlq $1, %%mm5 \n\t"\
  662. \
  663. "movq %%mm2, %%mm1 \n\t"\
  664. "movq %%mm4, %%mm3 \n\t"\
  665. \
  666. "punpcklbw %%mm7, %%mm3 \n\t"\
  667. "punpcklbw %%mm5, %%mm2 \n\t"\
  668. "punpckhbw %%mm7, %%mm4 \n\t"\
  669. "punpckhbw %%mm5, %%mm1 \n\t"\
  670. \
  671. "psllq $2, %%mm3 \n\t"\
  672. "psllq $2, %%mm4 \n\t"\
  673. \
  674. "por %%mm3, %%mm2 \n\t"\
  675. "por %%mm4, %%mm1 \n\t"\
  676. \
  677. MOVNTQ(%%mm2, (dst, index, 2))\
  678. MOVNTQ(%%mm1, 8(dst, index, 2))\
  679. \
  680. "add $8, "#index" \n\t"\
  681. "cmp "#dstw", "#index" \n\t"\
  682. " jb 1b \n\t"
  683. #define WRITERGB15(dst, dstw, index) REAL_WRITERGB15(dst, dstw, index)
  684. #define WRITEBGR24OLD(dst, dstw, index) \
  685. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  686. "movq %%mm2, %%mm1 \n\t" /* B */\
  687. "movq %%mm5, %%mm6 \n\t" /* R */\
  688. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  689. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  690. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  691. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  692. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  693. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  694. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  695. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  696. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  697. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  698. \
  699. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  700. "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
  701. "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 0 */\
  702. "pand "MANGLE(bm11111000)", %%mm0 \n\t" /* 00RGB000 0.5 */\
  703. "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
  704. "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
  705. "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
  706. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  707. \
  708. "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  709. "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
  710. "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
  711. "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
  712. "pand "MANGLE(bm00001111)", %%mm2 \n\t" /* 0000RGBR 1 */\
  713. "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
  714. "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
  715. "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 2 */\
  716. "pand "MANGLE(bm11111000)", %%mm1 \n\t" /* 00RGB000 2.5 */\
  717. "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
  718. "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
  719. "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
  720. "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
  721. \
  722. "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
  723. "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
  724. "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
  725. "pand "MANGLE(bm00000111)", %%mm5 \n\t" /* 00000RGB 3 */\
  726. "pand "MANGLE(bm11111000)", %%mm3 \n\t" /* 00RGB000 3.5 */\
  727. "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
  728. "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
  729. "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
  730. \
  731. MOVNTQ(%%mm0, (dst))\
  732. MOVNTQ(%%mm2, 8(dst))\
  733. MOVNTQ(%%mm3, 16(dst))\
  734. "add $24, "#dst" \n\t"\
  735. \
  736. "add $8, "#index" \n\t"\
  737. "cmp "#dstw", "#index" \n\t"\
  738. " jb 1b \n\t"
  739. #define WRITEBGR24MMX(dst, dstw, index) \
  740. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  741. "movq %%mm2, %%mm1 \n\t" /* B */\
  742. "movq %%mm5, %%mm6 \n\t" /* R */\
  743. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  744. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  745. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  746. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  747. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  748. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  749. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  750. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  751. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  752. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  753. \
  754. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  755. "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
  756. "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
  757. "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
  758. \
  759. "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
  760. "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
  761. "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
  762. "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
  763. \
  764. "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
  765. "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
  766. "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
  767. "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
  768. \
  769. "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
  770. "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
  771. "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
  772. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  773. MOVNTQ(%%mm0, (dst))\
  774. \
  775. "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
  776. "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
  777. "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
  778. "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
  779. MOVNTQ(%%mm6, 8(dst))\
  780. \
  781. "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
  782. "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
  783. "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
  784. MOVNTQ(%%mm5, 16(dst))\
  785. \
  786. "add $24, "#dst" \n\t"\
  787. \
  788. "add $8, "#index" \n\t"\
  789. "cmp "#dstw", "#index" \n\t"\
  790. " jb 1b \n\t"
  791. #define WRITEBGR24MMX2(dst, dstw, index) \
  792. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  793. "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
  794. "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
  795. "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
  796. "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
  797. "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
  798. \
  799. "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
  800. "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
  801. "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
  802. \
  803. "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
  804. "por %%mm1, %%mm6 \n\t"\
  805. "por %%mm3, %%mm6 \n\t"\
  806. MOVNTQ(%%mm6, (dst))\
  807. \
  808. "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
  809. "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
  810. "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
  811. "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
  812. \
  813. "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
  814. "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
  815. "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
  816. \
  817. "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
  818. "por %%mm3, %%mm6 \n\t"\
  819. MOVNTQ(%%mm6, 8(dst))\
  820. \
  821. "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
  822. "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
  823. "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
  824. \
  825. "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
  826. "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
  827. "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
  828. \
  829. "por %%mm1, %%mm3 \n\t"\
  830. "por %%mm3, %%mm6 \n\t"\
  831. MOVNTQ(%%mm6, 16(dst))\
  832. \
  833. "add $24, "#dst" \n\t"\
  834. \
  835. "add $8, "#index" \n\t"\
  836. "cmp "#dstw", "#index" \n\t"\
  837. " jb 1b \n\t"
  838. #ifdef HAVE_MMX2
  839. #undef WRITEBGR24
  840. #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
  841. #else
  842. #undef WRITEBGR24
  843. #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
  844. #endif
  845. #define REAL_WRITEYUY2(dst, dstw, index) \
  846. "packuswb %%mm3, %%mm3 \n\t"\
  847. "packuswb %%mm4, %%mm4 \n\t"\
  848. "packuswb %%mm7, %%mm1 \n\t"\
  849. "punpcklbw %%mm4, %%mm3 \n\t"\
  850. "movq %%mm1, %%mm7 \n\t"\
  851. "punpcklbw %%mm3, %%mm1 \n\t"\
  852. "punpckhbw %%mm3, %%mm7 \n\t"\
  853. \
  854. MOVNTQ(%%mm1, (dst, index, 2))\
  855. MOVNTQ(%%mm7, 8(dst, index, 2))\
  856. \
  857. "add $8, "#index" \n\t"\
  858. "cmp "#dstw", "#index" \n\t"\
  859. " jb 1b \n\t"
  860. #define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
  861. static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  862. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  863. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
  864. {
  865. #ifdef HAVE_MMX
  866. if(!(c->flags & SWS_BITEXACT)){
  867. if (c->flags & SWS_ACCURATE_RND){
  868. if (uDest){
  869. YSCALEYUV2YV12X_ACCURATE( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
  870. YSCALEYUV2YV12X_ACCURATE(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
  871. }
  872. YSCALEYUV2YV12X_ACCURATE("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
  873. }else{
  874. if (uDest){
  875. YSCALEYUV2YV12X( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
  876. YSCALEYUV2YV12X(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
  877. }
  878. YSCALEYUV2YV12X("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
  879. }
  880. return;
  881. }
  882. #endif
  883. #ifdef HAVE_ALTIVEC
  884. yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
  885. chrFilter, chrSrc, chrFilterSize,
  886. dest, uDest, vDest, dstW, chrDstW);
  887. #else //HAVE_ALTIVEC
  888. yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
  889. chrFilter, chrSrc, chrFilterSize,
  890. dest, uDest, vDest, dstW, chrDstW);
  891. #endif //!HAVE_ALTIVEC
  892. }
  893. static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  894. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  895. uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
  896. {
  897. yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
  898. chrFilter, chrSrc, chrFilterSize,
  899. dest, uDest, dstW, chrDstW, dstFormat);
  900. }
  901. static inline void RENAME(yuv2yuv1)(SwsContext *c, int16_t *lumSrc, int16_t *chrSrc,
  902. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
  903. {
  904. int i;
  905. #ifdef HAVE_MMX
  906. if(!(c->flags & SWS_BITEXACT)){
  907. long p= uDest ? 3 : 1;
  908. uint8_t *src[3]= {lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW};
  909. uint8_t *dst[3]= {dest, uDest, vDest};
  910. long counter[3] = {dstW, chrDstW, chrDstW};
  911. if (c->flags & SWS_ACCURATE_RND){
  912. while(p--){
  913. __asm__ volatile(
  914. YSCALEYUV2YV121_ACCURATE
  915. :: "r" (src[p]), "r" (dst[p] + counter[p]),
  916. "g" (-counter[p])
  917. : "%"REG_a
  918. );
  919. }
  920. }else{
  921. while(p--){
  922. __asm__ volatile(
  923. YSCALEYUV2YV121
  924. :: "r" (src[p]), "r" (dst[p] + counter[p]),
  925. "g" (-counter[p])
  926. : "%"REG_a
  927. );
  928. }
  929. }
  930. return;
  931. }
  932. #endif
  933. for (i=0; i<dstW; i++)
  934. {
  935. int val= (lumSrc[i]+64)>>7;
  936. if (val&256){
  937. if (val<0) val=0;
  938. else val=255;
  939. }
  940. dest[i]= val;
  941. }
  942. if (uDest)
  943. for (i=0; i<chrDstW; i++)
  944. {
  945. int u=(chrSrc[i ]+64)>>7;
  946. int v=(chrSrc[i + VOFW]+64)>>7;
  947. if ((u|v)&256){
  948. if (u<0) u=0;
  949. else if (u>255) u=255;
  950. if (v<0) v=0;
  951. else if (v>255) v=255;
  952. }
  953. uDest[i]= u;
  954. vDest[i]= v;
  955. }
  956. }
  957. /**
  958. * vertical scale YV12 to RGB
  959. */
  960. static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  961. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  962. uint8_t *dest, long dstW, long dstY)
  963. {
  964. #ifdef HAVE_MMX
  965. long dummy=0;
  966. if(!(c->flags & SWS_BITEXACT)){
  967. if (c->flags & SWS_ACCURATE_RND){
  968. switch(c->dstFormat){
  969. case PIX_FMT_RGB32:
  970. YSCALEYUV2PACKEDX_ACCURATE
  971. YSCALEYUV2RGBX
  972. WRITEBGR32(%4, %5, %%REGa)
  973. YSCALEYUV2PACKEDX_END
  974. return;
  975. case PIX_FMT_BGR24:
  976. YSCALEYUV2PACKEDX_ACCURATE
  977. YSCALEYUV2RGBX
  978. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
  979. "add %4, %%"REG_c" \n\t"
  980. WRITEBGR24(%%REGc, %5, %%REGa)
  981. :: "r" (&c->redDither),
  982. "m" (dummy), "m" (dummy), "m" (dummy),
  983. "r" (dest), "m" (dstW)
  984. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
  985. );
  986. return;
  987. case PIX_FMT_RGB555:
  988. YSCALEYUV2PACKEDX_ACCURATE
  989. YSCALEYUV2RGBX
  990. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  991. #ifdef DITHER1XBPP
  992. "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
  993. "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
  994. "paddusb "RED_DITHER"(%0), %%mm5\n\t"
  995. #endif
  996. WRITERGB15(%4, %5, %%REGa)
  997. YSCALEYUV2PACKEDX_END
  998. return;
  999. case PIX_FMT_RGB565:
  1000. YSCALEYUV2PACKEDX_ACCURATE
  1001. YSCALEYUV2RGBX
  1002. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1003. #ifdef DITHER1XBPP
  1004. "paddusb "BLUE_DITHER"(%0), %%mm2\n\t"
  1005. "paddusb "GREEN_DITHER"(%0), %%mm4\n\t"
  1006. "paddusb "RED_DITHER"(%0), %%mm5\n\t"
  1007. #endif
  1008. WRITERGB16(%4, %5, %%REGa)
  1009. YSCALEYUV2PACKEDX_END
  1010. return;
  1011. case PIX_FMT_YUYV422:
  1012. YSCALEYUV2PACKEDX_ACCURATE
  1013. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1014. "psraw $3, %%mm3 \n\t"
  1015. "psraw $3, %%mm4 \n\t"
  1016. "psraw $3, %%mm1 \n\t"
  1017. "psraw $3, %%mm7 \n\t"
  1018. WRITEYUY2(%4, %5, %%REGa)
  1019. YSCALEYUV2PACKEDX_END
  1020. return;
  1021. }
  1022. }else{
  1023. switch(c->dstFormat)
  1024. {
  1025. case PIX_FMT_RGB32:
  1026. YSCALEYUV2PACKEDX
  1027. YSCALEYUV2RGBX
  1028. WRITEBGR32(%4, %5, %%REGa)
  1029. YSCALEYUV2PACKEDX_END
  1030. return;
  1031. case PIX_FMT_BGR24:
  1032. YSCALEYUV2PACKEDX
  1033. YSCALEYUV2RGBX
  1034. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c" \n\t" //FIXME optimize
  1035. "add %4, %%"REG_c" \n\t"
  1036. WRITEBGR24(%%REGc, %5, %%REGa)
  1037. :: "r" (&c->redDither),
  1038. "m" (dummy), "m" (dummy), "m" (dummy),
  1039. "r" (dest), "m" (dstW)
  1040. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
  1041. );
  1042. return;
  1043. case PIX_FMT_RGB555:
  1044. YSCALEYUV2PACKEDX
  1045. YSCALEYUV2RGBX
  1046. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1047. #ifdef DITHER1XBPP
  1048. "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
  1049. "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
  1050. "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
  1051. #endif
  1052. WRITERGB15(%4, %5, %%REGa)
  1053. YSCALEYUV2PACKEDX_END
  1054. return;
  1055. case PIX_FMT_RGB565:
  1056. YSCALEYUV2PACKEDX
  1057. YSCALEYUV2RGBX
  1058. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1059. #ifdef DITHER1XBPP
  1060. "paddusb "BLUE_DITHER"(%0), %%mm2 \n\t"
  1061. "paddusb "GREEN_DITHER"(%0), %%mm4 \n\t"
  1062. "paddusb "RED_DITHER"(%0), %%mm5 \n\t"
  1063. #endif
  1064. WRITERGB16(%4, %5, %%REGa)
  1065. YSCALEYUV2PACKEDX_END
  1066. return;
  1067. case PIX_FMT_YUYV422:
  1068. YSCALEYUV2PACKEDX
  1069. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1070. "psraw $3, %%mm3 \n\t"
  1071. "psraw $3, %%mm4 \n\t"
  1072. "psraw $3, %%mm1 \n\t"
  1073. "psraw $3, %%mm7 \n\t"
  1074. WRITEYUY2(%4, %5, %%REGa)
  1075. YSCALEYUV2PACKEDX_END
  1076. return;
  1077. }
  1078. }
  1079. }
  1080. #endif /* HAVE_MMX */
  1081. #ifdef HAVE_ALTIVEC
  1082. /* The following list of supported dstFormat values should
  1083. match what's found in the body of altivec_yuv2packedX() */
  1084. if (c->dstFormat==PIX_FMT_ABGR || c->dstFormat==PIX_FMT_BGRA ||
  1085. c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 ||
  1086. c->dstFormat==PIX_FMT_RGBA || c->dstFormat==PIX_FMT_ARGB)
  1087. altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize,
  1088. chrFilter, chrSrc, chrFilterSize,
  1089. dest, dstW, dstY);
  1090. else
  1091. #endif
  1092. yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
  1093. chrFilter, chrSrc, chrFilterSize,
  1094. dest, dstW, dstY);
  1095. }
  1096. /**
  1097. * vertical bilinear scale YV12 to RGB
  1098. */
  1099. static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
  1100. uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
  1101. {
  1102. int yalpha1=4095- yalpha;
  1103. int uvalpha1=4095-uvalpha;
  1104. int i;
  1105. #ifdef HAVE_MMX
  1106. if(!(c->flags & SWS_BITEXACT)){
  1107. switch(c->dstFormat)
  1108. {
  1109. //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
  1110. case PIX_FMT_RGB32:
  1111. __asm__ volatile(
  1112. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1113. "mov %4, %%"REG_b" \n\t"
  1114. "push %%"REG_BP" \n\t"
  1115. YSCALEYUV2RGB(%%REGBP, %5)
  1116. WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
  1117. "pop %%"REG_BP" \n\t"
  1118. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1119. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1120. "a" (&c->redDither)
  1121. );
  1122. return;
  1123. case PIX_FMT_BGR24:
  1124. __asm__ volatile(
  1125. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1126. "mov %4, %%"REG_b" \n\t"
  1127. "push %%"REG_BP" \n\t"
  1128. YSCALEYUV2RGB(%%REGBP, %5)
  1129. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1130. "pop %%"REG_BP" \n\t"
  1131. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1132. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1133. "a" (&c->redDither)
  1134. );
  1135. return;
  1136. case PIX_FMT_RGB555:
  1137. __asm__ volatile(
  1138. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1139. "mov %4, %%"REG_b" \n\t"
  1140. "push %%"REG_BP" \n\t"
  1141. YSCALEYUV2RGB(%%REGBP, %5)
  1142. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1143. #ifdef DITHER1XBPP
  1144. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  1145. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  1146. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  1147. #endif
  1148. WRITERGB15(%%REGb, 8280(%5), %%REGBP)
  1149. "pop %%"REG_BP" \n\t"
  1150. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1151. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1152. "a" (&c->redDither)
  1153. );
  1154. return;
  1155. case PIX_FMT_RGB565:
  1156. __asm__ volatile(
  1157. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1158. "mov %4, %%"REG_b" \n\t"
  1159. "push %%"REG_BP" \n\t"
  1160. YSCALEYUV2RGB(%%REGBP, %5)
  1161. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1162. #ifdef DITHER1XBPP
  1163. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  1164. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  1165. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  1166. #endif
  1167. WRITERGB16(%%REGb, 8280(%5), %%REGBP)
  1168. "pop %%"REG_BP" \n\t"
  1169. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1170. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1171. "a" (&c->redDither)
  1172. );
  1173. return;
  1174. case PIX_FMT_YUYV422:
  1175. __asm__ volatile(
  1176. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1177. "mov %4, %%"REG_b" \n\t"
  1178. "push %%"REG_BP" \n\t"
  1179. YSCALEYUV2PACKED(%%REGBP, %5)
  1180. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1181. "pop %%"REG_BP" \n\t"
  1182. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1183. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1184. "a" (&c->redDither)
  1185. );
  1186. return;
  1187. default: break;
  1188. }
  1189. }
  1190. #endif //HAVE_MMX
  1191. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C, YSCALE_YUV_2_GRAY16_2_C, YSCALE_YUV_2_MONO2_C)
  1192. }
  1193. /**
  1194. * YV12 to RGB without scaling or interpolating
  1195. */
  1196. static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
  1197. uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
  1198. {
  1199. const int yalpha1=0;
  1200. int i;
  1201. uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
  1202. const int yalpha= 4096; //FIXME ...
  1203. if (flags&SWS_FULL_CHR_H_INT)
  1204. {
  1205. RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
  1206. return;
  1207. }
  1208. #ifdef HAVE_MMX
  1209. if(!(flags & SWS_BITEXACT)){
  1210. if (uvalpha < 2048) // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
  1211. {
  1212. switch(dstFormat)
  1213. {
  1214. case PIX_FMT_RGB32:
  1215. __asm__ volatile(
  1216. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1217. "mov %4, %%"REG_b" \n\t"
  1218. "push %%"REG_BP" \n\t"
  1219. YSCALEYUV2RGB1(%%REGBP, %5)
  1220. WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
  1221. "pop %%"REG_BP" \n\t"
  1222. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1223. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1224. "a" (&c->redDither)
  1225. );
  1226. return;
  1227. case PIX_FMT_BGR24:
  1228. __asm__ volatile(
  1229. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1230. "mov %4, %%"REG_b" \n\t"
  1231. "push %%"REG_BP" \n\t"
  1232. YSCALEYUV2RGB1(%%REGBP, %5)
  1233. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1234. "pop %%"REG_BP" \n\t"
  1235. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1236. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1237. "a" (&c->redDither)
  1238. );
  1239. return;
  1240. case PIX_FMT_RGB555:
  1241. __asm__ volatile(
  1242. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1243. "mov %4, %%"REG_b" \n\t"
  1244. "push %%"REG_BP" \n\t"
  1245. YSCALEYUV2RGB1(%%REGBP, %5)
  1246. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1247. #ifdef DITHER1XBPP
  1248. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  1249. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  1250. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  1251. #endif
  1252. WRITERGB15(%%REGb, 8280(%5), %%REGBP)
  1253. "pop %%"REG_BP" \n\t"
  1254. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1255. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1256. "a" (&c->redDither)
  1257. );
  1258. return;
  1259. case PIX_FMT_RGB565:
  1260. __asm__ volatile(
  1261. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1262. "mov %4, %%"REG_b" \n\t"
  1263. "push %%"REG_BP" \n\t"
  1264. YSCALEYUV2RGB1(%%REGBP, %5)
  1265. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1266. #ifdef DITHER1XBPP
  1267. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  1268. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  1269. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  1270. #endif
  1271. WRITERGB16(%%REGb, 8280(%5), %%REGBP)
  1272. "pop %%"REG_BP" \n\t"
  1273. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1274. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1275. "a" (&c->redDither)
  1276. );
  1277. return;
  1278. case PIX_FMT_YUYV422:
  1279. __asm__ volatile(
  1280. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1281. "mov %4, %%"REG_b" \n\t"
  1282. "push %%"REG_BP" \n\t"
  1283. YSCALEYUV2PACKED1(%%REGBP, %5)
  1284. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1285. "pop %%"REG_BP" \n\t"
  1286. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1287. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1288. "a" (&c->redDither)
  1289. );
  1290. return;
  1291. }
  1292. }
  1293. else
  1294. {
  1295. switch(dstFormat)
  1296. {
  1297. case PIX_FMT_RGB32:
  1298. __asm__ volatile(
  1299. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1300. "mov %4, %%"REG_b" \n\t"
  1301. "push %%"REG_BP" \n\t"
  1302. YSCALEYUV2RGB1b(%%REGBP, %5)
  1303. WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
  1304. "pop %%"REG_BP" \n\t"
  1305. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1306. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1307. "a" (&c->redDither)
  1308. );
  1309. return;
  1310. case PIX_FMT_BGR24:
  1311. __asm__ volatile(
  1312. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1313. "mov %4, %%"REG_b" \n\t"
  1314. "push %%"REG_BP" \n\t"
  1315. YSCALEYUV2RGB1b(%%REGBP, %5)
  1316. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1317. "pop %%"REG_BP" \n\t"
  1318. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1319. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1320. "a" (&c->redDither)
  1321. );
  1322. return;
  1323. case PIX_FMT_RGB555:
  1324. __asm__ volatile(
  1325. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1326. "mov %4, %%"REG_b" \n\t"
  1327. "push %%"REG_BP" \n\t"
  1328. YSCALEYUV2RGB1b(%%REGBP, %5)
  1329. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1330. #ifdef DITHER1XBPP
  1331. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  1332. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  1333. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  1334. #endif
  1335. WRITERGB15(%%REGb, 8280(%5), %%REGBP)
  1336. "pop %%"REG_BP" \n\t"
  1337. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1338. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1339. "a" (&c->redDither)
  1340. );
  1341. return;
  1342. case PIX_FMT_RGB565:
  1343. __asm__ volatile(
  1344. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1345. "mov %4, %%"REG_b" \n\t"
  1346. "push %%"REG_BP" \n\t"
  1347. YSCALEYUV2RGB1b(%%REGBP, %5)
  1348. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1349. #ifdef DITHER1XBPP
  1350. "paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
  1351. "paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
  1352. "paddusb "RED_DITHER"(%5), %%mm5 \n\t"
  1353. #endif
  1354. WRITERGB16(%%REGb, 8280(%5), %%REGBP)
  1355. "pop %%"REG_BP" \n\t"
  1356. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1357. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1358. "a" (&c->redDither)
  1359. );
  1360. return;
  1361. case PIX_FMT_YUYV422:
  1362. __asm__ volatile(
  1363. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1364. "mov %4, %%"REG_b" \n\t"
  1365. "push %%"REG_BP" \n\t"
  1366. YSCALEYUV2PACKED1b(%%REGBP, %5)
  1367. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1368. "pop %%"REG_BP" \n\t"
  1369. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1370. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1371. "a" (&c->redDither)
  1372. );
  1373. return;
  1374. }
  1375. }
  1376. }
  1377. #endif /* HAVE_MMX */
  1378. if (uvalpha < 2048)
  1379. {
  1380. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C, YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C)
  1381. }else{
  1382. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C, YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C)
  1383. }
  1384. }
  1385. //FIXME yuy2* can read up to 7 samples too much
  1386. static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
  1387. {
  1388. #ifdef HAVE_MMX
  1389. __asm__ volatile(
  1390. "movq "MANGLE(bm01010101)", %%mm2 \n\t"
  1391. "mov %0, %%"REG_a" \n\t"
  1392. "1: \n\t"
  1393. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1394. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1395. "pand %%mm2, %%mm0 \n\t"
  1396. "pand %%mm2, %%mm1 \n\t"
  1397. "packuswb %%mm1, %%mm0 \n\t"
  1398. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1399. "add $8, %%"REG_a" \n\t"
  1400. " js 1b \n\t"
  1401. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1402. : "%"REG_a
  1403. );
  1404. #else
  1405. int i;
  1406. for (i=0; i<width; i++)
  1407. dst[i]= src[2*i];
  1408. #endif
  1409. }
  1410. static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
  1411. {
  1412. #ifdef HAVE_MMX
  1413. __asm__ volatile(
  1414. "movq "MANGLE(bm01010101)", %%mm4 \n\t"
  1415. "mov %0, %%"REG_a" \n\t"
  1416. "1: \n\t"
  1417. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1418. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1419. "psrlw $8, %%mm0 \n\t"
  1420. "psrlw $8, %%mm1 \n\t"
  1421. "packuswb %%mm1, %%mm0 \n\t"
  1422. "movq %%mm0, %%mm1 \n\t"
  1423. "psrlw $8, %%mm0 \n\t"
  1424. "pand %%mm4, %%mm1 \n\t"
  1425. "packuswb %%mm0, %%mm0 \n\t"
  1426. "packuswb %%mm1, %%mm1 \n\t"
  1427. "movd %%mm0, (%3, %%"REG_a") \n\t"
  1428. "movd %%mm1, (%2, %%"REG_a") \n\t"
  1429. "add $4, %%"REG_a" \n\t"
  1430. " js 1b \n\t"
  1431. : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
  1432. : "%"REG_a
  1433. );
  1434. #else
  1435. int i;
  1436. for (i=0; i<width; i++)
  1437. {
  1438. dstU[i]= src1[4*i + 1];
  1439. dstV[i]= src1[4*i + 3];
  1440. }
  1441. #endif
  1442. assert(src1 == src2);
  1443. }
  1444. /* This is almost identical to the previous function, and exists only because
  1445. * yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses. */
  1446. static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
  1447. {
  1448. #ifdef HAVE_MMX
  1449. __asm__ volatile(
  1450. "mov %0, %%"REG_a" \n\t"
  1451. "1: \n\t"
  1452. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1453. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1454. "psrlw $8, %%mm0 \n\t"
  1455. "psrlw $8, %%mm1 \n\t"
  1456. "packuswb %%mm1, %%mm0 \n\t"
  1457. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1458. "add $8, %%"REG_a" \n\t"
  1459. " js 1b \n\t"
  1460. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1461. : "%"REG_a
  1462. );
  1463. #else
  1464. int i;
  1465. for (i=0; i<width; i++)
  1466. dst[i]= src[2*i+1];
  1467. #endif
  1468. }
  1469. static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
  1470. {
  1471. #ifdef HAVE_MMX
  1472. __asm__ volatile(
  1473. "movq "MANGLE(bm01010101)", %%mm4 \n\t"
  1474. "mov %0, %%"REG_a" \n\t"
  1475. "1: \n\t"
  1476. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1477. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1478. "pand %%mm4, %%mm0 \n\t"
  1479. "pand %%mm4, %%mm1 \n\t"
  1480. "packuswb %%mm1, %%mm0 \n\t"
  1481. "movq %%mm0, %%mm1 \n\t"
  1482. "psrlw $8, %%mm0 \n\t"
  1483. "pand %%mm4, %%mm1 \n\t"
  1484. "packuswb %%mm0, %%mm0 \n\t"
  1485. "packuswb %%mm1, %%mm1 \n\t"
  1486. "movd %%mm0, (%3, %%"REG_a") \n\t"
  1487. "movd %%mm1, (%2, %%"REG_a") \n\t"
  1488. "add $4, %%"REG_a" \n\t"
  1489. " js 1b \n\t"
  1490. : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
  1491. : "%"REG_a
  1492. );
  1493. #else
  1494. int i;
  1495. for (i=0; i<width; i++)
  1496. {
  1497. dstU[i]= src1[4*i + 0];
  1498. dstV[i]= src1[4*i + 2];
  1499. }
  1500. #endif
  1501. assert(src1 == src2);
  1502. }
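/* Packed 4:2:2 layouts handled above: YUYV stores Y0 U Y1 V per pixel pair,
 * UYVY stores U Y0 V Y1. The MMX paths mask/shift the byte lanes out eight
 * luma (or four chroma) samples per iteration; the C fallbacks read the same
 * byte offsets directly, as the index arithmetic (2*i, 4*i+n) shows. */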
  1503. #define BGR2Y(type, name, shr, shg, shb, maskr, maskg, maskb, RY, GY, BY, S)\
  1504. static inline void RENAME(name)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)\
  1505. {\
  1506. int i;\
  1507. for (i=0; i<width; i++)\
  1508. {\
  1509. int b= (((type*)src)[i]>>shb)&maskb;\
  1510. int g= (((type*)src)[i]>>shg)&maskg;\
  1511. int r= (((type*)src)[i]>>shr)&maskr;\
  1512. \
  1513. dst[i]= (((RY)*r + (GY)*g + (BY)*b + (33<<((S)-1)))>>(S));\
  1514. }\
  1515. }
  1516. BGR2Y(uint32_t, bgr32ToY,16, 0, 0, 0x00FF, 0xFF00, 0x00FF, RY<< 8, GY , BY<< 8, RGB2YUV_SHIFT+8)
  1517. BGR2Y(uint32_t, rgb32ToY, 0, 0,16, 0x00FF, 0xFF00, 0x00FF, RY<< 8, GY , BY<< 8, RGB2YUV_SHIFT+8)
  1518. BGR2Y(uint16_t, bgr16ToY, 0, 0, 0, 0x001F, 0x07E0, 0xF800, RY<<11, GY<<5, BY , RGB2YUV_SHIFT+8)
  1519. BGR2Y(uint16_t, bgr15ToY, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, RY<<10, GY<<5, BY , RGB2YUV_SHIFT+7)
  1520. BGR2Y(uint16_t, rgb16ToY, 0, 0, 0, 0xF800, 0x07E0, 0x001F, RY , GY<<5, BY<<11, RGB2YUV_SHIFT+8)
  1521. BGR2Y(uint16_t, rgb15ToY, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, RY , GY<<5, BY<<10, RGB2YUV_SHIFT+7)
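/* In BGR2Y above, the bias (33<<((S)-1)) equals 16.5*2^S, so after the final
 * >>(S) it contributes the +16 luma offset plus 0.5 for rounding. The shifts
 * applied to RY/GY/BY in each instantiation compensate for where the channel
 * sits in the packed pixel: e.g. rgb16ToY keeps r in the 0xF800 position with
 * an unshifted RY, while b (mask 0x001F) is weighted with BY<<11. */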
  1522. #define BGR2UV(type, name, shr, shg, shb, maskr, maskg, maskb, RU, GU, BU, RV, GV, BV, S)\
  1523. static inline void RENAME(name)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, uint8_t *dummy, long width, uint32_t *unused)\
  1524. {\
  1525. int i;\
  1526. for (i=0; i<width; i++)\
  1527. {\
  1528. int b= (((type*)src)[i]&maskb)>>shb;\
  1529. int g= (((type*)src)[i]&maskg)>>shg;\
  1530. int r= (((type*)src)[i]&maskr)>>shr;\
  1531. \
  1532. dstU[i]= ((RU)*r + (GU)*g + (BU)*b + (257<<((S)-1)))>>(S);\
  1533. dstV[i]= ((RV)*r + (GV)*g + (BV)*b + (257<<((S)-1)))>>(S);\
  1534. }\
  1535. }\
  1536. static inline void RENAME(name ## _half)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, uint8_t *dummy, long width, uint32_t *unused)\
  1537. {\
  1538. int i;\
  1539. for (i=0; i<width; i++)\
  1540. {\
  1541. int pix0= ((type*)src)[2*i+0];\
  1542. int pix1= ((type*)src)[2*i+1];\
  1543. int g= (pix0&maskg)+(pix1&maskg);\
  1544. int b= ((pix0+pix1-g)&(maskb|(2*maskb)))>>shb;\
  1545. int r= ((pix0+pix1-g)&(maskr|(2*maskr)))>>shr;\
  1546. \
  1547. g>>=shg;\
  1548. \
  1549. dstU[i]= ((RU)*r + (GU)*g + (BU)*b + (257<<(S)))>>((S)+1);\
  1550. dstV[i]= ((RV)*r + (GV)*g + (BV)*b + (257<<(S)))>>((S)+1);\
  1551. }\
  1552. }
  1553. BGR2UV(uint32_t, bgr32ToUV,16, 0, 0, 0xFF0000, 0xFF00, 0x00FF, RU<< 8, GU , BU<< 8, RV<< 8, GV , BV<< 8, RGB2YUV_SHIFT+8)
  1554. BGR2UV(uint32_t, rgb32ToUV, 0, 0,16, 0x00FF, 0xFF00, 0xFF0000, RU<< 8, GU , BU<< 8, RV<< 8, GV , BV<< 8, RGB2YUV_SHIFT+8)
  1555. BGR2UV(uint16_t, bgr16ToUV, 0, 0, 0, 0x001F, 0x07E0, 0xF800, RU<<11, GU<<5, BU , RV<<11, GV<<5, BV , RGB2YUV_SHIFT+8)
  1556. BGR2UV(uint16_t, bgr15ToUV, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, RU<<10, GU<<5, BU , RV<<10, GV<<5, BV , RGB2YUV_SHIFT+7)
  1557. BGR2UV(uint16_t, rgb16ToUV, 0, 0, 0, 0xF800, 0x07E0, 0x001F, RU , GU<<5, BU<<11, RV , GV<<5, BV<<11, RGB2YUV_SHIFT+8)
  1558. BGR2UV(uint16_t, rgb15ToUV, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, RU , GU<<5, BU<<10, RV , GV<<5, BV<<10, RGB2YUV_SHIFT+7)
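/* Likewise in BGR2UV the bias (257<<((S)-1)) is 128.5*2^S, giving the +128
 * chroma offset plus rounding. The *_half variants sum two horizontally
 * adjacent pixels (used when c->chrSrcHSubSample is set) and therefore use a
 * (257<<(S)) bias and shift by (S)+1 to divide the doubled sum back down. */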
  1559. #ifdef HAVE_MMX
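/* MMX 24-bit RGB/BGR -> luma: 4 pixels (12 bytes) per iteration. The two movd
 * loads per pixel pair fetch overlapping byte groups so that pmaddwd against
 * the interleaved coefficient quadwords in mm5/mm6 (selected by srcFormat
 * below) yields the weighted sums, which are then biased by ff_bgr24toYOffset,
 * shifted down by 15 and packed to 4 output bytes. */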
  1560. static inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, uint8_t *src, long width, int srcFormat)
  1561. {
  1562. if(srcFormat == PIX_FMT_BGR24){
  1563. __asm__ volatile(
  1564. "movq "MANGLE(ff_bgr24toY1Coeff)", %%mm5 \n\t"
  1565. "movq "MANGLE(ff_bgr24toY2Coeff)", %%mm6 \n\t"
  1566. :
  1567. );
  1568. }else{
  1569. __asm__ volatile(
  1570. "movq "MANGLE(ff_rgb24toY1Coeff)", %%mm5 \n\t"
  1571. "movq "MANGLE(ff_rgb24toY2Coeff)", %%mm6 \n\t"
  1572. :
  1573. );
  1574. }
  1575. __asm__ volatile(
  1576. "movq "MANGLE(ff_bgr24toYOffset)", %%mm4 \n\t"
  1577. "mov %2, %%"REG_a" \n\t"
  1578. "pxor %%mm7, %%mm7 \n\t"
  1579. "1: \n\t"
  1580. PREFETCH" 64(%0) \n\t"
  1581. "movd (%0), %%mm0 \n\t"
  1582. "movd 2(%0), %%mm1 \n\t"
  1583. "movd 6(%0), %%mm2 \n\t"
  1584. "movd 8(%0), %%mm3 \n\t"
  1585. "add $12, %0 \n\t"
  1586. "punpcklbw %%mm7, %%mm0 \n\t"
  1587. "punpcklbw %%mm7, %%mm1 \n\t"
  1588. "punpcklbw %%mm7, %%mm2 \n\t"
  1589. "punpcklbw %%mm7, %%mm3 \n\t"
  1590. "pmaddwd %%mm5, %%mm0 \n\t"
  1591. "pmaddwd %%mm6, %%mm1 \n\t"
  1592. "pmaddwd %%mm5, %%mm2 \n\t"
  1593. "pmaddwd %%mm6, %%mm3 \n\t"
  1594. "paddd %%mm1, %%mm0 \n\t"
  1595. "paddd %%mm3, %%mm2 \n\t"
  1596. "paddd %%mm4, %%mm0 \n\t"
  1597. "paddd %%mm4, %%mm2 \n\t"
  1598. "psrad $15, %%mm0 \n\t"
  1599. "psrad $15, %%mm2 \n\t"
  1600. "packssdw %%mm2, %%mm0 \n\t"
  1601. "packuswb %%mm0, %%mm0 \n\t"
  1602. "movd %%mm0, (%1, %%"REG_a") \n\t"
  1603. "add $4, %%"REG_a" \n\t"
  1604. " js 1b \n\t"
  1605. : "+r" (src)
  1606. : "r" (dst+width), "g" (-width)
  1607. : "%"REG_a
  1608. );
  1609. }
  1610. static inline void RENAME(bgr24ToUV_mmx)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, long width, int srcFormat)
  1611. {
  1612. __asm__ volatile(
  1613. "movq 24+%4, %%mm6 \n\t"
  1614. "mov %3, %%"REG_a" \n\t"
  1615. "pxor %%mm7, %%mm7 \n\t"
  1616. "1: \n\t"
  1617. PREFETCH" 64(%0) \n\t"
  1618. "movd (%0), %%mm0 \n\t"
  1619. "movd 2(%0), %%mm1 \n\t"
  1620. "punpcklbw %%mm7, %%mm0 \n\t"
  1621. "punpcklbw %%mm7, %%mm1 \n\t"
  1622. "movq %%mm0, %%mm2 \n\t"
  1623. "movq %%mm1, %%mm3 \n\t"
  1624. "pmaddwd %4, %%mm0 \n\t"
  1625. "pmaddwd 8+%4, %%mm1 \n\t"
  1626. "pmaddwd 16+%4, %%mm2 \n\t"
  1627. "pmaddwd %%mm6, %%mm3 \n\t"
  1628. "paddd %%mm1, %%mm0 \n\t"
  1629. "paddd %%mm3, %%mm2 \n\t"
  1630. "movd 6(%0), %%mm1 \n\t"
  1631. "movd 8(%0), %%mm3 \n\t"
  1632. "add $12, %0 \n\t"
  1633. "punpcklbw %%mm7, %%mm1 \n\t"
  1634. "punpcklbw %%mm7, %%mm3 \n\t"
  1635. "movq %%mm1, %%mm4 \n\t"
  1636. "movq %%mm3, %%mm5 \n\t"
  1637. "pmaddwd %4, %%mm1 \n\t"
  1638. "pmaddwd 8+%4, %%mm3 \n\t"
  1639. "pmaddwd 16+%4, %%mm4 \n\t"
  1640. "pmaddwd %%mm6, %%mm5 \n\t"
  1641. "paddd %%mm3, %%mm1 \n\t"
  1642. "paddd %%mm5, %%mm4 \n\t"
  1643. "movq "MANGLE(ff_bgr24toUVOffset)", %%mm3 \n\t"
  1644. "paddd %%mm3, %%mm0 \n\t"
  1645. "paddd %%mm3, %%mm2 \n\t"
  1646. "paddd %%mm3, %%mm1 \n\t"
  1647. "paddd %%mm3, %%mm4 \n\t"
  1648. "psrad $15, %%mm0 \n\t"
  1649. "psrad $15, %%mm2 \n\t"
  1650. "psrad $15, %%mm1 \n\t"
  1651. "psrad $15, %%mm4 \n\t"
  1652. "packssdw %%mm1, %%mm0 \n\t"
  1653. "packssdw %%mm4, %%mm2 \n\t"
  1654. "packuswb %%mm0, %%mm0 \n\t"
  1655. "packuswb %%mm2, %%mm2 \n\t"
  1656. "movd %%mm0, (%1, %%"REG_a") \n\t"
  1657. "movd %%mm2, (%2, %%"REG_a") \n\t"
  1658. "add $4, %%"REG_a" \n\t"
  1659. " js 1b \n\t"
  1660. : "+r" (src)
  1661. : "r" (dstU+width), "r" (dstV+width), "g" (-width), "m"(ff_bgr24toUV[srcFormat == PIX_FMT_RGB24][0])
  1662. : "%"REG_a
  1663. );
  1664. }
  1665. #endif
  1666. static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
  1667. {
  1668. #ifdef HAVE_MMX
  1669. RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_BGR24);
  1670. #else
  1671. int i;
  1672. for (i=0; i<width; i++)
  1673. {
  1674. int b= src[i*3+0];
  1675. int g= src[i*3+1];
  1676. int r= src[i*3+2];
  1677. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
  1678. }
  1679. #endif /* HAVE_MMX */
  1680. }
  1681. static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
  1682. {
  1683. #ifdef HAVE_MMX
  1684. RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_BGR24);
  1685. #else
  1686. int i;
  1687. for (i=0; i<width; i++)
  1688. {
  1689. int b= src1[3*i + 0];
  1690. int g= src1[3*i + 1];
  1691. int r= src1[3*i + 2];
  1692. dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
  1693. dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
  1694. }
  1695. #endif /* HAVE_MMX */
  1696. assert(src1 == src2);
  1697. }
  1698. static inline void RENAME(bgr24ToUV_half)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
  1699. {
  1700. int i;
  1701. for (i=0; i<width; i++)
  1702. {
  1703. int b= src1[6*i + 0] + src1[6*i + 3];
  1704. int g= src1[6*i + 1] + src1[6*i + 4];
  1705. int r= src1[6*i + 2] + src1[6*i + 5];
  1706. dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
  1707. dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
  1708. }
  1709. assert(src1 == src2);
  1710. }
  1711. static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
  1712. {
  1713. #ifdef HAVE_MMX
  1714. RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_RGB24);
  1715. #else
  1716. int i;
  1717. for (i=0; i<width; i++)
  1718. {
  1719. int r= src[i*3+0];
  1720. int g= src[i*3+1];
  1721. int b= src[i*3+2];
  1722. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
  1723. }
  1724. #endif
  1725. }
  1726. static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
  1727. {
  1728. int i;
  1729. assert(src1==src2);
  1730. #ifdef HAVE_MMX
  1731. RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_RGB24);
  1732. #else
  1733. for (i=0; i<width; i++)
  1734. {
  1735. int r= src1[3*i + 0];
  1736. int g= src1[3*i + 1];
  1737. int b= src1[3*i + 2];
  1738. dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
  1739. dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
  1740. }
  1741. #endif
  1742. }
  1743. static inline void RENAME(rgb24ToUV_half)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
  1744. {
  1745. int i;
  1746. assert(src1==src2);
  1747. for (i=0; i<width; i++)
  1748. {
  1749. int r= src1[6*i + 0] + src1[6*i + 3];
  1750. int g= src1[6*i + 1] + src1[6*i + 4];
  1751. int b= src1[6*i + 2] + src1[6*i + 5];
  1752. dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
  1753. dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
  1754. }
  1755. }
  1756. static inline void RENAME(palToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *pal)
  1757. {
  1758. int i;
  1759. for (i=0; i<width; i++)
  1760. {
  1761. int d= src[i];
  1762. dst[i]= pal[d] & 0xFF;
  1763. }
  1764. }
  1765. static inline void RENAME(palToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *pal)
  1766. {
  1767. int i;
  1768. assert(src1 == src2);
  1769. for (i=0; i<width; i++)
  1770. {
  1771. int p= pal[src1[i]];
  1772. dstU[i]= p>>8;
  1773. dstV[i]= p>>16;
  1774. }
  1775. }
  1776. static inline void RENAME(monowhite2Y)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
  1777. {
  1778. int i, j;
  1779. for (i=0; i<width/8; i++){
  1780. int d= ~src[i];
  1781. for(j=0; j<8; j++)
  1782. dst[8*i+j]= ((d>>(7-j))&1)*255;
  1783. }
  1784. }
  1785. static inline void RENAME(monoblack2Y)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
  1786. {
  1787. int i, j;
  1788. for (i=0; i<width/8; i++){
  1789. int d= src[i];
  1790. for(j=0; j<8; j++)
  1791. dst[8*i+j]= ((d>>(7-j))&1)*255;
  1792. }
  1793. }
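/* monowhite2Y / monoblack2Y expand 1 bit per pixel, MSB first, into 0/255
 * bytes; monowhite additionally inverts each input byte since white is
 * stored as 0 there. */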
  1794. // bilinear / bicubic scaling
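/* Generic horizontal scaler: for each output sample i it convolves filterSize
 * input bytes starting at filterPos[i] with 16-bit coefficients, roughly
 *     dst[i] = clip((sum_j src[filterPos[i]+j] * filter[i*filterSize+j]) >> 7)
 * producing 15-bit intermediate samples (see the C fallback at the end of the
 * function). The MMX paths specialize filterSize 4 and 8 and use a generic
 * inner loop otherwise. */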
  1795. static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
  1796. int16_t *filter, int16_t *filterPos, long filterSize)
  1797. {
  1798. #ifdef HAVE_MMX
  1799. assert(filterSize % 4 == 0 && filterSize>0);
  1800. if (filterSize==4) // Always true for upscaling, sometimes for down, too.
  1801. {
  1802. long counter= -2*dstW;
  1803. filter-= counter*2;
  1804. filterPos-= counter/2;
  1805. dst-= counter/2;
  1806. __asm__ volatile(
  1807. #if defined(PIC)
  1808. "push %%"REG_b" \n\t"
  1809. #endif
  1810. "pxor %%mm7, %%mm7 \n\t"
  1811. "push %%"REG_BP" \n\t" // we use 7 regs here ...
  1812. "mov %%"REG_a", %%"REG_BP" \n\t"
  1813. ASMALIGN(4)
  1814. "1: \n\t"
  1815. "movzwl (%2, %%"REG_BP"), %%eax \n\t"
  1816. "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
  1817. "movq (%1, %%"REG_BP", 4), %%mm1 \n\t"
  1818. "movq 8(%1, %%"REG_BP", 4), %%mm3 \n\t"
  1819. "movd (%3, %%"REG_a"), %%mm0 \n\t"
  1820. "movd (%3, %%"REG_b"), %%mm2 \n\t"
  1821. "punpcklbw %%mm7, %%mm0 \n\t"
  1822. "punpcklbw %%mm7, %%mm2 \n\t"
  1823. "pmaddwd %%mm1, %%mm0 \n\t"
  1824. "pmaddwd %%mm2, %%mm3 \n\t"
  1825. "movq %%mm0, %%mm4 \n\t"
  1826. "punpckldq %%mm3, %%mm0 \n\t"
  1827. "punpckhdq %%mm3, %%mm4 \n\t"
  1828. "paddd %%mm4, %%mm0 \n\t"
  1829. "psrad $7, %%mm0 \n\t"
  1830. "packssdw %%mm0, %%mm0 \n\t"
  1831. "movd %%mm0, (%4, %%"REG_BP") \n\t"
  1832. "add $4, %%"REG_BP" \n\t"
  1833. " jnc 1b \n\t"
  1834. "pop %%"REG_BP" \n\t"
  1835. #if defined(PIC)
  1836. "pop %%"REG_b" \n\t"
  1837. #endif
  1838. : "+a" (counter)
  1839. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  1840. #if !defined(PIC)
  1841. : "%"REG_b
  1842. #endif
  1843. );
  1844. }
  1845. else if (filterSize==8)
  1846. {
  1847. long counter= -2*dstW;
  1848. filter-= counter*4;
  1849. filterPos-= counter/2;
  1850. dst-= counter/2;
  1851. __asm__ volatile(
  1852. #if defined(PIC)
  1853. "push %%"REG_b" \n\t"
  1854. #endif
  1855. "pxor %%mm7, %%mm7 \n\t"
  1856. "push %%"REG_BP" \n\t" // we use 7 regs here ...
  1857. "mov %%"REG_a", %%"REG_BP" \n\t"
  1858. ASMALIGN(4)
  1859. "1: \n\t"
  1860. "movzwl (%2, %%"REG_BP"), %%eax \n\t"
  1861. "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
  1862. "movq (%1, %%"REG_BP", 8), %%mm1 \n\t"
  1863. "movq 16(%1, %%"REG_BP", 8), %%mm3 \n\t"
  1864. "movd (%3, %%"REG_a"), %%mm0 \n\t"
  1865. "movd (%3, %%"REG_b"), %%mm2 \n\t"
  1866. "punpcklbw %%mm7, %%mm0 \n\t"
  1867. "punpcklbw %%mm7, %%mm2 \n\t"
  1868. "pmaddwd %%mm1, %%mm0 \n\t"
  1869. "pmaddwd %%mm2, %%mm3 \n\t"
  1870. "movq 8(%1, %%"REG_BP", 8), %%mm1 \n\t"
  1871. "movq 24(%1, %%"REG_BP", 8), %%mm5 \n\t"
  1872. "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
  1873. "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
  1874. "punpcklbw %%mm7, %%mm4 \n\t"
  1875. "punpcklbw %%mm7, %%mm2 \n\t"
  1876. "pmaddwd %%mm1, %%mm4 \n\t"
  1877. "pmaddwd %%mm2, %%mm5 \n\t"
  1878. "paddd %%mm4, %%mm0 \n\t"
  1879. "paddd %%mm5, %%mm3 \n\t"
  1880. "movq %%mm0, %%mm4 \n\t"
  1881. "punpckldq %%mm3, %%mm0 \n\t"
  1882. "punpckhdq %%mm3, %%mm4 \n\t"
  1883. "paddd %%mm4, %%mm0 \n\t"
  1884. "psrad $7, %%mm0 \n\t"
  1885. "packssdw %%mm0, %%mm0 \n\t"
  1886. "movd %%mm0, (%4, %%"REG_BP") \n\t"
  1887. "add $4, %%"REG_BP" \n\t"
  1888. " jnc 1b \n\t"
  1889. "pop %%"REG_BP" \n\t"
  1890. #if defined(PIC)
  1891. "pop %%"REG_b" \n\t"
  1892. #endif
  1893. : "+a" (counter)
  1894. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  1895. #if !defined(PIC)
  1896. : "%"REG_b
  1897. #endif
  1898. );
  1899. }
  1900. else
  1901. {
  1902. uint8_t *offset = src+filterSize;
  1903. long counter= -2*dstW;
  1904. //filter-= counter*filterSize/2;
  1905. filterPos-= counter/2;
  1906. dst-= counter/2;
  1907. __asm__ volatile(
  1908. "pxor %%mm7, %%mm7 \n\t"
  1909. ASMALIGN(4)
  1910. "1: \n\t"
  1911. "mov %2, %%"REG_c" \n\t"
  1912. "movzwl (%%"REG_c", %0), %%eax \n\t"
  1913. "movzwl 2(%%"REG_c", %0), %%edx \n\t"
  1914. "mov %5, %%"REG_c" \n\t"
  1915. "pxor %%mm4, %%mm4 \n\t"
  1916. "pxor %%mm5, %%mm5 \n\t"
  1917. "2: \n\t"
  1918. "movq (%1), %%mm1 \n\t"
  1919. "movq (%1, %6), %%mm3 \n\t"
  1920. "movd (%%"REG_c", %%"REG_a"), %%mm0 \n\t"
  1921. "movd (%%"REG_c", %%"REG_d"), %%mm2 \n\t"
  1922. "punpcklbw %%mm7, %%mm0 \n\t"
  1923. "punpcklbw %%mm7, %%mm2 \n\t"
  1924. "pmaddwd %%mm1, %%mm0 \n\t"
  1925. "pmaddwd %%mm2, %%mm3 \n\t"
  1926. "paddd %%mm3, %%mm5 \n\t"
  1927. "paddd %%mm0, %%mm4 \n\t"
  1928. "add $8, %1 \n\t"
  1929. "add $4, %%"REG_c" \n\t"
  1930. "cmp %4, %%"REG_c" \n\t"
  1931. " jb 2b \n\t"
  1932. "add %6, %1 \n\t"
  1933. "movq %%mm4, %%mm0 \n\t"
  1934. "punpckldq %%mm5, %%mm4 \n\t"
  1935. "punpckhdq %%mm5, %%mm0 \n\t"
  1936. "paddd %%mm0, %%mm4 \n\t"
  1937. "psrad $7, %%mm4 \n\t"
  1938. "packssdw %%mm4, %%mm4 \n\t"
  1939. "mov %3, %%"REG_a" \n\t"
  1940. "movd %%mm4, (%%"REG_a", %0) \n\t"
  1941. "add $4, %0 \n\t"
  1942. " jnc 1b \n\t"
  1943. : "+r" (counter), "+r" (filter)
  1944. : "m" (filterPos), "m" (dst), "m"(offset),
  1945. "m" (src), "r" (filterSize*2)
  1946. : "%"REG_a, "%"REG_c, "%"REG_d
  1947. );
  1948. }
  1949. #else
  1950. #ifdef HAVE_ALTIVEC
  1951. hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
  1952. #else
  1953. int i;
  1954. for (i=0; i<dstW; i++)
  1955. {
  1956. int j;
  1957. int srcPos= filterPos[i];
  1958. int val=0;
  1959. //printf("filterPos: %d\n", filterPos[i]);
  1960. for (j=0; j<filterSize; j++)
  1961. {
  1962. //printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
  1963. val += ((int)src[srcPos + j])*filter[filterSize*i + j];
  1964. }
  1965. //filter += hFilterSize;
  1966. dst[i] = FFMIN(val>>7, (1<<15)-1); // the cubic equation does overflow ...
  1967. //dst[i] = val>>7;
  1968. }
  1969. #endif /* HAVE_ALTIVEC */
  1970. #endif /* HAVE_MMX */
  1971. }
  1972. // *** horizontal scale Y line to temp buffer
  1973. static inline void RENAME(hyscale)(SwsContext *c, uint16_t *dst, long dstWidth, uint8_t *src, int srcW, int xInc,
  1974. int flags, int canMMX2BeUsed, int16_t *hLumFilter,
  1975. int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
  1976. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  1977. int32_t *mmx2FilterPos, uint32_t *pal)
  1978. {
  1979. if (srcFormat==PIX_FMT_YUYV422 || srcFormat==PIX_FMT_GRAY16BE)
  1980. {
  1981. RENAME(yuy2ToY)(formatConvBuffer, src, srcW, pal);
  1982. src= formatConvBuffer;
  1983. }
  1984. else if (srcFormat==PIX_FMT_UYVY422 || srcFormat==PIX_FMT_GRAY16LE)
  1985. {
  1986. RENAME(uyvyToY)(formatConvBuffer, src, srcW, pal);
  1987. src= formatConvBuffer;
  1988. }
  1989. else if (srcFormat==PIX_FMT_RGB32)
  1990. {
  1991. RENAME(bgr32ToY)(formatConvBuffer, src, srcW, pal);
  1992. src= formatConvBuffer;
  1993. }
  1994. else if (srcFormat==PIX_FMT_RGB32_1)
  1995. {
  1996. RENAME(bgr32ToY)(formatConvBuffer, src+ALT32_CORR, srcW, pal);
  1997. src= formatConvBuffer;
  1998. }
  1999. else if (srcFormat==PIX_FMT_BGR24)
  2000. {
  2001. RENAME(bgr24ToY)(formatConvBuffer, src, srcW, pal);
  2002. src= formatConvBuffer;
  2003. }
  2004. else if (srcFormat==PIX_FMT_BGR565)
  2005. {
  2006. RENAME(bgr16ToY)(formatConvBuffer, src, srcW, pal);
  2007. src= formatConvBuffer;
  2008. }
  2009. else if (srcFormat==PIX_FMT_BGR555)
  2010. {
  2011. RENAME(bgr15ToY)(formatConvBuffer, src, srcW, pal);
  2012. src= formatConvBuffer;
  2013. }
  2014. else if (srcFormat==PIX_FMT_BGR32)
  2015. {
  2016. RENAME(rgb32ToY)(formatConvBuffer, src, srcW, pal);
  2017. src= formatConvBuffer;
  2018. }
  2019. else if (srcFormat==PIX_FMT_BGR32_1)
  2020. {
  2021. RENAME(rgb32ToY)(formatConvBuffer, src+ALT32_CORR, srcW, pal);
  2022. src= formatConvBuffer;
  2023. }
  2024. else if (srcFormat==PIX_FMT_RGB24)
  2025. {
  2026. RENAME(rgb24ToY)(formatConvBuffer, src, srcW, pal);
  2027. src= formatConvBuffer;
  2028. }
  2029. else if (srcFormat==PIX_FMT_RGB565)
  2030. {
  2031. RENAME(rgb16ToY)(formatConvBuffer, src, srcW, pal);
  2032. src= formatConvBuffer;
  2033. }
  2034. else if (srcFormat==PIX_FMT_RGB555)
  2035. {
  2036. RENAME(rgb15ToY)(formatConvBuffer, src, srcW, pal);
  2037. src= formatConvBuffer;
  2038. }
  2039. else if (srcFormat==PIX_FMT_RGB8 || srcFormat==PIX_FMT_BGR8 || srcFormat==PIX_FMT_PAL8 || srcFormat==PIX_FMT_BGR4_BYTE || srcFormat==PIX_FMT_RGB4_BYTE)
  2040. {
  2041. RENAME(palToY)(formatConvBuffer, src, srcW, pal);
  2042. src= formatConvBuffer;
  2043. }
  2044. else if (srcFormat==PIX_FMT_MONOBLACK)
  2045. {
  2046. RENAME(monoblack2Y)(formatConvBuffer, src, srcW, pal);
  2047. src= formatConvBuffer;
  2048. }
  2049. else if (srcFormat==PIX_FMT_MONOWHITE)
  2050. {
  2051. RENAME(monowhite2Y)(formatConvBuffer, src, srcW, pal);
  2052. src= formatConvBuffer;
  2053. }
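/* From here on src points to a plain 8-bit luma line: packed YUV, RGB, 16-bit
 * gray, paletted and monochrome inputs have been converted into
 * formatConvBuffer by the helpers above, while planar 8-bit input passes
 * through untouched. What follows only chooses how to scale it horizontally. */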
  2054. #ifdef HAVE_MMX
  2055. // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
  2056. if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2057. #else
  2058. if (!(flags&SWS_FAST_BILINEAR))
  2059. #endif
  2060. {
  2061. RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
  2062. }
  2063. else // fast bilinear upscale / crap downscale
  2064. {
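/* Two paths: when canMMX2BeUsed, jump into the run-time generated scaler
 * (funnyYCode) driven by mmx2Filter/mmx2FilterPos; otherwise run the plain
 * x86 loop below, a fixed-point DDA that linearly blends src[xx] and
 * src[xx+1] by the fractional x position and stores 15-bit (value*128)
 * samples. */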
  2065. #if defined(ARCH_X86)
  2066. #ifdef HAVE_MMX2
  2067. int i;
  2068. #if defined(PIC)
  2069. uint64_t ebxsave __attribute__((aligned(8)));
  2070. #endif
  2071. if (canMMX2BeUsed)
  2072. {
  2073. __asm__ volatile(
  2074. #if defined(PIC)
  2075. "mov %%"REG_b", %5 \n\t"
  2076. #endif
  2077. "pxor %%mm7, %%mm7 \n\t"
  2078. "mov %0, %%"REG_c" \n\t"
  2079. "mov %1, %%"REG_D" \n\t"
  2080. "mov %2, %%"REG_d" \n\t"
  2081. "mov %3, %%"REG_b" \n\t"
  2082. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2083. PREFETCH" (%%"REG_c") \n\t"
  2084. PREFETCH" 32(%%"REG_c") \n\t"
  2085. PREFETCH" 64(%%"REG_c") \n\t"
  2086. #ifdef ARCH_X86_64
  2087. #define FUNNY_Y_CODE \
  2088. "movl (%%"REG_b"), %%esi \n\t"\
  2089. "call *%4 \n\t"\
  2090. "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
  2091. "add %%"REG_S", %%"REG_c" \n\t"\
  2092. "add %%"REG_a", %%"REG_D" \n\t"\
  2093. "xor %%"REG_a", %%"REG_a" \n\t"\
  2094. #else
  2095. #define FUNNY_Y_CODE \
  2096. "movl (%%"REG_b"), %%esi \n\t"\
  2097. "call *%4 \n\t"\
  2098. "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
  2099. "add %%"REG_a", %%"REG_D" \n\t"\
  2100. "xor %%"REG_a", %%"REG_a" \n\t"\
  2101. #endif /* ARCH_X86_64 */
  2102. FUNNY_Y_CODE
  2103. FUNNY_Y_CODE
  2104. FUNNY_Y_CODE
  2105. FUNNY_Y_CODE
  2106. FUNNY_Y_CODE
  2107. FUNNY_Y_CODE
  2108. FUNNY_Y_CODE
  2109. FUNNY_Y_CODE
  2110. #if defined(PIC)
  2111. "mov %5, %%"REG_b" \n\t"
  2112. #endif
  2113. :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2114. "m" (funnyYCode)
  2115. #if defined(PIC)
  2116. ,"m" (ebxsave)
  2117. #endif
  2118. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
  2119. #if !defined(PIC)
  2120. ,"%"REG_b
  2121. #endif
  2122. );
  2123. for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
  2124. }
  2125. else
  2126. {
  2127. #endif /* HAVE_MMX2 */
  2128. long xInc_shr16 = xInc >> 16;
  2129. uint16_t xInc_mask = xInc & 0xffff;
  2130. //NO MMX just normal asm ...
  2131. __asm__ volatile(
  2132. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2133. "xor %%"REG_d", %%"REG_d" \n\t" // xx
  2134. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2135. ASMALIGN(4)
  2136. "1: \n\t"
  2137. "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
  2138. "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2139. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2140. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2141. "shll $16, %%edi \n\t"
  2142. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2143. "mov %1, %%"REG_D" \n\t"
  2144. "shrl $9, %%esi \n\t"
  2145. "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
  2146. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2147. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2148. "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
  2149. "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2150. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2151. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2152. "shll $16, %%edi \n\t"
  2153. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2154. "mov %1, %%"REG_D" \n\t"
  2155. "shrl $9, %%esi \n\t"
  2156. "movw %%si, 2(%%"REG_D", %%"REG_a", 2) \n\t"
  2157. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2158. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2159. "add $2, %%"REG_a" \n\t"
  2160. "cmp %2, %%"REG_a" \n\t"
  2161. " jb 1b \n\t"
  2162. :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
  2163. : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
  2164. );
  2165. #ifdef HAVE_MMX2
  2166. } //if MMX2 can't be used
  2167. #endif
  2168. #else
  2169. int i;
  2170. unsigned int xpos=0;
  2171. for (i=0;i<dstWidth;i++)
  2172. {
  2173. register unsigned int xx=xpos>>16;
  2174. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2175. dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
  2176. xpos+=xInc;
  2177. }
  2178. #endif /* defined(ARCH_X86) */
  2179. }
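/* Optional range conversion on the 15-bit luma line (value = 8-bit*128),
 * in effect  limited = full*219/255 + 16  and its inverse:
 * 14071/2^14 ~= 219/255, 33561947/2^14 ~= 16*128 + 0.5 (rounding), and in the
 * other direction 19077/2^14 ~= 255/219 with the input clipped to 30189 so
 * the result stays within 15 bits. */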
  2180. if(c->srcRange != c->dstRange && !(isRGB(c->dstFormat) || isBGR(c->dstFormat))){
  2181. int i;
2182. //FIXME all pal and rgb srcFormats could do this conversion as well
  2183. //FIXME all scalers more complex than bilinear could do half of this transform
  2184. if(c->srcRange){
  2185. for (i=0; i<dstWidth; i++)
  2186. dst[i]= (dst[i]*14071 + 33561947)>>14;
  2187. }else{
  2188. for (i=0; i<dstWidth; i++)
  2189. dst[i]= (FFMIN(dst[i],30189)*19077 - 39057361)>>14;
  2190. }
  2191. }
  2192. }
  2193. inline static void RENAME(hcscale)(SwsContext *c, uint16_t *dst, long dstWidth, uint8_t *src1, uint8_t *src2,
  2194. int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
  2195. int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
  2196. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  2197. int32_t *mmx2FilterPos, uint32_t *pal)
  2198. {
  2199. if (srcFormat==PIX_FMT_YUYV422)
  2200. {
  2201. RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2202. src1= formatConvBuffer;
  2203. src2= formatConvBuffer+VOFW;
  2204. }
  2205. else if (srcFormat==PIX_FMT_UYVY422)
  2206. {
  2207. RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2208. src1= formatConvBuffer;
  2209. src2= formatConvBuffer+VOFW;
  2210. }
  2211. else if (srcFormat==PIX_FMT_RGB32)
  2212. {
  2213. if(c->chrSrcHSubSample)
  2214. RENAME(bgr32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2215. else
  2216. RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2217. src1= formatConvBuffer;
  2218. src2= formatConvBuffer+VOFW;
  2219. }
  2220. else if (srcFormat==PIX_FMT_RGB32_1)
  2221. {
  2222. if(c->chrSrcHSubSample)
  2223. RENAME(bgr32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal);
  2224. else
  2225. RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal);
  2226. src1= formatConvBuffer;
  2227. src2= formatConvBuffer+VOFW;
  2228. }
  2229. else if (srcFormat==PIX_FMT_BGR24)
  2230. {
  2231. if(c->chrSrcHSubSample)
  2232. RENAME(bgr24ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2233. else
  2234. RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2235. src1= formatConvBuffer;
  2236. src2= formatConvBuffer+VOFW;
  2237. }
  2238. else if (srcFormat==PIX_FMT_BGR565)
  2239. {
  2240. if(c->chrSrcHSubSample)
  2241. RENAME(bgr16ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2242. else
  2243. RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2244. src1= formatConvBuffer;
  2245. src2= formatConvBuffer+VOFW;
  2246. }
  2247. else if (srcFormat==PIX_FMT_BGR555)
  2248. {
  2249. if(c->chrSrcHSubSample)
  2250. RENAME(bgr15ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2251. else
  2252. RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2253. src1= formatConvBuffer;
  2254. src2= formatConvBuffer+VOFW;
  2255. }
  2256. else if (srcFormat==PIX_FMT_BGR32)
  2257. {
  2258. if(c->chrSrcHSubSample)
  2259. RENAME(rgb32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2260. else
  2261. RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2262. src1= formatConvBuffer;
  2263. src2= formatConvBuffer+VOFW;
  2264. }
  2265. else if (srcFormat==PIX_FMT_BGR32_1)
  2266. {
  2267. if(c->chrSrcHSubSample)
  2268. RENAME(rgb32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal);
  2269. else
  2270. RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal);
  2271. src1= formatConvBuffer;
  2272. src2= formatConvBuffer+VOFW;
  2273. }
  2274. else if (srcFormat==PIX_FMT_RGB24)
  2275. {
  2276. if(c->chrSrcHSubSample)
  2277. RENAME(rgb24ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2278. else
  2279. RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2280. src1= formatConvBuffer;
  2281. src2= formatConvBuffer+VOFW;
  2282. }
  2283. else if (srcFormat==PIX_FMT_RGB565)
  2284. {
  2285. if(c->chrSrcHSubSample)
  2286. RENAME(rgb16ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2287. else
  2288. RENAME(rgb16ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2289. src1= formatConvBuffer;
  2290. src2= formatConvBuffer+VOFW;
  2291. }
  2292. else if (srcFormat==PIX_FMT_RGB555)
  2293. {
  2294. if(c->chrSrcHSubSample)
  2295. RENAME(rgb15ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2296. else
  2297. RENAME(rgb15ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2298. src1= formatConvBuffer;
  2299. src2= formatConvBuffer+VOFW;
  2300. }
  2301. else if (isGray(srcFormat) || srcFormat==PIX_FMT_MONOBLACK || srcFormat==PIX_FMT_MONOWHITE)
  2302. {
  2303. return;
  2304. }
  2305. else if (srcFormat==PIX_FMT_RGB8 || srcFormat==PIX_FMT_BGR8 || srcFormat==PIX_FMT_PAL8 || srcFormat==PIX_FMT_BGR4_BYTE || srcFormat==PIX_FMT_RGB4_BYTE)
  2306. {
  2307. RENAME(palToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2308. src1= formatConvBuffer;
  2309. src2= formatConvBuffer+VOFW;
  2310. }
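/* As in hyscale: src1/src2 now point to plain 8-bit U and V lines (possibly
 * formatConvBuffer and formatConvBuffer+VOFW); gray and mono inputs returned
 * early above since they carry no chroma. */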
  2311. #ifdef HAVE_MMX
  2312. // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
  2313. if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2314. #else
  2315. if (!(flags&SWS_FAST_BILINEAR))
  2316. #endif
  2317. {
  2318. RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2319. RENAME(hScale)(dst+VOFW, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2320. }
  2321. else // fast bilinear upscale / crap downscale
  2322. {
  2323. #if defined(ARCH_X86)
  2324. #ifdef HAVE_MMX2
  2325. int i;
  2326. #if defined(PIC)
  2327. uint64_t ebxsave __attribute__((aligned(8)));
  2328. #endif
  2329. if (canMMX2BeUsed)
  2330. {
  2331. __asm__ volatile(
  2332. #if defined(PIC)
  2333. "mov %%"REG_b", %6 \n\t"
  2334. #endif
  2335. "pxor %%mm7, %%mm7 \n\t"
  2336. "mov %0, %%"REG_c" \n\t"
  2337. "mov %1, %%"REG_D" \n\t"
  2338. "mov %2, %%"REG_d" \n\t"
  2339. "mov %3, %%"REG_b" \n\t"
  2340. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2341. PREFETCH" (%%"REG_c") \n\t"
  2342. PREFETCH" 32(%%"REG_c") \n\t"
  2343. PREFETCH" 64(%%"REG_c") \n\t"
  2344. #ifdef ARCH_X86_64
  2345. #define FUNNY_UV_CODE \
  2346. "movl (%%"REG_b"), %%esi \n\t"\
  2347. "call *%4 \n\t"\
  2348. "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
  2349. "add %%"REG_S", %%"REG_c" \n\t"\
  2350. "add %%"REG_a", %%"REG_D" \n\t"\
  2351. "xor %%"REG_a", %%"REG_a" \n\t"\
  2352. #else
  2353. #define FUNNY_UV_CODE \
  2354. "movl (%%"REG_b"), %%esi \n\t"\
  2355. "call *%4 \n\t"\
  2356. "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
  2357. "add %%"REG_a", %%"REG_D" \n\t"\
  2358. "xor %%"REG_a", %%"REG_a" \n\t"\
  2359. #endif /* ARCH_X86_64 */
  2360. FUNNY_UV_CODE
  2361. FUNNY_UV_CODE
  2362. FUNNY_UV_CODE
  2363. FUNNY_UV_CODE
  2364. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2365. "mov %5, %%"REG_c" \n\t" // src
  2366. "mov %1, %%"REG_D" \n\t" // buf1
  2367. "add $"AV_STRINGIFY(VOF)", %%"REG_D" \n\t"
  2368. PREFETCH" (%%"REG_c") \n\t"
  2369. PREFETCH" 32(%%"REG_c") \n\t"
  2370. PREFETCH" 64(%%"REG_c") \n\t"
  2371. FUNNY_UV_CODE
  2372. FUNNY_UV_CODE
  2373. FUNNY_UV_CODE
  2374. FUNNY_UV_CODE
  2375. #if defined(PIC)
  2376. "mov %6, %%"REG_b" \n\t"
  2377. #endif
  2378. :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2379. "m" (funnyUVCode), "m" (src2)
  2380. #if defined(PIC)
  2381. ,"m" (ebxsave)
  2382. #endif
  2383. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
  2384. #if !defined(PIC)
  2385. ,"%"REG_b
  2386. #endif
  2387. );
  2388. for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
  2389. {
  2390. //printf("%d %d %d\n", dstWidth, i, srcW);
  2391. dst[i] = src1[srcW-1]*128;
  2392. dst[i+VOFW] = src2[srcW-1]*128;
  2393. }
  2394. }
  2395. else
  2396. {
  2397. #endif /* HAVE_MMX2 */
  2398. long xInc_shr16 = (long) (xInc >> 16);
  2399. uint16_t xInc_mask = xInc & 0xffff;
  2400. __asm__ volatile(
  2401. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2402. "xor %%"REG_d", %%"REG_d" \n\t" // xx
  2403. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2404. ASMALIGN(4)
  2405. "1: \n\t"
  2406. "mov %0, %%"REG_S" \n\t"
  2407. "movzbl (%%"REG_S", %%"REG_d"), %%edi \n\t" //src[xx]
  2408. "movzbl 1(%%"REG_S", %%"REG_d"), %%esi \n\t" //src[xx+1]
  2409. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2410. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2411. "shll $16, %%edi \n\t"
  2412. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2413. "mov %1, %%"REG_D" \n\t"
  2414. "shrl $9, %%esi \n\t"
  2415. "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
  2416. "movzbl (%5, %%"REG_d"), %%edi \n\t" //src[xx]
  2417. "movzbl 1(%5, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2418. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2419. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2420. "shll $16, %%edi \n\t"
  2421. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2422. "mov %1, %%"REG_D" \n\t"
  2423. "shrl $9, %%esi \n\t"
  2424. "movw %%si, "AV_STRINGIFY(VOF)"(%%"REG_D", %%"REG_a", 2) \n\t"
  2425. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2426. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2427. "add $1, %%"REG_a" \n\t"
  2428. "cmp %2, %%"REG_a" \n\t"
  2429. " jb 1b \n\t"
  2430. /* GCC 3.3 makes MPlayer crash on IA-32 machines when using "g" operand here,
  2431. which is needed to support GCC 4.0. */
  2432. #if defined(ARCH_X86_64) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
  2433. :: "m" (src1), "m" (dst), "g" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
  2434. #else
  2435. :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
  2436. #endif
  2437. "r" (src2)
  2438. : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
  2439. );
  2440. #ifdef HAVE_MMX2
  2441. } //if MMX2 can't be used
  2442. #endif
  2443. #else
  2444. int i;
  2445. unsigned int xpos=0;
  2446. for (i=0;i<dstWidth;i++)
  2447. {
  2448. register unsigned int xx=xpos>>16;
  2449. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2450. dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
  2451. dst[i+VOFW]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
  2452. /* slower
  2453. dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
  2454. dst[i+VOFW]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
  2455. */
  2456. xpos+=xInc;
  2457. }
  2458. #endif /* defined(ARCH_X86) */
  2459. }
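/* Same idea as the luma range conversion in hyscale, applied to both 15-bit
 * chroma lines (dst[i] and dst[i+VOFW]): 1799/2^11 ~= 224/255 (limited-range
 * chroma spans 16..240) and the additive constants keep the 128 centre in
 * place. */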
  2460. if(c->srcRange != c->dstRange && !(isRGB(c->dstFormat) || isBGR(c->dstFormat))){
  2461. int i;
2462. //FIXME all pal and rgb srcFormats could do this conversion as well
  2463. //FIXME all scalers more complex than bilinear could do half of this transform
  2464. if(c->srcRange){
  2465. for (i=0; i<dstWidth; i++){
  2466. dst[i ]= (dst[i ]*1799 + 4081085)>>11; //1469
  2467. dst[i+VOFW]= (dst[i+VOFW]*1799 + 4081085)>>11; //1469
  2468. }
  2469. }else{
  2470. for (i=0; i<dstWidth; i++){
  2471. dst[i ]= (FFMIN(dst[i ],30775)*4663 - 9289992)>>12; //-264
  2472. dst[i+VOFW]= (FFMIN(dst[i+VOFW],30775)*4663 - 9289992)>>12; //-264
  2473. }
  2474. }
  2475. }
  2476. }
  2477. static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
  2478. int srcSliceH, uint8_t* dst[], int dstStride[]){
2479. /* load a few things into local vars to make the code more readable and faster */
  2480. const int srcW= c->srcW;
  2481. const int dstW= c->dstW;
  2482. const int dstH= c->dstH;
  2483. const int chrDstW= c->chrDstW;
  2484. const int chrSrcW= c->chrSrcW;
  2485. const int lumXInc= c->lumXInc;
  2486. const int chrXInc= c->chrXInc;
  2487. const int dstFormat= c->dstFormat;
  2488. const int srcFormat= c->srcFormat;
  2489. const int flags= c->flags;
  2490. const int canMMX2BeUsed= c->canMMX2BeUsed;
  2491. int16_t *vLumFilterPos= c->vLumFilterPos;
  2492. int16_t *vChrFilterPos= c->vChrFilterPos;
  2493. int16_t *hLumFilterPos= c->hLumFilterPos;
  2494. int16_t *hChrFilterPos= c->hChrFilterPos;
  2495. int16_t *vLumFilter= c->vLumFilter;
  2496. int16_t *vChrFilter= c->vChrFilter;
  2497. int16_t *hLumFilter= c->hLumFilter;
  2498. int16_t *hChrFilter= c->hChrFilter;
  2499. int32_t *lumMmxFilter= c->lumMmxFilter;
  2500. int32_t *chrMmxFilter= c->chrMmxFilter;
  2501. const int vLumFilterSize= c->vLumFilterSize;
  2502. const int vChrFilterSize= c->vChrFilterSize;
  2503. const int hLumFilterSize= c->hLumFilterSize;
  2504. const int hChrFilterSize= c->hChrFilterSize;
  2505. int16_t **lumPixBuf= c->lumPixBuf;
  2506. int16_t **chrPixBuf= c->chrPixBuf;
  2507. const int vLumBufSize= c->vLumBufSize;
  2508. const int vChrBufSize= c->vChrBufSize;
  2509. uint8_t *funnyYCode= c->funnyYCode;
  2510. uint8_t *funnyUVCode= c->funnyUVCode;
  2511. uint8_t *formatConvBuffer= c->formatConvBuffer;
  2512. const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
  2513. const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
  2514. int lastDstY;
  2515. uint32_t *pal=c->pal_yuv;
  2516. /* vars which will change and which we need to store back in the context */
  2517. int dstY= c->dstY;
  2518. int lumBufIndex= c->lumBufIndex;
  2519. int chrBufIndex= c->chrBufIndex;
  2520. int lastInLumBuf= c->lastInLumBuf;
  2521. int lastInChrBuf= c->lastInChrBuf;
  2522. if (isPacked(c->srcFormat)){
  2523. src[0]=
  2524. src[1]=
  2525. src[2]= src[0];
  2526. srcStride[0]=
  2527. srcStride[1]=
  2528. srcStride[2]= srcStride[0];
  2529. }
  2530. srcStride[1]<<= c->vChrDrop;
  2531. srcStride[2]<<= c->vChrDrop;
  2532. //printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
  2533. // (int)dst[0], (int)dst[1], (int)dst[2]);
  2534. #if 0 //self test FIXME move to a vfilter or something
  2535. {
  2536. static volatile int i=0;
  2537. i++;
  2538. if (srcFormat==PIX_FMT_YUV420P && i==1 && srcSliceH>= c->srcH)
  2539. selfTest(src, srcStride, c->srcW, c->srcH);
  2540. i--;
  2541. }
  2542. #endif
  2543. //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
  2544. //dstStride[0],dstStride[1],dstStride[2]);
  2545. if (dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
  2546. {
  2547. static int warnedAlready=0; //FIXME move this into the context perhaps
  2548. if (flags & SWS_PRINT_INFO && !warnedAlready)
  2549. {
  2550. av_log(c, AV_LOG_WARNING, "Warning: dstStride is not aligned!\n"
  2551. " ->cannot do aligned memory accesses anymore\n");
  2552. warnedAlready=1;
  2553. }
  2554. }
2555. /* Note that the user might start scaling in the middle of the picture, so this
2556. will not get executed. This is not really intended, but it currently works,
2557. so people might do it. */
  2558. if (srcSliceY ==0){
  2559. lumBufIndex=0;
  2560. chrBufIndex=0;
  2561. dstY=0;
  2562. lastInLumBuf= -1;
  2563. lastInChrBuf= -1;
  2564. }
  2565. lastDstY= dstY;
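/* Main loop: for each destination line, make sure every source line the
 * vertical filters need (vLumFilterSize / vChrFilterSize taps) has been
 * horizontally scaled into the lumPixBuf / chrPixBuf ring buffers, then do
 * the vertical scale plus output conversion. If the current slice does not
 * yet contain enough lines, the loop breaks and resumes with the next slice. */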
  2566. for (;dstY < dstH; dstY++){
  2567. unsigned char *dest =dst[0]+dstStride[0]*dstY;
  2568. const int chrDstY= dstY>>c->chrDstVSubSample;
  2569. unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
  2570. unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
  2571. const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
  2572. const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
  2573. const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
  2574. const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
  2575. //printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
  2576. // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample);
  2577. //handle holes (FAST_BILINEAR & weird filters)
  2578. if (firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
  2579. if (firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
  2580. //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
  2581. assert(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1);
  2582. assert(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1);
2583. // Do we have enough lines in this slice to output the dstY line?
  2584. if (lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
  2585. {
  2586. //Do horizontal scaling
  2587. while(lastInLumBuf < lastLumSrcY)
  2588. {
  2589. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2590. lumBufIndex++;
  2591. //printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
  2592. assert(lumBufIndex < 2*vLumBufSize);
  2593. assert(lastInLumBuf + 1 - srcSliceY < srcSliceH);
  2594. assert(lastInLumBuf + 1 - srcSliceY >= 0);
  2595. //printf("%d %d\n", lumBufIndex, vLumBufSize);
  2596. RENAME(hyscale)(c, lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2597. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2598. funnyYCode, c->srcFormat, formatConvBuffer,
  2599. c->lumMmx2Filter, c->lumMmx2FilterPos, pal);
  2600. lastInLumBuf++;
  2601. }
  2602. while(lastInChrBuf < lastChrSrcY)
  2603. {
  2604. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2605. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2606. chrBufIndex++;
  2607. assert(chrBufIndex < 2*vChrBufSize);
  2608. assert(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH));
  2609. assert(lastInChrBuf + 1 - chrSrcSliceY >= 0);
  2610. //FIXME replace parameters through context struct (some at least)
  2611. if (!(isGray(srcFormat) || isGray(dstFormat)))
  2612. RENAME(hcscale)(c, chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2613. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2614. funnyUVCode, c->srcFormat, formatConvBuffer,
  2615. c->chrMmx2Filter, c->chrMmx2FilterPos, pal);
  2616. lastInChrBuf++;
  2617. }
  2618. //wrap buf index around to stay inside the ring buffer
  2619. if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
  2620. if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
  2621. }
  2622. else // not enough lines left in this slice -> load the rest in the buffer
  2623. {
  2624. /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
  2625. firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
  2626. lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
  2627. vChrBufSize, vLumBufSize);*/
  2628. //Do horizontal scaling
  2629. while(lastInLumBuf+1 < srcSliceY + srcSliceH)
  2630. {
  2631. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2632. lumBufIndex++;
  2633. assert(lumBufIndex < 2*vLumBufSize);
  2634. assert(lastInLumBuf + 1 - srcSliceY < srcSliceH);
  2635. assert(lastInLumBuf + 1 - srcSliceY >= 0);
  2636. RENAME(hyscale)(c, lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2637. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2638. funnyYCode, c->srcFormat, formatConvBuffer,
  2639. c->lumMmx2Filter, c->lumMmx2FilterPos, pal);
  2640. lastInLumBuf++;
  2641. }
  2642. while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
  2643. {
  2644. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2645. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2646. chrBufIndex++;
  2647. assert(chrBufIndex < 2*vChrBufSize);
  2648. assert(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH);
  2649. assert(lastInChrBuf + 1 - chrSrcSliceY >= 0);
  2650. if (!(isGray(srcFormat) || isGray(dstFormat)))
  2651. RENAME(hcscale)(c, chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2652. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2653. funnyUVCode, c->srcFormat, formatConvBuffer,
  2654. c->chrMmx2Filter, c->chrMmx2FilterPos, pal);
  2655. lastInChrBuf++;
  2656. }
  2657. //wrap buf index around to stay inside the ring buffer
  2658. if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
  2659. if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
  2660. break; //we can't output a dstY line so let's try with the next slice
  2661. }
  2662. #ifdef HAVE_MMX
  2663. c->blueDither= ff_dither8[dstY&1];
  2664. if (c->dstFormat == PIX_FMT_RGB555 || c->dstFormat == PIX_FMT_BGR555)
  2665. c->greenDither= ff_dither8[dstY&1];
  2666. else
  2667. c->greenDither= ff_dither4[dstY&1];
  2668. c->redDither= ff_dither8[(dstY+1)&1];
  2669. #endif
  2670. if (dstY < dstH-2)
  2671. {
  2672. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2673. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2674. #ifdef HAVE_MMX
  2675. int i;
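/* Pack the per-line source pointers and the 16-bit vertical coefficients into
 * the layout the MMX output loops expect: in the default case each entry
 * holds the pointer split into two 32-bit words plus the coefficient
 * duplicated via *0x10001; with SWS_ACCURATE_RND the APCK_* layout pairs two
 * lines per entry. */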
  2676. if (flags & SWS_ACCURATE_RND){
  2677. int s= APCK_SIZE / 8;
  2678. for (i=0; i<vLumFilterSize; i+=2){
  2679. *(void**)&lumMmxFilter[s*i ]= lumSrcPtr[i ];
  2680. *(void**)&lumMmxFilter[s*i+APCK_PTR2/4 ]= lumSrcPtr[i+(vLumFilterSize>1)];
  2681. lumMmxFilter[s*i+APCK_COEF/4 ]=
  2682. lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i ]
  2683. + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
  2684. }
  2685. for (i=0; i<vChrFilterSize; i+=2){
  2686. *(void**)&chrMmxFilter[s*i ]= chrSrcPtr[i ];
  2687. *(void**)&chrMmxFilter[s*i+APCK_PTR2/4 ]= chrSrcPtr[i+(vChrFilterSize>1)];
  2688. chrMmxFilter[s*i+APCK_COEF/4 ]=
  2689. chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i ]
  2690. + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
  2691. }
  2692. }else{
  2693. for (i=0; i<vLumFilterSize; i++)
  2694. {
  2695. lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
  2696. lumMmxFilter[4*i+1]= (uint64_t)lumSrcPtr[i] >> 32;
  2697. lumMmxFilter[4*i+2]=
  2698. lumMmxFilter[4*i+3]=
  2699. ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
  2700. }
  2701. for (i=0; i<vChrFilterSize; i++)
  2702. {
  2703. chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
  2704. chrMmxFilter[4*i+1]= (uint64_t)chrSrcPtr[i] >> 32;
  2705. chrMmxFilter[4*i+2]=
  2706. chrMmxFilter[4*i+3]=
  2707. ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
  2708. }
  2709. }
  2710. #endif
  2711. if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
  2712. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2713. if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
  2714. RENAME(yuv2nv12X)(c,
  2715. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2716. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2717. dest, uDest, dstW, chrDstW, dstFormat);
  2718. }
  2719. else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) //YV12 like
  2720. {
  2721. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2722. if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
  2723. if (vLumFilterSize == 1 && vChrFilterSize == 1) // unscaled YV12
  2724. {
  2725. int16_t *lumBuf = lumPixBuf[0];
  2726. int16_t *chrBuf= chrPixBuf[0];
  2727. RENAME(yuv2yuv1)(c, lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
  2728. }
  2729. else //General YV12
  2730. {
  2731. RENAME(yuv2yuvX)(c,
  2732. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2733. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2734. dest, uDest, vDest, dstW, chrDstW);
  2735. }
  2736. }
  2737. else
  2738. {
  2739. assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2740. assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  2741. if (vLumFilterSize == 1 && vChrFilterSize == 2) //unscaled RGB
  2742. {
  2743. int chrAlpha= vChrFilter[2*dstY+1];
  2744. if(flags & SWS_FULL_CHR_H_INT){
  2745. yuv2rgbXinC_full(c, //FIXME write a packed1_full function
  2746. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2747. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2748. dest, dstW, dstY);
  2749. }else{
  2750. RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
  2751. dest, dstW, chrAlpha, dstFormat, flags, dstY);
  2752. }
  2753. }
  2754. else if (vLumFilterSize == 2 && vChrFilterSize == 2) //bilinear upscale RGB
  2755. {
  2756. int lumAlpha= vLumFilter[2*dstY+1];
  2757. int chrAlpha= vChrFilter[2*dstY+1];
  2758. lumMmxFilter[2]=
  2759. lumMmxFilter[3]= vLumFilter[2*dstY ]*0x10001;
  2760. chrMmxFilter[2]=
  2761. chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001;
  2762. if(flags & SWS_FULL_CHR_H_INT){
  2763. yuv2rgbXinC_full(c, //FIXME write a packed2_full function
  2764. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2765. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2766. dest, dstW, dstY);
  2767. }else{
  2768. RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
  2769. dest, dstW, lumAlpha, chrAlpha, dstY);
  2770. }
  2771. }
  2772. else //general RGB
  2773. {
  2774. if(flags & SWS_FULL_CHR_H_INT){
  2775. yuv2rgbXinC_full(c,
  2776. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2777. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2778. dest, dstW, dstY);
  2779. }else{
  2780. RENAME(yuv2packedX)(c,
  2781. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2782. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2783. dest, dstW, dstY);
  2784. }
  2785. }
  2786. }
  2787. }
  2788. else // hmm looks like we can't use MMX here without overwriting this array's tail
  2789. {
  2790. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2791. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2792. if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
  2793. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2794. if (dstY&chrSkipMask) uDest= NULL; //FIXME split functions in lumi / chromi
  2795. yuv2nv12XinC(
  2796. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2797. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2798. dest, uDest, dstW, chrDstW, dstFormat);
  2799. }
  2800. else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) //YV12
  2801. {
  2802. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2803. if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split functions in lumi / chromi
  2804. yuv2yuvXinC(
  2805. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2806. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2807. dest, uDest, vDest, dstW, chrDstW);
  2808. }
  2809. else
  2810. {
  2811. assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2812. assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  2813. if(flags & SWS_FULL_CHR_H_INT){
  2814. yuv2rgbXinC_full(c,
  2815. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2816. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2817. dest, dstW, dstY);
  2818. }else{
  2819. yuv2packedXinC(c,
  2820. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2821. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2822. dest, dstW, dstY);
  2823. }
  2824. }
  2825. }
  2826. }
  2827. #ifdef HAVE_MMX
  2828. __asm__ volatile(SFENCE:::"memory");
  2829. __asm__ volatile(EMMS:::"memory");
  2830. #endif
  2831. /* store changed local vars back in the context */
  2832. c->dstY= dstY;
  2833. c->lumBufIndex= lumBufIndex;
  2834. c->chrBufIndex= chrBufIndex;
  2835. c->lastInLumBuf= lastInLumBuf;
  2836. c->lastInChrBuf= lastInChrBuf;
  2837. return dstY - lastDstY;
  2838. }