You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

3270 lines
137KB

  1. /*
  2. * Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. *
  20. * The C code (not assembly, MMX, ...) of this file can be used
  21. * under the LGPL license.
  22. */
  23. #undef REAL_MOVNTQ
  24. #undef MOVNTQ
  25. #undef PAVGB
  26. #undef PREFETCH
  27. #undef PREFETCHW
  28. #undef EMMS
  29. #undef SFENCE
  30. #ifdef HAVE_3DNOW
  31. /* On K6 femms is faster than emms. On K7 femms is directly mapped on emms. */
  32. #define EMMS "femms"
  33. #else
  34. #define EMMS "emms"
  35. #endif
  36. #ifdef HAVE_3DNOW
  37. #define PREFETCH "prefetch"
  38. #define PREFETCHW "prefetchw"
  39. #elif defined (HAVE_MMX2)
  40. #define PREFETCH "prefetchnta"
  41. #define PREFETCHW "prefetcht0"
  42. #else
  43. #define PREFETCH " # nop"
  44. #define PREFETCHW " # nop"
  45. #endif
  46. #ifdef HAVE_MMX2
  47. #define SFENCE "sfence"
  48. #else
  49. #define SFENCE " # nop"
  50. #endif
  51. #ifdef HAVE_MMX2
  52. #define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
  53. #elif defined (HAVE_3DNOW)
  54. #define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
  55. #endif
  56. #ifdef HAVE_MMX2
  57. #define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
  58. #else
  59. #define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
  60. #endif
  61. #define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
  62. #ifdef HAVE_ALTIVEC
  63. #include "swscale_altivec_template.c"
  64. #endif
  65. #define YSCALEYUV2YV12X(x, offset, dest, width) \
  66. asm volatile(\
  67. "xor %%"REG_a", %%"REG_a" \n\t"\
  68. "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
  69. "movq %%mm3, %%mm4 \n\t"\
  70. "lea " offset "(%0), %%"REG_d" \n\t"\
  71. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  72. ASMALIGN(4) /* FIXME Unroll? */\
  73. "1: \n\t"\
  74. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  75. "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
  76. "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* srcData */\
  77. "add $16, %%"REG_d" \n\t"\
  78. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  79. "test %%"REG_S", %%"REG_S" \n\t"\
  80. "pmulhw %%mm0, %%mm2 \n\t"\
  81. "pmulhw %%mm0, %%mm5 \n\t"\
  82. "paddw %%mm2, %%mm3 \n\t"\
  83. "paddw %%mm5, %%mm4 \n\t"\
  84. " jnz 1b \n\t"\
  85. "psraw $3, %%mm3 \n\t"\
  86. "psraw $3, %%mm4 \n\t"\
  87. "packuswb %%mm4, %%mm3 \n\t"\
  88. MOVNTQ(%%mm3, (%1, %%REGa))\
  89. "add $8, %%"REG_a" \n\t"\
  90. "cmp %2, %%"REG_a" \n\t"\
  91. "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
  92. "movq %%mm3, %%mm4 \n\t"\
  93. "lea " offset "(%0), %%"REG_d" \n\t"\
  94. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  95. "jb 1b \n\t"\
  96. :: "r" (&c->redDither),\
  97. "r" (dest), "g" (width)\
  98. : "%"REG_a, "%"REG_d, "%"REG_S\
  99. );
  100. #define YSCALEYUV2YV12X_ACCURATE(x, offset, dest, width) \
  101. asm volatile(\
  102. "lea " offset "(%0), %%"REG_d" \n\t"\
  103. "xor %%"REG_a", %%"REG_a" \n\t"\
  104. "pxor %%mm4, %%mm4 \n\t"\
  105. "pxor %%mm5, %%mm5 \n\t"\
  106. "pxor %%mm6, %%mm6 \n\t"\
  107. "pxor %%mm7, %%mm7 \n\t"\
  108. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  109. ASMALIGN(4) \
  110. "1: \n\t"\
  111. "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* srcData */\
  112. "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* srcData */\
  113. "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
  114. "movq " x "(%%"REG_S", %%"REG_a", 2), %%mm1 \n\t" /* srcData */\
  115. "movq %%mm0, %%mm3 \n\t"\
  116. "punpcklwd %%mm1, %%mm0 \n\t"\
  117. "punpckhwd %%mm1, %%mm3 \n\t"\
  118. "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm1 \n\t" /* filterCoeff */\
  119. "pmaddwd %%mm1, %%mm0 \n\t"\
  120. "pmaddwd %%mm1, %%mm3 \n\t"\
  121. "paddd %%mm0, %%mm4 \n\t"\
  122. "paddd %%mm3, %%mm5 \n\t"\
  123. "movq 8+" x "(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* srcData */\
  124. "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
  125. "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
  126. "test %%"REG_S", %%"REG_S" \n\t"\
  127. "movq %%mm2, %%mm0 \n\t"\
  128. "punpcklwd %%mm3, %%mm2 \n\t"\
  129. "punpckhwd %%mm3, %%mm0 \n\t"\
  130. "pmaddwd %%mm1, %%mm2 \n\t"\
  131. "pmaddwd %%mm1, %%mm0 \n\t"\
  132. "paddd %%mm2, %%mm6 \n\t"\
  133. "paddd %%mm0, %%mm7 \n\t"\
  134. " jnz 1b \n\t"\
  135. "psrad $16, %%mm4 \n\t"\
  136. "psrad $16, %%mm5 \n\t"\
  137. "psrad $16, %%mm6 \n\t"\
  138. "psrad $16, %%mm7 \n\t"\
  139. "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
  140. "packssdw %%mm5, %%mm4 \n\t"\
  141. "packssdw %%mm7, %%mm6 \n\t"\
  142. "paddw %%mm0, %%mm4 \n\t"\
  143. "paddw %%mm0, %%mm6 \n\t"\
  144. "psraw $3, %%mm4 \n\t"\
  145. "psraw $3, %%mm6 \n\t"\
  146. "packuswb %%mm6, %%mm4 \n\t"\
  147. MOVNTQ(%%mm4, (%1, %%REGa))\
  148. "add $8, %%"REG_a" \n\t"\
  149. "cmp %2, %%"REG_a" \n\t"\
  150. "lea " offset "(%0), %%"REG_d" \n\t"\
  151. "pxor %%mm4, %%mm4 \n\t"\
  152. "pxor %%mm5, %%mm5 \n\t"\
  153. "pxor %%mm6, %%mm6 \n\t"\
  154. "pxor %%mm7, %%mm7 \n\t"\
  155. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  156. "jb 1b \n\t"\
  157. :: "r" (&c->redDither),\
  158. "r" (dest), "g" (width)\
  159. : "%"REG_a, "%"REG_d, "%"REG_S\
  160. );
  161. #define YSCALEYUV2YV121 \
  162. "mov %2, %%"REG_a" \n\t"\
  163. ASMALIGN(4) /* FIXME Unroll? */\
  164. "1: \n\t"\
  165. "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
  166. "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
  167. "psraw $7, %%mm0 \n\t"\
  168. "psraw $7, %%mm1 \n\t"\
  169. "packuswb %%mm1, %%mm0 \n\t"\
  170. MOVNTQ(%%mm0, (%1, %%REGa))\
  171. "add $8, %%"REG_a" \n\t"\
  172. "jnc 1b \n\t"
  173. #define YSCALEYUV2YV121_ACCURATE \
  174. "mov %2, %%"REG_a" \n\t"\
  175. "pcmpeqw %%mm7, %%mm7 \n\t"\
  176. "psrlw $15, %%mm7 \n\t"\
  177. "psllw $6, %%mm7 \n\t"\
  178. ASMALIGN(4) /* FIXME Unroll? */\
  179. "1: \n\t"\
  180. "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
  181. "movq 8(%0, %%"REG_a", 2), %%mm1 \n\t"\
  182. "paddsw %%mm7, %%mm0 \n\t"\
  183. "paddsw %%mm7, %%mm1 \n\t"\
  184. "psraw $7, %%mm0 \n\t"\
  185. "psraw $7, %%mm1 \n\t"\
  186. "packuswb %%mm1, %%mm0 \n\t"\
  187. MOVNTQ(%%mm0, (%1, %%REGa))\
  188. "add $8, %%"REG_a" \n\t"\
  189. "jnc 1b \n\t"
  190. /*
  191. :: "m" (-lumFilterSize), "m" (-chrFilterSize),
  192. "m" (lumMmxFilter+lumFilterSize*4), "m" (chrMmxFilter+chrFilterSize*4),
  193. "r" (dest), "m" (dstW),
  194. "m" (lumSrc+lumFilterSize), "m" (chrSrc+chrFilterSize)
  195. : "%eax", "%ebx", "%ecx", "%edx", "%esi"
  196. */
  197. #define YSCALEYUV2PACKEDX \
  198. asm volatile(\
  199. "xor %%"REG_a", %%"REG_a" \n\t"\
  200. ASMALIGN(4)\
  201. "nop \n\t"\
  202. "1: \n\t"\
  203. "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
  204. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  205. "movq "VROUNDER_OFFSET"(%0), %%mm3 \n\t"\
  206. "movq %%mm3, %%mm4 \n\t"\
  207. ASMALIGN(4)\
  208. "2: \n\t"\
  209. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  210. "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
  211. "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
  212. "add $16, %%"REG_d" \n\t"\
  213. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  214. "pmulhw %%mm0, %%mm2 \n\t"\
  215. "pmulhw %%mm0, %%mm5 \n\t"\
  216. "paddw %%mm2, %%mm3 \n\t"\
  217. "paddw %%mm5, %%mm4 \n\t"\
  218. "test %%"REG_S", %%"REG_S" \n\t"\
  219. " jnz 2b \n\t"\
  220. \
  221. "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
  222. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  223. "movq "VROUNDER_OFFSET"(%0), %%mm1 \n\t"\
  224. "movq %%mm1, %%mm7 \n\t"\
  225. ASMALIGN(4)\
  226. "2: \n\t"\
  227. "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
  228. "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\
  229. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
  230. "add $16, %%"REG_d" \n\t"\
  231. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  232. "pmulhw %%mm0, %%mm2 \n\t"\
  233. "pmulhw %%mm0, %%mm5 \n\t"\
  234. "paddw %%mm2, %%mm1 \n\t"\
  235. "paddw %%mm5, %%mm7 \n\t"\
  236. "test %%"REG_S", %%"REG_S" \n\t"\
  237. " jnz 2b \n\t"\
  238. #define YSCALEYUV2PACKEDX_END \
  239. :: "r" (&c->redDither), \
  240. "m" (dummy), "m" (dummy), "m" (dummy),\
  241. "r" (dest), "m" (dstW) \
  242. : "%"REG_a, "%"REG_d, "%"REG_S \
  243. );
  244. #define YSCALEYUV2PACKEDX_ACCURATE \
  245. asm volatile(\
  246. "xor %%"REG_a", %%"REG_a" \n\t"\
  247. ASMALIGN(4)\
  248. "nop \n\t"\
  249. "1: \n\t"\
  250. "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
  251. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  252. "pxor %%mm4, %%mm4 \n\t"\
  253. "pxor %%mm5, %%mm5 \n\t"\
  254. "pxor %%mm6, %%mm6 \n\t"\
  255. "pxor %%mm7, %%mm7 \n\t"\
  256. ASMALIGN(4)\
  257. "2: \n\t"\
  258. "movq (%%"REG_S", %%"REG_a"), %%mm0 \n\t" /* UsrcData */\
  259. "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* VsrcData */\
  260. "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
  261. "movq (%%"REG_S", %%"REG_a"), %%mm1 \n\t" /* UsrcData */\
  262. "movq %%mm0, %%mm3 \n\t"\
  263. "punpcklwd %%mm1, %%mm0 \n\t"\
  264. "punpckhwd %%mm1, %%mm3 \n\t"\
  265. "movq "STR(APCK_COEF)"(%%"REG_d"),%%mm1 \n\t" /* filterCoeff */\
  266. "pmaddwd %%mm1, %%mm0 \n\t"\
  267. "pmaddwd %%mm1, %%mm3 \n\t"\
  268. "paddd %%mm0, %%mm4 \n\t"\
  269. "paddd %%mm3, %%mm5 \n\t"\
  270. "movq "AV_STRINGIFY(VOF)"(%%"REG_S", %%"REG_a"), %%mm3 \n\t" /* VsrcData */\
  271. "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
  272. "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
  273. "test %%"REG_S", %%"REG_S" \n\t"\
  274. "movq %%mm2, %%mm0 \n\t"\
  275. "punpcklwd %%mm3, %%mm2 \n\t"\
  276. "punpckhwd %%mm3, %%mm0 \n\t"\
  277. "pmaddwd %%mm1, %%mm2 \n\t"\
  278. "pmaddwd %%mm1, %%mm0 \n\t"\
  279. "paddd %%mm2, %%mm6 \n\t"\
  280. "paddd %%mm0, %%mm7 \n\t"\
  281. " jnz 2b \n\t"\
  282. "psrad $16, %%mm4 \n\t"\
  283. "psrad $16, %%mm5 \n\t"\
  284. "psrad $16, %%mm6 \n\t"\
  285. "psrad $16, %%mm7 \n\t"\
  286. "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
  287. "packssdw %%mm5, %%mm4 \n\t"\
  288. "packssdw %%mm7, %%mm6 \n\t"\
  289. "paddw %%mm0, %%mm4 \n\t"\
  290. "paddw %%mm0, %%mm6 \n\t"\
  291. "movq %%mm4, "U_TEMP"(%0) \n\t"\
  292. "movq %%mm6, "V_TEMP"(%0) \n\t"\
  293. \
  294. "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d" \n\t"\
  295. "mov (%%"REG_d"), %%"REG_S" \n\t"\
  296. "pxor %%mm1, %%mm1 \n\t"\
  297. "pxor %%mm5, %%mm5 \n\t"\
  298. "pxor %%mm7, %%mm7 \n\t"\
  299. "pxor %%mm6, %%mm6 \n\t"\
  300. ASMALIGN(4)\
  301. "2: \n\t"\
  302. "movq (%%"REG_S", %%"REG_a", 2), %%mm0 \n\t" /* Y1srcData */\
  303. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y2srcData */\
  304. "mov "STR(APCK_PTR2)"(%%"REG_d"), %%"REG_S" \n\t"\
  305. "movq (%%"REG_S", %%"REG_a", 2), %%mm4 \n\t" /* Y1srcData */\
  306. "movq %%mm0, %%mm3 \n\t"\
  307. "punpcklwd %%mm4, %%mm0 \n\t"\
  308. "punpckhwd %%mm4, %%mm3 \n\t"\
  309. "movq "STR(APCK_COEF)"(%%"REG_d"), %%mm4 \n\t" /* filterCoeff */\
  310. "pmaddwd %%mm4, %%mm0 \n\t"\
  311. "pmaddwd %%mm4, %%mm3 \n\t"\
  312. "paddd %%mm0, %%mm1 \n\t"\
  313. "paddd %%mm3, %%mm5 \n\t"\
  314. "movq 8(%%"REG_S", %%"REG_a", 2), %%mm3 \n\t" /* Y2srcData */\
  315. "mov "STR(APCK_SIZE)"(%%"REG_d"), %%"REG_S" \n\t"\
  316. "add $"STR(APCK_SIZE)", %%"REG_d" \n\t"\
  317. "test %%"REG_S", %%"REG_S" \n\t"\
  318. "movq %%mm2, %%mm0 \n\t"\
  319. "punpcklwd %%mm3, %%mm2 \n\t"\
  320. "punpckhwd %%mm3, %%mm0 \n\t"\
  321. "pmaddwd %%mm4, %%mm2 \n\t"\
  322. "pmaddwd %%mm4, %%mm0 \n\t"\
  323. "paddd %%mm2, %%mm7 \n\t"\
  324. "paddd %%mm0, %%mm6 \n\t"\
  325. " jnz 2b \n\t"\
  326. "psrad $16, %%mm1 \n\t"\
  327. "psrad $16, %%mm5 \n\t"\
  328. "psrad $16, %%mm7 \n\t"\
  329. "psrad $16, %%mm6 \n\t"\
  330. "movq "VROUNDER_OFFSET"(%0), %%mm0 \n\t"\
  331. "packssdw %%mm5, %%mm1 \n\t"\
  332. "packssdw %%mm6, %%mm7 \n\t"\
  333. "paddw %%mm0, %%mm1 \n\t"\
  334. "paddw %%mm0, %%mm7 \n\t"\
  335. "movq "U_TEMP"(%0), %%mm3 \n\t"\
  336. "movq "V_TEMP"(%0), %%mm4 \n\t"\
  337. #define YSCALEYUV2RGBX \
  338. "psubw "U_OFFSET"(%0), %%mm3 \n\t" /* (U-128)8*/\
  339. "psubw "V_OFFSET"(%0), %%mm4 \n\t" /* (V-128)8*/\
  340. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  341. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  342. "pmulhw "UG_COEFF"(%0), %%mm3 \n\t"\
  343. "pmulhw "VG_COEFF"(%0), %%mm4 \n\t"\
  344. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  345. "pmulhw "UB_COEFF"(%0), %%mm2 \n\t"\
  346. "pmulhw "VR_COEFF"(%0), %%mm5 \n\t"\
  347. "psubw "Y_OFFSET"(%0), %%mm1 \n\t" /* 8(Y-16)*/\
  348. "psubw "Y_OFFSET"(%0), %%mm7 \n\t" /* 8(Y-16)*/\
  349. "pmulhw "Y_COEFF"(%0), %%mm1 \n\t"\
  350. "pmulhw "Y_COEFF"(%0), %%mm7 \n\t"\
  351. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  352. "paddw %%mm3, %%mm4 \n\t"\
  353. "movq %%mm2, %%mm0 \n\t"\
  354. "movq %%mm5, %%mm6 \n\t"\
  355. "movq %%mm4, %%mm3 \n\t"\
  356. "punpcklwd %%mm2, %%mm2 \n\t"\
  357. "punpcklwd %%mm5, %%mm5 \n\t"\
  358. "punpcklwd %%mm4, %%mm4 \n\t"\
  359. "paddw %%mm1, %%mm2 \n\t"\
  360. "paddw %%mm1, %%mm5 \n\t"\
  361. "paddw %%mm1, %%mm4 \n\t"\
  362. "punpckhwd %%mm0, %%mm0 \n\t"\
  363. "punpckhwd %%mm6, %%mm6 \n\t"\
  364. "punpckhwd %%mm3, %%mm3 \n\t"\
  365. "paddw %%mm7, %%mm0 \n\t"\
  366. "paddw %%mm7, %%mm6 \n\t"\
  367. "paddw %%mm7, %%mm3 \n\t"\
  368. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  369. "packuswb %%mm0, %%mm2 \n\t"\
  370. "packuswb %%mm6, %%mm5 \n\t"\
  371. "packuswb %%mm3, %%mm4 \n\t"\
  372. "pxor %%mm7, %%mm7 \n\t"
  373. #if 0
  374. #define FULL_YSCALEYUV2RGB \
  375. "pxor %%mm7, %%mm7 \n\t"\
  376. "movd %6, %%mm6 \n\t" /*yalpha1*/\
  377. "punpcklwd %%mm6, %%mm6 \n\t"\
  378. "punpcklwd %%mm6, %%mm6 \n\t"\
  379. "movd %7, %%mm5 \n\t" /*uvalpha1*/\
  380. "punpcklwd %%mm5, %%mm5 \n\t"\
  381. "punpcklwd %%mm5, %%mm5 \n\t"\
  382. "xor %%"REG_a", %%"REG_a" \n\t"\
  383. ASMALIGN(4)\
  384. "1: \n\t"\
  385. "movq (%0, %%"REG_a",2), %%mm0 \n\t" /*buf0[eax]*/\
  386. "movq (%1, %%"REG_a",2), %%mm1 \n\t" /*buf1[eax]*/\
  387. "movq (%2, %%"REG_a",2), %%mm2 \n\t" /* uvbuf0[eax]*/\
  388. "movq (%3, %%"REG_a",2), %%mm3 \n\t" /* uvbuf1[eax]*/\
  389. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  390. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  391. "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  392. "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  393. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  394. "movq "AV_STRINGIFY(VOF)"(%2, %%"REG_a",2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  395. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  396. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  397. "movq "AV_STRINGIFY(VOF)"(%3, %%"REG_a",2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
  398. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  399. "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  400. "psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
  401. "psubw "MANGLE(w400)", %%mm3 \n\t" /* 8(U-128)*/\
  402. "pmulhw "MANGLE(yCoeff)", %%mm1 \n\t"\
  403. \
  404. \
  405. "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  406. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  407. "pmulhw "MANGLE(ubCoeff)", %%mm3 \n\t"\
  408. "psraw $4, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  409. "pmulhw "MANGLE(ugCoeff)", %%mm2 \n\t"\
  410. "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  411. "psubw "MANGLE(w400)", %%mm0 \n\t" /* (V-128)8*/\
  412. \
  413. \
  414. "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
  415. "pmulhw "MANGLE(vrCoeff)", %%mm0 \n\t"\
  416. "pmulhw "MANGLE(vgCoeff)", %%mm4 \n\t"\
  417. "paddw %%mm1, %%mm3 \n\t" /* B*/\
  418. "paddw %%mm1, %%mm0 \n\t" /* R*/\
  419. "packuswb %%mm3, %%mm3 \n\t"\
  420. \
  421. "packuswb %%mm0, %%mm0 \n\t"\
  422. "paddw %%mm4, %%mm2 \n\t"\
  423. "paddw %%mm2, %%mm1 \n\t" /* G*/\
  424. \
  425. "packuswb %%mm1, %%mm1 \n\t"
  426. #endif
  427. #define REAL_YSCALEYUV2PACKED(index, c) \
  428. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
  429. "movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1 \n\t"\
  430. "psraw $3, %%mm0 \n\t"\
  431. "psraw $3, %%mm1 \n\t"\
  432. "movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c") \n\t"\
  433. "movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c") \n\t"\
  434. "xor "#index", "#index" \n\t"\
  435. ASMALIGN(4)\
  436. "1: \n\t"\
  437. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  438. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  439. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  440. "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  441. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  442. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  443. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
  444. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  445. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  446. "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  447. "psraw $7, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  448. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  449. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  450. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  451. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  452. "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
  453. "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
  454. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  455. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  456. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  457. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  458. "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  459. "psraw $7, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  460. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  461. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  462. #define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
  463. #define REAL_YSCALEYUV2RGB(index, c) \
  464. "xor "#index", "#index" \n\t"\
  465. ASMALIGN(4)\
  466. "1: \n\t"\
  467. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  468. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  469. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  470. "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  471. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  472. "psubw %%mm4, %%mm5 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  473. "movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t"\
  474. "pmulhw %%mm0, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  475. "pmulhw %%mm0, %%mm5 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  476. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  477. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  478. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  479. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  480. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  481. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  482. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  483. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  484. "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
  485. "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
  486. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  487. "movq (%0, "#index", 2), %%mm0 \n\t" /*buf0[eax]*/\
  488. "movq (%1, "#index", 2), %%mm1 \n\t" /*buf1[eax]*/\
  489. "movq 8(%0, "#index", 2), %%mm6 \n\t" /*buf0[eax]*/\
  490. "movq 8(%1, "#index", 2), %%mm7 \n\t" /*buf1[eax]*/\
  491. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  492. "psubw %%mm7, %%mm6 \n\t" /* buf0[eax] - buf1[eax]*/\
  493. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  494. "pmulhw "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm6 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  495. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  496. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  497. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  498. "paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  499. "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
  500. "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
  501. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  502. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  503. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  504. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  505. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  506. "paddw %%mm3, %%mm4 \n\t"\
  507. "movq %%mm2, %%mm0 \n\t"\
  508. "movq %%mm5, %%mm6 \n\t"\
  509. "movq %%mm4, %%mm3 \n\t"\
  510. "punpcklwd %%mm2, %%mm2 \n\t"\
  511. "punpcklwd %%mm5, %%mm5 \n\t"\
  512. "punpcklwd %%mm4, %%mm4 \n\t"\
  513. "paddw %%mm1, %%mm2 \n\t"\
  514. "paddw %%mm1, %%mm5 \n\t"\
  515. "paddw %%mm1, %%mm4 \n\t"\
  516. "punpckhwd %%mm0, %%mm0 \n\t"\
  517. "punpckhwd %%mm6, %%mm6 \n\t"\
  518. "punpckhwd %%mm3, %%mm3 \n\t"\
  519. "paddw %%mm7, %%mm0 \n\t"\
  520. "paddw %%mm7, %%mm6 \n\t"\
  521. "paddw %%mm7, %%mm3 \n\t"\
  522. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  523. "packuswb %%mm0, %%mm2 \n\t"\
  524. "packuswb %%mm6, %%mm5 \n\t"\
  525. "packuswb %%mm3, %%mm4 \n\t"\
  526. "pxor %%mm7, %%mm7 \n\t"
  527. #define YSCALEYUV2RGB(index, c) REAL_YSCALEYUV2RGB(index, c)
  528. #define REAL_YSCALEYUV2PACKED1(index, c) \
  529. "xor "#index", "#index" \n\t"\
  530. ASMALIGN(4)\
  531. "1: \n\t"\
  532. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  533. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  534. "psraw $7, %%mm3 \n\t" \
  535. "psraw $7, %%mm4 \n\t" \
  536. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  537. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  538. "psraw $7, %%mm1 \n\t" \
  539. "psraw $7, %%mm7 \n\t" \
  540. #define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
  541. #define REAL_YSCALEYUV2RGB1(index, c) \
  542. "xor "#index", "#index" \n\t"\
  543. ASMALIGN(4)\
  544. "1: \n\t"\
  545. "movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
  546. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  547. "psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
  548. "psraw $4, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>4*/\
  549. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  550. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  551. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  552. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  553. "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
  554. "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
  555. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  556. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  557. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  558. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  559. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  560. "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
  561. "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
  562. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  563. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  564. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  565. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  566. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  567. "paddw %%mm3, %%mm4 \n\t"\
  568. "movq %%mm2, %%mm0 \n\t"\
  569. "movq %%mm5, %%mm6 \n\t"\
  570. "movq %%mm4, %%mm3 \n\t"\
  571. "punpcklwd %%mm2, %%mm2 \n\t"\
  572. "punpcklwd %%mm5, %%mm5 \n\t"\
  573. "punpcklwd %%mm4, %%mm4 \n\t"\
  574. "paddw %%mm1, %%mm2 \n\t"\
  575. "paddw %%mm1, %%mm5 \n\t"\
  576. "paddw %%mm1, %%mm4 \n\t"\
  577. "punpckhwd %%mm0, %%mm0 \n\t"\
  578. "punpckhwd %%mm6, %%mm6 \n\t"\
  579. "punpckhwd %%mm3, %%mm3 \n\t"\
  580. "paddw %%mm7, %%mm0 \n\t"\
  581. "paddw %%mm7, %%mm6 \n\t"\
  582. "paddw %%mm7, %%mm3 \n\t"\
  583. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  584. "packuswb %%mm0, %%mm2 \n\t"\
  585. "packuswb %%mm6, %%mm5 \n\t"\
  586. "packuswb %%mm3, %%mm4 \n\t"\
  587. "pxor %%mm7, %%mm7 \n\t"
  588. #define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
  589. #define REAL_YSCALEYUV2PACKED1b(index, c) \
  590. "xor "#index", "#index" \n\t"\
  591. ASMALIGN(4)\
  592. "1: \n\t"\
  593. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  594. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  595. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  596. "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  597. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  598. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  599. "psrlw $8, %%mm3 \n\t" \
  600. "psrlw $8, %%mm4 \n\t" \
  601. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  602. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  603. "psraw $7, %%mm1 \n\t" \
  604. "psraw $7, %%mm7 \n\t"
  605. #define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
  606. // do vertical chrominance interpolation
  607. #define REAL_YSCALEYUV2RGB1b(index, c) \
  608. "xor "#index", "#index" \n\t"\
  609. ASMALIGN(4)\
  610. "1: \n\t"\
  611. "movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
  612. "movq (%3, "#index"), %%mm3 \n\t" /* uvbuf1[eax]*/\
  613. "movq "AV_STRINGIFY(VOF)"(%2, "#index"), %%mm5 \n\t" /* uvbuf0[eax+2048]*/\
  614. "movq "AV_STRINGIFY(VOF)"(%3, "#index"), %%mm4 \n\t" /* uvbuf1[eax+2048]*/\
  615. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax] + uvbuf1[eax]*/\
  616. "paddw %%mm5, %%mm4 \n\t" /* uvbuf0[eax+2048] + uvbuf1[eax+2048]*/\
  617. "psrlw $5, %%mm3 \n\t" /*FIXME might overflow*/\
  618. "psrlw $5, %%mm4 \n\t" /*FIXME might overflow*/\
  619. "psubw "U_OFFSET"("#c"), %%mm3 \n\t" /* (U-128)8*/\
  620. "psubw "V_OFFSET"("#c"), %%mm4 \n\t" /* (V-128)8*/\
  621. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  622. "movq %%mm4, %%mm5 \n\t" /* (V-128)8*/\
  623. "pmulhw "UG_COEFF"("#c"), %%mm3 \n\t"\
  624. "pmulhw "VG_COEFF"("#c"), %%mm4 \n\t"\
  625. /* mm2=(U-128)8, mm3=ug, mm4=vg mm5=(V-128)8 */\
  626. "movq (%0, "#index", 2), %%mm1 \n\t" /*buf0[eax]*/\
  627. "movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
  628. "psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  629. "psraw $4, %%mm7 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
  630. "pmulhw "UB_COEFF"("#c"), %%mm2 \n\t"\
  631. "pmulhw "VR_COEFF"("#c"), %%mm5 \n\t"\
  632. "psubw "Y_OFFSET"("#c"), %%mm1 \n\t" /* 8(Y-16)*/\
  633. "psubw "Y_OFFSET"("#c"), %%mm7 \n\t" /* 8(Y-16)*/\
  634. "pmulhw "Y_COEFF"("#c"), %%mm1 \n\t"\
  635. "pmulhw "Y_COEFF"("#c"), %%mm7 \n\t"\
  636. /* mm1= Y1, mm2=ub, mm3=ug, mm4=vg mm5=vr, mm7=Y2 */\
  637. "paddw %%mm3, %%mm4 \n\t"\
  638. "movq %%mm2, %%mm0 \n\t"\
  639. "movq %%mm5, %%mm6 \n\t"\
  640. "movq %%mm4, %%mm3 \n\t"\
  641. "punpcklwd %%mm2, %%mm2 \n\t"\
  642. "punpcklwd %%mm5, %%mm5 \n\t"\
  643. "punpcklwd %%mm4, %%mm4 \n\t"\
  644. "paddw %%mm1, %%mm2 \n\t"\
  645. "paddw %%mm1, %%mm5 \n\t"\
  646. "paddw %%mm1, %%mm4 \n\t"\
  647. "punpckhwd %%mm0, %%mm0 \n\t"\
  648. "punpckhwd %%mm6, %%mm6 \n\t"\
  649. "punpckhwd %%mm3, %%mm3 \n\t"\
  650. "paddw %%mm7, %%mm0 \n\t"\
  651. "paddw %%mm7, %%mm6 \n\t"\
  652. "paddw %%mm7, %%mm3 \n\t"\
  653. /* mm0=B1, mm2=B2, mm3=G2, mm4=G1, mm5=R1, mm6=R2 */\
  654. "packuswb %%mm0, %%mm2 \n\t"\
  655. "packuswb %%mm6, %%mm5 \n\t"\
  656. "packuswb %%mm3, %%mm4 \n\t"\
  657. "pxor %%mm7, %%mm7 \n\t"
  658. #define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
  659. #define REAL_WRITEBGR32(dst, dstw, index) \
  660. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  661. "movq %%mm2, %%mm1 \n\t" /* B */\
  662. "movq %%mm5, %%mm6 \n\t" /* R */\
  663. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  664. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  665. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  666. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  667. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  668. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  669. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  670. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  671. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  672. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  673. \
  674. MOVNTQ(%%mm0, (dst, index, 4))\
  675. MOVNTQ(%%mm2, 8(dst, index, 4))\
  676. MOVNTQ(%%mm1, 16(dst, index, 4))\
  677. MOVNTQ(%%mm3, 24(dst, index, 4))\
  678. \
  679. "add $8, "#index" \n\t"\
  680. "cmp "#dstw", "#index" \n\t"\
  681. " jb 1b \n\t"
  682. #define WRITEBGR32(dst, dstw, index) REAL_WRITEBGR32(dst, dstw, index)
  683. #define REAL_WRITERGB16(dst, dstw, index) \
  684. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  685. "pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
  686. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  687. "psrlq $3, %%mm2 \n\t"\
  688. \
  689. "movq %%mm2, %%mm1 \n\t"\
  690. "movq %%mm4, %%mm3 \n\t"\
  691. \
  692. "punpcklbw %%mm7, %%mm3 \n\t"\
  693. "punpcklbw %%mm5, %%mm2 \n\t"\
  694. "punpckhbw %%mm7, %%mm4 \n\t"\
  695. "punpckhbw %%mm5, %%mm1 \n\t"\
  696. \
  697. "psllq $3, %%mm3 \n\t"\
  698. "psllq $3, %%mm4 \n\t"\
  699. \
  700. "por %%mm3, %%mm2 \n\t"\
  701. "por %%mm4, %%mm1 \n\t"\
  702. \
  703. MOVNTQ(%%mm2, (dst, index, 2))\
  704. MOVNTQ(%%mm1, 8(dst, index, 2))\
  705. \
  706. "add $8, "#index" \n\t"\
  707. "cmp "#dstw", "#index" \n\t"\
  708. " jb 1b \n\t"
  709. #define WRITERGB16(dst, dstw, index) REAL_WRITERGB16(dst, dstw, index)
  710. #define REAL_WRITERGB15(dst, dstw, index) \
  711. "pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
  712. "pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
  713. "pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
  714. "psrlq $3, %%mm2 \n\t"\
  715. "psrlq $1, %%mm5 \n\t"\
  716. \
  717. "movq %%mm2, %%mm1 \n\t"\
  718. "movq %%mm4, %%mm3 \n\t"\
  719. \
  720. "punpcklbw %%mm7, %%mm3 \n\t"\
  721. "punpcklbw %%mm5, %%mm2 \n\t"\
  722. "punpckhbw %%mm7, %%mm4 \n\t"\
  723. "punpckhbw %%mm5, %%mm1 \n\t"\
  724. \
  725. "psllq $2, %%mm3 \n\t"\
  726. "psllq $2, %%mm4 \n\t"\
  727. \
  728. "por %%mm3, %%mm2 \n\t"\
  729. "por %%mm4, %%mm1 \n\t"\
  730. \
  731. MOVNTQ(%%mm2, (dst, index, 2))\
  732. MOVNTQ(%%mm1, 8(dst, index, 2))\
  733. \
  734. "add $8, "#index" \n\t"\
  735. "cmp "#dstw", "#index" \n\t"\
  736. " jb 1b \n\t"
  737. #define WRITERGB15(dst, dstw, index) REAL_WRITERGB15(dst, dstw, index)
  738. #define WRITEBGR24OLD(dst, dstw, index) \
  739. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  740. "movq %%mm2, %%mm1 \n\t" /* B */\
  741. "movq %%mm5, %%mm6 \n\t" /* R */\
  742. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  743. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  744. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  745. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  746. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  747. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  748. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  749. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  750. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  751. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  752. \
  753. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  754. "psrlq $8, %%mm0 \n\t" /* 00RGB0RG 0 */\
  755. "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 0 */\
  756. "pand "MANGLE(bm11111000)", %%mm0 \n\t" /* 00RGB000 0.5 */\
  757. "por %%mm4, %%mm0 \n\t" /* 00RGBRGB 0 */\
  758. "movq %%mm2, %%mm4 \n\t" /* 0RGB0RGB 1 */\
  759. "psllq $48, %%mm2 \n\t" /* GB000000 1 */\
  760. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  761. \
  762. "movq %%mm4, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  763. "psrld $16, %%mm4 \n\t" /* 000R000R 1 */\
  764. "psrlq $24, %%mm2 \n\t" /* 0000RGB0 1.5 */\
  765. "por %%mm4, %%mm2 \n\t" /* 000RRGBR 1 */\
  766. "pand "MANGLE(bm00001111)", %%mm2 \n\t" /* 0000RGBR 1 */\
  767. "movq %%mm1, %%mm4 \n\t" /* 0RGB0RGB 2 */\
  768. "psrlq $8, %%mm1 \n\t" /* 00RGB0RG 2 */\
  769. "pand "MANGLE(bm00000111)", %%mm4 \n\t" /* 00000RGB 2 */\
  770. "pand "MANGLE(bm11111000)", %%mm1 \n\t" /* 00RGB000 2.5 */\
  771. "por %%mm4, %%mm1 \n\t" /* 00RGBRGB 2 */\
  772. "movq %%mm1, %%mm4 \n\t" /* 00RGBRGB 2 */\
  773. "psllq $32, %%mm1 \n\t" /* BRGB0000 2 */\
  774. "por %%mm1, %%mm2 \n\t" /* BRGBRGBR 1 */\
  775. \
  776. "psrlq $32, %%mm4 \n\t" /* 000000RG 2.5 */\
  777. "movq %%mm3, %%mm5 \n\t" /* 0RGB0RGB 3 */\
  778. "psrlq $8, %%mm3 \n\t" /* 00RGB0RG 3 */\
  779. "pand "MANGLE(bm00000111)", %%mm5 \n\t" /* 00000RGB 3 */\
  780. "pand "MANGLE(bm11111000)", %%mm3 \n\t" /* 00RGB000 3.5 */\
  781. "por %%mm5, %%mm3 \n\t" /* 00RGBRGB 3 */\
  782. "psllq $16, %%mm3 \n\t" /* RGBRGB00 3 */\
  783. "por %%mm4, %%mm3 \n\t" /* RGBRGBRG 2.5 */\
  784. \
  785. MOVNTQ(%%mm0, (dst))\
  786. MOVNTQ(%%mm2, 8(dst))\
  787. MOVNTQ(%%mm3, 16(dst))\
  788. "add $24, "#dst" \n\t"\
  789. \
  790. "add $8, "#index" \n\t"\
  791. "cmp "#dstw", "#index" \n\t"\
  792. " jb 1b \n\t"
  793. #define WRITEBGR24MMX(dst, dstw, index) \
  794. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  795. "movq %%mm2, %%mm1 \n\t" /* B */\
  796. "movq %%mm5, %%mm6 \n\t" /* R */\
  797. "punpcklbw %%mm4, %%mm2 \n\t" /* GBGBGBGB 0 */\
  798. "punpcklbw %%mm7, %%mm5 \n\t" /* 0R0R0R0R 0 */\
  799. "punpckhbw %%mm4, %%mm1 \n\t" /* GBGBGBGB 2 */\
  800. "punpckhbw %%mm7, %%mm6 \n\t" /* 0R0R0R0R 2 */\
  801. "movq %%mm2, %%mm0 \n\t" /* GBGBGBGB 0 */\
  802. "movq %%mm1, %%mm3 \n\t" /* GBGBGBGB 2 */\
  803. "punpcklwd %%mm5, %%mm0 \n\t" /* 0RGB0RGB 0 */\
  804. "punpckhwd %%mm5, %%mm2 \n\t" /* 0RGB0RGB 1 */\
  805. "punpcklwd %%mm6, %%mm1 \n\t" /* 0RGB0RGB 2 */\
  806. "punpckhwd %%mm6, %%mm3 \n\t" /* 0RGB0RGB 3 */\
  807. \
  808. "movq %%mm0, %%mm4 \n\t" /* 0RGB0RGB 0 */\
  809. "movq %%mm2, %%mm6 \n\t" /* 0RGB0RGB 1 */\
  810. "movq %%mm1, %%mm5 \n\t" /* 0RGB0RGB 2 */\
  811. "movq %%mm3, %%mm7 \n\t" /* 0RGB0RGB 3 */\
  812. \
  813. "psllq $40, %%mm0 \n\t" /* RGB00000 0 */\
  814. "psllq $40, %%mm2 \n\t" /* RGB00000 1 */\
  815. "psllq $40, %%mm1 \n\t" /* RGB00000 2 */\
  816. "psllq $40, %%mm3 \n\t" /* RGB00000 3 */\
  817. \
  818. "punpckhdq %%mm4, %%mm0 \n\t" /* 0RGBRGB0 0 */\
  819. "punpckhdq %%mm6, %%mm2 \n\t" /* 0RGBRGB0 1 */\
  820. "punpckhdq %%mm5, %%mm1 \n\t" /* 0RGBRGB0 2 */\
  821. "punpckhdq %%mm7, %%mm3 \n\t" /* 0RGBRGB0 3 */\
  822. \
  823. "psrlq $8, %%mm0 \n\t" /* 00RGBRGB 0 */\
  824. "movq %%mm2, %%mm6 \n\t" /* 0RGBRGB0 1 */\
  825. "psllq $40, %%mm2 \n\t" /* GB000000 1 */\
  826. "por %%mm2, %%mm0 \n\t" /* GBRGBRGB 0 */\
  827. MOVNTQ(%%mm0, (dst))\
  828. \
  829. "psrlq $24, %%mm6 \n\t" /* 0000RGBR 1 */\
  830. "movq %%mm1, %%mm5 \n\t" /* 0RGBRGB0 2 */\
  831. "psllq $24, %%mm1 \n\t" /* BRGB0000 2 */\
  832. "por %%mm1, %%mm6 \n\t" /* BRGBRGBR 1 */\
  833. MOVNTQ(%%mm6, 8(dst))\
  834. \
  835. "psrlq $40, %%mm5 \n\t" /* 000000RG 2 */\
  836. "psllq $8, %%mm3 \n\t" /* RGBRGB00 3 */\
  837. "por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
  838. MOVNTQ(%%mm5, 16(dst))\
  839. \
  840. "add $24, "#dst" \n\t"\
  841. \
  842. "add $8, "#index" \n\t"\
  843. "cmp "#dstw", "#index" \n\t"\
  844. " jb 1b \n\t"
  845. #define WRITEBGR24MMX2(dst, dstw, index) \
  846. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
  847. "movq "MANGLE(ff_M24A)", %%mm0 \n\t"\
  848. "movq "MANGLE(ff_M24C)", %%mm7 \n\t"\
  849. "pshufw $0x50, %%mm2, %%mm1 \n\t" /* B3 B2 B3 B2 B1 B0 B1 B0 */\
  850. "pshufw $0x50, %%mm4, %%mm3 \n\t" /* G3 G2 G3 G2 G1 G0 G1 G0 */\
  851. "pshufw $0x00, %%mm5, %%mm6 \n\t" /* R1 R0 R1 R0 R1 R0 R1 R0 */\
  852. \
  853. "pand %%mm0, %%mm1 \n\t" /* B2 B1 B0 */\
  854. "pand %%mm0, %%mm3 \n\t" /* G2 G1 G0 */\
  855. "pand %%mm7, %%mm6 \n\t" /* R1 R0 */\
  856. \
  857. "psllq $8, %%mm3 \n\t" /* G2 G1 G0 */\
  858. "por %%mm1, %%mm6 \n\t"\
  859. "por %%mm3, %%mm6 \n\t"\
  860. MOVNTQ(%%mm6, (dst))\
  861. \
  862. "psrlq $8, %%mm4 \n\t" /* 00 G7 G6 G5 G4 G3 G2 G1 */\
  863. "pshufw $0xA5, %%mm2, %%mm1 \n\t" /* B5 B4 B5 B4 B3 B2 B3 B2 */\
  864. "pshufw $0x55, %%mm4, %%mm3 \n\t" /* G4 G3 G4 G3 G4 G3 G4 G3 */\
  865. "pshufw $0xA5, %%mm5, %%mm6 \n\t" /* R5 R4 R5 R4 R3 R2 R3 R2 */\
  866. \
  867. "pand "MANGLE(ff_M24B)", %%mm1 \n\t" /* B5 B4 B3 */\
  868. "pand %%mm7, %%mm3 \n\t" /* G4 G3 */\
  869. "pand %%mm0, %%mm6 \n\t" /* R4 R3 R2 */\
  870. \
  871. "por %%mm1, %%mm3 \n\t" /* B5 G4 B4 G3 B3 */\
  872. "por %%mm3, %%mm6 \n\t"\
  873. MOVNTQ(%%mm6, 8(dst))\
  874. \
  875. "pshufw $0xFF, %%mm2, %%mm1 \n\t" /* B7 B6 B7 B6 B7 B6 B6 B7 */\
  876. "pshufw $0xFA, %%mm4, %%mm3 \n\t" /* 00 G7 00 G7 G6 G5 G6 G5 */\
  877. "pshufw $0xFA, %%mm5, %%mm6 \n\t" /* R7 R6 R7 R6 R5 R4 R5 R4 */\
  878. \
  879. "pand %%mm7, %%mm1 \n\t" /* B7 B6 */\
  880. "pand %%mm0, %%mm3 \n\t" /* G7 G6 G5 */\
  881. "pand "MANGLE(ff_M24B)", %%mm6 \n\t" /* R7 R6 R5 */\
  882. \
  883. "por %%mm1, %%mm3 \n\t"\
  884. "por %%mm3, %%mm6 \n\t"\
  885. MOVNTQ(%%mm6, 16(dst))\
  886. \
  887. "add $24, "#dst" \n\t"\
  888. \
  889. "add $8, "#index" \n\t"\
  890. "cmp "#dstw", "#index" \n\t"\
  891. " jb 1b \n\t"
  892. #ifdef HAVE_MMX2
  893. #undef WRITEBGR24
  894. #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
  895. #else
  896. #undef WRITEBGR24
  897. #define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
  898. #endif
  899. #define REAL_WRITEYUY2(dst, dstw, index) \
  900. "packuswb %%mm3, %%mm3 \n\t"\
  901. "packuswb %%mm4, %%mm4 \n\t"\
  902. "packuswb %%mm7, %%mm1 \n\t"\
  903. "punpcklbw %%mm4, %%mm3 \n\t"\
  904. "movq %%mm1, %%mm7 \n\t"\
  905. "punpcklbw %%mm3, %%mm1 \n\t"\
  906. "punpckhbw %%mm3, %%mm7 \n\t"\
  907. \
  908. MOVNTQ(%%mm1, (dst, index, 2))\
  909. MOVNTQ(%%mm7, 8(dst, index, 2))\
  910. \
  911. "add $8, "#index" \n\t"\
  912. "cmp "#dstw", "#index" \n\t"\
  913. " jb 1b \n\t"
  914. #define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
  915. static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  916. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  917. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
  918. {
  919. #ifdef HAVE_MMX
  920. if(!(c->flags & SWS_BITEXACT)){
  921. if (c->flags & SWS_ACCURATE_RND){
  922. if (uDest){
  923. YSCALEYUV2YV12X_ACCURATE( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
  924. YSCALEYUV2YV12X_ACCURATE(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
  925. }
  926. YSCALEYUV2YV12X_ACCURATE("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
  927. }else{
  928. if (uDest){
  929. YSCALEYUV2YV12X( "0", CHR_MMX_FILTER_OFFSET, uDest, chrDstW)
  930. YSCALEYUV2YV12X(AV_STRINGIFY(VOF), CHR_MMX_FILTER_OFFSET, vDest, chrDstW)
  931. }
  932. YSCALEYUV2YV12X("0", LUM_MMX_FILTER_OFFSET, dest, dstW)
  933. }
  934. return;
  935. }
  936. #endif
  937. #ifdef HAVE_ALTIVEC
  938. yuv2yuvX_altivec_real(lumFilter, lumSrc, lumFilterSize,
  939. chrFilter, chrSrc, chrFilterSize,
  940. dest, uDest, vDest, dstW, chrDstW);
  941. #else //HAVE_ALTIVEC
  942. yuv2yuvXinC(lumFilter, lumSrc, lumFilterSize,
  943. chrFilter, chrSrc, chrFilterSize,
  944. dest, uDest, vDest, dstW, chrDstW);
  945. #endif //!HAVE_ALTIVEC
  946. }
  947. static inline void RENAME(yuv2nv12X)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  948. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  949. uint8_t *dest, uint8_t *uDest, int dstW, int chrDstW, int dstFormat)
  950. {
  951. yuv2nv12XinC(lumFilter, lumSrc, lumFilterSize,
  952. chrFilter, chrSrc, chrFilterSize,
  953. dest, uDest, dstW, chrDstW, dstFormat);
  954. }
  955. static inline void RENAME(yuv2yuv1)(SwsContext *c, int16_t *lumSrc, int16_t *chrSrc,
  956. uint8_t *dest, uint8_t *uDest, uint8_t *vDest, long dstW, long chrDstW)
  957. {
  958. int i;
  959. #ifdef HAVE_MMX
  960. if(!(c->flags & SWS_BITEXACT)){
  961. long p= uDest ? 3 : 1;
  962. uint8_t *src[3]= {lumSrc + dstW, chrSrc + chrDstW, chrSrc + VOFW + chrDstW};
  963. uint8_t *dst[3]= {dest, uDest, vDest};
  964. long counter[3] = {dstW, chrDstW, chrDstW};
  965. if (c->flags & SWS_ACCURATE_RND){
  966. while(p--){
  967. asm volatile(
  968. YSCALEYUV2YV121_ACCURATE
  969. :: "r" (src[p]), "r" (dst[p] + counter[p]),
  970. "g" (-counter[p])
  971. : "%"REG_a
  972. );
  973. }
  974. }else{
  975. while(p--){
  976. asm volatile(
  977. YSCALEYUV2YV121
  978. :: "r" (src[p]), "r" (dst[p] + counter[p]),
  979. "g" (-counter[p])
  980. : "%"REG_a
  981. );
  982. }
  983. }
  984. return;
  985. }
  986. #endif
  987. for (i=0; i<dstW; i++)
  988. {
  989. int val= (lumSrc[i]+64)>>7;
  990. if (val&256){
  991. if (val<0) val=0;
  992. else val=255;
  993. }
  994. dest[i]= val;
  995. }
  996. if (uDest)
  997. for (i=0; i<chrDstW; i++)
  998. {
  999. int u=(chrSrc[i ]+64)>>7;
  1000. int v=(chrSrc[i + VOFW]+64)>>7;
  1001. if ((u|v)&256){
  1002. if (u<0) u=0;
  1003. else if (u>255) u=255;
  1004. if (v<0) v=0;
  1005. else if (v>255) v=255;
  1006. }
  1007. uDest[i]= u;
  1008. vDest[i]= v;
  1009. }
  1010. }
  1011. /**
  1012. * vertical scale YV12 to RGB
  1013. */
  1014. static inline void RENAME(yuv2packedX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
  1015. int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
  1016. uint8_t *dest, long dstW, long dstY)
  1017. {
  1018. #ifdef HAVE_MMX
  1019. long dummy=0;
  1020. if(!(c->flags & SWS_BITEXACT)){
  1021. if (c->flags & SWS_ACCURATE_RND){
  1022. switch(c->dstFormat){
  1023. case PIX_FMT_RGB32:
  1024. YSCALEYUV2PACKEDX_ACCURATE
  1025. YSCALEYUV2RGBX
  1026. WRITEBGR32(%4, %5, %%REGa)
  1027. YSCALEYUV2PACKEDX_END
  1028. return;
  1029. case PIX_FMT_BGR24:
  1030. YSCALEYUV2PACKEDX_ACCURATE
  1031. YSCALEYUV2RGBX
  1032. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c"\n\t" //FIXME optimize
  1033. "add %4, %%"REG_c" \n\t"
  1034. WRITEBGR24(%%REGc, %5, %%REGa)
  1035. :: "r" (&c->redDither),
  1036. "m" (dummy), "m" (dummy), "m" (dummy),
  1037. "r" (dest), "m" (dstW)
  1038. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
  1039. );
  1040. return;
  1041. case PIX_FMT_RGB555:
  1042. YSCALEYUV2PACKEDX_ACCURATE
  1043. YSCALEYUV2RGBX
  1044. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1045. #ifdef DITHER1XBPP
  1046. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1047. "paddusb "MANGLE(g5Dither)", %%mm4\n\t"
  1048. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1049. #endif
  1050. WRITERGB15(%4, %5, %%REGa)
  1051. YSCALEYUV2PACKEDX_END
  1052. return;
  1053. case PIX_FMT_RGB565:
  1054. YSCALEYUV2PACKEDX_ACCURATE
  1055. YSCALEYUV2RGBX
  1056. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1057. #ifdef DITHER1XBPP
  1058. "paddusb "MANGLE(b5Dither)", %%mm2\n\t"
  1059. "paddusb "MANGLE(g6Dither)", %%mm4\n\t"
  1060. "paddusb "MANGLE(r5Dither)", %%mm5\n\t"
  1061. #endif
  1062. WRITERGB16(%4, %5, %%REGa)
  1063. YSCALEYUV2PACKEDX_END
  1064. return;
  1065. case PIX_FMT_YUYV422:
  1066. YSCALEYUV2PACKEDX_ACCURATE
  1067. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1068. "psraw $3, %%mm3 \n\t"
  1069. "psraw $3, %%mm4 \n\t"
  1070. "psraw $3, %%mm1 \n\t"
  1071. "psraw $3, %%mm7 \n\t"
  1072. WRITEYUY2(%4, %5, %%REGa)
  1073. YSCALEYUV2PACKEDX_END
  1074. return;
  1075. }
  1076. }else{
  1077. switch(c->dstFormat)
  1078. {
  1079. case PIX_FMT_RGB32:
  1080. YSCALEYUV2PACKEDX
  1081. YSCALEYUV2RGBX
  1082. WRITEBGR32(%4, %5, %%REGa)
  1083. YSCALEYUV2PACKEDX_END
  1084. return;
  1085. case PIX_FMT_BGR24:
  1086. YSCALEYUV2PACKEDX
  1087. YSCALEYUV2RGBX
  1088. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_c" \n\t" //FIXME optimize
  1089. "add %4, %%"REG_c" \n\t"
  1090. WRITEBGR24(%%REGc, %5, %%REGa)
  1091. :: "r" (&c->redDither),
  1092. "m" (dummy), "m" (dummy), "m" (dummy),
  1093. "r" (dest), "m" (dstW)
  1094. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S
  1095. );
  1096. return;
  1097. case PIX_FMT_RGB555:
  1098. YSCALEYUV2PACKEDX
  1099. YSCALEYUV2RGBX
  1100. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1101. #ifdef DITHER1XBPP
  1102. "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
  1103. "paddusb "MANGLE(g5Dither)", %%mm4 \n\t"
  1104. "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
  1105. #endif
  1106. WRITERGB15(%4, %5, %%REGa)
  1107. YSCALEYUV2PACKEDX_END
  1108. return;
  1109. case PIX_FMT_RGB565:
  1110. YSCALEYUV2PACKEDX
  1111. YSCALEYUV2RGBX
  1112. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1113. #ifdef DITHER1XBPP
  1114. "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
  1115. "paddusb "MANGLE(g6Dither)", %%mm4 \n\t"
  1116. "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
  1117. #endif
  1118. WRITERGB16(%4, %5, %%REGa)
  1119. YSCALEYUV2PACKEDX_END
  1120. return;
  1121. case PIX_FMT_YUYV422:
  1122. YSCALEYUV2PACKEDX
  1123. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1124. "psraw $3, %%mm3 \n\t"
  1125. "psraw $3, %%mm4 \n\t"
  1126. "psraw $3, %%mm1 \n\t"
  1127. "psraw $3, %%mm7 \n\t"
  1128. WRITEYUY2(%4, %5, %%REGa)
  1129. YSCALEYUV2PACKEDX_END
  1130. return;
  1131. }
  1132. }
  1133. }
  1134. #endif /* HAVE_MMX */
  1135. #ifdef HAVE_ALTIVEC
  1136. /* The following list of supported dstFormat values should
  1137. match what's found in the body of altivec_yuv2packedX() */
  1138. if (c->dstFormat==PIX_FMT_ABGR || c->dstFormat==PIX_FMT_BGRA ||
  1139. c->dstFormat==PIX_FMT_BGR24 || c->dstFormat==PIX_FMT_RGB24 ||
  1140. c->dstFormat==PIX_FMT_RGBA || c->dstFormat==PIX_FMT_ARGB)
  1141. altivec_yuv2packedX (c, lumFilter, lumSrc, lumFilterSize,
  1142. chrFilter, chrSrc, chrFilterSize,
  1143. dest, dstW, dstY);
  1144. else
  1145. #endif
  1146. yuv2packedXinC(c, lumFilter, lumSrc, lumFilterSize,
  1147. chrFilter, chrSrc, chrFilterSize,
  1148. dest, dstW, dstY);
  1149. }
  1150. /**
  1151. * vertical bilinear scale YV12 to RGB
  1152. */
  1153. static inline void RENAME(yuv2packed2)(SwsContext *c, uint16_t *buf0, uint16_t *buf1, uint16_t *uvbuf0, uint16_t *uvbuf1,
  1154. uint8_t *dest, int dstW, int yalpha, int uvalpha, int y)
  1155. {
  1156. int yalpha1=4095- yalpha;
  1157. int uvalpha1=4095-uvalpha;
  1158. int i;
  1159. #if 0 //isn't used
  1160. if (flags&SWS_FULL_CHR_H_INT)
  1161. {
  1162. switch(dstFormat)
  1163. {
  1164. #ifdef HAVE_MMX
  1165. case PIX_FMT_RGB32:
  1166. asm volatile(
  1167. FULL_YSCALEYUV2RGB
  1168. "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
  1169. "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
  1170. "movq %%mm3, %%mm1 \n\t"
  1171. "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
  1172. "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
  1173. MOVNTQ(%%mm3, (%4, %%REGa, 4))
  1174. MOVNTQ(%%mm1, 8(%4, %%REGa, 4))
  1175. "add $4, %%"REG_a" \n\t"
  1176. "cmp %5, %%"REG_a" \n\t"
  1177. " jb 1b \n\t"
  1178. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" ((long)dstW),
  1179. "m" (yalpha1), "m" (uvalpha1)
  1180. : "%"REG_a
  1181. );
  1182. break;
  1183. case PIX_FMT_BGR24:
  1184. asm volatile(
  1185. FULL_YSCALEYUV2RGB
  1186. // lsb ... msb
  1187. "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
  1188. "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
  1189. "movq %%mm3, %%mm1 \n\t"
  1190. "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
  1191. "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
  1192. "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
  1193. "psrlq $8, %%mm3 \n\t" // GR0BGR00
  1194. "pand "MANGLE(bm00000111)", %%mm2 \n\t" // BGR00000
  1195. "pand "MANGLE(bm11111000)", %%mm3 \n\t" // 000BGR00
  1196. "por %%mm2, %%mm3 \n\t" // BGRBGR00
  1197. "movq %%mm1, %%mm2 \n\t"
  1198. "psllq $48, %%mm1 \n\t" // 000000BG
  1199. "por %%mm1, %%mm3 \n\t" // BGRBGRBG
  1200. "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
  1201. "psrld $16, %%mm2 \n\t" // R000R000
  1202. "psrlq $24, %%mm1 \n\t" // 0BGR0000
  1203. "por %%mm2, %%mm1 \n\t" // RBGRR000
  1204. "mov %4, %%"REG_b" \n\t"
  1205. "add %%"REG_a", %%"REG_b" \n\t"
  1206. #ifdef HAVE_MMX2
  1207. //FIXME Alignment
  1208. "movntq %%mm3, (%%"REG_b", %%"REG_a", 2) \n\t"
  1209. "movntq %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t"
  1210. #else
  1211. "movd %%mm3, (%%"REG_b", %%"REG_a", 2) \n\t"
  1212. "psrlq $32, %%mm3 \n\t"
  1213. "movd %%mm3, 4(%%"REG_b", %%"REG_a", 2) \n\t"
  1214. "movd %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t"
  1215. #endif
  1216. "add $4, %%"REG_a" \n\t"
  1217. "cmp %5, %%"REG_a" \n\t"
  1218. " jb 1b \n\t"
  1219. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
  1220. "m" (yalpha1), "m" (uvalpha1)
  1221. : "%"REG_a, "%"REG_b
  1222. );
  1223. break;
  1224. case PIX_FMT_BGR555:
  1225. asm volatile(
  1226. FULL_YSCALEYUV2RGB
  1227. #ifdef DITHER1XBPP
  1228. "paddusb "MANGLE(g5Dither)", %%mm1 \n\t"
  1229. "paddusb "MANGLE(r5Dither)", %%mm0 \n\t"
  1230. "paddusb "MANGLE(b5Dither)", %%mm3 \n\t"
  1231. #endif
  1232. "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
  1233. "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
  1234. "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
  1235. "psrlw $3, %%mm3 \n\t"
  1236. "psllw $2, %%mm1 \n\t"
  1237. "psllw $7, %%mm0 \n\t"
  1238. "pand "MANGLE(g15Mask)", %%mm1 \n\t"
  1239. "pand "MANGLE(r15Mask)", %%mm0 \n\t"
  1240. "por %%mm3, %%mm1 \n\t"
  1241. "por %%mm1, %%mm0 \n\t"
  1242. MOVNTQ(%%mm0, (%4, %%REGa, 2))
  1243. "add $4, %%"REG_a" \n\t"
  1244. "cmp %5, %%"REG_a" \n\t"
  1245. " jb 1b \n\t"
  1246. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1247. "m" (yalpha1), "m" (uvalpha1)
  1248. : "%"REG_a
  1249. );
  1250. break;
  1251. case PIX_FMT_BGR565:
  1252. asm volatile(
  1253. FULL_YSCALEYUV2RGB
  1254. #ifdef DITHER1XBPP
  1255. "paddusb "MANGLE(g6Dither)", %%mm1 \n\t"
  1256. "paddusb "MANGLE(r5Dither)", %%mm0 \n\t"
  1257. "paddusb "MANGLE(b5Dither)", %%mm3 \n\t"
  1258. #endif
  1259. "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
  1260. "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
  1261. "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
  1262. "psrlw $3, %%mm3 \n\t"
  1263. "psllw $3, %%mm1 \n\t"
  1264. "psllw $8, %%mm0 \n\t"
  1265. "pand "MANGLE(g16Mask)", %%mm1 \n\t"
  1266. "pand "MANGLE(r16Mask)", %%mm0 \n\t"
  1267. "por %%mm3, %%mm1 \n\t"
  1268. "por %%mm1, %%mm0 \n\t"
  1269. MOVNTQ(%%mm0, (%4, %%REGa, 2))
  1270. "add $4, %%"REG_a" \n\t"
  1271. "cmp %5, %%"REG_a" \n\t"
  1272. " jb 1b \n\t"
  1273. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
  1274. "m" (yalpha1), "m" (uvalpha1)
  1275. : "%"REG_a
  1276. );
  1277. break;
  1278. #endif /* HAVE_MMX */
  1279. case PIX_FMT_BGR32:
  1280. #ifndef HAVE_MMX
  1281. case PIX_FMT_RGB32:
  1282. #endif
  1283. if (dstFormat==PIX_FMT_RGB32)
  1284. {
  1285. int i;
  1286. #ifdef WORDS_BIGENDIAN
  1287. dest++;
  1288. #endif
  1289. for (i=0;i<dstW;i++){
  1290. // vertical linear interpolation && yuv2rgb in a single step:
  1291. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1292. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1293. int V=((uvbuf0[i+VOFW]*uvalpha1+uvbuf1[i+VOFW]*uvalpha)>>19);
  1294. dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
  1295. dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
  1296. dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
  1297. dest+= 4;
  1298. }
  1299. }
  1300. else if (dstFormat==PIX_FMT_BGR24)
  1301. {
  1302. int i;
  1303. for (i=0;i<dstW;i++){
  1304. // vertical linear interpolation && yuv2rgb in a single step:
  1305. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1306. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1307. int V=((uvbuf0[i+VOFW]*uvalpha1+uvbuf1[i+VOFW]*uvalpha)>>19);
  1308. dest[0]=clip_table[((Y + yuvtab_40cf[U]) >>13)];
  1309. dest[1]=clip_table[((Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13)];
  1310. dest[2]=clip_table[((Y + yuvtab_3343[V]) >>13)];
  1311. dest+= 3;
  1312. }
  1313. }
  1314. else if (dstFormat==PIX_FMT_BGR565)
  1315. {
  1316. int i;
  1317. for (i=0;i<dstW;i++){
  1318. // vertical linear interpolation && yuv2rgb in a single step:
  1319. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1320. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1321. int V=((uvbuf0[i+VOFW]*uvalpha1+uvbuf1[i+VOFW]*uvalpha)>>19);
  1322. ((uint16_t*)dest)[i] =
  1323. clip_table16b[(Y + yuvtab_40cf[U]) >>13] |
  1324. clip_table16g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
  1325. clip_table16r[(Y + yuvtab_3343[V]) >>13];
  1326. }
  1327. }
  1328. else if (dstFormat==PIX_FMT_BGR555)
  1329. {
  1330. int i;
  1331. for (i=0;i<dstW;i++){
  1332. // vertical linear interpolation && yuv2rgb in a single step:
  1333. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>19)];
  1334. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>19);
  1335. int V=((uvbuf0[i+VOFW]*uvalpha1+uvbuf1[i+VOFW]*uvalpha)>>19);
  1336. ((uint16_t*)dest)[i] =
  1337. clip_table15b[(Y + yuvtab_40cf[U]) >>13] |
  1338. clip_table15g[(Y + yuvtab_1a1e[V] + yuvtab_0c92[U]) >>13] |
  1339. clip_table15r[(Y + yuvtab_3343[V]) >>13];
  1340. }
  1341. }
  1342. }//FULL_UV_IPOL
  1343. else
  1344. {
  1345. #endif // if 0
  1346. #ifdef HAVE_MMX
  1347. if(!(c->flags & SWS_BITEXACT)){
  1348. switch(c->dstFormat)
  1349. {
  1350. //Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
  1351. case PIX_FMT_RGB32:
  1352. asm volatile(
  1353. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1354. "mov %4, %%"REG_b" \n\t"
  1355. "push %%"REG_BP" \n\t"
  1356. YSCALEYUV2RGB(%%REGBP, %5)
  1357. WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
  1358. "pop %%"REG_BP" \n\t"
  1359. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1360. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1361. "a" (&c->redDither)
  1362. );
  1363. return;
  1364. case PIX_FMT_BGR24:
  1365. asm volatile(
  1366. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1367. "mov %4, %%"REG_b" \n\t"
  1368. "push %%"REG_BP" \n\t"
  1369. YSCALEYUV2RGB(%%REGBP, %5)
  1370. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1371. "pop %%"REG_BP" \n\t"
  1372. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1373. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1374. "a" (&c->redDither)
  1375. );
  1376. return;
  1377. case PIX_FMT_RGB555:
  1378. asm volatile(
  1379. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1380. "mov %4, %%"REG_b" \n\t"
  1381. "push %%"REG_BP" \n\t"
  1382. YSCALEYUV2RGB(%%REGBP, %5)
  1383. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1384. #ifdef DITHER1XBPP
  1385. "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
  1386. "paddusb "MANGLE(g5Dither)", %%mm4 \n\t"
  1387. "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
  1388. #endif
  1389. WRITERGB15(%%REGb, 8280(%5), %%REGBP)
  1390. "pop %%"REG_BP" \n\t"
  1391. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1392. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1393. "a" (&c->redDither)
  1394. );
  1395. return;
  1396. case PIX_FMT_RGB565:
  1397. asm volatile(
  1398. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1399. "mov %4, %%"REG_b" \n\t"
  1400. "push %%"REG_BP" \n\t"
  1401. YSCALEYUV2RGB(%%REGBP, %5)
  1402. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1403. #ifdef DITHER1XBPP
  1404. "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
  1405. "paddusb "MANGLE(g6Dither)", %%mm4 \n\t"
  1406. "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
  1407. #endif
  1408. WRITERGB16(%%REGb, 8280(%5), %%REGBP)
  1409. "pop %%"REG_BP" \n\t"
  1410. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1411. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1412. "a" (&c->redDither)
  1413. );
  1414. return;
  1415. case PIX_FMT_YUYV422:
  1416. asm volatile(
  1417. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1418. "mov %4, %%"REG_b" \n\t"
  1419. "push %%"REG_BP" \n\t"
  1420. YSCALEYUV2PACKED(%%REGBP, %5)
  1421. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1422. "pop %%"REG_BP" \n\t"
  1423. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1424. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1425. "a" (&c->redDither)
  1426. );
  1427. return;
  1428. default: break;
  1429. }
  1430. }
  1431. #endif //HAVE_MMX
  1432. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB2_C, YSCALE_YUV_2_PACKED2_C, YSCALE_YUV_2_GRAY16_2_C, YSCALE_YUV_2_MONO2_C)
  1433. }
  1434. /**
  1435. * YV12 to RGB without scaling or interpolating
  1436. */
  1437. static inline void RENAME(yuv2packed1)(SwsContext *c, uint16_t *buf0, uint16_t *uvbuf0, uint16_t *uvbuf1,
  1438. uint8_t *dest, int dstW, int uvalpha, int dstFormat, int flags, int y)
  1439. {
  1440. const int yalpha1=0;
  1441. int i;
  1442. uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
  1443. const int yalpha= 4096; //FIXME ...
  1444. if (flags&SWS_FULL_CHR_H_INT)
  1445. {
  1446. RENAME(yuv2packed2)(c, buf0, buf0, uvbuf0, uvbuf1, dest, dstW, 0, uvalpha, y);
  1447. return;
  1448. }
  1449. #ifdef HAVE_MMX
  1450. if(!(flags & SWS_BITEXACT)){
  1451. if (uvalpha < 2048) // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
  1452. {
  1453. switch(dstFormat)
  1454. {
  1455. case PIX_FMT_RGB32:
  1456. asm volatile(
  1457. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1458. "mov %4, %%"REG_b" \n\t"
  1459. "push %%"REG_BP" \n\t"
  1460. YSCALEYUV2RGB1(%%REGBP, %5)
  1461. WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
  1462. "pop %%"REG_BP" \n\t"
  1463. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1464. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1465. "a" (&c->redDither)
  1466. );
  1467. return;
  1468. case PIX_FMT_BGR24:
  1469. asm volatile(
  1470. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1471. "mov %4, %%"REG_b" \n\t"
  1472. "push %%"REG_BP" \n\t"
  1473. YSCALEYUV2RGB1(%%REGBP, %5)
  1474. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1475. "pop %%"REG_BP" \n\t"
  1476. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1477. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1478. "a" (&c->redDither)
  1479. );
  1480. return;
  1481. case PIX_FMT_RGB555:
  1482. asm volatile(
  1483. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1484. "mov %4, %%"REG_b" \n\t"
  1485. "push %%"REG_BP" \n\t"
  1486. YSCALEYUV2RGB1(%%REGBP, %5)
  1487. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1488. #ifdef DITHER1XBPP
  1489. "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
  1490. "paddusb "MANGLE(g5Dither)", %%mm4 \n\t"
  1491. "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
  1492. #endif
  1493. WRITERGB15(%%REGb, 8280(%5), %%REGBP)
  1494. "pop %%"REG_BP" \n\t"
  1495. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1496. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1497. "a" (&c->redDither)
  1498. );
  1499. return;
  1500. case PIX_FMT_RGB565:
  1501. asm volatile(
  1502. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1503. "mov %4, %%"REG_b" \n\t"
  1504. "push %%"REG_BP" \n\t"
  1505. YSCALEYUV2RGB1(%%REGBP, %5)
  1506. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1507. #ifdef DITHER1XBPP
  1508. "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
  1509. "paddusb "MANGLE(g6Dither)", %%mm4 \n\t"
  1510. "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
  1511. #endif
  1512. WRITERGB16(%%REGb, 8280(%5), %%REGBP)
  1513. "pop %%"REG_BP" \n\t"
  1514. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1515. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1516. "a" (&c->redDither)
  1517. );
  1518. return;
  1519. case PIX_FMT_YUYV422:
  1520. asm volatile(
  1521. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1522. "mov %4, %%"REG_b" \n\t"
  1523. "push %%"REG_BP" \n\t"
  1524. YSCALEYUV2PACKED1(%%REGBP, %5)
  1525. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1526. "pop %%"REG_BP" \n\t"
  1527. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1528. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1529. "a" (&c->redDither)
  1530. );
  1531. return;
  1532. }
  1533. }
  1534. else
  1535. {
  1536. switch(dstFormat)
  1537. {
  1538. case PIX_FMT_RGB32:
  1539. asm volatile(
  1540. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1541. "mov %4, %%"REG_b" \n\t"
  1542. "push %%"REG_BP" \n\t"
  1543. YSCALEYUV2RGB1b(%%REGBP, %5)
  1544. WRITEBGR32(%%REGb, 8280(%5), %%REGBP)
  1545. "pop %%"REG_BP" \n\t"
  1546. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1547. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1548. "a" (&c->redDither)
  1549. );
  1550. return;
  1551. case PIX_FMT_BGR24:
  1552. asm volatile(
  1553. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1554. "mov %4, %%"REG_b" \n\t"
  1555. "push %%"REG_BP" \n\t"
  1556. YSCALEYUV2RGB1b(%%REGBP, %5)
  1557. WRITEBGR24(%%REGb, 8280(%5), %%REGBP)
  1558. "pop %%"REG_BP" \n\t"
  1559. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1560. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1561. "a" (&c->redDither)
  1562. );
  1563. return;
  1564. case PIX_FMT_RGB555:
  1565. asm volatile(
  1566. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1567. "mov %4, %%"REG_b" \n\t"
  1568. "push %%"REG_BP" \n\t"
  1569. YSCALEYUV2RGB1b(%%REGBP, %5)
  1570. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1571. #ifdef DITHER1XBPP
  1572. "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
  1573. "paddusb "MANGLE(g5Dither)", %%mm4 \n\t"
  1574. "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
  1575. #endif
  1576. WRITERGB15(%%REGb, 8280(%5), %%REGBP)
  1577. "pop %%"REG_BP" \n\t"
  1578. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1579. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1580. "a" (&c->redDither)
  1581. );
  1582. return;
  1583. case PIX_FMT_RGB565:
  1584. asm volatile(
  1585. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1586. "mov %4, %%"REG_b" \n\t"
  1587. "push %%"REG_BP" \n\t"
  1588. YSCALEYUV2RGB1b(%%REGBP, %5)
  1589. /* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
  1590. #ifdef DITHER1XBPP
  1591. "paddusb "MANGLE(b5Dither)", %%mm2 \n\t"
  1592. "paddusb "MANGLE(g6Dither)", %%mm4 \n\t"
  1593. "paddusb "MANGLE(r5Dither)", %%mm5 \n\t"
  1594. #endif
  1595. WRITERGB16(%%REGb, 8280(%5), %%REGBP)
  1596. "pop %%"REG_BP" \n\t"
  1597. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1598. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1599. "a" (&c->redDither)
  1600. );
  1601. return;
  1602. case PIX_FMT_YUYV422:
  1603. asm volatile(
  1604. "mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
  1605. "mov %4, %%"REG_b" \n\t"
  1606. "push %%"REG_BP" \n\t"
  1607. YSCALEYUV2PACKED1b(%%REGBP, %5)
  1608. WRITEYUY2(%%REGb, 8280(%5), %%REGBP)
  1609. "pop %%"REG_BP" \n\t"
  1610. "mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
  1611. :: "c" (buf0), "d" (buf1), "S" (uvbuf0), "D" (uvbuf1), "m" (dest),
  1612. "a" (&c->redDither)
  1613. );
  1614. return;
  1615. }
  1616. }
  1617. }
  1618. #endif /* HAVE_MMX */
  1619. if (uvalpha < 2048)
  1620. {
  1621. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1_C, YSCALE_YUV_2_PACKED1_C, YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C)
  1622. }else{
  1623. YSCALE_YUV_2_ANYRGB_C(YSCALE_YUV_2_RGB1B_C, YSCALE_YUV_2_PACKED1B_C, YSCALE_YUV_2_GRAY16_1_C, YSCALE_YUV_2_MONO2_C)
  1624. }
  1625. }
  1626. //FIXME yuy2* can read up to 7 samples too much
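/* Packed 4:2:2 layout, for reference: YUYV422 stores bytes as Y0 U0 Y1 V0, so
 * luma sits in the even bytes and chroma in the odd ones; UYVY422 (handled a
 * few functions below) is the same with even and odd bytes swapped.  The MMX
 * paths below grab 8 luma (or 4 chroma) samples per iteration by masking with
 * bm01010101 or shifting by 8; the C fallbacks simply stride through the bytes. */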
  1627. static inline void RENAME(yuy2ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
  1628. {
  1629. #ifdef HAVE_MMX
  1630. asm volatile(
  1631. "movq "MANGLE(bm01010101)", %%mm2 \n\t"
  1632. "mov %0, %%"REG_a" \n\t"
  1633. "1: \n\t"
  1634. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1635. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1636. "pand %%mm2, %%mm0 \n\t"
  1637. "pand %%mm2, %%mm1 \n\t"
  1638. "packuswb %%mm1, %%mm0 \n\t"
  1639. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1640. "add $8, %%"REG_a" \n\t"
  1641. " js 1b \n\t"
  1642. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1643. : "%"REG_a
  1644. );
  1645. #else
  1646. int i;
  1647. for (i=0; i<width; i++)
  1648. dst[i]= src[2*i];
  1649. #endif
  1650. }
  1651. static inline void RENAME(yuy2ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
  1652. {
  1653. #ifdef HAVE_MMX
  1654. asm volatile(
  1655. "movq "MANGLE(bm01010101)", %%mm4 \n\t"
  1656. "mov %0, %%"REG_a" \n\t"
  1657. "1: \n\t"
  1658. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1659. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1660. "psrlw $8, %%mm0 \n\t"
  1661. "psrlw $8, %%mm1 \n\t"
  1662. "packuswb %%mm1, %%mm0 \n\t"
  1663. "movq %%mm0, %%mm1 \n\t"
  1664. "psrlw $8, %%mm0 \n\t"
  1665. "pand %%mm4, %%mm1 \n\t"
  1666. "packuswb %%mm0, %%mm0 \n\t"
  1667. "packuswb %%mm1, %%mm1 \n\t"
  1668. "movd %%mm0, (%3, %%"REG_a") \n\t"
  1669. "movd %%mm1, (%2, %%"REG_a") \n\t"
  1670. "add $4, %%"REG_a" \n\t"
  1671. " js 1b \n\t"
  1672. : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
  1673. : "%"REG_a
  1674. );
  1675. #else
  1676. int i;
  1677. for (i=0; i<width; i++)
  1678. {
  1679. dstU[i]= src1[4*i + 1];
  1680. dstV[i]= src1[4*i + 3];
  1681. }
  1682. #endif
  1683. assert(src1 == src2);
  1684. }
 1685. /* This is almost identical to the previous function, and exists only because
 1686.  * yuy2ToY/UV(dst, src+1, ...) would have 100% unaligned accesses. */
  1687. static inline void RENAME(uyvyToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
  1688. {
  1689. #ifdef HAVE_MMX
  1690. asm volatile(
  1691. "mov %0, %%"REG_a" \n\t"
  1692. "1: \n\t"
  1693. "movq (%1, %%"REG_a",2), %%mm0 \n\t"
  1694. "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
  1695. "psrlw $8, %%mm0 \n\t"
  1696. "psrlw $8, %%mm1 \n\t"
  1697. "packuswb %%mm1, %%mm0 \n\t"
  1698. "movq %%mm0, (%2, %%"REG_a") \n\t"
  1699. "add $8, %%"REG_a" \n\t"
  1700. " js 1b \n\t"
  1701. : : "g" (-width), "r" (src+width*2), "r" (dst+width)
  1702. : "%"REG_a
  1703. );
  1704. #else
  1705. int i;
  1706. for (i=0; i<width; i++)
  1707. dst[i]= src[2*i+1];
  1708. #endif
  1709. }
  1710. static inline void RENAME(uyvyToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
  1711. {
  1712. #ifdef HAVE_MMX
  1713. asm volatile(
  1714. "movq "MANGLE(bm01010101)", %%mm4 \n\t"
  1715. "mov %0, %%"REG_a" \n\t"
  1716. "1: \n\t"
  1717. "movq (%1, %%"REG_a",4), %%mm0 \n\t"
  1718. "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
  1719. "pand %%mm4, %%mm0 \n\t"
  1720. "pand %%mm4, %%mm1 \n\t"
  1721. "packuswb %%mm1, %%mm0 \n\t"
  1722. "movq %%mm0, %%mm1 \n\t"
  1723. "psrlw $8, %%mm0 \n\t"
  1724. "pand %%mm4, %%mm1 \n\t"
  1725. "packuswb %%mm0, %%mm0 \n\t"
  1726. "packuswb %%mm1, %%mm1 \n\t"
  1727. "movd %%mm0, (%3, %%"REG_a") \n\t"
  1728. "movd %%mm1, (%2, %%"REG_a") \n\t"
  1729. "add $4, %%"REG_a" \n\t"
  1730. " js 1b \n\t"
  1731. : : "g" (-width), "r" (src1+width*4), "r" (dstU+width), "r" (dstV+width)
  1732. : "%"REG_a
  1733. );
  1734. #else
  1735. int i;
  1736. for (i=0; i<width; i++)
  1737. {
  1738. dstU[i]= src1[4*i + 0];
  1739. dstV[i]= src1[4*i + 2];
  1740. }
  1741. #endif
  1742. assert(src1 == src2);
  1743. }
  1744. #define BGR2Y(type, name, shr, shg, shb, maskr, maskg, maskb, RY, GY, BY, S)\
  1745. static inline void RENAME(name)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)\
  1746. {\
  1747. int i;\
  1748. for (i=0; i<width; i++)\
  1749. {\
  1750. int b= (((type*)src)[i]>>shb)&maskb;\
  1751. int g= (((type*)src)[i]>>shg)&maskg;\
  1752. int r= (((type*)src)[i]>>shr)&maskr;\
  1753. \
  1754. dst[i]= (((RY)*r + (GY)*g + (BY)*b + (33<<((S)-1)))>>(S));\
  1755. }\
  1756. }
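/* Fixed-point RGB->Y: dst = (RY*r + GY*g + BY*b + round) >> S, where the
 * rounding constant 33<<(S-1) equals (16<<S) + (1<<(S-1)), i.e. it folds the
 * +16 luma offset and the 0.5 rounding term into a single addition.  The
 * instantiations below pre-shift the coefficients (and enlarge S) so that the
 * extra scale each channel picks up from its position in the packed word
 * cancels out.  The BGR2UV macros further down use 257<<(S-1) the same way
 * for the +128 chroma offset. */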
  1757. BGR2Y(uint32_t, bgr32ToY,16, 0, 0, 0x00FF, 0xFF00, 0x00FF, RY<< 8, GY , BY<< 8, RGB2YUV_SHIFT+8)
  1758. BGR2Y(uint32_t, rgb32ToY, 0, 0,16, 0x00FF, 0xFF00, 0x00FF, RY<< 8, GY , BY<< 8, RGB2YUV_SHIFT+8)
  1759. BGR2Y(uint16_t, bgr16ToY, 0, 0, 0, 0x001F, 0x07E0, 0xF800, RY<<11, GY<<5, BY , RGB2YUV_SHIFT+8)
  1760. BGR2Y(uint16_t, bgr15ToY, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, RY<<10, GY<<5, BY , RGB2YUV_SHIFT+7)
  1761. BGR2Y(uint16_t, rgb16ToY, 0, 0, 0, 0xF800, 0x07E0, 0x001F, RY , GY<<5, BY<<11, RGB2YUV_SHIFT+8)
  1762. BGR2Y(uint16_t, rgb15ToY, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, RY , GY<<5, BY<<10, RGB2YUV_SHIFT+7)
  1763. #define BGR2UV(type, name, shr, shg, shb, maskr, maskg, maskb, RU, GU, BU, RV, GV, BV, S)\
  1764. static inline void RENAME(name)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, uint8_t *dummy, long width, uint32_t *unused)\
  1765. {\
  1766. int i;\
  1767. for (i=0; i<width; i++)\
  1768. {\
  1769. int b= (((type*)src)[i]&maskb)>>shb;\
  1770. int g= (((type*)src)[i]&maskg)>>shg;\
  1771. int r= (((type*)src)[i]&maskr)>>shr;\
  1772. \
  1773. dstU[i]= ((RU)*r + (GU)*g + (BU)*b + (257<<((S)-1)))>>(S);\
  1774. dstV[i]= ((RV)*r + (GV)*g + (BV)*b + (257<<((S)-1)))>>(S);\
  1775. }\
  1776. }\
  1777. static inline void RENAME(name ## _half)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, uint8_t *dummy, long width, uint32_t *unused)\
  1778. {\
  1779. int i;\
  1780. for (i=0; i<width; i++)\
  1781. {\
  1782. int pix0= ((type*)src)[2*i+0];\
  1783. int pix1= ((type*)src)[2*i+1];\
  1784. int g= (pix0&maskg)+(pix1&maskg);\
  1785. int b= ((pix0+pix1-g)&(maskb|(2*maskb)))>>shb;\
  1786. int r= ((pix0+pix1-g)&(maskr|(2*maskr)))>>shr;\
  1787. \
  1788. g>>=shg;\
  1789. \
  1790. dstU[i]= ((RU)*r + (GU)*g + (BU)*b + (257<<(S)))>>((S)+1);\
  1791. dstV[i]= ((RV)*r + (GV)*g + (BV)*b + (257<<(S)))>>((S)+1);\
  1792. }\
  1793. }
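/* The *_half variants average two horizontally adjacent pixels per chroma
 * sample: pix0+pix1 is computed once, the summed green field is subtracted
 * first so that the blue and red sums - which each need one extra bit that
 * would otherwise collide with green - can be masked out with maskX|(2*maskX),
 * and the factor of 2 from the summing is removed by shifting by (S)+1. */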
  1794. BGR2UV(uint32_t, bgr32ToUV,16, 0, 0, 0xFF0000, 0xFF00, 0x00FF, RU<< 8, GU , BU<< 8, RV<< 8, GV , BV<< 8, RGB2YUV_SHIFT+8)
  1795. BGR2UV(uint32_t, rgb32ToUV, 0, 0,16, 0x00FF, 0xFF00, 0xFF0000, RU<< 8, GU , BU<< 8, RV<< 8, GV , BV<< 8, RGB2YUV_SHIFT+8)
  1796. BGR2UV(uint16_t, bgr16ToUV, 0, 0, 0, 0x001F, 0x07E0, 0xF800, RU<<11, GU<<5, BU , RV<<11, GV<<5, BV , RGB2YUV_SHIFT+8)
  1797. BGR2UV(uint16_t, bgr15ToUV, 0, 0, 0, 0x001F, 0x03E0, 0x7C00, RU<<10, GU<<5, BU , RV<<10, GV<<5, BV , RGB2YUV_SHIFT+7)
  1798. BGR2UV(uint16_t, rgb16ToUV, 0, 0, 0, 0xF800, 0x07E0, 0x001F, RU , GU<<5, BU<<11, RV , GV<<5, BV<<11, RGB2YUV_SHIFT+8)
  1799. BGR2UV(uint16_t, rgb15ToUV, 0, 0, 0, 0x7C00, 0x03E0, 0x001F, RU , GU<<5, BU<<10, RV , GV<<5, BV<<10, RGB2YUV_SHIFT+7)
  1800. #ifdef HAVE_MMX
  1801. static inline void RENAME(bgr24ToY_mmx)(uint8_t *dst, uint8_t *src, long width, int srcFormat)
  1802. {
  1803. if(srcFormat == PIX_FMT_BGR24){
  1804. asm volatile(
  1805. "movq "MANGLE(ff_bgr24toY1Coeff)", %%mm5 \n\t"
  1806. "movq "MANGLE(ff_bgr24toY2Coeff)", %%mm6 \n\t"
  1807. :
  1808. );
  1809. }else{
  1810. asm volatile(
  1811. "movq "MANGLE(ff_rgb24toY1Coeff)", %%mm5 \n\t"
  1812. "movq "MANGLE(ff_rgb24toY2Coeff)", %%mm6 \n\t"
  1813. :
  1814. );
  1815. }
  1816. asm volatile(
  1817. "movq "MANGLE(ff_bgr24toYOffset)", %%mm4 \n\t"
  1818. "mov %2, %%"REG_a" \n\t"
  1819. "pxor %%mm7, %%mm7 \n\t"
  1820. "1: \n\t"
  1821. PREFETCH" 64(%0) \n\t"
  1822. "movd (%0), %%mm0 \n\t"
  1823. "movd 2(%0), %%mm1 \n\t"
  1824. "movd 6(%0), %%mm2 \n\t"
  1825. "movd 8(%0), %%mm3 \n\t"
  1826. "add $12, %0 \n\t"
  1827. "punpcklbw %%mm7, %%mm0 \n\t"
  1828. "punpcklbw %%mm7, %%mm1 \n\t"
  1829. "punpcklbw %%mm7, %%mm2 \n\t"
  1830. "punpcklbw %%mm7, %%mm3 \n\t"
  1831. "pmaddwd %%mm5, %%mm0 \n\t"
  1832. "pmaddwd %%mm6, %%mm1 \n\t"
  1833. "pmaddwd %%mm5, %%mm2 \n\t"
  1834. "pmaddwd %%mm6, %%mm3 \n\t"
  1835. "paddd %%mm1, %%mm0 \n\t"
  1836. "paddd %%mm3, %%mm2 \n\t"
  1837. "paddd %%mm4, %%mm0 \n\t"
  1838. "paddd %%mm4, %%mm2 \n\t"
  1839. "psrad $15, %%mm0 \n\t"
  1840. "psrad $15, %%mm2 \n\t"
  1841. "packssdw %%mm2, %%mm0 \n\t"
  1842. "packuswb %%mm0, %%mm0 \n\t"
  1843. "movd %%mm0, (%1, %%"REG_a") \n\t"
  1844. "add $4, %%"REG_a" \n\t"
  1845. " js 1b \n\t"
  1846. : "+r" (src)
  1847. : "r" (dst+width), "g" (-width)
  1848. : "%"REG_a
  1849. );
  1850. }
  1851. static inline void RENAME(bgr24ToUV_mmx)(uint8_t *dstU, uint8_t *dstV, uint8_t *src, long width, int srcFormat)
  1852. {
  1853. asm volatile(
  1854. "movq 24+%4, %%mm6 \n\t"
  1855. "mov %3, %%"REG_a" \n\t"
  1856. "pxor %%mm7, %%mm7 \n\t"
  1857. "1: \n\t"
  1858. PREFETCH" 64(%0) \n\t"
  1859. "movd (%0), %%mm0 \n\t"
  1860. "movd 2(%0), %%mm1 \n\t"
  1861. "punpcklbw %%mm7, %%mm0 \n\t"
  1862. "punpcklbw %%mm7, %%mm1 \n\t"
  1863. "movq %%mm0, %%mm2 \n\t"
  1864. "movq %%mm1, %%mm3 \n\t"
  1865. "pmaddwd %4, %%mm0 \n\t"
  1866. "pmaddwd 8+%4, %%mm1 \n\t"
  1867. "pmaddwd 16+%4, %%mm2 \n\t"
  1868. "pmaddwd %%mm6, %%mm3 \n\t"
  1869. "paddd %%mm1, %%mm0 \n\t"
  1870. "paddd %%mm3, %%mm2 \n\t"
  1871. "movd 6(%0), %%mm1 \n\t"
  1872. "movd 8(%0), %%mm3 \n\t"
  1873. "add $12, %0 \n\t"
  1874. "punpcklbw %%mm7, %%mm1 \n\t"
  1875. "punpcklbw %%mm7, %%mm3 \n\t"
  1876. "movq %%mm1, %%mm4 \n\t"
  1877. "movq %%mm3, %%mm5 \n\t"
  1878. "pmaddwd %4, %%mm1 \n\t"
  1879. "pmaddwd 8+%4, %%mm3 \n\t"
  1880. "pmaddwd 16+%4, %%mm4 \n\t"
  1881. "pmaddwd %%mm6, %%mm5 \n\t"
  1882. "paddd %%mm3, %%mm1 \n\t"
  1883. "paddd %%mm5, %%mm4 \n\t"
  1884. "movq "MANGLE(ff_bgr24toUVOffset)", %%mm3 \n\t"
  1885. "paddd %%mm3, %%mm0 \n\t"
  1886. "paddd %%mm3, %%mm2 \n\t"
  1887. "paddd %%mm3, %%mm1 \n\t"
  1888. "paddd %%mm3, %%mm4 \n\t"
  1889. "psrad $15, %%mm0 \n\t"
  1890. "psrad $15, %%mm2 \n\t"
  1891. "psrad $15, %%mm1 \n\t"
  1892. "psrad $15, %%mm4 \n\t"
  1893. "packssdw %%mm1, %%mm0 \n\t"
  1894. "packssdw %%mm4, %%mm2 \n\t"
  1895. "packuswb %%mm0, %%mm0 \n\t"
  1896. "packuswb %%mm2, %%mm2 \n\t"
  1897. "movd %%mm0, (%1, %%"REG_a") \n\t"
  1898. "movd %%mm2, (%2, %%"REG_a") \n\t"
  1899. "add $4, %%"REG_a" \n\t"
  1900. " js 1b \n\t"
  1901. : "+r" (src)
  1902. : "r" (dstU+width), "r" (dstV+width), "g" (-width), "m"(ff_bgr24toUV[srcFormat == PIX_FMT_RGB24][0])
  1903. : "%"REG_a
  1904. );
  1905. }
  1906. #endif
  1907. static inline void RENAME(bgr24ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
  1908. {
  1909. #ifdef HAVE_MMX
  1910. RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_BGR24);
  1911. #else
  1912. int i;
  1913. for (i=0; i<width; i++)
  1914. {
  1915. int b= src[i*3+0];
  1916. int g= src[i*3+1];
  1917. int r= src[i*3+2];
  1918. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
  1919. }
  1920. #endif /* HAVE_MMX */
  1921. }
  1922. static inline void RENAME(bgr24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
  1923. {
  1924. #ifdef HAVE_MMX
  1925. RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_BGR24);
  1926. #else
  1927. int i;
  1928. for (i=0; i<width; i++)
  1929. {
  1930. int b= src1[3*i + 0];
  1931. int g= src1[3*i + 1];
  1932. int r= src1[3*i + 2];
  1933. dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
  1934. dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
  1935. }
  1936. #endif /* HAVE_MMX */
  1937. assert(src1 == src2);
  1938. }
  1939. static inline void RENAME(bgr24ToUV_half)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
  1940. {
  1941. int i;
  1942. for (i=0; i<width; i++)
  1943. {
  1944. int b= src1[6*i + 0] + src1[6*i + 3];
  1945. int g= src1[6*i + 1] + src1[6*i + 4];
  1946. int r= src1[6*i + 2] + src1[6*i + 5];
  1947. dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
  1948. dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
  1949. }
  1950. assert(src1 == src2);
  1951. }
  1952. static inline void RENAME(rgb24ToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
  1953. {
  1954. #ifdef HAVE_MMX
  1955. RENAME(bgr24ToY_mmx)(dst, src, width, PIX_FMT_RGB24);
  1956. #else
  1957. int i;
  1958. for (i=0; i<width; i++)
  1959. {
  1960. int r= src[i*3+0];
  1961. int g= src[i*3+1];
  1962. int b= src[i*3+2];
  1963. dst[i]= ((RY*r + GY*g + BY*b + (33<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT);
  1964. }
  1965. #endif
  1966. }
  1967. static inline void RENAME(rgb24ToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
  1968. {
  1969. int i;
  1970. assert(src1==src2);
  1971. #ifdef HAVE_MMX
  1972. RENAME(bgr24ToUV_mmx)(dstU, dstV, src1, width, PIX_FMT_RGB24);
  1973. #else
  1974. for (i=0; i<width; i++)
  1975. {
  1976. int r= src1[3*i + 0];
  1977. int g= src1[3*i + 1];
  1978. int b= src1[3*i + 2];
  1979. dstU[i]= (RU*r + GU*g + BU*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
  1980. dstV[i]= (RV*r + GV*g + BV*b + (257<<(RGB2YUV_SHIFT-1)))>>RGB2YUV_SHIFT;
  1981. }
  1982. #endif
  1983. }
  1984. static inline void RENAME(rgb24ToUV_half)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *unused)
  1985. {
  1986. int i;
  1987. assert(src1==src2);
  1988. for (i=0; i<width; i++)
  1989. {
  1990. int r= src1[6*i + 0] + src1[6*i + 3];
  1991. int g= src1[6*i + 1] + src1[6*i + 4];
  1992. int b= src1[6*i + 2] + src1[6*i + 5];
  1993. dstU[i]= (RU*r + GU*g + BU*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
  1994. dstV[i]= (RV*r + GV*g + BV*b + (257<<RGB2YUV_SHIFT))>>(RGB2YUV_SHIFT+1);
  1995. }
  1996. }
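/* For paletted and 4/8-bit RGB/BGR input, pal[] is expected to hold the
 * palette already converted to packed YUV with Y in the low byte, U in bits
 * 8-15 and V in bits 16-23 - that is what the &0xFF and the >>8 / >>16 in the
 * two functions below rely on. */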
  1997. static inline void RENAME(palToY)(uint8_t *dst, uint8_t *src, long width, uint32_t *pal)
  1998. {
  1999. int i;
  2000. for (i=0; i<width; i++)
  2001. {
  2002. int d= src[i];
  2003. dst[i]= pal[d] & 0xFF;
  2004. }
  2005. }
  2006. static inline void RENAME(palToUV)(uint8_t *dstU, uint8_t *dstV, uint8_t *src1, uint8_t *src2, long width, uint32_t *pal)
  2007. {
  2008. int i;
  2009. assert(src1 == src2);
  2010. for (i=0; i<width; i++)
  2011. {
  2012. int p= pal[src1[i]];
  2013. dstU[i]= p>>8;
  2014. dstV[i]= p>>16;
  2015. }
  2016. }
  2017. static inline void RENAME(monowhite2Y)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
  2018. {
  2019. int i, j;
  2020. for (i=0; i<width/8; i++){
  2021. int d= ~src[i];
  2022. for(j=0; j<8; j++)
  2023. dst[8*i+j]= ((d>>(7-j))&1)*255;
  2024. }
  2025. }
  2026. static inline void RENAME(monoblack2Y)(uint8_t *dst, uint8_t *src, long width, uint32_t *unused)
  2027. {
  2028. int i, j;
  2029. for (i=0; i<width/8; i++){
  2030. int d= src[i];
  2031. for(j=0; j<8; j++)
  2032. dst[8*i+j]= ((d>>(7-j))&1)*255;
  2033. }
  2034. }
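/* The mono*2Y functions above expand 1 bpp packed bitmaps to one luma byte per
 * pixel, MSB first; monowhite2Y inverts each byte because white is stored as 0.
 * Note that any trailing width%8 pixels are left unconverted here. */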
  2035. // bilinear / bicubic scaling
  2036. static inline void RENAME(hScale)(int16_t *dst, int dstW, uint8_t *src, int srcW, int xInc,
  2037. int16_t *filter, int16_t *filterPos, long filterSize)
  2038. {
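    /* In plain C each output sample is
     *   dst[i] = FFMIN((sum over j<filterSize of src[filterPos[i]+j]*filter[i*filterSize+j]) >> 7, (1<<15)-1);
     * i.e. 8-bit input convolved with 16-bit coefficients, yielding the 15-bit
     * fixed-point samples the vertical scaler expects.  The MMX blocks below are
     * unrolled variants of exactly this loop for filterSize 4, 8 and the generic case. */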
  2039. #ifdef HAVE_MMX
  2040. assert(filterSize % 4 == 0 && filterSize>0);
 2041. if (filterSize==4) // Always true for upscaling, sometimes for downscaling, too.
  2042. {
  2043. long counter= -2*dstW;
  2044. filter-= counter*2;
  2045. filterPos-= counter/2;
  2046. dst-= counter/2;
  2047. asm volatile(
  2048. #if defined(PIC)
  2049. "push %%"REG_b" \n\t"
  2050. #endif
  2051. "pxor %%mm7, %%mm7 \n\t"
  2052. "push %%"REG_BP" \n\t" // we use 7 regs here ...
  2053. "mov %%"REG_a", %%"REG_BP" \n\t"
  2054. ASMALIGN(4)
  2055. "1: \n\t"
  2056. "movzwl (%2, %%"REG_BP"), %%eax \n\t"
  2057. "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
  2058. "movq (%1, %%"REG_BP", 4), %%mm1 \n\t"
  2059. "movq 8(%1, %%"REG_BP", 4), %%mm3 \n\t"
  2060. "movd (%3, %%"REG_a"), %%mm0 \n\t"
  2061. "movd (%3, %%"REG_b"), %%mm2 \n\t"
  2062. "punpcklbw %%mm7, %%mm0 \n\t"
  2063. "punpcklbw %%mm7, %%mm2 \n\t"
  2064. "pmaddwd %%mm1, %%mm0 \n\t"
  2065. "pmaddwd %%mm2, %%mm3 \n\t"
  2066. "movq %%mm0, %%mm4 \n\t"
  2067. "punpckldq %%mm3, %%mm0 \n\t"
  2068. "punpckhdq %%mm3, %%mm4 \n\t"
  2069. "paddd %%mm4, %%mm0 \n\t"
  2070. "psrad $7, %%mm0 \n\t"
  2071. "packssdw %%mm0, %%mm0 \n\t"
  2072. "movd %%mm0, (%4, %%"REG_BP") \n\t"
  2073. "add $4, %%"REG_BP" \n\t"
  2074. " jnc 1b \n\t"
  2075. "pop %%"REG_BP" \n\t"
  2076. #if defined(PIC)
  2077. "pop %%"REG_b" \n\t"
  2078. #endif
  2079. : "+a" (counter)
  2080. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  2081. #if !defined(PIC)
  2082. : "%"REG_b
  2083. #endif
  2084. );
  2085. }
  2086. else if (filterSize==8)
  2087. {
  2088. long counter= -2*dstW;
  2089. filter-= counter*4;
  2090. filterPos-= counter/2;
  2091. dst-= counter/2;
  2092. asm volatile(
  2093. #if defined(PIC)
  2094. "push %%"REG_b" \n\t"
  2095. #endif
  2096. "pxor %%mm7, %%mm7 \n\t"
  2097. "push %%"REG_BP" \n\t" // we use 7 regs here ...
  2098. "mov %%"REG_a", %%"REG_BP" \n\t"
  2099. ASMALIGN(4)
  2100. "1: \n\t"
  2101. "movzwl (%2, %%"REG_BP"), %%eax \n\t"
  2102. "movzwl 2(%2, %%"REG_BP"), %%ebx \n\t"
  2103. "movq (%1, %%"REG_BP", 8), %%mm1 \n\t"
  2104. "movq 16(%1, %%"REG_BP", 8), %%mm3 \n\t"
  2105. "movd (%3, %%"REG_a"), %%mm0 \n\t"
  2106. "movd (%3, %%"REG_b"), %%mm2 \n\t"
  2107. "punpcklbw %%mm7, %%mm0 \n\t"
  2108. "punpcklbw %%mm7, %%mm2 \n\t"
  2109. "pmaddwd %%mm1, %%mm0 \n\t"
  2110. "pmaddwd %%mm2, %%mm3 \n\t"
  2111. "movq 8(%1, %%"REG_BP", 8), %%mm1 \n\t"
  2112. "movq 24(%1, %%"REG_BP", 8), %%mm5 \n\t"
  2113. "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
  2114. "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
  2115. "punpcklbw %%mm7, %%mm4 \n\t"
  2116. "punpcklbw %%mm7, %%mm2 \n\t"
  2117. "pmaddwd %%mm1, %%mm4 \n\t"
  2118. "pmaddwd %%mm2, %%mm5 \n\t"
  2119. "paddd %%mm4, %%mm0 \n\t"
  2120. "paddd %%mm5, %%mm3 \n\t"
  2121. "movq %%mm0, %%mm4 \n\t"
  2122. "punpckldq %%mm3, %%mm0 \n\t"
  2123. "punpckhdq %%mm3, %%mm4 \n\t"
  2124. "paddd %%mm4, %%mm0 \n\t"
  2125. "psrad $7, %%mm0 \n\t"
  2126. "packssdw %%mm0, %%mm0 \n\t"
  2127. "movd %%mm0, (%4, %%"REG_BP") \n\t"
  2128. "add $4, %%"REG_BP" \n\t"
  2129. " jnc 1b \n\t"
  2130. "pop %%"REG_BP" \n\t"
  2131. #if defined(PIC)
  2132. "pop %%"REG_b" \n\t"
  2133. #endif
  2134. : "+a" (counter)
  2135. : "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
  2136. #if !defined(PIC)
  2137. : "%"REG_b
  2138. #endif
  2139. );
  2140. }
  2141. else
  2142. {
  2143. uint8_t *offset = src+filterSize;
  2144. long counter= -2*dstW;
  2145. //filter-= counter*filterSize/2;
  2146. filterPos-= counter/2;
  2147. dst-= counter/2;
  2148. asm volatile(
  2149. "pxor %%mm7, %%mm7 \n\t"
  2150. ASMALIGN(4)
  2151. "1: \n\t"
  2152. "mov %2, %%"REG_c" \n\t"
  2153. "movzwl (%%"REG_c", %0), %%eax \n\t"
  2154. "movzwl 2(%%"REG_c", %0), %%edx \n\t"
  2155. "mov %5, %%"REG_c" \n\t"
  2156. "pxor %%mm4, %%mm4 \n\t"
  2157. "pxor %%mm5, %%mm5 \n\t"
  2158. "2: \n\t"
  2159. "movq (%1), %%mm1 \n\t"
  2160. "movq (%1, %6), %%mm3 \n\t"
  2161. "movd (%%"REG_c", %%"REG_a"), %%mm0 \n\t"
  2162. "movd (%%"REG_c", %%"REG_d"), %%mm2 \n\t"
  2163. "punpcklbw %%mm7, %%mm0 \n\t"
  2164. "punpcklbw %%mm7, %%mm2 \n\t"
  2165. "pmaddwd %%mm1, %%mm0 \n\t"
  2166. "pmaddwd %%mm2, %%mm3 \n\t"
  2167. "paddd %%mm3, %%mm5 \n\t"
  2168. "paddd %%mm0, %%mm4 \n\t"
  2169. "add $8, %1 \n\t"
  2170. "add $4, %%"REG_c" \n\t"
  2171. "cmp %4, %%"REG_c" \n\t"
  2172. " jb 2b \n\t"
  2173. "add %6, %1 \n\t"
  2174. "movq %%mm4, %%mm0 \n\t"
  2175. "punpckldq %%mm5, %%mm4 \n\t"
  2176. "punpckhdq %%mm5, %%mm0 \n\t"
  2177. "paddd %%mm0, %%mm4 \n\t"
  2178. "psrad $7, %%mm4 \n\t"
  2179. "packssdw %%mm4, %%mm4 \n\t"
  2180. "mov %3, %%"REG_a" \n\t"
  2181. "movd %%mm4, (%%"REG_a", %0) \n\t"
  2182. "add $4, %0 \n\t"
  2183. " jnc 1b \n\t"
  2184. : "+r" (counter), "+r" (filter)
  2185. : "m" (filterPos), "m" (dst), "m"(offset),
  2186. "m" (src), "r" (filterSize*2)
  2187. : "%"REG_a, "%"REG_c, "%"REG_d
  2188. );
  2189. }
  2190. #else
  2191. #ifdef HAVE_ALTIVEC
  2192. hScale_altivec_real(dst, dstW, src, srcW, xInc, filter, filterPos, filterSize);
  2193. #else
  2194. int i;
  2195. for (i=0; i<dstW; i++)
  2196. {
  2197. int j;
  2198. int srcPos= filterPos[i];
  2199. int val=0;
  2200. //printf("filterPos: %d\n", filterPos[i]);
  2201. for (j=0; j<filterSize; j++)
  2202. {
  2203. //printf("filter: %d, src: %d\n", filter[i], src[srcPos + j]);
  2204. val += ((int)src[srcPos + j])*filter[filterSize*i + j];
  2205. }
  2206. //filter += hFilterSize;
  2207. dst[i] = FFMIN(val>>7, (1<<15)-1); // the cubic equation does overflow ...
  2208. //dst[i] = val>>7;
  2209. }
  2210. #endif /* HAVE_ALTIVEC */
  2211. #endif /* HAVE_MMX */
  2212. }
  2213. // *** horizontal scale Y line to temp buffer
  2214. static inline void RENAME(hyscale)(SwsContext *c, uint16_t *dst, long dstWidth, uint8_t *src, int srcW, int xInc,
  2215. int flags, int canMMX2BeUsed, int16_t *hLumFilter,
  2216. int16_t *hLumFilterPos, int hLumFilterSize, void *funnyYCode,
  2217. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  2218. int32_t *mmx2FilterPos, uint32_t *pal)
  2219. {
  2220. if (srcFormat==PIX_FMT_YUYV422 || srcFormat==PIX_FMT_GRAY16BE)
  2221. {
  2222. RENAME(yuy2ToY)(formatConvBuffer, src, srcW, pal);
  2223. src= formatConvBuffer;
  2224. }
  2225. else if (srcFormat==PIX_FMT_UYVY422 || srcFormat==PIX_FMT_GRAY16LE)
  2226. {
  2227. RENAME(uyvyToY)(formatConvBuffer, src, srcW, pal);
  2228. src= formatConvBuffer;
  2229. }
  2230. else if (srcFormat==PIX_FMT_RGB32)
  2231. {
  2232. RENAME(bgr32ToY)(formatConvBuffer, src, srcW, pal);
  2233. src= formatConvBuffer;
  2234. }
  2235. else if (srcFormat==PIX_FMT_RGB32_1)
  2236. {
  2237. RENAME(bgr32ToY)(formatConvBuffer, src+ALT32_CORR, srcW, pal);
  2238. src= formatConvBuffer;
  2239. }
  2240. else if (srcFormat==PIX_FMT_BGR24)
  2241. {
  2242. RENAME(bgr24ToY)(formatConvBuffer, src, srcW, pal);
  2243. src= formatConvBuffer;
  2244. }
  2245. else if (srcFormat==PIX_FMT_BGR565)
  2246. {
  2247. RENAME(bgr16ToY)(formatConvBuffer, src, srcW, pal);
  2248. src= formatConvBuffer;
  2249. }
  2250. else if (srcFormat==PIX_FMT_BGR555)
  2251. {
  2252. RENAME(bgr15ToY)(formatConvBuffer, src, srcW, pal);
  2253. src= formatConvBuffer;
  2254. }
  2255. else if (srcFormat==PIX_FMT_BGR32)
  2256. {
  2257. RENAME(rgb32ToY)(formatConvBuffer, src, srcW, pal);
  2258. src= formatConvBuffer;
  2259. }
  2260. else if (srcFormat==PIX_FMT_BGR32_1)
  2261. {
  2262. RENAME(rgb32ToY)(formatConvBuffer, src+ALT32_CORR, srcW, pal);
  2263. src= formatConvBuffer;
  2264. }
  2265. else if (srcFormat==PIX_FMT_RGB24)
  2266. {
  2267. RENAME(rgb24ToY)(formatConvBuffer, src, srcW, pal);
  2268. src= formatConvBuffer;
  2269. }
  2270. else if (srcFormat==PIX_FMT_RGB565)
  2271. {
  2272. RENAME(rgb16ToY)(formatConvBuffer, src, srcW, pal);
  2273. src= formatConvBuffer;
  2274. }
  2275. else if (srcFormat==PIX_FMT_RGB555)
  2276. {
  2277. RENAME(rgb15ToY)(formatConvBuffer, src, srcW, pal);
  2278. src= formatConvBuffer;
  2279. }
  2280. else if (srcFormat==PIX_FMT_RGB8 || srcFormat==PIX_FMT_BGR8 || srcFormat==PIX_FMT_PAL8 || srcFormat==PIX_FMT_BGR4_BYTE || srcFormat==PIX_FMT_RGB4_BYTE)
  2281. {
  2282. RENAME(palToY)(formatConvBuffer, src, srcW, pal);
  2283. src= formatConvBuffer;
  2284. }
  2285. else if (srcFormat==PIX_FMT_MONOBLACK)
  2286. {
  2287. RENAME(monoblack2Y)(formatConvBuffer, src, srcW, pal);
  2288. src= formatConvBuffer;
  2289. }
  2290. else if (srcFormat==PIX_FMT_MONOWHITE)
  2291. {
  2292. RENAME(monowhite2Y)(formatConvBuffer, src, srcW, pal);
  2293. src= formatConvBuffer;
  2294. }
  2295. #ifdef HAVE_MMX
  2296. // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
  2297. if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2298. #else
  2299. if (!(flags&SWS_FAST_BILINEAR))
  2300. #endif
  2301. {
  2302. RENAME(hScale)(dst, dstWidth, src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize);
  2303. }
  2304. else // fast bilinear upscale / crap downscale
  2305. {
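    /* On x86 the fast-bilinear path can jump into horizontal-scaler code that was
     * prepared at context setup elsewhere in swscale (funnyYCode, reached through
     * the indirect "call *%4" below); mmx2Filter/mmx2FilterPos hold the offsets and
     * coefficients that generated code consumes.  When MMX2 cannot be used, a plain
     * fixed-point bilinear loop (inline asm or the C fallback) is used instead. */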
  2306. #if defined(ARCH_X86)
  2307. #ifdef HAVE_MMX2
  2308. int i;
  2309. #if defined(PIC)
  2310. uint64_t ebxsave __attribute__((aligned(8)));
  2311. #endif
  2312. if (canMMX2BeUsed)
  2313. {
  2314. asm volatile(
  2315. #if defined(PIC)
  2316. "mov %%"REG_b", %5 \n\t"
  2317. #endif
  2318. "pxor %%mm7, %%mm7 \n\t"
  2319. "mov %0, %%"REG_c" \n\t"
  2320. "mov %1, %%"REG_D" \n\t"
  2321. "mov %2, %%"REG_d" \n\t"
  2322. "mov %3, %%"REG_b" \n\t"
  2323. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2324. PREFETCH" (%%"REG_c") \n\t"
  2325. PREFETCH" 32(%%"REG_c") \n\t"
  2326. PREFETCH" 64(%%"REG_c") \n\t"
  2327. #ifdef ARCH_X86_64
  2328. #define FUNNY_Y_CODE \
  2329. "movl (%%"REG_b"), %%esi \n\t"\
  2330. "call *%4 \n\t"\
  2331. "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
  2332. "add %%"REG_S", %%"REG_c" \n\t"\
  2333. "add %%"REG_a", %%"REG_D" \n\t"\
  2334. "xor %%"REG_a", %%"REG_a" \n\t"\
  2335. #else
  2336. #define FUNNY_Y_CODE \
  2337. "movl (%%"REG_b"), %%esi \n\t"\
  2338. "call *%4 \n\t"\
  2339. "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
  2340. "add %%"REG_a", %%"REG_D" \n\t"\
  2341. "xor %%"REG_a", %%"REG_a" \n\t"\
  2342. #endif /* ARCH_X86_64 */
  2343. FUNNY_Y_CODE
  2344. FUNNY_Y_CODE
  2345. FUNNY_Y_CODE
  2346. FUNNY_Y_CODE
  2347. FUNNY_Y_CODE
  2348. FUNNY_Y_CODE
  2349. FUNNY_Y_CODE
  2350. FUNNY_Y_CODE
  2351. #if defined(PIC)
  2352. "mov %5, %%"REG_b" \n\t"
  2353. #endif
  2354. :: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2355. "m" (funnyYCode)
  2356. #if defined(PIC)
  2357. ,"m" (ebxsave)
  2358. #endif
  2359. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
  2360. #if !defined(PIC)
  2361. ,"%"REG_b
  2362. #endif
  2363. );
  2364. for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
  2365. }
  2366. else
  2367. {
  2368. #endif /* HAVE_MMX2 */
  2369. long xInc_shr16 = xInc >> 16;
  2370. uint16_t xInc_mask = xInc & 0xffff;
  2371. //NO MMX just normal asm ...
  2372. asm volatile(
  2373. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2374. "xor %%"REG_d", %%"REG_d" \n\t" // xx
  2375. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2376. ASMALIGN(4)
  2377. "1: \n\t"
  2378. "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
  2379. "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2380. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2381. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2382. "shll $16, %%edi \n\t"
  2383. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2384. "mov %1, %%"REG_D" \n\t"
  2385. "shrl $9, %%esi \n\t"
  2386. "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
  2387. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2388. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2389. "movzbl (%0, %%"REG_d"), %%edi \n\t" //src[xx]
  2390. "movzbl 1(%0, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2391. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2392. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2393. "shll $16, %%edi \n\t"
  2394. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2395. "mov %1, %%"REG_D" \n\t"
  2396. "shrl $9, %%esi \n\t"
  2397. "movw %%si, 2(%%"REG_D", %%"REG_a", 2) \n\t"
  2398. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2399. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2400. "add $2, %%"REG_a" \n\t"
  2401. "cmp %2, %%"REG_a" \n\t"
  2402. " jb 1b \n\t"
  2403. :: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask)
  2404. : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
  2405. );
  2406. #ifdef HAVE_MMX2
  2407. } //if MMX2 can't be used
  2408. #endif
  2409. #else
  2410. int i;
  2411. unsigned int xpos=0;
  2412. for (i=0;i<dstWidth;i++)
  2413. {
  2414. register unsigned int xx=xpos>>16;
  2415. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2416. dst[i]= (src[xx]<<7) + (src[xx+1] - src[xx])*xalpha;
  2417. xpos+=xInc;
  2418. }
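    /* xpos walks the source position in 16.16 fixed point: xx is its integer part,
     * xalpha the top 7 bits of the fraction, so each output is a 15-bit linear
     * blend of src[xx] and src[xx+1] - the same arithmetic the asm paths above use. */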
  2419. #endif /* defined(ARCH_X86) */
  2420. }
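    /* When source and destination differ in range (full/JPEG vs limited/MPEG) and
     * the output stays YUV, the horizontally scaled luma - stored <<7, i.e. 15-bit -
     * is remapped here.  The constants approximate y*219/255 + (16<<7) and its
     * inverse in that fixed-point domain: 14071/2^14 ~= 219/255, 33561947/2^14 ~=
     * 16<<7, 19077/2^14 ~= 255/219. */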
  2421. if(c->srcRange != c->dstRange && !(isRGB(c->dstFormat) || isBGR(c->dstFormat))){
  2422. int i;
 2423. //FIXME all pal and rgb srcFormats could do this conversion as well
  2424. //FIXME all scalers more complex than bilinear could do half of this transform
  2425. if(c->srcRange){
  2426. for (i=0; i<dstWidth; i++)
  2427. dst[i]= (dst[i]*14071 + 33561947)>>14;
  2428. }else{
  2429. for (i=0; i<dstWidth; i++)
  2430. dst[i]= (FFMIN(dst[i],30189)*19077 - 39057361)>>14;
  2431. }
  2432. }
  2433. }
  2434. inline static void RENAME(hcscale)(SwsContext *c, uint16_t *dst, long dstWidth, uint8_t *src1, uint8_t *src2,
  2435. int srcW, int xInc, int flags, int canMMX2BeUsed, int16_t *hChrFilter,
  2436. int16_t *hChrFilterPos, int hChrFilterSize, void *funnyUVCode,
  2437. int srcFormat, uint8_t *formatConvBuffer, int16_t *mmx2Filter,
  2438. int32_t *mmx2FilterPos, uint32_t *pal)
  2439. {
  2440. if (srcFormat==PIX_FMT_YUYV422)
  2441. {
  2442. RENAME(yuy2ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2443. src1= formatConvBuffer;
  2444. src2= formatConvBuffer+VOFW;
  2445. }
  2446. else if (srcFormat==PIX_FMT_UYVY422)
  2447. {
  2448. RENAME(uyvyToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2449. src1= formatConvBuffer;
  2450. src2= formatConvBuffer+VOFW;
  2451. }
  2452. else if (srcFormat==PIX_FMT_RGB32)
  2453. {
  2454. if(c->chrSrcHSubSample)
  2455. RENAME(bgr32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2456. else
  2457. RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2458. src1= formatConvBuffer;
  2459. src2= formatConvBuffer+VOFW;
  2460. }
  2461. else if (srcFormat==PIX_FMT_RGB32_1)
  2462. {
  2463. if(c->chrSrcHSubSample)
  2464. RENAME(bgr32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal);
  2465. else
  2466. RENAME(bgr32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal);
  2467. src1= formatConvBuffer;
  2468. src2= formatConvBuffer+VOFW;
  2469. }
  2470. else if (srcFormat==PIX_FMT_BGR24)
  2471. {
  2472. if(c->chrSrcHSubSample)
  2473. RENAME(bgr24ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2474. else
  2475. RENAME(bgr24ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2476. src1= formatConvBuffer;
  2477. src2= formatConvBuffer+VOFW;
  2478. }
  2479. else if (srcFormat==PIX_FMT_BGR565)
  2480. {
  2481. if(c->chrSrcHSubSample)
  2482. RENAME(bgr16ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2483. else
  2484. RENAME(bgr16ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2485. src1= formatConvBuffer;
  2486. src2= formatConvBuffer+VOFW;
  2487. }
  2488. else if (srcFormat==PIX_FMT_BGR555)
  2489. {
  2490. if(c->chrSrcHSubSample)
  2491. RENAME(bgr15ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2492. else
  2493. RENAME(bgr15ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2494. src1= formatConvBuffer;
  2495. src2= formatConvBuffer+VOFW;
  2496. }
  2497. else if (srcFormat==PIX_FMT_BGR32)
  2498. {
  2499. if(c->chrSrcHSubSample)
  2500. RENAME(rgb32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2501. else
  2502. RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2503. src1= formatConvBuffer;
  2504. src2= formatConvBuffer+VOFW;
  2505. }
  2506. else if (srcFormat==PIX_FMT_BGR32_1)
  2507. {
  2508. if(c->chrSrcHSubSample)
  2509. RENAME(rgb32ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal);
  2510. else
  2511. RENAME(rgb32ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1+ALT32_CORR, src2+ALT32_CORR, srcW, pal);
  2512. src1= formatConvBuffer;
  2513. src2= formatConvBuffer+VOFW;
  2514. }
  2515. else if (srcFormat==PIX_FMT_RGB24)
  2516. {
  2517. if(c->chrSrcHSubSample)
  2518. RENAME(rgb24ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2519. else
  2520. RENAME(rgb24ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2521. src1= formatConvBuffer;
  2522. src2= formatConvBuffer+VOFW;
  2523. }
  2524. else if (srcFormat==PIX_FMT_RGB565)
  2525. {
  2526. if(c->chrSrcHSubSample)
  2527. RENAME(rgb16ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2528. else
  2529. RENAME(rgb16ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2530. src1= formatConvBuffer;
  2531. src2= formatConvBuffer+VOFW;
  2532. }
  2533. else if (srcFormat==PIX_FMT_RGB555)
  2534. {
  2535. if(c->chrSrcHSubSample)
  2536. RENAME(rgb15ToUV_half)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2537. else
  2538. RENAME(rgb15ToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2539. src1= formatConvBuffer;
  2540. src2= formatConvBuffer+VOFW;
  2541. }
  2542. else if (isGray(srcFormat) || srcFormat==PIX_FMT_MONOBLACK || srcFormat==PIX_FMT_MONOWHITE)
  2543. {
  2544. return;
  2545. }
  2546. else if (srcFormat==PIX_FMT_RGB8 || srcFormat==PIX_FMT_BGR8 || srcFormat==PIX_FMT_PAL8 || srcFormat==PIX_FMT_BGR4_BYTE || srcFormat==PIX_FMT_RGB4_BYTE)
  2547. {
  2548. RENAME(palToUV)(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
  2549. src1= formatConvBuffer;
  2550. src2= formatConvBuffer+VOFW;
  2551. }
  2552. #ifdef HAVE_MMX
  2553. // Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
  2554. if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
  2555. #else
  2556. if (!(flags&SWS_FAST_BILINEAR))
  2557. #endif
  2558. {
  2559. RENAME(hScale)(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2560. RENAME(hScale)(dst+VOFW, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
  2561. }
  2562. else // fast bilinear upscale / crap downscale
  2563. {
  2564. #if defined(ARCH_X86)
  2565. #ifdef HAVE_MMX2
  2566. int i;
  2567. #if defined(PIC)
  2568. uint64_t ebxsave __attribute__((aligned(8)));
  2569. #endif
  2570. if (canMMX2BeUsed)
  2571. {
  2572. asm volatile(
  2573. #if defined(PIC)
  2574. "mov %%"REG_b", %6 \n\t"
  2575. #endif
  2576. "pxor %%mm7, %%mm7 \n\t"
  2577. "mov %0, %%"REG_c" \n\t"
  2578. "mov %1, %%"REG_D" \n\t"
  2579. "mov %2, %%"REG_d" \n\t"
  2580. "mov %3, %%"REG_b" \n\t"
  2581. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2582. PREFETCH" (%%"REG_c") \n\t"
  2583. PREFETCH" 32(%%"REG_c") \n\t"
  2584. PREFETCH" 64(%%"REG_c") \n\t"
  2585. #ifdef ARCH_X86_64
  2586. #define FUNNY_UV_CODE \
  2587. "movl (%%"REG_b"), %%esi \n\t"\
  2588. "call *%4 \n\t"\
  2589. "movl (%%"REG_b", %%"REG_a"), %%esi \n\t"\
  2590. "add %%"REG_S", %%"REG_c" \n\t"\
  2591. "add %%"REG_a", %%"REG_D" \n\t"\
  2592. "xor %%"REG_a", %%"REG_a" \n\t"\
  2593. #else
  2594. #define FUNNY_UV_CODE \
  2595. "movl (%%"REG_b"), %%esi \n\t"\
  2596. "call *%4 \n\t"\
  2597. "addl (%%"REG_b", %%"REG_a"), %%"REG_c" \n\t"\
  2598. "add %%"REG_a", %%"REG_D" \n\t"\
  2599. "xor %%"REG_a", %%"REG_a" \n\t"\
  2600. #endif /* ARCH_X86_64 */
  2601. FUNNY_UV_CODE
  2602. FUNNY_UV_CODE
  2603. FUNNY_UV_CODE
  2604. FUNNY_UV_CODE
  2605. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2606. "mov %5, %%"REG_c" \n\t" // src
  2607. "mov %1, %%"REG_D" \n\t" // buf1
  2608. "add $"AV_STRINGIFY(VOF)", %%"REG_D" \n\t"
  2609. PREFETCH" (%%"REG_c") \n\t"
  2610. PREFETCH" 32(%%"REG_c") \n\t"
  2611. PREFETCH" 64(%%"REG_c") \n\t"
  2612. FUNNY_UV_CODE
  2613. FUNNY_UV_CODE
  2614. FUNNY_UV_CODE
  2615. FUNNY_UV_CODE
  2616. #if defined(PIC)
  2617. "mov %6, %%"REG_b" \n\t"
  2618. #endif
  2619. :: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
  2620. "m" (funnyUVCode), "m" (src2)
  2621. #if defined(PIC)
  2622. ,"m" (ebxsave)
  2623. #endif
  2624. : "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
  2625. #if !defined(PIC)
  2626. ,"%"REG_b
  2627. #endif
  2628. );
  2629. for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
  2630. {
  2631. //printf("%d %d %d\n", dstWidth, i, srcW);
  2632. dst[i] = src1[srcW-1]*128;
  2633. dst[i+VOFW] = src2[srcW-1]*128;
  2634. }
  2635. }
  2636. else
  2637. {
  2638. #endif /* HAVE_MMX2 */
  2639. long xInc_shr16 = (long) (xInc >> 16);
  2640. uint16_t xInc_mask = xInc & 0xffff;
  2641. asm volatile(
  2642. "xor %%"REG_a", %%"REG_a" \n\t" // i
  2643. "xor %%"REG_d", %%"REG_d" \n\t" // xx
  2644. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  2645. ASMALIGN(4)
  2646. "1: \n\t"
  2647. "mov %0, %%"REG_S" \n\t"
  2648. "movzbl (%%"REG_S", %%"REG_d"), %%edi \n\t" //src[xx]
  2649. "movzbl 1(%%"REG_S", %%"REG_d"), %%esi \n\t" //src[xx+1]
  2650. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2651. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2652. "shll $16, %%edi \n\t"
  2653. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2654. "mov %1, %%"REG_D" \n\t"
  2655. "shrl $9, %%esi \n\t"
  2656. "movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
  2657. "movzbl (%5, %%"REG_d"), %%edi \n\t" //src[xx]
  2658. "movzbl 1(%5, %%"REG_d"), %%esi \n\t" //src[xx+1]
  2659. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  2660. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  2661. "shll $16, %%edi \n\t"
  2662. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  2663. "mov %1, %%"REG_D" \n\t"
  2664. "shrl $9, %%esi \n\t"
  2665. "movw %%si, "AV_STRINGIFY(VOF)"(%%"REG_D", %%"REG_a", 2) \n\t"
  2666. "addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
  2667. "adc %3, %%"REG_d" \n\t" //xx+= xInc>>8 + carry
  2668. "add $1, %%"REG_a" \n\t"
  2669. "cmp %2, %%"REG_a" \n\t"
  2670. " jb 1b \n\t"
  2671. /* GCC 3.3 makes MPlayer crash on IA-32 machines when using "g" operand here,
  2672. which is needed to support GCC 4.0. */
  2673. #if defined(ARCH_X86_64) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
  2674. :: "m" (src1), "m" (dst), "g" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
  2675. #else
  2676. :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
  2677. #endif
  2678. "r" (src2)
  2679. : "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
  2680. );
  2681. #ifdef HAVE_MMX2
  2682. } //if MMX2 can't be used
  2683. #endif
  2684. #else
  2685. int i;
  2686. unsigned int xpos=0;
  2687. for (i=0;i<dstWidth;i++)
  2688. {
  2689. register unsigned int xx=xpos>>16;
  2690. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  2691. dst[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
  2692. dst[i+VOFW]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
  2693. /* slower
  2694. dst[i]= (src1[xx]<<7) + (src1[xx+1] - src1[xx])*xalpha;
  2695. dst[i+VOFW]=(src2[xx]<<7) + (src2[xx+1] - src2[xx])*xalpha;
  2696. */
  2697. xpos+=xInc;
  2698. }
  2699. #endif /* defined(ARCH_X86) */
  2700. }
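    /* Same range remapping as at the end of hyscale(), but for the two chroma
     * planes at dst[] and dst[VOFW]: 1799/2^11 ~= 224/255 for full->limited and
     * 4663/2^12 ~= 255/224 for the inverse, applied around the fixed-point 128
     * offset (hence the additive constants). */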
  2701. if(c->srcRange != c->dstRange && !(isRGB(c->dstFormat) || isBGR(c->dstFormat))){
  2702. int i;
 2703. //FIXME all pal and rgb srcFormats could do this conversion as well
  2704. //FIXME all scalers more complex than bilinear could do half of this transform
  2705. if(c->srcRange){
  2706. for (i=0; i<dstWidth; i++){
  2707. dst[i ]= (dst[i ]*1799 + 4081085)>>11; //1469
  2708. dst[i+VOFW]= (dst[i+VOFW]*1799 + 4081085)>>11; //1469
  2709. }
  2710. }else{
  2711. for (i=0; i<dstWidth; i++){
  2712. dst[i ]= (FFMIN(dst[i ],30775)*4663 - 9289992)>>12; //-264
  2713. dst[i+VOFW]= (FFMIN(dst[i+VOFW],30775)*4663 - 9289992)>>12; //-264
  2714. }
  2715. }
  2716. }
  2717. }
  2718. static int RENAME(swScale)(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
  2719. int srcSliceH, uint8_t* dst[], int dstStride[]){
 2720. /* load a few things into local vars to make the code more readable (hopefully) and faster */
  2721. const int srcW= c->srcW;
  2722. const int dstW= c->dstW;
  2723. const int dstH= c->dstH;
  2724. const int chrDstW= c->chrDstW;
  2725. const int chrSrcW= c->chrSrcW;
  2726. const int lumXInc= c->lumXInc;
  2727. const int chrXInc= c->chrXInc;
  2728. const int dstFormat= c->dstFormat;
  2729. const int srcFormat= c->srcFormat;
  2730. const int flags= c->flags;
  2731. const int canMMX2BeUsed= c->canMMX2BeUsed;
  2732. int16_t *vLumFilterPos= c->vLumFilterPos;
  2733. int16_t *vChrFilterPos= c->vChrFilterPos;
  2734. int16_t *hLumFilterPos= c->hLumFilterPos;
  2735. int16_t *hChrFilterPos= c->hChrFilterPos;
  2736. int16_t *vLumFilter= c->vLumFilter;
  2737. int16_t *vChrFilter= c->vChrFilter;
  2738. int16_t *hLumFilter= c->hLumFilter;
  2739. int16_t *hChrFilter= c->hChrFilter;
  2740. int32_t *lumMmxFilter= c->lumMmxFilter;
  2741. int32_t *chrMmxFilter= c->chrMmxFilter;
  2742. const int vLumFilterSize= c->vLumFilterSize;
  2743. const int vChrFilterSize= c->vChrFilterSize;
  2744. const int hLumFilterSize= c->hLumFilterSize;
  2745. const int hChrFilterSize= c->hChrFilterSize;
  2746. int16_t **lumPixBuf= c->lumPixBuf;
  2747. int16_t **chrPixBuf= c->chrPixBuf;
  2748. const int vLumBufSize= c->vLumBufSize;
  2749. const int vChrBufSize= c->vChrBufSize;
  2750. uint8_t *funnyYCode= c->funnyYCode;
  2751. uint8_t *funnyUVCode= c->funnyUVCode;
  2752. uint8_t *formatConvBuffer= c->formatConvBuffer;
  2753. const int chrSrcSliceY= srcSliceY >> c->chrSrcVSubSample;
  2754. const int chrSrcSliceH= -((-srcSliceH) >> c->chrSrcVSubSample);
  2755. int lastDstY;
  2756. uint32_t *pal=NULL;
  2757. /* vars which will change and which we need to store back in the context */
  2758. int dstY= c->dstY;
  2759. int lumBufIndex= c->lumBufIndex;
  2760. int chrBufIndex= c->chrBufIndex;
  2761. int lastInLumBuf= c->lastInLumBuf;
  2762. int lastInChrBuf= c->lastInChrBuf;
  2763. if (isPacked(c->srcFormat)){
  2764. pal= (uint32_t *)src[1];
  2765. src[0]=
  2766. src[1]=
  2767. src[2]= src[0];
  2768. srcStride[0]=
  2769. srcStride[1]=
  2770. srcStride[2]= srcStride[0];
  2771. }
  2772. srcStride[1]<<= c->vChrDrop;
  2773. srcStride[2]<<= c->vChrDrop;
  2774. //printf("swscale %X %X %X -> %X %X %X\n", (int)src[0], (int)src[1], (int)src[2],
  2775. // (int)dst[0], (int)dst[1], (int)dst[2]);
  2776. #if 0 //self test FIXME move to a vfilter or something
  2777. {
  2778. static volatile int i=0;
  2779. i++;
  2780. if (srcFormat==PIX_FMT_YUV420P && i==1 && srcSliceH>= c->srcH)
  2781. selfTest(src, srcStride, c->srcW, c->srcH);
  2782. i--;
  2783. }
  2784. #endif
  2785. //printf("sws Strides:%d %d %d -> %d %d %d\n", srcStride[0],srcStride[1],srcStride[2],
  2786. //dstStride[0],dstStride[1],dstStride[2]);
  2787. if (dstStride[0]%8 !=0 || dstStride[1]%8 !=0 || dstStride[2]%8 !=0)
  2788. {
  2789. static int firstTime=1; //FIXME move this into the context perhaps
  2790. if (flags & SWS_PRINT_INFO && firstTime)
  2791. {
  2792. av_log(c, AV_LOG_WARNING, "Warning: dstStride is not aligned!\n"
  2793. " ->cannot do aligned memory accesses anymore\n");
  2794. firstTime=0;
  2795. }
  2796. }
 2797. /* Note: the user might start scaling in the middle of the picture, so this
 2798. will not get executed. This is not really intended, but it works
 2799. currently, so people might do it. */
  2800. if (srcSliceY ==0){
  2801. lumBufIndex=0;
  2802. chrBufIndex=0;
  2803. dstY=0;
  2804. lastInLumBuf= -1;
  2805. lastInChrBuf= -1;
  2806. }
  2807. lastDstY= dstY;
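    /* Main scaling loop: lumPixBuf/chrPixBuf are ring buffers of horizontally
     * scaled lines; the pointer arrays are laid out twice (hence the 2*vLumBufSize
     * / 2*vChrBufSize asserts) so that vLumFilterSize/vChrFilterSize consecutive
     * entries can always be addressed without wrapping.  For each output line the
     * loop first horizontally scales any source lines the vertical filter still
     * needs, then runs the vertical filter; if the current slice does not contain
     * enough source lines, the remainder is buffered and the loop breaks to wait
     * for the next slice. */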
  2808. for (;dstY < dstH; dstY++){
  2809. unsigned char *dest =dst[0]+dstStride[0]*dstY;
  2810. const int chrDstY= dstY>>c->chrDstVSubSample;
  2811. unsigned char *uDest=dst[1]+dstStride[1]*chrDstY;
  2812. unsigned char *vDest=dst[2]+dstStride[2]*chrDstY;
  2813. const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
  2814. const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input
  2815. const int lastLumSrcY= firstLumSrcY + vLumFilterSize -1; // Last line needed as input
  2816. const int lastChrSrcY= firstChrSrcY + vChrFilterSize -1; // Last line needed as input
  2817. //printf("dstY:%d dstH:%d firstLumSrcY:%d lastInLumBuf:%d vLumBufSize: %d vChrBufSize: %d slice: %d %d vLumFilterSize: %d firstChrSrcY: %d vChrFilterSize: %d c->chrSrcVSubSample: %d\n",
  2818. // dstY, dstH, firstLumSrcY, lastInLumBuf, vLumBufSize, vChrBufSize, srcSliceY, srcSliceH, vLumFilterSize, firstChrSrcY, vChrFilterSize, c->chrSrcVSubSample);
  2819. //handle holes (FAST_BILINEAR & weird filters)
  2820. if (firstLumSrcY > lastInLumBuf) lastInLumBuf= firstLumSrcY-1;
  2821. if (firstChrSrcY > lastInChrBuf) lastInChrBuf= firstChrSrcY-1;
  2822. //printf("%d %d %d\n", firstChrSrcY, lastInChrBuf, vChrBufSize);
  2823. assert(firstLumSrcY >= lastInLumBuf - vLumBufSize + 1);
  2824. assert(firstChrSrcY >= lastInChrBuf - vChrBufSize + 1);
 2825. // Do we have enough lines in this slice to output the dstY line?
  2826. if (lastLumSrcY < srcSliceY + srcSliceH && lastChrSrcY < -((-srcSliceY - srcSliceH)>>c->chrSrcVSubSample))
  2827. {
  2828. //Do horizontal scaling
  2829. while(lastInLumBuf < lastLumSrcY)
  2830. {
  2831. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2832. lumBufIndex++;
  2833. //printf("%d %d %d %d\n", lumBufIndex, vLumBufSize, lastInLumBuf, lastLumSrcY);
  2834. assert(lumBufIndex < 2*vLumBufSize);
  2835. assert(lastInLumBuf + 1 - srcSliceY < srcSliceH);
  2836. assert(lastInLumBuf + 1 - srcSliceY >= 0);
  2837. //printf("%d %d\n", lumBufIndex, vLumBufSize);
  2838. RENAME(hyscale)(c, lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2839. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2840. funnyYCode, c->srcFormat, formatConvBuffer,
  2841. c->lumMmx2Filter, c->lumMmx2FilterPos, pal);
  2842. lastInLumBuf++;
  2843. }
  2844. while(lastInChrBuf < lastChrSrcY)
  2845. {
  2846. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2847. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2848. chrBufIndex++;
  2849. assert(chrBufIndex < 2*vChrBufSize);
  2850. assert(lastInChrBuf + 1 - chrSrcSliceY < (chrSrcSliceH));
  2851. assert(lastInChrBuf + 1 - chrSrcSliceY >= 0);
  2852. //FIXME replace parameters through context struct (some at least)
  2853. if (!(isGray(srcFormat) || isGray(dstFormat)))
  2854. RENAME(hcscale)(c, chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2855. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2856. funnyUVCode, c->srcFormat, formatConvBuffer,
  2857. c->chrMmx2Filter, c->chrMmx2FilterPos, pal);
  2858. lastInChrBuf++;
  2859. }
  2860. //wrap buf index around to stay inside the ring buffer
  2861. if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
  2862. if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
  2863. }
  2864. else // not enough lines left in this slice -> load the rest in the buffer
  2865. {
  2866. /* printf("%d %d Last:%d %d LastInBuf:%d %d Index:%d %d Y:%d FSize: %d %d BSize: %d %d\n",
  2867. firstChrSrcY,firstLumSrcY,lastChrSrcY,lastLumSrcY,
  2868. lastInChrBuf,lastInLumBuf,chrBufIndex,lumBufIndex,dstY,vChrFilterSize,vLumFilterSize,
  2869. vChrBufSize, vLumBufSize);*/
  2870. //Do horizontal scaling
  2871. while(lastInLumBuf+1 < srcSliceY + srcSliceH)
  2872. {
  2873. uint8_t *s= src[0]+(lastInLumBuf + 1 - srcSliceY)*srcStride[0];
  2874. lumBufIndex++;
  2875. assert(lumBufIndex < 2*vLumBufSize);
  2876. assert(lastInLumBuf + 1 - srcSliceY < srcSliceH);
  2877. assert(lastInLumBuf + 1 - srcSliceY >= 0);
  2878. RENAME(hyscale)(c, lumPixBuf[ lumBufIndex ], dstW, s, srcW, lumXInc,
  2879. flags, canMMX2BeUsed, hLumFilter, hLumFilterPos, hLumFilterSize,
  2880. funnyYCode, c->srcFormat, formatConvBuffer,
  2881. c->lumMmx2Filter, c->lumMmx2FilterPos, pal);
  2882. lastInLumBuf++;
  2883. }
  2884. while(lastInChrBuf+1 < (chrSrcSliceY + chrSrcSliceH))
  2885. {
  2886. uint8_t *src1= src[1]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[1];
  2887. uint8_t *src2= src[2]+(lastInChrBuf + 1 - chrSrcSliceY)*srcStride[2];
  2888. chrBufIndex++;
  2889. assert(chrBufIndex < 2*vChrBufSize);
  2890. assert(lastInChrBuf + 1 - chrSrcSliceY < chrSrcSliceH);
  2891. assert(lastInChrBuf + 1 - chrSrcSliceY >= 0);
  2892. if (!(isGray(srcFormat) || isGray(dstFormat)))
  2893. RENAME(hcscale)(c, chrPixBuf[ chrBufIndex ], chrDstW, src1, src2, chrSrcW, chrXInc,
  2894. flags, canMMX2BeUsed, hChrFilter, hChrFilterPos, hChrFilterSize,
  2895. funnyUVCode, c->srcFormat, formatConvBuffer,
  2896. c->chrMmx2Filter, c->chrMmx2FilterPos, pal);
  2897. lastInChrBuf++;
  2898. }
  2899. //wrap buf index around to stay inside the ring buffer
  2900. if (lumBufIndex >= vLumBufSize) lumBufIndex-= vLumBufSize;
  2901. if (chrBufIndex >= vChrBufSize) chrBufIndex-= vChrBufSize;
  2902. break; //we can't output a dstY line so let's try with the next slice
  2903. }
  2904. #ifdef HAVE_MMX
  2905. b5Dither= ff_dither8[dstY&1];
  2906. g6Dither= ff_dither4[dstY&1];
  2907. g5Dither= ff_dither8[dstY&1];
  2908. r5Dither= ff_dither8[(dstY+1)&1];
  2909. #endif
  2910. if (dstY < dstH-2)
  2911. {
  2912. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  2913. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  2914. #ifdef HAVE_MMX
  2915. int i;
  2916. if (flags & SWS_ACCURATE_RND){
  2917. int s= APCK_SIZE / 8;
  2918. for (i=0; i<vLumFilterSize; i+=2){
  2919. *(void**)&lumMmxFilter[s*i ]= lumSrcPtr[i ];
  2920. *(void**)&lumMmxFilter[s*i+APCK_PTR2/4 ]= lumSrcPtr[i+(vLumFilterSize>1)];
  2921. lumMmxFilter[s*i+APCK_COEF/4 ]=
  2922. lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i ]
  2923. + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
  2924. }
  2925. for (i=0; i<vChrFilterSize; i+=2){
  2926. *(void**)&chrMmxFilter[s*i ]= chrSrcPtr[i ];
  2927. *(void**)&chrMmxFilter[s*i+APCK_PTR2/4 ]= chrSrcPtr[i+(vChrFilterSize>1)];
  2928. chrMmxFilter[s*i+APCK_COEF/4 ]=
  2929. chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i ]
  2930. + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
  2931. }
  2932. }else{
  2933. for (i=0; i<vLumFilterSize; i++)
  2934. {
  2935. lumMmxFilter[4*i+0]= (int32_t)lumSrcPtr[i];
  2936. lumMmxFilter[4*i+1]= (uint64_t)lumSrcPtr[i] >> 32;
  2937. lumMmxFilter[4*i+2]=
  2938. lumMmxFilter[4*i+3]=
  2939. ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
  2940. }
  2941. for (i=0; i<vChrFilterSize; i++)
  2942. {
  2943. chrMmxFilter[4*i+0]= (int32_t)chrSrcPtr[i];
  2944. chrMmxFilter[4*i+1]= (uint64_t)chrSrcPtr[i] >> 32;
  2945. chrMmxFilter[4*i+2]=
  2946. chrMmxFilter[4*i+3]=
  2947. ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
  2948. }
  2949. }
  2950. #endif
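        /* Render one destination line by vertically filtering vLumFilterSize /
           vChrFilterSize buffered lines: NV12/NV21 use the interleaved-chroma writer,
           other planar YUV (and gray) formats take the 1-tap yuv2yuv1 shortcut or the
           general yuv2yuvX, and every packed format falls through to the yuv2packed* /
           yuv2rgbXinC_full writers below. */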
  2951. if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
  2952. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2953. if (dstY&chrSkipMask) uDest= NULL; //FIXME split this into separate luma / chroma functions
  2954. RENAME(yuv2nv12X)(c,
  2955. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2956. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2957. dest, uDest, dstW, chrDstW, dstFormat);
  2958. }
  2959. else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) //YV12 like
  2960. {
  2961. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  2962. if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split this into separate luma / chroma functions
  2963. if (vLumFilterSize == 1 && vChrFilterSize == 1) // unscaled YV12
  2964. {
  2965. int16_t *lumBuf = lumPixBuf[0];
  2966. int16_t *chrBuf= chrPixBuf[0];
  2967. RENAME(yuv2yuv1)(c, lumBuf, chrBuf, dest, uDest, vDest, dstW, chrDstW);
  2968. }
  2969. else //General YV12
  2970. {
  2971. RENAME(yuv2yuvX)(c,
  2972. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  2973. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2974. dest, uDest, vDest, dstW, chrDstW);
  2975. }
  2976. }
  2977. else
  2978. {
  2979. assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  2980. assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
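            /* Packed output comes in three flavours: a 1-tap case where only the two
               chroma lines need blending (yuv2packed1), a 2-tap bilinear case
               (yuv2packed2) and the general yuv2packedX; with SWS_FULL_CHR_H_INT the C
               full-chroma renderer is used instead, since per the FIXMEs no packed
               *_full specializations exist yet. */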
  2981. if (vLumFilterSize == 1 && vChrFilterSize == 2) //unscaled RGB
  2982. {
  2983. int chrAlpha= vChrFilter[2*dstY+1];
  2984. if(flags & SWS_FULL_CHR_H_INT){
  2985. yuv2rgbXinC_full(c, //FIXME write a packed1_full function
  2986. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  2987. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  2988. dest, dstW, dstY);
  2989. }else{
  2990. RENAME(yuv2packed1)(c, *lumSrcPtr, *chrSrcPtr, *(chrSrcPtr+1),
  2991. dest, dstW, chrAlpha, dstFormat, flags, dstY);
  2992. }
  2993. }
  2994. else if (vLumFilterSize == 2 && vChrFilterSize == 2) //bilinear upscale RGB
  2995. {
  2996. int lumAlpha= vLumFilter[2*dstY+1];
  2997. int chrAlpha= vChrFilter[2*dstY+1];
  2998. lumMmxFilter[2]=
  2999. lumMmxFilter[3]= vLumFilter[2*dstY ]*0x10001;
  3000. chrMmxFilter[2]=
  3001. chrMmxFilter[3]= vChrFilter[2*chrDstY]*0x10001;
  3002. if(flags & SWS_FULL_CHR_H_INT){
  3003. yuv2rgbXinC_full(c, //FIXME write a packed2_full function
  3004. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  3005. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  3006. dest, dstW, dstY);
  3007. }else{
  3008. RENAME(yuv2packed2)(c, *lumSrcPtr, *(lumSrcPtr+1), *chrSrcPtr, *(chrSrcPtr+1),
  3009. dest, dstW, lumAlpha, chrAlpha, dstY);
  3010. }
  3011. }
  3012. else //general RGB
  3013. {
  3014. if(flags & SWS_FULL_CHR_H_INT){
  3015. yuv2rgbXinC_full(c,
  3016. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  3017. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  3018. dest, dstW, dstY);
  3019. }else{
  3020. RENAME(yuv2packedX)(c,
  3021. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  3022. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  3023. dest, dstW, dstY);
  3024. }
  3025. }
  3026. }
  3027. }
  3028. else // looks like we can't use MMX here without overwriting this array's tail, so fall back to the C output functions
  3029. {
  3030. int16_t **lumSrcPtr= lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
  3031. int16_t **chrSrcPtr= chrPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
  3032. if (dstFormat == PIX_FMT_NV12 || dstFormat == PIX_FMT_NV21){
  3033. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  3034. if (dstY&chrSkipMask) uDest= NULL; //FIXME split this into separate luma / chroma functions
  3035. yuv2nv12XinC(
  3036. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  3037. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  3038. dest, uDest, dstW, chrDstW, dstFormat);
  3039. }
  3040. else if (isPlanarYUV(dstFormat) || dstFormat==PIX_FMT_GRAY8) //YV12
  3041. {
  3042. const int chrSkipMask= (1<<c->chrDstVSubSample)-1;
  3043. if ((dstY&chrSkipMask) || isGray(dstFormat)) uDest=vDest= NULL; //FIXME split this into separate luma / chroma functions
  3044. yuv2yuvXinC(
  3045. vLumFilter+dstY*vLumFilterSize , lumSrcPtr, vLumFilterSize,
  3046. vChrFilter+chrDstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  3047. dest, uDest, vDest, dstW, chrDstW);
  3048. }
  3049. else
  3050. {
  3051. assert(lumSrcPtr + vLumFilterSize - 1 < lumPixBuf + vLumBufSize*2);
  3052. assert(chrSrcPtr + vChrFilterSize - 1 < chrPixBuf + vChrBufSize*2);
  3053. if(flags & SWS_FULL_CHR_H_INT){
  3054. yuv2rgbXinC_full(c,
  3055. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  3056. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  3057. dest, dstW, dstY);
  3058. }else{
  3059. yuv2packedXinC(c,
  3060. vLumFilter+dstY*vLumFilterSize, lumSrcPtr, vLumFilterSize,
  3061. vChrFilter+dstY*vChrFilterSize, chrSrcPtr, vChrFilterSize,
  3062. dest, dstW, dstY);
  3063. }
  3064. }
  3065. }
  3066. }
  3067. #ifdef HAVE_MMX
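    /* Flush any non-temporal stores issued by the MMX output code and clear the MMX
       register state before returning, so callers see complete output and can safely
       run x87 code afterwards. */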
  3068. asm volatile(SFENCE:::"memory");
  3069. asm volatile(EMMS:::"memory");
  3070. #endif
  3071. /* store changed local vars back in the context */
  3072. c->dstY= dstY;
  3073. c->lumBufIndex= lumBufIndex;
  3074. c->chrBufIndex= chrBufIndex;
  3075. c->lastInLumBuf= lastInLumBuf;
  3076. c->lastInChrBuf= lastInChrBuf;
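    /* Return how many destination lines were produced by this call (lastDstY is
       presumably dstY as it was on entry), so slice-based callers can track progress. */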
  3077. return dstY - lastDstY;
  3078. }