/*
 * software RGB to RGB converter
 * plus software PAL8 to RGB converter
 * software YUV to YUV converter
 * software YUV to RGB converter
 * Written by Nick Kurshev.
 * palette & YUV & runtime CPU stuff by Michael (michaelni@gmx.at)
 * lots of big-endian byte order fixes by Alex Beregszaszi
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stddef.h>

#undef PREFETCH
#undef MOVNTQ
#undef EMMS
#undef SFENCE
#undef PAVGB

#if COMPILE_TEMPLATE_AMD3DNOW
#define PREFETCH "prefetch"
#define PAVGB    "pavgusb"
#elif COMPILE_TEMPLATE_MMXEXT
#define PREFETCH "prefetchnta"
#define PAVGB    "pavgb"
#else
#define PREFETCH " # nop"
#endif

#if COMPILE_TEMPLATE_AMD3DNOW
/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
#define EMMS "femms"
#else
#define EMMS "emms"
#endif

#if COMPILE_TEMPLATE_MMXEXT
#define MOVNTQ "movntq"
#define SFENCE "sfence"
#else
#define MOVNTQ "movq"
#define SFENCE " # nop"
#endif

#if !COMPILE_TEMPLATE_SSE2

#if !COMPILE_TEMPLATE_AMD3DNOW

static inline void RENAME(rgb24tobgr32)(const uint8_t *src, uint8_t *dst, int src_size)
{
    uint8_t *dest = dst;
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    mm_end = end - 23;
    __asm__ volatile("movq %0, %%mm7"::"m"(mask32a):"memory");
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "punpckldq 3(%1), %%mm0 \n\t"
            "movd 6(%1), %%mm1 \n\t"
            "punpckldq 9(%1), %%mm1 \n\t"
            "movd 12(%1), %%mm2 \n\t"
            "punpckldq 15(%1), %%mm2 \n\t"
            "movd 18(%1), %%mm3 \n\t"
            "punpckldq 21(%1), %%mm3 \n\t"
            "por %%mm7, %%mm0 \n\t"
            "por %%mm7, %%mm1 \n\t"
            "por %%mm7, %%mm2 \n\t"
            "por %%mm7, %%mm3 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            MOVNTQ" %%mm1, 8(%0) \n\t"
            MOVNTQ" %%mm2, 16(%0) \n\t"
            MOVNTQ" %%mm3, 24(%0)"
            :: "r"(dest), "r"(s)
            : "memory");
        dest += 32;
        s += 24;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        *dest++ = *s++;
        *dest++ = *s++;
        *dest++ = *s++;
        *dest++ = 255;
    }
}

#define STORE_BGR24_MMX \
    "psrlq $8, %%mm2 \n\t" \
    "psrlq $8, %%mm3 \n\t" \
    "psrlq $8, %%mm6 \n\t" \
    "psrlq $8, %%mm7 \n\t" \
    "pand "MANGLE(mask24l)", %%mm0 \n\t" \
    "pand "MANGLE(mask24l)", %%mm1 \n\t" \
    "pand "MANGLE(mask24l)", %%mm4 \n\t" \
    "pand "MANGLE(mask24l)", %%mm5 \n\t" \
    "pand "MANGLE(mask24h)", %%mm2 \n\t" \
    "pand "MANGLE(mask24h)", %%mm3 \n\t" \
    "pand "MANGLE(mask24h)", %%mm6 \n\t" \
    "pand "MANGLE(mask24h)", %%mm7 \n\t" \
    "por %%mm2, %%mm0 \n\t" \
    "por %%mm3, %%mm1 \n\t" \
    "por %%mm6, %%mm4 \n\t" \
    "por %%mm7, %%mm5 \n\t" \
    \
    "movq %%mm1, %%mm2 \n\t" \
    "movq %%mm4, %%mm3 \n\t" \
    "psllq $48, %%mm2 \n\t" \
    "psllq $32, %%mm3 \n\t" \
    "pand "MANGLE(mask24hh)", %%mm2 \n\t" \
    "pand "MANGLE(mask24hhh)", %%mm3 \n\t" \
    "por %%mm2, %%mm0 \n\t" \
    "psrlq $16, %%mm1 \n\t" \
    "psrlq $32, %%mm4 \n\t" \
    "psllq $16, %%mm5 \n\t" \
    "por %%mm3, %%mm1 \n\t" \
    "pand "MANGLE(mask24hhhh)", %%mm5 \n\t" \
    "por %%mm5, %%mm4 \n\t" \
    \
    MOVNTQ" %%mm0, (%0) \n\t" \
    MOVNTQ" %%mm1, 8(%0) \n\t" \
    MOVNTQ" %%mm4, 16(%0)"

static inline void RENAME(rgb32tobgr24)(const uint8_t *src, uint8_t *dst, int src_size)
{
    uint8_t *dest = dst;
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    mm_end = end - 31;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq 8(%1), %%mm1 \n\t"
            "movq 16(%1), %%mm4 \n\t"
            "movq 24(%1), %%mm5 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm1, %%mm3 \n\t"
            "movq %%mm4, %%mm6 \n\t"
            "movq %%mm5, %%mm7 \n\t"
            STORE_BGR24_MMX
            :: "r"(dest), "r"(s)
            : "memory");
        dest += 24;
        s += 32;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        *dest++ = *s++;
        *dest++ = *s++;
        *dest++ = *s++;
        s++;
    }
}

/*
 * original by Strepto/Astral
 * ported to gcc & bugfixed: A'rpi
 * MMX2, 3DNOW optimization by Nick Kurshev
 * 32-bit C version and the and&add trick by Michael Niedermayer
 */

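/*
 * A note on the and&add trick used below (my gloss, not part of the
 * original comment): going from RGB555 to RGB565, blue keeps its position
 * while green and red each move up by one bit. Masking off blue and adding
 * the result back onto the pixel doubles -- i.e. left-shifts by one --
 * just the green/red field, so a single add handles both channels:
 *
 *     x = 0x7FFF;                            // R=31 G=31 B=31 in RGB555
 *     (x & 0x7FFF) + (x & 0x7FE0) == 0xFFDF  // R=31 G=62 B=31 in RGB565
 */
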
static inline void RENAME(rgb15to16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    register const uint8_t *s = src;
    register uint8_t *d = dst;
    register const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*s));
    __asm__ volatile("movq %0, %%mm4"::"m"(mask15s));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq 8(%1), %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "pand %%mm4, %%mm0 \n\t"
            "pand %%mm4, %%mm2 \n\t"
            "paddw %%mm1, %%mm0 \n\t"
            "paddw %%mm3, %%mm2 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            MOVNTQ" %%mm2, 8(%0)"
            :: "r"(d), "r"(s)
        );
        d += 16;
        s += 16;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    mm_end = end - 3;
    while (s < mm_end) {
        register unsigned x = *((const uint32_t *)s);
        *((uint32_t *)d) = (x & 0x7FFF7FFF) + (x & 0x7FE07FE0);
        d += 4;
        s += 4;
    }
    if (s < end) {
        register unsigned short x = *((const uint16_t *)s);
        *((uint16_t *)d) = (x & 0x7FFF) + (x & 0x7FE0);
    }
}

static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    register const uint8_t *s = src;
    register uint8_t *d = dst;
    register const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*s));
    __asm__ volatile("movq %0, %%mm7"::"m"(mask15rg));
    __asm__ volatile("movq %0, %%mm6"::"m"(mask15b));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq 8(%1), %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlq $1, %%mm0 \n\t"
            "psrlq $1, %%mm2 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm3 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm3, %%mm2 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            MOVNTQ" %%mm2, 8(%0)"
            :: "r"(d), "r"(s)
        );
        d += 16;
        s += 16;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    mm_end = end - 3;
    while (s < mm_end) {
        register uint32_t x = *((const uint32_t *)s);
        *((uint32_t *)d) = ((x >> 1) & 0x7FE07FE0) | (x & 0x001F001F);
        s += 4;
        d += 4;
    }
    if (s < end) {
        register uint16_t x = *((const uint16_t *)s);
        *((uint16_t *)d) = ((x >> 1) & 0x7FE0) | (x & 0x001F);
    }
}

static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    mm_end = end - 15;
    __asm__ volatile(
        "movq %3, %%mm5 \n\t"
        "movq %4, %%mm6 \n\t"
        "movq %5, %%mm7 \n\t"
        "jmp 2f \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        PREFETCH" 32(%1) \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd 4(%1), %%mm3 \n\t"
        "punpckldq 8(%1), %%mm0 \n\t"
        "punpckldq 12(%1), %%mm3 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm3, %%mm4 \n\t"
        "pand %%mm6, %%mm0 \n\t"
        "pand %%mm6, %%mm3 \n\t"
        "pmaddwd %%mm7, %%mm0 \n\t"
        "pmaddwd %%mm7, %%mm3 \n\t"
        "pand %%mm5, %%mm1 \n\t"
        "pand %%mm5, %%mm4 \n\t"
        "por %%mm1, %%mm0 \n\t"
        "por %%mm4, %%mm3 \n\t"
        "psrld $5, %%mm0 \n\t"
        "pslld $11, %%mm3 \n\t"
        "por %%mm3, %%mm0 \n\t"
        MOVNTQ" %%mm0, (%0) \n\t"
        "add $16, %1 \n\t"
        "add $8, %0 \n\t"
        "2: \n\t"
        "cmp %2, %1 \n\t"
        " jb 1b \n\t"
        : "+r" (d), "+r"(s)
        : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216)
    );
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register int rgb = *(const uint32_t*)s; s += 4;
        *d++ = ((rgb&0xFF)>>3) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>8);
    }
}

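/*
 * A reading aid for the loop above (my annotation, not from the original
 * source): pmaddwd is used as a shift-and-add. After the pand with
 * mask3216br, each 32-bit lane holds the blue and red bytes as two 16-bit
 * words; pmaddwd with mul3216 scales each word and sums the pair, placing
 * blue and red relative to each other in one instruction. Green is masked
 * separately (mask3216g) and or'ed in, and the final psrld/pslld/por packs
 * the two pixel pairs down to four 16-bit pixels per register.
 */
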
static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_16mask),"m"(green_16mask));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "movd 4(%1), %%mm3 \n\t"
            "punpckldq 8(%1), %%mm0 \n\t"
            "punpckldq 12(%1), %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psllq $8, %%mm0 \n\t"
            "psllq $8, %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "psrlq $5, %%mm1 \n\t"
            "psrlq $5, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $19, %%mm2 \n\t"
            "psrlq $19, %%mm5 \n\t"
            "pand %2, %%mm2 \n\t"
            "pand %2, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            :: "r"(d),"r"(s),"m"(blue_16mask):"memory");
        d += 4;
        s += 16;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register int rgb = *(const uint32_t*)s; s += 4;
        *d++ = ((rgb&0xF8)<<8) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>19);
    }
}

static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    mm_end = end - 15;
    __asm__ volatile(
        "movq %3, %%mm5 \n\t"
        "movq %4, %%mm6 \n\t"
        "movq %5, %%mm7 \n\t"
        "jmp 2f \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        PREFETCH" 32(%1) \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd 4(%1), %%mm3 \n\t"
        "punpckldq 8(%1), %%mm0 \n\t"
        "punpckldq 12(%1), %%mm3 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm3, %%mm4 \n\t"
        "pand %%mm6, %%mm0 \n\t"
        "pand %%mm6, %%mm3 \n\t"
        "pmaddwd %%mm7, %%mm0 \n\t"
        "pmaddwd %%mm7, %%mm3 \n\t"
        "pand %%mm5, %%mm1 \n\t"
        "pand %%mm5, %%mm4 \n\t"
        "por %%mm1, %%mm0 \n\t"
        "por %%mm4, %%mm3 \n\t"
        "psrld $6, %%mm0 \n\t"
        "pslld $10, %%mm3 \n\t"
        "por %%mm3, %%mm0 \n\t"
        MOVNTQ" %%mm0, (%0) \n\t"
        "add $16, %1 \n\t"
        "add $8, %0 \n\t"
        "2: \n\t"
        "cmp %2, %1 \n\t"
        " jb 1b \n\t"
        : "+r" (d), "+r"(s)
        : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215)
    );
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register int rgb = *(const uint32_t*)s; s += 4;
        *d++ = ((rgb&0xFF)>>3) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>9);
    }
}

static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_15mask),"m"(green_15mask));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "movd 4(%1), %%mm3 \n\t"
            "punpckldq 8(%1), %%mm0 \n\t"
            "punpckldq 12(%1), %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psllq $7, %%mm0 \n\t"
            "psllq $7, %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "psrlq $6, %%mm1 \n\t"
            "psrlq $6, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $19, %%mm2 \n\t"
            "psrlq $19, %%mm5 \n\t"
            "pand %2, %%mm2 \n\t"
            "pand %2, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            ::"r"(d),"r"(s),"m"(blue_15mask):"memory");
        d += 4;
        s += 16;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register int rgb = *(const uint32_t*)s; s += 4;
        *d++ = ((rgb&0xF8)<<7) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>19);
    }
}

static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_16mask),"m"(green_16mask));
    mm_end = end - 11;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "movd 3(%1), %%mm3 \n\t"
            "punpckldq 6(%1), %%mm0 \n\t"
            "punpckldq 9(%1), %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psrlq $3, %%mm0 \n\t"
            "psrlq $3, %%mm3 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %2, %%mm3 \n\t"
            "psrlq $5, %%mm1 \n\t"
            "psrlq $5, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $8, %%mm2 \n\t"
            "psrlq $8, %%mm5 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            ::"r"(d),"r"(s),"m"(blue_16mask):"memory");
        d += 4;
        s += 12;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        const int b = *s++;
        const int g = *s++;
        const int r = *s++;
        *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
    }
}

static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_16mask),"m"(green_16mask));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "movd 3(%1), %%mm3 \n\t"
            "punpckldq 6(%1), %%mm0 \n\t"
            "punpckldq 9(%1), %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psllq $8, %%mm0 \n\t"
            "psllq $8, %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "psrlq $5, %%mm1 \n\t"
            "psrlq $5, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $19, %%mm2 \n\t"
            "psrlq $19, %%mm5 \n\t"
            "pand %2, %%mm2 \n\t"
            "pand %2, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            ::"r"(d),"r"(s),"m"(blue_16mask):"memory");
        d += 4;
        s += 12;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        const int r = *s++;
        const int g = *s++;
        const int b = *s++;
        *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
    }
}

static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_15mask),"m"(green_15mask));
    mm_end = end - 11;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "movd 3(%1), %%mm3 \n\t"
            "punpckldq 6(%1), %%mm0 \n\t"
            "punpckldq 9(%1), %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psrlq $3, %%mm0 \n\t"
            "psrlq $3, %%mm3 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %2, %%mm3 \n\t"
            "psrlq $6, %%mm1 \n\t"
            "psrlq $6, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $9, %%mm2 \n\t"
            "psrlq $9, %%mm5 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            ::"r"(d),"r"(s),"m"(blue_15mask):"memory");
        d += 4;
        s += 12;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        const int b = *s++;
        const int g = *s++;
        const int r = *s++;
        *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
    }
}

static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_15mask),"m"(green_15mask));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "movd 3(%1), %%mm3 \n\t"
            "punpckldq 6(%1), %%mm0 \n\t"
            "punpckldq 9(%1), %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psllq $7, %%mm0 \n\t"
            "psllq $7, %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "psrlq $6, %%mm1 \n\t"
            "psrlq $6, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $19, %%mm2 \n\t"
            "psrlq $19, %%mm5 \n\t"
            "pand %2, %%mm2 \n\t"
            "pand %2, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            ::"r"(d),"r"(s),"m"(blue_15mask):"memory");
        d += 4;
        s += 12;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        const int r = *s++;
        const int g = *s++;
        const int b = *s++;
        *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
    }
}

/*
 * I use a less accurate approximation here, simply left-shifting the input
 * value and filling the low-order bits with zeroes. This method improves
 * PNG compression but cannot reproduce white exactly, since it does not
 * generate an all-ones maximum value; the net effect is to darken the
 * image slightly.
 *
 * The better method would be "left bit replication":
 *
 *    4 3 2 1 0
 *    ---------
 *    1 1 0 1 1
 *
 *    7 6 5 4 3  2 1 0
 *    ----------------
 *    1 1 0 1 1  1 1 0
 *    |=======|  |===|
 *        |      leftmost bits repeated to fill open bits
 *        |
 *    original bits
 */

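/*
 * For reference, a C sketch of the left-bit-replication idea described
 * above for one 5-bit channel (illustrative only; the MMX code below keeps
 * the simpler shift-and-zero-fill):
 *
 *     static uint8_t expand5(uint8_t v)
 *     {
 *         return (v << 3) | (v >> 2);  // top 2 bits repeated into the low bits
 *     }
 *
 * With this, 0x1F maps to 0xFF, so full white is reproduced exactly.
 */
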
static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint16_t *end;
    const uint16_t *mm_end;
    uint8_t *d = dst;
    const uint16_t *s = (const uint16_t *)src;
    end = s + src_size/2;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    mm_end = end - 7;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq (%1), %%mm1 \n\t"
            "movq (%1), %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $3, %%mm0 \n\t"
            "psrlq $2, %%mm1 \n\t"
            "psrlq $7, %%mm2 \n\t"
            "movq %%mm0, %%mm3 \n\t"
            "movq %%mm1, %%mm4 \n\t"
            "movq %%mm2, %%mm5 \n\t"
            "punpcklwd %5, %%mm0 \n\t"
            "punpcklwd %5, %%mm1 \n\t"
            "punpcklwd %5, %%mm2 \n\t"
            "punpckhwd %5, %%mm3 \n\t"
            "punpckhwd %5, %%mm4 \n\t"
            "punpckhwd %5, %%mm5 \n\t"
            "psllq $8, %%mm1 \n\t"
            "psllq $16, %%mm2 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "psllq $8, %%mm4 \n\t"
            "psllq $16, %%mm5 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "movq %%mm0, %%mm6 \n\t"
            "movq %%mm3, %%mm7 \n\t"
            "movq 8(%1), %%mm0 \n\t"
            "movq 8(%1), %%mm1 \n\t"
            "movq 8(%1), %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $3, %%mm0 \n\t"
            "psrlq $2, %%mm1 \n\t"
            "psrlq $7, %%mm2 \n\t"
            "movq %%mm0, %%mm3 \n\t"
            "movq %%mm1, %%mm4 \n\t"
            "movq %%mm2, %%mm5 \n\t"
            "punpcklwd %5, %%mm0 \n\t"
            "punpcklwd %5, %%mm1 \n\t"
            "punpcklwd %5, %%mm2 \n\t"
            "punpckhwd %5, %%mm3 \n\t"
            "punpckhwd %5, %%mm4 \n\t"
            "punpckhwd %5, %%mm5 \n\t"
            "psllq $8, %%mm1 \n\t"
            "psllq $16, %%mm2 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "psllq $8, %%mm4 \n\t"
            "psllq $16, %%mm5 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm5, %%mm3 \n\t"
            :"=m"(*d)
            :"r"(s),"m"(mask15b),"m"(mask15g),"m"(mask15r), "m"(mmx_null)
            :"memory");
        /* borrowed 32 to 24 */
        __asm__ volatile(
            "movq %%mm0, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "movq %%mm6, %%mm0 \n\t"
            "movq %%mm7, %%mm1 \n\t"
            "movq %%mm4, %%mm6 \n\t"
            "movq %%mm5, %%mm7 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm1, %%mm3 \n\t"
            STORE_BGR24_MMX
            :: "r"(d), "m"(*s)
            :"memory");
        d += 24;
        s += 8;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register uint16_t bgr;
        bgr = *s++;
        *d++ = (bgr&0x1F)<<3;
        *d++ = (bgr&0x3E0)>>2;
        *d++ = (bgr&0x7C00)>>7;
    }
}

static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint16_t *end;
    const uint16_t *mm_end;
    uint8_t *d = (uint8_t *)dst;
    const uint16_t *s = (const uint16_t *)src;
    end = s + src_size/2;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    mm_end = end - 7;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq (%1), %%mm1 \n\t"
            "movq (%1), %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $3, %%mm0 \n\t"
            "psrlq $3, %%mm1 \n\t"
            "psrlq $8, %%mm2 \n\t"
            "movq %%mm0, %%mm3 \n\t"
            "movq %%mm1, %%mm4 \n\t"
            "movq %%mm2, %%mm5 \n\t"
            "punpcklwd %5, %%mm0 \n\t"
            "punpcklwd %5, %%mm1 \n\t"
            "punpcklwd %5, %%mm2 \n\t"
            "punpckhwd %5, %%mm3 \n\t"
            "punpckhwd %5, %%mm4 \n\t"
            "punpckhwd %5, %%mm5 \n\t"
            "psllq $8, %%mm1 \n\t"
            "psllq $16, %%mm2 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "psllq $8, %%mm4 \n\t"
            "psllq $16, %%mm5 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "movq %%mm0, %%mm6 \n\t"
            "movq %%mm3, %%mm7 \n\t"
            "movq 8(%1), %%mm0 \n\t"
            "movq 8(%1), %%mm1 \n\t"
            "movq 8(%1), %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $3, %%mm0 \n\t"
            "psrlq $3, %%mm1 \n\t"
            "psrlq $8, %%mm2 \n\t"
            "movq %%mm0, %%mm3 \n\t"
            "movq %%mm1, %%mm4 \n\t"
            "movq %%mm2, %%mm5 \n\t"
            "punpcklwd %5, %%mm0 \n\t"
            "punpcklwd %5, %%mm1 \n\t"
            "punpcklwd %5, %%mm2 \n\t"
            "punpckhwd %5, %%mm3 \n\t"
            "punpckhwd %5, %%mm4 \n\t"
            "punpckhwd %5, %%mm5 \n\t"
            "psllq $8, %%mm1 \n\t"
            "psllq $16, %%mm2 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "psllq $8, %%mm4 \n\t"
            "psllq $16, %%mm5 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm5, %%mm3 \n\t"
            :"=m"(*d)
            :"r"(s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null)
            :"memory");
        /* borrowed 32 to 24 */
        __asm__ volatile(
            "movq %%mm0, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "movq %%mm6, %%mm0 \n\t"
            "movq %%mm7, %%mm1 \n\t"
            "movq %%mm4, %%mm6 \n\t"
            "movq %%mm5, %%mm7 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm1, %%mm3 \n\t"
            STORE_BGR24_MMX
            :: "r"(d), "m"(*s)
            :"memory");
        d += 24;
        s += 8;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register uint16_t bgr;
        bgr = *s++;
        *d++ = (bgr&0x1F)<<3;
        *d++ = (bgr&0x7E0)>>3;
        *d++ = (bgr&0xF800)>>8;
    }
}

/*
 * mm0 = 00 B3 00 B2 00 B1 00 B0
 * mm1 = 00 G3 00 G2 00 G1 00 G0
 * mm2 = 00 R3 00 R2 00 R1 00 R0
 * mm6 = FF FF FF FF FF FF FF FF
 * mm7 = 00 00 00 00 00 00 00 00
 */
#define PACK_RGB32 \
    "packuswb %%mm7, %%mm0 \n\t" /* 00 00 00 00 B3 B2 B1 B0 */ \
    "packuswb %%mm7, %%mm1 \n\t" /* 00 00 00 00 G3 G2 G1 G0 */ \
    "packuswb %%mm7, %%mm2 \n\t" /* 00 00 00 00 R3 R2 R1 R0 */ \
    "punpcklbw %%mm1, %%mm0 \n\t" /* G3 B3 G2 B2 G1 B1 G0 B0 */ \
    "punpcklbw %%mm6, %%mm2 \n\t" /* FF R3 FF R2 FF R1 FF R0 */ \
    "movq %%mm0, %%mm3 \n\t" \
    "punpcklwd %%mm2, %%mm0 \n\t" /* FF R1 G1 B1 FF R0 G0 B0 */ \
    "punpckhwd %%mm2, %%mm3 \n\t" /* FF R3 G3 B3 FF R2 G2 B2 */ \
    MOVNTQ" %%mm0, (%0) \n\t" \
    MOVNTQ" %%mm3, 8(%0) \n\t"

static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint16_t *end;
    const uint16_t *mm_end;
    uint8_t *d = dst;
    const uint16_t *s = (const uint16_t *)src;
    end = s + src_size/2;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
    __asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory");
    mm_end = end - 3;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq (%1), %%mm1 \n\t"
            "movq (%1), %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $3, %%mm0 \n\t"
            "psrlq $2, %%mm1 \n\t"
            "psrlq $7, %%mm2 \n\t"
            PACK_RGB32
            ::"r"(d),"r"(s),"m"(mask15b),"m"(mask15g),"m"(mask15r)
            :"memory");
        d += 16;
        s += 4;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register uint16_t bgr;
        bgr = *s++;
        *d++ = (bgr&0x1F)<<3;
        *d++ = (bgr&0x3E0)>>2;
        *d++ = (bgr&0x7C00)>>7;
        *d++ = 255;
    }
}

static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint16_t *end;
    const uint16_t *mm_end;
    uint8_t *d = dst;
    const uint16_t *s = (const uint16_t *)src;
    end = s + src_size/2;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
    __asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory");
    mm_end = end - 3;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq (%1), %%mm1 \n\t"
            "movq (%1), %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $3, %%mm0 \n\t"
            "psrlq $3, %%mm1 \n\t"
            "psrlq $8, %%mm2 \n\t"
            PACK_RGB32
            ::"r"(d),"r"(s),"m"(mask16b),"m"(mask16g),"m"(mask16r)
            :"memory");
        d += 16;
        s += 4;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register uint16_t bgr;
        bgr = *s++;
        *d++ = (bgr&0x1F)<<3;
        *d++ = (bgr&0x7E0)>>3;
        *d++ = (bgr&0xF800)>>8;
        *d++ = 255;
    }
}

static inline void RENAME(shuffle_bytes_2103)(const uint8_t *src, uint8_t *dst, int src_size)
{
    x86_reg idx = 15 - src_size;
    const uint8_t *s = src - idx;
    uint8_t *d = dst - idx;
    __asm__ volatile(
        "test %0, %0 \n\t"
        "jns 2f \n\t"
        PREFETCH" (%1, %0) \n\t"
        "movq %3, %%mm7 \n\t"
        "pxor %4, %%mm7 \n\t"
        "movq %%mm7, %%mm6 \n\t"
        "pxor %5, %%mm7 \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        PREFETCH" 32(%1, %0) \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq 8(%1, %0), %%mm1 \n\t"
#if COMPILE_TEMPLATE_MMXEXT
        "pshufw $177, %%mm0, %%mm3 \n\t"
        "pshufw $177, %%mm1, %%mm5 \n\t"
        "pand %%mm7, %%mm0 \n\t"
        "pand %%mm6, %%mm3 \n\t"
        "pand %%mm7, %%mm1 \n\t"
        "pand %%mm6, %%mm5 \n\t"
        "por %%mm3, %%mm0 \n\t"
        "por %%mm5, %%mm1 \n\t"
#else
        "movq %%mm0, %%mm2 \n\t"
        "movq %%mm1, %%mm4 \n\t"
        "pand %%mm7, %%mm0 \n\t"
        "pand %%mm6, %%mm2 \n\t"
        "pand %%mm7, %%mm1 \n\t"
        "pand %%mm6, %%mm4 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "movq %%mm4, %%mm5 \n\t"
        "pslld $16, %%mm2 \n\t"
        "psrld $16, %%mm3 \n\t"
        "pslld $16, %%mm4 \n\t"
        "psrld $16, %%mm5 \n\t"
        "por %%mm2, %%mm0 \n\t"
        "por %%mm4, %%mm1 \n\t"
        "por %%mm3, %%mm0 \n\t"
        "por %%mm5, %%mm1 \n\t"
#endif
        MOVNTQ" %%mm0, (%2, %0) \n\t"
        MOVNTQ" %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "js 1b \n\t"
        SFENCE" \n\t"
        EMMS" \n\t"
        "2: \n\t"
        : "+&r"(idx)
        : "r" (s), "r" (d), "m" (mask32b), "m" (mask32r), "m" (mmx_one)
        : "memory");
    for (; idx < 15; idx += 4) {
        register int v = *(const uint32_t *)&s[idx], g = v & 0xff00ff00;
        v &= 0xff00ff;
        *(uint32_t *)&d[idx] = (v >> 16) + g + (v << 16);
    }
}

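/*
 * Naming note (my annotation): "2103" are the output byte indices. Each
 * 4-byte group ABCD is rewritten as CBAD, i.e. bytes 0 and 2 swap while
 * bytes 1 and 3 stay put, which converts between RGBA and BGRA. The scalar
 * tail above does the same with masks: the two bytes under 0x00FF00FF swap
 * halves while the 0xFF00FF00 bytes pass through unchanged.
 */
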
static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, int src_size)
{
    unsigned i;
    x86_reg mmx_size = 23 - src_size;
    __asm__ volatile (
        "test %%"REG_a", %%"REG_a" \n\t"
        "jns 2f \n\t"
        "movq "MANGLE(mask24r)", %%mm5 \n\t"
        "movq "MANGLE(mask24g)", %%mm6 \n\t"
        "movq "MANGLE(mask24b)", %%mm7 \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        PREFETCH" 32(%1, %%"REG_a") \n\t"
        "movq (%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG
        "movq (%1, %%"REG_a"), %%mm1 \n\t" // BGR BGR BG
        "movq 2(%1, %%"REG_a"), %%mm2 \n\t" // R BGR BGR B
        "psllq $16, %%mm0 \n\t" // 00 BGR BGR
        "pand %%mm5, %%mm0 \n\t"
        "pand %%mm6, %%mm1 \n\t"
        "pand %%mm7, %%mm2 \n\t"
        "por %%mm0, %%mm1 \n\t"
        "por %%mm2, %%mm1 \n\t"
        "movq 6(%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG
        MOVNTQ" %%mm1, (%2, %%"REG_a") \n\t" // RGB RGB RG
        "movq 8(%1, %%"REG_a"), %%mm1 \n\t" // R BGR BGR B
        "movq 10(%1, %%"REG_a"), %%mm2 \n\t" // GR BGR BGR
        "pand %%mm7, %%mm0 \n\t"
        "pand %%mm5, %%mm1 \n\t"
        "pand %%mm6, %%mm2 \n\t"
        "por %%mm0, %%mm1 \n\t"
        "por %%mm2, %%mm1 \n\t"
        "movq 14(%1, %%"REG_a"), %%mm0 \n\t" // R BGR BGR B
        MOVNTQ" %%mm1, 8(%2, %%"REG_a") \n\t" // B RGB RGB R
        "movq 16(%1, %%"REG_a"), %%mm1 \n\t" // GR BGR BGR
        "movq 18(%1, %%"REG_a"), %%mm2 \n\t" // BGR BGR BG
        "pand %%mm6, %%mm0 \n\t"
        "pand %%mm7, %%mm1 \n\t"
        "pand %%mm5, %%mm2 \n\t"
        "por %%mm0, %%mm1 \n\t"
        "por %%mm2, %%mm1 \n\t"
        MOVNTQ" %%mm1, 16(%2, %%"REG_a") \n\t"
        "add $24, %%"REG_a" \n\t"
        " js 1b \n\t"
        "2: \n\t"
        : "+a" (mmx_size)
        : "r" (src-mmx_size), "r"(dst-mmx_size)
    );
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    if (mmx_size == 23) return; // finished, was multiple of 8
    src += src_size;
    dst += src_size;
    src_size = 23 - mmx_size;
    src -= src_size;
    dst -= src_size;
    for (i = 0; i < src_size; i += 3) {
        register uint8_t x;
        x = src[i + 2];
        dst[i + 1] = src[i + 1];
        dst[i + 2] = src[i + 0];
        dst[i + 0] = x;
    }
}

static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                           int width, int height,
                                           int lumStride, int chromStride, int dstStride, int vertLumPerChroma)
{
    int y;
    const x86_reg chromWidth = width >> 1;
    for (y = 0; y < height; y++) {
        // FIXME: handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
        __asm__ volatile(
            "xor %%"REG_a", %%"REG_a" \n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 32(%1, %%"REG_a", 2) \n\t"
            PREFETCH" 32(%2, %%"REG_a") \n\t"
            PREFETCH" 32(%3, %%"REG_a") \n\t"
            "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0)
            "movq %%mm0, %%mm2 \n\t" // U(0)
            "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0)
            "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
            "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
            "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0)
            "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8)
            "movq %%mm3, %%mm4 \n\t" // Y(0)
            "movq %%mm5, %%mm6 \n\t" // Y(8)
            "punpcklbw %%mm0, %%mm3 \n\t" // YUYV YUYV(0)
            "punpckhbw %%mm0, %%mm4 \n\t" // YUYV YUYV(4)
            "punpcklbw %%mm2, %%mm5 \n\t" // YUYV YUYV(8)
            "punpckhbw %%mm2, %%mm6 \n\t" // YUYV YUYV(12)
            MOVNTQ" %%mm3, (%0, %%"REG_a", 4) \n\t"
            MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4) \n\t"
            MOVNTQ" %%mm5, 16(%0, %%"REG_a", 4) \n\t"
            MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4) \n\t"
            "add $8, %%"REG_a" \n\t"
            "cmp %4, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
            : "%"REG_a
        );
        if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) {
            usrc += chromStride;
            vsrc += chromStride;
        }
        ysrc += lumStride;
        dst  += dstStride;
    }
    __asm__(EMMS" \n\t"
            SFENCE" \n\t"
            :::"memory");
}

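/*
 * Scalar reference for the interleaving above (my annotation): YUY2 packs
 * each horizontal pair of pixels as Y0 U0 Y1 V0, so for pair i of a line:
 *
 *     dst[4*i + 0] = ysrc[2*i];
 *     dst[4*i + 1] = usrc[i];
 *     dst[4*i + 2] = ysrc[2*i + 1];
 *     dst[4*i + 3] = vsrc[i];
 */
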
/**
 * Height should be a multiple of 2 and width should be a multiple of 16.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 */
static inline void RENAME(yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                      int width, int height,
                                      int lumStride, int chromStride, int dstStride)
{
    // FIXME: interpolate chroma
    RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
}

static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                           int width, int height,
                                           int lumStride, int chromStride, int dstStride, int vertLumPerChroma)
{
    int y;
    const x86_reg chromWidth = width >> 1;
    for (y = 0; y < height; y++) {
        // FIXME: handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
        __asm__ volatile(
            "xor %%"REG_a", %%"REG_a" \n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 32(%1, %%"REG_a", 2) \n\t"
            PREFETCH" 32(%2, %%"REG_a") \n\t"
            PREFETCH" 32(%3, %%"REG_a") \n\t"
            "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0)
            "movq %%mm0, %%mm2 \n\t" // U(0)
            "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0)
            "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
            "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
            "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0)
            "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8)
            "movq %%mm0, %%mm4 \n\t" // UVUV UVUV(0)
            "movq %%mm2, %%mm6 \n\t" // UVUV UVUV(8)
            "punpcklbw %%mm3, %%mm0 \n\t" // UYVY UYVY(0)
            "punpckhbw %%mm3, %%mm4 \n\t" // UYVY UYVY(4)
            "punpcklbw %%mm5, %%mm2 \n\t" // UYVY UYVY(8)
            "punpckhbw %%mm5, %%mm6 \n\t" // UYVY UYVY(12)
            MOVNTQ" %%mm0, (%0, %%"REG_a", 4) \n\t"
            MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4) \n\t"
            MOVNTQ" %%mm2, 16(%0, %%"REG_a", 4) \n\t"
            MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4) \n\t"
            "add $8, %%"REG_a" \n\t"
            "cmp %4, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
            : "%"REG_a
        );
        if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) {
            usrc += chromStride;
            vsrc += chromStride;
        }
        ysrc += lumStride;
        dst  += dstStride;
    }
    __asm__(EMMS" \n\t"
            SFENCE" \n\t"
            :::"memory");
}

/**
 * Height should be a multiple of 2 and width should be a multiple of 16.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 */
static inline void RENAME(yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                      int width, int height,
                                      int lumStride, int chromStride, int dstStride)
{
    // FIXME: interpolate chroma
    RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
}

/**
 * Width should be a multiple of 16.
 */
static inline void RENAME(yuv422ptouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                         int width, int height,
                                         int lumStride, int chromStride, int dstStride)
{
    RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1);
}

/**
 * Width should be a multiple of 16.
 */
static inline void RENAME(yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                         int width, int height,
                                         int lumStride, int chromStride, int dstStride)
{
    RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1);
}

/**
 * Height should be a multiple of 2 and width should be a multiple of 16.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 */
static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
                                      int width, int height,
                                      int lumStride, int chromStride, int srcStride)
{
    int y;
    const x86_reg chromWidth = width >> 1;
    for (y = 0; y < height; y += 2) {
        __asm__ volatile(
            "xor %%"REG_a", %%"REG_a" \n\t"
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
            "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
            "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(4)
            "movq %%mm0, %%mm2 \n\t" // YUYV YUYV(0)
            "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(4)
            "psrlw $8, %%mm0 \n\t" // U0V0 U0V0(0)
            "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(4)
            "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(0)
            "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(4)
            "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
            "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
            MOVNTQ" %%mm2, (%1, %%"REG_a", 2) \n\t"
            "movq 16(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(8)
            "movq 24(%0, %%"REG_a", 4), %%mm2 \n\t" // YUYV YUYV(12)
            "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(8)
            "movq %%mm2, %%mm4 \n\t" // YUYV YUYV(12)
            "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(8)
            "psrlw $8, %%mm2 \n\t" // U0V0 U0V0(12)
            "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(8)
            "pand %%mm7, %%mm4 \n\t" // Y0Y0 Y0Y0(12)
            "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
            "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
            MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2) \n\t"
            "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
            "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
            "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0)
            "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8)
            "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0)
            "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8)
            "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
            "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
            MOVNTQ" %%mm0, (%3, %%"REG_a") \n\t"
            MOVNTQ" %%mm2, (%2, %%"REG_a") \n\t"
            "add $8, %%"REG_a" \n\t"
            "cmp %4, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
            : "memory", "%"REG_a
        );
        ydst += lumStride;
        src  += srcStride;
        __asm__ volatile(
            "xor %%"REG_a", %%"REG_a" \n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
            "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
            "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // YUYV YUYV(4)
            "movq 16(%0, %%"REG_a", 4), %%mm2 \n\t" // YUYV YUYV(8)
            "movq 24(%0, %%"REG_a", 4), %%mm3 \n\t" // YUYV YUYV(12)
            "pand %%mm7, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
            "pand %%mm7, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
            "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
            "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(12)
            "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
            "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
            MOVNTQ" %%mm0, (%1, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2) \n\t"
            "add $8, %%"REG_a" \n\t"
            "cmp %4, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
            : "memory", "%"REG_a
        );
        udst += chromStride;
        vdst += chromStride;
        ydst += lumStride;
        src  += srcStride;
    }
    __asm__ volatile(EMMS" \n\t"
                     SFENCE" \n\t"
                     :::"memory");
}

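/*
 * Scalar reference for the deinterleave above (my annotation), per pair i
 * of an even line:
 *
 *     ydst[2*i]     = src[4*i + 0];
 *     udst[i]       = src[4*i + 1];
 *     ydst[2*i + 1] = src[4*i + 2];
 *     vdst[i]       = src[4*i + 3];
 *
 * The second inner loop handles the odd line and keeps only its luma; that
 * line's chroma samples are dropped.
 */
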
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */

#if COMPILE_TEMPLATE_MMXEXT || COMPILE_TEMPLATE_AMD3DNOW

static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, int srcWidth, int srcHeight, int srcStride, int dstStride)
{
    int x, y;
    dst[0] = src[0];
    // first line
    for (x = 0; x < srcWidth - 1; x++) {
        dst[2*x+1] = (3*src[x] +   src[x+1]) >> 2;
        dst[2*x+2] = (  src[x] + 3*src[x+1]) >> 2;
    }
    dst[2*srcWidth-1] = src[srcWidth-1];
    dst += dstStride;
    for (y = 1; y < srcHeight; y++) {
        const x86_reg mmxSize = srcWidth & ~15;
        __asm__ volatile(
            "mov %4, %%"REG_a" \n\t"
            "movq "MANGLE(mmx_ff)", %%mm0 \n\t"
            "movq (%0, %%"REG_a"), %%mm4 \n\t"
            "movq %%mm4, %%mm2 \n\t"
            "psllq $8, %%mm4 \n\t"
            "pand %%mm0, %%mm2 \n\t"
            "por %%mm2, %%mm4 \n\t"
            "movq (%1, %%"REG_a"), %%mm5 \n\t"
            "movq %%mm5, %%mm3 \n\t"
            "psllq $8, %%mm5 \n\t"
            "pand %%mm0, %%mm3 \n\t"
            "por %%mm3, %%mm5 \n\t"
            "1: \n\t"
            "movq (%0, %%"REG_a"), %%mm0 \n\t"
            "movq (%1, %%"REG_a"), %%mm1 \n\t"
            "movq 1(%0, %%"REG_a"), %%mm2 \n\t"
            "movq 1(%1, %%"REG_a"), %%mm3 \n\t"
            PAVGB" %%mm0, %%mm5 \n\t"
            PAVGB" %%mm0, %%mm3 \n\t"
            PAVGB" %%mm0, %%mm5 \n\t"
            PAVGB" %%mm0, %%mm3 \n\t"
            PAVGB" %%mm1, %%mm4 \n\t"
            PAVGB" %%mm1, %%mm2 \n\t"
            PAVGB" %%mm1, %%mm4 \n\t"
            PAVGB" %%mm1, %%mm2 \n\t"
            "movq %%mm5, %%mm7 \n\t"
            "movq %%mm4, %%mm6 \n\t"
            "punpcklbw %%mm3, %%mm5 \n\t"
            "punpckhbw %%mm3, %%mm7 \n\t"
            "punpcklbw %%mm2, %%mm4 \n\t"
            "punpckhbw %%mm2, %%mm6 \n\t"
            MOVNTQ" %%mm5, (%2, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm7, 8(%2, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm4, (%3, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm6, 8(%3, %%"REG_a", 2) \n\t"
            "add $8, %%"REG_a" \n\t"
            "movq -1(%0, %%"REG_a"), %%mm4 \n\t"
            "movq -1(%1, %%"REG_a"), %%mm5 \n\t"
            " js 1b \n\t"
            :: "r" (src + mmxSize ), "r" (src + srcStride + mmxSize ),
               "r" (dst + mmxSize*2), "r" (dst + dstStride + mmxSize*2),
               "g" (-mmxSize)
            : "%"REG_a
        );
        for (x = mmxSize - 1; x < srcWidth - 1; x++) {
            dst[2*x          +1] = (3*src[x+0] +   src[x+srcStride+1]) >> 2;
            dst[2*x+dstStride+2] = (  src[x+0] + 3*src[x+srcStride+1]) >> 2;
            dst[2*x+dstStride+1] = (  src[x+1] + 3*src[x+srcStride  ]) >> 2;
            dst[2*x          +2] = (3*src[x+1] +   src[x+srcStride  ]) >> 2;
        }
        dst[srcWidth*2 - 1            ] = (3*src[srcWidth-1] +   src[srcWidth-1 + srcStride]) >> 2;
        dst[srcWidth*2 - 1 + dstStride] = (  src[srcWidth-1] + 3*src[srcWidth-1 + srcStride]) >> 2;
        dst += dstStride*2;
        src += srcStride;
    }
    // last line
    dst[0] = src[0];
    for (x = 0; x < srcWidth - 1; x++) {
        dst[2*x+1] = (3*src[x] +   src[x+1]) >> 2;
        dst[2*x+2] = (  src[x] + 3*src[x+1]) >> 2;
    }
    dst[2*srcWidth-1] = src[srcWidth-1];
    __asm__ volatile(EMMS" \n\t"
                     SFENCE" \n\t"
                     :::"memory");
}

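/*
 * A note on the inner loop above (my annotation): each output sample is a
 * 3:1 weighted blend of the two nearest source samples, matching the
 * (3*a + b) >> 2 of the C fallback. The MMX version approximates this with
 * two chained PAVGB ops -- avg(avg(a, b), b) is roughly (a + 3*b) / 4 --
 * which rounds slightly differently from the scalar shift.
 */
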
#endif /* COMPILE_TEMPLATE_MMXEXT || COMPILE_TEMPLATE_AMD3DNOW */

#if !COMPILE_TEMPLATE_AMD3DNOW

/**
 * Height should be a multiple of 2 and width should be a multiple of 16.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 * Chrominance data is only taken from every second line, others are ignored.
 * FIXME: Write HQ version.
 */
static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
                                      int width, int height,
                                      int lumStride, int chromStride, int srcStride)
{
    int y;
    const x86_reg chromWidth = width >> 1;
    for (y = 0; y < height; y += 2) {
        __asm__ volatile(
            "xor %%"REG_a", %%"REG_a" \n\t"
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
            "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // UYVY UYVY(0)
            "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // UYVY UYVY(4)
            "movq %%mm0, %%mm2 \n\t" // UYVY UYVY(0)
            "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(4)
            "pand %%mm7, %%mm0 \n\t" // U0V0 U0V0(0)
            "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(4)
            "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(0)
            "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(4)
            "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
            "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
            MOVNTQ" %%mm2, (%1, %%"REG_a", 2) \n\t"
            "movq 16(%0, %%"REG_a", 4), %%mm1 \n\t" // UYVY UYVY(8)
            "movq 24(%0, %%"REG_a", 4), %%mm2 \n\t" // UYVY UYVY(12)
            "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(8)
            "movq %%mm2, %%mm4 \n\t" // UYVY UYVY(12)
            "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(8)
            "pand %%mm7, %%mm2 \n\t" // U0V0 U0V0(12)
            "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(8)
            "psrlw $8, %%mm4 \n\t" // Y0Y0 Y0Y0(12)
            "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
            "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
            MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2) \n\t"
            "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
            "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
            "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0)
            "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8)
            "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0)
            "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8)
            "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
            "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
            MOVNTQ" %%mm0, (%3, %%"REG_a") \n\t"
            MOVNTQ" %%mm2, (%2, %%"REG_a") \n\t"
            "add $8, %%"REG_a" \n\t"
            "cmp %4, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
            : "memory", "%"REG_a
        );
        ydst += lumStride;
        src  += srcStride;
        __asm__ volatile(
            "xor %%"REG_a", %%"REG_a" \n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
            "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // UYVY UYVY(0)
            "movq 8(%0, %%"REG_a", 4), %%mm1 \n\t" // UYVY UYVY(4)
            "movq 16(%0, %%"REG_a", 4), %%mm2 \n\t" // UYVY UYVY(8)
            "movq 24(%0, %%"REG_a", 4), %%mm3 \n\t" // UYVY UYVY(12)
            "psrlw $8, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
            "psrlw $8, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
            "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
            "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(12)
            "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
            "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
            MOVNTQ" %%mm0, (%1, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2) \n\t"
            "add $8, %%"REG_a" \n\t"
            "cmp %4, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
            : "memory", "%"REG_a
        );
        udst += chromStride;
        vdst += chromStride;
        ydst += lumStride;
        src  += srcStride;
    }
    __asm__ volatile(EMMS" \n\t"
                     SFENCE" \n\t"
                     :::"memory");
}

#endif /* !COMPILE_TEMPLATE_AMD3DNOW */

/**
 * Height should be a multiple of 2 and width should be a multiple of 2.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 * Chrominance data is only taken from every second line,
 * others are ignored in the C version.
 * FIXME: Write HQ version.
 */
static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
                                       int width, int height,
                                       int lumStride, int chromStride, int srcStride)
{
  1540. int y;
  1541. const x86_reg chromWidth= width>>1;
  1542. for (y=0; y<height-2; y+=2) {
  1543. int i;
  1544. for (i=0; i<2; i++) {
  1545. __asm__ volatile(
  1546. "mov %2, %%"REG_a" \n\t"
  1547. "movq "MANGLE(ff_bgr2YCoeff)", %%mm6 \n\t"
  1548. "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
  1549. "pxor %%mm7, %%mm7 \n\t"
  1550. "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
  1551. ".p2align 4 \n\t"
  1552. "1: \n\t"
  1553. PREFETCH" 64(%0, %%"REG_d") \n\t"
  1554. "movd (%0, %%"REG_d"), %%mm0 \n\t"
  1555. "movd 3(%0, %%"REG_d"), %%mm1 \n\t"
  1556. "punpcklbw %%mm7, %%mm0 \n\t"
  1557. "punpcklbw %%mm7, %%mm1 \n\t"
  1558. "movd 6(%0, %%"REG_d"), %%mm2 \n\t"
  1559. "movd 9(%0, %%"REG_d"), %%mm3 \n\t"
  1560. "punpcklbw %%mm7, %%mm2 \n\t"
  1561. "punpcklbw %%mm7, %%mm3 \n\t"
  1562. "pmaddwd %%mm6, %%mm0 \n\t"
  1563. "pmaddwd %%mm6, %%mm1 \n\t"
  1564. "pmaddwd %%mm6, %%mm2 \n\t"
  1565. "pmaddwd %%mm6, %%mm3 \n\t"
  1566. #ifndef FAST_BGR2YV12
  1567. "psrad $8, %%mm0 \n\t"
  1568. "psrad $8, %%mm1 \n\t"
  1569. "psrad $8, %%mm2 \n\t"
  1570. "psrad $8, %%mm3 \n\t"
  1571. #endif
  1572. "packssdw %%mm1, %%mm0 \n\t"
  1573. "packssdw %%mm3, %%mm2 \n\t"
  1574. "pmaddwd %%mm5, %%mm0 \n\t"
  1575. "pmaddwd %%mm5, %%mm2 \n\t"
  1576. "packssdw %%mm2, %%mm0 \n\t"
  1577. "psraw $7, %%mm0 \n\t"
  1578. "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
  1579. "movd 15(%0, %%"REG_d"), %%mm1 \n\t"
  1580. "punpcklbw %%mm7, %%mm4 \n\t"
  1581. "punpcklbw %%mm7, %%mm1 \n\t"
  1582. "movd 18(%0, %%"REG_d"), %%mm2 \n\t"
  1583. "movd 21(%0, %%"REG_d"), %%mm3 \n\t"
  1584. "punpcklbw %%mm7, %%mm2 \n\t"
  1585. "punpcklbw %%mm7, %%mm3 \n\t"
  1586. "pmaddwd %%mm6, %%mm4 \n\t"
  1587. "pmaddwd %%mm6, %%mm1 \n\t"
  1588. "pmaddwd %%mm6, %%mm2 \n\t"
  1589. "pmaddwd %%mm6, %%mm3 \n\t"
  1590. #ifndef FAST_BGR2YV12
  1591. "psrad $8, %%mm4 \n\t"
  1592. "psrad $8, %%mm1 \n\t"
  1593. "psrad $8, %%mm2 \n\t"
  1594. "psrad $8, %%mm3 \n\t"
  1595. #endif
  1596. "packssdw %%mm1, %%mm4 \n\t"
  1597. "packssdw %%mm3, %%mm2 \n\t"
  1598. "pmaddwd %%mm5, %%mm4 \n\t"
  1599. "pmaddwd %%mm5, %%mm2 \n\t"
  1600. "add $24, %%"REG_d" \n\t"
  1601. "packssdw %%mm2, %%mm4 \n\t"
  1602. "psraw $7, %%mm4 \n\t"
  1603. "packuswb %%mm4, %%mm0 \n\t"
  1604. "paddusb "MANGLE(ff_bgr2YOffset)", %%mm0 \n\t"
  1605. MOVNTQ" %%mm0, (%1, %%"REG_a") \n\t"
  1606. "add $8, %%"REG_a" \n\t"
  1607. " js 1b \n\t"
  1608. : : "r" (src+width*3), "r" (ydst+width), "g" ((x86_reg)-width)
  1609. : "%"REG_a, "%"REG_d
  1610. );
  1611. ydst += lumStride;
  1612. src += srcStride;
  1613. }
  1614. src -= srcStride*2;
        __asm__ volatile(
            "mov %4, %%"REG_a" \n\t"
            "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
            "movq "MANGLE(ff_bgr2UCoeff)", %%mm6 \n\t"
            "pxor %%mm7, %%mm7 \n\t"
            "lea (%%"REG_a", %%"REG_a", 2), %%"REG_d" \n\t"
            "add %%"REG_d", %%"REG_d" \n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"REG_d") \n\t"
            PREFETCH" 64(%1, %%"REG_d") \n\t"
#if COMPILE_TEMPLATE_MMXEXT || COMPILE_TEMPLATE_AMD3DNOW
            "movq (%0, %%"REG_d"), %%mm0 \n\t"
            "movq (%1, %%"REG_d"), %%mm1 \n\t"
            "movq 6(%0, %%"REG_d"), %%mm2 \n\t"
            "movq 6(%1, %%"REG_d"), %%mm3 \n\t"
            PAVGB" %%mm1, %%mm0 \n\t"
            PAVGB" %%mm3, %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlq $24, %%mm0 \n\t"
            "psrlq $24, %%mm2 \n\t"
            PAVGB" %%mm1, %%mm0 \n\t"
            PAVGB" %%mm3, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm0 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
#else
            "movd (%0, %%"REG_d"), %%mm0 \n\t"
            "movd (%1, %%"REG_d"), %%mm1 \n\t"
            "movd 3(%0, %%"REG_d"), %%mm2 \n\t"
            "movd 3(%1, %%"REG_d"), %%mm3 \n\t"
            "punpcklbw %%mm7, %%mm0 \n\t"
            "punpcklbw %%mm7, %%mm1 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm3 \n\t"
            "paddw %%mm1, %%mm0 \n\t"
            "paddw %%mm3, %%mm2 \n\t"
            "paddw %%mm2, %%mm0 \n\t"
            "movd 6(%0, %%"REG_d"), %%mm4 \n\t"
            "movd 6(%1, %%"REG_d"), %%mm1 \n\t"
            "movd 9(%0, %%"REG_d"), %%mm2 \n\t"
            "movd 9(%1, %%"REG_d"), %%mm3 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpcklbw %%mm7, %%mm1 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm3 \n\t"
            "paddw %%mm1, %%mm4 \n\t"
            "paddw %%mm3, %%mm2 \n\t"
            "paddw %%mm4, %%mm2 \n\t"
            "psrlw $2, %%mm0 \n\t"
            "psrlw $2, %%mm2 \n\t"
#endif
            "movq "MANGLE(ff_bgr2VCoeff)", %%mm1 \n\t"
            "movq "MANGLE(ff_bgr2VCoeff)", %%mm3 \n\t"
            "pmaddwd %%mm0, %%mm1 \n\t"
            "pmaddwd %%mm2, %%mm3 \n\t"
            "pmaddwd %%mm6, %%mm0 \n\t"
            "pmaddwd %%mm6, %%mm2 \n\t"
#ifndef FAST_BGR2YV12
            "psrad $8, %%mm0 \n\t"
            "psrad $8, %%mm1 \n\t"
            "psrad $8, %%mm2 \n\t"
            "psrad $8, %%mm3 \n\t"
#endif
            "packssdw %%mm2, %%mm0 \n\t"
            "packssdw %%mm3, %%mm1 \n\t"
            "pmaddwd %%mm5, %%mm0 \n\t"
            "pmaddwd %%mm5, %%mm1 \n\t"
            "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
            "psraw $7, %%mm0 \n\t"
#if COMPILE_TEMPLATE_MMXEXT || COMPILE_TEMPLATE_AMD3DNOW
            "movq 12(%0, %%"REG_d"), %%mm4 \n\t"
            "movq 12(%1, %%"REG_d"), %%mm1 \n\t"
            "movq 18(%0, %%"REG_d"), %%mm2 \n\t"
            "movq 18(%1, %%"REG_d"), %%mm3 \n\t"
            PAVGB" %%mm1, %%mm4 \n\t"
            PAVGB" %%mm3, %%mm2 \n\t"
            "movq %%mm4, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlq $24, %%mm4 \n\t"
            "psrlq $24, %%mm2 \n\t"
            PAVGB" %%mm1, %%mm4 \n\t"
            PAVGB" %%mm3, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
#else
            "movd 12(%0, %%"REG_d"), %%mm4 \n\t"
            "movd 12(%1, %%"REG_d"), %%mm1 \n\t"
            "movd 15(%0, %%"REG_d"), %%mm2 \n\t"
            "movd 15(%1, %%"REG_d"), %%mm3 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpcklbw %%mm7, %%mm1 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm3 \n\t"
            "paddw %%mm1, %%mm4 \n\t"
            "paddw %%mm3, %%mm2 \n\t"
            "paddw %%mm2, %%mm4 \n\t"
            "movd 18(%0, %%"REG_d"), %%mm5 \n\t"
            "movd 18(%1, %%"REG_d"), %%mm1 \n\t"
            "movd 21(%0, %%"REG_d"), %%mm2 \n\t"
            "movd 21(%1, %%"REG_d"), %%mm3 \n\t"
            "punpcklbw %%mm7, %%mm5 \n\t"
            "punpcklbw %%mm7, %%mm1 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm3 \n\t"
            "paddw %%mm1, %%mm5 \n\t"
            "paddw %%mm3, %%mm2 \n\t"
            "paddw %%mm5, %%mm2 \n\t"
            "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
            "psrlw $2, %%mm4 \n\t"
            "psrlw $2, %%mm2 \n\t"
#endif
            "movq "MANGLE(ff_bgr2VCoeff)", %%mm1 \n\t"
            "movq "MANGLE(ff_bgr2VCoeff)", %%mm3 \n\t"
            "pmaddwd %%mm4, %%mm1 \n\t"
            "pmaddwd %%mm2, %%mm3 \n\t"
            "pmaddwd %%mm6, %%mm4 \n\t"
            "pmaddwd %%mm6, %%mm2 \n\t"
#ifndef FAST_BGR2YV12
            "psrad $8, %%mm4 \n\t"
            "psrad $8, %%mm1 \n\t"
            "psrad $8, %%mm2 \n\t"
            "psrad $8, %%mm3 \n\t"
#endif
            "packssdw %%mm2, %%mm4 \n\t"
            "packssdw %%mm3, %%mm1 \n\t"
            "pmaddwd %%mm5, %%mm4 \n\t"
            "pmaddwd %%mm5, %%mm1 \n\t"
            "add $24, %%"REG_d" \n\t"
            "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
            "psraw $7, %%mm4 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "punpckldq %%mm4, %%mm0 \n\t"
            "punpckhdq %%mm4, %%mm1 \n\t"
            "packsswb %%mm1, %%mm0 \n\t"
            "paddb "MANGLE(ff_bgr2UVOffset)", %%mm0 \n\t"
            "movd %%mm0, (%2, %%"REG_a") \n\t"
            "punpckhdq %%mm0, %%mm0 \n\t"
            "movd %%mm0, (%3, %%"REG_a") \n\t"
            "add $4, %%"REG_a" \n\t"
            " js 1b \n\t"
            : : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" (-chromWidth)
            : "%"REG_a, "%"REG_d
        );
        udst += chromStride;
        vdst += chromStride;
        src += srcStride*2;
    }
    __asm__ volatile(EMMS" \n\t"
                     SFENCE" \n\t"
                     :::"memory");
    rgb24toyv12_c(src, ydst, udst, vdst, width, height-y, lumStride, chromStride, srcStride);
}
#endif /* !COMPILE_TEMPLATE_SSE2 */

#if !COMPILE_TEMPLATE_AMD3DNOW
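/* interleaveBytes(): zips two byte planes into one, i.e. for every row
 * dest[2*i] = src1[i] and dest[2*i+1] = src2[i]; the scalar tail loop in
 * the function body is the authoritative definition. A typical use is
 * merging separate U and V planes into the interleaved UV plane of a
 * semi-planar format such as NV12. */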
static void RENAME(interleaveBytes)(const uint8_t *src1, const uint8_t *src2, uint8_t *dest,
                                    int width, int height, int src1Stride,
                                    int src2Stride, int dstStride)
{
    int h;
    for (h=0; h < height; h++) {
        int w;
#if COMPILE_TEMPLATE_SSE2
        __asm__(
            "xor %%"REG_a", %%"REG_a" \n\t"
            "1: \n\t"
            PREFETCH" 64(%1, %%"REG_a") \n\t"
            PREFETCH" 64(%2, %%"REG_a") \n\t"
            "movdqa (%1, %%"REG_a"), %%xmm0 \n\t"
            "movdqa (%1, %%"REG_a"), %%xmm1 \n\t"
            "movdqa (%2, %%"REG_a"), %%xmm2 \n\t"
            "punpcklbw %%xmm2, %%xmm0 \n\t"
            "punpckhbw %%xmm2, %%xmm1 \n\t"
            "movntdq %%xmm0, (%0, %%"REG_a", 2) \n\t"
            "movntdq %%xmm1, 16(%0, %%"REG_a", 2) \n\t"
            "add $16, %%"REG_a" \n\t"
            "cmp %3, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15)
            : "memory", "%"REG_a""
        );
#else
        __asm__(
            "xor %%"REG_a", %%"REG_a" \n\t"
            "1: \n\t"
            PREFETCH" 64(%1, %%"REG_a") \n\t"
            PREFETCH" 64(%2, %%"REG_a") \n\t"
            "movq (%1, %%"REG_a"), %%mm0 \n\t"
            "movq 8(%1, %%"REG_a"), %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "movq (%2, %%"REG_a"), %%mm4 \n\t"
            "movq 8(%2, %%"REG_a"), %%mm5 \n\t"
            "punpcklbw %%mm4, %%mm0 \n\t"
            "punpckhbw %%mm4, %%mm1 \n\t"
            "punpcklbw %%mm5, %%mm2 \n\t"
            "punpckhbw %%mm5, %%mm3 \n\t"
            MOVNTQ" %%mm0, (%0, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm1, 8(%0, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm2, 16(%0, %%"REG_a", 2) \n\t"
            MOVNTQ" %%mm3, 24(%0, %%"REG_a", 2) \n\t"
            "add $16, %%"REG_a" \n\t"
            "cmp %3, %%"REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15)
            : "memory", "%"REG_a
        );
#endif
        for (w= (width&(~15)); w < width; w++) {
            dest[2*w+0] = src1[w];
            dest[2*w+1] = src2[w];
        }
        dest += dstStride;
        src1 += src1Stride;
        src2 += src2Stride;
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */

#if !COMPILE_TEMPLATE_SSE2
#if !COMPILE_TEMPLATE_AMD3DNOW
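/* vu9_to_vu12(): doubles each of the two input planes in both dimensions.
 * Each source line is reused for two output lines (the y>>1 on the source
 * stride) and each byte is duplicated horizontally by unpacking a register
 * with itself, as the scalar tail loop spells out:
 *
 *     for (; x < w; x++)
 *         d[2*x] = d[2*x+1] = s1[x];
 */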
static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
                                       uint8_t *dst1, uint8_t *dst2,
                                       int width, int height,
                                       int srcStride1, int srcStride2,
                                       int dstStride1, int dstStride2)
{
    x86_reg x, y;
    int w,h;
    w=width/2; h=height/2;
    __asm__ volatile(
        PREFETCH" %0 \n\t"
        PREFETCH" %1 \n\t"
        ::"m"(*(src1+srcStride1)),"m"(*(src2+srcStride2)):"memory");
    for (y=0;y<h;y++) {
        const uint8_t* s1=src1+srcStride1*(y>>1);
        uint8_t* d=dst1+dstStride1*y;
        x=0;
        for (;x<w-31;x+=32) {
            __asm__ volatile(
                PREFETCH" 32(%1,%2) \n\t"
                "movq (%1,%2), %%mm0 \n\t"
                "movq 8(%1,%2), %%mm2 \n\t"
                "movq 16(%1,%2), %%mm4 \n\t"
                "movq 24(%1,%2), %%mm6 \n\t"
                "movq %%mm0, %%mm1 \n\t"
                "movq %%mm2, %%mm3 \n\t"
                "movq %%mm4, %%mm5 \n\t"
                "movq %%mm6, %%mm7 \n\t"
                "punpcklbw %%mm0, %%mm0 \n\t"
                "punpckhbw %%mm1, %%mm1 \n\t"
                "punpcklbw %%mm2, %%mm2 \n\t"
                "punpckhbw %%mm3, %%mm3 \n\t"
                "punpcklbw %%mm4, %%mm4 \n\t"
                "punpckhbw %%mm5, %%mm5 \n\t"
                "punpcklbw %%mm6, %%mm6 \n\t"
                "punpckhbw %%mm7, %%mm7 \n\t"
                MOVNTQ" %%mm0, (%0,%2,2) \n\t"
                MOVNTQ" %%mm1, 8(%0,%2,2) \n\t"
                MOVNTQ" %%mm2, 16(%0,%2,2) \n\t"
                MOVNTQ" %%mm3, 24(%0,%2,2) \n\t"
                MOVNTQ" %%mm4, 32(%0,%2,2) \n\t"
                MOVNTQ" %%mm5, 40(%0,%2,2) \n\t"
                MOVNTQ" %%mm6, 48(%0,%2,2) \n\t"
                MOVNTQ" %%mm7, 56(%0,%2,2)"
                :: "r"(d), "r"(s1), "r"(x)
                :"memory");
        }
        for (;x<w;x++) d[2*x]=d[2*x+1]=s1[x];
    }
    for (y=0;y<h;y++) {
        const uint8_t* s2=src2+srcStride2*(y>>1);
        uint8_t* d=dst2+dstStride2*y;
        x=0;
        for (;x<w-31;x+=32) {
            __asm__ volatile(
                PREFETCH" 32(%1,%2) \n\t"
                "movq (%1,%2), %%mm0 \n\t"
                "movq 8(%1,%2), %%mm2 \n\t"
                "movq 16(%1,%2), %%mm4 \n\t"
                "movq 24(%1,%2), %%mm6 \n\t"
                "movq %%mm0, %%mm1 \n\t"
                "movq %%mm2, %%mm3 \n\t"
                "movq %%mm4, %%mm5 \n\t"
                "movq %%mm6, %%mm7 \n\t"
                "punpcklbw %%mm0, %%mm0 \n\t"
                "punpckhbw %%mm1, %%mm1 \n\t"
                "punpcklbw %%mm2, %%mm2 \n\t"
                "punpckhbw %%mm3, %%mm3 \n\t"
                "punpcklbw %%mm4, %%mm4 \n\t"
                "punpckhbw %%mm5, %%mm5 \n\t"
                "punpcklbw %%mm6, %%mm6 \n\t"
                "punpckhbw %%mm7, %%mm7 \n\t"
                MOVNTQ" %%mm0, (%0,%2,2) \n\t"
                MOVNTQ" %%mm1, 8(%0,%2,2) \n\t"
                MOVNTQ" %%mm2, 16(%0,%2,2) \n\t"
                MOVNTQ" %%mm3, 24(%0,%2,2) \n\t"
                MOVNTQ" %%mm4, 32(%0,%2,2) \n\t"
                MOVNTQ" %%mm5, 40(%0,%2,2) \n\t"
                MOVNTQ" %%mm6, 48(%0,%2,2) \n\t"
                MOVNTQ" %%mm7, 56(%0,%2,2)"
                :: "r"(d), "r"(s2), "r"(x)
                :"memory");
        }
        for (;x<w;x++) d[2*x]=d[2*x+1]=s2[x];
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}
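/* yvu9_to_yuy2(): packs planar YUV 4:1:0 (one U and one V sample per 4x4
 * luma block, hence the y>>2 on the chroma line indices) into interleaved
 * YUY2 (Y0 U Y1 V ...). Each chroma byte is therefore repeated across four
 * consecutive output pixel pairs per line, as the scalar tail loop makes
 * explicit. */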
static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
                                        uint8_t *dst,
                                        int width, int height,
                                        int srcStride1, int srcStride2,
                                        int srcStride3, int dstStride)
{
    x86_reg x;
    int y,w,h;
    w=width/2; h=height;
    for (y=0;y<h;y++) {
        const uint8_t* yp=src1+srcStride1*y;
        const uint8_t* up=src2+srcStride2*(y>>2);
        const uint8_t* vp=src3+srcStride3*(y>>2);
        uint8_t* d=dst+dstStride*y;
        x=0;
        for (;x<w-7;x+=8) {
            __asm__ volatile(
                PREFETCH" 32(%1, %0) \n\t"
                PREFETCH" 32(%2, %0) \n\t"
                PREFETCH" 32(%3, %0) \n\t"
                "movq (%1, %0, 4), %%mm0 \n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
                "movq (%2, %0), %%mm1 \n\t" /* U0U1U2U3U4U5U6U7 */
                "movq (%3, %0), %%mm2 \n\t" /* V0V1V2V3V4V5V6V7 */
                "movq %%mm0, %%mm3 \n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
                "movq %%mm1, %%mm4 \n\t" /* U0U1U2U3U4U5U6U7 */
                "movq %%mm2, %%mm5 \n\t" /* V0V1V2V3V4V5V6V7 */
                "punpcklbw %%mm1, %%mm1 \n\t" /* U0U0 U1U1 U2U2 U3U3 */
                "punpcklbw %%mm2, %%mm2 \n\t" /* V0V0 V1V1 V2V2 V3V3 */
                "punpckhbw %%mm4, %%mm4 \n\t" /* U4U4 U5U5 U6U6 U7U7 */
                "punpckhbw %%mm5, %%mm5 \n\t" /* V4V4 V5V5 V6V6 V7V7 */
                "movq %%mm1, %%mm6 \n\t"
                "punpcklbw %%mm2, %%mm1 \n\t" /* U0V0 U0V0 U1V1 U1V1*/
                "punpcklbw %%mm1, %%mm0 \n\t" /* Y0U0 Y1V0 Y2U0 Y3V0*/
                "punpckhbw %%mm1, %%mm3 \n\t" /* Y4U1 Y5V1 Y6U1 Y7V1*/
                MOVNTQ" %%mm0, (%4, %0, 8) \n\t"
                MOVNTQ" %%mm3, 8(%4, %0, 8) \n\t"
                "punpckhbw %%mm2, %%mm6 \n\t" /* U2V2 U2V2 U3V3 U3V3*/
                "movq 8(%1, %0, 4), %%mm0 \n\t"
                "movq %%mm0, %%mm3 \n\t"
                "punpcklbw %%mm6, %%mm0 \n\t" /* Y U2 Y V2 Y U2 Y V2*/
                "punpckhbw %%mm6, %%mm3 \n\t" /* Y U3 Y V3 Y U3 Y V3*/
                MOVNTQ" %%mm0, 16(%4, %0, 8) \n\t"
                MOVNTQ" %%mm3, 24(%4, %0, 8) \n\t"
                "movq %%mm4, %%mm6 \n\t"
                "movq 16(%1, %0, 4), %%mm0 \n\t"
                "movq %%mm0, %%mm3 \n\t"
                "punpcklbw %%mm5, %%mm4 \n\t"
                "punpcklbw %%mm4, %%mm0 \n\t" /* Y U4 Y V4 Y U4 Y V4*/
                "punpckhbw %%mm4, %%mm3 \n\t" /* Y U5 Y V5 Y U5 Y V5*/
                MOVNTQ" %%mm0, 32(%4, %0, 8) \n\t"
                MOVNTQ" %%mm3, 40(%4, %0, 8) \n\t"
                "punpckhbw %%mm5, %%mm6 \n\t"
                "movq 24(%1, %0, 4), %%mm0 \n\t"
                "movq %%mm0, %%mm3 \n\t"
                "punpcklbw %%mm6, %%mm0 \n\t" /* Y U6 Y V6 Y U6 Y V6*/
                "punpckhbw %%mm6, %%mm3 \n\t" /* Y U7 Y V7 Y U7 Y V7*/
                MOVNTQ" %%mm0, 48(%4, %0, 8) \n\t"
                MOVNTQ" %%mm3, 56(%4, %0, 8) \n\t"
                : "+r" (x)
                : "r"(yp), "r" (up), "r"(vp), "r"(d)
                :"memory");
        }
        for (; x<w; x++) {
            const int x2 = x<<2;
            d[8*x+0] = yp[x2];
            d[8*x+1] = up[x];
            d[8*x+2] = yp[x2+1];
            d[8*x+3] = vp[x];
            d[8*x+4] = yp[x2+2];
            d[8*x+5] = up[x];
            d[8*x+6] = yp[x2+3];
            d[8*x+7] = vp[x];
        }
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
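/* extract_even(): copies every second byte, dst[i] = src[2*i], e.g. the
 * luma of a YUYV stream. The pointers are first advanced to the end of the
 * buffers and the loop runs a negative index up towards zero, so the loop
 * condition is just a sign test ("js 1b"). */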
static void RENAME(extract_even)(const uint8_t *src, uint8_t *dst, x86_reg count)
{
    dst += count;
    src += 2*count;
    count= - count;
    if(count <= -16) {
        count += 15;
        __asm__ volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t"
            "1: \n\t"
            "movq -30(%1, %0, 2), %%mm0 \n\t"
            "movq -22(%1, %0, 2), %%mm1 \n\t"
            "movq -14(%1, %0, 2), %%mm2 \n\t"
            "movq -6(%1, %0, 2), %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm1 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            MOVNTQ" %%mm0,-15(%2, %0) \n\t"
            MOVNTQ" %%mm2,- 7(%2, %0) \n\t"
            "add $16, %0 \n\t"
            " js 1b \n\t"
            : "+r"(count)
            : "r"(src), "r"(dst)
        );
        count -= 15;
    }
    while(count<0) {
        dst[count]= src[2*count];
        count++;
    }
}
#if !COMPILE_TEMPLATE_AMD3DNOW
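/* extract_even2(): de-interleaves the even bytes of 4-byte groups into two
 * planes, dst0[i] = src[4*i] and dst1[i] = src[4*i+2], e.g. splitting the
 * U and V of a UYVY stream. Same negative-index loop shape as
 * extract_even() above. */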
static void RENAME(extract_even2)(const uint8_t *src, uint8_t *dst0, uint8_t *dst1, x86_reg count)
{
    dst0+= count;
    dst1+= count;
    src += 4*count;
    count= - count;
    if(count <= -8) {
        count += 7;
        __asm__ volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t"
            "1: \n\t"
            "movq -28(%1, %0, 4), %%mm0 \n\t"
            "movq -20(%1, %0, 4), %%mm1 \n\t"
            "movq -12(%1, %0, 4), %%mm2 \n\t"
            "movq -4(%1, %0, 4), %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm1 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlw $8, %%mm0 \n\t"
            "psrlw $8, %%mm2 \n\t"
            "pand %%mm7, %%mm1 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm2, %%mm0 \n\t"
            "packuswb %%mm3, %%mm1 \n\t"
            MOVNTQ" %%mm0,- 7(%3, %0) \n\t"
            MOVNTQ" %%mm1,- 7(%2, %0) \n\t"
            "add $8, %0 \n\t"
            " js 1b \n\t"
            : "+r"(count)
            : "r"(src), "r"(dst0), "r"(dst1)
        );
        count -= 7;
    }
    while(count<0) {
        dst0[count]= src[4*count+0];
        dst1[count]= src[4*count+2];
        count++;
    }
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
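/* extract_even2avg(): like extract_even2(), but averages two source lines
 * first, dst0[i] = (src0[4*i] + src1[4*i]) >> 1 (likewise for dst1), as
 * needed when 4:2:2 chroma is subsampled vertically to 4:2:0. Note that
 * PAVGB rounds up while the scalar fallback truncates, so the two paths
 * may differ by one. */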
static void RENAME(extract_even2avg)(const uint8_t *src0, const uint8_t *src1, uint8_t *dst0, uint8_t *dst1, x86_reg count)
{
    dst0 += count;
    dst1 += count;
    src0 += 4*count;
    src1 += 4*count;
    count= - count;
#ifdef PAVGB
    if(count <= -8) {
        count += 7;
        __asm__ volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t"
            "1: \n\t"
            "movq -28(%1, %0, 4), %%mm0 \n\t"
            "movq -20(%1, %0, 4), %%mm1 \n\t"
            "movq -12(%1, %0, 4), %%mm2 \n\t"
            "movq -4(%1, %0, 4), %%mm3 \n\t"
            PAVGB" -28(%2, %0, 4), %%mm0 \n\t"
            PAVGB" -20(%2, %0, 4), %%mm1 \n\t"
            PAVGB" -12(%2, %0, 4), %%mm2 \n\t"
            PAVGB" - 4(%2, %0, 4), %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm1 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlw $8, %%mm0 \n\t"
            "psrlw $8, %%mm2 \n\t"
            "pand %%mm7, %%mm1 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm2, %%mm0 \n\t"
            "packuswb %%mm3, %%mm1 \n\t"
            MOVNTQ" %%mm0,- 7(%4, %0) \n\t"
            MOVNTQ" %%mm1,- 7(%3, %0) \n\t"
            "add $8, %0 \n\t"
            " js 1b \n\t"
            : "+r"(count)
            : "r"(src0), "r"(src1), "r"(dst0), "r"(dst1)
        );
        count -= 7;
    }
#endif
    while(count<0) {
        dst0[count]= (src0[4*count+0]+src1[4*count+0])>>1;
        dst1[count]= (src0[4*count+2]+src1[4*count+2])>>1;
        count++;
    }
}
#if !COMPILE_TEMPLATE_AMD3DNOW
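/* extract_odd2(): the odd-byte counterpart of extract_even2(), i.e.
 * dst0[i] = src[4*i+1] and dst1[i] = src[4*i+3], the U and V of a YUYV
 * stream. Note the src++ after the MMX block: it shifts the scalar tail
 * onto the odd bytes while reusing the even-indexed expressions. */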
static void RENAME(extract_odd2)(const uint8_t *src, uint8_t *dst0, uint8_t *dst1, x86_reg count)
{
    dst0+= count;
    dst1+= count;
    src += 4*count;
    count= - count;
    if(count <= -8) {
        count += 7;
        __asm__ volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t"
            "1: \n\t"
            "movq -28(%1, %0, 4), %%mm0 \n\t"
            "movq -20(%1, %0, 4), %%mm1 \n\t"
            "movq -12(%1, %0, 4), %%mm2 \n\t"
            "movq -4(%1, %0, 4), %%mm3 \n\t"
            "psrlw $8, %%mm0 \n\t"
            "psrlw $8, %%mm1 \n\t"
            "psrlw $8, %%mm2 \n\t"
            "psrlw $8, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlw $8, %%mm0 \n\t"
            "psrlw $8, %%mm2 \n\t"
            "pand %%mm7, %%mm1 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm2, %%mm0 \n\t"
            "packuswb %%mm3, %%mm1 \n\t"
            MOVNTQ" %%mm0,- 7(%3, %0) \n\t"
            MOVNTQ" %%mm1,- 7(%2, %0) \n\t"
            "add $8, %0 \n\t"
            " js 1b \n\t"
            : "+r"(count)
            : "r"(src), "r"(dst0), "r"(dst1)
        );
        count -= 7;
    }
    src++;
    while(count<0) {
        dst0[count]= src[4*count+0];
        dst1[count]= src[4*count+2];
        count++;
    }
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
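/* extract_odd2avg(): odd-byte counterpart of extract_even2avg(); averages
 * the corresponding bytes of two adjacent lines before de-interleaving,
 * dst0[i] = (src0[4*i+1] + src1[4*i+1]) >> 1 and
 * dst1[i] = (src0[4*i+3] + src1[4*i+3]) >> 1. */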
static void RENAME(extract_odd2avg)(const uint8_t *src0, const uint8_t *src1, uint8_t *dst0, uint8_t *dst1, x86_reg count)
{
    dst0 += count;
    dst1 += count;
    src0 += 4*count;
    src1 += 4*count;
    count= - count;
#ifdef PAVGB
    if(count <= -8) {
        count += 7;
        __asm__ volatile(
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t"
            "1: \n\t"
            "movq -28(%1, %0, 4), %%mm0 \n\t"
            "movq -20(%1, %0, 4), %%mm1 \n\t"
            "movq -12(%1, %0, 4), %%mm2 \n\t"
            "movq -4(%1, %0, 4), %%mm3 \n\t"
            PAVGB" -28(%2, %0, 4), %%mm0 \n\t"
            PAVGB" -20(%2, %0, 4), %%mm1 \n\t"
            PAVGB" -12(%2, %0, 4), %%mm2 \n\t"
            PAVGB" - 4(%2, %0, 4), %%mm3 \n\t"
            "psrlw $8, %%mm0 \n\t"
            "psrlw $8, %%mm1 \n\t"
            "psrlw $8, %%mm2 \n\t"
            "psrlw $8, %%mm3 \n\t"
            "packuswb %%mm1, %%mm0 \n\t"
            "packuswb %%mm3, %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlw $8, %%mm0 \n\t"
            "psrlw $8, %%mm2 \n\t"
            "pand %%mm7, %%mm1 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "packuswb %%mm2, %%mm0 \n\t"
            "packuswb %%mm3, %%mm1 \n\t"
            MOVNTQ" %%mm0,- 7(%4, %0) \n\t"
            MOVNTQ" %%mm1,- 7(%3, %0) \n\t"
            "add $8, %0 \n\t"
            " js 1b \n\t"
            : "+r"(count)
            : "r"(src0), "r"(src1), "r"(dst0), "r"(dst1)
        );
        count -= 7;
    }
#endif
    src0++;
    src1++;
    while(count<0) {
        dst0[count]= (src0[4*count+0]+src1[4*count+0])>>1;
        dst1[count]= (src0[4*count+2]+src1[4*count+2])>>1;
        count++;
    }
}
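/* The four wrappers below build the packed-to-planar converters from the
 * extract helpers: the *422 variants keep full vertical chroma resolution,
 * while the *420 variants emit chroma only on odd lines, averaged with the
 * previous line via extract_*2avg(). In YUYV the luma sits on even bytes
 * and the chroma on odd ones; in UYVY it is the other way around, hence
 * the src+1 for luma there. */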
static void RENAME(yuyvtoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
                                 int width, int height,
                                 int lumStride, int chromStride, int srcStride)
{
    int y;
    const int chromWidth= -((-width)>>1);
    for (y=0; y<height; y++) {
        RENAME(extract_even)(src, ydst, width);
        if(y&1) {
            RENAME(extract_odd2avg)(src-srcStride, src, udst, vdst, chromWidth);
            udst+= chromStride;
            vdst+= chromStride;
        }
        src += srcStride;
        ydst+= lumStride;
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}
#if !COMPILE_TEMPLATE_AMD3DNOW
static void RENAME(yuyvtoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
                                 int width, int height,
                                 int lumStride, int chromStride, int srcStride)
{
    int y;
    const int chromWidth= -((-width)>>1);
    for (y=0; y<height; y++) {
        RENAME(extract_even)(src, ydst, width);
        RENAME(extract_odd2)(src, udst, vdst, chromWidth);
        src += srcStride;
        ydst+= lumStride;
        udst+= chromStride;
        vdst+= chromStride;
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
static void RENAME(uyvytoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
                                 int width, int height,
                                 int lumStride, int chromStride, int srcStride)
{
    int y;
    const int chromWidth= -((-width)>>1);
    for (y=0; y<height; y++) {
        RENAME(extract_even)(src+1, ydst, width);
        if(y&1) {
            RENAME(extract_even2avg)(src-srcStride, src, udst, vdst, chromWidth);
            udst+= chromStride;
            vdst+= chromStride;
        }
        src += srcStride;
        ydst+= lumStride;
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}
#if !COMPILE_TEMPLATE_AMD3DNOW
static void RENAME(uyvytoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
                                 int width, int height,
                                 int lumStride, int chromStride, int srcStride)
{
    int y;
    const int chromWidth= -((-width)>>1);
    for (y=0; y<height; y++) {
        RENAME(extract_even)(src+1, ydst, width);
        RENAME(extract_even2)(src, udst, vdst, chromWidth);
        src += srcStride;
        ydst+= lumStride;
        udst+= chromStride;
        vdst+= chromStride;
    }
    __asm__(
        EMMS" \n\t"
        SFENCE" \n\t"
        ::: "memory"
    );
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
#endif /* !COMPILE_TEMPLATE_SSE2 */
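/* Install the function pointers for the variants this template was compiled
 * for: each COMPILE_TEMPLATE_* instantiation of this file overrides the C
 * defaults with its RENAME()d implementations, and the guards here mirror
 * the ones around the definitions above. */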
static inline void RENAME(rgb2rgb_init)(void)
{
#if !COMPILE_TEMPLATE_SSE2
#if !COMPILE_TEMPLATE_AMD3DNOW
    rgb15to16 = RENAME(rgb15to16);
    rgb15tobgr24 = RENAME(rgb15tobgr24);
    rgb15to32 = RENAME(rgb15to32);
    rgb16tobgr24 = RENAME(rgb16tobgr24);
    rgb16to32 = RENAME(rgb16to32);
    rgb16to15 = RENAME(rgb16to15);
    rgb24tobgr16 = RENAME(rgb24tobgr16);
    rgb24tobgr15 = RENAME(rgb24tobgr15);
    rgb24tobgr32 = RENAME(rgb24tobgr32);
    rgb32to16 = RENAME(rgb32to16);
    rgb32to15 = RENAME(rgb32to15);
    rgb32tobgr24 = RENAME(rgb32tobgr24);
    rgb24to15 = RENAME(rgb24to15);
    rgb24to16 = RENAME(rgb24to16);
    rgb24tobgr24 = RENAME(rgb24tobgr24);
    shuffle_bytes_2103 = RENAME(shuffle_bytes_2103);
    rgb32tobgr16 = RENAME(rgb32tobgr16);
    rgb32tobgr15 = RENAME(rgb32tobgr15);
    yv12toyuy2 = RENAME(yv12toyuy2);
    yv12touyvy = RENAME(yv12touyvy);
    yuv422ptoyuy2 = RENAME(yuv422ptoyuy2);
    yuv422ptouyvy = RENAME(yuv422ptouyvy);
    yuy2toyv12 = RENAME(yuy2toyv12);
    vu9_to_vu12 = RENAME(vu9_to_vu12);
    yvu9_to_yuy2 = RENAME(yvu9_to_yuy2);
    uyvytoyuv422 = RENAME(uyvytoyuv422);
    yuyvtoyuv422 = RENAME(yuyvtoyuv422);
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
#if COMPILE_TEMPLATE_MMXEXT || COMPILE_TEMPLATE_AMD3DNOW
    planar2x = RENAME(planar2x);
#endif /* COMPILE_TEMPLATE_MMXEXT || COMPILE_TEMPLATE_AMD3DNOW */
    rgb24toyv12 = RENAME(rgb24toyv12);
    yuyvtoyuv420 = RENAME(yuyvtoyuv420);
    uyvytoyuv420 = RENAME(uyvytoyuv420);
#endif /* !COMPILE_TEMPLATE_SSE2 */
#if !COMPILE_TEMPLATE_AMD3DNOW
    interleaveBytes = RENAME(interleaveBytes);
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */
}