/*
 * software RGB to RGB converter
 * pluralized with software PAL8 to RGB converter,
 *                 software YUV to YUV converter,
 *                 software YUV to RGB converter
 * Written by Nick Kurshev.
 * palette & YUV & runtime CPU stuff by Michael (michaelni@gmx.at)
 * lots of big-endian byte order fixes by Alex Beregszaszi
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stddef.h>
#include <stdint.h>

#include "libavutil/attributes.h"
#include "libavutil/x86/asm.h"

#undef PREFETCH
#undef MOVNTQ
#undef EMMS
#undef SFENCE
#undef PAVGB

#if COMPILE_TEMPLATE_AMD3DNOW
#define PREFETCH "prefetch"
#define PAVGB    "pavgusb"
#elif COMPILE_TEMPLATE_MMXEXT
#define PREFETCH "prefetchnta"
#define PAVGB    "pavgb"
#else
#define PREFETCH " # nop"
#endif

#if COMPILE_TEMPLATE_AMD3DNOW
/* On K6 femms is faster than emms. On K7 femms is directly mapped to emms. */
#define EMMS "femms"
#else
#define EMMS "emms"
#endif

#if COMPILE_TEMPLATE_MMXEXT
#define MOVNTQ "movntq"
#define SFENCE "sfence"
#else
#define MOVNTQ "movq"
#define SFENCE " # nop"
#endif
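
/*
 * Each COMPILE_TEMPLATE_* variant of this file maps the macros above to
 * the fastest instruction the target CPU has for the same job: PREFETCH
 * pulls upcoming cache lines, PAVGB is a per-byte rounded average (used
 * below for chroma and bilinear filtering), MOVNTQ is a non-temporal
 * store that bypasses the cache (hence the SFENCE issued after every
 * store loop), and EMMS/FEMMS clears the MMX state so FPU code can run
 * again.  The " # nop" fallbacks expand to an assembler comment, i.e.
 * no instruction at all, on CPUs that lack the feature.
 */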
#if !COMPILE_TEMPLATE_SSE2

#if !COMPILE_TEMPLATE_AMD3DNOW
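
/*
 * 24 bpp -> 32 bpp: each iteration gathers eight 3-byte pixels (24 bytes)
 * with movd+punpckldq, so every dword holds one pixel plus one stray
 * neighbour byte in its top lane; ORing with mask32a (the alpha-byte
 * mask from rgb2rgb.c) forces that lane to 0xFF either way.  The scalar
 * tail below does the same pixel by pixel, writing 255 as alpha.
 */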
static inline void RENAME(rgb24tobgr32)(const uint8_t *src, uint8_t *dst, int src_size)
{
    uint8_t *dest = dst;
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    mm_end = end - 23;
    __asm__ volatile("movq %0, %%mm7"::"m"(mask32a):"memory");
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "punpckldq 3(%1), %%mm0 \n\t"
            "movd 6(%1), %%mm1 \n\t"
            "punpckldq 9(%1), %%mm1 \n\t"
            "movd 12(%1), %%mm2 \n\t"
            "punpckldq 15(%1), %%mm2 \n\t"
            "movd 18(%1), %%mm3 \n\t"
            "punpckldq 21(%1), %%mm3 \n\t"
            "por %%mm7, %%mm0 \n\t"
            "por %%mm7, %%mm1 \n\t"
            "por %%mm7, %%mm2 \n\t"
            "por %%mm7, %%mm3 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            MOVNTQ" %%mm1, 8(%0) \n\t"
            MOVNTQ" %%mm2, 16(%0) \n\t"
            MOVNTQ" %%mm3, 24(%0)"
            :: "r"(dest), "r"(s)
            :"memory");
        dest += 32;
        s += 24;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        *dest++ = *s++;
        *dest++ = *s++;
        *dest++ = *s++;
        *dest++ = 255;
    }
}
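
/*
 * STORE_BGR24_MMX expects four quadwords of 4-byte pixels duplicated in
 * mm0/mm2, mm1/mm3, mm4/mm6 and mm5/mm7.  Masking the originals with
 * mask24l and the byte-shifted copies with mask24h drops every 4th
 * (alpha) byte, leaving two 3-byte pixels packed in the low 6 bytes of
 * each register; the shift/OR sequence then funnels those 4x6 bytes into
 * 3 full quadwords streamed to (%0), 8(%0) and 16(%0).
 */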
#define STORE_BGR24_MMX \
    "psrlq $8, %%mm2 \n\t" \
    "psrlq $8, %%mm3 \n\t" \
    "psrlq $8, %%mm6 \n\t" \
    "psrlq $8, %%mm7 \n\t" \
    "pand "MANGLE(mask24l)", %%mm0 \n\t" \
    "pand "MANGLE(mask24l)", %%mm1 \n\t" \
    "pand "MANGLE(mask24l)", %%mm4 \n\t" \
    "pand "MANGLE(mask24l)", %%mm5 \n\t" \
    "pand "MANGLE(mask24h)", %%mm2 \n\t" \
    "pand "MANGLE(mask24h)", %%mm3 \n\t" \
    "pand "MANGLE(mask24h)", %%mm6 \n\t" \
    "pand "MANGLE(mask24h)", %%mm7 \n\t" \
    "por %%mm2, %%mm0 \n\t" \
    "por %%mm3, %%mm1 \n\t" \
    "por %%mm6, %%mm4 \n\t" \
    "por %%mm7, %%mm5 \n\t" \
    \
    "movq %%mm1, %%mm2 \n\t" \
    "movq %%mm4, %%mm3 \n\t" \
    "psllq $48, %%mm2 \n\t" \
    "psllq $32, %%mm3 \n\t" \
    "por %%mm2, %%mm0 \n\t" \
    "psrlq $16, %%mm1 \n\t" \
    "psrlq $32, %%mm4 \n\t" \
    "psllq $16, %%mm5 \n\t" \
    "por %%mm3, %%mm1 \n\t" \
    "por %%mm5, %%mm4 \n\t" \
    \
    MOVNTQ" %%mm0, (%0) \n\t" \
    MOVNTQ" %%mm1, 8(%0) \n\t" \
    MOVNTQ" %%mm4, 16(%0)"

static inline void RENAME(rgb32tobgr24)(const uint8_t *src, uint8_t *dst, int src_size)
{
    uint8_t *dest = dst;
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    mm_end = end - 31;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq 8(%1), %%mm1 \n\t"
            "movq 16(%1), %%mm4 \n\t"
            "movq 24(%1), %%mm5 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm1, %%mm3 \n\t"
            "movq %%mm4, %%mm6 \n\t"
            "movq %%mm5, %%mm7 \n\t"
            STORE_BGR24_MMX
            :: "r"(dest), "r"(s)
            NAMED_CONSTRAINTS_ADD(mask24l,mask24h)
            :"memory");
        dest += 24;
        s += 32;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        *dest++ = *s++;
        *dest++ = *s++;
        *dest++ = *s++;
        s++;
    }
}

/*
 * original by Strepto/Astral
 * ported to gcc & bugfixed: A'rpi
 * MMXEXT, 3DNOW optimization by Nick Kurshev
 * 32-bit C version, and and&add trick by Michael Niedermayer
 */
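
/*
 * The and&add trick: RGB555 is 0rrrrrgggggbbbbb while RGB565 keeps blue
 * in place and moves red and green up one bit (green gains a zero LSB).
 * Since adding a field to itself shifts it left by one,
 *     (x & 0x7FFF) + (x & 0x7FE0)
 * doubles exactly the red+green bits and leaves blue alone, e.g.
 * 0x7FFF (white) -> 0x7FFF + 0x7FE0 = 0xFFDF = r:31 g:62 b:31.  The MMX
 * loop does this on four pixels per paddw via mask15s (defined in
 * rgb2rgb.c), the 32-bit C loop below on two pixels per add.
 */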
static inline void RENAME(rgb15to16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    register const uint8_t* s=src;
    register uint8_t* d=dst;
    register const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*s));
    __asm__ volatile("movq %0, %%mm4"::"m"(mask15s));
    mm_end = end - 15;
    while (s<mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq 8(%1), %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "pand %%mm4, %%mm0 \n\t"
            "pand %%mm4, %%mm2 \n\t"
            "paddw %%mm1, %%mm0 \n\t"
            "paddw %%mm3, %%mm2 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            MOVNTQ" %%mm2, 8(%0)"
            :: "r"(d), "r"(s)
        );
        d+=16;
        s+=16;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    mm_end = end - 3;
    while (s < mm_end) {
        register unsigned x= *((const uint32_t *)s);
        *((uint32_t *)d) = (x&0x7FFF7FFF) + (x&0x7FE07FE0);
        d+=4;
        s+=4;
    }
    if (s < end) {
        register unsigned short x= *((const uint16_t *)s);
        *((uint16_t *)d) = (x&0x7FFF) + (x&0x7FE0);
    }
}
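
/*
 * The reverse direction, 565 -> 555, shifts red and green down one bit
 * (green's LSB is discarded) and keeps blue:
 *     ((x >> 1) & 0x7FE0) | (x & 0x001F)
 * The MMX loop applies the same idea with mask15rg on the shifted copy
 * and mask15b on the original, 4 pixels at a time.
 */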
static inline void RENAME(rgb16to15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    register const uint8_t* s=src;
    register uint8_t* d=dst;
    register const uint8_t *end;
    const uint8_t *mm_end;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*s));
    __asm__ volatile("movq %0, %%mm7"::"m"(mask15rg));
    __asm__ volatile("movq %0, %%mm6"::"m"(mask15b));
    mm_end = end - 15;
    while (s<mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq 8(%1), %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlq $1, %%mm0 \n\t"
            "psrlq $1, %%mm2 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm3 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm3, %%mm2 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            MOVNTQ" %%mm2, 8(%0)"
            :: "r"(d), "r"(s)
        );
        d+=16;
        s+=16;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    mm_end = end - 3;
    while (s < mm_end) {
        register uint32_t x= *((const uint32_t*)s);
        *((uint32_t *)d) = ((x>>1)&0x7FE07FE0) | (x&0x001F001F);
        s+=4;
        d+=4;
    }
    if (s < end) {
        register uint16_t x= *((const uint16_t*)s);
        *((uint16_t *)d) = ((x>>1)&0x7FE0) | (x&0x001F);
    }
}
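
/*
 * The 32 bpp -> 15/16 bpp loops below use pmaddwd as a combined
 * shift-and-add: blue and red are isolated with mask3216br, and a
 * multiply-accumulate against the mul3216/mul3215 constants (defined in
 * rgb2rgb.c) scales both fields so that, once green (mask3216g or
 * mask3215g) is ORed in, a single psrld/pslld pair drops the pixel into
 * its final 16-bit position.  Note the movd+punpckldq loads put pixels
 * 0,2 in one register and 1,3 in the other, which is exactly what the
 * final shift-and-OR needs to emit them in order.  The scalar tails
 * show the plain-C equivalent of the packing.
 */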
static inline void RENAME(rgb32to16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    mm_end = end - 15;
    __asm__ volatile(
        "movq %3, %%mm5 \n\t"
        "movq %4, %%mm6 \n\t"
        "movq %5, %%mm7 \n\t"
        "jmp 2f \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        PREFETCH" 32(%1) \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd 4(%1), %%mm3 \n\t"
        "punpckldq 8(%1), %%mm0 \n\t"
        "punpckldq 12(%1), %%mm3 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm3, %%mm4 \n\t"
        "pand %%mm6, %%mm0 \n\t"
        "pand %%mm6, %%mm3 \n\t"
        "pmaddwd %%mm7, %%mm0 \n\t"
        "pmaddwd %%mm7, %%mm3 \n\t"
        "pand %%mm5, %%mm1 \n\t"
        "pand %%mm5, %%mm4 \n\t"
        "por %%mm1, %%mm0 \n\t"
        "por %%mm4, %%mm3 \n\t"
        "psrld $5, %%mm0 \n\t"
        "pslld $11, %%mm3 \n\t"
        "por %%mm3, %%mm0 \n\t"
        MOVNTQ" %%mm0, (%0) \n\t"
        "add $16, %1 \n\t"
        "add $8, %0 \n\t"
        "2: \n\t"
        "cmp %2, %1 \n\t"
        " jb 1b \n\t"
        : "+r" (d), "+r"(s)
        : "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216)
    );
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register int rgb = *(const uint32_t*)s; s += 4;
        *d++ = ((rgb&0xFF)>>3) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>8);
    }
}

static inline void RENAME(rgb32tobgr16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_16mask),"m"(green_16mask));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "movd 4(%1), %%mm3 \n\t"
            "punpckldq 8(%1), %%mm0 \n\t"
            "punpckldq 12(%1), %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psllq $8, %%mm0 \n\t"
            "psllq $8, %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "psrlq $5, %%mm1 \n\t"
            "psrlq $5, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $19, %%mm2 \n\t"
            "psrlq $19, %%mm5 \n\t"
            "pand %2, %%mm2 \n\t"
            "pand %2, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            :: "r"(d),"r"(s),"m"(blue_16mask):"memory");
        d += 4;
        s += 16;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register int rgb = *(const uint32_t*)s; s += 4;
        *d++ = ((rgb&0xF8)<<8) + ((rgb&0xFC00)>>5) + ((rgb&0xF80000)>>19);
    }
}

static inline void RENAME(rgb32to15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    mm_end = end - 15;
    __asm__ volatile(
        "movq %3, %%mm5 \n\t"
        "movq %4, %%mm6 \n\t"
        "movq %5, %%mm7 \n\t"
        "jmp 2f \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        PREFETCH" 32(%1) \n\t"
        "movd (%1), %%mm0 \n\t"
        "movd 4(%1), %%mm3 \n\t"
        "punpckldq 8(%1), %%mm0 \n\t"
        "punpckldq 12(%1), %%mm3 \n\t"
        "movq %%mm0, %%mm1 \n\t"
        "movq %%mm3, %%mm4 \n\t"
        "pand %%mm6, %%mm0 \n\t"
        "pand %%mm6, %%mm3 \n\t"
        "pmaddwd %%mm7, %%mm0 \n\t"
        "pmaddwd %%mm7, %%mm3 \n\t"
        "pand %%mm5, %%mm1 \n\t"
        "pand %%mm5, %%mm4 \n\t"
        "por %%mm1, %%mm0 \n\t"
        "por %%mm4, %%mm3 \n\t"
        "psrld $6, %%mm0 \n\t"
        "pslld $10, %%mm3 \n\t"
        "por %%mm3, %%mm0 \n\t"
        MOVNTQ" %%mm0, (%0) \n\t"
        "add $16, %1 \n\t"
        "add $8, %0 \n\t"
        "2: \n\t"
        "cmp %2, %1 \n\t"
        " jb 1b \n\t"
        : "+r" (d), "+r"(s)
        : "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215)
    );
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register int rgb = *(const uint32_t*)s; s += 4;
        *d++ = ((rgb&0xFF)>>3) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>9);
    }
}

static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_15mask),"m"(green_15mask));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "movd 4(%1), %%mm3 \n\t"
            "punpckldq 8(%1), %%mm0 \n\t"
            "punpckldq 12(%1), %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psllq $7, %%mm0 \n\t"
            "psllq $7, %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "psrlq $6, %%mm1 \n\t"
            "psrlq $6, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $19, %%mm2 \n\t"
            "psrlq $19, %%mm5 \n\t"
            "pand %2, %%mm2 \n\t"
            "pand %2, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            ::"r"(d),"r"(s),"m"(blue_15mask):"memory");
        d += 4;
        s += 16;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register int rgb = *(const uint32_t*)s; s += 4;
        *d++ = ((rgb&0xF8)<<7) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>19);
    }
}
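
/*
 * The 24 bpp -> 15/16 bpp converters below share one pattern: movd plus
 * punpckldq at 3-byte offsets gathers four packed pixels (12 source
 * bytes) into two registers, three shifted copies are masked down to the
 * blue, green and red fields with the *_15mask/*_16mask constants, and
 * the two pixel pairs are ORed and merged into one quadword of four
 * 16-bit pixels (8 output bytes).  The bgr/rgb variants differ only in
 * the shift counts and in the byte order of the scalar tail.
 */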
static inline void RENAME(rgb24tobgr16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_16mask),"m"(green_16mask));
    mm_end = end - 11;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "movd 3(%1), %%mm3 \n\t"
            "punpckldq 6(%1), %%mm0 \n\t"
            "punpckldq 9(%1), %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psrlq $3, %%mm0 \n\t"
            "psrlq $3, %%mm3 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %2, %%mm3 \n\t"
            "psrlq $5, %%mm1 \n\t"
            "psrlq $5, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $8, %%mm2 \n\t"
            "psrlq $8, %%mm5 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            ::"r"(d),"r"(s),"m"(blue_16mask):"memory");
        d += 4;
        s += 12;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        const int b = *s++;
        const int g = *s++;
        const int r = *s++;
        *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
    }
}

static inline void RENAME(rgb24to16)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_16mask),"m"(green_16mask));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "movd 3(%1), %%mm3 \n\t"
            "punpckldq 6(%1), %%mm0 \n\t"
            "punpckldq 9(%1), %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psllq $8, %%mm0 \n\t"
            "psllq $8, %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "psrlq $5, %%mm1 \n\t"
            "psrlq $5, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $19, %%mm2 \n\t"
            "psrlq $19, %%mm5 \n\t"
            "pand %2, %%mm2 \n\t"
            "pand %2, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            ::"r"(d),"r"(s),"m"(blue_16mask):"memory");
        d += 4;
        s += 12;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        const int r = *s++;
        const int g = *s++;
        const int b = *s++;
        *d++ = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
    }
}

static inline void RENAME(rgb24tobgr15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_15mask),"m"(green_15mask));
    mm_end = end - 11;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "movd 3(%1), %%mm3 \n\t"
            "punpckldq 6(%1), %%mm0 \n\t"
            "punpckldq 9(%1), %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psrlq $3, %%mm0 \n\t"
            "psrlq $3, %%mm3 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %2, %%mm3 \n\t"
            "psrlq $6, %%mm1 \n\t"
            "psrlq $6, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $9, %%mm2 \n\t"
            "psrlq $9, %%mm5 \n\t"
            "pand %%mm7, %%mm2 \n\t"
            "pand %%mm7, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            ::"r"(d),"r"(s),"m"(blue_15mask):"memory");
        d += 4;
        s += 12;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        const int b = *s++;
        const int g = *s++;
        const int r = *s++;
        *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
    }
}

static inline void RENAME(rgb24to15)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint8_t *s = src;
    const uint8_t *end;
    const uint8_t *mm_end;
    uint16_t *d = (uint16_t *)dst;
    end = s + src_size;
    __asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
    __asm__ volatile(
        "movq %0, %%mm7 \n\t"
        "movq %1, %%mm6 \n\t"
        ::"m"(red_15mask),"m"(green_15mask));
    mm_end = end - 15;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movd (%1), %%mm0 \n\t"
            "movd 3(%1), %%mm3 \n\t"
            "punpckldq 6(%1), %%mm0 \n\t"
            "punpckldq 9(%1), %%mm3 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm3, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "psllq $7, %%mm0 \n\t"
            "psllq $7, %%mm3 \n\t"
            "pand %%mm7, %%mm0 \n\t"
            "pand %%mm7, %%mm3 \n\t"
            "psrlq $6, %%mm1 \n\t"
            "psrlq $6, %%mm4 \n\t"
            "pand %%mm6, %%mm1 \n\t"
            "pand %%mm6, %%mm4 \n\t"
            "psrlq $19, %%mm2 \n\t"
            "psrlq $19, %%mm5 \n\t"
            "pand %2, %%mm2 \n\t"
            "pand %2, %%mm5 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "psllq $16, %%mm3 \n\t"
            "por %%mm3, %%mm0 \n\t"
            MOVNTQ" %%mm0, (%0) \n\t"
            ::"r"(d),"r"(s),"m"(blue_15mask):"memory");
        d += 4;
        s += 12;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        const int r = *s++;
        const int g = *s++;
        const int b = *s++;
        *d++ = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
    }
}
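
/*
 * Going back up, 15/16 bpp -> 24 bpp must stretch 5- and 6-bit fields to
 * 8 bits.  The scalar tails replicate the top bits into the freed LSBs,
 * e.g. for a 5-bit value v:  (v << 3) | (v >> 2),  mapping 0 -> 0 and
 * 31 -> 255 with even spacing.  The MMX loops reach the same result by
 * pmulhw against the mul15_mid/mul16_mid/mul15_hi constants from
 * rgb2rgb.c, leaving each channel as a 16-bit lane that the
 * punpcklwd/punpckhwd + shift/OR sequence assembles into 3-byte pixels.
 */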
static inline void RENAME(rgb15tobgr24)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint16_t *end;
    const uint16_t *mm_end;
    uint8_t *d = dst;
    const uint16_t *s = (const uint16_t*)src;
    end = s + src_size/2;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    mm_end = end - 7;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq (%1), %%mm1 \n\t"
            "movq (%1), %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $5, %%mm0 \n\t"
            "pmulhw "MANGLE(mul15_mid)", %%mm0 \n\t"
            "pmulhw "MANGLE(mul15_mid)", %%mm1 \n\t"
            "pmulhw "MANGLE(mul15_hi)", %%mm2 \n\t"
            "movq %%mm0, %%mm3 \n\t"
            "movq %%mm1, %%mm4 \n\t"
            "movq %%mm2, %%mm5 \n\t"
            "punpcklwd %5, %%mm0 \n\t"
            "punpcklwd %5, %%mm1 \n\t"
            "punpcklwd %5, %%mm2 \n\t"
            "punpckhwd %5, %%mm3 \n\t"
            "punpckhwd %5, %%mm4 \n\t"
            "punpckhwd %5, %%mm5 \n\t"
            "psllq $8, %%mm1 \n\t"
            "psllq $16, %%mm2 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "psllq $8, %%mm4 \n\t"
            "psllq $16, %%mm5 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "movq %%mm0, %%mm6 \n\t"
            "movq %%mm3, %%mm7 \n\t"
            "movq 8(%1), %%mm0 \n\t"
            "movq 8(%1), %%mm1 \n\t"
            "movq 8(%1), %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $5, %%mm0 \n\t"
            "pmulhw "MANGLE(mul15_mid)", %%mm0 \n\t"
            "pmulhw "MANGLE(mul15_mid)", %%mm1 \n\t"
            "pmulhw "MANGLE(mul15_hi)", %%mm2 \n\t"
            "movq %%mm0, %%mm3 \n\t"
            "movq %%mm1, %%mm4 \n\t"
            "movq %%mm2, %%mm5 \n\t"
            "punpcklwd %5, %%mm0 \n\t"
            "punpcklwd %5, %%mm1 \n\t"
            "punpcklwd %5, %%mm2 \n\t"
            "punpckhwd %5, %%mm3 \n\t"
            "punpckhwd %5, %%mm4 \n\t"
            "punpckhwd %5, %%mm5 \n\t"
            "psllq $8, %%mm1 \n\t"
            "psllq $16, %%mm2 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "psllq $8, %%mm4 \n\t"
            "psllq $16, %%mm5 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm5, %%mm3 \n\t"
            :"=m"(*d)
            :"r"(s),"m"(mask15b),"m"(mask15g),"m"(mask15r),"m"(mmx_null)
            NAMED_CONSTRAINTS_ADD(mul15_mid,mul15_hi)
            :"memory");
        /* borrowed 32 to 24 */
        __asm__ volatile(
            "movq %%mm0, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "movq %%mm6, %%mm0 \n\t"
            "movq %%mm7, %%mm1 \n\t"
            "movq %%mm4, %%mm6 \n\t"
            "movq %%mm5, %%mm7 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm1, %%mm3 \n\t"
            STORE_BGR24_MMX
            :: "r"(d), "m"(*s)
            NAMED_CONSTRAINTS_ADD(mask24l,mask24h)
            :"memory");
        d += 24;
        s += 8;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register uint16_t bgr;
        bgr = *s++;
        *d++ = ((bgr&0x1F)<<3) | ((bgr&0x1F)>>2);
        *d++ = ((bgr&0x3E0)>>2) | ((bgr&0x3E0)>>7);
        *d++ = ((bgr&0x7C00)>>7) | ((bgr&0x7C00)>>12);
    }
}

static inline void RENAME(rgb16tobgr24)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint16_t *end;
    const uint16_t *mm_end;
    uint8_t *d = (uint8_t *)dst;
    const uint16_t *s = (const uint16_t *)src;
    end = s + src_size/2;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    mm_end = end - 7;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq (%1), %%mm1 \n\t"
            "movq (%1), %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $5, %%mm0 \n\t"
            "psrlq $1, %%mm2 \n\t"
            "pmulhw "MANGLE(mul15_mid)", %%mm0 \n\t"
            "pmulhw "MANGLE(mul16_mid)", %%mm1 \n\t"
            "pmulhw "MANGLE(mul15_hi)", %%mm2 \n\t"
            "movq %%mm0, %%mm3 \n\t"
            "movq %%mm1, %%mm4 \n\t"
            "movq %%mm2, %%mm5 \n\t"
            "punpcklwd %5, %%mm0 \n\t"
            "punpcklwd %5, %%mm1 \n\t"
            "punpcklwd %5, %%mm2 \n\t"
            "punpckhwd %5, %%mm3 \n\t"
            "punpckhwd %5, %%mm4 \n\t"
            "punpckhwd %5, %%mm5 \n\t"
            "psllq $8, %%mm1 \n\t"
            "psllq $16, %%mm2 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "psllq $8, %%mm4 \n\t"
            "psllq $16, %%mm5 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm5, %%mm3 \n\t"
            "movq %%mm0, %%mm6 \n\t"
            "movq %%mm3, %%mm7 \n\t"
            "movq 8(%1), %%mm0 \n\t"
            "movq 8(%1), %%mm1 \n\t"
            "movq 8(%1), %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $5, %%mm0 \n\t"
            "psrlq $1, %%mm2 \n\t"
            "pmulhw "MANGLE(mul15_mid)", %%mm0 \n\t"
            "pmulhw "MANGLE(mul16_mid)", %%mm1 \n\t"
            "pmulhw "MANGLE(mul15_hi)", %%mm2 \n\t"
            "movq %%mm0, %%mm3 \n\t"
            "movq %%mm1, %%mm4 \n\t"
            "movq %%mm2, %%mm5 \n\t"
            "punpcklwd %5, %%mm0 \n\t"
            "punpcklwd %5, %%mm1 \n\t"
            "punpcklwd %5, %%mm2 \n\t"
            "punpckhwd %5, %%mm3 \n\t"
            "punpckhwd %5, %%mm4 \n\t"
            "punpckhwd %5, %%mm5 \n\t"
            "psllq $8, %%mm1 \n\t"
            "psllq $16, %%mm2 \n\t"
            "por %%mm1, %%mm0 \n\t"
            "por %%mm2, %%mm0 \n\t"
            "psllq $8, %%mm4 \n\t"
            "psllq $16, %%mm5 \n\t"
            "por %%mm4, %%mm3 \n\t"
            "por %%mm5, %%mm3 \n\t"
            :"=m"(*d)
            :"r"(s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mmx_null)
            NAMED_CONSTRAINTS_ADD(mul15_mid,mul16_mid,mul15_hi)
            :"memory");
        /* borrowed 32 to 24 */
        __asm__ volatile(
            "movq %%mm0, %%mm4 \n\t"
            "movq %%mm3, %%mm5 \n\t"
            "movq %%mm6, %%mm0 \n\t"
            "movq %%mm7, %%mm1 \n\t"
            "movq %%mm4, %%mm6 \n\t"
            "movq %%mm5, %%mm7 \n\t"
            "movq %%mm0, %%mm2 \n\t"
            "movq %%mm1, %%mm3 \n\t"
            STORE_BGR24_MMX
            :: "r"(d), "m"(*s)
            NAMED_CONSTRAINTS_ADD(mask24l,mask24h)
            :"memory");
        d += 24;
        s += 8;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register uint16_t bgr;
        bgr = *s++;
        *d++ = ((bgr&0x1F)<<3) | ((bgr&0x1F)>>2);
        *d++ = ((bgr&0x7E0)>>3) | ((bgr&0x7E0)>>9);
        *d++ = ((bgr&0xF800)>>8) | ((bgr&0xF800)>>13);
    }
}

/*
 * mm0 = 00 B3 00 B2 00 B1 00 B0
 * mm1 = 00 G3 00 G2 00 G1 00 G0
 * mm2 = 00 R3 00 R2 00 R1 00 R0
 * mm6 = FF FF FF FF FF FF FF FF
 * mm7 = 00 00 00 00 00 00 00 00
 */
#define PACK_RGB32 \
    "packuswb %%mm7, %%mm0 \n\t" /* 00 00 00 00 B3 B2 B1 B0 */ \
    "packuswb %%mm7, %%mm1 \n\t" /* 00 00 00 00 G3 G2 G1 G0 */ \
    "packuswb %%mm7, %%mm2 \n\t" /* 00 00 00 00 R3 R2 R1 R0 */ \
    "punpcklbw %%mm1, %%mm0 \n\t" /* G3 B3 G2 B2 G1 B1 G0 B0 */ \
    "punpcklbw %%mm6, %%mm2 \n\t" /* FF R3 FF R2 FF R1 FF R0 */ \
    "movq %%mm0, %%mm3 \n\t" \
    "punpcklwd %%mm2, %%mm0 \n\t" /* FF R1 G1 B1 FF R0 G0 B0 */ \
    "punpckhwd %%mm2, %%mm3 \n\t" /* FF R3 G3 B3 FF R2 G2 B2 */ \
    MOVNTQ" %%mm0, (%0) \n\t" \
    MOVNTQ" %%mm3, 8(%0) \n\t"

static inline void RENAME(rgb15to32)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint16_t *end;
    const uint16_t *mm_end;
    uint8_t *d = dst;
    const uint16_t *s = (const uint16_t *)src;
    end = s + src_size/2;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
    __asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory");
    mm_end = end - 3;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq (%1), %%mm1 \n\t"
            "movq (%1), %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $5, %%mm0 \n\t"
            "pmulhw %5, %%mm0 \n\t"
            "pmulhw %5, %%mm1 \n\t"
            "pmulhw "MANGLE(mul15_hi)", %%mm2 \n\t"
            PACK_RGB32
            ::"r"(d),"r"(s),"m"(mask15b),"m"(mask15g),"m"(mask15r),"m"(mul15_mid)
            NAMED_CONSTRAINTS_ADD(mul15_hi)
            :"memory");
        d += 16;
        s += 4;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register uint16_t bgr;
        bgr = *s++;
        *d++ = ((bgr&0x1F)<<3) | ((bgr&0x1F)>>2);
        *d++ = ((bgr&0x3E0)>>2) | ((bgr&0x3E0)>>7);
        *d++ = ((bgr&0x7C00)>>7) | ((bgr&0x7C00)>>12);
        *d++ = 255;
    }
}

static inline void RENAME(rgb16to32)(const uint8_t *src, uint8_t *dst, int src_size)
{
    const uint16_t *end;
    const uint16_t *mm_end;
    uint8_t *d = dst;
    const uint16_t *s = (const uint16_t*)src;
    end = s + src_size/2;
    __asm__ volatile(PREFETCH" %0"::"m"(*s):"memory");
    __asm__ volatile("pxor %%mm7,%%mm7 \n\t":::"memory");
    __asm__ volatile("pcmpeqd %%mm6,%%mm6 \n\t":::"memory");
    mm_end = end - 3;
    while (s < mm_end) {
        __asm__ volatile(
            PREFETCH" 32(%1) \n\t"
            "movq (%1), %%mm0 \n\t"
            "movq (%1), %%mm1 \n\t"
            "movq (%1), %%mm2 \n\t"
            "pand %2, %%mm0 \n\t"
            "pand %3, %%mm1 \n\t"
            "pand %4, %%mm2 \n\t"
            "psllq $5, %%mm0 \n\t"
            "psrlq $1, %%mm2 \n\t"
            "pmulhw %5, %%mm0 \n\t"
            "pmulhw "MANGLE(mul16_mid)", %%mm1 \n\t"
            "pmulhw "MANGLE(mul15_hi)", %%mm2 \n\t"
            PACK_RGB32
            ::"r"(d),"r"(s),"m"(mask16b),"m"(mask16g),"m"(mask16r),"m"(mul15_mid)
            NAMED_CONSTRAINTS_ADD(mul16_mid,mul15_hi)
            :"memory");
        d += 16;
        s += 4;
    }
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    while (s < end) {
        register uint16_t bgr;
        bgr = *s++;
        *d++ = ((bgr&0x1F)<<3) | ((bgr&0x1F)>>2);
        *d++ = ((bgr&0x7E0)>>3) | ((bgr&0x7E0)>>9);
        *d++ = ((bgr&0xF800)>>8) | ((bgr&0xF800)>>13);
        *d++ = 255;
    }
}
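
/*
 * shuffle_bytes_2103 swaps bytes 0 and 2 of every 32-bit pixel
 * (BGRA <-> RGBA).  idx starts at -(src_size - 15) and climbs toward
 * zero, so the loop bound is a free sign test ("js 1b") and the same
 * negative index addresses source and destination.  The C tail shows
 * the idea: g keeps bytes 1 and 3, v the two bytes to swap, and
 * (v >> 16) + g + (v << 16) exchanges them.
 */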
static inline void RENAME(shuffle_bytes_2103)(const uint8_t *src, uint8_t *dst, int src_size)
{
    x86_reg idx = 15 - src_size;
    const uint8_t *s = src-idx;
    uint8_t *d = dst-idx;
    __asm__ volatile(
        "test %0, %0 \n\t"
        "jns 2f \n\t"
        PREFETCH" (%1, %0) \n\t"
        "movq %3, %%mm7 \n\t"
        "pxor %4, %%mm7 \n\t"
        "movq %%mm7, %%mm6 \n\t"
        "pxor %5, %%mm7 \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        PREFETCH" 32(%1, %0) \n\t"
        "movq (%1, %0), %%mm0 \n\t"
        "movq 8(%1, %0), %%mm1 \n\t"
#if COMPILE_TEMPLATE_MMXEXT
        "pshufw $177, %%mm0, %%mm3 \n\t"
        "pshufw $177, %%mm1, %%mm5 \n\t"
        "pand %%mm7, %%mm0 \n\t"
        "pand %%mm6, %%mm3 \n\t"
        "pand %%mm7, %%mm1 \n\t"
        "pand %%mm6, %%mm5 \n\t"
        "por %%mm3, %%mm0 \n\t"
        "por %%mm5, %%mm1 \n\t"
#else
        "movq %%mm0, %%mm2 \n\t"
        "movq %%mm1, %%mm4 \n\t"
        "pand %%mm7, %%mm0 \n\t"
        "pand %%mm6, %%mm2 \n\t"
        "pand %%mm7, %%mm1 \n\t"
        "pand %%mm6, %%mm4 \n\t"
        "movq %%mm2, %%mm3 \n\t"
        "movq %%mm4, %%mm5 \n\t"
        "pslld $16, %%mm2 \n\t"
        "psrld $16, %%mm3 \n\t"
        "pslld $16, %%mm4 \n\t"
        "psrld $16, %%mm5 \n\t"
        "por %%mm2, %%mm0 \n\t"
        "por %%mm4, %%mm1 \n\t"
        "por %%mm3, %%mm0 \n\t"
        "por %%mm5, %%mm1 \n\t"
#endif
        MOVNTQ" %%mm0, (%2, %0) \n\t"
        MOVNTQ" %%mm1, 8(%2, %0) \n\t"
        "add $16, %0 \n\t"
        "js 1b \n\t"
        SFENCE" \n\t"
        EMMS" \n\t"
        "2: \n\t"
        : "+&r"(idx)
        : "r" (s), "r" (d), "m" (mask32b), "m" (mask32r), "m" (mmx_one)
        : "memory");
    for (; idx<15; idx+=4) {
        register unsigned v = *(const uint32_t *)&s[idx], g = v & 0xff00ff00;
        v &= 0xff00ff;
        *(uint32_t *)&d[idx] = (v>>16) + g + (v<<16);
    }
}
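
/*
 * rgb24tobgr24 reverses every 3-byte pixel without unpacking: three
 * overlapping loads, shifted/masked with mask24r/g/b, each contribute
 * the bytes that already sit where the reversed pixel needs them, and
 * ORing the three parts yields 8 swapped pixels (24 bytes) per pass.
 * It uses the same negative-index/sign-test idiom as shuffle_bytes_2103;
 * whatever is left over (input not a multiple of 8 pixels) is swapped
 * in the C loop at the end.
 */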
static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, int src_size)
{
    unsigned i;
    x86_reg mmx_size= 23 - src_size;
    __asm__ volatile (
        "test %%"FF_REG_a", %%"FF_REG_a" \n\t"
        "jns 2f \n\t"
        "movq "MANGLE(mask24r)", %%mm5 \n\t"
        "movq "MANGLE(mask24g)", %%mm6 \n\t"
        "movq "MANGLE(mask24b)", %%mm7 \n\t"
        ".p2align 4 \n\t"
        "1: \n\t"
        PREFETCH" 32(%1, %%"FF_REG_a") \n\t"
        "movq (%1, %%"FF_REG_a"), %%mm0 \n\t" // BGR BGR BG
        "movq (%1, %%"FF_REG_a"), %%mm1 \n\t" // BGR BGR BG
        "movq 2(%1, %%"FF_REG_a"), %%mm2 \n\t" // R BGR BGR B
        "psllq $16, %%mm0 \n\t" // 00 BGR BGR
        "pand %%mm5, %%mm0 \n\t"
        "pand %%mm6, %%mm1 \n\t"
        "pand %%mm7, %%mm2 \n\t"
        "por %%mm0, %%mm1 \n\t"
        "por %%mm2, %%mm1 \n\t"
        "movq 6(%1, %%"FF_REG_a"), %%mm0 \n\t" // BGR BGR BG
        MOVNTQ" %%mm1,(%2, %%"FF_REG_a") \n\t" // RGB RGB RG
        "movq 8(%1, %%"FF_REG_a"), %%mm1 \n\t" // R BGR BGR B
        "movq 10(%1, %%"FF_REG_a"), %%mm2 \n\t" // GR BGR BGR
        "pand %%mm7, %%mm0 \n\t"
        "pand %%mm5, %%mm1 \n\t"
        "pand %%mm6, %%mm2 \n\t"
        "por %%mm0, %%mm1 \n\t"
        "por %%mm2, %%mm1 \n\t"
        "movq 14(%1, %%"FF_REG_a"), %%mm0 \n\t" // R BGR BGR B
        MOVNTQ" %%mm1, 8(%2, %%"FF_REG_a")\n\t" // B RGB RGB R
        "movq 16(%1, %%"FF_REG_a"), %%mm1 \n\t" // GR BGR BGR
        "movq 18(%1, %%"FF_REG_a"), %%mm2 \n\t" // BGR BGR BG
        "pand %%mm6, %%mm0 \n\t"
        "pand %%mm7, %%mm1 \n\t"
        "pand %%mm5, %%mm2 \n\t"
        "por %%mm0, %%mm1 \n\t"
        "por %%mm2, %%mm1 \n\t"
        MOVNTQ" %%mm1, 16(%2, %%"FF_REG_a") \n\t"
        "add $24, %%"FF_REG_a" \n\t"
        " js 1b \n\t"
        "2: \n\t"
        : "+a" (mmx_size)
        : "r" (src-mmx_size), "r"(dst-mmx_size)
        NAMED_CONSTRAINTS_ADD(mask24r,mask24g,mask24b)
    );
    __asm__ volatile(SFENCE:::"memory");
    __asm__ volatile(EMMS:::"memory");
    if (mmx_size==23) return; //finished, was multiple of 8
    src+= src_size;
    dst+= src_size;
    src_size= 23-mmx_size;
    src-= src_size;
    dst-= src_size;
    for (i=0; i<src_size; i+=3) {
        register uint8_t x;
        x = src[i + 2];
        dst[i + 1] = src[i + 1];
        dst[i + 2] = src[i + 0];
        dst[i + 0] = x;
    }
}
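
/*
 * Planar YUV -> packed YUY2: each pass loads 16 luma bytes plus 8 bytes
 * each of U and V, interleaves U with V (punpcklbw/punpckhbw) and then
 * the UV stream with Y, and streams out 32 YUYV bytes.  vertLumPerChroma
 * is 2 for 4:2:0 input, where each chroma line is reused for two luma
 * lines, and 1 for 4:2:2.
 */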
static inline void RENAME(yuvPlanartoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                           int width, int height,
                                           int lumStride, int chromStride, int dstStride, int vertLumPerChroma)
{
    int y;
    const x86_reg chromWidth= width>>1;
    for (y=0; y<height; y++) {
        //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
        __asm__ volatile(
            "xor %%"FF_REG_a", %%"FF_REG_a" \n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 32(%1, %%"FF_REG_a", 2) \n\t"
            PREFETCH" 32(%2, %%"FF_REG_a") \n\t"
            PREFETCH" 32(%3, %%"FF_REG_a") \n\t"
            "movq (%2, %%"FF_REG_a"), %%mm0 \n\t" // U(0)
            "movq %%mm0, %%mm2 \n\t" // U(0)
            "movq (%3, %%"FF_REG_a"), %%mm1 \n\t" // V(0)
            "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
            "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
            "movq (%1, %%"FF_REG_a",2), %%mm3 \n\t" // Y(0)
            "movq 8(%1, %%"FF_REG_a",2), %%mm5 \n\t" // Y(8)
            "movq %%mm3, %%mm4 \n\t" // Y(0)
            "movq %%mm5, %%mm6 \n\t" // Y(8)
            "punpcklbw %%mm0, %%mm3 \n\t" // YUYV YUYV(0)
            "punpckhbw %%mm0, %%mm4 \n\t" // YUYV YUYV(4)
            "punpcklbw %%mm2, %%mm5 \n\t" // YUYV YUYV(8)
            "punpckhbw %%mm2, %%mm6 \n\t" // YUYV YUYV(12)
            MOVNTQ" %%mm3, (%0, %%"FF_REG_a", 4) \n\t"
            MOVNTQ" %%mm4, 8(%0, %%"FF_REG_a", 4) \n\t"
            MOVNTQ" %%mm5, 16(%0, %%"FF_REG_a", 4) \n\t"
            MOVNTQ" %%mm6, 24(%0, %%"FF_REG_a", 4) \n\t"
            "add $8, %%"FF_REG_a" \n\t"
            "cmp %4, %%"FF_REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
            : "%"FF_REG_a
        );
        if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) {
            usrc += chromStride;
            vsrc += chromStride;
        }
        ysrc += lumStride;
        dst += dstStride;
    }
    __asm__(EMMS" \n\t"
            SFENCE" \n\t"
            :::"memory");
}

/**
 * Height should be a multiple of 2 and width should be a multiple of 16.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 */
static inline void RENAME(yv12toyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                      int width, int height,
                                      int lumStride, int chromStride, int dstStride)
{
    //FIXME interpolate chroma
    RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
}
static inline void RENAME(yuvPlanartouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                           int width, int height,
                                           int lumStride, int chromStride, int dstStride, int vertLumPerChroma)
{
    int y;
    const x86_reg chromWidth= width>>1;
    for (y=0; y<height; y++) {
        //FIXME handle 2 lines at once (fewer prefetches, reuse some chroma, but very likely memory-limited anyway)
        __asm__ volatile(
            "xor %%"FF_REG_a", %%"FF_REG_a" \n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 32(%1, %%"FF_REG_a", 2) \n\t"
            PREFETCH" 32(%2, %%"FF_REG_a") \n\t"
            PREFETCH" 32(%3, %%"FF_REG_a") \n\t"
            "movq (%2, %%"FF_REG_a"), %%mm0 \n\t" // U(0)
            "movq %%mm0, %%mm2 \n\t" // U(0)
            "movq (%3, %%"FF_REG_a"), %%mm1 \n\t" // V(0)
            "punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
            "punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
            "movq (%1, %%"FF_REG_a",2), %%mm3 \n\t" // Y(0)
            "movq 8(%1, %%"FF_REG_a",2), %%mm5 \n\t" // Y(8)
            "movq %%mm0, %%mm4 \n\t" // UVUV UVUV(0)
            "movq %%mm2, %%mm6 \n\t" // UVUV UVUV(8)
            "punpcklbw %%mm3, %%mm0 \n\t" // UYVY UYVY(0)
            "punpckhbw %%mm3, %%mm4 \n\t" // UYVY UYVY(4)
            "punpcklbw %%mm5, %%mm2 \n\t" // UYVY UYVY(8)
            "punpckhbw %%mm5, %%mm6 \n\t" // UYVY UYVY(12)
            MOVNTQ" %%mm0, (%0, %%"FF_REG_a", 4) \n\t"
            MOVNTQ" %%mm4, 8(%0, %%"FF_REG_a", 4) \n\t"
            MOVNTQ" %%mm2, 16(%0, %%"FF_REG_a", 4) \n\t"
            MOVNTQ" %%mm6, 24(%0, %%"FF_REG_a", 4) \n\t"
            "add $8, %%"FF_REG_a" \n\t"
            "cmp %4, %%"FF_REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
            : "%"FF_REG_a
        );
        if ((y&(vertLumPerChroma-1)) == vertLumPerChroma-1) {
            usrc += chromStride;
            vsrc += chromStride;
        }
        ysrc += lumStride;
        dst += dstStride;
    }
    __asm__(EMMS" \n\t"
            SFENCE" \n\t"
            :::"memory");
}
/**
 * Height should be a multiple of 2 and width should be a multiple of 16.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 */
static inline void RENAME(yv12touyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                      int width, int height,
                                      int lumStride, int chromStride, int dstStride)
{
    //FIXME interpolate chroma
    RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 2);
}

/**
 * Width should be a multiple of 16.
 */
static inline void RENAME(yuv422ptouyvy)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                         int width, int height,
                                         int lumStride, int chromStride, int dstStride)
{
    RENAME(yuvPlanartouyvy)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1);
}

/**
 * Width should be a multiple of 16.
 */
static inline void RENAME(yuv422ptoyuy2)(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
                                         int width, int height,
                                         int lumStride, int chromStride, int dstStride)
{
    RENAME(yuvPlanartoyuy2)(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride, 1);
}

/**
 * Height should be a multiple of 2 and width should be a multiple of 16.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 */
static inline void RENAME(yuy2toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
                                      int width, int height,
                                      int lumStride, int chromStride, int srcStride)
{
    int y;
    const x86_reg chromWidth= width>>1;
    for (y=0; y<height; y+=2) {
        __asm__ volatile(
            "xor %%"FF_REG_a", %%"FF_REG_a"\n\t"
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"FF_REG_a", 4) \n\t"
            "movq (%0, %%"FF_REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
            "movq 8(%0, %%"FF_REG_a", 4), %%mm1 \n\t" // YUYV YUYV(4)
            "movq %%mm0, %%mm2 \n\t" // YUYV YUYV(0)
            "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(4)
            "psrlw $8, %%mm0 \n\t" // U0V0 U0V0(0)
            "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(4)
            "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(0)
            "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(4)
            "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
            "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
            MOVNTQ" %%mm2, (%1, %%"FF_REG_a", 2) \n\t"
            "movq 16(%0, %%"FF_REG_a", 4), %%mm1 \n\t" // YUYV YUYV(8)
            "movq 24(%0, %%"FF_REG_a", 4), %%mm2 \n\t" // YUYV YUYV(12)
            "movq %%mm1, %%mm3 \n\t" // YUYV YUYV(8)
            "movq %%mm2, %%mm4 \n\t" // YUYV YUYV(12)
            "psrlw $8, %%mm1 \n\t" // U0V0 U0V0(8)
            "psrlw $8, %%mm2 \n\t" // U0V0 U0V0(12)
            "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(8)
            "pand %%mm7, %%mm4 \n\t" // Y0Y0 Y0Y0(12)
            "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
            "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
            MOVNTQ" %%mm3, 8(%1, %%"FF_REG_a", 2) \n\t"
            "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
            "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
            "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0)
            "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8)
            "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0)
            "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8)
            "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
            "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
            MOVNTQ" %%mm0, (%3, %%"FF_REG_a") \n\t"
            MOVNTQ" %%mm2, (%2, %%"FF_REG_a") \n\t"
            "add $8, %%"FF_REG_a" \n\t"
            "cmp %4, %%"FF_REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
            : "memory", "%"FF_REG_a
        );
        ydst += lumStride;
        src += srcStride;
        __asm__ volatile(
            "xor %%"FF_REG_a", %%"FF_REG_a"\n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"FF_REG_a", 4) \n\t"
            "movq (%0, %%"FF_REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
            "movq 8(%0, %%"FF_REG_a", 4), %%mm1 \n\t" // YUYV YUYV(4)
            "movq 16(%0, %%"FF_REG_a", 4), %%mm2 \n\t" // YUYV YUYV(8)
            "movq 24(%0, %%"FF_REG_a", 4), %%mm3 \n\t" // YUYV YUYV(12)
            "pand %%mm7, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
            "pand %%mm7, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
            "pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
            "pand %%mm7, %%mm3 \n\t" // Y0Y0 Y0Y0(12)
            "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
            "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
            MOVNTQ" %%mm0, (%1, %%"FF_REG_a", 2) \n\t"
            MOVNTQ" %%mm2, 8(%1, %%"FF_REG_a", 2) \n\t"
            "add $8, %%"FF_REG_a"\n\t"
            "cmp %4, %%"FF_REG_a"\n\t"
            " jb 1b \n\t"
            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
            : "memory", "%"FF_REG_a
        );
        udst += chromStride;
        vdst += chromStride;
        ydst += lumStride;
        src += srcStride;
    }
    __asm__ volatile(EMMS" \n\t"
                     SFENCE" \n\t"
                     :::"memory");
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */

#if COMPILE_TEMPLATE_MMXEXT || COMPILE_TEMPLATE_AMD3DNOW
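/*
 * planar2x doubles a plane in both directions with a 2x2 bilinear
 * kernel: every output sample is a 3:1 blend of its two nearest source
 * samples, (3*near + far) >> 2 in the C code.  The MMX path approximates
 * the 3:1 weight by applying PAVGB twice, since avg(a, avg(a, b)) equals
 * (3a + b) / 4 up to rounding; the first and last rows and the non-MMX
 * tail columns fall back to plain C.
 */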
static inline void RENAME(planar2x)(const uint8_t *src, uint8_t *dst, int srcWidth, int srcHeight, int srcStride, int dstStride)
{
    int x,y;
    dst[0]= src[0];
    // first line
    for (x=0; x<srcWidth-1; x++) {
        dst[2*x+1]= (3*src[x] + src[x+1])>>2;
        dst[2*x+2]= ( src[x] + 3*src[x+1])>>2;
    }
    dst[2*srcWidth-1]= src[srcWidth-1];
    dst+= dstStride;
    for (y=1; y<srcHeight; y++) {
        x86_reg mmxSize= srcWidth&~15;
        if (mmxSize) {
            __asm__ volatile(
                "mov %4, %%"FF_REG_a" \n\t"
                "movq "MANGLE(mmx_ff)", %%mm0 \n\t"
                "movq (%0, %%"FF_REG_a"), %%mm4 \n\t"
                "movq %%mm4, %%mm2 \n\t"
                "psllq $8, %%mm4 \n\t"
                "pand %%mm0, %%mm2 \n\t"
                "por %%mm2, %%mm4 \n\t"
                "movq (%1, %%"FF_REG_a"), %%mm5 \n\t"
                "movq %%mm5, %%mm3 \n\t"
                "psllq $8, %%mm5 \n\t"
                "pand %%mm0, %%mm3 \n\t"
                "por %%mm3, %%mm5 \n\t"
                "1: \n\t"
                "movq (%0, %%"FF_REG_a"), %%mm0 \n\t"
                "movq (%1, %%"FF_REG_a"), %%mm1 \n\t"
                "movq 1(%0, %%"FF_REG_a"), %%mm2 \n\t"
                "movq 1(%1, %%"FF_REG_a"), %%mm3 \n\t"
                PAVGB" %%mm0, %%mm5 \n\t"
                PAVGB" %%mm0, %%mm3 \n\t"
                PAVGB" %%mm0, %%mm5 \n\t"
                PAVGB" %%mm0, %%mm3 \n\t"
                PAVGB" %%mm1, %%mm4 \n\t"
                PAVGB" %%mm1, %%mm2 \n\t"
                PAVGB" %%mm1, %%mm4 \n\t"
                PAVGB" %%mm1, %%mm2 \n\t"
                "movq %%mm5, %%mm7 \n\t"
                "movq %%mm4, %%mm6 \n\t"
                "punpcklbw %%mm3, %%mm5 \n\t"
                "punpckhbw %%mm3, %%mm7 \n\t"
                "punpcklbw %%mm2, %%mm4 \n\t"
                "punpckhbw %%mm2, %%mm6 \n\t"
                MOVNTQ" %%mm5, (%2, %%"FF_REG_a", 2) \n\t"
                MOVNTQ" %%mm7, 8(%2, %%"FF_REG_a", 2) \n\t"
                MOVNTQ" %%mm4, (%3, %%"FF_REG_a", 2) \n\t"
                MOVNTQ" %%mm6, 8(%3, %%"FF_REG_a", 2) \n\t"
                "add $8, %%"FF_REG_a" \n\t"
                "movq -1(%0, %%"FF_REG_a"), %%mm4 \n\t"
                "movq -1(%1, %%"FF_REG_a"), %%mm5 \n\t"
                " js 1b \n\t"
                :: "r" (src + mmxSize ), "r" (src + srcStride + mmxSize ),
                   "r" (dst + mmxSize*2), "r" (dst + dstStride + mmxSize*2),
                   "g" (-mmxSize)
                NAMED_CONSTRAINTS_ADD(mmx_ff)
                : "%"FF_REG_a
            );
        } else {
            mmxSize = 1;
            dst[0] = (src[0] * 3 + src[srcStride]) >> 2;
            dst[dstStride] = (src[0] + 3 * src[srcStride]) >> 2;
        }
        for (x=mmxSize-1; x<srcWidth-1; x++) {
            dst[2*x +1]= (3*src[x+0] + src[x+srcStride+1])>>2;
            dst[2*x+dstStride+2]= ( src[x+0] + 3*src[x+srcStride+1])>>2;
            dst[2*x+dstStride+1]= ( src[x+1] + 3*src[x+srcStride ])>>2;
            dst[2*x +2]= (3*src[x+1] + src[x+srcStride ])>>2;
        }
        dst[srcWidth*2 -1 ]= (3*src[srcWidth-1] + src[srcWidth-1 + srcStride])>>2;
        dst[srcWidth*2 -1 + dstStride]= ( src[srcWidth-1] + 3*src[srcWidth-1 + srcStride])>>2;
        dst+=dstStride*2;
        src+=srcStride;
    }
    // last line
    dst[0]= src[0];
    for (x=0; x<srcWidth-1; x++) {
        dst[2*x+1]= (3*src[x] + src[x+1])>>2;
        dst[2*x+2]= ( src[x] + 3*src[x+1])>>2;
    }
    dst[2*srcWidth-1]= src[srcWidth-1];
    __asm__ volatile(EMMS" \n\t"
                     SFENCE" \n\t"
                     :::"memory");
}
#endif /* COMPILE_TEMPLATE_MMXEXT || COMPILE_TEMPLATE_AMD3DNOW */
#if !COMPILE_TEMPLATE_AMD3DNOW
/**
 * Height should be a multiple of 2 and width should be a multiple of 16.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 * Chrominance data is only taken from every second line, others are ignored.
 * FIXME: Write HQ version.
 */
static inline void RENAME(uyvytoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
                                      int width, int height,
                                      int lumStride, int chromStride, int srcStride)
{
    int y;
    const x86_reg chromWidth= width>>1;
    for (y=0; y<height; y+=2) {
        __asm__ volatile(
            "xor %%"FF_REG_a", %%"FF_REG_a" \n\t"
            "pcmpeqw %%mm7, %%mm7 \n\t"
            "psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"FF_REG_a", 4) \n\t"
            "movq (%0, %%"FF_REG_a", 4), %%mm0 \n\t" // UYVY UYVY(0)
            "movq 8(%0, %%"FF_REG_a", 4), %%mm1 \n\t" // UYVY UYVY(4)
            "movq %%mm0, %%mm2 \n\t" // UYVY UYVY(0)
            "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(4)
            "pand %%mm7, %%mm0 \n\t" // U0V0 U0V0(0)
            "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(4)
            "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(0)
            "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(4)
            "packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
            "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
            MOVNTQ" %%mm2, (%1, %%"FF_REG_a", 2) \n\t"
            "movq 16(%0, %%"FF_REG_a", 4), %%mm1 \n\t" // UYVY UYVY(8)
            "movq 24(%0, %%"FF_REG_a", 4), %%mm2 \n\t" // UYVY UYVY(12)
            "movq %%mm1, %%mm3 \n\t" // UYVY UYVY(8)
            "movq %%mm2, %%mm4 \n\t" // UYVY UYVY(12)
            "pand %%mm7, %%mm1 \n\t" // U0V0 U0V0(8)
            "pand %%mm7, %%mm2 \n\t" // U0V0 U0V0(12)
            "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(8)
            "psrlw $8, %%mm4 \n\t" // Y0Y0 Y0Y0(12)
            "packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
            "packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
            MOVNTQ" %%mm3, 8(%1, %%"FF_REG_a", 2) \n\t"
            "movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
            "movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
            "psrlw $8, %%mm0 \n\t" // V0V0 V0V0(0)
            "psrlw $8, %%mm1 \n\t" // V0V0 V0V0(8)
            "pand %%mm7, %%mm2 \n\t" // U0U0 U0U0(0)
            "pand %%mm7, %%mm3 \n\t" // U0U0 U0U0(8)
            "packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
            "packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
            MOVNTQ" %%mm0, (%3, %%"FF_REG_a") \n\t"
            MOVNTQ" %%mm2, (%2, %%"FF_REG_a") \n\t"
            "add $8, %%"FF_REG_a" \n\t"
            "cmp %4, %%"FF_REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
            : "memory", "%"FF_REG_a
        );
        ydst += lumStride;
        src += srcStride;
        __asm__ volatile(
            "xor %%"FF_REG_a", %%"FF_REG_a" \n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"FF_REG_a", 4) \n\t"
            "movq (%0, %%"FF_REG_a", 4), %%mm0 \n\t" // UYVY UYVY(0)
            "movq 8(%0, %%"FF_REG_a", 4), %%mm1 \n\t" // UYVY UYVY(4)
            "movq 16(%0, %%"FF_REG_a", 4), %%mm2 \n\t" // UYVY UYVY(8)
            "movq 24(%0, %%"FF_REG_a", 4), %%mm3 \n\t" // UYVY UYVY(12)
            "psrlw $8, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
            "psrlw $8, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
            "psrlw $8, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
            "psrlw $8, %%mm3 \n\t" // Y0Y0 Y0Y0(12)
            "packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
            "packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
            MOVNTQ" %%mm0, (%1, %%"FF_REG_a", 2) \n\t"
            MOVNTQ" %%mm2, 8(%1, %%"FF_REG_a", 2) \n\t"
            "add $8, %%"FF_REG_a" \n\t"
            "cmp %4, %%"FF_REG_a" \n\t"
            " jb 1b \n\t"
            ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
            : "memory", "%"FF_REG_a
        );
        udst += chromStride;
        vdst += chromStride;
        ydst += lumStride;
        src += srcStride;
    }
    __asm__ volatile(EMMS" \n\t"
                     SFENCE" \n\t"
                     :::"memory");
}
#endif /* !COMPILE_TEMPLATE_AMD3DNOW */

/**
 * Height should be a multiple of 2 and width should be a multiple of 2.
 * (If this is a problem for anyone then tell me, and I will fix it.)
 * Chrominance data is only taken from every second line,
 * others are ignored in the C version.
 * FIXME: Write HQ version.
 */
#if HAVE_7REGS
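/*
 * RGB24 -> YV12 via pmaddwd dot products: four pixels at a time are
 * unpacked to words, multiplied by the coefficient vector fetched from
 * rgb2yuv at BGR2Y_IDX (resp. BGR2U_IDX/BGR2V_IDX for chroma), and the
 * horizontal sums are finished with a second pmaddwd against ff_w1111
 * (all-ones words) before the bias from ff_bgr2YOffset is added.
 * Chroma is taken from a 2x2 average of two source lines to match the
 * 4:2:0 subsampling.  The first two rows go through ff_rgb24toyv12_c,
 * presumably so the asm loops never have to special-case the top edge.
 */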
static inline void RENAME(rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
                                       int width, int height,
                                       int lumStride, int chromStride, int srcStride,
                                       int32_t *rgb2yuv)
{
#define BGR2Y_IDX "16*4+16*32"
#define BGR2U_IDX "16*4+16*33"
#define BGR2V_IDX "16*4+16*34"
    int y;
    const x86_reg chromWidth= width>>1;
    if (height > 2) {
        ff_rgb24toyv12_c(src, ydst, udst, vdst, width, 2, lumStride, chromStride, srcStride, rgb2yuv);
        src += 2*srcStride;
        ydst += 2*lumStride;
        udst += chromStride;
        vdst += chromStride;
        height -= 2;
    }
    for (y=0; y<height-2; y+=2) {
        int i;
        for (i=0; i<2; i++) {
            __asm__ volatile(
                "mov %2, %%"FF_REG_a" \n\t"
                "movq "BGR2Y_IDX"(%3), %%mm6 \n\t"
                "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
                "pxor %%mm7, %%mm7 \n\t"
                "lea (%%"FF_REG_a", %%"FF_REG_a", 2), %%"FF_REG_d" \n\t"
                ".p2align 4 \n\t"
                "1: \n\t"
                PREFETCH" 64(%0, %%"FF_REG_d") \n\t"
                "movd (%0, %%"FF_REG_d"), %%mm0 \n\t"
                "movd 3(%0, %%"FF_REG_d"), %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "movd 6(%0, %%"FF_REG_d"), %%mm2 \n\t"
                "movd 9(%0, %%"FF_REG_d"), %%mm3 \n\t"
                "punpcklbw %%mm7, %%mm2 \n\t"
                "punpcklbw %%mm7, %%mm3 \n\t"
                "pmaddwd %%mm6, %%mm0 \n\t"
                "pmaddwd %%mm6, %%mm1 \n\t"
                "pmaddwd %%mm6, %%mm2 \n\t"
                "pmaddwd %%mm6, %%mm3 \n\t"
                "psrad $8, %%mm0 \n\t"
                "psrad $8, %%mm1 \n\t"
                "psrad $8, %%mm2 \n\t"
                "psrad $8, %%mm3 \n\t"
                "packssdw %%mm1, %%mm0 \n\t"
                "packssdw %%mm3, %%mm2 \n\t"
                "pmaddwd %%mm5, %%mm0 \n\t"
                "pmaddwd %%mm5, %%mm2 \n\t"
                "packssdw %%mm2, %%mm0 \n\t"
                "psraw $7, %%mm0 \n\t"
                "movd 12(%0, %%"FF_REG_d"), %%mm4 \n\t"
                "movd 15(%0, %%"FF_REG_d"), %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "movd 18(%0, %%"FF_REG_d"), %%mm2 \n\t"
                "movd 21(%0, %%"FF_REG_d"), %%mm3 \n\t"
                "punpcklbw %%mm7, %%mm2 \n\t"
                "punpcklbw %%mm7, %%mm3 \n\t"
                "pmaddwd %%mm6, %%mm4 \n\t"
                "pmaddwd %%mm6, %%mm1 \n\t"
                "pmaddwd %%mm6, %%mm2 \n\t"
                "pmaddwd %%mm6, %%mm3 \n\t"
                "psrad $8, %%mm4 \n\t"
                "psrad $8, %%mm1 \n\t"
                "psrad $8, %%mm2 \n\t"
                "psrad $8, %%mm3 \n\t"
                "packssdw %%mm1, %%mm4 \n\t"
                "packssdw %%mm3, %%mm2 \n\t"
                "pmaddwd %%mm5, %%mm4 \n\t"
                "pmaddwd %%mm5, %%mm2 \n\t"
                "add $24, %%"FF_REG_d" \n\t"
                "packssdw %%mm2, %%mm4 \n\t"
                "psraw $7, %%mm4 \n\t"
                "packuswb %%mm4, %%mm0 \n\t"
                "paddusb "MANGLE(ff_bgr2YOffset)", %%mm0 \n\t"
                MOVNTQ" %%mm0, (%1, %%"FF_REG_a") \n\t"
                "add $8, %%"FF_REG_a" \n\t"
                " js 1b \n\t"
                : : "r" (src+width*3), "r" (ydst+width), "g" ((x86_reg)-width), "r"(rgb2yuv)
                NAMED_CONSTRAINTS_ADD(ff_w1111,ff_bgr2YOffset)
                : "%"FF_REG_a, "%"FF_REG_d
            );
            ydst += lumStride;
            src += srcStride;
        }
        src -= srcStride*2;
        __asm__ volatile(
            "mov %4, %%"FF_REG_a" \n\t"
            "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
            "movq "BGR2U_IDX"(%5), %%mm6 \n\t"
            "pxor %%mm7, %%mm7 \n\t"
            "lea (%%"FF_REG_a", %%"FF_REG_a", 2), %%"FF_REG_d" \n\t"
            "add %%"FF_REG_d", %%"FF_REG_d" \n\t"
            ".p2align 4 \n\t"
            "1: \n\t"
            PREFETCH" 64(%0, %%"FF_REG_d") \n\t"
            PREFETCH" 64(%1, %%"FF_REG_d") \n\t"
#if COMPILE_TEMPLATE_MMXEXT || COMPILE_TEMPLATE_AMD3DNOW
            "movq (%0, %%"FF_REG_d"), %%mm0 \n\t"
            "movq (%1, %%"FF_REG_d"), %%mm1 \n\t"
            "movq 6(%0, %%"FF_REG_d"), %%mm2 \n\t"
            "movq 6(%1, %%"FF_REG_d"), %%mm3 \n\t"
            PAVGB" %%mm1, %%mm0 \n\t"
            PAVGB" %%mm3, %%mm2 \n\t"
            "movq %%mm0, %%mm1 \n\t"
            "movq %%mm2, %%mm3 \n\t"
            "psrlq $24, %%mm0 \n\t"
            "psrlq $24, %%mm2 \n\t"
            PAVGB" %%mm1, %%mm0 \n\t"
            PAVGB" %%mm3, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm0 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
#else
            "movd (%0, %%"FF_REG_d"), %%mm0 \n\t"
            "movd (%1, %%"FF_REG_d"), %%mm1 \n\t"
            "movd 3(%0, %%"FF_REG_d"), %%mm2 \n\t"
            "movd 3(%1, %%"FF_REG_d"), %%mm3 \n\t"
            "punpcklbw %%mm7, %%mm0 \n\t"
            "punpcklbw %%mm7, %%mm1 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm3 \n\t"
            "paddw %%mm1, %%mm0 \n\t"
            "paddw %%mm3, %%mm2 \n\t"
            "paddw %%mm2, %%mm0 \n\t"
            "movd 6(%0, %%"FF_REG_d"), %%mm4 \n\t"
            "movd 6(%1, %%"FF_REG_d"), %%mm1 \n\t"
            "movd 9(%0, %%"FF_REG_d"), %%mm2 \n\t"
            "movd 9(%1, %%"FF_REG_d"), %%mm3 \n\t"
            "punpcklbw %%mm7, %%mm4 \n\t"
            "punpcklbw %%mm7, %%mm1 \n\t"
            "punpcklbw %%mm7, %%mm2 \n\t"
            "punpcklbw %%mm7, %%mm3 \n\t"
            "paddw %%mm1, %%mm4 \n\t"
            "paddw %%mm3, %%mm2 \n\t"
            "paddw %%mm4, %%mm2 \n\t"
            "psrlw $2, %%mm0 \n\t"
            "psrlw $2, %%mm2 \n\t"
#endif
            "movq "BGR2V_IDX"(%5), %%mm1 \n\t"
            "movq "BGR2V_IDX"(%5), %%mm3 \n\t"
            "pmaddwd %%mm0, %%mm1 \n\t"
            "pmaddwd %%mm2, %%mm3 \n\t"
            "pmaddwd %%mm6, %%mm0 \n\t"
            "pmaddwd %%mm6, %%mm2 \n\t"
            "psrad $8, %%mm0 \n\t"
            "psrad $8, %%mm1 \n\t"
  1691. "psrad $8, %%mm2 \n\t"
  1692. "psrad $8, %%mm3 \n\t"
  1693. "packssdw %%mm2, %%mm0 \n\t"
  1694. "packssdw %%mm3, %%mm1 \n\t"
  1695. "pmaddwd %%mm5, %%mm0 \n\t"
  1696. "pmaddwd %%mm5, %%mm1 \n\t"
  1697. "packssdw %%mm1, %%mm0 \n\t" // V1 V0 U1 U0
  1698. "psraw $7, %%mm0 \n\t"
  1699. #if COMPILE_TEMPLATE_MMXEXT || COMPILE_TEMPLATE_AMD3DNOW
  1700. "movq 12(%0, %%"FF_REG_d"), %%mm4 \n\t"
  1701. "movq 12(%1, %%"FF_REG_d"), %%mm1 \n\t"
  1702. "movq 18(%0, %%"FF_REG_d"), %%mm2 \n\t"
  1703. "movq 18(%1, %%"FF_REG_d"), %%mm3 \n\t"
  1704. PAVGB" %%mm1, %%mm4 \n\t"
  1705. PAVGB" %%mm3, %%mm2 \n\t"
  1706. "movq %%mm4, %%mm1 \n\t"
  1707. "movq %%mm2, %%mm3 \n\t"
  1708. "psrlq $24, %%mm4 \n\t"
  1709. "psrlq $24, %%mm2 \n\t"
  1710. PAVGB" %%mm1, %%mm4 \n\t"
  1711. PAVGB" %%mm3, %%mm2 \n\t"
  1712. "punpcklbw %%mm7, %%mm4 \n\t"
  1713. "punpcklbw %%mm7, %%mm2 \n\t"
  1714. #else
  1715. "movd 12(%0, %%"FF_REG_d"), %%mm4 \n\t"
  1716. "movd 12(%1, %%"FF_REG_d"), %%mm1 \n\t"
  1717. "movd 15(%0, %%"FF_REG_d"), %%mm2 \n\t"
  1718. "movd 15(%1, %%"FF_REG_d"), %%mm3 \n\t"
  1719. "punpcklbw %%mm7, %%mm4 \n\t"
  1720. "punpcklbw %%mm7, %%mm1 \n\t"
  1721. "punpcklbw %%mm7, %%mm2 \n\t"
  1722. "punpcklbw %%mm7, %%mm3 \n\t"
  1723. "paddw %%mm1, %%mm4 \n\t"
  1724. "paddw %%mm3, %%mm2 \n\t"
  1725. "paddw %%mm2, %%mm4 \n\t"
  1726. "movd 18(%0, %%"FF_REG_d"), %%mm5 \n\t"
  1727. "movd 18(%1, %%"FF_REG_d"), %%mm1 \n\t"
  1728. "movd 21(%0, %%"FF_REG_d"), %%mm2 \n\t"
  1729. "movd 21(%1, %%"FF_REG_d"), %%mm3 \n\t"
  1730. "punpcklbw %%mm7, %%mm5 \n\t"
  1731. "punpcklbw %%mm7, %%mm1 \n\t"
  1732. "punpcklbw %%mm7, %%mm2 \n\t"
  1733. "punpcklbw %%mm7, %%mm3 \n\t"
  1734. "paddw %%mm1, %%mm5 \n\t"
  1735. "paddw %%mm3, %%mm2 \n\t"
  1736. "paddw %%mm5, %%mm2 \n\t"
  1737. "movq "MANGLE(ff_w1111)", %%mm5 \n\t"
  1738. "psrlw $2, %%mm4 \n\t"
  1739. "psrlw $2, %%mm2 \n\t"
  1740. #endif
  1741. "movq "BGR2V_IDX"(%5), %%mm1 \n\t"
  1742. "movq "BGR2V_IDX"(%5), %%mm3 \n\t"
  1743. "pmaddwd %%mm4, %%mm1 \n\t"
  1744. "pmaddwd %%mm2, %%mm3 \n\t"
  1745. "pmaddwd %%mm6, %%mm4 \n\t"
  1746. "pmaddwd %%mm6, %%mm2 \n\t"
  1747. "psrad $8, %%mm4 \n\t"
  1748. "psrad $8, %%mm1 \n\t"
  1749. "psrad $8, %%mm2 \n\t"
  1750. "psrad $8, %%mm3 \n\t"
  1751. "packssdw %%mm2, %%mm4 \n\t"
  1752. "packssdw %%mm3, %%mm1 \n\t"
  1753. "pmaddwd %%mm5, %%mm4 \n\t"
  1754. "pmaddwd %%mm5, %%mm1 \n\t"
  1755. "add $24, %%"FF_REG_d"\n\t"
  1756. "packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
  1757. "psraw $7, %%mm4 \n\t"
  1758. "movq %%mm0, %%mm1 \n\t"
  1759. "punpckldq %%mm4, %%mm0 \n\t"
  1760. "punpckhdq %%mm4, %%mm1 \n\t"
  1761. "packsswb %%mm1, %%mm0 \n\t"
  1762. "paddb "MANGLE(ff_bgr2UVOffset)", %%mm0 \n\t"
  1763. "movd %%mm0, (%2, %%"FF_REG_a") \n\t"
  1764. "punpckhdq %%mm0, %%mm0 \n\t"
  1765. "movd %%mm0, (%3, %%"FF_REG_a") \n\t"
  1766. "add $4, %%"FF_REG_a" \n\t"
  1767. " js 1b \n\t"
  1768. : : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" (-chromWidth), "r"(rgb2yuv)
  1769. NAMED_CONSTRAINTS_ADD(ff_w1111,ff_bgr2UVOffset)
  1770. : "%"FF_REG_a, "%"FF_REG_d
  1771. );
  1772. udst += chromStride;
  1773. vdst += chromStride;
  1774. src += srcStride*2;
  1775. }
  1776. __asm__ volatile(EMMS" \n\t"
  1777. SFENCE" \n\t"
  1778. :::"memory");
  1779. ff_rgb24toyv12_c(src, ydst, udst, vdst, width, height-y, lumStride, chromStride, srcStride, rgb2yuv);
  1780. }
  1781. #endif /* HAVE_7REGS */
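/* A scalar model of what the MMX routine above computes, assuming the
 * common BT.601 integer coefficients and an R,G,B byte order; the real code
 * instead reads its coefficients from the rgb2yuv table via
 * BGR2Y_IDX/BGR2U_IDX/BGR2V_IDX, and the rounding of its PAVGB path can
 * differ by one LSB.  Names are illustrative; the portable fallback is
 * ff_rgb24toyv12_c(). */
static void rgb24toyv12_2x2_sketch(const uint8_t *row0, const uint8_t *row1,
                                   uint8_t *dsty0, uint8_t *dsty1,
                                   uint8_t *dstu, uint8_t *dstv,
                                   int chromWidth)
{
    int i;
    for (i = 0; i < chromWidth; i++) {
        const uint8_t *p[2] = { row0 + 6 * i, row1 + 6 * i };
        uint8_t *py[2]      = { dsty0 + 2 * i, dsty1 + 2 * i };
        int rs = 0, gs = 0, bs = 0, j, k;
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                int r = p[j][3 * k + 0];
                int g = p[j][3 * k + 1];
                int b = p[j][3 * k + 2];
                /* one luma sample per input pixel */
                py[j][k] = (uint8_t)(((66 * r + 129 * g + 25 * b + 128) >> 8) + 16);
                rs += r; gs += g; bs += b;
            }
        }
        /* one chroma pair per 2x2 block, like the psrlw $2 averaging above */
        rs >>= 2; gs >>= 2; bs >>= 2;
        dstu[i] = (uint8_t)(((-38 * rs - 74 * gs + 112 * bs + 128) >> 8) + 128);
        dstv[i] = (uint8_t)(((112 * rs - 94 * gs - 18 * bs + 128) >> 8) + 128);
    }
}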
  1782. #endif /* !COMPILE_TEMPLATE_SSE2 */
  1783. #if !COMPILE_TEMPLATE_AMD3DNOW && !COMPILE_TEMPLATE_AVX
  1784. static void RENAME(interleaveBytes)(const uint8_t *src1, const uint8_t *src2, uint8_t *dest,
  1785. int width, int height, int src1Stride,
  1786. int src2Stride, int dstStride)
  1787. {
  1788. int h;
  1789. for (h=0; h < height; h++) {
  1790. int w;
  1791. if (width >= 16) {
  1792. #if COMPILE_TEMPLATE_SSE2
  1793. if (!((((intptr_t)src1) | ((intptr_t)src2) | ((intptr_t)dest))&15)) {
  1794. __asm__(
  1795. "xor %%"FF_REG_a", %%"FF_REG_a" \n\t"
  1796. "1: \n\t"
  1797. PREFETCH" 64(%1, %%"FF_REG_a") \n\t"
  1798. PREFETCH" 64(%2, %%"FF_REG_a") \n\t"
  1799. "movdqa (%1, %%"FF_REG_a"), %%xmm0 \n\t"
  1800. "movdqa (%1, %%"FF_REG_a"), %%xmm1 \n\t"
  1801. "movdqa (%2, %%"FF_REG_a"), %%xmm2 \n\t"
  1802. "punpcklbw %%xmm2, %%xmm0 \n\t"
  1803. "punpckhbw %%xmm2, %%xmm1 \n\t"
  1804. "movntdq %%xmm0, (%0, %%"FF_REG_a", 2) \n\t"
  1805. "movntdq %%xmm1, 16(%0, %%"FF_REG_a", 2) \n\t"
  1806. "add $16, %%"FF_REG_a" \n\t"
  1807. "cmp %3, %%"FF_REG_a" \n\t"
  1808. " jb 1b \n\t"
  1809. ::"r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15)
  1810. : "memory", XMM_CLOBBERS("xmm0", "xmm1", "xmm2",) "%"FF_REG_a
  1811. );
  1812. } else
  1813. #endif
  1814. __asm__(
  1815. "xor %%"FF_REG_a", %%"FF_REG_a" \n\t"
  1816. "1: \n\t"
  1817. PREFETCH" 64(%1, %%"FF_REG_a") \n\t"
  1818. PREFETCH" 64(%2, %%"FF_REG_a") \n\t"
  1819. "movq (%1, %%"FF_REG_a"), %%mm0 \n\t"
  1820. "movq 8(%1, %%"FF_REG_a"), %%mm2 \n\t"
  1821. "movq %%mm0, %%mm1 \n\t"
  1822. "movq %%mm2, %%mm3 \n\t"
  1823. "movq (%2, %%"FF_REG_a"), %%mm4 \n\t"
  1824. "movq 8(%2, %%"FF_REG_a"), %%mm5 \n\t"
  1825. "punpcklbw %%mm4, %%mm0 \n\t"
  1826. "punpckhbw %%mm4, %%mm1 \n\t"
  1827. "punpcklbw %%mm5, %%mm2 \n\t"
  1828. "punpckhbw %%mm5, %%mm3 \n\t"
  1829. MOVNTQ" %%mm0, (%0, %%"FF_REG_a", 2) \n\t"
  1830. MOVNTQ" %%mm1, 8(%0, %%"FF_REG_a", 2) \n\t"
  1831. MOVNTQ" %%mm2, 16(%0, %%"FF_REG_a", 2) \n\t"
  1832. MOVNTQ" %%mm3, 24(%0, %%"FF_REG_a", 2) \n\t"
  1833. "add $16, %%"FF_REG_a" \n\t"
  1834. "cmp %3, %%"FF_REG_a" \n\t"
  1835. " jb 1b \n\t"
  1836. ::"r"(dest), "r"(src1), "r"(src2), "r" ((x86_reg)width-15)
  1837. : "memory", "%"FF_REG_a
  1838. );
  1839. }
  1840. for (w= (width&(~15)); w < width; w++) {
  1841. dest[2*w+0] = src1[w];
  1842. dest[2*w+1] = src2[w];
  1843. }
  1844. dest += dstStride;
  1845. src1 += src1Stride;
  1846. src2 += src2Stride;
  1847. }
  1848. __asm__(
  1849. EMMS" \n\t"
  1850. SFENCE" \n\t"
  1851. ::: "memory"
  1852. );
  1853. }
  1854. #endif /* !COMPILE_TEMPLATE_AMD3DNOW && !COMPILE_TEMPLATE_AVX */
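/* The SSE2 branch above runs only when src1, src2 and dest are all 16-byte
 * aligned; OR-ing the three addresses lets a single test cover them.  As a
 * stand-alone sketch (name assumed): */
static int all_aligned16_sketch(const void *a, const void *b, const void *c)
{
    return (((intptr_t)a | (intptr_t)b | (intptr_t)c) & 15) == 0;
}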
  1855. #if !COMPILE_TEMPLATE_AVX || HAVE_AVX_EXTERNAL
  1856. #if !COMPILE_TEMPLATE_AMD3DNOW && (ARCH_X86_32 || COMPILE_TEMPLATE_SSE2) && COMPILE_TEMPLATE_MMXEXT == COMPILE_TEMPLATE_SSE2 && HAVE_YASM
  1857. void RENAME(ff_nv12ToUV)(uint8_t *dstU, uint8_t *dstV,
  1858. const uint8_t *unused,
  1859. const uint8_t *src1,
  1860. const uint8_t *src2,
  1861. int w,
  1862. uint32_t *unused2);
  1863. static void RENAME(deinterleaveBytes)(const uint8_t *src, uint8_t *dst1, uint8_t *dst2,
  1864. int width, int height, int srcStride,
  1865. int dst1Stride, int dst2Stride)
  1866. {
  1867. int h;
  1868. for (h = 0; h < height; h++) {
  1869. RENAME(ff_nv12ToUV)(dst1, dst2, NULL, src, NULL, width, NULL);
  1870. src += srcStride;
  1871. dst1 += dst1Stride;
  1872. dst2 += dst2Stride;
  1873. }
  1874. __asm__(
  1875. #if !COMPILE_TEMPLATE_SSE2
  1876. EMMS" \n\t"
  1877. #endif
  1878. SFENCE" \n\t"
  1879. ::: "memory"
  1880. );
  1881. }
1882. #endif /* !COMPILE_TEMPLATE_AMD3DNOW && (ARCH_X86_32 || COMPILE_TEMPLATE_SSE2) && COMPILE_TEMPLATE_MMXEXT == COMPILE_TEMPLATE_SSE2 && HAVE_YASM */
  1883. #endif /* !COMPILE_TEMPLATE_AVX || HAVE_AVX_EXTERNAL */
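/* Per line, RENAME(ff_nv12ToUV) splits NV12's interleaved chroma plane
 * (U0 V0 U1 V1 ...) into separate U and V rows.  A scalar sketch of that
 * line operation (name assumed; the real routine is external assembly): */
static void nv12_to_uv_line_sketch(uint8_t *dstU, uint8_t *dstV,
                                   const uint8_t *src, int w)
{
    int i;
    for (i = 0; i < w; i++) {
        dstU[i] = src[2 * i + 0];
        dstV[i] = src[2 * i + 1];
    }
}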
  1884. #if !COMPILE_TEMPLATE_SSE2
  1885. #if !COMPILE_TEMPLATE_AMD3DNOW
  1886. static inline void RENAME(vu9_to_vu12)(const uint8_t *src1, const uint8_t *src2,
  1887. uint8_t *dst1, uint8_t *dst2,
  1888. int width, int height,
  1889. int srcStride1, int srcStride2,
  1890. int dstStride1, int dstStride2)
  1891. {
  1892. x86_reg x, y;
  1893. int w,h;
  1894. w=width/2; h=height/2;
  1895. __asm__ volatile(
  1896. PREFETCH" %0 \n\t"
  1897. PREFETCH" %1 \n\t"
  1898. ::"m"(*(src1+srcStride1)),"m"(*(src2+srcStride2)):"memory");
  1899. for (y=0;y<h;y++) {
  1900. const uint8_t* s1=src1+srcStride1*(y>>1);
  1901. uint8_t* d=dst1+dstStride1*y;
  1902. x=0;
  1903. for (;x<w-31;x+=32) {
  1904. __asm__ volatile(
  1905. PREFETCH" 32(%1,%2) \n\t"
  1906. "movq (%1,%2), %%mm0 \n\t"
  1907. "movq 8(%1,%2), %%mm2 \n\t"
  1908. "movq 16(%1,%2), %%mm4 \n\t"
  1909. "movq 24(%1,%2), %%mm6 \n\t"
  1910. "movq %%mm0, %%mm1 \n\t"
  1911. "movq %%mm2, %%mm3 \n\t"
  1912. "movq %%mm4, %%mm5 \n\t"
  1913. "movq %%mm6, %%mm7 \n\t"
  1914. "punpcklbw %%mm0, %%mm0 \n\t"
  1915. "punpckhbw %%mm1, %%mm1 \n\t"
  1916. "punpcklbw %%mm2, %%mm2 \n\t"
  1917. "punpckhbw %%mm3, %%mm3 \n\t"
  1918. "punpcklbw %%mm4, %%mm4 \n\t"
  1919. "punpckhbw %%mm5, %%mm5 \n\t"
  1920. "punpcklbw %%mm6, %%mm6 \n\t"
  1921. "punpckhbw %%mm7, %%mm7 \n\t"
  1922. MOVNTQ" %%mm0, (%0,%2,2) \n\t"
  1923. MOVNTQ" %%mm1, 8(%0,%2,2) \n\t"
  1924. MOVNTQ" %%mm2, 16(%0,%2,2) \n\t"
  1925. MOVNTQ" %%mm3, 24(%0,%2,2) \n\t"
  1926. MOVNTQ" %%mm4, 32(%0,%2,2) \n\t"
  1927. MOVNTQ" %%mm5, 40(%0,%2,2) \n\t"
  1928. MOVNTQ" %%mm6, 48(%0,%2,2) \n\t"
  1929. MOVNTQ" %%mm7, 56(%0,%2,2)"
  1930. :: "r"(d), "r"(s1), "r"(x)
  1931. :"memory");
  1932. }
  1933. for (;x<w;x++) d[2*x]=d[2*x+1]=s1[x];
  1934. }
  1935. for (y=0;y<h;y++) {
  1936. const uint8_t* s2=src2+srcStride2*(y>>1);
  1937. uint8_t* d=dst2+dstStride2*y;
  1938. x=0;
  1939. for (;x<w-31;x+=32) {
  1940. __asm__ volatile(
  1941. PREFETCH" 32(%1,%2) \n\t"
  1942. "movq (%1,%2), %%mm0 \n\t"
  1943. "movq 8(%1,%2), %%mm2 \n\t"
  1944. "movq 16(%1,%2), %%mm4 \n\t"
  1945. "movq 24(%1,%2), %%mm6 \n\t"
  1946. "movq %%mm0, %%mm1 \n\t"
  1947. "movq %%mm2, %%mm3 \n\t"
  1948. "movq %%mm4, %%mm5 \n\t"
  1949. "movq %%mm6, %%mm7 \n\t"
  1950. "punpcklbw %%mm0, %%mm0 \n\t"
  1951. "punpckhbw %%mm1, %%mm1 \n\t"
  1952. "punpcklbw %%mm2, %%mm2 \n\t"
  1953. "punpckhbw %%mm3, %%mm3 \n\t"
  1954. "punpcklbw %%mm4, %%mm4 \n\t"
  1955. "punpckhbw %%mm5, %%mm5 \n\t"
  1956. "punpcklbw %%mm6, %%mm6 \n\t"
  1957. "punpckhbw %%mm7, %%mm7 \n\t"
  1958. MOVNTQ" %%mm0, (%0,%2,2) \n\t"
  1959. MOVNTQ" %%mm1, 8(%0,%2,2) \n\t"
  1960. MOVNTQ" %%mm2, 16(%0,%2,2) \n\t"
  1961. MOVNTQ" %%mm3, 24(%0,%2,2) \n\t"
  1962. MOVNTQ" %%mm4, 32(%0,%2,2) \n\t"
  1963. MOVNTQ" %%mm5, 40(%0,%2,2) \n\t"
  1964. MOVNTQ" %%mm6, 48(%0,%2,2) \n\t"
  1965. MOVNTQ" %%mm7, 56(%0,%2,2)"
  1966. :: "r"(d), "r"(s2), "r"(x)
  1967. :"memory");
  1968. }
  1969. for (;x<w;x++) d[2*x]=d[2*x+1]=s2[x];
  1970. }
  1971. __asm__(
  1972. EMMS" \n\t"
  1973. SFENCE" \n\t"
  1974. ::: "memory"
  1975. );
  1976. }
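/* Scalar model of the two loops above: each destination line y reads source
 * line y>>1, and the punpcklbw/punpckhbw self-unpacks write every source
 * byte twice, doubling the plane in both directions.  Name assumed: */
static void plane_upsample2x_sketch(const uint8_t *src, uint8_t *dst,
                                    int w, int h,
                                    int srcStride, int dstStride)
{
    int x, y;
    for (y = 0; y < h; y++) {
        const uint8_t *s = src + srcStride * (y >> 1);
        uint8_t *d = dst + dstStride * y;
        for (x = 0; x < w; x++)
            d[2 * x] = d[2 * x + 1] = s[x];
    }
}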
  1977. static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
  1978. uint8_t *dst,
  1979. int width, int height,
  1980. int srcStride1, int srcStride2,
  1981. int srcStride3, int dstStride)
  1982. {
  1983. x86_reg x;
  1984. int y,w,h;
  1985. w=width/2; h=height;
  1986. for (y=0;y<h;y++) {
  1987. const uint8_t* yp=src1+srcStride1*y;
  1988. const uint8_t* up=src2+srcStride2*(y>>2);
  1989. const uint8_t* vp=src3+srcStride3*(y>>2);
  1990. uint8_t* d=dst+dstStride*y;
  1991. x=0;
  1992. for (;x<w-7;x+=8) {
  1993. __asm__ volatile(
  1994. PREFETCH" 32(%1, %0) \n\t"
  1995. PREFETCH" 32(%2, %0) \n\t"
  1996. PREFETCH" 32(%3, %0) \n\t"
  1997. "movq (%1, %0, 4), %%mm0 \n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
  1998. "movq (%2, %0), %%mm1 \n\t" /* U0U1U2U3U4U5U6U7 */
  1999. "movq (%3, %0), %%mm2 \n\t" /* V0V1V2V3V4V5V6V7 */
  2000. "movq %%mm0, %%mm3 \n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
  2001. "movq %%mm1, %%mm4 \n\t" /* U0U1U2U3U4U5U6U7 */
  2002. "movq %%mm2, %%mm5 \n\t" /* V0V1V2V3V4V5V6V7 */
  2003. "punpcklbw %%mm1, %%mm1 \n\t" /* U0U0 U1U1 U2U2 U3U3 */
  2004. "punpcklbw %%mm2, %%mm2 \n\t" /* V0V0 V1V1 V2V2 V3V3 */
  2005. "punpckhbw %%mm4, %%mm4 \n\t" /* U4U4 U5U5 U6U6 U7U7 */
  2006. "punpckhbw %%mm5, %%mm5 \n\t" /* V4V4 V5V5 V6V6 V7V7 */
  2007. "movq %%mm1, %%mm6 \n\t"
  2008. "punpcklbw %%mm2, %%mm1 \n\t" /* U0V0 U0V0 U1V1 U1V1*/
  2009. "punpcklbw %%mm1, %%mm0 \n\t" /* Y0U0 Y1V0 Y2U0 Y3V0*/
  2010. "punpckhbw %%mm1, %%mm3 \n\t" /* Y4U1 Y5V1 Y6U1 Y7V1*/
  2011. MOVNTQ" %%mm0, (%4, %0, 8) \n\t"
  2012. MOVNTQ" %%mm3, 8(%4, %0, 8) \n\t"
  2013. "punpckhbw %%mm2, %%mm6 \n\t" /* U2V2 U2V2 U3V3 U3V3*/
  2014. "movq 8(%1, %0, 4), %%mm0 \n\t"
  2015. "movq %%mm0, %%mm3 \n\t"
  2016. "punpcklbw %%mm6, %%mm0 \n\t" /* Y U2 Y V2 Y U2 Y V2*/
  2017. "punpckhbw %%mm6, %%mm3 \n\t" /* Y U3 Y V3 Y U3 Y V3*/
  2018. MOVNTQ" %%mm0, 16(%4, %0, 8) \n\t"
  2019. MOVNTQ" %%mm3, 24(%4, %0, 8) \n\t"
  2020. "movq %%mm4, %%mm6 \n\t"
  2021. "movq 16(%1, %0, 4), %%mm0 \n\t"
  2022. "movq %%mm0, %%mm3 \n\t"
  2023. "punpcklbw %%mm5, %%mm4 \n\t"
  2024. "punpcklbw %%mm4, %%mm0 \n\t" /* Y U4 Y V4 Y U4 Y V4*/
  2025. "punpckhbw %%mm4, %%mm3 \n\t" /* Y U5 Y V5 Y U5 Y V5*/
  2026. MOVNTQ" %%mm0, 32(%4, %0, 8) \n\t"
  2027. MOVNTQ" %%mm3, 40(%4, %0, 8) \n\t"
  2028. "punpckhbw %%mm5, %%mm6 \n\t"
  2029. "movq 24(%1, %0, 4), %%mm0 \n\t"
  2030. "movq %%mm0, %%mm3 \n\t"
  2031. "punpcklbw %%mm6, %%mm0 \n\t" /* Y U6 Y V6 Y U6 Y V6*/
  2032. "punpckhbw %%mm6, %%mm3 \n\t" /* Y U7 Y V7 Y U7 Y V7*/
  2033. MOVNTQ" %%mm0, 48(%4, %0, 8) \n\t"
  2034. MOVNTQ" %%mm3, 56(%4, %0, 8) \n\t"
  2035. : "+r" (x)
  2036. : "r"(yp), "r" (up), "r"(vp), "r"(d)
  2037. :"memory");
  2038. }
  2039. for (; x<w; x++) {
  2040. const int x2 = x<<2;
  2041. d[8*x+0] = yp[x2];
  2042. d[8*x+1] = up[x];
  2043. d[8*x+2] = yp[x2+1];
  2044. d[8*x+3] = vp[x];
  2045. d[8*x+4] = yp[x2+2];
  2046. d[8*x+5] = up[x];
  2047. d[8*x+6] = yp[x2+3];
  2048. d[8*x+7] = vp[x];
  2049. }
  2050. }
  2051. __asm__(
  2052. EMMS" \n\t"
  2053. SFENCE" \n\t"
  2054. ::: "memory"
  2055. );
  2056. }
  2057. #endif /* !COMPILE_TEMPLATE_AMD3DNOW */
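/* YVU9 keeps one chroma sample per 4x4 luma block, which is why the loop
 * above indexes the chroma rows with y>>2 and reuses each chroma byte for
 * four luma samples across.  One output line as a scalar sketch (name
 * assumed; width counts luma samples and is even here): */
static void yvu9_to_yuy2_line_sketch(const uint8_t *yp, const uint8_t *up,
                                     const uint8_t *vp, uint8_t *d, int width)
{
    int x;
    for (x = 0; x < width; x += 2) {
        d[2 * x + 0] = yp[x];
        d[2 * x + 1] = up[x >> 2];
        d[2 * x + 2] = yp[x + 1];
        d[2 * x + 3] = vp[x >> 2];
    }
}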
  2058. static void RENAME(extract_even)(const uint8_t *src, uint8_t *dst, x86_reg count)
  2059. {
  2060. dst += count;
  2061. src += 2*count;
  2062. count= - count;
  2063. if(count <= -16) {
  2064. count += 15;
  2065. __asm__ volatile(
  2066. "pcmpeqw %%mm7, %%mm7 \n\t"
  2067. "psrlw $8, %%mm7 \n\t"
  2068. "1: \n\t"
  2069. "movq -30(%1, %0, 2), %%mm0 \n\t"
  2070. "movq -22(%1, %0, 2), %%mm1 \n\t"
  2071. "movq -14(%1, %0, 2), %%mm2 \n\t"
  2072. "movq -6(%1, %0, 2), %%mm3 \n\t"
  2073. "pand %%mm7, %%mm0 \n\t"
  2074. "pand %%mm7, %%mm1 \n\t"
  2075. "pand %%mm7, %%mm2 \n\t"
  2076. "pand %%mm7, %%mm3 \n\t"
  2077. "packuswb %%mm1, %%mm0 \n\t"
  2078. "packuswb %%mm3, %%mm2 \n\t"
  2079. MOVNTQ" %%mm0,-15(%2, %0) \n\t"
  2080. MOVNTQ" %%mm2,- 7(%2, %0) \n\t"
  2081. "add $16, %0 \n\t"
  2082. " js 1b \n\t"
  2083. : "+r"(count)
  2084. : "r"(src), "r"(dst)
  2085. );
  2086. count -= 15;
  2087. }
  2088. while(count<0) {
  2089. dst[count]= src[2*count];
  2090. count++;
  2091. }
  2092. }
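/* extract_even() and the helpers that follow all share one idiom: bias the
 * pointers by +count, negate count, and run it up toward zero, so the asm
 * loop needs only an add and a js with no separate compare.  The same idiom
 * in scalar form (illustrative name): */
static void copy_even_bytes_sketch(const uint8_t *src, uint8_t *dst,
                                   x86_reg count)
{
    dst +=     count;
    src += 2 * count;
    count = -count;
    while (count < 0) {        /* single induction variable, ends at 0 */
        dst[count] = src[2 * count];
        count++;
    }
}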
  2093. static void RENAME(extract_odd)(const uint8_t *src, uint8_t *dst, x86_reg count)
  2094. {
  2095. src ++;
  2096. dst += count;
  2097. src += 2*count;
  2098. count= - count;
  2099. if(count < -16) {
  2100. count += 16;
  2101. __asm__ volatile(
  2102. "pcmpeqw %%mm7, %%mm7 \n\t"
  2103. "psrlw $8, %%mm7 \n\t"
  2104. "1: \n\t"
  2105. "movq -32(%1, %0, 2), %%mm0 \n\t"
  2106. "movq -24(%1, %0, 2), %%mm1 \n\t"
  2107. "movq -16(%1, %0, 2), %%mm2 \n\t"
  2108. "movq -8(%1, %0, 2), %%mm3 \n\t"
  2109. "pand %%mm7, %%mm0 \n\t"
  2110. "pand %%mm7, %%mm1 \n\t"
  2111. "pand %%mm7, %%mm2 \n\t"
  2112. "pand %%mm7, %%mm3 \n\t"
  2113. "packuswb %%mm1, %%mm0 \n\t"
  2114. "packuswb %%mm3, %%mm2 \n\t"
  2115. MOVNTQ" %%mm0,-16(%2, %0) \n\t"
  2116. MOVNTQ" %%mm2,- 8(%2, %0) \n\t"
  2117. "add $16, %0 \n\t"
  2118. " js 1b \n\t"
  2119. : "+r"(count)
  2120. : "r"(src), "r"(dst)
  2121. );
  2122. count -= 16;
  2123. }
  2124. while(count<0) {
  2125. dst[count]= src[2*count];
  2126. count++;
  2127. }
  2128. }
  2129. #if !COMPILE_TEMPLATE_AMD3DNOW
  2130. static void RENAME(extract_even2)(const uint8_t *src, uint8_t *dst0, uint8_t *dst1, x86_reg count)
  2131. {
  2132. dst0+= count;
  2133. dst1+= count;
  2134. src += 4*count;
  2135. count= - count;
  2136. if(count <= -8) {
  2137. count += 7;
  2138. __asm__ volatile(
  2139. "pcmpeqw %%mm7, %%mm7 \n\t"
  2140. "psrlw $8, %%mm7 \n\t"
  2141. "1: \n\t"
  2142. "movq -28(%1, %0, 4), %%mm0 \n\t"
  2143. "movq -20(%1, %0, 4), %%mm1 \n\t"
  2144. "movq -12(%1, %0, 4), %%mm2 \n\t"
  2145. "movq -4(%1, %0, 4), %%mm3 \n\t"
  2146. "pand %%mm7, %%mm0 \n\t"
  2147. "pand %%mm7, %%mm1 \n\t"
  2148. "pand %%mm7, %%mm2 \n\t"
  2149. "pand %%mm7, %%mm3 \n\t"
  2150. "packuswb %%mm1, %%mm0 \n\t"
  2151. "packuswb %%mm3, %%mm2 \n\t"
  2152. "movq %%mm0, %%mm1 \n\t"
  2153. "movq %%mm2, %%mm3 \n\t"
  2154. "psrlw $8, %%mm0 \n\t"
  2155. "psrlw $8, %%mm2 \n\t"
  2156. "pand %%mm7, %%mm1 \n\t"
  2157. "pand %%mm7, %%mm3 \n\t"
  2158. "packuswb %%mm2, %%mm0 \n\t"
  2159. "packuswb %%mm3, %%mm1 \n\t"
  2160. MOVNTQ" %%mm0,- 7(%3, %0) \n\t"
  2161. MOVNTQ" %%mm1,- 7(%2, %0) \n\t"
  2162. "add $8, %0 \n\t"
  2163. " js 1b \n\t"
  2164. : "+r"(count)
  2165. : "r"(src), "r"(dst0), "r"(dst1)
  2166. );
  2167. count -= 7;
  2168. }
  2169. while(count<0) {
  2170. dst0[count]= src[4*count+0];
  2171. dst1[count]= src[4*count+2];
  2172. count++;
  2173. }
  2174. }
  2175. #endif /* !COMPILE_TEMPLATE_AMD3DNOW */
  2176. static void RENAME(extract_even2avg)(const uint8_t *src0, const uint8_t *src1, uint8_t *dst0, uint8_t *dst1, x86_reg count)
  2177. {
  2178. dst0 += count;
  2179. dst1 += count;
  2180. src0 += 4*count;
  2181. src1 += 4*count;
  2182. count= - count;
  2183. #ifdef PAVGB
  2184. if(count <= -8) {
  2185. count += 7;
  2186. __asm__ volatile(
  2187. "pcmpeqw %%mm7, %%mm7 \n\t"
  2188. "psrlw $8, %%mm7 \n\t"
  2189. "1: \n\t"
  2190. "movq -28(%1, %0, 4), %%mm0 \n\t"
  2191. "movq -20(%1, %0, 4), %%mm1 \n\t"
  2192. "movq -12(%1, %0, 4), %%mm2 \n\t"
  2193. "movq -4(%1, %0, 4), %%mm3 \n\t"
  2194. PAVGB" -28(%2, %0, 4), %%mm0 \n\t"
  2195. PAVGB" -20(%2, %0, 4), %%mm1 \n\t"
  2196. PAVGB" -12(%2, %0, 4), %%mm2 \n\t"
  2197. PAVGB" - 4(%2, %0, 4), %%mm3 \n\t"
  2198. "pand %%mm7, %%mm0 \n\t"
  2199. "pand %%mm7, %%mm1 \n\t"
  2200. "pand %%mm7, %%mm2 \n\t"
  2201. "pand %%mm7, %%mm3 \n\t"
  2202. "packuswb %%mm1, %%mm0 \n\t"
  2203. "packuswb %%mm3, %%mm2 \n\t"
  2204. "movq %%mm0, %%mm1 \n\t"
  2205. "movq %%mm2, %%mm3 \n\t"
  2206. "psrlw $8, %%mm0 \n\t"
  2207. "psrlw $8, %%mm2 \n\t"
  2208. "pand %%mm7, %%mm1 \n\t"
  2209. "pand %%mm7, %%mm3 \n\t"
  2210. "packuswb %%mm2, %%mm0 \n\t"
  2211. "packuswb %%mm3, %%mm1 \n\t"
  2212. MOVNTQ" %%mm0,- 7(%4, %0) \n\t"
  2213. MOVNTQ" %%mm1,- 7(%3, %0) \n\t"
  2214. "add $8, %0 \n\t"
  2215. " js 1b \n\t"
  2216. : "+r"(count)
  2217. : "r"(src0), "r"(src1), "r"(dst0), "r"(dst1)
  2218. );
  2219. count -= 7;
  2220. }
  2221. #endif
  2222. while(count<0) {
  2223. dst0[count]= (src0[4*count+0]+src1[4*count+0])>>1;
  2224. dst1[count]= (src0[4*count+2]+src1[4*count+2])>>1;
  2225. count++;
  2226. }
  2227. }
  2228. #if !COMPILE_TEMPLATE_AMD3DNOW
  2229. static void RENAME(extract_odd2)(const uint8_t *src, uint8_t *dst0, uint8_t *dst1, x86_reg count)
  2230. {
  2231. dst0+= count;
  2232. dst1+= count;
  2233. src += 4*count;
  2234. count= - count;
  2235. if(count <= -8) {
  2236. count += 7;
  2237. __asm__ volatile(
  2238. "pcmpeqw %%mm7, %%mm7 \n\t"
  2239. "psrlw $8, %%mm7 \n\t"
  2240. "1: \n\t"
  2241. "movq -28(%1, %0, 4), %%mm0 \n\t"
  2242. "movq -20(%1, %0, 4), %%mm1 \n\t"
  2243. "movq -12(%1, %0, 4), %%mm2 \n\t"
  2244. "movq -4(%1, %0, 4), %%mm3 \n\t"
  2245. "psrlw $8, %%mm0 \n\t"
  2246. "psrlw $8, %%mm1 \n\t"
  2247. "psrlw $8, %%mm2 \n\t"
  2248. "psrlw $8, %%mm3 \n\t"
  2249. "packuswb %%mm1, %%mm0 \n\t"
  2250. "packuswb %%mm3, %%mm2 \n\t"
  2251. "movq %%mm0, %%mm1 \n\t"
  2252. "movq %%mm2, %%mm3 \n\t"
  2253. "psrlw $8, %%mm0 \n\t"
  2254. "psrlw $8, %%mm2 \n\t"
  2255. "pand %%mm7, %%mm1 \n\t"
  2256. "pand %%mm7, %%mm3 \n\t"
  2257. "packuswb %%mm2, %%mm0 \n\t"
  2258. "packuswb %%mm3, %%mm1 \n\t"
  2259. MOVNTQ" %%mm0,- 7(%3, %0) \n\t"
  2260. MOVNTQ" %%mm1,- 7(%2, %0) \n\t"
  2261. "add $8, %0 \n\t"
  2262. " js 1b \n\t"
  2263. : "+r"(count)
  2264. : "r"(src), "r"(dst0), "r"(dst1)
  2265. );
  2266. count -= 7;
  2267. }
  2268. src++;
  2269. while(count<0) {
  2270. dst0[count]= src[4*count+0];
  2271. dst1[count]= src[4*count+2];
  2272. count++;
  2273. }
  2274. }
  2275. #endif /* !COMPILE_TEMPLATE_AMD3DNOW */
  2276. static void RENAME(extract_odd2avg)(const uint8_t *src0, const uint8_t *src1, uint8_t *dst0, uint8_t *dst1, x86_reg count)
  2277. {
  2278. dst0 += count;
  2279. dst1 += count;
  2280. src0 += 4*count;
  2281. src1 += 4*count;
  2282. count= - count;
  2283. #ifdef PAVGB
  2284. if(count <= -8) {
  2285. count += 7;
  2286. __asm__ volatile(
  2287. "pcmpeqw %%mm7, %%mm7 \n\t"
  2288. "psrlw $8, %%mm7 \n\t"
  2289. "1: \n\t"
  2290. "movq -28(%1, %0, 4), %%mm0 \n\t"
  2291. "movq -20(%1, %0, 4), %%mm1 \n\t"
  2292. "movq -12(%1, %0, 4), %%mm2 \n\t"
  2293. "movq -4(%1, %0, 4), %%mm3 \n\t"
  2294. PAVGB" -28(%2, %0, 4), %%mm0 \n\t"
  2295. PAVGB" -20(%2, %0, 4), %%mm1 \n\t"
  2296. PAVGB" -12(%2, %0, 4), %%mm2 \n\t"
  2297. PAVGB" - 4(%2, %0, 4), %%mm3 \n\t"
  2298. "psrlw $8, %%mm0 \n\t"
  2299. "psrlw $8, %%mm1 \n\t"
  2300. "psrlw $8, %%mm2 \n\t"
  2301. "psrlw $8, %%mm3 \n\t"
  2302. "packuswb %%mm1, %%mm0 \n\t"
  2303. "packuswb %%mm3, %%mm2 \n\t"
  2304. "movq %%mm0, %%mm1 \n\t"
  2305. "movq %%mm2, %%mm3 \n\t"
  2306. "psrlw $8, %%mm0 \n\t"
  2307. "psrlw $8, %%mm2 \n\t"
  2308. "pand %%mm7, %%mm1 \n\t"
  2309. "pand %%mm7, %%mm3 \n\t"
  2310. "packuswb %%mm2, %%mm0 \n\t"
  2311. "packuswb %%mm3, %%mm1 \n\t"
  2312. MOVNTQ" %%mm0,- 7(%4, %0) \n\t"
  2313. MOVNTQ" %%mm1,- 7(%3, %0) \n\t"
  2314. "add $8, %0 \n\t"
  2315. " js 1b \n\t"
  2316. : "+r"(count)
  2317. : "r"(src0), "r"(src1), "r"(dst0), "r"(dst1)
  2318. );
  2319. count -= 7;
  2320. }
  2321. #endif
  2322. src0++;
  2323. src1++;
  2324. while(count<0) {
  2325. dst0[count]= (src0[4*count+0]+src1[4*count+0])>>1;
  2326. dst1[count]= (src0[4*count+2]+src1[4*count+2])>>1;
  2327. count++;
  2328. }
  2329. }
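/* One subtlety in the *avg helpers above: PAVGB (and 3DNow!'s PAVGUSB)
 * average with rounding, (a+b+1)>>1, while the scalar tails truncate with
 * (a+b)>>1, so SIMD and C results may differ by one LSB.  Sketch: */
static uint8_t avg_round_up_sketch(uint8_t a, uint8_t b)
{
    return (uint8_t)((a + b + 1) >> 1);  /* what PAVGB/PAVGUSB compute */
}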
  2330. static void RENAME(yuyvtoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
  2331. int width, int height,
  2332. int lumStride, int chromStride, int srcStride)
  2333. {
  2334. int y;
  2335. const int chromWidth = AV_CEIL_RSHIFT(width, 1);
  2336. for (y=0; y<height; y++) {
  2337. RENAME(extract_even)(src, ydst, width);
  2338. if(y&1) {
  2339. RENAME(extract_odd2avg)(src-srcStride, src, udst, vdst, chromWidth);
  2340. udst+= chromStride;
  2341. vdst+= chromStride;
  2342. }
  2343. src += srcStride;
  2344. ydst+= lumStride;
  2345. }
  2346. __asm__(
  2347. EMMS" \n\t"
  2348. SFENCE" \n\t"
  2349. ::: "memory"
  2350. );
  2351. }
  2352. #if !COMPILE_TEMPLATE_AMD3DNOW
  2353. static void RENAME(yuyvtoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
  2354. int width, int height,
  2355. int lumStride, int chromStride, int srcStride)
  2356. {
  2357. int y;
  2358. const int chromWidth = AV_CEIL_RSHIFT(width, 1);
  2359. for (y=0; y<height; y++) {
  2360. RENAME(extract_even)(src, ydst, width);
  2361. RENAME(extract_odd2)(src, udst, vdst, chromWidth);
  2362. src += srcStride;
  2363. ydst+= lumStride;
  2364. udst+= chromStride;
  2365. vdst+= chromStride;
  2366. }
  2367. __asm__(
  2368. EMMS" \n\t"
  2369. SFENCE" \n\t"
  2370. ::: "memory"
  2371. );
  2372. }
  2373. #endif /* !COMPILE_TEMPLATE_AMD3DNOW */
  2374. static void RENAME(uyvytoyuv420)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
  2375. int width, int height,
  2376. int lumStride, int chromStride, int srcStride)
  2377. {
  2378. int y;
  2379. const int chromWidth = AV_CEIL_RSHIFT(width, 1);
  2380. for (y=0; y<height; y++) {
  2381. RENAME(extract_odd)(src, ydst, width);
  2382. if(y&1) {
  2383. RENAME(extract_even2avg)(src-srcStride, src, udst, vdst, chromWidth);
  2384. udst+= chromStride;
  2385. vdst+= chromStride;
  2386. }
  2387. src += srcStride;
  2388. ydst+= lumStride;
  2389. }
  2390. __asm__(
  2391. EMMS" \n\t"
  2392. SFENCE" \n\t"
  2393. ::: "memory"
  2394. );
  2395. }
  2396. #if !COMPILE_TEMPLATE_AMD3DNOW
  2397. static void RENAME(uyvytoyuv422)(uint8_t *ydst, uint8_t *udst, uint8_t *vdst, const uint8_t *src,
  2398. int width, int height,
  2399. int lumStride, int chromStride, int srcStride)
  2400. {
  2401. int y;
  2402. const int chromWidth = AV_CEIL_RSHIFT(width, 1);
  2403. for (y=0; y<height; y++) {
  2404. RENAME(extract_odd)(src, ydst, width);
  2405. RENAME(extract_even2)(src, udst, vdst, chromWidth);
  2406. src += srcStride;
  2407. ydst+= lumStride;
  2408. udst+= chromStride;
  2409. vdst+= chromStride;
  2410. }
  2411. __asm__(
  2412. EMMS" \n\t"
  2413. SFENCE" \n\t"
  2414. ::: "memory"
  2415. );
  2416. }
  2417. #endif /* !COMPILE_TEMPLATE_AMD3DNOW */
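/* Summary of the four converters above: the yuyvto* variants take luma from
 * even source bytes (extract_even) and chroma from odd ones, the uyvyto*
 * variants the reverse (extract_odd/extract_even2); the *420 variants
 * average chroma over each pair of source lines (the y&1 test), while the
 * *422 variants take chroma from every line. */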
  2418. #endif /* !COMPILE_TEMPLATE_SSE2 */
  2419. static av_cold void RENAME(rgb2rgb_init)(void)
  2420. {
  2421. #if !COMPILE_TEMPLATE_SSE2
  2422. #if !COMPILE_TEMPLATE_AMD3DNOW
  2423. rgb15to16 = RENAME(rgb15to16);
  2424. rgb15tobgr24 = RENAME(rgb15tobgr24);
  2425. rgb15to32 = RENAME(rgb15to32);
  2426. rgb16tobgr24 = RENAME(rgb16tobgr24);
  2427. rgb16to32 = RENAME(rgb16to32);
  2428. rgb16to15 = RENAME(rgb16to15);
  2429. rgb24tobgr16 = RENAME(rgb24tobgr16);
  2430. rgb24tobgr15 = RENAME(rgb24tobgr15);
  2431. rgb24tobgr32 = RENAME(rgb24tobgr32);
  2432. rgb32to16 = RENAME(rgb32to16);
  2433. rgb32to15 = RENAME(rgb32to15);
  2434. rgb32tobgr24 = RENAME(rgb32tobgr24);
  2435. rgb24to15 = RENAME(rgb24to15);
  2436. rgb24to16 = RENAME(rgb24to16);
  2437. rgb24tobgr24 = RENAME(rgb24tobgr24);
  2438. shuffle_bytes_2103 = RENAME(shuffle_bytes_2103);
  2439. rgb32tobgr16 = RENAME(rgb32tobgr16);
  2440. rgb32tobgr15 = RENAME(rgb32tobgr15);
  2441. yv12toyuy2 = RENAME(yv12toyuy2);
  2442. yv12touyvy = RENAME(yv12touyvy);
  2443. yuv422ptoyuy2 = RENAME(yuv422ptoyuy2);
  2444. yuv422ptouyvy = RENAME(yuv422ptouyvy);
  2445. yuy2toyv12 = RENAME(yuy2toyv12);
  2446. vu9_to_vu12 = RENAME(vu9_to_vu12);
  2447. yvu9_to_yuy2 = RENAME(yvu9_to_yuy2);
  2448. uyvytoyuv422 = RENAME(uyvytoyuv422);
  2449. yuyvtoyuv422 = RENAME(yuyvtoyuv422);
  2450. #endif /* !COMPILE_TEMPLATE_AMD3DNOW */
  2451. #if COMPILE_TEMPLATE_MMXEXT || COMPILE_TEMPLATE_AMD3DNOW
  2452. planar2x = RENAME(planar2x);
  2453. #endif /* COMPILE_TEMPLATE_MMXEXT || COMPILE_TEMPLATE_AMD3DNOW */
  2454. #if HAVE_7REGS
  2455. ff_rgb24toyv12 = RENAME(rgb24toyv12);
  2456. #endif /* HAVE_7REGS */
  2457. yuyvtoyuv420 = RENAME(yuyvtoyuv420);
  2458. uyvytoyuv420 = RENAME(uyvytoyuv420);
  2459. #endif /* !COMPILE_TEMPLATE_SSE2 */
  2460. #if !COMPILE_TEMPLATE_AMD3DNOW && !COMPILE_TEMPLATE_AVX
  2461. interleaveBytes = RENAME(interleaveBytes);
  2462. #endif /* !COMPILE_TEMPLATE_AMD3DNOW && !COMPILE_TEMPLATE_AVX */
  2463. #if !COMPILE_TEMPLATE_AVX || HAVE_AVX_EXTERNAL
  2464. #if !COMPILE_TEMPLATE_AMD3DNOW && (ARCH_X86_32 || COMPILE_TEMPLATE_SSE2) && COMPILE_TEMPLATE_MMXEXT == COMPILE_TEMPLATE_SSE2 && HAVE_YASM
  2465. deinterleaveBytes = RENAME(deinterleaveBytes);
2466. #endif /* !COMPILE_TEMPLATE_AMD3DNOW && (ARCH_X86_32 || COMPILE_TEMPLATE_SSE2) && COMPILE_TEMPLATE_MMXEXT == COMPILE_TEMPLATE_SSE2 && HAVE_YASM */
2467. #endif /* !COMPILE_TEMPLATE_AVX || HAVE_AVX_EXTERNAL */
  2468. }
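/* RENAME(rgb2rgb_init) only swaps function pointers: rgb2rgb.c installs the
 * portable C implementations first and then calls the best-matching
 * template initializer for the detected CPU.  The general pattern as a
 * self-contained sketch (all names assumed): */
typedef void (*line_fn_sketch)(const uint8_t *src, uint8_t *dst, int n);

static void copy_c_sketch(const uint8_t *src, uint8_t *dst, int n)
{
    int i;
    for (i = 0; i < n; i++)
        dst[i] = src[i];
}

static void copy_unrolled_sketch(const uint8_t *src, uint8_t *dst, int n)
{
    int i = 0;
    for (; i + 4 <= n; i += 4) {  /* stand-in for a SIMD inner loop */
        dst[i + 0] = src[i + 0];
        dst[i + 1] = src[i + 1];
        dst[i + 2] = src[i + 2];
        dst[i + 3] = src[i + 3];
    }
    for (; i < n; i++)            /* scalar tail, like the loops above */
        dst[i] = src[i];
}

static line_fn_sketch select_line_fn_sketch(int have_simd)
{
    line_fn_sketch fn = copy_c_sketch;  /* always-correct fallback first */
    if (have_simd)
        fn = copy_unrolled_sketch;      /* override with the faster variant */
    return fn;
}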