/*
 *  rgb2rgb.c, software RGB to RGB converter
 *  Written by Nick Kurshev.
 */
#include <inttypes.h>
#include "../config.h"
#include "rgb2rgb.h"
#include "../mmx_defs.h"

#ifdef HAVE_MMX
static const uint64_t mask32   __attribute__((aligned(8))) = 0x00FFFFFF00FFFFFFULL;
static const uint64_t mask24l  __attribute__((aligned(8))) = 0x0000000000FFFFFFULL;
static const uint64_t mask24h  __attribute__((aligned(8))) = 0x0000FFFFFF000000ULL;
static const uint64_t mask15b  __attribute__((aligned(8))) = 0x001F001F001F001FULL; /* 00000000 00011111  xxB */
static const uint64_t mask15rg __attribute__((aligned(8))) = 0x7FE07FE07FE07FE0ULL; /* 01111111 11100000  RGx */
#endif
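
/*
 * Note on the masks (added for clarity, not in the original source):
 *  - mask32  clears the 4th byte of each packed 32 bit pixel, so a 3 byte
 *    pixel loaded with MOVD keeps only its three colour bytes.
 *  - mask24l keeps bytes 0-2 of an MMX register (first pixel's RGB) and
 *    mask24h keeps bytes 3-5 (second pixel's RGB after a right shift by 8),
 *    which lets rgb32to24 pack two 32 bit pixels into 6 output bytes.
 *  - mask15b / mask15rg split each 15 bit pixel word into its blue field and
 *    its red+green fields so rgb15to16 can shift R and G up by one bit.
 */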
/* Convert packed 24 bit RGB to 32 bit RGB; the 4th byte of each output pixel is set to 0. */
void rgb24to32(uint8_t *src, uint8_t *dst, uint32_t src_size)
{
    uint8_t *dest = dst;
    uint8_t *s = src;
    uint8_t *end;
#ifdef HAVE_MMX
    uint8_t *mm_end;
#endif
    end = s + src_size;
#ifdef HAVE_MMX
    __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
    mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*2))*(MMREG_SIZE*2));
    __asm __volatile("movq %0, %%mm7"::"m"(mask32):"memory");
    if(mm_end == end) mm_end -= MMREG_SIZE*2;
    while(s < mm_end)
    {
        __asm __volatile(
            PREFETCH" 32%1\n\t"
            "movd %1, %%mm0\n\t"
            "movd 3%1, %%mm1\n\t"
            "movd 6%1, %%mm2\n\t"
            "movd 9%1, %%mm3\n\t"
            "punpckldq %%mm1, %%mm0\n\t"
            "punpckldq %%mm3, %%mm2\n\t"
            "pand %%mm7, %%mm0\n\t"
            "pand %%mm7, %%mm2\n\t"
            MOVNTQ" %%mm0, %0\n\t"
            MOVNTQ" %%mm2, 8%0"
            :"=m"(*dest)
            :"m"(*s)
            :"memory");
        dest += 16;
        s += 12;
    }
    __asm __volatile(SFENCE:::"memory");
    __asm __volatile(EMMS:::"memory");
#endif
    /* scalar tail (and fallback when MMX is not available) */
    while(s < end)
    {
        *dest++ = *s++;
        *dest++ = *s++;
        *dest++ = *s++;
        *dest++ = 0;
    }
}
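
/*
 * Usage sketch (added for illustration, not part of the original file):
 * expand a tiny 2 pixel RGB24 buffer to RGB32.  Each 3 byte pixel is copied
 * as-is and padded with a zero byte.
 */
#if 0
static void rgb24to32_example(void)
{
    uint8_t in[6]  = { 0x11, 0x22, 0x33,    /* pixel 0 */
                       0x44, 0x55, 0x66 };  /* pixel 1 */
    uint8_t out[8];

    rgb24to32(in, out, sizeof(in));
    /* out == { 0x11, 0x22, 0x33, 0x00, 0x44, 0x55, 0x66, 0x00 } */
}
#endif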
/* Convert packed 32 bit RGB to 24 bit RGB; the 4th byte of each input pixel is dropped. */
void rgb32to24(uint8_t *src, uint8_t *dst, uint32_t src_size)
{
    uint8_t *dest = dst;
    uint8_t *s = src;
    uint8_t *end;
#ifdef HAVE_MMX
    uint8_t *mm_end;
#endif
    end = s + src_size;
#ifdef HAVE_MMX
    __asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
    mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*2))*(MMREG_SIZE*2));
    __asm __volatile(
        "movq %0, %%mm7\n\t"
        "movq %1, %%mm6"
        ::"m"(mask24l),"m"(mask24h):"memory");
    if(mm_end == end) mm_end -= MMREG_SIZE*2;
    while(s < mm_end)
    {
        __asm __volatile(
            PREFETCH" 32%1\n\t"
            "movq %1, %%mm0\n\t"
            "movq 8%1, %%mm1\n\t"
            "movq %%mm0, %%mm2\n\t"
            "movq %%mm1, %%mm3\n\t"
            "psrlq $8, %%mm2\n\t"
            "psrlq $8, %%mm3\n\t"
            "pand %%mm7, %%mm0\n\t"
            "pand %%mm7, %%mm1\n\t"
            "pand %%mm6, %%mm2\n\t"
            "pand %%mm6, %%mm3\n\t"
            "por %%mm2, %%mm0\n\t"
            "por %%mm3, %%mm1\n\t"
            MOVNTQ" %%mm0, %0\n\t"
            MOVNTQ" %%mm1, 6%0"
            :"=m"(*dest)
            :"m"(*s)
            :"memory");
        dest += 12;
        s += 16;
    }
    __asm __volatile(SFENCE:::"memory");
    __asm __volatile(EMMS:::"memory");
#endif
    /* scalar tail (and fallback when MMX is not available) */
    while(s < end)
    {
        *dest++ = *s++;
        *dest++ = *s++;
        *dest++ = *s++;
        s++;
    }
}
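
/*
 * Usage sketch (added for illustration, not part of the original file):
 * pack a tiny 2 pixel RGB32 buffer into RGB24 by dropping every 4th byte.
 */
#if 0
static void rgb32to24_example(void)
{
    uint8_t in[8]  = { 0x11, 0x22, 0x33, 0xFF,    /* pixel 0 */
                       0x44, 0x55, 0x66, 0xFF };  /* pixel 1 */
    uint8_t out[6];

    rgb32to24(in, out, sizeof(in));
    /* out == { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 } */
}
#endif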
/*
 * Original by Strepto/Astral
 * ported to gcc & bugfixed: A'rpi
 * MMX2, 3DNOW optimization by Nick Kurshev
 */
/* Convert 15 bit (0:5:5:5) RGB to 16 bit (5:6:5) RGB by shifting the R and G
   fields up by one bit; the new low green bit is left 0. */
void rgb15to16(uint8_t *src, uint8_t *dst, uint32_t src_size)
{
#ifdef HAVE_MMX
    register uint8_t *s = src + src_size;
    register uint8_t *d = dst + src_size;
    register int offs = -src_size;
    __asm __volatile(PREFETCH" %0"::"m"(*(s+offs)):"memory");
    __asm __volatile(
        "movq %0, %%mm4\n\t"
        "movq %1, %%mm5"
        ::"m"(mask15b), "m"(mask15rg):"memory");
    while(offs < 0)
    {
        __asm __volatile(
            PREFETCH" 32%1\n\t"
            "movq %1, %%mm0\n\t"
            "movq 8%1, %%mm2\n\t"
            "movq %%mm0, %%mm1\n\t"
            "movq %%mm2, %%mm3\n\t"
            "pand %%mm4, %%mm0\n\t"
            "pand %%mm5, %%mm1\n\t"
            "pand %%mm4, %%mm2\n\t"
            "pand %%mm5, %%mm3\n\t"
            "psllq $1, %%mm1\n\t"
            "psllq $1, %%mm3\n\t"
            "por %%mm1, %%mm0\n\t"
            "por %%mm3, %%mm2\n\t"
            MOVNTQ" %%mm0, %0\n\t"
            MOVNTQ" %%mm2, 8%0"
            :"=m"(*(d+offs))
            :"m"(*(s+offs))
            :"memory");
        offs += 16;
    }
    __asm __volatile(SFENCE:::"memory");
    __asm __volatile(EMMS:::"memory");
#else
    uint16_t *s1 = (uint16_t *)src;
    uint16_t *d1 = (uint16_t *)dst;
    uint16_t *e  = (uint16_t *)((uint8_t *)s1 + src_size);
    while(s1 < e){
        register int x = *(s1++);
        /* 15 bit in:  0rrrrrgggggbbbbb
         * 16 bit out: rrrrrggggg0bbbbb
         * 0111 1111 1110 0000 = 0x7FE0  (R and G)
         * 0000 0000 0001 1111 = 0x001F  (B) */
        *(d1++) = (x & 0x001F) | ((x & 0x7FE0) << 1);
    }
#endif
}
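
/*
 * Worked example (added for illustration, not part of the original file):
 * one 15 bit pixel through rgb15to16's scalar formula.
 *
 *   x                = 0x7FFF   (R=31, G=31, B=31 in 0:5:5:5)
 *   x & 0x001F       = 0x001F   (blue stays in place)
 *   (x & 0x7FE0) << 1 = 0xFFC0  (red and green move up one bit)
 *   result           = 0xFFDF   (R=31, G=62, B=31 in 5:6:5; green LSB is 0)
 */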