/*
 * rgb2rgb.c, Software RGB to RGB converter
 * Written by Nick Kurshev.
 */
#include <inttypes.h>
#include "../config.h"
#include "rgb2rgb.h"
#include "../mmx_defs.h"
#ifdef HAVE_MMX
static const uint64_t mask32 __attribute__((aligned(8))) = 0x00FFFFFF00FFFFFFULL;
static const uint64_t mask24l __attribute__((aligned(8))) = 0x0000000000FFFFFFULL;
static const uint64_t mask24h __attribute__((aligned(8))) = 0x0000FFFFFF000000ULL;
static const uint64_t mask15b __attribute__((aligned(8))) = 0x001F001F001F001FULL; /* 00000000 00011111 xxB */
static const uint64_t mask15rg __attribute__((aligned(8))) = 0x7FE07FE07FE07FE0ULL; /* 01111111 11100000 RGx */
static const uint64_t mask15s __attribute__((aligned(8))) = 0xFFE0FFE0FFE0FFE0ULL;
#endif
void rgb24to32(const uint8_t *src, uint8_t *dst, uint32_t src_size)
{
	uint8_t *dest = dst;
	const uint8_t *s = src;
	const uint8_t *end;
#ifdef HAVE_MMX
	uint8_t *mm_end;
#endif
	end = s + src_size;
#ifdef HAVE_MMX
	__asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
	mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*2))*(MMREG_SIZE*2));
	__asm __volatile("movq %0, %%mm7"::"m"(mask32):"memory");
	if(mm_end == end) mm_end -= MMREG_SIZE*2;
	while(s < mm_end)
	{
		__asm __volatile(
			PREFETCH" 32%1\n\t"
			"movd %1, %%mm0\n\t"
			"movd 3%1, %%mm1\n\t"
			"movd 6%1, %%mm2\n\t"
			"movd 9%1, %%mm3\n\t"
			"punpckldq %%mm1, %%mm0\n\t"
			"punpckldq %%mm3, %%mm2\n\t"
			"pand %%mm7, %%mm0\n\t"
			"pand %%mm7, %%mm2\n\t"
			MOVNTQ" %%mm0, %0\n\t"
			MOVNTQ" %%mm2, 8%0"
			:"=m"(*dest)
			:"m"(*s)
			:"memory");
		dest += 16;
		s += 12;
	}
	__asm __volatile(SFENCE:::"memory");
	__asm __volatile(EMMS:::"memory");
#endif
	while(s < end)
	{
		*dest++ = *s++;
		*dest++ = *s++;
		*dest++ = *s++;
		*dest++ = 0;
	}
}
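/*
 * Editor's note, not part of the original file: a minimal usage sketch for
 * rgb24to32. The buffer sizes are the only assumption here: every 3-byte
 * source pixel becomes 4 output bytes, so dst needs (src_size/3)*4 bytes.
 */
#if 0
static void example_rgb24to32_usage(void)
{
	static const uint8_t rgb24[6] = {1,2,3, 4,5,6};	/* two packed 24-bit pixels */
	uint8_t rgb32[8];				/* (6/3)*4 = 8 output bytes */
	rgb24to32(rgb24, rgb32, 6);
	/* rgb32 now holds {1,2,3,0, 4,5,6,0}: each pixel gains a zero filler byte */
}
#endif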
void rgb32to24(const uint8_t *src, uint8_t *dst, uint32_t src_size)
{
	uint8_t *dest = dst;
	const uint8_t *s = src;
	const uint8_t *end;
#ifdef HAVE_MMX
	uint8_t *mm_end;
#endif
	end = s + src_size;
#ifdef HAVE_MMX
	__asm __volatile(PREFETCH" %0"::"m"(*s):"memory");
	mm_end = (uint8_t*)((((unsigned long)end)/(MMREG_SIZE*2))*(MMREG_SIZE*2));
	__asm __volatile(
		"movq %0, %%mm7\n\t"
		"movq %1, %%mm6"
		::"m"(mask24l),"m"(mask24h):"memory");
	if(mm_end == end) mm_end -= MMREG_SIZE*2;
	while(s < mm_end)
	{
		__asm __volatile(
			PREFETCH" 32%1\n\t"
			"movq %1, %%mm0\n\t"
			"movq 8%1, %%mm1\n\t"
			"movq %%mm0, %%mm2\n\t"
			"movq %%mm1, %%mm3\n\t"
			"psrlq $8, %%mm2\n\t"
			"psrlq $8, %%mm3\n\t"
			"pand %%mm7, %%mm0\n\t"
			"pand %%mm7, %%mm1\n\t"
			"pand %%mm6, %%mm2\n\t"
			"pand %%mm6, %%mm3\n\t"
			"por %%mm2, %%mm0\n\t"
			"por %%mm3, %%mm1\n\t"
			MOVNTQ" %%mm0, %0\n\t"
			MOVNTQ" %%mm1, 6%0"
			:"=m"(*dest)
			:"m"(*s)
			:"memory");
		dest += 12;
		s += 16;
	}
	__asm __volatile(SFENCE:::"memory");
	__asm __volatile(EMMS:::"memory");
#endif
	while(s < end)
	{
		*dest++ = *s++;
		*dest++ = *s++;
		*dest++ = *s++;
		s++;
	}
}
/*
 * Original by Strepto/Astral
 * ported to gcc & bugfixed: A'rpi
 * MMX2, 3DNOW optimization by Nick Kurshev
 * 32-bit C version, and the and&add trick by Michael Niedermayer
 */
void rgb15to16(const uint8_t *src, uint8_t *dst, uint32_t src_size)
{
#ifdef HAVE_MMX
	register const char* s = (const char*)src + src_size;
	register char* d = (char*)dst + src_size;
	register int offs = -(int)src_size;
	__asm __volatile(PREFETCH" %0"::"m"(*(s+offs)));
	__asm __volatile(
		"movq %0, %%mm4\n\t"
		::"m"(mask15s));
	while(offs < 0)
	{
		__asm __volatile(
			PREFETCH" 32%1\n\t"
			"movq %1, %%mm0\n\t"
			"movq 8%1, %%mm2\n\t"
			"movq %%mm0, %%mm1\n\t"
			"movq %%mm2, %%mm3\n\t"
			"pand %%mm4, %%mm0\n\t"
			"pand %%mm4, %%mm2\n\t"
			"paddw %%mm1, %%mm0\n\t"
			"paddw %%mm3, %%mm2\n\t"
			MOVNTQ" %%mm0, %0\n\t"
			MOVNTQ" %%mm2, 8%0"
			:"=m"(*(d+offs))
			:"m"(*(s+offs))
			);
		offs += 16;
	}
	__asm __volatile(SFENCE:::"memory");
	__asm __volatile(EMMS:::"memory");
#else
#if 0
	const uint16_t *s1 = (const uint16_t *)src;
	uint16_t *d1 = (uint16_t *)dst;
	const uint16_t *e = (const uint16_t *)((const uint8_t *)s1 + src_size);
	while(s1 < e){
		register int x = *(s1++);
		/* rrrrrggggggbbbbb
		   0rrrrrgggggbbbbb
		   0111 1111 1110 0000 = 0x7FE0
		   0000 0000 0001 1111 = 0x001F */
		*(d1++) = (x&0x001F) | ((x&0x7FE0)<<1);
	}
#else
	const uint32_t *s1 = (const uint32_t *)src;
	uint32_t *d1 = (uint32_t *)dst;
	int i;
	int size = src_size>>2;
	for(i=0; i<size; i++)
	{
		register int x = s1[i];
//		d1[i] = x + (x&0x7FE07FE0); /* faster but needs the MSB to be 0, which might not always be true */
		d1[i] = (x&0x7FFF7FFF) + (x&0x7FE07FE0);
	}
#endif
#endif
}
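/*
 * Editor's note, not part of the original file: a worked example of the
 * and&add trick used above. Adding (x & 0x7FE0) to x doubles the R and G
 * fields, i.e. shifts them left by one, while blue passes through; masking
 * with 0x7FFF first keeps a set top bit from carrying into red.
 */
#if 0
static void example_and_add_trick(void)
{
	uint16_t x = 0x7FFF;				/* 0rrrrrgggggbbbbb, all fields set */
	uint16_t y = (x & 0x7FFF) + (x & 0x7FE0);	/* rrrrrggggg0bbbbb */
	/* y == 0xFFDF: RG moved up one bit, the new low green bit is 0, blue kept */
}
#endif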
/**
 * Palette is assumed to contain bgr32
 */
void palette8torgb32(uint8_t *src, uint8_t *dst, int src_size, uint8_t *palette)
{
	int i;
	for(i=0; i<src_size; i++)
		((uint32_t *)dst)[i] = ((uint32_t *)palette)[ src[i] ];
}
/**
 * Palette is assumed to contain bgr32
 */
void palette8torgb24(uint8_t *src, uint8_t *dst, int src_size, uint8_t *palette)
{
	int i;
/*
	writes 1 byte too much and might cause alignment issues on some architectures?
	for(i=0; i<src_size; i++)
		*((uint32_t *)(&dst[i*3])) = ((uint32_t *)palette)[ src[i] ];
*/
	for(i=0; i<src_size; i++)
	{
		//FIXME slow?
		dst[0] = palette[ src[i]*4+0 ];
		dst[1] = palette[ src[i]*4+1 ];
		dst[2] = palette[ src[i]*4+2 ];
		dst += 3;
	}
}
void rgb32to16(uint8_t *src, uint8_t *dst, int src_size)
{
	int i;
	for(i=0; i<src_size; i+=4)
	{
		const int b = src[i+0];
		const int g = src[i+1];
		const int r = src[i+2];
		/* one 16-bit output pixel per 4 input bytes, hence index i>>2 */
		((uint16_t *)dst)[i>>2] = (b>>3) | ((g&0xFC)<<3) | ((r&0xF8)<<8);
	}
}
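/*
 * Editor's note, not part of the original file: a sanity check of the 565
 * packing above on one pixel. b>>3 = 0x001F, (g&0xFC)<<3 = 0x07E0 and
 * (r&0xF8)<<8 = 0xF800 occupy disjoint bit ranges, so white ORs to 0xFFFF.
 */
#if 0
static void example_rgb32to16_check(void)
{
	uint8_t bgr32[4] = {0xFF, 0xFF, 0xFF, 0x00};	/* one white bgr32 pixel */
	uint16_t out;
	rgb32to16(bgr32, (uint8_t *)&out, 4);
	/* out == 0xFFFF */
}
#endif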
void rgb32to15(uint8_t *src, uint8_t *dst, int src_size)
{
	int i;
	for(i=0; i<src_size; i+=4)
	{
		const int b = src[i+0];
		const int g = src[i+1];
		const int r = src[i+2];
		/* 555 green occupies bits 9..5, so g&0xF8 shifts left by 2, not 3 */
		((uint16_t *)dst)[i>>2] = (b>>3) | ((g&0xF8)<<2) | ((r&0xF8)<<7);
	}
}
/**
 * Palette is assumed to contain bgr16, see rgb32to16 to convert the palette
 */
void palette8torgb16(uint8_t *src, uint8_t *dst, int src_size, uint8_t *palette)
{
	int i;
	for(i=0; i<src_size; i++)
		((uint16_t *)dst)[i] = ((uint16_t *)palette)[ src[i] ];
}
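/*
 * Editor's note, not part of the original file: a sketch of the palette
 * conversion chain the comment above suggests. The caller and its buffers
 * are hypothetical; palette8torgb15 with rgb32to15 works the same way.
 */
#if 0
static void example_palette16_usage(uint8_t *src, uint8_t *dst, int src_size,
                                    uint8_t *palette32)
{
	uint16_t palette16[256];			/* 8-bit indices -> 256 entries */
	rgb32to16(palette32, (uint8_t *)palette16, 256*4);	/* bgr32 -> bgr16 */
	palette8torgb16(src, dst, src_size, (uint8_t *)palette16);
}
#endif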
/**
 * Palette is assumed to contain bgr15, see rgb32to15 to convert the palette
 */
void palette8torgb15(uint8_t *src, uint8_t *dst, int src_size, uint8_t *palette)
{
	int i;
	for(i=0; i<src_size; i++)
		((uint16_t *)dst)[i] = ((uint16_t *)palette)[ src[i] ];
}
void yv12toyuy2(uint8_t *ysrc, uint8_t *usrc, uint8_t *vsrc, uint8_t *dst, int src_size)
{
	int i;
	src_size >>= 1;	/* src_size counts Y bytes; each iteration packs 2 Y samples */
	for(i=0; i<src_size; i++)
	{
		dst[4*i+0] = ysrc[2*i+0];
		dst[4*i+1] = usrc[i];
		dst[4*i+2] = ysrc[2*i+1];
		dst[4*i+3] = vsrc[i];
	}
}
void yuy2toyv12(uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, int src_size)
{
	int i;
	src_size >>= 1;	/* again the luma byte count: each iteration consumes 4 YUY2 bytes */
	for(i=0; i<src_size; i++)
	{
		ydst[2*i+0] = src[4*i+0];
		udst[i]     = src[4*i+1];
		ydst[2*i+1] = src[4*i+2];
		vdst[i]     = src[4*i+3];
	}
}
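/*
 * Editor's note, not part of the original file: a round-trip sketch for the
 * two packers above. That both take the luma byte count (4 here) is inferred
 * from the indexing, so the YUY2 buffer holds 2*4 bytes and each chroma
 * plane 4/2 samples.
 */
#if 0
static void example_yuy2_roundtrip(void)
{
	uint8_t y[4] = {10,20,30,40}, u[2] = {50,60}, v[2] = {70,80};
	uint8_t yuy2[8], y2[4], u2[2], v2[2];
	yv12toyuy2(y, u, v, yuy2, 4);		/* yuy2 = {10,50,20,70, 30,60,40,80} */
	yuy2toyv12(yuy2, y2, u2, v2, 4);	/* recovers y, u and v exactly */
}
#endif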