You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

915 lines
27KB

  1. /*
  2. *
  3. * rgb2rgb.c, Software RGB to RGB convertor
  4. * pluralize by Software PAL8 to RGB convertor
  5. * Software YUV to YUV convertor
  6. * Software YUV to RGB convertor
  7. * Written by Nick Kurshev.
  8. * palette & yuv & runtime cpu stuff by Michael (michaelni@gmx.at) (under GPL)
  9. */
  10. #include <inttypes.h>
  11. #include "../config.h"
  12. #include "rgb2rgb.h"
  13. #include "../cpudetect.h"
  14. #include "../mangle.h"
  15. #include "../bswap.h"
  16. #include "../libvo/fastmemcpy.h"
#ifdef ARCH_X86
#define CAN_COMPILE_X86_ASM
#endif

#define FAST_BGR2YV12 // use 7 bit coeffs instead of 15bit

#ifdef CAN_COMPILE_X86_ASM
/* 8-byte aligned 64 bit constants used by the MMX/MMX2/3DNOW code generated
   from rgb2rgb_template.c: channel bit masks and multipliers for the packed
   32/24/16/15 bpp pixel formats. */
static const uint64_t mmx_null __attribute__((aligned(8))) = 0x0000000000000000ULL;
static const uint64_t mmx_one __attribute__((aligned(8))) = 0xFFFFFFFFFFFFFFFFULL;
static const uint64_t mask32b __attribute__((aligned(8))) = 0x000000FF000000FFULL;
static const uint64_t mask32g __attribute__((aligned(8))) = 0x0000FF000000FF00ULL;
static const uint64_t mask32r __attribute__((aligned(8))) = 0x00FF000000FF0000ULL;
static const uint64_t mask32 __attribute__((aligned(8))) = 0x00FFFFFF00FFFFFFULL;
static const uint64_t mask3216br __attribute__((aligned(8)))=0x00F800F800F800F8ULL;
static const uint64_t mask3216g __attribute__((aligned(8)))=0x0000FC000000FC00ULL;
static const uint64_t mask3215g __attribute__((aligned(8)))=0x0000F8000000F800ULL;
static const uint64_t mul3216 __attribute__((aligned(8))) = 0x2000000420000004ULL;
static const uint64_t mul3215 __attribute__((aligned(8))) = 0x2000000820000008ULL;
static const uint64_t mask24b __attribute__((aligned(8))) = 0x00FF0000FF0000FFULL;
static const uint64_t mask24g __attribute__((aligned(8))) = 0xFF0000FF0000FF00ULL;
static const uint64_t mask24r __attribute__((aligned(8))) = 0x0000FF0000FF0000ULL;
static const uint64_t mask24l __attribute__((aligned(8))) = 0x0000000000FFFFFFULL;
static const uint64_t mask24h __attribute__((aligned(8))) = 0x0000FFFFFF000000ULL;
static const uint64_t mask24hh __attribute__((aligned(8))) = 0xffff000000000000ULL;
static const uint64_t mask24hhh __attribute__((aligned(8))) = 0xffffffff00000000ULL;
static const uint64_t mask24hhhh __attribute__((aligned(8))) = 0xffffffffffff0000ULL;
static const uint64_t mask15b __attribute__((aligned(8))) = 0x001F001F001F001FULL; /* 00000000 00011111 xxB */
static const uint64_t mask15rg __attribute__((aligned(8))) = 0x7FE07FE07FE07FE0ULL; /* 01111111 11100000 RGx */
static const uint64_t mask15s __attribute__((aligned(8))) = 0xFFE0FFE0FFE0FFE0ULL;
static const uint64_t mask15g __attribute__((aligned(8))) = 0x03E003E003E003E0ULL;
static const uint64_t mask15r __attribute__((aligned(8))) = 0x7C007C007C007C00ULL;
#define mask16b mask15b
static const uint64_t mask16g __attribute__((aligned(8))) = 0x07E007E007E007E0ULL;
static const uint64_t mask16r __attribute__((aligned(8))) = 0xF800F800F800F800ULL;
static const uint64_t red_16mask __attribute__((aligned(8))) = 0x0000f8000000f800ULL;
static const uint64_t green_16mask __attribute__((aligned(8)))= 0x000007e0000007e0ULL;
static const uint64_t blue_16mask __attribute__((aligned(8))) = 0x0000001f0000001fULL;
static const uint64_t red_15mask __attribute__((aligned(8))) = 0x00007c000000f800ULL;
static const uint64_t green_15mask __attribute__((aligned(8)))= 0x000003e0000007e0ULL;
static const uint64_t blue_15mask __attribute__((aligned(8))) = 0x0000001f0000001fULL;
#ifdef FAST_BGR2YV12
/* low precision (7 bit) packed BGR -> YUV coefficients */
static const uint64_t bgr2YCoeff __attribute__((aligned(8))) = 0x000000210041000DULL;
static const uint64_t bgr2UCoeff __attribute__((aligned(8))) = 0x0000FFEEFFDC0038ULL;
static const uint64_t bgr2VCoeff __attribute__((aligned(8))) = 0x00000038FFD2FFF8ULL;
#else
/* full precision (15 bit) packed BGR -> YUV coefficients */
static const uint64_t bgr2YCoeff __attribute__((aligned(8))) = 0x000020E540830C8BULL;
static const uint64_t bgr2UCoeff __attribute__((aligned(8))) = 0x0000ED0FDAC23831ULL;
static const uint64_t bgr2VCoeff __attribute__((aligned(8))) = 0x00003831D0E6F6EAULL;
#endif
static const uint64_t bgr2YOffset __attribute__((aligned(8))) = 0x1010101010101010ULL;
static const uint64_t bgr2UVOffset __attribute__((aligned(8)))= 0x8080808080808080ULL;
static const uint64_t w1111 __attribute__((aligned(8))) = 0x0001000100010001ULL;
#if 0
/* dither tables -- currently disabled / unused */
static volatile uint64_t __attribute__((aligned(8))) b5Dither;
static volatile uint64_t __attribute__((aligned(8))) g5Dither;
static volatile uint64_t __attribute__((aligned(8))) g6Dither;
static volatile uint64_t __attribute__((aligned(8))) r5Dither;
static uint64_t __attribute__((aligned(8))) dither4[2]={
0x0103010301030103LL,
0x0200020002000200LL,};
static uint64_t __attribute__((aligned(8))) dither8[2]={
0x0602060206020602LL,
0x0004000400040004LL,};
#endif
#endif

/* RGB -> YUV coefficients, fixed point scaled by 1<<RGB2YUV_SHIFT.
   NOTE(review): the values look like ITU-R BT.601 studio-swing
   coefficients -- confirm before relying on that. */
#define RGB2YUV_SHIFT 8
#define BY ((int)( 0.098*(1<<RGB2YUV_SHIFT)+0.5))
#define BV ((int)(-0.071*(1<<RGB2YUV_SHIFT)+0.5))
#define BU ((int)( 0.439*(1<<RGB2YUV_SHIFT)+0.5))
#define GY ((int)( 0.504*(1<<RGB2YUV_SHIFT)+0.5))
#define GV ((int)(-0.368*(1<<RGB2YUV_SHIFT)+0.5))
#define GU ((int)(-0.291*(1<<RGB2YUV_SHIFT)+0.5))
#define RY ((int)( 0.257*(1<<RGB2YUV_SHIFT)+0.5))
#define RV ((int)( 0.439*(1<<RGB2YUV_SHIFT)+0.5))
#define RU ((int)(-0.148*(1<<RGB2YUV_SHIFT)+0.5))

//Note: we have C, MMX, MMX2 and 3DNOW versions; there is no 3DNOW+MMX2 one

/* Each flavour is produced by re-including rgb2rgb_template.c with the
   matching HAVE_* macros; RENAME() appends the flavour suffix to every
   function name. */

//Plain C versions
#undef HAVE_MMX
#undef HAVE_MMX2
#undef HAVE_3DNOW
#undef ARCH_X86
#undef HAVE_SSE2
#define RENAME(a) a ## _C
#include "rgb2rgb_template.c"
#ifdef CAN_COMPILE_X86_ASM
//MMX versions
#undef RENAME
#define HAVE_MMX
#undef HAVE_MMX2
#undef HAVE_3DNOW
#undef HAVE_SSE2
#define ARCH_X86
#define RENAME(a) a ## _MMX
#include "rgb2rgb_template.c"
//MMX2 versions
#undef RENAME
#define HAVE_MMX
#define HAVE_MMX2
#undef HAVE_3DNOW
#undef HAVE_SSE2
#define ARCH_X86
#define RENAME(a) a ## _MMX2
#include "rgb2rgb_template.c"
//3DNOW versions
#undef RENAME
#define HAVE_MMX
#undef HAVE_MMX2
#define HAVE_3DNOW
#undef HAVE_SSE2
#define ARCH_X86
#define RENAME(a) a ## _3DNow
#include "rgb2rgb_template.c"
#endif //CAN_COMPILE_X86_ASM
  128. void rgb24to32(const uint8_t *src,uint8_t *dst,unsigned src_size)
  129. {
  130. #ifdef CAN_COMPILE_X86_ASM
  131. // ordered per speed fasterst first
  132. if(gCpuCaps.hasMMX2)
  133. rgb24to32_MMX2(src, dst, src_size);
  134. else if(gCpuCaps.has3DNow)
  135. rgb24to32_3DNow(src, dst, src_size);
  136. else if(gCpuCaps.hasMMX)
  137. rgb24to32_MMX(src, dst, src_size);
  138. else
  139. #endif
  140. rgb24to32_C(src, dst, src_size);
  141. }
  142. void rgb15to24(const uint8_t *src,uint8_t *dst,unsigned src_size)
  143. {
  144. #ifdef CAN_COMPILE_X86_ASM
  145. // ordered per speed fasterst first
  146. if(gCpuCaps.hasMMX2)
  147. rgb15to24_MMX2(src, dst, src_size);
  148. else if(gCpuCaps.has3DNow)
  149. rgb15to24_3DNow(src, dst, src_size);
  150. else if(gCpuCaps.hasMMX)
  151. rgb15to24_MMX(src, dst, src_size);
  152. else
  153. #endif
  154. rgb15to24_C(src, dst, src_size);
  155. }
  156. void rgb16to24(const uint8_t *src,uint8_t *dst,unsigned src_size)
  157. {
  158. #ifdef CAN_COMPILE_X86_ASM
  159. // ordered per speed fasterst first
  160. if(gCpuCaps.hasMMX2)
  161. rgb16to24_MMX2(src, dst, src_size);
  162. else if(gCpuCaps.has3DNow)
  163. rgb16to24_3DNow(src, dst, src_size);
  164. else if(gCpuCaps.hasMMX)
  165. rgb16to24_MMX(src, dst, src_size);
  166. else
  167. #endif
  168. rgb16to24_C(src, dst, src_size);
  169. }
  170. void rgb15to32(const uint8_t *src,uint8_t *dst,unsigned src_size)
  171. {
  172. #ifdef CAN_COMPILE_X86_ASM
  173. // ordered per speed fasterst first
  174. if(gCpuCaps.hasMMX2)
  175. rgb15to32_MMX2(src, dst, src_size);
  176. else if(gCpuCaps.has3DNow)
  177. rgb15to32_3DNow(src, dst, src_size);
  178. else if(gCpuCaps.hasMMX)
  179. rgb15to32_MMX(src, dst, src_size);
  180. else
  181. #endif
  182. rgb15to32_C(src, dst, src_size);
  183. }
  184. void rgb16to32(const uint8_t *src,uint8_t *dst,unsigned src_size)
  185. {
  186. #ifdef CAN_COMPILE_X86_ASM
  187. // ordered per speed fasterst first
  188. if(gCpuCaps.hasMMX2)
  189. rgb16to32_MMX2(src, dst, src_size);
  190. else if(gCpuCaps.has3DNow)
  191. rgb16to32_3DNow(src, dst, src_size);
  192. else if(gCpuCaps.hasMMX)
  193. rgb16to32_MMX(src, dst, src_size);
  194. else
  195. #endif
  196. rgb16to32_C(src, dst, src_size);
  197. }
  198. void rgb32to24(const uint8_t *src,uint8_t *dst,unsigned src_size)
  199. {
  200. #ifdef CAN_COMPILE_X86_ASM
  201. // ordered per speed fasterst first
  202. if(gCpuCaps.hasMMX2)
  203. rgb32to24_MMX2(src, dst, src_size);
  204. else if(gCpuCaps.has3DNow)
  205. rgb32to24_3DNow(src, dst, src_size);
  206. else if(gCpuCaps.hasMMX)
  207. rgb32to24_MMX(src, dst, src_size);
  208. else
  209. #endif
  210. rgb32to24_C(src, dst, src_size);
  211. }
/*
 Original by Strepto/Astral
 ported to gcc & bugfixed: A'rpi
 MMX2, 3DNOW optimization by Nick Kurshev
 32-bit C version, and the and&add trick by Michael Niedermayer
*/
  218. void rgb15to16(const uint8_t *src,uint8_t *dst,unsigned src_size)
  219. {
  220. #ifdef CAN_COMPILE_X86_ASM
  221. // ordered per speed fasterst first
  222. if(gCpuCaps.hasMMX2)
  223. rgb15to16_MMX2(src, dst, src_size);
  224. else if(gCpuCaps.has3DNow)
  225. rgb15to16_3DNow(src, dst, src_size);
  226. else if(gCpuCaps.hasMMX)
  227. rgb15to16_MMX(src, dst, src_size);
  228. else
  229. #endif
  230. rgb15to16_C(src, dst, src_size);
  231. }
  232. void rgb16to15(const uint8_t *src,uint8_t *dst,unsigned src_size)
  233. {
  234. #ifdef CAN_COMPILE_X86_ASM
  235. // ordered per speed fasterst first
  236. if(gCpuCaps.hasMMX2)
  237. rgb16to15_MMX2(src, dst, src_size);
  238. else if(gCpuCaps.has3DNow)
  239. rgb16to15_3DNow(src, dst, src_size);
  240. else if(gCpuCaps.hasMMX)
  241. rgb16to15_MMX(src, dst, src_size);
  242. else
  243. #endif
  244. rgb16to15_C(src, dst, src_size);
  245. }
/**
 * Palette is assumed to contain bgr32
 */
  249. void palette8torgb32(const uint8_t *src, uint8_t *dst, unsigned num_pixels, const uint8_t *palette)
  250. {
  251. unsigned i;
  252. /*
  253. for(i=0; i<num_pixels; i++)
  254. ((unsigned *)dst)[i] = ((unsigned *)palette)[ src[i] ];
  255. */
  256. for(i=0; i<num_pixels; i++)
  257. {
  258. //FIXME slow?
  259. dst[0]= palette[ src[i]*4+2 ];
  260. dst[1]= palette[ src[i]*4+1 ];
  261. dst[2]= palette[ src[i]*4+0 ];
  262. // dst[3]= 0; /* do we need this cleansing? */
  263. dst+= 4;
  264. }
  265. }
  266. void palette8tobgr32(const uint8_t *src, uint8_t *dst, unsigned num_pixels, const uint8_t *palette)
  267. {
  268. unsigned i;
  269. for(i=0; i<num_pixels; i++)
  270. {
  271. //FIXME slow?
  272. dst[0]= palette[ src[i]*4+0 ];
  273. dst[1]= palette[ src[i]*4+1 ];
  274. dst[2]= palette[ src[i]*4+2 ];
  275. // dst[3]= 0; /* do we need this cleansing? */
  276. dst+= 4;
  277. }
  278. }
/**
 * Palette is assumed to contain bgr32
 */
  282. void palette8torgb24(const uint8_t *src, uint8_t *dst, unsigned num_pixels, const uint8_t *palette)
  283. {
  284. unsigned i;
  285. /*
  286. writes 1 byte o much and might cause alignment issues on some architectures?
  287. for(i=0; i<num_pixels; i++)
  288. ((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[ src[i] ];
  289. */
  290. for(i=0; i<num_pixels; i++)
  291. {
  292. //FIXME slow?
  293. dst[0]= palette[ src[i]*4+2 ];
  294. dst[1]= palette[ src[i]*4+1 ];
  295. dst[2]= palette[ src[i]*4+0 ];
  296. dst+= 3;
  297. }
  298. }
  299. void palette8tobgr24(const uint8_t *src, uint8_t *dst, unsigned num_pixels, const uint8_t *palette)
  300. {
  301. unsigned i;
  302. /*
  303. writes 1 byte o much and might cause alignment issues on some architectures?
  304. for(i=0; i<num_pixels; i++)
  305. ((unsigned *)(&dst[i*3])) = ((unsigned *)palette)[ src[i] ];
  306. */
  307. for(i=0; i<num_pixels; i++)
  308. {
  309. //FIXME slow?
  310. dst[0]= palette[ src[i]*4+0 ];
  311. dst[1]= palette[ src[i]*4+1 ];
  312. dst[2]= palette[ src[i]*4+2 ];
  313. dst+= 3;
  314. }
  315. }
  316. void bgr24torgb24(const uint8_t *src, uint8_t *dst, unsigned src_size)
  317. {
  318. #ifdef CAN_COMPILE_X86_ASM
  319. // ordered per speed fasterst first
  320. if(gCpuCaps.hasMMX2)
  321. bgr24torgb24_MMX2(src, dst, src_size);
  322. else if(gCpuCaps.has3DNow)
  323. bgr24torgb24_3DNow(src, dst, src_size);
  324. else if(gCpuCaps.hasMMX)
  325. bgr24torgb24_MMX(src, dst, src_size);
  326. else
  327. bgr24torgb24_C(src, dst, src_size);
  328. #else
  329. bgr24torgb24_C(src, dst, src_size);
  330. #endif
  331. }
  332. void rgb32to16(const uint8_t *src, uint8_t *dst, unsigned src_size)
  333. {
  334. #ifdef CAN_COMPILE_X86_ASM
  335. // ordered per speed fasterst first
  336. if(gCpuCaps.hasMMX2)
  337. rgb32to16_MMX2(src, dst, src_size);
  338. else if(gCpuCaps.has3DNow)
  339. rgb32to16_3DNow(src, dst, src_size);
  340. else if(gCpuCaps.hasMMX)
  341. rgb32to16_MMX(src, dst, src_size);
  342. else
  343. #endif
  344. rgb32to16_C(src, dst, src_size);
  345. }
  346. void rgb32to15(const uint8_t *src, uint8_t *dst, unsigned src_size)
  347. {
  348. #ifdef CAN_COMPILE_X86_ASM
  349. // ordered per speed fasterst first
  350. if(gCpuCaps.hasMMX2)
  351. rgb32to15_MMX2(src, dst, src_size);
  352. else if(gCpuCaps.has3DNow)
  353. rgb32to15_3DNow(src, dst, src_size);
  354. else if(gCpuCaps.hasMMX)
  355. rgb32to15_MMX(src, dst, src_size);
  356. else
  357. #endif
  358. rgb32to15_C(src, dst, src_size);
  359. }
  360. void rgb24to16(const uint8_t *src, uint8_t *dst, unsigned src_size)
  361. {
  362. #ifdef CAN_COMPILE_X86_ASM
  363. // ordered per speed fasterst first
  364. if(gCpuCaps.hasMMX2)
  365. rgb24to16_MMX2(src, dst, src_size);
  366. else if(gCpuCaps.has3DNow)
  367. rgb24to16_3DNow(src, dst, src_size);
  368. else if(gCpuCaps.hasMMX)
  369. rgb24to16_MMX(src, dst, src_size);
  370. else
  371. #endif
  372. rgb24to16_C(src, dst, src_size);
  373. }
  374. void rgb24to15(const uint8_t *src, uint8_t *dst, unsigned src_size)
  375. {
  376. #ifdef CAN_COMPILE_X86_ASM
  377. // ordered per speed fasterst first
  378. if(gCpuCaps.hasMMX2)
  379. rgb24to15_MMX2(src, dst, src_size);
  380. else if(gCpuCaps.has3DNow)
  381. rgb24to15_3DNow(src, dst, src_size);
  382. else if(gCpuCaps.hasMMX)
  383. rgb24to15_MMX(src, dst, src_size);
  384. else
  385. #endif
  386. rgb24to15_C(src, dst, src_size);
  387. }
  388. /**
  389. * Palette is assumed to contain bgr16, see rgb32to16 to convert the palette
  390. */
  391. void palette8torgb16(const uint8_t *src, uint8_t *dst, unsigned num_pixels, const uint8_t *palette)
  392. {
  393. unsigned i;
  394. for(i=0; i<num_pixels; i++)
  395. ((uint16_t *)dst)[i] = ((uint16_t *)palette)[ src[i] ];
  396. }
  397. void palette8tobgr16(const uint8_t *src, uint8_t *dst, unsigned num_pixels, const uint8_t *palette)
  398. {
  399. unsigned i;
  400. for(i=0; i<num_pixels; i++)
  401. ((uint16_t *)dst)[i] = bswap_16(((uint16_t *)palette)[ src[i] ]);
  402. }
/**
 * Palette is assumed to contain bgr15, see rgb32to15 to convert the palette
 */
  406. void palette8torgb15(const uint8_t *src, uint8_t *dst, unsigned num_pixels, const uint8_t *palette)
  407. {
  408. unsigned i;
  409. for(i=0; i<num_pixels; i++)
  410. ((uint16_t *)dst)[i] = ((uint16_t *)palette)[ src[i] ];
  411. }
  412. void palette8tobgr15(const uint8_t *src, uint8_t *dst, unsigned num_pixels, const uint8_t *palette)
  413. {
  414. unsigned i;
  415. for(i=0; i<num_pixels; i++)
  416. ((uint16_t *)dst)[i] = bswap_16(((uint16_t *)palette)[ src[i] ]);
  417. }
  418. void rgb32tobgr32(const uint8_t *src, uint8_t *dst, unsigned int src_size)
  419. {
  420. #ifdef CAN_COMPILE_X86_ASM
  421. // ordered per speed fasterst first
  422. if(gCpuCaps.hasMMX2)
  423. rgb32tobgr32_MMX2(src, dst, src_size);
  424. else if(gCpuCaps.has3DNow)
  425. rgb32tobgr32_3DNow(src, dst, src_size);
  426. else if(gCpuCaps.hasMMX)
  427. rgb32tobgr32_MMX(src, dst, src_size);
  428. else
  429. #endif
  430. rgb32tobgr32_C(src, dst, src_size);
  431. }
  432. void rgb32tobgr24(const uint8_t *src, uint8_t *dst, unsigned int src_size)
  433. {
  434. unsigned i;
  435. unsigned num_pixels = src_size >> 2;
  436. for(i=0; i<num_pixels; i++)
  437. {
  438. dst[3*i + 0] = src[4*i + 2];
  439. dst[3*i + 1] = src[4*i + 1];
  440. dst[3*i + 2] = src[4*i + 0];
  441. }
  442. }
  443. void rgb32tobgr16(const uint8_t *src, uint8_t *dst, unsigned int src_size)
  444. {
  445. #ifdef CAN_COMPILE_X86_ASM
  446. // ordered per speed fasterst first
  447. if(gCpuCaps.hasMMX2)
  448. rgb32tobgr16_MMX2(src, dst, src_size);
  449. else if(gCpuCaps.has3DNow)
  450. rgb32tobgr16_3DNow(src, dst, src_size);
  451. else if(gCpuCaps.hasMMX)
  452. rgb32tobgr16_MMX(src, dst, src_size);
  453. else
  454. #endif
  455. rgb32tobgr16_C(src, dst, src_size);
  456. }
  457. void rgb32tobgr15(const uint8_t *src, uint8_t *dst, unsigned int src_size)
  458. {
  459. #ifdef CAN_COMPILE_X86_ASM
  460. // ordered per speed fasterst first
  461. if(gCpuCaps.hasMMX2)
  462. rgb32tobgr15_MMX2(src, dst, src_size);
  463. else if(gCpuCaps.has3DNow)
  464. rgb32tobgr15_3DNow(src, dst, src_size);
  465. else if(gCpuCaps.hasMMX)
  466. rgb32tobgr15_MMX(src, dst, src_size);
  467. else
  468. #endif
  469. rgb32tobgr15_C(src, dst, src_size);
  470. }
  471. void rgb24tobgr32(const uint8_t *src, uint8_t *dst, unsigned int src_size)
  472. {
  473. unsigned i;
  474. for(i=0; 3*i<src_size; i++)
  475. {
  476. dst[4*i + 0] = src[3*i + 2];
  477. dst[4*i + 1] = src[3*i + 1];
  478. dst[4*i + 2] = src[3*i + 0];
  479. dst[4*i + 3] = 0;
  480. }
  481. }
  482. void rgb24tobgr24(const uint8_t *src, uint8_t *dst, unsigned int src_size)
  483. {
  484. #ifdef CAN_COMPILE_X86_ASM
  485. // ordered per speed fasterst first
  486. if(gCpuCaps.hasMMX2)
  487. rgb24tobgr24_MMX2(src, dst, src_size);
  488. else if(gCpuCaps.has3DNow)
  489. rgb24tobgr24_3DNow(src, dst, src_size);
  490. else if(gCpuCaps.hasMMX)
  491. rgb24tobgr24_MMX(src, dst, src_size);
  492. else
  493. #endif
  494. rgb24tobgr24_C(src, dst, src_size);
  495. }
  496. void rgb24tobgr16(const uint8_t *src, uint8_t *dst, unsigned int src_size)
  497. {
  498. #ifdef CAN_COMPILE_X86_ASM
  499. // ordered per speed fasterst first
  500. if(gCpuCaps.hasMMX2)
  501. rgb24tobgr16_MMX2(src, dst, src_size);
  502. else if(gCpuCaps.has3DNow)
  503. rgb24tobgr16_3DNow(src, dst, src_size);
  504. else if(gCpuCaps.hasMMX)
  505. rgb24tobgr16_MMX(src, dst, src_size);
  506. else
  507. #endif
  508. rgb24tobgr16_C(src, dst, src_size);
  509. }
  510. void rgb24tobgr15(const uint8_t *src, uint8_t *dst, unsigned int src_size)
  511. {
  512. #ifdef CAN_COMPILE_X86_ASM
  513. // ordered per speed fasterst first
  514. if(gCpuCaps.hasMMX2)
  515. rgb24tobgr15_MMX2(src, dst, src_size);
  516. else if(gCpuCaps.has3DNow)
  517. rgb24tobgr15_3DNow(src, dst, src_size);
  518. else if(gCpuCaps.hasMMX)
  519. rgb24tobgr15_MMX(src, dst, src_size);
  520. else
  521. #endif
  522. rgb24tobgr15_C(src, dst, src_size);
  523. }
  524. void rgb16tobgr32(const uint8_t *src, uint8_t *dst, unsigned int src_size)
  525. {
  526. const uint16_t *end;
  527. uint8_t *d = (uint8_t *)dst;
  528. const uint16_t *s = (uint16_t *)src;
  529. end = s + src_size/2;
  530. while(s < end)
  531. {
  532. register uint16_t bgr;
  533. bgr = *s++;
  534. *d++ = (bgr&0xF800)>>8;
  535. *d++ = (bgr&0x7E0)>>3;
  536. *d++ = (bgr&0x1F)<<3;
  537. *d++ = 0;
  538. }
  539. }
  540. void rgb16tobgr24(const uint8_t *src, uint8_t *dst, unsigned int src_size)
  541. {
  542. const uint16_t *end;
  543. uint8_t *d = (uint8_t *)dst;
  544. const uint16_t *s = (const uint16_t *)src;
  545. end = s + src_size/2;
  546. while(s < end)
  547. {
  548. register uint16_t bgr;
  549. bgr = *s++;
  550. *d++ = (bgr&0xF800)>>8;
  551. *d++ = (bgr&0x7E0)>>3;
  552. *d++ = (bgr&0x1F)<<3;
  553. }
  554. }
  555. void rgb16tobgr16(const uint8_t *src, uint8_t *dst, unsigned int src_size)
  556. {
  557. unsigned i;
  558. unsigned num_pixels = src_size >> 1;
  559. for(i=0; i<num_pixels; i++)
  560. {
  561. unsigned b,g,r;
  562. register uint16_t rgb;
  563. rgb = src[2*i];
  564. r = rgb&0x1F;
  565. g = (rgb&0x7E0)>>5;
  566. b = (rgb&0xF800)>>11;
  567. dst[2*i] = (b&0x1F) | ((g&0x3F)<<5) | ((r&0x1F)<<11);
  568. }
  569. }
  570. void rgb16tobgr15(const uint8_t *src, uint8_t *dst, unsigned int src_size)
  571. {
  572. unsigned i;
  573. unsigned num_pixels = src_size >> 1;
  574. for(i=0; i<num_pixels; i++)
  575. {
  576. unsigned b,g,r;
  577. register uint16_t rgb;
  578. rgb = src[2*i];
  579. r = rgb&0x1F;
  580. g = (rgb&0x7E0)>>5;
  581. b = (rgb&0xF800)>>11;
  582. dst[2*i] = (b&0x1F) | ((g&0x1F)<<5) | ((r&0x1F)<<10);
  583. }
  584. }
  585. void rgb15tobgr32(const uint8_t *src, uint8_t *dst, unsigned int src_size)
  586. {
  587. const uint16_t *end;
  588. uint8_t *d = (uint8_t *)dst;
  589. const uint16_t *s = (const uint16_t *)src;
  590. end = s + src_size/2;
  591. while(s < end)
  592. {
  593. register uint16_t bgr;
  594. bgr = *s++;
  595. *d++ = (bgr&0x7C00)>>7;
  596. *d++ = (bgr&0x3E0)>>2;
  597. *d++ = (bgr&0x1F)<<3;
  598. *d++ = 0;
  599. }
  600. }
  601. void rgb15tobgr24(const uint8_t *src, uint8_t *dst, unsigned int src_size)
  602. {
  603. const uint16_t *end;
  604. uint8_t *d = (uint8_t *)dst;
  605. const uint16_t *s = (uint16_t *)src;
  606. end = s + src_size/2;
  607. while(s < end)
  608. {
  609. register uint16_t bgr;
  610. bgr = *s++;
  611. *d++ = (bgr&0x7C00)>>7;
  612. *d++ = (bgr&0x3E0)>>2;
  613. *d++ = (bgr&0x1F)<<3;
  614. }
  615. }
  616. void rgb15tobgr16(const uint8_t *src, uint8_t *dst, unsigned int src_size)
  617. {
  618. unsigned i;
  619. unsigned num_pixels = src_size >> 1;
  620. for(i=0; i<num_pixels; i++)
  621. {
  622. unsigned b,g,r;
  623. register uint16_t rgb;
  624. rgb = src[2*i];
  625. r = rgb&0x1F;
  626. g = (rgb&0x3E0)>>5;
  627. b = (rgb&0x7C00)>>10;
  628. dst[2*i] = (b&0x1F) | ((g&0x3F)<<5) | ((r&0x1F)<<11);
  629. }
  630. }
  631. void rgb15tobgr15(const uint8_t *src, uint8_t *dst, unsigned int src_size)
  632. {
  633. unsigned i;
  634. unsigned num_pixels = src_size >> 1;
  635. for(i=0; i<num_pixels; i++)
  636. {
  637. unsigned b,g,r;
  638. register uint16_t rgb;
  639. rgb = src[2*i];
  640. r = rgb&0x1F;
  641. g = (rgb&0x3E0)>>5;
  642. b = (rgb&0x7C00)>>10;
  643. dst[2*i] = (b&0x1F) | ((g&0x1F)<<5) | ((r&0x1F)<<10);
  644. }
  645. }
/**
 * Swap the R and B fields of packed 8 bit (3-3-2) pixels, one byte per pixel.
 * src_size is the pixel count (1 byte each).
 */
void rgb8tobgr8(const uint8_t *src, uint8_t *dst, unsigned int src_size)
{
	unsigned i;
	unsigned num_pixels = src_size;
	for(i=0; i<num_pixels; i++)
	{
		unsigned b,g,r;
		register uint8_t rgb;
		rgb = src[i];
		r = (rgb&0x07);		/* low 3 bits */
		g = (rgb&0x38)>>3;	/* middle 3 bits */
		b = (rgb&0xC0)>>6;	/* top 2 bits */
		/* NOTE(review): (b<<1) widens the 2 bit b into the 3 bit slot, but
		   (r&0x03) keeps the LOW 2 of r's 3 bits ((r>>1) would keep the
		   high bits) -- looks suspicious, confirm the intended bit layout
		   before changing. */
		dst[i] = ((b<<1)&0x07) | ((g&0x07)<<3) | ((r&0x03)<<6);
	}
}
  661. /**
  662. *
  663. * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
  664. * problem for anyone then tell me, and ill fix it)
  665. */
  666. void yv12toyuy2(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
  667. unsigned int width, unsigned int height,
  668. int lumStride, int chromStride, int dstStride)
  669. {
  670. #ifdef CAN_COMPILE_X86_ASM
  671. // ordered per speed fasterst first
  672. if(gCpuCaps.hasMMX2)
  673. yv12toyuy2_MMX2(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride);
  674. else if(gCpuCaps.has3DNow)
  675. yv12toyuy2_3DNow(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride);
  676. else if(gCpuCaps.hasMMX)
  677. yv12toyuy2_MMX(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride);
  678. else
  679. #endif
  680. yv12toyuy2_C(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride);
  681. }
  682. /**
  683. *
  684. * width should be a multiple of 16
  685. */
  686. void yuv422ptoyuy2(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc, uint8_t *dst,
  687. unsigned int width, unsigned int height,
  688. int lumStride, int chromStride, int dstStride)
  689. {
  690. #ifdef CAN_COMPILE_X86_ASM
  691. // ordered per speed fasterst first
  692. if(gCpuCaps.hasMMX2)
  693. yuv422ptoyuy2_MMX2(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride);
  694. else if(gCpuCaps.has3DNow)
  695. yuv422ptoyuy2_3DNow(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride);
  696. else if(gCpuCaps.hasMMX)
  697. yuv422ptoyuy2_MMX(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride);
  698. else
  699. #endif
  700. yuv422ptoyuy2_C(ysrc, usrc, vsrc, dst, width, height, lumStride, chromStride, dstStride);
  701. }
  702. /**
  703. *
  704. * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
  705. * problem for anyone then tell me, and ill fix it)
  706. */
  707. void yuy2toyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
  708. unsigned int width, unsigned int height,
  709. int lumStride, int chromStride, int srcStride)
  710. {
  711. #ifdef CAN_COMPILE_X86_ASM
  712. // ordered per speed fasterst first
  713. if(gCpuCaps.hasMMX2)
  714. yuy2toyv12_MMX2(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
  715. else if(gCpuCaps.has3DNow)
  716. yuy2toyv12_3DNow(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
  717. else if(gCpuCaps.hasMMX)
  718. yuy2toyv12_MMX(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
  719. else
  720. #endif
  721. yuy2toyv12_C(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
  722. }
/**
 *
 * height should be a multiple of 2 and width should be a multiple of 16 (if this is a
 * problem for anyone then tell me, and I'll fix it)
 * chrominance data is only taken from every second line, others are ignored FIXME write HQ version
 */
  729. void uyvytoyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
  730. unsigned int width, unsigned int height,
  731. int lumStride, int chromStride, int srcStride)
  732. {
  733. #ifdef CAN_COMPILE_X86_ASM
  734. // ordered per speed fasterst first
  735. if(gCpuCaps.hasMMX2)
  736. uyvytoyv12_MMX2(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
  737. else if(gCpuCaps.has3DNow)
  738. uyvytoyv12_3DNow(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
  739. else if(gCpuCaps.hasMMX)
  740. uyvytoyv12_MMX(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
  741. else
  742. uyvytoyv12_C(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
  743. #else
  744. uyvytoyv12_C(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
  745. #endif
  746. }
  747. void yvu9toyv12(const uint8_t *ysrc, const uint8_t *usrc, const uint8_t *vsrc,
  748. uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
  749. unsigned int width, unsigned int height,
  750. int lumStride, int chromStride)
  751. {
  752. #ifdef CAN_COMPILE_X86_ASM
  753. // ordered per speed fasterst first
  754. if(gCpuCaps.hasMMX2)
  755. yvu9toyv12_MMX2(ysrc, usrc, vsrc, ydst, udst, vdst, width, height, lumStride, chromStride);
  756. else if(gCpuCaps.has3DNow)
  757. yvu9toyv12_3DNow(ysrc, usrc, vsrc, ydst, udst, vdst, width, height, lumStride, chromStride);
  758. else if(gCpuCaps.hasMMX)
  759. yvu9toyv12_MMX(ysrc, usrc, vsrc, ydst, udst, vdst, width, height, lumStride, chromStride);
  760. else
  761. yvu9toyv12_C(ysrc, usrc, vsrc, ydst, udst, vdst, width, height, lumStride, chromStride);
  762. #else
  763. yvu9toyv12_C(ysrc, usrc, vsrc, ydst, udst, vdst, width, height, lumStride, chromStride);
  764. #endif
  765. }
  766. void planar2x(const uint8_t *src, uint8_t *dst, int width, int height, int srcStride, int dstStride)
  767. {
  768. #ifdef CAN_COMPILE_X86_ASM
  769. // ordered per speed fasterst first
  770. if(gCpuCaps.hasMMX2)
  771. planar2x_MMX2(src, dst, width, height, srcStride, dstStride);
  772. else if(gCpuCaps.has3DNow)
  773. planar2x_3DNow(src, dst, width, height, srcStride, dstStride);
  774. else
  775. #endif
  776. planar2x_C(src, dst, width, height, srcStride, dstStride);
  777. }
/**
 *
 * height should be a multiple of 2 and width should be a multiple of 2 (if this is a
 * problem for anyone then tell me, and I'll fix it)
 * chrominance data is only taken from every second line, others are ignored FIXME write HQ version
 */
  784. void rgb24toyv12(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst,
  785. unsigned int width, unsigned int height,
  786. int lumStride, int chromStride, int srcStride)
  787. {
  788. #ifdef CAN_COMPILE_X86_ASM
  789. // ordered per speed fasterst first
  790. if(gCpuCaps.hasMMX2)
  791. rgb24toyv12_MMX2(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
  792. else if(gCpuCaps.has3DNow)
  793. rgb24toyv12_3DNow(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
  794. else if(gCpuCaps.hasMMX)
  795. rgb24toyv12_MMX(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
  796. else
  797. #endif
  798. rgb24toyv12_C(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride);
  799. }
  800. void interleaveBytes(uint8_t *src1, uint8_t *src2, uint8_t *dst,
  801. unsigned width, unsigned height, int src1Stride,
  802. int src2Stride, int dstStride)
  803. {
  804. #ifdef CAN_COMPILE_X86_ASM
  805. // ordered per speed fasterst first
  806. if(gCpuCaps.hasMMX2)
  807. interleaveBytes_MMX2(src1, src2, dst, width, height, src1Stride, src2Stride, dstStride);
  808. else if(gCpuCaps.has3DNow)
  809. interleaveBytes_3DNow(src1, src2, dst, width, height, src1Stride, src2Stride, dstStride);
  810. else if(gCpuCaps.hasMMX)
  811. interleaveBytes_MMX(src1, src2, dst, width, height, src1Stride, src2Stride, dstStride);
  812. else
  813. #endif
  814. interleaveBytes_C(src1, src2, dst, width, height, src1Stride, src2Stride, dstStride);
  815. }
  816. void vu9_to_vu12(const uint8_t *src1, const uint8_t *src2,
  817. uint8_t *dst1, uint8_t *dst2,
  818. unsigned width, unsigned height,
  819. int srcStride1, int srcStride2,
  820. int dstStride1, int dstStride2)
  821. {
  822. #ifdef CAN_COMPILE_X86_ASM
  823. if(gCpuCaps.hasMMX2)
  824. vu9_to_vu12_MMX2(src1, src2, dst1, dst2, width, height, srcStride1, srcStride2, dstStride1, dstStride2);
  825. else if(gCpuCaps.has3DNow)
  826. vu9_to_vu12_3DNow(src1, src2, dst1, dst2, width, height, srcStride1, srcStride2, dstStride1, dstStride2);
  827. else if(gCpuCaps.hasMMX)
  828. vu9_to_vu12_MMX(src1, src2, dst1, dst2, width, height, srcStride1, srcStride2, dstStride1, dstStride2);
  829. else
  830. #endif
  831. vu9_to_vu12_C(src1, src2, dst1, dst2, width, height, srcStride1, srcStride2, dstStride1, dstStride2);
  832. }
  833. void yvu9_to_yuy2(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
  834. uint8_t *dst,
  835. unsigned width, unsigned height,
  836. int srcStride1, int srcStride2,
  837. int srcStride3, int dstStride)
  838. {
  839. #ifdef CAN_COMPILE_X86_ASM
  840. if(gCpuCaps.hasMMX2)
  841. yvu9_to_yuy2_MMX2(src1, src2, src3, dst, width, height, srcStride1, srcStride2, srcStride3, dstStride);
  842. else if(gCpuCaps.has3DNow)
  843. yvu9_to_yuy2_3DNow(src1, src2, src3, dst, width, height, srcStride1, srcStride2, srcStride3, dstStride);
  844. else if(gCpuCaps.hasMMX)
  845. yvu9_to_yuy2_MMX(src1, src2, src3, dst, width, height, srcStride1, srcStride2, srcStride3, dstStride);
  846. else
  847. #endif
  848. yvu9_to_yuy2_C(src1, src2, src3, dst, width, height, srcStride1, srcStride2, srcStride3, dstStride);
  849. }