You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

792 lines
22KB

  1. // Software scaling and colorspace conversion routines for MPlayer
  2. // Original C implementation by A'rpi/ESP-team <arpi@thot.banki.hu>
  3. // current version mostly by Michael Niedermayer (michaelni@gmx.at)
  4. #include <inttypes.h>
  5. #include "../config.h"
  6. //#undef HAVE_MMX2
  7. //#undef HAVE_MMX
  8. //#undef ARCH_X86
  9. #define DITHER16BPP
  10. #define ALT_ERROR
  11. #define RET 0xC3 //near return opcode
  12. /*
  13. NOTES
  14. known BUGS with known cause (no bugreports please!)
  15. line at the right (c,asm and mmx2)
  16. code reads 1 sample too much (might cause a sig11)
  17. TODO
  18. check alignment of everything
  19. */
  20. static uint64_t yCoeff= 0x2568256825682568LL;
  21. static uint64_t ubCoeff= 0x3343334333433343LL;
  22. static uint64_t vrCoeff= 0x40cf40cf40cf40cfLL;
  23. static uint64_t ugCoeff= 0xE5E2E5E2E5E2E5E2LL;
  24. static uint64_t vgCoeff= 0xF36EF36EF36EF36ELL;
  25. static uint64_t w80= 0x0080008000800080LL;
  26. static uint64_t w10= 0x0010001000100010LL;
  27. static uint64_t bm00000111=0x0000000000FFFFFFLL;
  28. static uint64_t bm11111000=0xFFFFFFFFFF000000LL;
  29. static uint64_t b16Dither= 0x0004000400040004LL;
  30. static uint64_t b16Dither1=0x0004000400040004LL;
  31. static uint64_t b16Dither2=0x0602060206020602LL;
  32. static uint64_t g16Dither= 0x0002000200020002LL;
  33. static uint64_t g16Dither1=0x0002000200020002LL;
  34. static uint64_t g16Dither2=0x0301030103010301LL;
  35. static uint64_t b16Mask= 0x001F001F001F001FLL;
  36. static uint64_t g16Mask= 0x07E007E007E007E0LL;
  37. static uint64_t r16Mask= 0xF800F800F800F800LL;
  38. static uint64_t temp0;
  39. // temporary storage for 4 yuv lines:
  40. // 16bit for now (mmx likes it more compact)
  41. static uint16_t pix_buf_y[4][2048];
  42. static uint16_t pix_buf_uv[2][2048*2];
  43. // clipping helper table for C implementations:
  44. static unsigned char clip_table[768];
  45. // yuv->rgb conversion tables:
  46. static int yuvtab_2568[256];
  47. static int yuvtab_3343[256];
  48. static int yuvtab_0c92[256];
  49. static int yuvtab_1a1e[256];
  50. static int yuvtab_40cf[256];
  51. static uint8_t funnyYCode[10000];
  52. static uint8_t funnyUVCode[10000];
  53. // *** bilinear scaling and yuv->rgb conversion of yv12 slices:
  54. // *** Note: it's called multiple times while decoding a frame, first time y==0
  55. // *** Designed to upscale, but may work for downscale too.
  56. // s_xinc = (src_width << 8) / dst_width
  57. // s_yinc = (src_height << 16) / dst_height
  58. void SwScale_YV12slice_brg24(unsigned char* srcptr[],int stride[], int y, int h,
  59. unsigned char* dstptr, int dststride, int dstw, int dstbpp,
  60. unsigned int s_xinc,unsigned int s_yinc){
  61. // scaling factors:
  62. //static int s_yinc=(vo_dga_src_height<<16)/vo_dga_vp_height;
  63. //static int s_xinc=(vo_dga_src_width<<8)/vo_dga_vp_width;
  64. unsigned int s_xinc2;
  65. static int s_srcypos; // points to the dst Pixels center in the source (0 is the center of pixel 0,0 in src)
  66. static int s_ypos;
  67. // last horzontally interpolated lines, used to avoid unnecessary calculations
  68. static int s_last_ypos;
  69. static int s_last_y1pos;
  70. static int static_dstw;
  71. #ifdef HAVE_MMX2
  72. // used to detect a horizontal size change
  73. static int old_dstw= -1;
  74. static int old_s_xinc= -1;
  75. // difference between the requested xinc and the required one for the mmx2 routine
  76. static int s_xinc_diff=0;
  77. static int s_xinc2_diff=0;
  78. #endif
  79. int canMMX2BeUsed;
  80. // we need that precission at least for the mmx2 code
  81. s_xinc*= 256;
  82. s_xinc2=s_xinc>>1;
  83. canMMX2BeUsed= (s_xinc <= 0x10000 && (dstw&31)==0) ? 1 : 0;
  84. #ifdef HAVE_MMX2
  85. if(canMMX2BeUsed)
  86. {
  87. s_xinc+= s_xinc_diff;
  88. s_xinc2+= s_xinc2_diff;
  89. }
  90. #endif
  91. // force calculation of the horizontal interpolation of the first line
  92. s_last_ypos=-99;
  93. s_last_y1pos=-99;
  94. if(y==0){
  95. s_srcypos= s_yinc/2 - 0x8000;
  96. s_ypos=0;
  97. #ifdef HAVE_MMX2
  98. // cant downscale !!!
  99. if((old_s_xinc != s_xinc || old_dstw!=dstw) && canMMX2BeUsed)
  100. {
  101. uint8_t *fragment;
  102. int imm8OfPShufW1;
  103. int imm8OfPShufW2;
  104. int fragmentLength;
  105. int xpos, xx, xalpha, i;
  106. old_s_xinc= s_xinc;
  107. old_dstw= dstw;
  108. static_dstw= dstw;
  109. // create an optimized horizontal scaling routine
  110. //code fragment
  111. asm volatile(
  112. "jmp 9f \n\t"
  113. // Begin
  114. "0: \n\t"
  115. "movq (%%esi), %%mm0 \n\t" //FIXME Alignment
  116. "movq %%mm0, %%mm1 \n\t"
  117. "psrlq $8, %%mm0 \n\t"
  118. "punpcklbw %%mm7, %%mm1 \n\t"
  119. "movq %%mm2, %%mm3 \n\t"
  120. "punpcklbw %%mm7, %%mm0 \n\t"
  121. "addw %%bx, %%cx \n\t" //2*xalpha += (4*s_xinc)&0xFFFF
  122. "pshufw $0xFF, %%mm1, %%mm1 \n\t"
  123. "1: \n\t"
  124. "adcl %%edx, %%esi \n\t" //xx+= (4*s_xinc)>>16 + carry
  125. "pshufw $0xFF, %%mm0, %%mm0 \n\t"
  126. "2: \n\t"
  127. "psrlw $9, %%mm3 \n\t"
  128. "psubw %%mm1, %%mm0 \n\t"
  129. "pmullw %%mm3, %%mm0 \n\t"
  130. "paddw %%mm6, %%mm2 \n\t" // 2*alpha += xpos&0xFFFF
  131. "psllw $7, %%mm1 \n\t"
  132. "paddw %%mm1, %%mm0 \n\t"
  133. "movq %%mm0, (%%edi, %%eax) \n\t"
  134. "addl $8, %%eax \n\t"
  135. // End
  136. "9: \n\t"
  137. // "int $3\n\t"
  138. "leal 0b, %0 \n\t"
  139. "leal 1b, %1 \n\t"
  140. "leal 2b, %2 \n\t"
  141. "decl %1 \n\t"
  142. "decl %2 \n\t"
  143. "subl %0, %1 \n\t"
  144. "subl %0, %2 \n\t"
  145. "leal 9b, %3 \n\t"
  146. "subl %0, %3 \n\t"
  147. :"=r" (fragment), "=r" (imm8OfPShufW1), "=r" (imm8OfPShufW2),
  148. "=r" (fragmentLength)
  149. );
  150. xpos= xx=xalpha= 0;
  151. /* choose xinc so that all 8 parts fit exactly
  152. Note: we cannot use just 1 part because it would not fit in the code cache */
  153. s_xinc2_diff= -((((s_xinc2*(dstw/8))&0xFFFF))/(dstw/8))+10;
  154. // s_xinc_diff= -((((s_xinc*(dstw/8))&0xFFFF))/(dstw/8));
  155. #ifdef ALT_ERROR
  156. s_xinc2_diff+= ((0x10000/(dstw/8)));
  157. #endif
  158. s_xinc_diff= s_xinc2_diff*2;
  159. s_xinc2+= s_xinc2_diff;
  160. s_xinc+= s_xinc_diff;
  161. old_s_xinc= s_xinc;
  162. for(i=0; i<dstw/8; i++)
  163. {
  164. int xx=xpos>>16;
  165. if((i&3) == 0)
  166. {
  167. int a=0;
  168. int b=((xpos+s_xinc)>>16) - xx;
  169. int c=((xpos+s_xinc*2)>>16) - xx;
  170. int d=((xpos+s_xinc*3)>>16) - xx;
  171. memcpy(funnyYCode + fragmentLength*i/4, fragment, fragmentLength);
  172. funnyYCode[fragmentLength*i/4 + imm8OfPShufW1]=
  173. funnyYCode[fragmentLength*i/4 + imm8OfPShufW2]=
  174. a | (b<<2) | (c<<4) | (d<<6);
  175. funnyYCode[fragmentLength*(i+4)/4]= RET;
  176. }
  177. xpos+=s_xinc;
  178. }
  179. xpos= xx=xalpha= 0;
  180. //FIXME choose size and or xinc so that they fit exactly
  181. for(i=0; i<dstw/8; i++)
  182. {
  183. int xx=xpos>>16;
  184. if((i&3) == 0)
  185. {
  186. int a=0;
  187. int b=((xpos+s_xinc2)>>16) - xx;
  188. int c=((xpos+s_xinc2*2)>>16) - xx;
  189. int d=((xpos+s_xinc2*3)>>16) - xx;
  190. memcpy(funnyUVCode + fragmentLength*i/4, fragment, fragmentLength);
  191. funnyUVCode[fragmentLength*i/4 + imm8OfPShufW1]=
  192. funnyUVCode[fragmentLength*i/4 + imm8OfPShufW2]=
  193. a | (b<<2) | (c<<4) | (d<<6);
  194. funnyUVCode[fragmentLength*(i+4)/4]= RET;
  195. }
  196. xpos+=s_xinc2;
  197. }
  198. // funnyCode[0]= RET;
  199. }
  200. #endif // HAVE_MMX2
  201. } // reset counters
  202. while(1){
  203. unsigned char *dest=dstptr+dststride*s_ypos;
  204. int y0=(s_srcypos + 0xFFFF)>>16; // first luminance source line number below the dst line
  205. // points to the dst Pixels center in the source (0 is the center of pixel 0,0 in src)
  206. int srcuvpos= s_srcypos + s_yinc/2 - 0x8000;
  207. int y1=(srcuvpos + 0x1FFFF)>>17; // first chrominance source line number below the dst line
  208. int yalpha=((s_srcypos-1)&0xFFFF)>>7;
  209. int yalpha1=yalpha^511;
  210. int uvalpha=((srcuvpos-1)&0x1FFFF)>>8;
  211. int uvalpha1=uvalpha^511;
  212. uint16_t *buf0=pix_buf_y[y0&1]; // top line of the interpolated slice
  213. uint16_t *buf1=pix_buf_y[((y0+1)&1)]; // bottom line of the interpolated slice
  214. uint16_t *uvbuf0=pix_buf_uv[y1&1]; // top line of the interpolated slice
  215. uint16_t *uvbuf1=pix_buf_uv[(y1+1)&1]; // bottom line of the interpolated slice
  216. int i;
  217. // if this is before the first line than use only the first src line
  218. if(y0==0) buf0= buf1;
  219. if(y1==0) uvbuf0= uvbuf1; // yes we do have to check this, its not the same as y0==0
  220. if(y0>=y+h) break; // FIXME wrong, skips last lines, but they are dupliactes anyway
  221. // if this is after the last line than use only the last src line
  222. if(y0>=y+h)
  223. {
  224. buf1= buf0;
  225. s_last_ypos=y0;
  226. }
  227. if(y1>=(y+h)/2)
  228. {
  229. uvbuf1= uvbuf0;
  230. s_last_y1pos=y1;
  231. }
  232. s_ypos++; s_srcypos+=s_yinc;
  233. //only interpolate the src line horizontally if we didnt do it allready
  234. if(s_last_ypos!=y0){
  235. unsigned char *src=srcptr[0]+(y0-y)*stride[0];
  236. unsigned int xpos=0;
  237. s_last_ypos=y0;
  238. // *** horizontal scale Y line to temp buffer
  239. #ifdef ARCH_X86
  240. #ifdef HAVE_MMX2
  241. if(canMMX2BeUsed)
  242. {
  243. asm volatile(
  244. "pxor %%mm7, %%mm7 \n\t"
  245. "pxor %%mm2, %%mm2 \n\t" // 2*xalpha
  246. "movd %5, %%mm6 \n\t" // s_xinc&0xFFFF
  247. "punpcklwd %%mm6, %%mm6 \n\t"
  248. "punpcklwd %%mm6, %%mm6 \n\t"
  249. "movq %%mm6, %%mm2 \n\t"
  250. "psllq $16, %%mm2 \n\t"
  251. "paddw %%mm6, %%mm2 \n\t"
  252. "psllq $16, %%mm2 \n\t"
  253. "paddw %%mm6, %%mm2 \n\t"
  254. "psllq $16, %%mm2 \n\t" //0,t,2t,3t t=s_xinc&0xFF
  255. "movq %%mm2, temp0 \n\t"
  256. "movd %4, %%mm6 \n\t" //(s_xinc*4)&0xFFFF
  257. "punpcklwd %%mm6, %%mm6 \n\t"
  258. "punpcklwd %%mm6, %%mm6 \n\t"
  259. "xorl %%eax, %%eax \n\t" // i
  260. "movl %0, %%esi \n\t" // src
  261. "movl %1, %%edi \n\t" // buf1
  262. "movl %3, %%edx \n\t" // (s_xinc*4)>>16
  263. "xorl %%ecx, %%ecx \n\t"
  264. "xorl %%ebx, %%ebx \n\t"
  265. "movw %4, %%bx \n\t" // (s_xinc*4)&0xFFFF
  266. // "int $3\n\t"
  267. "call funnyYCode \n\t"
  268. "movq temp0, %%mm2 \n\t"
  269. "xorl %%ecx, %%ecx \n\t"
  270. "call funnyYCode \n\t"
  271. "movq temp0, %%mm2 \n\t"
  272. "xorl %%ecx, %%ecx \n\t"
  273. "call funnyYCode \n\t"
  274. "movq temp0, %%mm2 \n\t"
  275. "xorl %%ecx, %%ecx \n\t"
  276. "call funnyYCode \n\t"
  277. "movq temp0, %%mm2 \n\t"
  278. "xorl %%ecx, %%ecx \n\t"
  279. "call funnyYCode \n\t"
  280. "movq temp0, %%mm2 \n\t"
  281. "xorl %%ecx, %%ecx \n\t"
  282. "call funnyYCode \n\t"
  283. "movq temp0, %%mm2 \n\t"
  284. "xorl %%ecx, %%ecx \n\t"
  285. "call funnyYCode \n\t"
  286. "movq temp0, %%mm2 \n\t"
  287. "xorl %%ecx, %%ecx \n\t"
  288. "call funnyYCode \n\t"
  289. :: "m" (src), "m" (buf1), "m" (dstw), "m" ((s_xinc*4)>>16),
  290. "m" ((s_xinc*4)&0xFFFF), "m" (s_xinc&0xFFFF)
  291. : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
  292. );
  293. }
  294. else
  295. {
  296. #endif
  297. //NO MMX just normal asm ... FIXME try/write funny MMX2 variant
  298. //FIXME add prefetch
  299. asm volatile(
  300. "xorl %%eax, %%eax \n\t" // i
  301. "xorl %%ebx, %%ebx \n\t" // xx
  302. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  303. "1: \n\t"
  304. "movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
  305. "movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
  306. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  307. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  308. "shll $16, %%edi \n\t"
  309. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  310. "movl %1, %%edi \n\t"
  311. "shrl $9, %%esi \n\t"
  312. "movw %%si, (%%edi, %%eax, 2) \n\t"
  313. "addw %4, %%cx \n\t" //2*xalpha += s_xinc&0xFF
  314. "adcl %3, %%ebx \n\t" //xx+= s_xinc>>8 + carry
  315. "movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
  316. "movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
  317. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  318. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  319. "shll $16, %%edi \n\t"
  320. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  321. "movl %1, %%edi \n\t"
  322. "shrl $9, %%esi \n\t"
  323. "movw %%si, 2(%%edi, %%eax, 2) \n\t"
  324. "addw %4, %%cx \n\t" //2*xalpha += s_xinc&0xFF
  325. "adcl %3, %%ebx \n\t" //xx+= s_xinc>>8 + carry
  326. "addl $2, %%eax \n\t"
  327. "cmpl %2, %%eax \n\t"
  328. " jb 1b \n\t"
  329. :: "r" (src), "m" (buf1), "m" (dstw), "m" (s_xinc>>16), "m" (s_xinc&0xFFFF)
  330. : "%eax", "%ebx", "%ecx", "%edi", "%esi"
  331. );
  332. #ifdef HAVE_MMX2
  333. } //if MMX2 cant be used
  334. #endif
  335. #else
  336. for(i=0;i<dstw;i++){
  337. register unsigned int xx=xpos>>16;
  338. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  339. buf1[i]=(src[xx]*(xalpha^127)+src[xx+1]*xalpha);
  340. xpos+=s_xinc;
  341. }
  342. #endif
  343. }
  344. // *** horizontal scale U and V lines to temp buffer
  345. if(s_last_y1pos!=y1){
  346. unsigned char *src1=srcptr[1]+(y1-y/2)*stride[1];
  347. unsigned char *src2=srcptr[2]+(y1-y/2)*stride[2];
  348. int xpos=0;
  349. s_last_y1pos= y1;
  350. #ifdef ARCH_X86
  351. #ifdef HAVE_MMX2
  352. if(canMMX2BeUsed)
  353. {
  354. asm volatile(
  355. "pxor %%mm7, %%mm7 \n\t"
  356. "pxor %%mm2, %%mm2 \n\t" // 2*xalpha
  357. "movd %5, %%mm6 \n\t" // s_xinc&0xFFFF
  358. "punpcklwd %%mm6, %%mm6 \n\t"
  359. "punpcklwd %%mm6, %%mm6 \n\t"
  360. "movq %%mm6, %%mm2 \n\t"
  361. "psllq $16, %%mm2 \n\t"
  362. "paddw %%mm6, %%mm2 \n\t"
  363. "psllq $16, %%mm2 \n\t"
  364. "paddw %%mm6, %%mm2 \n\t"
  365. "psllq $16, %%mm2 \n\t" //0,t,2t,3t t=s_xinc&0xFFFF
  366. "movq %%mm2, temp0 \n\t"
  367. "movd %4, %%mm6 \n\t" //(s_xinc*4)&0xFFFF
  368. "punpcklwd %%mm6, %%mm6 \n\t"
  369. "punpcklwd %%mm6, %%mm6 \n\t"
  370. "xorl %%eax, %%eax \n\t" // i
  371. "movl %0, %%esi \n\t" // src
  372. "movl %1, %%edi \n\t" // buf1
  373. "movl %3, %%edx \n\t" // (s_xinc*4)>>16
  374. "xorl %%ecx, %%ecx \n\t"
  375. "xorl %%ebx, %%ebx \n\t"
  376. "movw %4, %%bx \n\t" // (s_xinc*4)&0xFFFF
  377. // "int $3\n\t"
  378. #define FUNNYUVCODE \
  379. "call funnyUVCode \n\t"\
  380. "movq temp0, %%mm2 \n\t"\
  381. "xorl %%ecx, %%ecx \n\t"
  382. FUNNYUVCODE
  383. FUNNYUVCODE
  384. FUNNYUVCODE
  385. FUNNYUVCODE
  386. FUNNYUVCODE
  387. FUNNYUVCODE
  388. FUNNYUVCODE
  389. FUNNYUVCODE
  390. "xorl %%eax, %%eax \n\t" // i
  391. "movl %6, %%esi \n\t" // src
  392. "movl %1, %%edi \n\t" // buf1
  393. "addl $4096, %%edi \n\t"
  394. FUNNYUVCODE
  395. FUNNYUVCODE
  396. FUNNYUVCODE
  397. FUNNYUVCODE
  398. FUNNYUVCODE
  399. FUNNYUVCODE
  400. FUNNYUVCODE
  401. FUNNYUVCODE
  402. :: "m" (src1), "m" (uvbuf1), "m" (dstw), "m" ((s_xinc2*4)>>16),
  403. "m" ((s_xinc2*4)&0xFFFF), "m" (s_xinc2&0xFFFF), "m" (src2)
  404. : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
  405. );
  406. }
  407. else
  408. {
  409. #endif
  410. asm volatile(
  411. "xorl %%eax, %%eax \n\t" // i
  412. "xorl %%ebx, %%ebx \n\t" // xx
  413. "xorl %%ecx, %%ecx \n\t" // 2*xalpha
  414. "1: \n\t"
  415. "movl %0, %%esi \n\t"
  416. "movzbl (%%esi, %%ebx), %%edi \n\t" //src[xx]
  417. "movzbl 1(%%esi, %%ebx), %%esi \n\t" //src[xx+1]
  418. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  419. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  420. "shll $16, %%edi \n\t"
  421. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  422. "movl %1, %%edi \n\t"
  423. "shrl $9, %%esi \n\t"
  424. "movw %%si, (%%edi, %%eax, 2) \n\t"
  425. "movzbl (%5, %%ebx), %%edi \n\t" //src[xx]
  426. "movzbl 1(%5, %%ebx), %%esi \n\t" //src[xx+1]
  427. "subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
  428. "imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
  429. "shll $16, %%edi \n\t"
  430. "addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
  431. "movl %1, %%edi \n\t"
  432. "shrl $9, %%esi \n\t"
  433. "movw %%si, 4096(%%edi, %%eax, 2)\n\t"
  434. "addw %4, %%cx \n\t" //2*xalpha += s_xinc&0xFF
  435. "adcl %3, %%ebx \n\t" //xx+= s_xinc>>8 + carry
  436. "addl $1, %%eax \n\t"
  437. "cmpl %2, %%eax \n\t"
  438. " jb 1b \n\t"
  439. :: "m" (src1), "m" (uvbuf1), "m" (dstw), "m" (s_xinc2>>16), "m" (s_xinc2&0xFFFF),
  440. "r" (src2)
  441. : "%eax", "%ebx", "%ecx", "%edi", "%esi"
  442. );
  443. #ifdef HAVE_MMX2
  444. } //if MMX2 cant be used
  445. #endif
  446. #else
  447. for(i=0;i<dstw;i++){
  448. register unsigned int xx=xpos>>16;
  449. register unsigned int xalpha=(xpos&0xFFFF)>>9;
  450. uvbuf1[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
  451. uvbuf1[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
  452. xpos+=s_xinc2;
  453. }
  454. #endif
  455. }
  456. // Note1: this code can be resticted to n*8 (or n*16) width lines to simplify optimization...
  457. // Re: Note1: ok n*4 for now
  458. // Note2: instead of using lookup tabs, mmx version could do the multiply...
  459. // Re: Note2: yep
  460. // Note3: maybe we should make separated 15/16, 24 and 32bpp version of this:
  461. // Re: done (32 & 16) and 16 has dithering :) but 16 is untested
  462. #ifdef HAVE_MMX
  463. //FIXME write lq version with less uv ...
  464. //FIXME reorder / optimize
  465. if(dstbpp == 32)
  466. {
  467. asm volatile(
  468. #define YSCALEYUV2RGB \
  469. "pxor %%mm7, %%mm7 \n\t"\
  470. "movd %6, %%mm6 \n\t" /*yalpha1*/\
  471. "punpcklwd %%mm6, %%mm6 \n\t"\
  472. "punpcklwd %%mm6, %%mm6 \n\t"\
  473. "movd %7, %%mm5 \n\t" /*uvalpha1*/\
  474. "punpcklwd %%mm5, %%mm5 \n\t"\
  475. "punpcklwd %%mm5, %%mm5 \n\t"\
  476. "xorl %%eax, %%eax \n\t"\
  477. "1: \n\t"\
  478. "movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
  479. "movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
  480. "movq (%2, %%eax,2), %%mm2 \n\t" /* uvbuf0[eax]*/\
  481. "movq (%3, %%eax,2), %%mm3 \n\t" /* uvbuf1[eax]*/\
  482. "psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
  483. "psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
  484. "pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
  485. "pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
  486. "psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>7*/\
  487. "movq 4096(%2, %%eax,2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
  488. "psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>7*/\
  489. "paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
  490. "movq 4096(%3, %%eax,2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
  491. "paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
  492. "psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
  493. "psubw w10, %%mm1 \n\t" /* Y-16*/\
  494. "psubw w80, %%mm3 \n\t" /* (U-128)*/\
  495. "psllw $3, %%mm1 \n\t" /* (y-16)*8*/\
  496. "psllw $3, %%mm3 \n\t" /*(U-128)8*/\
  497. "pmulhw yCoeff, %%mm1 \n\t"\
  498. \
  499. \
  500. "pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
  501. "movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
  502. "pmulhw ubCoeff, %%mm3 \n\t"\
  503. "psraw $7, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>7*/\
  504. "pmulhw ugCoeff, %%mm2 \n\t"\
  505. "paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
  506. "psubw w80, %%mm0 \n\t" /* (V-128)*/\
  507. "psllw $3, %%mm0 \n\t" /* (V-128)8*/\
  508. \
  509. \
  510. "movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
  511. "pmulhw vrCoeff, %%mm0 \n\t"\
  512. "pmulhw vgCoeff, %%mm4 \n\t"\
  513. "paddw %%mm1, %%mm3 \n\t" /* B*/\
  514. "paddw %%mm1, %%mm0 \n\t" /* R*/\
  515. "packuswb %%mm3, %%mm3 \n\t"\
  516. \
  517. "packuswb %%mm0, %%mm0 \n\t"\
  518. "paddw %%mm4, %%mm2 \n\t"\
  519. "paddw %%mm2, %%mm1 \n\t" /* G*/\
  520. \
  521. "packuswb %%mm1, %%mm1 \n\t"
  522. YSCALEYUV2RGB
  523. "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
  524. "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
  525. "movq %%mm3, %%mm1 \n\t"
  526. "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
  527. "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
  528. #ifdef HAVE_MMX2
  529. "movntq %%mm3, (%4, %%eax, 4) \n\t"
  530. "movntq %%mm1, 8(%4, %%eax, 4) \n\t"
  531. #else
  532. "movq %%mm3, (%4, %%eax, 4) \n\t"
  533. "movq %%mm1, 8(%4, %%eax, 4) \n\t"
  534. #endif
  535. "addl $4, %%eax \n\t"
  536. "cmpl %5, %%eax \n\t"
  537. " jb 1b \n\t"
  538. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
  539. "m" (yalpha1), "m" (uvalpha1)
  540. : "%eax"
  541. );
  542. }
  543. else if(dstbpp==24)
  544. {
  545. asm volatile(
  546. YSCALEYUV2RGB
  547. // lsb ... msb
  548. "punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
  549. "punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
  550. "movq %%mm3, %%mm1 \n\t"
  551. "punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
  552. "punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
  553. "movq %%mm3, %%mm2 \n\t" // BGR0BGR0
  554. "psrlq $8, %%mm3 \n\t" // GR0BGR00
  555. "pand bm00000111, %%mm2 \n\t" // BGR00000
  556. "pand bm11111000, %%mm3 \n\t" // 000BGR00
  557. "por %%mm2, %%mm3 \n\t" // BGRBGR00
  558. "movq %%mm1, %%mm2 \n\t"
  559. "psllq $48, %%mm1 \n\t" // 000000BG
  560. "por %%mm1, %%mm3 \n\t" // BGRBGRBG
  561. "movq %%mm2, %%mm1 \n\t" // BGR0BGR0
  562. "psrld $16, %%mm2 \n\t" // R000R000
  563. "psrlq $24, %%mm1 \n\t" // 0BGR0000
  564. "por %%mm2, %%mm1 \n\t" // RBGRR000
  565. "movl %4, %%ebx \n\t"
  566. "addl %%eax, %%ebx \n\t"
  567. #ifdef HAVE_MMX2
  568. //FIXME Alignment
  569. "movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
  570. "movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
  571. #else
  572. "movd %%mm3, (%%ebx, %%eax, 2) \n\t"
  573. "psrlq $32, %%mm3 \n\t"
  574. "movd %%mm3, 4(%%ebx, %%eax, 2) \n\t"
  575. "movd %%mm1, 8(%%ebx, %%eax, 2) \n\t"
  576. #endif
  577. "addl $4, %%eax \n\t"
  578. "cmpl %5, %%eax \n\t"
  579. " jb 1b \n\t"
  580. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstw),
  581. "m" (yalpha1), "m" (uvalpha1)
  582. : "%eax", "%ebx"
  583. );
  584. }
  585. else if(dstbpp==16)
  586. {
  587. asm volatile(
  588. YSCALEYUV2RGB
  589. #ifdef DITHER16BPP
  590. "paddusb g16Dither, %%mm1 \n\t"
  591. "paddusb b16Dither, %%mm0 \n\t"
  592. "paddusb b16Dither, %%mm3 \n\t"
  593. #endif
  594. "punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
  595. "punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
  596. "punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R
  597. "psrlw $3, %%mm3 \n\t"
  598. "psllw $3, %%mm1 \n\t"
  599. "psllw $8, %%mm0 \n\t"
  600. "pand g16Mask, %%mm1 \n\t"
  601. "pand r16Mask, %%mm0 \n\t"
  602. "por %%mm3, %%mm1 \n\t"
  603. "por %%mm1, %%mm0 \n\t"
  604. #ifdef HAVE_MMX2
  605. "movntq %%mm0, (%4, %%eax, 2) \n\t"
  606. #else
  607. "movq %%mm0, (%4, %%eax, 2) \n\t"
  608. #endif
  609. "addl $4, %%eax \n\t"
  610. "cmpl %5, %%eax \n\t"
  611. " jb 1b \n\t"
  612. :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
  613. "m" (yalpha1), "m" (uvalpha1)
  614. : "%eax"
  615. );
  616. }
  617. #else
  618. if(dstbpp==32 || dstbpp==24)
  619. {
  620. for(i=0;i<dstw;i++){
  621. // vertical linear interpolation && yuv2rgb in a single step:
  622. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>16)];
  623. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>16);
  624. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>16);
  625. dest[0]=clip_table[((Y + yuvtab_3343[U]) >>13)];
  626. dest[1]=clip_table[((Y + yuvtab_0c92[V] + yuvtab_1a1e[U]) >>13)];
  627. dest[2]=clip_table[((Y + yuvtab_40cf[V]) >>13)];
  628. dest+=dstbpp>>3;
  629. }
  630. }
  631. else if(dstbpp==16)
  632. {
  633. for(i=0;i<dstw;i++){
  634. // vertical linear interpolation && yuv2rgb in a single step:
  635. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>16)];
  636. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>16);
  637. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>16);
  638. ((uint16_t*)dest)[0] =
  639. (clip_table[((Y + yuvtab_3343[U]) >>13)]>>3) |
  640. (clip_table[((Y + yuvtab_0c92[V] + yuvtab_1a1e[U]) >>13)]<<3)&0x07E0 |
  641. (clip_table[((Y + yuvtab_40cf[V]) >>13)]<<8)&0xF800;
  642. dest+=2;
  643. }
  644. }
  645. else if(dstbpp==15) //15bit FIXME how do i figure out if its 15 or 16?
  646. {
  647. for(i=0;i<dstw;i++){
  648. // vertical linear interpolation && yuv2rgb in a single step:
  649. int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>16)];
  650. int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>16);
  651. int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>16);
  652. ((uint16_t*)dest)[0] =
  653. (clip_table[((Y + yuvtab_3343[U]) >>13)]>>3) |
  654. (clip_table[((Y + yuvtab_0c92[V] + yuvtab_1a1e[U]) >>13)]<<2)&0x03E0 |
  655. (clip_table[((Y + yuvtab_40cf[V]) >>13)]<<7)&0x7C00;
  656. dest+=2;
  657. }
  658. }
  659. #endif
  660. b16Dither= b16Dither1;
  661. b16Dither1= b16Dither2;
  662. b16Dither2= b16Dither;
  663. g16Dither= g16Dither1;
  664. g16Dither1= g16Dither2;
  665. g16Dither2= g16Dither;
  666. }
  667. #ifdef HAVE_3DNOW
  668. asm volatile("femms");
  669. #elif defined (HAVE_MMX)
  670. asm volatile("emms");
  671. #endif
  672. }
  673. void SwScale_Init(){
  674. // generating tables:
  675. int i;
  676. for(i=0;i<256;i++){
  677. clip_table[i]=0;
  678. clip_table[i+256]=i;
  679. clip_table[i+512]=255;
  680. yuvtab_2568[i]=(0x2568*(i-16))+(256<<13);
  681. yuvtab_3343[i]=0x3343*(i-128);
  682. yuvtab_0c92[i]=-0x0c92*(i-128);
  683. yuvtab_1a1e[i]=-0x1a1e*(i-128);
  684. yuvtab_40cf[i]=0x40cf*(i-128);
  685. }
  686. }