You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

795 lines
23KB

  1. // Software scaling and colorspace conversion routines for MPlayer
  2. // Original C implementation by A'rpi/ESP-team <arpi@thot.banki.hu>
  3. // current version mostly by Michael Niedermayer (michaelni@gmx.at)
  4. #include <inttypes.h>
  5. #include "../config.h"
  6. //#undef HAVE_MMX2
  7. //#undef HAVE_MMX
  8. //#undef ARCH_X86
  9. #define DITHER16BPP
  10. //#define ALT_ERROR
  11. #define RET 0xC3 //near return opcode
  12. /*
  13. NOTES
  14. known BUGS with known cause (no bugreports please!)
  15. code reads 1 sample too much (might cause a sig11)
  16. TODO
  17. check alignment of everything
  18. */
// Packed 4x16-bit MMX constants for the YUV->RGB conversion; referenced by
// symbol name directly from the inline assembly below.
static uint64_t yCoeff= 0x2568256825682568LL;
static uint64_t ubCoeff= 0x3343334333433343LL;
static uint64_t vrCoeff= 0x40cf40cf40cf40cfLL;
static uint64_t ugCoeff= 0xE5E2E5E2E5E2E5E2LL;
static uint64_t vgCoeff= 0xF36EF36EF36EF36ELL;
static uint64_t w80= 0x0080008000800080LL; // chroma bias (128 per word)
static uint64_t w10= 0x0010001000100010LL; // luma bias (16 per word)
static uint64_t bm00000111=0x0000000000FFFFFFLL; // byte mask: low 3 bytes (24bpp packing)
static uint64_t bm11111000=0xFFFFFFFFFF000000LL; // byte mask: high 5 bytes (24bpp packing)
// Dither patterns for the 16bpp path; the three values are rotated once per
// output line (see the end of the scaling loop).
static uint64_t b16Dither= 0x0004000400040004LL;
static uint64_t b16Dither1=0x0004000400040004LL;
static uint64_t b16Dither2=0x0602060206020602LL;
static uint64_t g16Dither= 0x0002000200020002LL;
static uint64_t g16Dither1=0x0002000200020002LL;
static uint64_t g16Dither2=0x0301030103010301LL;
// RGB565 per-channel masks.
static uint64_t b16Mask= 0x001F001F001F001FLL;
static uint64_t g16Mask= 0x07E007E007E007E0LL;
static uint64_t r16Mask= 0xF800F800F800F800LL;
static uint64_t temp0; // scratch slot used by the MMX2 horizontal scaler asm
// temporary storage for 4 yuv lines:
// 16bit for now (mmx likes it more compact)
static uint16_t pix_buf_y[4][2048];
static uint16_t pix_buf_uv[2][2048*2];
// clipping helper table for C implementations:
// [0..255]->0, [256..511]->identity, [512..767]->255 (filled by SwScale_Init)
static unsigned char clip_table[768];
// yuv->rgb conversion tables (fixed-point <<13, filled by SwScale_Init):
static int yuvtab_2568[256];
static int yuvtab_3343[256];
static int yuvtab_0c92[256];
static int yuvtab_1a1e[256];
static int yuvtab_40cf[256];
// Buffers receiving runtime-generated ("funny") MMX2 horizontal-scaling code;
// the code fragment is copied here with memcpy and then call-ed from asm.
static uint8_t funnyYCode[10000];
static uint8_t funnyUVCode[10000];
// *** bilinear scaling and yuv->rgb conversion of yv12 slices:
// *** Note: it's called multiple times while decoding a frame, first time y==0
// *** Designed to upscale, but may work for downscale too.
// s_xinc = (src_width << 16) / dst_width
// s_yinc = (src_height << 16) / dst_height
//
// Parameters:
//   srcptr[0..2]  - Y, U, V plane pointers of the current slice
//   stride[0..2]  - byte strides of those planes
//   y, h          - first luma source line of this slice and slice height
//   dstptr        - output RGB buffer; dststride bytes per output line
//   dstw, dstbpp  - output width in pixels and output depth (15/16/24/32)
//   s_xinc,s_yinc - 16.16 fixed-point horizontal/vertical scale steps
//
// NOTE(review): uses static state (s_srcypos, s_ypos, ...) carried across
// calls within one frame, so this function is not reentrant/thread-safe.
void SwScale_YV12slice_brg24(unsigned char* srcptr[],int stride[], int y, int h,
unsigned char* dstptr, int dststride, int dstw, int dstbpp,
unsigned int s_xinc,unsigned int s_yinc){

// scaling factors:
//static int s_yinc=(vo_dga_src_height<<16)/vo_dga_vp_height;
//static int s_xinc=(vo_dga_src_width<<8)/vo_dga_vp_width;

unsigned int s_xinc2;

static int s_srcypos; // points to the dst Pixels center in the source (0 is the center of pixel 0,0 in src)
static int s_ypos;

// last horizontally interpolated lines, used to avoid unnecessary calculations
static int s_last_ypos;
static int s_last_y1pos;

static int static_dstw;

#ifdef HAVE_MMX2
// used to detect a horizontal size change
static int old_dstw= -1;
static int old_s_xinc= -1;
#endif

int canMMX2BeUsed=0;
static int test=0; // NOTE(review): appears unused in this function
// source width implied by dstw and the scale step (rounded)
int srcWidth= (dstw*s_xinc + 0x8000)>>16;

#ifdef HAVE_MMX2
// MMX2 path requires no downscale and width alignment (dstw%32, srcWidth%16)
canMMX2BeUsed= (s_xinc <= 0x10000 && (dstw&31)==0 && (srcWidth&15)==0) ? 1 : 0;
#endif

// match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst
// n-2 is the last chrominance sample available
// FIXME this is not perfect, but no one should notice the difference, the more correct variant
// would be like the vertical one, but that would require some special code for the
// first and last pixel
if(canMMX2BeUsed) s_xinc+= 20;
else s_xinc = ((srcWidth-2)<<16)/(dstw-2) - 20;
s_xinc2=s_xinc>>1; // chroma step: half the luma step (yv12 is 2x2 subsampled)

// force calculation of the horizontal interpolation of the first line
s_last_ypos=-99;
s_last_y1pos=-99;

if(y==0){ // first slice of the frame: reset vertical counters
	s_srcypos= s_yinc/2 - 0x8000;
	s_ypos=0;
#ifdef HAVE_MMX2
// can't downscale !!!
	// (re)generate the runtime scaling code only when the size changed
	if((old_s_xinc != s_xinc || old_dstw!=dstw) && canMMX2BeUsed)
	{
		uint8_t *fragment;
		int imm8OfPShufW1;
		int imm8OfPShufW2;
		int fragmentLength;

		int xpos, xx, xalpha, i;

		old_s_xinc= s_xinc;
		old_dstw= dstw;

		static_dstw= dstw;

		// create an optimized horizontal scaling routine
		//
		// The asm below is never executed here (it jumps straight to 9:);
		// it only serves as a template. The lea/sub arithmetic extracts the
		// fragment start address, its length, and the byte offsets of the two
		// pshufw immediates, which are patched per-position further down.

		//code fragment

		asm volatile(
			"jmp 9f \n\t"
		// Begin
			"0: \n\t"
			"movq (%%esi), %%mm0 \n\t" //FIXME Alignment
			"movq %%mm0, %%mm1 \n\t"
			"psrlq $8, %%mm0 \n\t"
			"punpcklbw %%mm7, %%mm1 \n\t"
			"movq %%mm2, %%mm3 \n\t"
			"punpcklbw %%mm7, %%mm0 \n\t"
			"addw %%bx, %%cx \n\t" //2*xalpha += (4*s_xinc)&0xFFFF
			"pshufw $0xFF, %%mm1, %%mm1 \n\t"
			"1: \n\t"
			"adcl %%edx, %%esi \n\t" //xx+= (4*s_xinc)>>16 + carry
			"pshufw $0xFF, %%mm0, %%mm0 \n\t"
			"2: \n\t"
			"psrlw $9, %%mm3 \n\t"
			"psubw %%mm1, %%mm0 \n\t"
			"pmullw %%mm3, %%mm0 \n\t"
			"paddw %%mm6, %%mm2 \n\t" // 2*alpha += xpos&0xFFFF
			"psllw $7, %%mm1 \n\t"
			"paddw %%mm1, %%mm0 \n\t"

			"movq %%mm0, (%%edi, %%eax) \n\t"

			"addl $8, %%eax \n\t"
		// End
			"9: \n\t"
//			"int $3\n\t"
			"leal 0b, %0 \n\t"
			"leal 1b, %1 \n\t"
			"leal 2b, %2 \n\t"
			"decl %1 \n\t"
			"decl %2 \n\t"
			"subl %0, %1 \n\t"
			"subl %0, %2 \n\t"
			"leal 9b, %3 \n\t"
			"subl %0, %3 \n\t"
			:"=r" (fragment), "=r" (imm8OfPShufW1), "=r" (imm8OfPShufW2),
			"=r" (fragmentLength)
		);

		xpos= 0; //s_xinc/2 - 0x8000; // difference between pixel centers

		/* choose xinc so that all 8 parts fit exactly
		   Note: we cannot use just 1 part because it would not fit in the code cache */
//		s_xinc2_diff= -((((s_xinc2*(dstw/8))&0xFFFF))/(dstw/8))-10;
//		s_xinc_diff= -((((s_xinc*(dstw/8))&0xFFFF))/(dstw/8));
#ifdef ALT_ERROR
//		s_xinc2_diff+= ((0x10000/(dstw/8)));
#endif
//		s_xinc_diff= s_xinc2_diff*2;

//		s_xinc2+= s_xinc2_diff;
//		s_xinc+= s_xinc_diff;

//		old_s_xinc= s_xinc;

		// Build the luma scaler: one copy of the fragment per 4 output
		// pixels, its pshufw immediates patched with the per-pixel source
		// offsets a..d; a RET is appended after the final fragment.
		for(i=0; i<dstw/8; i++)
		{
			int xx=xpos>>16;

			if((i&3) == 0)
			{
				int a=0;
				int b=((xpos+s_xinc)>>16) - xx;
				int c=((xpos+s_xinc*2)>>16) - xx;
				int d=((xpos+s_xinc*3)>>16) - xx;

				memcpy(funnyYCode + fragmentLength*i/4, fragment, fragmentLength);

				funnyYCode[fragmentLength*i/4 + imm8OfPShufW1]=
				funnyYCode[fragmentLength*i/4 + imm8OfPShufW2]=
					a | (b<<2) | (c<<4) | (d<<6);

				funnyYCode[fragmentLength*(i+4)/4]= RET;
			}
			xpos+=s_xinc;
		}

		// Same construction for the chroma scaler, with the halved step.
		xpos= 0; //s_xinc2/2 - 0x10000; // difference between centers of chrom samples
		for(i=0; i<dstw/8; i++)
		{
			int xx=xpos>>16;

			if((i&3) == 0)
			{
				int a=0;
				int b=((xpos+s_xinc2)>>16) - xx;
				int c=((xpos+s_xinc2*2)>>16) - xx;
				int d=((xpos+s_xinc2*3)>>16) - xx;

				memcpy(funnyUVCode + fragmentLength*i/4, fragment, fragmentLength);

				funnyUVCode[fragmentLength*i/4 + imm8OfPShufW1]=
				funnyUVCode[fragmentLength*i/4 + imm8OfPShufW2]=
					a | (b<<2) | (c<<4) | (d<<6);

				funnyUVCode[fragmentLength*(i+4)/4]= RET;
			}
			xpos+=s_xinc2;
		}
//		funnyCode[0]= RET;
	}
#endif // HAVE_MMX2
} // reset counters

// Main loop: one iteration per output line of this slice.
while(1){
	unsigned char *dest=dstptr+dststride*s_ypos;
	int y0=(s_srcypos + 0xFFFF)>>16; // first luminance source line number below the dst line
	// points to the dst Pixels center in the source (0 is the center of pixel 0,0 in src)
	int srcuvpos= s_srcypos + s_yinc/2 - 0x8000;
	int y1=(srcuvpos + 0x1FFFF)>>17; // first chrominance source line number below the dst line
	int yalpha=((s_srcypos-1)&0xFFFF)>>7;  // 9-bit vertical luma blend weight
	int yalpha1=yalpha^511;
	int uvalpha=((srcuvpos-1)&0x1FFFF)>>8; // 9-bit vertical chroma blend weight
	int uvalpha1=uvalpha^511;
	uint16_t *buf0=pix_buf_y[y0&1];       // top line of the interpolated slice
	uint16_t *buf1=pix_buf_y[((y0+1)&1)]; // bottom line of the interpolated slice
	uint16_t *uvbuf0=pix_buf_uv[y1&1];    // top line of the interpolated slice
	uint16_t *uvbuf1=pix_buf_uv[(y1+1)&1];// bottom line of the interpolated slice
	int i;

	// if this is before the first line, then use only the first src line
	if(y0==0) buf0= buf1;
	if(y1==0) uvbuf0= uvbuf1; // yes we do have to check this, it's not the same as y0==0

	if(y0>=y+h) break; // FIXME wrong, skips last lines, but they are duplicates anyway

	// if this is after the last line, then use only the last src line
	// NOTE(review): unreachable — the break above tests the same condition.
	if(y0>=y+h)
	{
		buf1= buf0;
		s_last_ypos=y0;
	}
	if(y1>=(y+h)/2)
	{
		uvbuf1= uvbuf0;
		s_last_y1pos=y1;
	}

	s_ypos++; s_srcypos+=s_yinc;

	//only interpolate the src line horizontally if we didn't do it already
	if(s_last_ypos!=y0){
		unsigned char *src=srcptr[0]+(y0-y)*stride[0];
		unsigned int xpos=0;
		s_last_ypos=y0;
		// *** horizontal scale Y line to temp buffer
#ifdef ARCH_X86

#ifdef HAVE_MMX2
	if(canMMX2BeUsed)
	{
		// Set up registers/mm2 and run the generated funnyYCode 8 times
		// (one call per 1/8th of the output line).
		asm volatile(
			"pxor %%mm7, %%mm7 \n\t"
			"pxor %%mm2, %%mm2 \n\t" // 2*xalpha
			"movd %5, %%mm6 \n\t" // s_xinc&0xFFFF
			"punpcklwd %%mm6, %%mm6 \n\t"
			"punpcklwd %%mm6, %%mm6 \n\t"
			"movq %%mm6, %%mm2 \n\t"
			"psllq $16, %%mm2 \n\t"
			"paddw %%mm6, %%mm2 \n\t"
			"psllq $16, %%mm2 \n\t"
			"paddw %%mm6, %%mm2 \n\t"
			"psllq $16, %%mm2 \n\t" //0,t,2t,3t t=s_xinc&0xFF
			"movq %%mm2, temp0 \n\t"
			"movd %4, %%mm6 \n\t" //(s_xinc*4)&0xFFFF
			"punpcklwd %%mm6, %%mm6 \n\t"
			"punpcklwd %%mm6, %%mm6 \n\t"
			"xorl %%eax, %%eax \n\t" // i
			"movl %0, %%esi \n\t" // src
			"movl %1, %%edi \n\t" // buf1
			"movl %3, %%edx \n\t" // (s_xinc*4)>>16
			"xorl %%ecx, %%ecx \n\t"
			"xorl %%ebx, %%ebx \n\t"
			"movw %4, %%bx \n\t" // (s_xinc*4)&0xFFFF
//			"int $3\n\t"
			"call funnyYCode \n\t"
			"movq temp0, %%mm2 \n\t"
			"xorl %%ecx, %%ecx \n\t"
			"call funnyYCode \n\t"
			"movq temp0, %%mm2 \n\t"
			"xorl %%ecx, %%ecx \n\t"
			"call funnyYCode \n\t"
			"movq temp0, %%mm2 \n\t"
			"xorl %%ecx, %%ecx \n\t"
			"call funnyYCode \n\t"
			"movq temp0, %%mm2 \n\t"
			"xorl %%ecx, %%ecx \n\t"
			"call funnyYCode \n\t"
			"movq temp0, %%mm2 \n\t"
			"xorl %%ecx, %%ecx \n\t"
			"call funnyYCode \n\t"
			"movq temp0, %%mm2 \n\t"
			"xorl %%ecx, %%ecx \n\t"
			"call funnyYCode \n\t"
			"movq temp0, %%mm2 \n\t"
			"xorl %%ecx, %%ecx \n\t"
			"call funnyYCode \n\t"
			:: "m" (src), "m" (buf1), "m" (dstw), "m" ((s_xinc*4)>>16),
			"m" ((s_xinc*4)&0xFFFF), "m" (s_xinc&0xFFFF)
			: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
		);
		// pad the tail with the last source pixel (scaled to 7-bit fixed point)
		for(i=dstw-1; (i*s_xinc)>>16 >=srcWidth-1; i--) buf1[i] = src[srcWidth-1]*128;
	}
	else
	{
#endif
	//NO MMX just normal asm ... FIXME try/write funny MMX2 variant
	//FIXME add prefetch
	// Plain x86 bilinear: two output pixels per iteration, 7-bit alpha.
	asm volatile(
		"xorl %%eax, %%eax \n\t" // i
		"xorl %%ebx, %%ebx \n\t" // xx
		"xorl %%ecx, %%ecx \n\t" // 2*xalpha
		"1: \n\t"
		"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
		"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
		"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
		"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
		"shll $16, %%edi \n\t"
		"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
		"movl %1, %%edi \n\t"
		"shrl $9, %%esi \n\t"
		"movw %%si, (%%edi, %%eax, 2) \n\t"
		"addw %4, %%cx \n\t" //2*xalpha += s_xinc&0xFF
		"adcl %3, %%ebx \n\t" //xx+= s_xinc>>8 + carry
		"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
		"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
		"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
		"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
		"shll $16, %%edi \n\t"
		"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
		"movl %1, %%edi \n\t"
		"shrl $9, %%esi \n\t"
		"movw %%si, 2(%%edi, %%eax, 2) \n\t"
		"addw %4, %%cx \n\t" //2*xalpha += s_xinc&0xFF
		"adcl %3, %%ebx \n\t" //xx+= s_xinc>>8 + carry
		"addl $2, %%eax \n\t"
		"cmpl %2, %%eax \n\t"
		" jb 1b \n\t"
		:: "r" (src), "m" (buf1), "m" (dstw), "m" (s_xinc>>16), "m" (s_xinc&0xFFFF)
		: "%eax", "%ebx", "%ecx", "%edi", "%esi"
	);
#ifdef HAVE_MMX2
	} //if MMX2 can't be used
#endif
#else
	// Portable C fallback: bilinear with 7-bit alpha into the 16-bit line buffer.
	for(i=0;i<dstw;i++){
		register unsigned int xx=xpos>>16;
		register unsigned int xalpha=(xpos&0xFFFF)>>9;
		buf1[i]=(src[xx]*(xalpha^127)+src[xx+1]*xalpha);
		xpos+=s_xinc;
	}
#endif
	}

	// *** horizontal scale U and V lines to temp buffer
	if(s_last_y1pos!=y1){
		unsigned char *src1=srcptr[1]+(y1-y/2)*stride[1];
		unsigned char *src2=srcptr[2]+(y1-y/2)*stride[2];
		int xpos=0;
		s_last_y1pos= y1;
#ifdef ARCH_X86
#ifdef HAVE_MMX2
	if(canMMX2BeUsed)
	{
		// Same as the luma setup, run once for U and once for V
		// (V goes to uvbuf1+2048, i.e. +4096 bytes).
		asm volatile(
			"pxor %%mm7, %%mm7 \n\t"
			"pxor %%mm2, %%mm2 \n\t" // 2*xalpha
			"movd %5, %%mm6 \n\t" // s_xinc&0xFFFF
			"punpcklwd %%mm6, %%mm6 \n\t"
			"punpcklwd %%mm6, %%mm6 \n\t"
			"movq %%mm6, %%mm2 \n\t"
			"psllq $16, %%mm2 \n\t"
			"paddw %%mm6, %%mm2 \n\t"
			"psllq $16, %%mm2 \n\t"
			"paddw %%mm6, %%mm2 \n\t"
			"psllq $16, %%mm2 \n\t" //0,t,2t,3t t=s_xinc&0xFFFF
			"movq %%mm2, temp0 \n\t"
			"movd %4, %%mm6 \n\t" //(s_xinc*4)&0xFFFF
			"punpcklwd %%mm6, %%mm6 \n\t"
			"punpcklwd %%mm6, %%mm6 \n\t"
			"xorl %%eax, %%eax \n\t" // i
			"movl %0, %%esi \n\t" // src
			"movl %1, %%edi \n\t" // buf1
			"movl %3, %%edx \n\t" // (s_xinc*4)>>16
			"xorl %%ecx, %%ecx \n\t"
			"xorl %%ebx, %%ebx \n\t"
			"movw %4, %%bx \n\t" // (s_xinc*4)&0xFFFF
//			"int $3\n\t"
#define FUNNYUVCODE \
			"call funnyUVCode \n\t"\
			"movq temp0, %%mm2 \n\t"\
			"xorl %%ecx, %%ecx \n\t"

FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE

FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE

			"xorl %%eax, %%eax \n\t" // i
			"movl %6, %%esi \n\t" // src
			"movl %1, %%edi \n\t" // buf1
			"addl $4096, %%edi \n\t"

FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE

FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE

			:: "m" (src1), "m" (uvbuf1), "m" (dstw), "m" ((s_xinc2*4)>>16),
			"m" ((s_xinc2*4)&0xFFFF), "m" (s_xinc2&0xFFFF), "m" (src2)
			: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
		);
		// pad the tail with the last chroma samples
		for(i=dstw-1; (i*s_xinc2)>>16 >=srcWidth/2-1; i--)
		{
			uvbuf1[i] = src1[srcWidth/2-1]*128;
			uvbuf1[i+2048] = src2[srcWidth/2-1]*128;
		}
	}
	else
	{
#endif
	// Plain x86 bilinear: U and V interleaved in one loop, one pixel per pass.
	asm volatile(
		"xorl %%eax, %%eax \n\t" // i
		"xorl %%ebx, %%ebx \n\t" // xx
		"xorl %%ecx, %%ecx \n\t" // 2*xalpha
		"1: \n\t"
		"movl %0, %%esi \n\t"
		"movzbl (%%esi, %%ebx), %%edi \n\t" //src[xx]
		"movzbl 1(%%esi, %%ebx), %%esi \n\t" //src[xx+1]
		"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
		"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
		"shll $16, %%edi \n\t"
		"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
		"movl %1, %%edi \n\t"
		"shrl $9, %%esi \n\t"
		"movw %%si, (%%edi, %%eax, 2) \n\t"
		"movzbl (%5, %%ebx), %%edi \n\t" //src[xx]
		"movzbl 1(%5, %%ebx), %%esi \n\t" //src[xx+1]
		"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
		"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
		"shll $16, %%edi \n\t"
		"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
		"movl %1, %%edi \n\t"
		"shrl $9, %%esi \n\t"
		"movw %%si, 4096(%%edi, %%eax, 2)\n\t"
		"addw %4, %%cx \n\t" //2*xalpha += s_xinc&0xFF
		"adcl %3, %%ebx \n\t" //xx+= s_xinc>>8 + carry
		"addl $1, %%eax \n\t"
		"cmpl %2, %%eax \n\t"
		" jb 1b \n\t"
		:: "m" (src1), "m" (uvbuf1), "m" (dstw), "m" (s_xinc2>>16), "m" (s_xinc2&0xFFFF),
		"r" (src2)
		: "%eax", "%ebx", "%ecx", "%edi", "%esi"
	);
#ifdef HAVE_MMX2
	} //if MMX2 can't be used
#endif
#else
	// Portable C fallback for U and V.
	for(i=0;i<dstw;i++){
		register unsigned int xx=xpos>>16;
		register unsigned int xalpha=(xpos&0xFFFF)>>9;
		uvbuf1[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
		uvbuf1[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
		xpos+=s_xinc2;
	}
#endif
	}

	// *** vertical blend + yuv->rgb conversion to the destination line
	// Note1: this code can be restricted to n*8 (or n*16) width lines to simplify optimization...
	// Re: Note1: ok n*4 for now
	// Note2: instead of using lookup tabs, mmx version could do the multiply...
	// Re: Note2: yep
	// Note3: maybe we should make separated 15/16, 24 and 32bpp version of this:
	// Re: done (32 & 16) and 16 has dithering :) but 16 is untested
#ifdef HAVE_MMX
	//FIXME write lq version with less uv ...
	//FIXME reorder / optimize
	if(dstbpp == 32)
	{
		asm volatile(

// Common MMX core: vertically blends buf0/buf1 and uvbuf0/uvbuf1 with
// yalpha1/uvalpha1, then converts to mm3=B, mm0=R, mm1=G (packed bytes).
#define YSCALEYUV2RGB \
		"pxor %%mm7, %%mm7 \n\t"\
		"movd %6, %%mm6 \n\t" /*yalpha1*/\
		"punpcklwd %%mm6, %%mm6 \n\t"\
		"punpcklwd %%mm6, %%mm6 \n\t"\
		"movd %7, %%mm5 \n\t" /*uvalpha1*/\
		"punpcklwd %%mm5, %%mm5 \n\t"\
		"punpcklwd %%mm5, %%mm5 \n\t"\
		"xorl %%eax, %%eax \n\t"\
		"1: \n\t"\
		"movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
		"movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
		"movq (%2, %%eax,2), %%mm2 \n\t" /* uvbuf0[eax]*/\
		"movq (%3, %%eax,2), %%mm3 \n\t" /* uvbuf1[eax]*/\
		"psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
		"psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
		"pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
		"pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
		"psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>7*/\
		"movq 4096(%2, %%eax,2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
		"psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>7*/\
		"paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
		"movq 4096(%3, %%eax,2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
		"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
		"psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
		"psubw w10, %%mm1 \n\t" /* Y-16*/\
		"psubw w80, %%mm3 \n\t" /* (U-128)*/\
		"psllw $3, %%mm1 \n\t" /* (y-16)*8*/\
		"psllw $3, %%mm3 \n\t" /*(U-128)8*/\
		"pmulhw yCoeff, %%mm1 \n\t"\
\
\
		"pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
		"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
		"pmulhw ubCoeff, %%mm3 \n\t"\
		"psraw $7, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>7*/\
		"pmulhw ugCoeff, %%mm2 \n\t"\
		"paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
		"psubw w80, %%mm0 \n\t" /* (V-128)*/\
		"psllw $3, %%mm0 \n\t" /* (V-128)8*/\
\
\
		"movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
		"pmulhw vrCoeff, %%mm0 \n\t"\
		"pmulhw vgCoeff, %%mm4 \n\t"\
		"paddw %%mm1, %%mm3 \n\t" /* B*/\
		"paddw %%mm1, %%mm0 \n\t" /* R*/\
		"packuswb %%mm3, %%mm3 \n\t"\
\
		"packuswb %%mm0, %%mm0 \n\t"\
		"paddw %%mm4, %%mm2 \n\t"\
		"paddw %%mm2, %%mm1 \n\t" /* G*/\
\
		"packuswb %%mm1, %%mm1 \n\t"

YSCALEYUV2RGB
		// interleave B,G,R,0 and store 4 pixels of BGR0
		"punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
		"punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
		"movq %%mm3, %%mm1 \n\t"
		"punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
		"punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
#ifdef HAVE_MMX2
		"movntq %%mm3, (%4, %%eax, 4) \n\t"
		"movntq %%mm1, 8(%4, %%eax, 4) \n\t"
#else
		"movq %%mm3, (%4, %%eax, 4) \n\t"
		"movq %%mm1, 8(%4, %%eax, 4) \n\t"
#endif
		"addl $4, %%eax \n\t"
		"cmpl %5, %%eax \n\t"
		" jb 1b \n\t"

		:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
		"m" (yalpha1), "m" (uvalpha1)
		: "%eax"
		);
	}
	else if(dstbpp==24)
	{
		asm volatile(

YSCALEYUV2RGB

		// pack 4 pixels of BGR0 down to 12 bytes of BGR (24bpp)
		// lsb ... msb
		"punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
		"punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
		"movq %%mm3, %%mm1 \n\t"
		"punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
		"punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0

		"movq %%mm3, %%mm2 \n\t" // BGR0BGR0
		"psrlq $8, %%mm3 \n\t" // GR0BGR00
		"pand bm00000111, %%mm2 \n\t" // BGR00000
		"pand bm11111000, %%mm3 \n\t" // 000BGR00
		"por %%mm2, %%mm3 \n\t" // BGRBGR00
		"movq %%mm1, %%mm2 \n\t"
		"psllq $48, %%mm1 \n\t" // 000000BG
		"por %%mm1, %%mm3 \n\t" // BGRBGRBG

		"movq %%mm2, %%mm1 \n\t" // BGR0BGR0
		"psrld $16, %%mm2 \n\t" // R000R000
		"psrlq $24, %%mm1 \n\t" // 0BGR0000
		"por %%mm2, %%mm1 \n\t" // RBGRR000

		"movl %4, %%ebx \n\t"
		"addl %%eax, %%ebx \n\t"
#ifdef HAVE_MMX2
		//FIXME Alignment
		"movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
		"movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
#else
		"movd %%mm3, (%%ebx, %%eax, 2) \n\t"
		"psrlq $32, %%mm3 \n\t"
		"movd %%mm3, 4(%%ebx, %%eax, 2) \n\t"
		"movd %%mm1, 8(%%ebx, %%eax, 2) \n\t"
#endif
		"addl $4, %%eax \n\t"
		"cmpl %5, %%eax \n\t"
		" jb 1b \n\t"

		:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstw),
		"m" (yalpha1), "m" (uvalpha1)
		: "%eax", "%ebx"
		);
	}
	else if(dstbpp==16)
	{
		asm volatile(

YSCALEYUV2RGB
#ifdef DITHER16BPP
		"paddusb g16Dither, %%mm1 \n\t"
		"paddusb b16Dither, %%mm0 \n\t"
		"paddusb b16Dither, %%mm3 \n\t"
#endif
		// pack to RGB565
		"punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
		"punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
		"punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R

		"psrlw $3, %%mm3 \n\t"
		"psllw $3, %%mm1 \n\t"
		"psllw $8, %%mm0 \n\t"

		"pand g16Mask, %%mm1 \n\t"
		"pand r16Mask, %%mm0 \n\t"

		"por %%mm3, %%mm1 \n\t"
		"por %%mm1, %%mm0 \n\t"
#ifdef HAVE_MMX2
		"movntq %%mm0, (%4, %%eax, 2) \n\t"
#else
		"movq %%mm0, (%4, %%eax, 2) \n\t"
#endif
		"addl $4, %%eax \n\t"
		"cmpl %5, %%eax \n\t"
		" jb 1b \n\t"

		:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
		"m" (yalpha1), "m" (uvalpha1)
		: "%eax"
		);
	}
#else
	// C fallbacks: vertical blend and table-based yuv->rgb per pixel.
	if(dstbpp==32 || dstbpp==24)
	{
		for(i=0;i<dstw;i++){
			// vertical linear interpolation && yuv2rgb in a single step:
			int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>16)];
			int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>16);
			int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>16);
			dest[0]=clip_table[((Y + yuvtab_3343[U]) >>13)];
			dest[1]=clip_table[((Y + yuvtab_0c92[V] + yuvtab_1a1e[U]) >>13)];
			dest[2]=clip_table[((Y + yuvtab_40cf[V]) >>13)];
			dest+=dstbpp>>3;
		}
	}
	else if(dstbpp==16)
	{
		for(i=0;i<dstw;i++){
			// vertical linear interpolation && yuv2rgb in a single step:
			int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>16)];
			int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>16);
			int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>16);

			((uint16_t*)dest)[0] =
				(clip_table[((Y + yuvtab_3343[U]) >>13)]>>3) |
				(clip_table[((Y + yuvtab_0c92[V] + yuvtab_1a1e[U]) >>13)]<<3)&0x07E0 |
				(clip_table[((Y + yuvtab_40cf[V]) >>13)]<<8)&0xF800;
			dest+=2;
		}
	}
	else if(dstbpp==15) //15bit FIXME how do i figure out if its 15 or 16?
	{
		for(i=0;i<dstw;i++){
			// vertical linear interpolation && yuv2rgb in a single step:
			int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>16)];
			int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>16);
			int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>16);

			((uint16_t*)dest)[0] =
				(clip_table[((Y + yuvtab_3343[U]) >>13)]>>3) |
				(clip_table[((Y + yuvtab_0c92[V] + yuvtab_1a1e[U]) >>13)]<<2)&0x03E0 |
				(clip_table[((Y + yuvtab_40cf[V]) >>13)]<<7)&0x7C00;
			dest+=2;
		}
	}
#endif

	// rotate the three 16bpp dither patterns for the next output line
	b16Dither= b16Dither1;
	b16Dither1= b16Dither2;
	b16Dither2= b16Dither;

	g16Dither= g16Dither1;
	g16Dither1= g16Dither2;
	g16Dither2= g16Dither;
}

// leave MMX state so following FPU code works
#ifdef HAVE_3DNOW
	asm volatile("femms");
#elif defined (HAVE_MMX)
	asm volatile("emms");
#endif
}
  676. void SwScale_Init(){
  677. // generating tables:
  678. int i;
  679. for(i=0;i<256;i++){
  680. clip_table[i]=0;
  681. clip_table[i+256]=i;
  682. clip_table[i+512]=255;
  683. yuvtab_2568[i]=(0x2568*(i-16))+(256<<13);
  684. yuvtab_3343[i]=0x3343*(i-128);
  685. yuvtab_0c92[i]=-0x0c92*(i-128);
  686. yuvtab_1a1e[i]=-0x1a1e*(i-128);
  687. yuvtab_40cf[i]=0x40cf*(i-128);
  688. }
  689. }