You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

787 lines
22KB

  1. // Software scaling and colorspace conversion routines for MPlayer
// Original C implementation by ?
  3. // current version mostly by Michael Niedermayer (michaelni@gmx.at)
  4. #include <inttypes.h>
  5. #include "../config.h"
  6. //#undef HAVE_MMX2
  7. //#undef HAVE_MMX
  8. //#undef ARCH_X86
  9. #define DITHER16BPP
  10. #define ALT_ERROR
  11. #define RET 0xC3 //near return opcode
  12. /*
  13. NOTES
  14. known BUGS with known cause (no bugreports please!)
  15. line at the right (c,asm and mmx2)
  16. code reads 1 sample too much (might cause a sig11)
  17. TODO
check alignment of everything
  19. */
// Fixed-point YUV->RGB coefficients: one 16-bit value replicated into all
// four lanes of a 64-bit word so the MMX code can apply them with pmulhw.
static uint64_t yCoeff=  0x2568256825682568LL; // luma scale
static uint64_t ubCoeff= 0x3343334333433343LL; // U contribution to B
static uint64_t vrCoeff= 0x40cf40cf40cf40cfLL; // V contribution to R
static uint64_t ugCoeff= 0xE5E2E5E2E5E2E5E2LL; // U contribution to G (negative)
static uint64_t vgCoeff= 0xF36EF36EF36EF36ELL; // V contribution to G (negative)
static uint64_t w80= 0x0080008000800080LL;     // per-lane constant 128 (chroma bias)
static uint64_t w10= 0x0010001000100010LL;     // per-lane constant 16  (luma bias)
// byte masks used by the 24bpp MMX repacking code
static uint64_t bm00000111=0x0000000000FFFFFFLL; // keep low 3 bytes
static uint64_t bm11111000=0xFFFFFFFFFF000000LL; // keep high 5 bytes
// 16bpp ordered-dither offsets; the three values per channel are rotated
// after every output line at the bottom of SwScale_YV12slice_brg24
static uint64_t b16Dither= 0x0004000400040004LL;
static uint64_t b16Dither1=0x0004000400040004LL;
static uint64_t b16Dither2=0x0602060206020602LL;
static uint64_t g16Dither= 0x0002000200020002LL;
static uint64_t g16Dither1=0x0002000200020002LL;
static uint64_t g16Dither2=0x0301030103010301LL;
// RGB565 per-channel masks (replicated into all four 16-bit lanes)
static uint64_t b16Mask=   0x001F001F001F001FLL;
static uint64_t g16Mask=   0x07E007E007E007E0LL;
static uint64_t r16Mask=   0xF800F800F800F800LL;
static uint64_t temp0; // scratch slot used by the MMX horizontal scalers
// temporary storage for 4 yuv lines:
// 16bit for now (mmx likes it more compact)
static uint16_t pix_buf_y[4][2048];
static uint16_t pix_buf_uv[2][2048*2];
// clipping helper table for C implementations:
// [0..255]=0, [256..511]=identity, [512..767]=255 (filled by SwScale_Init)
static unsigned char clip_table[768];
// yuv->rgb conversion tables (fixed point <<13, filled by SwScale_Init):
static int yuvtab_2568[256];
static int yuvtab_3343[256];
static int yuvtab_0c92[256];
static int yuvtab_1a1e[256];
static int yuvtab_40cf[256];
// buffers holding runtime-generated ("funny") horizontal scaling code
// for the MMX2 path; filled on first call / size change, called from asm
static uint8_t funnyYCode[10000];
static uint8_t funnyUVCode[10000];
// *** bilinear scaling and yuv->rgb conversion of yv12 slices:
// *** Note: it's called multiple times while decoding a frame, first time y==0
// *** Designed to upscale, but may work for downscale too.
// s_xinc = (src_width << 8) / dst_width
// s_yinc = (src_height << 16) / dst_height
//
// Parameters:
//   srcptr[0..2] - Y, U, V source planes of the current slice (YV12)
//   stride[0..2] - byte strides of those planes
//   y, h         - first source luma line of this slice and the slice height
//   dstptr       - destination buffer, dststride bytes per output line
//   dstw, dstbpp - output width in pixels / bits per pixel (32, 24, 16; 15 in C path)
//   s_xinc,s_yinc- scale factors per the formulas above
//
// NOTE(review): the static locals below carry per-frame state between calls,
// so this routine supports only one scaling context at a time (not reentrant).
void SwScale_YV12slice_brg24(unsigned char* srcptr[],int stride[], int y, int h,
		unsigned char* dstptr, int dststride, int dstw, int dstbpp,
		unsigned int s_xinc,unsigned int s_yinc){
	// scaling factors:
	//static int s_yinc=(vo_dga_src_height<<16)/vo_dga_vp_height;
	//static int s_xinc=(vo_dga_src_width<<8)/vo_dga_vp_width;

	unsigned int s_xinc2; // chroma x increment (half of luma: planes are half width)

	static int s_srcypos; // points to the dst Pixels center in the source (0 is the center of pixel 0,0 in src)
	static int s_ypos;    // current output line

	// last horizontally interpolated lines, used to avoid unnecessary calculations
	static int s_last_ypos;
	static int s_last_y1pos;

	static int static_dstw;

#ifdef HAVE_MMX2
	// used to detect a horizontal size change
	static int old_dstw= -1;
	static int old_s_xinc= -1;

	// difference between the requested xinc and the required one for the mmx2 routine
	static int s_xinc_diff=0;
	static int s_xinc2_diff=0;
#endif
	int canMMX2BeUsed;

	// we need that precision at least for the mmx2 code
	s_xinc*= 256;
	s_xinc2=s_xinc>>1;

	// mmx2 path needs upscaling (xinc <= 1.0 in 16.16) and dstw divisible by 32
	canMMX2BeUsed= (s_xinc <= 0x10000 && (dstw&31)==0) ? 1 : 0;

	if(y==0){ // first slice of the frame: reset counters
		s_srcypos= s_yinc/2 - 0x8000;
		s_ypos=0;
		// force calculation of the horizontal interpolation of the first line
		s_last_ypos=-99;
		s_last_y1pos=-99;
#ifdef HAVE_MMX2
	// cant downscale !!!
	if((old_s_xinc != s_xinc || old_dstw!=dstw) && canMMX2BeUsed)
	{
		uint8_t *fragment;    // address of the template code fragment below
		int imm8OfPShufW1;    // offset of the 1st pshufw immediate in the fragment
		int imm8OfPShufW2;    // offset of the 2nd pshufw immediate
		int fragmentLength;   // fragment size in bytes

		int xpos, xx, xalpha, i;

		old_s_xinc= s_xinc;
		old_dstw= dstw;

		static_dstw= dstw;

		// create an optimized horizontal scaling routine:
		// the asm below only *defines* a template fragment (jumped over at
		// runtime) and reports its address, length and the offsets of its
		// two pshufw immediates; copies of it are patched per position and
		// written into funnyYCode/funnyUVCode, terminated with RET.

		//code fragment

		asm volatile(
			"jmp 9f \n\t"
		// Begin
			"0: \n\t"
			"movq (%%esi), %%mm0 \n\t" //FIXME Alignment
			"movq %%mm0, %%mm1 \n\t"
			"psrlq $8, %%mm0 \n\t"
			"punpcklbw %%mm7, %%mm1 \n\t"
			"movq %%mm2, %%mm3 \n\t"
			"punpcklbw %%mm7, %%mm0 \n\t"
			"addw %%bx, %%cx \n\t" //2*xalpha += (4*s_xinc)&0xFFFF
			"pshufw $0xFF, %%mm1, %%mm1 \n\t"
			"1: \n\t"
			"adcl %%edx, %%esi \n\t" //xx+= (4*s_xinc)>>16 + carry
			"pshufw $0xFF, %%mm0, %%mm0 \n\t"
			"2: \n\t"
			"psrlw $9, %%mm3 \n\t"
			"psubw %%mm1, %%mm0 \n\t"
			"pmullw %%mm3, %%mm0 \n\t"
			"paddw %%mm6, %%mm2 \n\t" // 2*alpha += xpos&0xFFFF
			"psllw $7, %%mm1 \n\t"
			"paddw %%mm1, %%mm0 \n\t"
			"movq %%mm0, (%%edi, %%eax) \n\t"

			"addl $8, %%eax \n\t"
		// End
			"9: \n\t"
//			"int $3\n\t"
			// compute fragment address/length and the immediates' offsets
			"leal 0b, %0 \n\t"
			"leal 1b, %1 \n\t"
			"leal 2b, %2 \n\t"
			"decl %1 \n\t"
			"decl %2 \n\t"
			"subl %0, %1 \n\t"
			"subl %0, %2 \n\t"
			"leal 9b, %3 \n\t"
			"subl %0, %3 \n\t"
			:"=r" (fragment), "=r" (imm8OfPShufW1), "=r" (imm8OfPShufW2),
			"=r" (fragmentLength)
		);

		xpos= xx=xalpha= 0;
		/* choose xinc so that all 8 parts fit exactly
		   Note: we cannot use just 1 part because it would not fit in the code cache */
		s_xinc2_diff= -((((s_xinc2*(dstw/8))&0xFFFF))/(dstw/8))+10;
//		s_xinc_diff= -((((s_xinc*(dstw/8))&0xFFFF))/(dstw/8));
#ifdef ALT_ERROR
		s_xinc2_diff+= ((0x10000/(dstw/8)));
#endif
		s_xinc_diff= s_xinc2_diff*2;
		s_xinc2+= s_xinc2_diff;
		s_xinc+= s_xinc_diff;

		// generate the luma scaler: one patched fragment copy per 4 output
		// pixels; a RET opcode is placed after the last copy of each group
		for(i=0; i<dstw/8; i++)
		{
			int xx=xpos>>16;

			if((i&3) == 0)
			{
				int a=0;
				int b=((xpos+s_xinc)>>16) - xx;
				int c=((xpos+s_xinc*2)>>16) - xx;
				int d=((xpos+s_xinc*3)>>16) - xx;

				memcpy(funnyYCode + fragmentLength*i/4, fragment, fragmentLength);

				funnyYCode[fragmentLength*i/4 + imm8OfPShufW1]=
				funnyYCode[fragmentLength*i/4 + imm8OfPShufW2]=
					a | (b<<2) | (c<<4) | (d<<6);

				funnyYCode[fragmentLength*(i+4)/4]= RET;
			}
			xpos+=s_xinc;
		}

		xpos= xx=xalpha= 0;
		// same generation for the chroma scaler, with the halved increment
		//FIXME choose size and or xinc so that they fit exactly
		for(i=0; i<dstw/8; i++)
		{
			int xx=xpos>>16;

			if((i&3) == 0)
			{
				int a=0;
				int b=((xpos+s_xinc2)>>16) - xx;
				int c=((xpos+s_xinc2*2)>>16) - xx;
				int d=((xpos+s_xinc2*3)>>16) - xx;

				memcpy(funnyUVCode + fragmentLength*i/4, fragment, fragmentLength);

				funnyUVCode[fragmentLength*i/4 + imm8OfPShufW1]=
				funnyUVCode[fragmentLength*i/4 + imm8OfPShufW2]=
					a | (b<<2) | (c<<4) | (d<<6);

				funnyUVCode[fragmentLength*(i+4)/4]= RET;
			}
			xpos+=s_xinc2;
		}
//		funnyCode[0]= RET;
	}

	if(canMMX2BeUsed)
	{
		s_xinc+= s_xinc_diff;
		s_xinc2+= s_xinc2_diff;
	}
#endif // HAVE_MMX2
	} // reset counters

	// main loop: produce one destination line per iteration
	while(1){
		unsigned char *dest=dstptr+dststride*s_ypos;
		int y0=(s_srcypos + 0xFFFF)>>16; // first luminance source line number below the dst line
		// points to the dst Pixels center in the source (0 is the center of pixel 0,0 in src)
		int srcuvpos= s_srcypos + s_yinc/2 - 0x8000;
		int y1=(srcuvpos + 0x1FFFF)>>17; // first chrominance source line number below the dst line
		int yalpha=(s_srcypos&0xFFFF)>>7;   // 9 bit vertical blend factor (luma)
		int yalpha1=yalpha^511;
		int uvalpha=(srcuvpos&0x1FFFF)>>8;  // 9 bit vertical blend factor (chroma)
		int uvalpha1=uvalpha^511;
		uint16_t *buf0=pix_buf_y[y0&1];         // top line of the interpolated slice
		uint16_t *buf1=pix_buf_y[((y0+1)&1)];   // bottom line of the interpolated slice
		uint16_t *uvbuf0=pix_buf_uv[y1&1];      // top line of the interpolated slice
		uint16_t *uvbuf1=pix_buf_uv[(y1+1)&1];  // bottom line of the interpolated slice
		int i;

		// if this is before the first line then use only the first src line
		if(y0==0) buf0= buf1;
		if(y1==0) uvbuf0= uvbuf1; // yes we do have to check this, its not the same as y0==0

		if(y0>=y+h) break; // FIXME wrong, skips last lines, but they are duplicates anyway

		// if this is after the last line then use only the last src line
		if(y0>=y+h)
		{
			buf1= buf0;
			s_last_ypos=y0;
		}
		if(y1>=(y+h)/2)
		{
			uvbuf1= uvbuf0;
			s_last_y1pos=y1;
		}

		s_ypos++; s_srcypos+=s_yinc;

		//only interpolate the src line horizontally if we didnt do it already
		if(s_last_ypos!=y0){
			unsigned char *src=srcptr[0]+(y0-y)*stride[0];
			unsigned int xpos=0;
			s_last_ypos=y0;
			// *** horizontal scale Y line to temp buffer
#ifdef ARCH_X86
#ifdef HAVE_MMX2
			if(canMMX2BeUsed)
			{
				asm volatile(
					"pxor %%mm7, %%mm7 \n\t"
					"pxor %%mm2, %%mm2 \n\t" // 2*xalpha
					"movd %5, %%mm6 \n\t" // s_xinc&0xFFFF
					"punpcklwd %%mm6, %%mm6 \n\t"
					"punpcklwd %%mm6, %%mm6 \n\t"
					"movq %%mm6, %%mm2 \n\t"
					"psllq $16, %%mm2 \n\t"
					"paddw %%mm6, %%mm2 \n\t"
					"psllq $16, %%mm2 \n\t"
					"paddw %%mm6, %%mm2 \n\t"
					"psllq $16, %%mm2 \n\t" //0,t,2t,3t t=s_xinc&0xFFFF
					"movq %%mm2, temp0 \n\t"
					"movd %4, %%mm6 \n\t" //(s_xinc*4)&0xFFFF
					"punpcklwd %%mm6, %%mm6 \n\t"
					"punpcklwd %%mm6, %%mm6 \n\t"
					"xorl %%eax, %%eax \n\t" // i
					"movl %0, %%esi \n\t" // src
					"movl %1, %%edi \n\t" // buf1
					"movl %3, %%edx \n\t" // (s_xinc*4)>>16
					"xorl %%ecx, %%ecx \n\t"
					"xorl %%ebx, %%ebx \n\t"
					"movw %4, %%bx \n\t" // (s_xinc*4)&0xFFFF
//					"int $3\n\t"
					// the generated routine handles dstw/8 pixels per call;
					// 8 calls (resetting ecx = 2*xalpha between them) cover
					// the whole line
					"call funnyYCode \n\t"
					"movq temp0, %%mm2 \n\t"
					"xorl %%ecx, %%ecx \n\t"
					"call funnyYCode \n\t"
					"movq temp0, %%mm2 \n\t"
					"xorl %%ecx, %%ecx \n\t"
					"call funnyYCode \n\t"
					"movq temp0, %%mm2 \n\t"
					"xorl %%ecx, %%ecx \n\t"
					"call funnyYCode \n\t"
					"movq temp0, %%mm2 \n\t"
					"xorl %%ecx, %%ecx \n\t"
					"call funnyYCode \n\t"
					"movq temp0, %%mm2 \n\t"
					"xorl %%ecx, %%ecx \n\t"
					"call funnyYCode \n\t"
					"movq temp0, %%mm2 \n\t"
					"xorl %%ecx, %%ecx \n\t"
					"call funnyYCode \n\t"
					"movq temp0, %%mm2 \n\t"
					"xorl %%ecx, %%ecx \n\t"
					"call funnyYCode \n\t"
					:: "m" (src), "m" (buf1), "m" (dstw), "m" ((s_xinc*4)>>16),
					"m" ((s_xinc*4)&0xFFFF), "m" (s_xinc&0xFFFF)
					: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
				);
			}
			else
			{
#endif
			//NO MMX just normal asm ... FIXME try/write funny MMX2 variant
			//FIXME add prefetch
			asm volatile(
				"xorl %%eax, %%eax \n\t" // i
				"xorl %%ebx, %%ebx \n\t" // xx
				"xorl %%ecx, %%ecx \n\t" // 2*xalpha
				"1: \n\t"
				// two output pixels per iteration (unrolled x2)
				"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
				"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
				"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
				"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
				"shll $16, %%edi \n\t"
				"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
				"movl %1, %%edi \n\t"
				"shrl $9, %%esi \n\t"
				"movw %%si, (%%edi, %%eax, 2) \n\t"
				"addw %4, %%cx \n\t" //2*xalpha += s_xinc&0xFF
				"adcl %3, %%ebx \n\t" //xx+= s_xinc>>8 + carry
				"movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
				"movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
				"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
				"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
				"shll $16, %%edi \n\t"
				"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
				"movl %1, %%edi \n\t"
				"shrl $9, %%esi \n\t"
				"movw %%si, 2(%%edi, %%eax, 2) \n\t"
				"addw %4, %%cx \n\t" //2*xalpha += s_xinc&0xFF
				"adcl %3, %%ebx \n\t" //xx+= s_xinc>>8 + carry
				"addl $2, %%eax \n\t"
				"cmpl %2, %%eax \n\t"
				" jb 1b \n\t"
				:: "r" (src), "m" (buf1), "m" (dstw), "m" (s_xinc>>16), "m" (s_xinc&0xFFFF)
				: "%eax", "%ebx", "%ecx", "%edi", "%esi"
			);
#ifdef HAVE_MMX2
			} //if MMX2 cant be used
#endif
#else
			// plain C fallback: 7 bit linear blend of two neighbouring samples
			for(i=0;i<dstw;i++){
				register unsigned int xx=xpos>>16;
				register unsigned int xalpha=(xpos&0xFFFF)>>9;
				buf1[i]=(src[xx]*(xalpha^127)+src[xx+1]*xalpha);
				xpos+=s_xinc;
			}
#endif
		}

		// *** horizontal scale U and V lines to temp buffer
		if(s_last_y1pos!=y1){
			unsigned char *src1=srcptr[1]+(y1-y/2)*stride[1];
			unsigned char *src2=srcptr[2]+(y1-y/2)*stride[2];
			int xpos=0;
			s_last_y1pos= y1;
#ifdef ARCH_X86
#ifdef HAVE_MMX2
			if(canMMX2BeUsed)
			{
				asm volatile(
					"pxor %%mm7, %%mm7 \n\t"
					"pxor %%mm2, %%mm2 \n\t" // 2*xalpha
					"movd %5, %%mm6 \n\t" // s_xinc&0xFFFF
					"punpcklwd %%mm6, %%mm6 \n\t"
					"punpcklwd %%mm6, %%mm6 \n\t"
					"movq %%mm6, %%mm2 \n\t"
					"psllq $16, %%mm2 \n\t"
					"paddw %%mm6, %%mm2 \n\t"
					"psllq $16, %%mm2 \n\t"
					"paddw %%mm6, %%mm2 \n\t"
					"psllq $16, %%mm2 \n\t" //0,t,2t,3t t=s_xinc&0xFFFF
					"movq %%mm2, temp0 \n\t"
					"movd %4, %%mm6 \n\t" //(s_xinc*4)&0xFFFF
					"punpcklwd %%mm6, %%mm6 \n\t"
					"punpcklwd %%mm6, %%mm6 \n\t"
					"xorl %%eax, %%eax \n\t" // i
					"movl %0, %%esi \n\t" // src
					"movl %1, %%edi \n\t" // buf1
					"movl %3, %%edx \n\t" // (s_xinc*4)>>16
					"xorl %%ecx, %%ecx \n\t"
					"xorl %%ebx, %%ebx \n\t"
					"movw %4, %%bx \n\t" // (s_xinc*4)&0xFFFF
//					"int $3\n\t"

// one call of the generated chroma scaler + reload of the alpha-ramp/counter
#define FUNNYUVCODE \
					"call funnyUVCode \n\t"\
					"movq temp0, %%mm2 \n\t"\
					"xorl %%ecx, %%ecx \n\t"

// U plane -> first half of uvbuf1
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE

FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE

					"xorl %%eax, %%eax \n\t" // i
					"movl %6, %%esi \n\t" // src
					"movl %1, %%edi \n\t" // buf1
					"addl $4096, %%edi \n\t"

// V plane -> second half of uvbuf1 (4096 bytes = 2048 uint16 further in)
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE

FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE
FUNNYUVCODE

					:: "m" (src1), "m" (uvbuf1), "m" (dstw), "m" ((s_xinc2*4)>>16),
					"m" ((s_xinc2*4)&0xFFFF), "m" (s_xinc2&0xFFFF), "m" (src2)
					: "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
				);
			}
			else
			{
#endif
			asm volatile(
				"xorl %%eax, %%eax \n\t" // i
				"xorl %%ebx, %%ebx \n\t" // xx
				"xorl %%ecx, %%ecx \n\t" // 2*xalpha
				"1: \n\t"
				// U sample
				"movl %0, %%esi \n\t"
				"movzbl (%%esi, %%ebx), %%edi \n\t" //src[xx]
				"movzbl 1(%%esi, %%ebx), %%esi \n\t" //src[xx+1]
				"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
				"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
				"shll $16, %%edi \n\t"
				"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
				"movl %1, %%edi \n\t"
				"shrl $9, %%esi \n\t"
				"movw %%si, (%%edi, %%eax, 2) \n\t"
				// V sample, stored 4096 bytes further into the buffer
				"movzbl (%5, %%ebx), %%edi \n\t" //src[xx]
				"movzbl 1(%5, %%ebx), %%esi \n\t" //src[xx+1]
				"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
				"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
				"shll $16, %%edi \n\t"
				"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
				"movl %1, %%edi \n\t"
				"shrl $9, %%esi \n\t"
				"movw %%si, 4096(%%edi, %%eax, 2)\n\t"
				"addw %4, %%cx \n\t" //2*xalpha += s_xinc&0xFF
				"adcl %3, %%ebx \n\t" //xx+= s_xinc>>8 + carry
				"addl $1, %%eax \n\t"
				"cmpl %2, %%eax \n\t"
				" jb 1b \n\t"
				:: "m" (src1), "m" (uvbuf1), "m" (dstw), "m" (s_xinc2>>16), "m" (s_xinc2&0xFFFF),
				"r" (src2)
				: "%eax", "%ebx", "%ecx", "%edi", "%esi"
			);
#ifdef HAVE_MMX2
			} //if MMX2 cant be used
#endif
#else
			// plain C fallback: interleave U and V halves of uvbuf1
			for(i=0;i<dstw;i++){
				register unsigned int xx=xpos>>16;
				register unsigned int xalpha=(xpos&0xFFFF)>>9;
				uvbuf1[i]=(src1[xx]*(xalpha^127)+src1[xx+1]*xalpha);
				uvbuf1[i+2048]=(src2[xx]*(xalpha^127)+src2[xx+1]*xalpha);
				xpos+=s_xinc2;
			}
#endif
		}

	// Note1: this code can be restricted to n*8 (or n*16) width lines to simplify optimization...
	// Re: Note1: ok n*4 for now
	// Note2: instead of using lookup tabs, mmx version could do the multiply...
	// Re: Note2: yep
	// Note3: maybe we should make separated 15/16, 24 and 32bpp version of this:
	// Re: done (32 & 16) and 16 has dithering :) but 16 is untested
#ifdef HAVE_MMX
	//FIXME write lq version with less uv ...
	//FIXME reorder / optimize
	if(dstbpp == 32)
	{
		asm volatile(

// Shared MMX body: vertical blend of the two prescaled Y lines (buf0/buf1)
// and UV lines (uvbuf0/uvbuf1), then YUV->RGB; on exit mm3/mm1/mm0 hold
// packed B/G/R bytes for 4 pixels; loops on %%eax via the "1:" label.
#define YSCALEYUV2RGB \
		"pxor %%mm7, %%mm7 \n\t"\
		"movd %6, %%mm6 \n\t" /*yalpha1*/\
		"punpcklwd %%mm6, %%mm6 \n\t"\
		"punpcklwd %%mm6, %%mm6 \n\t"\
		"movd %7, %%mm5 \n\t" /*uvalpha1*/\
		"punpcklwd %%mm5, %%mm5 \n\t"\
		"punpcklwd %%mm5, %%mm5 \n\t"\
		"xorl %%eax, %%eax \n\t"\
		"1: \n\t"\
		"movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
		"movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
		"movq (%2, %%eax,2), %%mm2 \n\t" /* uvbuf0[eax]*/\
		"movq (%3, %%eax,2), %%mm3 \n\t" /* uvbuf1[eax]*/\
		"psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
		"psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
		"pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
		"pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
		"psraw $7, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>7*/\
		"movq 4096(%2, %%eax,2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
		"psraw $7, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>7*/\
		"paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
		"movq 4096(%3, %%eax,2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
		"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
		"psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
		"psubw w10, %%mm1 \n\t" /* Y-16*/\
		"psubw w80, %%mm3 \n\t" /* (U-128)*/\
		"psllw $3, %%mm1 \n\t" /* (y-16)*8*/\
		"psllw $3, %%mm3 \n\t" /*(U-128)8*/\
		"pmulhw yCoeff, %%mm1 \n\t"\
\
\
		"pmulhw %%mm5, %%mm4 \n\t" /* (uvbuf0[eax+2048] - uvbuf1[eax+2048])uvalpha1>>16*/\
		"movq %%mm3, %%mm2 \n\t" /* (U-128)8*/\
		"pmulhw ubCoeff, %%mm3 \n\t"\
		"psraw $7, %%mm0 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048] >>7*/\
		"pmulhw ugCoeff, %%mm2 \n\t"\
		"paddw %%mm4, %%mm0 \n\t" /* uvbuf0[eax+2048]uvalpha1 - uvbuf1[eax+2048](1-uvalpha1)*/\
		"psubw w80, %%mm0 \n\t" /* (V-128)*/\
		"psllw $3, %%mm0 \n\t" /* (V-128)8*/\
\
\
		"movq %%mm0, %%mm4 \n\t" /* (V-128)8*/\
		"pmulhw vrCoeff, %%mm0 \n\t"\
		"pmulhw vgCoeff, %%mm4 \n\t"\
		"paddw %%mm1, %%mm3 \n\t" /* B*/\
		"paddw %%mm1, %%mm0 \n\t" /* R*/\
		"packuswb %%mm3, %%mm3 \n\t"\
\
		"packuswb %%mm0, %%mm0 \n\t"\
		"paddw %%mm4, %%mm2 \n\t"\
		"paddw %%mm2, %%mm1 \n\t" /* G*/\
\
		"packuswb %%mm1, %%mm1 \n\t"
YSCALEYUV2RGB
		// interleave to BGR0 dwords and store 4 pixels
		"punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
		"punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0
		"movq %%mm3, %%mm1 \n\t"
		"punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
		"punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
#ifdef HAVE_MMX2
		"movntq %%mm3, (%4, %%eax, 4) \n\t"
		"movntq %%mm1, 8(%4, %%eax, 4) \n\t"
#else
		"movq %%mm3, (%4, %%eax, 4) \n\t"
		"movq %%mm1, 8(%4, %%eax, 4) \n\t"
#endif
		"addl $4, %%eax \n\t"
		"cmpl %5, %%eax \n\t"
		" jb 1b \n\t"

		:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
		"m" (yalpha1), "m" (uvalpha1)
		: "%eax"
		);
	}
	else if(dstbpp==24)
	{
		asm volatile(
		YSCALEYUV2RGB

		// pack to tightly-packed BGR byte triplets
		// lsb ... msb
		"punpcklbw %%mm1, %%mm3 \n\t" // BGBGBGBG
		"punpcklbw %%mm7, %%mm0 \n\t" // R0R0R0R0

		"movq %%mm3, %%mm1 \n\t"
		"punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
		"punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0

		"movq %%mm3, %%mm2 \n\t" // BGR0BGR0
		"psrlq $8, %%mm3 \n\t" // GR0BGR00
		"pand bm00000111, %%mm2 \n\t" // BGR00000
		"pand bm11111000, %%mm3 \n\t" // 000BGR00
		"por %%mm2, %%mm3 \n\t" // BGRBGR00
		"movq %%mm1, %%mm2 \n\t"
		"psllq $48, %%mm1 \n\t" // 000000BG
		"por %%mm1, %%mm3 \n\t" // BGRBGRBG

		"movq %%mm2, %%mm1 \n\t" // BGR0BGR0
		"psrld $16, %%mm2 \n\t" // R000R000
		"psrlq $24, %%mm1 \n\t" // 0BGR0000
		"por %%mm2, %%mm1 \n\t" // RBGRR000

		// dest + 3*eax (= dest + eax, then scaled-index *2 in the stores)
		"movl %4, %%ebx \n\t"
		"addl %%eax, %%ebx \n\t"
#ifdef HAVE_MMX2
		//FIXME Alignment
		"movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
		"movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
#else
		"movd %%mm3, (%%ebx, %%eax, 2) \n\t"
		"psrlq $32, %%mm3 \n\t"
		"movd %%mm3, 4(%%ebx, %%eax, 2) \n\t"
		"movd %%mm1, 8(%%ebx, %%eax, 2) \n\t"
#endif
		"addl $4, %%eax \n\t"
		"cmpl %5, %%eax \n\t"
		" jb 1b \n\t"

		:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstw),
		"m" (yalpha1), "m" (uvalpha1)
		: "%eax", "%ebx"
		);
	}
	else if(dstbpp==16)
	{
		asm volatile(
		YSCALEYUV2RGB
#ifdef DITHER16BPP
		"paddusb g16Dither, %%mm1 \n\t"
		"paddusb b16Dither, %%mm0 \n\t"
		"paddusb b16Dither, %%mm3 \n\t"
#endif
		// pack to RGB565
		"punpcklbw %%mm7, %%mm1 \n\t" // 0G0G0G0G
		"punpcklbw %%mm7, %%mm3 \n\t" // 0B0B0B0B
		"punpcklbw %%mm7, %%mm0 \n\t" // 0R0R0R0R

		"psrlw $3, %%mm3 \n\t"
		"psllw $3, %%mm1 \n\t"
		"psllw $8, %%mm0 \n\t"

		"pand g16Mask, %%mm1 \n\t"
		"pand r16Mask, %%mm0 \n\t"

		"por %%mm3, %%mm1 \n\t"
		"por %%mm1, %%mm0 \n\t"
#ifdef HAVE_MMX2
		"movntq %%mm0, (%4, %%eax, 2) \n\t"
#else
		"movq %%mm0, (%4, %%eax, 2) \n\t"
#endif
		"addl $4, %%eax \n\t"
		"cmpl %5, %%eax \n\t"
		" jb 1b \n\t"

		:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstw),
		"m" (yalpha1), "m" (uvalpha1)
		: "%eax"
		);
	}
#else
	// plain C versions of the vertical blend + yuv2rgb step
	if(dstbpp==32 || dstbpp==24)
	{
		for(i=0;i<dstw;i++){
			// vertical linear interpolation && yuv2rgb in a single step:
			int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>16)];
			int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>16);
			int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>16);
			dest[0]=clip_table[((Y + yuvtab_3343[U]) >>13)];
			dest[1]=clip_table[((Y + yuvtab_0c92[V] + yuvtab_1a1e[U]) >>13)];
			dest[2]=clip_table[((Y + yuvtab_40cf[V]) >>13)];
			dest+=dstbpp>>3;
		}
	}
	else if(dstbpp==16)
	{
		for(i=0;i<dstw;i++){
			// vertical linear interpolation && yuv2rgb in a single step:
			int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>16)];
			int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>16);
			int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>16);

			((uint16_t*)dest)[0] =
				(clip_table[((Y + yuvtab_3343[U]) >>13)]>>3) |
				(clip_table[((Y + yuvtab_0c92[V] + yuvtab_1a1e[U]) >>13)]<<3)&0x07E0 |
				(clip_table[((Y + yuvtab_40cf[V]) >>13)]<<8)&0xF800;
			dest+=2;
		}
	}
	else if(dstbpp==15) //15bit FIXME how do i figure out if its 15 or 16?
	{
		for(i=0;i<dstw;i++){
			// vertical linear interpolation && yuv2rgb in a single step:
			int Y=yuvtab_2568[((buf0[i]*yalpha1+buf1[i]*yalpha)>>16)];
			int U=((uvbuf0[i]*uvalpha1+uvbuf1[i]*uvalpha)>>16);
			int V=((uvbuf0[i+2048]*uvalpha1+uvbuf1[i+2048]*uvalpha)>>16);

			((uint16_t*)dest)[0] =
				(clip_table[((Y + yuvtab_3343[U]) >>13)]>>3) |
				(clip_table[((Y + yuvtab_0c92[V] + yuvtab_1a1e[U]) >>13)]<<2)&0x03E0 |
				(clip_table[((Y + yuvtab_40cf[V]) >>13)]<<7)&0x7C00;
			dest+=2;
		}
	}
#endif

	// rotate the 16bpp dither constants for the next output line
	b16Dither= b16Dither1;
	b16Dither1= b16Dither2;
	b16Dither2= b16Dither;

	g16Dither= g16Dither1;
	g16Dither1= g16Dither2;
	g16Dither2= g16Dither;
	}

	// leave MMX state so following FPU code works
#ifdef HAVE_3DNOW
	asm volatile("femms");
#elif defined (HAVE_MMX)
	asm volatile("emms");
#endif
}
  670. void SwScale_Init(){
  671. // generating tables:
  672. int i;
  673. for(i=0;i<256;i++){
  674. clip_table[i]=0;
  675. clip_table[i+256]=i;
  676. clip_table[i+512]=255;
  677. yuvtab_2568[i]=(0x2568*(i-16))+(256<<13);
  678. yuvtab_3343[i]=0x3343*(i-128);
  679. yuvtab_0c92[i]=-0x0c92*(i-128);
  680. yuvtab_1a1e[i]=-0x1a1e*(i-128);
  681. yuvtab_40cf[i]=0x40cf*(i-128);
  682. }
  683. }