// Software scaling and colorspace conversion routines for MPlayer
// Original C implementation by A'rpi/ESP-team <arpi@thot.banki.hu>
// current version mostly by Michael Niedermayer (michaelni@gmx.at)
// the parts written by michael are under GNU GPL
/*
supported input formats: YV12 (grayscale soon too)
supported output formats: YV12, BGR15, BGR16, BGR24, BGR32 (grayscale soon too)
*/
#include <inttypes.h>
#include <string.h>
#include <math.h>
#include <stdio.h>
#include "../config.h"
#include "../mangle.h"
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#include "swscale.h"
#include "../cpudetect.h"
#include "../libvo/img_format.h"

#undef MOVNTQ
#undef PAVGB

//#undef HAVE_MMX2
//#define HAVE_3DNOW
//#undef HAVE_MMX
//#undef ARCH_X86
#define DITHER1XBPP

#define RET 0xC3 // near return opcode

#ifdef MP_DEBUG
#define ASSERT(x) if(!(x)) { printf("ASSERT " #x " failed\n"); *((int*)0)=0; }
#else
#define ASSERT(x) ;
#endif

#ifdef M_PI
#define PI M_PI
#else
#define PI 3.14159265358979323846
#endif
extern int verbose; // defined in mplayer.c

/*
NOTES

known BUGS with known cause (no bug reports please, but patches are welcome :) )
the horizontal fast_bilinear MMX2 scaler reads 1-7 samples too much (might cause a sig11)

supported output formats: BGR15, BGR16, BGR24, BGR32, YV12
the BGR15 & BGR16 MMX versions support dithering
special versions: fast Y 1:1 scaling (no interpolation in y direction)

TODO
more intelligent misalignment avoidance for the horizontal scaler
dither in C
change the distance of the u & v buffer
move static / global vars into a struct so multiple scalers can be used
write a special vertical cubic upscale version
optimize C code (yv12 / minmax)
*/
#define ABS(a) ((a) > 0 ? (a) : (-(a)))
#define MIN(a,b) ((a) > (b) ? (b) : (a))
#define MAX(a,b) ((a) < (b) ? (b) : (a))

#ifdef ARCH_X86
#define CAN_COMPILE_X86_ASM
#endif

#ifdef CAN_COMPILE_X86_ASM
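// Each 64-bit constant below repeats one 16-bit word (or a byte/mask pattern)
// across the whole register, so a single MMX operation applies it to four
// pixels at once.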
static uint64_t __attribute__((aligned(8))) yCoeff=    0x2568256825682568LL;
static uint64_t __attribute__((aligned(8))) vrCoeff=   0x3343334333433343LL;
static uint64_t __attribute__((aligned(8))) ubCoeff=   0x40cf40cf40cf40cfLL;
static uint64_t __attribute__((aligned(8))) vgCoeff=   0xE5E2E5E2E5E2E5E2LL;
static uint64_t __attribute__((aligned(8))) ugCoeff=   0xF36EF36EF36EF36ELL;
static uint64_t __attribute__((aligned(8))) bF8=       0xF8F8F8F8F8F8F8F8LL;
static uint64_t __attribute__((aligned(8))) bFC=       0xFCFCFCFCFCFCFCFCLL;
static uint64_t __attribute__((aligned(8))) w400=      0x0400040004000400LL;
static uint64_t __attribute__((aligned(8))) w80=       0x0080008000800080LL;
static uint64_t __attribute__((aligned(8))) w10=       0x0010001000100010LL;
static uint64_t __attribute__((aligned(8))) w02=       0x0002000200020002LL;
static uint64_t __attribute__((aligned(8))) bm00001111=0x00000000FFFFFFFFLL;
static uint64_t __attribute__((aligned(8))) bm00000111=0x0000000000FFFFFFLL;
static uint64_t __attribute__((aligned(8))) bm11111000=0xFFFFFFFFFF000000LL;

static volatile uint64_t __attribute__((aligned(8))) b5Dither;
static volatile uint64_t __attribute__((aligned(8))) g5Dither;
static volatile uint64_t __attribute__((aligned(8))) g6Dither;
static volatile uint64_t __attribute__((aligned(8))) r5Dither;

static uint64_t __attribute__((aligned(8))) dither4[2]={
    0x0103010301030103LL,
    0x0200020002000200LL,};

static uint64_t __attribute__((aligned(8))) dither8[2]={
    0x0602060206020602LL,
    0x0004000400040004LL,};

static uint64_t __attribute__((aligned(8))) b16Mask=   0x001F001F001F001FLL;
static uint64_t __attribute__((aligned(8))) g16Mask=   0x07E007E007E007E0LL;
static uint64_t __attribute__((aligned(8))) r16Mask=   0xF800F800F800F800LL;
static uint64_t __attribute__((aligned(8))) b15Mask=   0x001F001F001F001FLL;
static uint64_t __attribute__((aligned(8))) g15Mask=   0x03E003E003E003E0LL;
static uint64_t __attribute__((aligned(8))) r15Mask=   0x7C007C007C007C00LL;

static uint64_t __attribute__((aligned(8))) M24A=      0x00FF0000FF0000FFLL;
static uint64_t __attribute__((aligned(8))) M24B=      0xFF0000FF0000FF00LL;
static uint64_t __attribute__((aligned(8))) M24C=      0x0000FF0000FF0000LL;

// FIXME remove
static uint64_t __attribute__((aligned(8))) asm_yalpha1;
static uint64_t __attribute__((aligned(8))) asm_uvalpha1;
#endif
// clipping helper table for C implementations:
static unsigned char clip_table[768];

static unsigned short clip_table16b[768];
static unsigned short clip_table16g[768];
static unsigned short clip_table16r[768];
static unsigned short clip_table15b[768];
static unsigned short clip_table15g[768];
static unsigned short clip_table15r[768];
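// All 768-entry tables take an index biased by +256, so intermediate values
// in [-256, 511] can be clipped to [0, 255] without branches (see globalInit()).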
// yuv->rgb conversion tables:
static int yuvtab_2568[256];
static int yuvtab_3343[256];
static int yuvtab_0c92[256];
static int yuvtab_1a1e[256];
static int yuvtab_40cf[256];
// needed for the cubic scaler to catch overflows
static int clip_yuvtab_2568[768];
static int clip_yuvtab_3343[768];
static int clip_yuvtab_0c92[768];
static int clip_yuvtab_1a1e[768];
static int clip_yuvtab_40cf[768];

// global sws_flags from the command line
int sws_flags=0;

/* cpuCaps combined from cpudetect and what is actually compiled in
   (if there is no compiled-in support for something, it won't appear here) */
static CpuCaps cpuCaps;
void (*swScale)(SwsContext *context, uint8_t* src[], int srcStride[], int srcSliceY,
                int srcSliceH, uint8_t* dst[], int dstStride[])=NULL;

#ifdef CAN_COMPILE_X86_ASM
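// Referencing every asm-only constant from C code keeps the compiler from
// treating them as unused statics and warning about (or discarding) them.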
void in_asm_used_var_warning_killer()
{
    volatile int i= yCoeff+vrCoeff+ubCoeff+vgCoeff+ugCoeff+bF8+bFC+w400+w80+w10+
        bm00001111+bm00000111+bm11111000+b16Mask+g16Mask+r16Mask+b15Mask+g15Mask+r15Mask+asm_yalpha1+asm_uvalpha1+
        M24A+M24B+M24C+w02+b5Dither+g5Dither+r5Dither+g6Dither+dither4[0]+dither8[0];
    if(i) i=0;
}
#endif
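/* yuv2yuvXinC: C version of the vertical scaler for YV12 output. Each output
   pixel is a weighted sum of vLumFilterSize (resp. vChrFilterSize) source
   lines. The vertical coefficients are normalized to (1<<12)-4 (the "one"
   argument in getSwsContext); together with the extra fractional bits the
   horizontal scaler leaves in the 16-bit intermediates this accounts for the
   >>19 before clamping to [0,255]. chrSrc lines store U in the first half and
   V 2048 entries later (see the "distance of the u & v buffer" TODO above). */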
static inline void yuv2yuvXinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
                               int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
                               uint8_t *dest, uint8_t *uDest, uint8_t *vDest, int dstW)
{
    //FIXME optimize (just quickly written, not optimized at all)
    int i;
    for(i=0; i<dstW; i++)
    {
        int val=0;
        int j;
        for(j=0; j<lumFilterSize; j++)
            val += lumSrc[j][i] * lumFilter[j];

        dest[i]= MIN(MAX(val>>19, 0), 255);
    }

    if(uDest != NULL)
        for(i=0; i<(dstW>>1); i++)
        {
            int u=0;
            int v=0;
            int j;
            for(j=0; j<chrFilterSize; j++)
            {
                u += chrSrc[j][i] * chrFilter[j];
                v += chrSrc[j][i + 2048] * chrFilter[j];
            }

            uDest[i]= MIN(MAX(u>>19, 0), 255);
            vDest[i]= MIN(MAX(v>>19, 0), 255);
        }
}
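/* yuv2rgbXinC: vertical scaling and YUV->RGB conversion in one pass. Y/U/V are
   filtered exactly as in yuv2yuvXinC; the clip_yuvtab_* lookups (indices
   biased by +256 to tolerate over/undershoot from the cubic filters) return
   RGB contributions with 13 fractional bits, so the final clip_table index is
   (Y + C)>>13. Two luma pixels share one chroma sample per iteration; for
   BGR32 the alpha bytes dest[8*i+3] and dest[8*i+7] are left untouched. */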
static inline void yuv2rgbXinC(int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
                               int16_t *chrFilter, int16_t **chrSrc, int chrFilterSize,
                               uint8_t *dest, int dstW, int dstFormat)
{
    if(dstFormat==IMGFMT_BGR32)
    {
        int i;
        for(i=0; i<(dstW>>1); i++){
            int j;
            int Y1=0;
            int Y2=0;
            int U=0;
            int V=0;
            int Cb, Cr, Cg;
            for(j=0; j<lumFilterSize; j++)
            {
                Y1 += lumSrc[j][2*i]   * lumFilter[j];
                Y2 += lumSrc[j][2*i+1] * lumFilter[j];
            }
            for(j=0; j<chrFilterSize; j++)
            {
                U += chrSrc[j][i]      * chrFilter[j];
                V += chrSrc[j][i+2048] * chrFilter[j];
            }
            Y1= clip_yuvtab_2568[ (Y1>>19) + 256 ];
            Y2= clip_yuvtab_2568[ (Y2>>19) + 256 ];
            U >>= 19;
            V >>= 19;

            Cb= clip_yuvtab_40cf[U+ 256];
            Cg= clip_yuvtab_1a1e[V+ 256] + yuvtab_0c92[U+ 256];
            Cr= clip_yuvtab_3343[V+ 256];

            dest[8*i+0]=clip_table[((Y1 + Cb) >>13)];
            dest[8*i+1]=clip_table[((Y1 + Cg) >>13)];
            dest[8*i+2]=clip_table[((Y1 + Cr) >>13)];

            dest[8*i+4]=clip_table[((Y2 + Cb) >>13)];
            dest[8*i+5]=clip_table[((Y2 + Cg) >>13)];
            dest[8*i+6]=clip_table[((Y2 + Cr) >>13)];
        }
    }
    else if(dstFormat==IMGFMT_BGR24)
    {
        int i;
        for(i=0; i<(dstW>>1); i++){
            int j;
            int Y1=0;
            int Y2=0;
            int U=0;
            int V=0;
            int Cb, Cr, Cg;
            for(j=0; j<lumFilterSize; j++)
            {
                Y1 += lumSrc[j][2*i]   * lumFilter[j];
                Y2 += lumSrc[j][2*i+1] * lumFilter[j];
            }
            for(j=0; j<chrFilterSize; j++)
            {
                U += chrSrc[j][i]      * chrFilter[j];
                V += chrSrc[j][i+2048] * chrFilter[j];
            }
            Y1= clip_yuvtab_2568[ (Y1>>19) + 256 ];
            Y2= clip_yuvtab_2568[ (Y2>>19) + 256 ];
            U >>= 19;
            V >>= 19;

            Cb= clip_yuvtab_40cf[U+ 256];
            Cg= clip_yuvtab_1a1e[V+ 256] + yuvtab_0c92[U+ 256];
            Cr= clip_yuvtab_3343[V+ 256];

            dest[0]=clip_table[((Y1 + Cb) >>13)];
            dest[1]=clip_table[((Y1 + Cg) >>13)];
            dest[2]=clip_table[((Y1 + Cr) >>13)];
            dest[3]=clip_table[((Y2 + Cb) >>13)];
            dest[4]=clip_table[((Y2 + Cg) >>13)];
            dest[5]=clip_table[((Y2 + Cr) >>13)];
            dest+=6;
        }
    }
    else if(dstFormat==IMGFMT_BGR16)
    {
        int i;
        for(i=0; i<(dstW>>1); i++){
            int j;
            int Y1=0;
            int Y2=0;
            int U=0;
            int V=0;
            int Cb, Cr, Cg;
            for(j=0; j<lumFilterSize; j++)
            {
                Y1 += lumSrc[j][2*i]   * lumFilter[j];
                Y2 += lumSrc[j][2*i+1] * lumFilter[j];
            }
            for(j=0; j<chrFilterSize; j++)
            {
                U += chrSrc[j][i]      * chrFilter[j];
                V += chrSrc[j][i+2048] * chrFilter[j];
            }
            Y1= clip_yuvtab_2568[ (Y1>>19) + 256 ];
            Y2= clip_yuvtab_2568[ (Y2>>19) + 256 ];
            U >>= 19;
            V >>= 19;

            Cb= clip_yuvtab_40cf[U+ 256];
            Cg= clip_yuvtab_1a1e[V+ 256] + yuvtab_0c92[U+ 256];
            Cr= clip_yuvtab_3343[V+ 256];

            ((uint16_t*)dest)[2*i] =
                clip_table16b[(Y1 + Cb) >>13] |
                clip_table16g[(Y1 + Cg) >>13] |
                clip_table16r[(Y1 + Cr) >>13];

            ((uint16_t*)dest)[2*i+1] =
                clip_table16b[(Y2 + Cb) >>13] |
                clip_table16g[(Y2 + Cg) >>13] |
                clip_table16r[(Y2 + Cr) >>13];
        }
    }
    else if(dstFormat==IMGFMT_BGR15)
    {
        int i;
        for(i=0; i<(dstW>>1); i++){
            int j;
            int Y1=0;
            int Y2=0;
            int U=0;
            int V=0;
            int Cb, Cr, Cg;
            for(j=0; j<lumFilterSize; j++)
            {
                Y1 += lumSrc[j][2*i]   * lumFilter[j];
                Y2 += lumSrc[j][2*i+1] * lumFilter[j];
            }
            for(j=0; j<chrFilterSize; j++)
            {
                U += chrSrc[j][i]      * chrFilter[j];
                V += chrSrc[j][i+2048] * chrFilter[j];
            }
            Y1= clip_yuvtab_2568[ (Y1>>19) + 256 ];
            Y2= clip_yuvtab_2568[ (Y2>>19) + 256 ];
            U >>= 19;
            V >>= 19;

            Cb= clip_yuvtab_40cf[U+ 256];
            Cg= clip_yuvtab_1a1e[V+ 256] + yuvtab_0c92[U+ 256];
            Cr= clip_yuvtab_3343[V+ 256];

            ((uint16_t*)dest)[2*i] =
                clip_table15b[(Y1 + Cb) >>13] |
                clip_table15g[(Y1 + Cg) >>13] |
                clip_table15r[(Y1 + Cr) >>13];

            ((uint16_t*)dest)[2*i+1] =
                clip_table15b[(Y2 + Cb) >>13] |
                clip_table15g[(Y2 + Cg) >>13] |
                clip_table15r[(Y2 + Cr) >>13];
        }
    }
}
// Note: we have C, X86, MMX, MMX2 and 3DNOW versions; there is no 3DNOW+MMX2 one.

// plain C versions
#if !defined (HAVE_MMX) || defined (RUNTIME_CPUDETECT)
#define COMPILE_C
#endif

#ifdef CAN_COMPILE_X86_ASM

#if (defined (HAVE_MMX) && !defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
#define COMPILE_MMX
#endif

#if defined (HAVE_MMX2) || defined (RUNTIME_CPUDETECT)
#define COMPILE_MMX2
#endif

#if (defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
#define COMPILE_3DNOW
#endif
#endif //CAN_COMPILE_X86_ASM

#undef HAVE_MMX
#undef HAVE_MMX2
#undef HAVE_3DNOW
#undef ARCH_X86
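// swscale_template.c is included once per CPU variant below; RENAME suffixes
// every function in it (e.g. swScale_C, swScale_MMX2) so all variants can
// coexist in one binary, and globalInit() picks one at runtime.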
#ifdef COMPILE_C
#undef HAVE_MMX
#undef HAVE_MMX2
#undef HAVE_3DNOW
#undef ARCH_X86
#define RENAME(a) a ## _C
#include "swscale_template.c"
#endif

#ifdef CAN_COMPILE_X86_ASM

//X86 versions
/*
#undef RENAME
#undef HAVE_MMX
#undef HAVE_MMX2
#undef HAVE_3DNOW
#define ARCH_X86
#define RENAME(a) a ## _X86
#include "swscale_template.c"
*/

//MMX versions
#ifdef COMPILE_MMX
#undef RENAME
#define HAVE_MMX
#undef HAVE_MMX2
#undef HAVE_3DNOW
#define ARCH_X86
#define RENAME(a) a ## _MMX
#include "swscale_template.c"
#endif

//MMX2 versions
#ifdef COMPILE_MMX2
#undef RENAME
#define HAVE_MMX
#define HAVE_MMX2
#undef HAVE_3DNOW
#define ARCH_X86
#define RENAME(a) a ## _MMX2
#include "swscale_template.c"
#endif

//3DNOW versions
#ifdef COMPILE_3DNOW
#undef RENAME
#define HAVE_MMX
#undef HAVE_MMX2
#define HAVE_3DNOW
#define ARCH_X86
#define RENAME(a) a ## _3DNow
#include "swscale_template.c"
#endif

#endif //CAN_COMPILE_X86_ASM

// minor note: the HAVE_xyz macros are messed up after this point, so don't use them

// old global scaler; don't use in new code unless it only needs the settings
// from the command line (it will use sws_flags from the command line)
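// Note: the context below is created once, with the first call's parameters,
// and never recreated, so this wrapper supports only a single scaler instance
// (see the "move static / global vars into a struct" TODO above).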
void SwScale_YV12slice(unsigned char* src[], int srcStride[], int srcSliceY,
                       int srcSliceH, uint8_t* dst[], int dstStride, int dstbpp,
                       int srcW, int srcH, int dstW, int dstH){

    static SwsContext *context=NULL;
    int dstFormat;
    int flags=0;
    static int firstTime=1;
    int dstStride3[3]= {dstStride, dstStride>>1, dstStride>>1};
    static SwsFilter srcFilter={NULL, NULL, NULL, NULL};

    if(firstTime)
    {
        flags= SWS_PRINT_INFO;
        firstTime=0;

        {/*
        SwsVector *g= getGaussianVec(1.7, 2);
        SwsVector *id= getIdentityVec();
        scaleVec(g, 0.2);
//      srcFilter.chrH= diffVec(id, g);
//      srcFilter.chrH= shiftVec(id, 20);
        srcFilter.chrH= g;
//      freeVec(g);
        freeVec(id);
        normalizeVec(srcFilter.chrH, 1.0);
        printVec(srcFilter.chrH);

        srcFilter.lumV= srcFilter.lumH= srcFilter.chrV= srcFilter.chrH;
        srcFilter.lumH = srcFilter.lumV = NULL;
//      srcFilter.chrH = srcFilter.chrV = NULL;
        */}
    }

    switch(dstbpp)
    {
        case 8 : dstFormat= IMGFMT_Y8;    break;
        case 12: dstFormat= IMGFMT_YV12;  break;
        case 15: dstFormat= IMGFMT_BGR15; break;
        case 16: dstFormat= IMGFMT_BGR16; break;
        case 24: dstFormat= IMGFMT_BGR24; break;
        case 32: dstFormat= IMGFMT_BGR32; break;
        default: return;
    }

    switch(sws_flags)
    {
        case 0: flags|= SWS_FAST_BILINEAR; break;
        case 1: flags|= SWS_BILINEAR;      break;
        case 2: flags|= SWS_BICUBIC;       break;
        case 3: flags|= SWS_X;             break;
        default:flags|= SWS_BILINEAR;      break;
    }

    if(!context) context=getSwsContext(srcW, srcH, IMGFMT_YV12, dstW, dstH, dstFormat, flags, &srcFilter, NULL);

    swScale(context, src, srcStride, srcSliceY, srcSliceH, dst, dstStride3);
}
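/* initFilter: precomputes one scaler direction. For every destination pixel it
   stores a source start position in (*filterPos)[i] and *outFilterSize
   fixed-point coefficients in *outFilter, normalized so each row sums to
   "one". xInc is the 16.16 fixed-point source step per destination pixel;
   srcFilter is an optional convolution vector that gets folded into the
   scaling coefficients (dstFilter is still a FIXME below). */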
static inline void initFilter(int16_t **outFilter, int16_t **filterPos, int *outFilterSize, int xInc,
                              int srcW, int dstW, int filterAlign, int one, int flags,
                              SwsVector *srcFilter, SwsVector *dstFilter)
{
    int i;
    int filterSize;
    int filter2Size;
    int minFilterSize;
    double *filter=NULL;
    double *filter2=NULL;
#ifdef ARCH_X86
    if(gCpuCaps.hasMMX)
        asm volatile("emms\n\t"::: "memory"); //FIXME this shouldn't be required but it IS (even for non-MMX versions)
#endif

    *filterPos = (int16_t*)memalign(8, dstW*sizeof(int16_t));

    if(ABS(xInc - 0x10000) <10) // unscaled
    {
        int i;
        filterSize= 1;
        filter= (double*)memalign(8, dstW*sizeof(double)*filterSize);
        for(i=0; i<dstW*filterSize; i++) filter[i]=0;

        for(i=0; i<dstW; i++)
        {
            filter[i*filterSize]=1;
            (*filterPos)[i]=i;
        }
    }
    else if(xInc <= (1<<16) || (flags&SWS_FAST_BILINEAR)) // upscale
    {
        int i;
        int xDstInSrc;
        if     (flags&SWS_BICUBIC) filterSize= 4;
        else if(flags&SWS_X      ) filterSize= 4;
        else                       filterSize= 2;
//      printf("%d %d %d\n", filterSize, srcW, dstW);
        filter= (double*)memalign(8, dstW*sizeof(double)*filterSize);

        xDstInSrc= xInc/2 - 0x8000;
        for(i=0; i<dstW; i++)
        {
            int xx= (xDstInSrc>>16) - (filterSize>>1) + 1;
            int j;

            (*filterPos)[i]= xx;
            if((flags & SWS_BICUBIC) || (flags & SWS_X))
            {
                double d= ABS(((xx+1)<<16) - xDstInSrc)/(double)(1<<16);
                double y1,y2,y3,y4;
                double A= -0.6;
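                // For the SWS_BICUBIC branch, y1..y4 are the standard cubic
                // convolution kernel (cf. Keys) with sharpness parameter A,
                // evaluated at the distances 1+d, d, 1-d and 2-d of the four
                // source samples from the destination position.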
                if(flags & SWS_BICUBIC){
                    // Equation is from VirtualDub
                    y1 = (        +     A*d -       2.0*A*d*d +       A*d*d*d);
                    y2 = (+ 1.0             -   (A+3.0)*d*d   + (A+2.0)*d*d*d);
                    y3 = (        -     A*d + (2.0*A+3.0)*d*d - (A+2.0)*d*d*d);
                    y4 = (                  +       A*d*d     -       A*d*d*d);
                }else{
                    // cubic interpolation (derived it myself)
                    y1 = (    - 2.0*d + 3.0*d*d - 1.0*d*d*d)/6.0;
                    y2 = (6.0 - 3.0*d - 6.0*d*d + 3.0*d*d*d)/6.0;
                    y3 = (    + 6.0*d + 3.0*d*d - 3.0*d*d*d)/6.0;
                    y4 = (    - 1.0*d           + 1.0*d*d*d)/6.0;
                }

//              printf("%d %d %d \n", coeff, (int)d, xDstInSrc);
                filter[i*filterSize + 0]= y1;
                filter[i*filterSize + 1]= y2;
                filter[i*filterSize + 2]= y3;
                filter[i*filterSize + 3]= y4;
//              printf("%1.3f %1.3f %1.3f %1.3f %1.3f\n", d, y1, y2, y3, y4);
            }
            else
            {
                for(j=0; j<filterSize; j++)
                {
                    double d= ABS((xx<<16) - xDstInSrc)/(double)(1<<16);
                    double coeff= 1.0 - d;
                    if(coeff<0) coeff=0;
//                  printf("%d %d %d \n", coeff, (int)d, xDstInSrc);
                    filter[i*filterSize + j]= coeff;
                    xx++;
                }
            }
            xDstInSrc+= xInc;
        }
    }
    else // downscale
    {
        int xDstInSrc;
        if     (flags&SWS_BICUBIC) filterSize= (int)ceil(1 + 4.0*srcW / (double)dstW);
        else if(flags&SWS_X      ) filterSize= (int)ceil(1 + 4.0*srcW / (double)dstW);
        else                       filterSize= (int)ceil(1 + 2.0*srcW / (double)dstW);
//      printf("%d %d %d\n", *filterSize, srcW, dstW);
        filter= (double*)memalign(8, dstW*sizeof(double)*filterSize);

        xDstInSrc= xInc/2 - 0x8000;
        for(i=0; i<dstW; i++)
        {
            int xx= (int)((double)xDstInSrc/(double)(1<<16) - (filterSize-1)*0.5 + 0.5);
            int j;

            (*filterPos)[i]= xx;
            for(j=0; j<filterSize; j++)
            {
                double d= ABS((xx<<16) - xDstInSrc)/(double)xInc;
                double coeff;
                if((flags & SWS_BICUBIC) || (flags & SWS_X))
                {
                    double A= -0.75;
//                  d*=2;
                    // Equation is from VirtualDub
                    if(d<1.0)
                        coeff = (1.0 - (A+3.0)*d*d + (A+2.0)*d*d*d);
                    else if(d<2.0)
                        coeff = (-4.0*A + 8.0*A*d - 5.0*A*d*d + A*d*d*d);
                    else
                        coeff=0.0;
                }
/*              else if(flags & SWS_X)
                {
                }*/
                else
                {
                    coeff= 1.0 - d;
                    if(coeff<0) coeff=0;
                }
//              printf("%1.3f %d %d \n", coeff, (int)d, xDstInSrc);
                filter[i*filterSize + j]= coeff;
                xx++;
            }
            xDstInSrc+= xInc;
        }
    }
    /* apply src & dst filter to filter -> filter2
       free(filter);
    */
    filter2Size= filterSize;
    if(srcFilter) filter2Size+= srcFilter->length - 1;
    if(dstFilter) filter2Size+= dstFilter->length - 1;
    filter2= (double*)memalign(8, filter2Size*dstW*sizeof(double));

    for(i=0; i<dstW; i++)
    {
        int j;
        SwsVector scaleFilter;
        SwsVector *outVec;

        scaleFilter.coeff= filter + i*filterSize;
        scaleFilter.length= filterSize;

        if(srcFilter) outVec= convVec(srcFilter, &scaleFilter);
        else          outVec= &scaleFilter;

        ASSERT(outVec->length == filter2Size)
        //FIXME dstFilter

        for(j=0; j<outVec->length; j++)
        {
            filter2[i*filter2Size + j]= outVec->coeff[j];
        }

        (*filterPos)[i]+= (filterSize-1)/2 - (filter2Size-1)/2;

        if(outVec != &scaleFilter) freeVec(outVec);
    }
    free(filter); filter=NULL;

    /* try to reduce the filter size (step 1: find size and shift left) */
    // assume it is nearly normalized (*0.5 or *2.0 is ok, but *0.001 is not)
    minFilterSize= 0;
    for(i=dstW-1; i>=0; i--)
    {
        int min= filter2Size;
        int j;
        double cutOff=0.0;

        /* get rid of near-zero elements on the left by shifting left */
        for(j=0; j<filter2Size; j++)
        {
            int k;
            cutOff += ABS(filter2[i*filter2Size]);
            if(cutOff > SWS_MAX_REDUCE_CUTOFF) break;

            /* preserve monotonicity because the core can't handle the filter otherwise */
            if(i<dstW-1 && (*filterPos)[i] >= (*filterPos)[i+1]) break;

            // move filter coefficients left
            for(k=1; k<filter2Size; k++)
                filter2[i*filter2Size + k - 1]= filter2[i*filter2Size + k];
            filter2[i*filter2Size + k - 1]= 0.0;
            (*filterPos)[i]++;
        }

        cutOff=0.0;
        /* count near-zeros on the right */
        for(j=filter2Size-1; j>0; j--)
        {
            cutOff += ABS(filter2[i*filter2Size + j]);
            if(cutOff > SWS_MAX_REDUCE_CUTOFF) break;
            min--;
        }

        if(min>minFilterSize) minFilterSize= min;
    }
    /* try to reduce the filter size (step 2: reduce it) */
    for(i=0; i<dstW; i++)
    {
        int j;
        for(j=0; j<minFilterSize; j++)
            filter2[i*minFilterSize + j]= filter2[i*filter2Size + j];
    }
    if((flags&SWS_PRINT_INFO) && verbose)
        printf("SwScaler: reducing filtersize %d -> %d\n", filter2Size, minFilterSize);

    filter2Size= minFilterSize;
    ASSERT(filter2Size > 0)

    //FIXME try to align filterPos if possible

    // fix borders
    for(i=0; i<dstW; i++)
    {
        int j;
        if((*filterPos)[i] < 0)
        {
            // move filter coefficients left to compensate for filterPos
            for(j=1; j<filter2Size; j++)
            {
                int left= MAX(j + (*filterPos)[i], 0);
                filter2[i*filter2Size + left] += filter2[i*filter2Size + j];
                filter2[i*filter2Size + j]=0;
            }
            (*filterPos)[i]= 0;
        }

        if((*filterPos)[i] + filter2Size > srcW)
        {
            int shift= (*filterPos)[i] + filter2Size - srcW;
            // move filter coefficients right to compensate for filterPos
            for(j=filter2Size-2; j>=0; j--)
            {
                int right= MIN(j + shift, filter2Size-1);
                filter2[i*filter2Size + right] += filter2[i*filter2Size + j];
                filter2[i*filter2Size + j]=0;
            }
            (*filterPos)[i]= srcW - filter2Size;
        }
    }

    *outFilterSize= (filter2Size + (filterAlign-1)) & (~(filterAlign-1));
    *outFilter= (int16_t*)memalign(8, *outFilterSize*dstW*sizeof(int16_t));
    memset(*outFilter, 0, *outFilterSize*dstW*sizeof(int16_t));

    /* normalize & store in outFilter */
    for(i=0; i<dstW; i++)
    {
        int j;
        double sum=0;
        double scale= one;
        for(j=0; j<filter2Size; j++)
        {
            sum+= filter2[i*filter2Size + j];
        }
        scale/= sum;
        for(j=0; j<filter2Size; j++)
        {
            (*outFilter)[i*(*outFilterSize) + j]= (int)(filter2[i*filter2Size + j]*scale);
        }
    }

    free(filter2);
}
#ifdef ARCH_X86
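/* The asm block below is never executed as written: "jmp 9f" skips straight to
   the end, and the leal instructions only record the fragment's start address,
   the byte offsets of the two pshufw immediates, and its total length. The
   fragment is then copied into funnyCode repeatedly with those immediates
   patched to the source offsets for each position, building a branch-free,
   self-generated horizontal scaler that ends with a RET. */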
static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode)
{
    uint8_t *fragment;
    int imm8OfPShufW1;
    int imm8OfPShufW2;
    int fragmentLength;
    int xpos, i;

    // create an optimized horizontal scaling routine

    //code fragment
    asm volatile(
        "jmp 9f                         \n\t"
        // Begin
        "0:                             \n\t"
        "movq (%%esi), %%mm0            \n\t" //FIXME alignment
        "movq %%mm0, %%mm1              \n\t"
        "psrlq $8, %%mm0                \n\t"
        "punpcklbw %%mm7, %%mm1         \n\t"
        "movq %%mm2, %%mm3              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "addw %%bx, %%cx                \n\t" //2*xalpha += (4*lumXInc)&0xFFFF
        "pshufw $0xFF, %%mm1, %%mm1     \n\t"
        "1:                             \n\t"
        "adcl %%edx, %%esi              \n\t" //xx+= (4*lumXInc)>>16 + carry
        "pshufw $0xFF, %%mm0, %%mm0     \n\t"
        "2:                             \n\t"
        "psrlw $9, %%mm3                \n\t"
        "psubw %%mm1, %%mm0             \n\t"
        "pmullw %%mm3, %%mm0            \n\t"
        "paddw %%mm6, %%mm2             \n\t" // 2*alpha += xpos&0xFFFF
        "psllw $7, %%mm1                \n\t"
        "paddw %%mm1, %%mm0             \n\t"

        "movq %%mm0, (%%edi, %%eax)     \n\t"

        "addl $8, %%eax                 \n\t"
        // End
        "9:                             \n\t"
//      "int $3                         \n\t"
        "leal 0b, %0                    \n\t"
        "leal 1b, %1                    \n\t"
        "leal 2b, %2                    \n\t"
        "decl %1                        \n\t"
        "decl %2                        \n\t"
        "subl %0, %1                    \n\t"
        "subl %0, %2                    \n\t"
        "leal 9b, %3                    \n\t"
        "subl %0, %3                    \n\t"
        :"=r" (fragment), "=r" (imm8OfPShufW1), "=r" (imm8OfPShufW2),
         "=r" (fragmentLength)
    );

    xpos= 0; //lumXInc/2 - 0x8000; // difference between pixel centers

    for(i=0; i<dstW/8; i++)
    {
        int xx=xpos>>16;

        if((i&3) == 0)
        {
            int a=0;
            int b=((xpos+xInc)>>16) - xx;
            int c=((xpos+xInc*2)>>16) - xx;
            int d=((xpos+xInc*3)>>16) - xx;

            memcpy(funnyCode + fragmentLength*i/4, fragment, fragmentLength);

            funnyCode[fragmentLength*i/4 + imm8OfPShufW1]=
            funnyCode[fragmentLength*i/4 + imm8OfPShufW2]=
                a | (b<<2) | (c<<4) | (d<<6);

            // if we don't need to read 8 bytes then don't :), reduces the chance of
            // crossing a cache line
            if(d<3) funnyCode[fragmentLength*i/4 + 1]= 0x6E;

            funnyCode[fragmentLength*(i+4)/4]= RET;
        }
        xpos+=xInc;
    }
}
#endif // ARCH_X86
//FIXME remove
void SwScale_Init(){
}

static void globalInit(){
    // generating tables:
    int i;
    for(i=0; i<768; i++){
        int c= MIN(MAX(i-256, 0), 255);
        clip_table[i]=c;
        yuvtab_2568[c]= clip_yuvtab_2568[i]= (0x2568*(c-16)) + (256<<13);
        yuvtab_3343[c]= clip_yuvtab_3343[i]=  0x3343*(c-128);
        yuvtab_0c92[c]= clip_yuvtab_0c92[i]= -0x0c92*(c-128);
        yuvtab_1a1e[c]= clip_yuvtab_1a1e[i]= -0x1a1e*(c-128);
        yuvtab_40cf[c]= clip_yuvtab_40cf[i]=  0x40cf*(c-128);
    }

    for(i=0; i<768; i++)
    {
        int v= clip_table[i];
        clip_table16b[i]=  v>>3;
        clip_table16g[i]= (v<<3)&0x07E0;
        clip_table16r[i]= (v<<8)&0xF800;
        clip_table15b[i]=  v>>3;
        clip_table15g[i]= (v<<2)&0x03E0;
        clip_table15r[i]= (v<<7)&0x7C00;
    }
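    // The 15/16-bit tables above pre-position each clipped component inside a
    // 5-5-5 or 5-6-5 pixel, so yuv2rgbXinC assembles a full pixel with three
    // lookups and two ORs.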
    cpuCaps= gCpuCaps;

#ifdef RUNTIME_CPUDETECT
#ifdef CAN_COMPILE_X86_ASM
    // ordered by speed, fastest first
    if(gCpuCaps.hasMMX2)
        swScale= swScale_MMX2;
    else if(gCpuCaps.has3DNow)
        swScale= swScale_3DNow;
    else if(gCpuCaps.hasMMX)
        swScale= swScale_MMX;
    else
        swScale= swScale_C;
#else
    swScale= swScale_C;
    cpuCaps.hasMMX2 = cpuCaps.hasMMX = cpuCaps.has3DNow = 0;
#endif
#else //RUNTIME_CPUDETECT
#ifdef HAVE_MMX2
    swScale= swScale_MMX2;
    cpuCaps.has3DNow = 0;
#elif defined (HAVE_3DNOW)
    swScale= swScale_3DNow;
    cpuCaps.hasMMX2 = 0;
#elif defined (HAVE_MMX)
    swScale= swScale_MMX;
    cpuCaps.hasMMX2 = cpuCaps.has3DNow = 0;
#else
    swScale= swScale_C;
    cpuCaps.hasMMX2 = cpuCaps.hasMMX = cpuCaps.has3DNow = 0;
#endif
#endif //!RUNTIME_CPUDETECT
}
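/* getSwsContext: allocates a scaler context and precomputes everything the
   per-slice swScale() call needs: fixed-point increments, horizontal and
   vertical filter coefficient tables, intermediate line buffers and, on x86,
   the generated MMX2 scaler code. */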
SwsContext *getSwsContext(int srcW, int srcH, int srcFormat, int dstW, int dstH, int dstFormat, int flags,
                          SwsFilter *srcFilter, SwsFilter *dstFilter){

    const int widthAlign= dstFormat==IMGFMT_YV12 ? 16 : 8;
    SwsContext *c;
    int i;
    SwsFilter dummyFilter= {NULL, NULL, NULL, NULL};

    if(swScale==NULL) globalInit();

    /* sanity check */
    if(srcW<1 || srcH<1 || dstW<1 || dstH<1) return NULL;

/* FIXME
    if(dstStride[0]%widthAlign != 0)
    {
        if(flags & SWS_PRINT_INFO)
            fprintf(stderr, "SwScaler: Warning: dstStride is not a multiple of %d!\n"
                            "SwScaler: ->cannot do aligned memory accesses anymore\n",
                    widthAlign);
    }
*/

    if(!dstFilter) dstFilter= &dummyFilter;
    if(!srcFilter) srcFilter= &dummyFilter;

    c= memalign(64, sizeof(SwsContext));
    memset(c, 0, sizeof(SwsContext));

    c->srcW= srcW;
    c->srcH= srcH;
    c->dstW= dstW;
    c->dstH= dstH;
    c->lumXInc= ((srcW<<16) + (1<<15))/dstW;
    c->lumYInc= ((srcH<<16) + (1<<15))/dstH;
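    // lumXInc/lumYInc are the 16.16 fixed-point source steps per destination
    // pixel; the +(1<<15) rounds to nearest.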
    c->flags= flags;
    c->dstFormat= dstFormat;
    c->srcFormat= srcFormat;

    if(cpuCaps.hasMMX2)
    {
        c->canMMX2BeUsed= (dstW >=srcW && (dstW&31)==0 && (srcW&15)==0) ? 1 : 0;
        if(!c->canMMX2BeUsed && dstW >=srcW && (srcW&15)==0 && (flags&SWS_FAST_BILINEAR))
        {
            if(flags&SWS_PRINT_INFO)
                fprintf(stderr, "SwScaler: output width is not a multiple of 32 -> no MMX2 scaler\n");
        }
    }
    else
        c->canMMX2BeUsed=0;

    // match pixel 0 of the src to pixel 0 of dst and match pixel n-2 of src to pixel n-2 of dst,
    // but only for the FAST_BILINEAR mode; otherwise do correct scaling.
    // n-2 is the last chrominance sample available.
    // this is not perfect, but no one should notice the difference; the more correct variant
    // would be like the vertical one, but that would require some special code for the
    // first and last pixel
    if(flags&SWS_FAST_BILINEAR)
    {
        if(c->canMMX2BeUsed) c->lumXInc+= 20;
        // we don't use the x86 asm scaler if MMX is available
        else if(cpuCaps.hasMMX) c->lumXInc = ((srcW-2)<<16)/(dstW-2) - 20;
    }

    /* set chrXInc & chrDstW */
    if((flags&SWS_FULL_UV_IPOL) && dstFormat!=IMGFMT_YV12)
        c->chrXInc= c->lumXInc>>1, c->chrDstW= dstW;
    else
        c->chrXInc= c->lumXInc,    c->chrDstW= (dstW+1)>>1;

    /* set chrYInc & chrDstH */
    if(dstFormat==IMGFMT_YV12) c->chrYInc= c->lumYInc,    c->chrDstH= (dstH+1)>>1;
    else                       c->chrYInc= c->lumYInc>>1, c->chrDstH= dstH;

    /* precalculate horizontal scaler filter coefficients */
    {
        const int filterAlign= cpuCaps.hasMMX ? 4 : 1;

        initFilter(&c->hLumFilter, &c->hLumFilterPos, &c->hLumFilterSize, c->lumXInc,
                   srcW,        dstW,       filterAlign, 1<<14, flags,
                   srcFilter->lumH, dstFilter->lumH);
        initFilter(&c->hChrFilter, &c->hChrFilterPos, &c->hChrFilterSize, c->chrXInc,
                   (srcW+1)>>1, c->chrDstW, filterAlign, 1<<14, flags,
                   srcFilter->chrH, dstFilter->chrH);

#ifdef ARCH_X86
        // can't downscale !!!
        if(c->canMMX2BeUsed && (flags & SWS_FAST_BILINEAR))
        {
            initMMX2HScaler(      dstW, c->lumXInc, c->funnyYCode);
            initMMX2HScaler(c->chrDstW, c->chrXInc, c->funnyUVCode);
        }
#endif
    } // initialize horizontal stuff

    /* precalculate vertical scaler filter coefficients */
    initFilter(&c->vLumFilter, &c->vLumFilterPos, &c->vLumFilterSize, c->lumYInc,
               srcH,        dstH,       1, (1<<12)-4, flags,
               srcFilter->lumV, dstFilter->lumV);
    initFilter(&c->vChrFilter, &c->vChrFilterPos, &c->vChrFilterSize, c->chrYInc,
               (srcH+1)>>1, c->chrDstH, 1, (1<<12)-4, flags,
               srcFilter->chrV, dstFilter->chrV);

    // calculate buffer sizes so that they won't run out while handling these damn slices
    c->vLumBufSize= c->vLumFilterSize;
    c->vChrBufSize= c->vChrFilterSize;
    for(i=0; i<dstH; i++)
    {
        int chrI= i*c->chrDstH / dstH;
        int nextSlice= MAX(c->vLumFilterPos[i   ] + c->vLumFilterSize - 1,
                          ((c->vChrFilterPos[chrI] + c->vChrFilterSize - 1)<<1));
        nextSlice&= ~1; // slices start at even boundaries
        if(c->vLumFilterPos[i   ] + c->vLumBufSize < nextSlice)
            c->vLumBufSize= nextSlice - c->vLumFilterPos[i];
        if(c->vChrFilterPos[chrI] + c->vChrBufSize < (nextSlice>>1))
            c->vChrBufSize= (nextSlice>>1) - c->vChrFilterPos[chrI];
    }

    // allocate pixbufs (we use dynamic allocation because otherwise we would need to
    // allocate several megabytes to handle all possible cases)
    c->lumPixBuf= (int16_t**)memalign(4, c->vLumBufSize*2*sizeof(int16_t*));
    c->chrPixBuf= (int16_t**)memalign(4, c->vChrBufSize*2*sizeof(int16_t*));
    for(i=0; i<c->vLumBufSize; i++)
        c->lumPixBuf[i]= c->lumPixBuf[i+c->vLumBufSize]= (uint16_t*)memalign(8, 4000);
    for(i=0; i<c->vChrBufSize; i++)
        c->chrPixBuf[i]= c->chrPixBuf[i+c->vChrBufSize]= (uint16_t*)memalign(8, 8000);
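    // Each buffer pointer is stored twice, vLumBufSize (resp. vChrBufSize)
    // entries apart, so the vertical filter can address a sliding window of
    // the most recent lines with plain ascending indices, without wraparound
    // checks.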
    // try to avoid drawing green stuff between the right end and the stride end
    for(i=0; i<c->vLumBufSize; i++) memset(c->lumPixBuf[i],  0, 4000);
    for(i=0; i<c->vChrBufSize; i++) memset(c->chrPixBuf[i], 64, 8000);

    ASSERT(c->chrDstH <= dstH)

    // pack filter data for MMX code
    if(cpuCaps.hasMMX)
    {
        c->lumMmxFilter= (int16_t*)memalign(8, c->vLumFilterSize*      dstH*4*sizeof(int16_t));
        c->chrMmxFilter= (int16_t*)memalign(8, c->vChrFilterSize*c->chrDstH*4*sizeof(int16_t));
        for(i=0; i<c->vLumFilterSize*dstH; i++)
            c->lumMmxFilter[4*i]=c->lumMmxFilter[4*i+1]=c->lumMmxFilter[4*i+2]=c->lumMmxFilter[4*i+3]=
                c->vLumFilter[i];
        for(i=0; i<c->vChrFilterSize*c->chrDstH; i++)
            c->chrMmxFilter[4*i]=c->chrMmxFilter[4*i+1]=c->chrMmxFilter[4*i+2]=c->chrMmxFilter[4*i+3]=
                c->vChrFilter[i];
    }

    if(flags&SWS_PRINT_INFO)
    {
#ifdef DITHER1XBPP
        char *dither= cpuCaps.hasMMX ? " dithered" : "";
#endif
        if(flags&SWS_FAST_BILINEAR)
            fprintf(stderr, "\nSwScaler: FAST_BILINEAR scaler ");
        else if(flags&SWS_BILINEAR)
            fprintf(stderr, "\nSwScaler: BILINEAR scaler ");
        else if(flags&SWS_BICUBIC)
            fprintf(stderr, "\nSwScaler: BICUBIC scaler ");
        else
            fprintf(stderr, "\nSwScaler: ehh flags invalid?! ");

        if(dstFormat==IMGFMT_BGR15)
            fprintf(stderr, "with%s BGR15 output ", dither);
        else if(dstFormat==IMGFMT_BGR16)
            fprintf(stderr, "with%s BGR16 output ", dither);
        else if(dstFormat==IMGFMT_BGR24)
            fprintf(stderr, "with BGR24 output ");
        else if(dstFormat==IMGFMT_BGR32)
            fprintf(stderr, "with BGR32 output ");
        else if(dstFormat==IMGFMT_YV12)
            fprintf(stderr, "with YV12 output ");
        else
            fprintf(stderr, "without output ");

        if(cpuCaps.hasMMX2)
            fprintf(stderr, "using MMX2\n");
        else if(cpuCaps.has3DNow)
            fprintf(stderr, "using 3DNOW\n");
        else if(cpuCaps.hasMMX)
            fprintf(stderr, "using MMX\n");
        else
            fprintf(stderr, "using C\n");
    }

    if((flags & SWS_PRINT_INFO) && verbose)
    {
        if(cpuCaps.hasMMX)
        {
            if(c->canMMX2BeUsed && (flags&SWS_FAST_BILINEAR))
                printf("SwScaler: using FAST_BILINEAR MMX2 scaler for horizontal scaling\n");
            else
            {
                if(c->hLumFilterSize==4)
                    printf("SwScaler: using 4-tap MMX scaler for horizontal luminance scaling\n");
                else if(c->hLumFilterSize==8)
                    printf("SwScaler: using 8-tap MMX scaler for horizontal luminance scaling\n");
                else
                    printf("SwScaler: using n-tap MMX scaler for horizontal luminance scaling\n");

                if(c->hChrFilterSize==4)
                    printf("SwScaler: using 4-tap MMX scaler for horizontal chrominance scaling\n");
                else if(c->hChrFilterSize==8)
                    printf("SwScaler: using 8-tap MMX scaler for horizontal chrominance scaling\n");
                else
                    printf("SwScaler: using n-tap MMX scaler for horizontal chrominance scaling\n");
            }
        }
        else
        {
#ifdef ARCH_X86
            printf("SwScaler: using X86-Asm scaler for horizontal scaling\n");
#else
            if(flags & SWS_FAST_BILINEAR)
                printf("SwScaler: using FAST_BILINEAR C scaler for horizontal scaling\n");
            else
                printf("SwScaler: using C scaler for horizontal scaling\n");
#endif
        }

        if(dstFormat==IMGFMT_YV12)
        {
            if(c->vLumFilterSize==1)
                printf("SwScaler: using 1-tap %s \"scaler\" for vertical scaling (YV12)\n", cpuCaps.hasMMX ? "MMX" : "C");
            else
                printf("SwScaler: using n-tap %s scaler for vertical scaling (YV12)\n", cpuCaps.hasMMX ? "MMX" : "C");
        }
        else
        {
            if(c->vLumFilterSize==1 && c->vChrFilterSize==2)
                printf("SwScaler: using 1-tap %s \"scaler\" for vertical luminance scaling (BGR)\n"
                       "SwScaler:       2-tap scaler for vertical chrominance scaling (BGR)\n", cpuCaps.hasMMX ? "MMX" : "C");
            else if(c->vLumFilterSize==2 && c->vChrFilterSize==2)
                printf("SwScaler: using 2-tap linear %s scaler for vertical scaling (BGR)\n", cpuCaps.hasMMX ? "MMX" : "C");
            else
                printf("SwScaler: using n-tap %s scaler for vertical scaling (BGR)\n", cpuCaps.hasMMX ? "MMX" : "C");
        }

        if(dstFormat==IMGFMT_BGR24)
            printf("SwScaler: using %s YV12->BGR24 converter\n",
                   cpuCaps.hasMMX2 ? "MMX2" : (cpuCaps.hasMMX ? "MMX" : "C"));
        else
            printf("SwScaler: using %s YV12->BGR converter\n", cpuCaps.hasMMX ? "MMX" : "C"); //FIXME print format

        printf("SwScaler: %dx%d -> %dx%d\n", srcW, srcH, dstW, dstH);
    }

    return c;
}
/**
 * returns a normalized Gaussian curve used to filter stuff
 * quality=3 is high quality, lower is lower quality
 */
SwsVector *getGaussianVec(double variance, double quality){
    const int length= (int)(variance*quality + 0.5) | 1;
    int i;
    double *coeff= memalign(sizeof(double), length*sizeof(double));
    double middle= (length-1)*0.5;
    SwsVector *vec= malloc(sizeof(SwsVector));

    vec->coeff= coeff;
    vec->length= length;

    for(i=0; i<length; i++)
    {
        double dist= i-middle;
        coeff[i]= exp( -dist*dist/(2*variance*variance) ) / sqrt(2*variance*PI);
    }

    normalizeVec(vec, 1.0);
    return vec;
}
SwsVector *getIdentityVec(void){
    double *coeff= memalign(sizeof(double), sizeof(double));
    SwsVector *vec= malloc(sizeof(SwsVector));
    coeff[0]= 1.0;
    vec->coeff= coeff;
    vec->length= 1;
    return vec;
}

void normalizeVec(SwsVector *a, double height){
    int i;
    double sum=0;
    double inv;

    for(i=0; i<a->length; i++)
        sum+= a->coeff[i];

    inv= height/sum;

    // scale every coefficient by height/sum so the vector sums to "height"
    for(i=0; i<a->length; i++)
        a->coeff[i]*= inv;
}

void scaleVec(SwsVector *a, double scalar){
    int i;
    for(i=0; i<a->length; i++)
        a->coeff[i]*= scalar;
}
SwsVector *convVec(SwsVector *a, SwsVector *b){
    int length= a->length + b->length - 1;
    double *coeff= memalign(sizeof(double), length*sizeof(double));
    int i, j;
    SwsVector *vec= malloc(sizeof(SwsVector));

    vec->coeff= coeff;
    vec->length= length;

    for(i=0; i<length; i++) coeff[i]= 0.0;

    for(i=0; i<a->length; i++)
    {
        for(j=0; j<b->length; j++)
        {
            coeff[i+j]+= a->coeff[i]*b->coeff[j];
        }
    }

    return vec;
}

SwsVector *sumVec(SwsVector *a, SwsVector *b){
    int length= MAX(a->length, b->length);
    double *coeff= memalign(sizeof(double), length*sizeof(double));
    int i;
    SwsVector *vec= malloc(sizeof(SwsVector));

    vec->coeff= coeff;
    vec->length= length;

    for(i=0; i<length; i++) coeff[i]= 0.0;

    // center-align both vectors before adding their coefficients
    for(i=0; i<a->length; i++) coeff[i + (length-1)/2 - (a->length-1)/2]+= a->coeff[i];
    for(i=0; i<b->length; i++) coeff[i + (length-1)/2 - (b->length-1)/2]+= b->coeff[i];

    return vec;
}

SwsVector *diffVec(SwsVector *a, SwsVector *b){
    int length= MAX(a->length, b->length);
    double *coeff= memalign(sizeof(double), length*sizeof(double));
    int i;
    SwsVector *vec= malloc(sizeof(SwsVector));

    vec->coeff= coeff;
    vec->length= length;

    for(i=0; i<length; i++) coeff[i]= 0.0;

    for(i=0; i<a->length; i++) coeff[i + (length-1)/2 - (a->length-1)/2]+= a->coeff[i];
    for(i=0; i<b->length; i++) coeff[i + (length-1)/2 - (b->length-1)/2]-= b->coeff[i];

    return vec;
}

/* shifts a vector left, or right if "shift" is negative */
SwsVector *shiftVec(SwsVector *a, int shift){
    int length= a->length + ABS(shift)*2;
    double *coeff= memalign(sizeof(double), length*sizeof(double));
    int i, j;
    SwsVector *vec= malloc(sizeof(SwsVector));

    vec->coeff= coeff;
    vec->length= length;

    for(i=0; i<length; i++) coeff[i]= 0.0;

    for(i=0; i<a->length; i++)
    {
        coeff[i + (length-1)/2 - (a->length-1)/2 - shift]= a->coeff[i];
    }

    return vec;
}
void printVec(SwsVector *a){
    int i;
    double max=0;
    double min=0;
    double range;

    for(i=0; i<a->length; i++)
        if(a->coeff[i]>max) max= a->coeff[i];

    for(i=0; i<a->length; i++)
        if(a->coeff[i]<min) min= a->coeff[i];

    range= max - min;

    for(i=0; i<a->length; i++)
    {
        int x= (int)((a->coeff[i]-min)*60.0/range + 0.5);
        printf("%1.3f ", a->coeff[i]);
        for(; x>0; x--) printf(" ");
        printf("|\n");
    }
}

void freeVec(SwsVector *a){
    if(!a) return;
    if(a->coeff) free(a->coeff);
    a->coeff=NULL;
    a->length=0;
    free(a);
}
void freeSwsContext(SwsContext *c){
    int i;

    if(!c) return;

    if(c->lumPixBuf)
    {
        // the second half of lumPixBuf duplicates the first half's pointers,
        // so free each buffer only once
        for(i=0; i<c->vLumBufSize; i++)
        {
            if(c->lumPixBuf[i]) free(c->lumPixBuf[i]);
            c->lumPixBuf[i]=NULL;
        }
        free(c->lumPixBuf);
        c->lumPixBuf=NULL;
    }

    if(c->chrPixBuf)
    {
        // same pointer duplication as lumPixBuf
        for(i=0; i<c->vChrBufSize; i++)
        {
            if(c->chrPixBuf[i]) free(c->chrPixBuf[i]);
            c->chrPixBuf[i]=NULL;
        }
        free(c->chrPixBuf);
        c->chrPixBuf=NULL;
    }

    if(c->vLumFilter) free(c->vLumFilter);
    c->vLumFilter = NULL;
    if(c->vChrFilter) free(c->vChrFilter);
    c->vChrFilter = NULL;
    if(c->hLumFilter) free(c->hLumFilter);
    c->hLumFilter = NULL;
    if(c->hChrFilter) free(c->hChrFilter);
    c->hChrFilter = NULL;

    if(c->vLumFilterPos) free(c->vLumFilterPos);
    c->vLumFilterPos = NULL;
    if(c->vChrFilterPos) free(c->vChrFilterPos);
    c->vChrFilterPos = NULL;
    if(c->hLumFilterPos) free(c->hLumFilterPos);
    c->hLumFilterPos = NULL;
    if(c->hChrFilterPos) free(c->hChrFilterPos);
    c->hChrFilterPos = NULL;

    if(c->lumMmxFilter) free(c->lumMmxFilter);
    c->lumMmxFilter = NULL;
    if(c->chrMmxFilter) free(c->chrMmxFilter);
    c->chrMmxFilter = NULL;

    free(c);
}