You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2911 lines
83KB

  1. /*
  2. Copyright (C) 2001 Michael Niedermayer (michaelni@gmx.at)
  3. This program is free software; you can redistribute it and/or modify
  4. it under the terms of the GNU General Public License as published by
  5. the Free Software Foundation; either version 2 of the License, or
  6. (at your option) any later version.
  7. This program is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU General Public License for more details.
  11. You should have received a copy of the GNU General Public License
  12. along with this program; if not, write to the Free Software
  13. Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  14. */
  15. /*
  16. C MMX MMX2 3DNow
  17. isVertDC Ec Ec
  18. isVertMinMaxOk Ec Ec
  19. doVertLowPass E e e
  20. doVertDefFilter Ec Ec Ec
  21. isHorizDC Ec Ec
  22. isHorizMinMaxOk a
  23. doHorizLowPass E a a
  24. doHorizDefFilter E ac ac
  25. deRing
  26. Vertical RKAlgo1 E a a
  27. Vertical X1 a E E
  28. Horizontal X1 a E E
  29. LinIpolDeinterlace e E E*
  30. CubicIpolDeinterlace a e e*
  31. LinBlendDeinterlace e E E*
  32. MedianDeinterlace Ec Ec
33. * I don't have a 3DNow! CPU -> it's untested
  34. E = Exact implementation
35. e = almost exact implementation (slightly different rounding,...)
  36. a = alternative / approximate impl
  37. c = checked against the other implementations (-vo md5)
  38. */
  39. /*
  40. TODO:
41. verify that everything works as it should (how?)
  42. reduce the time wasted on the mem transfer
  43. implement dering
  44. implement everything in C at least (done at the moment but ...)
  45. unroll stuff if instructions depend too much on the prior one
  46. we use 8x8 blocks for the horizontal filters, opendivx seems to use 8x4?
  47. move YScale thing to the end instead of fixing QP
  48. write a faster and higher quality deblocking filter :)
  49. do something about the speed of the horizontal filters
  50. make the mainloop more flexible (variable number of blocks at once
  51. (the if/else stuff per block is slowing things down)
  52. compare the quality & speed of all filters
  53. split this huge file
  54. fix warnings (unused vars, ...)
  55. noise reduction filters
  56. write an exact implementation of the horizontal delocking filter
  57. ...
  58. Notes:
  59. */
  60. //Changelog: use the CVS log
  61. #include <inttypes.h>
  62. #include <stdio.h>
  63. #include <stdlib.h>
  64. #include <string.h>
  65. #include "../config.h"
  66. //#undef HAVE_MMX2
  67. //#define HAVE_3DNOW
  68. //#undef HAVE_MMX
  69. #include "postprocess.h"
  70. #define MIN(a,b) ((a) > (b) ? (b) : (a))
  71. #define MAX(a,b) ((a) < (b) ? (b) : (a))
  72. #define ABS(a) ((a) > 0 ? (a) : (-(a)))
  73. #define SIGN(a) ((a) > 0 ? 1 : -1)
  74. #ifdef HAVE_MMX2
  75. #define PAVGB(a,b) "pavgb " #a ", " #b " \n\t"
  76. #elif defined (HAVE_3DNOW)
  77. #define PAVGB(a,b) "pavgusb " #a ", " #b " \n\t"
  78. #endif
  79. #define GET_MODE_BUFFER_SIZE 500
  80. #define OPTIONS_ARRAY_SIZE 10
  81. static uint64_t packedYOffset= 0x0000000000000000LL;
  82. static uint64_t packedYScale= 0x0100010001000100LL;
  83. static uint64_t w05= 0x0005000500050005LL;
  84. static uint64_t w20= 0x0020002000200020LL;
  85. static uint64_t w1400= 0x1400140014001400LL;
  86. static uint64_t bm00000001= 0x00000000000000FFLL;
  87. static uint64_t bm00010000= 0x000000FF00000000LL;
  88. static uint64_t bm00001000= 0x00000000FF000000LL;
  89. static uint64_t bm10000000= 0xFF00000000000000LL;
  90. static uint64_t bm10000001= 0xFF000000000000FFLL;
  91. static uint64_t bm11000011= 0xFFFF00000000FFFFLL;
  92. static uint64_t bm00000011= 0x000000000000FFFFLL;
  93. static uint64_t bm11111110= 0xFFFFFFFFFFFFFF00LL;
  94. static uint64_t bm11000000= 0xFFFF000000000000LL;
  95. static uint64_t bm00011000= 0x000000FFFF000000LL;
  96. static uint64_t bm00110011= 0x0000FFFF0000FFFFLL;
  97. static uint64_t bm11001100= 0xFFFF0000FFFF0000LL;
  98. static uint64_t b00= 0x0000000000000000LL;
  99. static uint64_t b01= 0x0101010101010101LL;
  100. static uint64_t b02= 0x0202020202020202LL;
  101. static uint64_t b0F= 0x0F0F0F0F0F0F0F0FLL;
  102. static uint64_t bFF= 0xFFFFFFFFFFFFFFFFLL;
  103. static uint64_t b20= 0x2020202020202020LL;
  104. static uint64_t b80= 0x8080808080808080LL;
  105. static uint64_t b7E= 0x7E7E7E7E7E7E7E7ELL;
  106. static uint64_t b7C= 0x7C7C7C7C7C7C7C7CLL;
  107. static uint64_t b3F= 0x3F3F3F3F3F3F3F3FLL;
  108. static uint64_t temp0=0;
  109. static uint64_t temp1=0;
  110. static uint64_t temp2=0;
  111. static uint64_t temp3=0;
  112. static uint64_t temp4=0;
  113. static uint64_t temp5=0;
  114. static uint64_t pQPb=0;
  115. static uint8_t tempBlock[16*16]; //used so the horizontal code gets aligned data
  116. int hFlatnessThreshold= 56 - 16;
  117. int vFlatnessThreshold= 56 - 16;
  118. //amount of "black" u r willing to loose to get a brightness corrected picture
  119. double maxClippedThreshold= 0.01;
  120. int maxAllowedY=234;
  121. int minAllowedY=16;
  122. static struct PPFilter filters[]=
  123. {
  124. {"hb", "hdeblock", 1, 1, 3, H_DEBLOCK},
  125. {"vb", "vdeblock", 1, 2, 4, V_DEBLOCK},
  126. {"vr", "rkvdeblock", 1, 2, 4, H_RK1_FILTER},
  127. {"h1", "x1hdeblock", 1, 1, 3, H_X1_FILTER},
  128. {"v1", "x1vdeblock", 1, 2, 4, V_X1_FILTER},
  129. {"dr", "dering", 1, 5, 6, DERING},
  130. {"al", "autolevels", 0, 1, 2, LEVEL_FIX},
  131. {"lb", "linblenddeint", 0, 1, 6, LINEAR_BLEND_DEINT_FILTER},
  132. {"li", "linipoldeint", 0, 1, 6, LINEAR_IPOL_DEINT_FILTER},
  133. {"ci", "cubicipoldeint", 0, 1, 6, CUBIC_IPOL_DEINT_FILTER},
  134. {"md", "mediandeint", 0, 1, 6, MEDIAN_DEINT_FILTER},
  135. {NULL, NULL,0,0,0,0} //End Marker
  136. };
  137. static char *replaceTable[]=
  138. {
  139. "default", "hdeblock:a,vdeblock:a,dering:a,autolevels",
  140. "de", "hdeblock:a,vdeblock:a,dering:a,autolevels",
  141. "fast", "x1hdeblock:a,x1vdeblock:a,dering:a,autolevels",
  142. "fa", "x1hdeblock:a,x1vdeblock:a,dering:a,autolevels",
  143. NULL //End Marker
  144. };
  145. #ifdef TIMING
  146. static inline long long rdtsc()
  147. {
  148. long long l;
  149. asm volatile( "rdtsc\n\t"
  150. : "=A" (l)
  151. );
  152. // printf("%d\n", int(l/1000));
  153. return l;
  154. }
  155. #endif
  156. #ifdef HAVE_MMX2
  157. static inline void prefetchnta(void *p)
  158. {
  159. asm volatile( "prefetchnta (%0)\n\t"
  160. : : "r" (p)
  161. );
  162. }
  163. static inline void prefetcht0(void *p)
  164. {
  165. asm volatile( "prefetcht0 (%0)\n\t"
  166. : : "r" (p)
  167. );
  168. }
  169. static inline void prefetcht1(void *p)
  170. {
  171. asm volatile( "prefetcht1 (%0)\n\t"
  172. : : "r" (p)
  173. );
  174. }
  175. static inline void prefetcht2(void *p)
  176. {
  177. asm volatile( "prefetcht2 (%0)\n\t"
  178. : : "r" (p)
  179. );
  180. }
  181. #endif
  182. //FIXME? |255-0| = 1 (shouldnt be a problem ...)
  183. /**
  184. * Check if the middle 8x8 Block in the given 8x16 block is flat
  185. */
  186. static inline int isVertDC(uint8_t src[], int stride){
  187. int numEq= 0;
  188. int y;
  189. src+= stride*4; // src points to begin of the 8x8 Block
  190. #ifdef HAVE_MMX
  191. asm volatile(
  192. "leal (%1, %2), %%eax \n\t"
  193. "leal (%%eax, %2, 4), %%ebx \n\t"
  194. // 0 1 2 3 4 5 6 7 8 9
  195. // %1 eax eax+%2 eax+2%2 %1+4%2 ebx ebx+%2 ebx+2%2 %1+8%2 ebx+4%2
  196. "movq b7E, %%mm7 \n\t" // mm7 = 0x7F
  197. "movq b7C, %%mm6 \n\t" // mm6 = 0x7D
  198. "movq (%1), %%mm0 \n\t"
  199. "movq (%%eax), %%mm1 \n\t"
  200. "psubb %%mm1, %%mm0 \n\t" // mm0 = differnece
  201. "paddb %%mm7, %%mm0 \n\t"
  202. "pcmpgtb %%mm6, %%mm0 \n\t"
  203. "movq (%%eax,%2), %%mm2 \n\t"
  204. "psubb %%mm2, %%mm1 \n\t"
  205. "paddb %%mm7, %%mm1 \n\t"
  206. "pcmpgtb %%mm6, %%mm1 \n\t"
  207. "paddb %%mm1, %%mm0 \n\t"
  208. "movq (%%eax, %2, 2), %%mm1 \n\t"
  209. "psubb %%mm1, %%mm2 \n\t"
  210. "paddb %%mm7, %%mm2 \n\t"
  211. "pcmpgtb %%mm6, %%mm2 \n\t"
  212. "paddb %%mm2, %%mm0 \n\t"
  213. "movq (%1, %2, 4), %%mm2 \n\t"
  214. "psubb %%mm2, %%mm1 \n\t"
  215. "paddb %%mm7, %%mm1 \n\t"
  216. "pcmpgtb %%mm6, %%mm1 \n\t"
  217. "paddb %%mm1, %%mm0 \n\t"
  218. "movq (%%ebx), %%mm1 \n\t"
  219. "psubb %%mm1, %%mm2 \n\t"
  220. "paddb %%mm7, %%mm2 \n\t"
  221. "pcmpgtb %%mm6, %%mm2 \n\t"
  222. "paddb %%mm2, %%mm0 \n\t"
  223. "movq (%%ebx, %2), %%mm2 \n\t"
  224. "psubb %%mm2, %%mm1 \n\t"
  225. "paddb %%mm7, %%mm1 \n\t"
  226. "pcmpgtb %%mm6, %%mm1 \n\t"
  227. "paddb %%mm1, %%mm0 \n\t"
  228. "movq (%%ebx, %2, 2), %%mm1 \n\t"
  229. "psubb %%mm1, %%mm2 \n\t"
  230. "paddb %%mm7, %%mm2 \n\t"
  231. "pcmpgtb %%mm6, %%mm2 \n\t"
  232. "paddb %%mm2, %%mm0 \n\t"
  233. " \n\t"
  234. "movq %%mm0, %%mm1 \n\t"
  235. "psrlw $8, %%mm0 \n\t"
  236. "paddb %%mm1, %%mm0 \n\t"
  237. "movq %%mm0, %%mm1 \n\t"
  238. "psrlq $16, %%mm0 \n\t"
  239. "paddb %%mm1, %%mm0 \n\t"
  240. "movq %%mm0, %%mm1 \n\t"
  241. "psrlq $32, %%mm0 \n\t"
  242. "paddb %%mm1, %%mm0 \n\t"
  243. "movd %%mm0, %0 \n\t"
  244. : "=r" (numEq)
  245. : "r" (src), "r" (stride)
  246. );
  247. numEq= (256 - numEq) &0xFF;
  248. #else
  249. for(y=0; y<BLOCK_SIZE-1; y++)
  250. {
  251. if(((src[0] - src[0+stride] + 1)&0xFFFF) < 3) numEq++;
  252. if(((src[1] - src[1+stride] + 1)&0xFFFF) < 3) numEq++;
  253. if(((src[2] - src[2+stride] + 1)&0xFFFF) < 3) numEq++;
  254. if(((src[3] - src[3+stride] + 1)&0xFFFF) < 3) numEq++;
  255. if(((src[4] - src[4+stride] + 1)&0xFFFF) < 3) numEq++;
  256. if(((src[5] - src[5+stride] + 1)&0xFFFF) < 3) numEq++;
  257. if(((src[6] - src[6+stride] + 1)&0xFFFF) < 3) numEq++;
  258. if(((src[7] - src[7+stride] + 1)&0xFFFF) < 3) numEq++;
  259. src+= stride;
  260. }
  261. #endif
  262. /* if(abs(numEq - asmEq) > 0)
  263. {
  264. printf("\nasm:%d c:%d\n", asmEq, numEq);
  265. for(int y=0; y<8; y++)
  266. {
  267. for(int x=0; x<8; x++)
  268. {
  269. printf("%d ", temp[x + y*stride]);
  270. }
  271. printf("\n");
  272. }
  273. }
  274. */
  275. // for(int i=0; i<numEq/8; i++) src[i]=255;
  276. return (numEq > vFlatnessThreshold) ? 1 : 0;
  277. }
  278. static inline int isVertMinMaxOk(uint8_t src[], int stride, int QP)
  279. {
  280. #ifdef HAVE_MMX
  281. int isOk;
  282. src+= stride*3;
  283. asm volatile(
  284. // "int $3 \n\t"
  285. "movq (%1, %2), %%mm0 \n\t"
  286. "movq (%1, %2, 8), %%mm1 \n\t"
  287. "movq %%mm0, %%mm2 \n\t"
  288. "psubusb %%mm1, %%mm0 \n\t"
  289. "psubusb %%mm2, %%mm1 \n\t"
  290. "por %%mm1, %%mm0 \n\t" // ABS Diff
  291. "movq pQPb, %%mm7 \n\t" // QP,..., QP
  292. "paddusb %%mm7, %%mm7 \n\t" // 2QP ... 2QP
  293. "psubusb %%mm7, %%mm0 \n\t" // Diff <= 2QP -> 0
  294. "pcmpeqd b00, %%mm0 \n\t"
  295. "psrlq $16, %%mm0 \n\t"
  296. "pcmpeqd bFF, %%mm0 \n\t"
  297. // "movd %%mm0, (%1, %2, 4)\n\t"
  298. "movd %%mm0, %0 \n\t"
  299. : "=r" (isOk)
  300. : "r" (src), "r" (stride)
  301. );
  302. return isOk;
  303. #else
  304. int isOk2= 1;
  305. int x;
  306. src+= stride*3;
  307. for(x=0; x<BLOCK_SIZE; x++)
  308. {
  309. if(abs((int)src[x + stride] - (int)src[x + (stride<<3)]) > 2*QP) isOk2=0;
  310. }
  311. /* if(isOk && !isOk2 || !isOk && isOk2)
  312. {
  313. printf("\nasm:%d c:%d QP:%d\n", isOk, isOk2, QP);
  314. for(int y=0; y<9; y++)
  315. {
  316. for(int x=0; x<8; x++)
  317. {
  318. printf("%d ", src[x + y*stride]);
  319. }
  320. printf("\n");
  321. }
  322. } */
  323. return isOk2;
  324. #endif
  325. }
  326. /**
  327. * Do a vertical low pass filter on the 8x16 block (only write to the 8x8 block in the middle)
  328. * using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16
  329. */
  330. static inline void doVertLowPass(uint8_t *src, int stride, int QP)
  331. {
  332. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  333. src+= stride*3;
  334. asm volatile( //"movv %0 %1 %2\n\t"
  335. "pushl %0 \n\t"
  336. "movq pQPb, %%mm0 \n\t" // QP,..., QP
  337. "movq (%0), %%mm6 \n\t"
  338. "movq (%0, %1), %%mm5 \n\t"
  339. "movq %%mm5, %%mm1 \n\t"
  340. "movq %%mm6, %%mm2 \n\t"
  341. "psubusb %%mm6, %%mm5 \n\t"
  342. "psubusb %%mm1, %%mm2 \n\t"
  343. "por %%mm5, %%mm2 \n\t" // ABS Diff of lines
  344. "psubusb %%mm0, %%mm2 \n\t" // diff <= QP -> 0
  345. "pcmpeqb b00, %%mm2 \n\t" // diff <= QP -> FF
  346. "pand %%mm2, %%mm6 \n\t"
  347. "pandn %%mm1, %%mm2 \n\t"
  348. "por %%mm2, %%mm6 \n\t"// First Line to Filter
  349. "movq (%0, %1, 8), %%mm5 \n\t"
  350. "leal (%0, %1, 4), %%eax \n\t"
  351. "leal (%0, %1, 8), %%ebx \n\t"
  352. "subl %1, %%ebx \n\t"
  353. "addl %1, %0 \n\t" // %0 points to line 1 not 0
  354. "movq (%0, %1, 8), %%mm7 \n\t"
  355. "movq %%mm5, %%mm1 \n\t"
  356. "movq %%mm7, %%mm2 \n\t"
  357. "psubusb %%mm7, %%mm5 \n\t"
  358. "psubusb %%mm1, %%mm2 \n\t"
  359. "por %%mm5, %%mm2 \n\t" // ABS Diff of lines
  360. "psubusb %%mm0, %%mm2 \n\t" // diff <= QP -> 0
  361. "pcmpeqb b00, %%mm2 \n\t" // diff <= QP -> FF
  362. "pand %%mm2, %%mm7 \n\t"
  363. "pandn %%mm1, %%mm2 \n\t"
  364. "por %%mm2, %%mm7 \n\t" // First Line to Filter
  365. // 1 2 3 4 5 6 7 8
  366. // %0 %0+%1 %0+2%1 eax %0+4%1 eax+2%1 ebx eax+4%1
  367. // 6 4 2 2 1 1
  368. // 6 4 4 2
  369. // 6 8 2
  370. "movq (%0, %1), %%mm0 \n\t" // 1
  371. "movq %%mm0, %%mm1 \n\t" // 1
  372. PAVGB(%%mm6, %%mm0) //1 1 /2
  373. PAVGB(%%mm6, %%mm0) //3 1 /4
  374. "movq (%0, %1, 4), %%mm2 \n\t" // 1
  375. "movq %%mm2, %%mm5 \n\t" // 1
  376. PAVGB((%%eax), %%mm2) // 11 /2
  377. PAVGB((%0, %1, 2), %%mm2) // 211 /4
  378. "movq %%mm2, %%mm3 \n\t" // 211 /4
  379. "movq (%0), %%mm4 \n\t" // 1
  380. PAVGB(%%mm4, %%mm3) // 4 211 /8
  381. PAVGB(%%mm0, %%mm3) //642211 /16
  382. "movq %%mm3, (%0) \n\t" // X
  383. // mm1=2 mm2=3(211) mm4=1 mm5=5 mm6=0 mm7=9
  384. "movq %%mm1, %%mm0 \n\t" // 1
  385. PAVGB(%%mm6, %%mm0) //1 1 /2
  386. "movq %%mm4, %%mm3 \n\t" // 1
  387. PAVGB((%0,%1,2), %%mm3) // 1 1 /2
  388. PAVGB((%%eax,%1,2), %%mm5) // 11 /2
  389. PAVGB((%%eax), %%mm5) // 211 /4
  390. PAVGB(%%mm5, %%mm3) // 2 2211 /8
  391. PAVGB(%%mm0, %%mm3) //4242211 /16
  392. "movq %%mm3, (%0,%1) \n\t" // X
  393. // mm1=2 mm2=3(211) mm4=1 mm5=4(211) mm6=0 mm7=9
  394. PAVGB(%%mm4, %%mm6) //11 /2
  395. "movq (%%ebx), %%mm0 \n\t" // 1
  396. PAVGB((%%eax, %1, 2), %%mm0) // 11/2
  397. "movq %%mm0, %%mm3 \n\t" // 11/2
  398. PAVGB(%%mm1, %%mm0) // 2 11/4
  399. PAVGB(%%mm6, %%mm0) //222 11/8
  400. PAVGB(%%mm2, %%mm0) //22242211/16
  401. "movq (%0, %1, 2), %%mm2 \n\t" // 1
  402. "movq %%mm0, (%0, %1, 2) \n\t" // X
  403. // mm1=2 mm2=3 mm3=6(11) mm4=1 mm5=4(211) mm6=0(11) mm7=9
  404. "movq (%%eax, %1, 4), %%mm0 \n\t" // 1
  405. PAVGB((%%ebx), %%mm0) // 11 /2
  406. PAVGB(%%mm0, %%mm6) //11 11 /4
  407. PAVGB(%%mm1, %%mm4) // 11 /2
  408. PAVGB(%%mm2, %%mm1) // 11 /2
  409. PAVGB(%%mm1, %%mm6) //1122 11 /8
  410. PAVGB(%%mm5, %%mm6) //112242211 /16
  411. "movq (%%eax), %%mm5 \n\t" // 1
  412. "movq %%mm6, (%%eax) \n\t" // X
  413. // mm0=7(11) mm1=2(11) mm2=3 mm3=6(11) mm4=1(11) mm5=4 mm7=9
  414. "movq (%%eax, %1, 4), %%mm6 \n\t" // 1
  415. PAVGB(%%mm7, %%mm6) // 11 /2
  416. PAVGB(%%mm4, %%mm6) // 11 11 /4
  417. PAVGB(%%mm3, %%mm6) // 11 2211 /8
  418. PAVGB(%%mm5, %%mm2) // 11 /2
  419. "movq (%0, %1, 4), %%mm4 \n\t" // 1
  420. PAVGB(%%mm4, %%mm2) // 112 /4
  421. PAVGB(%%mm2, %%mm6) // 112242211 /16
  422. "movq %%mm6, (%0, %1, 4) \n\t" // X
  423. // mm0=7(11) mm1=2(11) mm2=3(112) mm3=6(11) mm4=5 mm5=4 mm7=9
  424. PAVGB(%%mm7, %%mm1) // 11 2 /4
  425. PAVGB(%%mm4, %%mm5) // 11 /2
  426. PAVGB(%%mm5, %%mm0) // 11 11 /4
  427. "movq (%%eax, %1, 2), %%mm6 \n\t" // 1
  428. PAVGB(%%mm6, %%mm1) // 11 4 2 /8
  429. PAVGB(%%mm0, %%mm1) // 11224222 /16
  430. "movq %%mm1, (%%eax, %1, 2) \n\t" // X
  431. // mm2=3(112) mm3=6(11) mm4=5 mm5=4(11) mm6=6 mm7=9
  432. PAVGB((%%ebx), %%mm2) // 112 4 /8
  433. "movq (%%eax, %1, 4), %%mm0 \n\t" // 1
  434. PAVGB(%%mm0, %%mm6) // 1 1 /2
  435. PAVGB(%%mm7, %%mm6) // 1 12 /4
  436. PAVGB(%%mm2, %%mm6) // 1122424 /4
  437. "movq %%mm6, (%%ebx) \n\t" // X
  438. // mm0=8 mm3=6(11) mm4=5 mm5=4(11) mm7=9
  439. PAVGB(%%mm7, %%mm5) // 11 2 /4
  440. PAVGB(%%mm7, %%mm5) // 11 6 /8
  441. PAVGB(%%mm3, %%mm0) // 112 /4
  442. PAVGB(%%mm0, %%mm5) // 112246 /16
  443. "movq %%mm5, (%%eax, %1, 4) \n\t" // X
  444. "popl %0\n\t"
  445. :
  446. : "r" (src), "r" (stride)
  447. : "%eax", "%ebx"
  448. );
  449. #else
  450. const int l1= stride;
  451. const int l2= stride + l1;
  452. const int l3= stride + l2;
  453. const int l4= stride + l3;
  454. const int l5= stride + l4;
  455. const int l6= stride + l5;
  456. const int l7= stride + l6;
  457. const int l8= stride + l7;
  458. const int l9= stride + l8;
  459. int x;
  460. src+= stride*3;
  461. for(x=0; x<BLOCK_SIZE; x++)
  462. {
  463. const int first= ABS(src[0] - src[l1]) < QP ? src[0] : src[l1];
  464. const int last= ABS(src[l8] - src[l9]) < QP ? src[l9] : src[l8];
  465. int sums[9];
  466. sums[0] = first + src[l1];
  467. sums[1] = src[l1] + src[l2];
  468. sums[2] = src[l2] + src[l3];
  469. sums[3] = src[l3] + src[l4];
  470. sums[4] = src[l4] + src[l5];
  471. sums[5] = src[l5] + src[l6];
  472. sums[6] = src[l6] + src[l7];
  473. sums[7] = src[l7] + src[l8];
  474. sums[8] = src[l8] + last;
  475. src[l1]= ((sums[0]<<2) + ((first + sums[2])<<1) + sums[4] + 8)>>4;
  476. src[l2]= ((src[l2]<<2) + (first + sums[0] + sums[3]<<1) + sums[5] + 8)>>4;
  477. src[l3]= ((src[l3]<<2) + (first + sums[1] + sums[4]<<1) + sums[6] + 8)>>4;
  478. src[l4]= ((src[l4]<<2) + (sums[2] + sums[5]<<1) + sums[0] + sums[7] + 8)>>4;
  479. src[l5]= ((src[l5]<<2) + (sums[3] + sums[6]<<1) + sums[1] + sums[8] + 8)>>4;
  480. src[l6]= ((src[l6]<<2) + (last + sums[7] + sums[4]<<1) + sums[2] + 8)>>4;
  481. src[l7]= ((last + src[l7]<<2) + (src[l8] + sums[5]<<1) + sums[3] + 8)>>4;
  482. src[l8]= ((sums[8]<<2) + (last + sums[6]<<1) + sums[4] + 8)>>4;
  483. src++;
  484. }
  485. #endif
  486. }
  487. /**
  488. * Experimental implementation of the filter (Algorithm 1) described in a paper from Ramkishor & Karandikar
  489. * values are correctly clipped (MMX2)
  490. * values are wraparound (C)
  491. * conclusion: its fast, but introduces ugly horizontal patterns if there is a continious gradient
  492. 0 8 16 24
  493. x = 8
  494. x/2 = 4
  495. x/8 = 1
  496. 1 12 12 23
  497. */
  498. static inline void vertRK1Filter(uint8_t *src, int stride, int QP)
  499. {
  500. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  501. src+= stride*3;
  502. // FIXME rounding
  503. asm volatile(
  504. "pxor %%mm7, %%mm7 \n\t" // 0
  505. "movq b80, %%mm6 \n\t" // MIN_SIGNED_BYTE
  506. "leal (%0, %1), %%eax \n\t"
  507. "leal (%%eax, %1, 4), %%ebx \n\t"
  508. // 0 1 2 3 4 5 6 7 8 9
  509. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  510. "movq pQPb, %%mm0 \n\t" // QP,..., QP
  511. "movq %%mm0, %%mm1 \n\t" // QP,..., QP
  512. "paddusb b02, %%mm0 \n\t"
  513. "psrlw $2, %%mm0 \n\t"
  514. "pand b3F, %%mm0 \n\t" // QP/4,..., QP/4
  515. "paddusb %%mm1, %%mm0 \n\t" // QP*1.25 ...
  516. "movq (%0, %1, 4), %%mm2 \n\t" // line 4
  517. "movq (%%ebx), %%mm3 \n\t" // line 5
  518. "movq %%mm2, %%mm4 \n\t" // line 4
  519. "pcmpeqb %%mm5, %%mm5 \n\t" // -1
  520. "pxor %%mm2, %%mm5 \n\t" // -line 4 - 1
  521. PAVGB(%%mm3, %%mm5)
  522. "paddb %%mm6, %%mm5 \n\t" // (l5-l4)/2
  523. "psubusb %%mm3, %%mm4 \n\t"
  524. "psubusb %%mm2, %%mm3 \n\t"
  525. "por %%mm3, %%mm4 \n\t" // |l4 - l5|
  526. "psubusb %%mm0, %%mm4 \n\t"
  527. "pcmpeqb %%mm7, %%mm4 \n\t"
  528. "pand %%mm4, %%mm5 \n\t" // d/2
  529. // "paddb %%mm6, %%mm2 \n\t" // line 4 + 0x80
  530. "paddb %%mm5, %%mm2 \n\t"
  531. // "psubb %%mm6, %%mm2 \n\t"
  532. "movq %%mm2, (%0,%1, 4) \n\t"
  533. "movq (%%ebx), %%mm2 \n\t"
  534. // "paddb %%mm6, %%mm2 \n\t" // line 5 + 0x80
  535. "psubb %%mm5, %%mm2 \n\t"
  536. // "psubb %%mm6, %%mm2 \n\t"
  537. "movq %%mm2, (%%ebx) \n\t"
  538. "paddb %%mm6, %%mm5 \n\t"
  539. "psrlw $2, %%mm5 \n\t"
  540. "pand b3F, %%mm5 \n\t"
  541. "psubb b20, %%mm5 \n\t" // (l5-l4)/8
  542. "movq (%%eax, %1, 2), %%mm2 \n\t"
  543. "paddb %%mm6, %%mm2 \n\t" // line 3 + 0x80
  544. "paddsb %%mm5, %%mm2 \n\t"
  545. "psubb %%mm6, %%mm2 \n\t"
  546. "movq %%mm2, (%%eax, %1, 2) \n\t"
  547. "movq (%%ebx, %1), %%mm2 \n\t"
  548. "paddb %%mm6, %%mm2 \n\t" // line 6 + 0x80
  549. "psubsb %%mm5, %%mm2 \n\t"
  550. "psubb %%mm6, %%mm2 \n\t"
  551. "movq %%mm2, (%%ebx, %1) \n\t"
  552. :
  553. : "r" (src), "r" (stride)
  554. : "%eax", "%ebx"
  555. );
  556. #else
  557. const int l1= stride;
  558. const int l2= stride + l1;
  559. const int l3= stride + l2;
  560. const int l4= stride + l3;
  561. const int l5= stride + l4;
  562. const int l6= stride + l5;
  563. const int l7= stride + l6;
  564. const int l8= stride + l7;
  565. const int l9= stride + l8;
  566. int x;
  567. src+= stride*3;
  568. for(x=0; x<BLOCK_SIZE; x++)
  569. {
  570. if(ABS(src[l4]-src[l5]) < QP + QP/4)
  571. {
  572. int v = (src[l5] - src[l4]);
  573. src[l3] +=v/8;
  574. src[l4] +=v/2;
  575. src[l5] -=v/2;
  576. src[l6] -=v/8;
  577. }
  578. src++;
  579. }
  580. #endif
  581. }
  582. /**
  583. * Experimental Filter 1
  584. * will not damage linear gradients
  585. * Flat blocks should look like they where passed through the (1,1,2,2,4,2,2,1,1) 9-Tap filter
  586. * can only smooth blocks at the expected locations (it cant smooth them if they did move)
  587. * MMX2 version does correct clipping C version doesnt
  588. */
  589. static inline void vertX1Filter(uint8_t *src, int stride, int QP)
  590. {
  591. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  592. src+= stride*3;
  593. asm volatile(
  594. "pxor %%mm7, %%mm7 \n\t" // 0
  595. // "movq b80, %%mm6 \n\t" // MIN_SIGNED_BYTE
  596. "leal (%0, %1), %%eax \n\t"
  597. "leal (%%eax, %1, 4), %%ebx \n\t"
  598. // 0 1 2 3 4 5 6 7 8 9
  599. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  600. "movq (%%eax, %1, 2), %%mm0 \n\t" // line 3
  601. "movq (%0, %1, 4), %%mm1 \n\t" // line 4
  602. "movq %%mm1, %%mm2 \n\t" // line 4
  603. "psubusb %%mm0, %%mm1 \n\t"
  604. "psubusb %%mm2, %%mm0 \n\t"
  605. "por %%mm1, %%mm0 \n\t" // |l2 - l3|
  606. "movq (%%ebx), %%mm3 \n\t" // line 5
  607. "movq (%%ebx, %1), %%mm4 \n\t" // line 6
  608. "movq %%mm3, %%mm5 \n\t" // line 5
  609. "psubusb %%mm4, %%mm3 \n\t"
  610. "psubusb %%mm5, %%mm4 \n\t"
  611. "por %%mm4, %%mm3 \n\t" // |l5 - l6|
  612. PAVGB(%%mm3, %%mm0) // (|l2 - l3| + |l5 - l6|)/2
  613. "movq %%mm2, %%mm1 \n\t" // line 4
  614. "psubusb %%mm5, %%mm2 \n\t"
  615. "movq %%mm2, %%mm4 \n\t"
  616. "pcmpeqb %%mm7, %%mm2 \n\t" // (l4 - l5) <= 0 ? -1 : 0
  617. "psubusb %%mm1, %%mm5 \n\t"
  618. "por %%mm5, %%mm4 \n\t" // |l4 - l5|
  619. "psubusb %%mm0, %%mm4 \n\t" //d = MAX(0, |l4-l5| - (|l2-l3| + |l5-l6|)/2)
  620. "movq %%mm4, %%mm3 \n\t" // d
  621. "psubusb pQPb, %%mm4 \n\t"
  622. "pcmpeqb %%mm7, %%mm4 \n\t" // d <= QP ? -1 : 0
  623. "psubusb b01, %%mm3 \n\t"
  624. "pand %%mm4, %%mm3 \n\t" // d <= QP ? d : 0
  625. PAVGB(%%mm7, %%mm3) // d/2
  626. "movq %%mm3, %%mm1 \n\t" // d/2
  627. PAVGB(%%mm7, %%mm3) // d/4
  628. PAVGB(%%mm1, %%mm3) // 3*d/8
  629. "movq (%0, %1, 4), %%mm0 \n\t" // line 4
  630. "pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l4-1 : l4
  631. "psubusb %%mm3, %%mm0 \n\t"
  632. "pxor %%mm2, %%mm0 \n\t"
  633. "movq %%mm0, (%0, %1, 4) \n\t" // line 4
  634. "movq (%%ebx), %%mm0 \n\t" // line 5
  635. "pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l5-1 : l5
  636. "paddusb %%mm3, %%mm0 \n\t"
  637. "pxor %%mm2, %%mm0 \n\t"
  638. "movq %%mm0, (%%ebx) \n\t" // line 5
  639. PAVGB(%%mm7, %%mm1) // d/4
  640. "movq (%%eax, %1, 2), %%mm0 \n\t" // line 3
  641. "pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l4-1 : l4
  642. "psubusb %%mm1, %%mm0 \n\t"
  643. "pxor %%mm2, %%mm0 \n\t"
  644. "movq %%mm0, (%%eax, %1, 2) \n\t" // line 3
  645. "movq (%%ebx, %1), %%mm0 \n\t" // line 6
  646. "pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l5-1 : l5
  647. "paddusb %%mm1, %%mm0 \n\t"
  648. "pxor %%mm2, %%mm0 \n\t"
  649. "movq %%mm0, (%%ebx, %1) \n\t" // line 6
  650. PAVGB(%%mm7, %%mm1) // d/8
  651. "movq (%%eax, %1), %%mm0 \n\t" // line 2
  652. "pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l2-1 : l2
  653. "psubusb %%mm1, %%mm0 \n\t"
  654. "pxor %%mm2, %%mm0 \n\t"
  655. "movq %%mm0, (%%eax, %1) \n\t" // line 2
  656. "movq (%%ebx, %1, 2), %%mm0 \n\t" // line 7
  657. "pxor %%mm2, %%mm0 \n\t" //(l4 - l5) <= 0 ? -l7-1 : l7
  658. "paddusb %%mm1, %%mm0 \n\t"
  659. "pxor %%mm2, %%mm0 \n\t"
  660. "movq %%mm0, (%%ebx, %1, 2) \n\t" // line 7
  661. :
  662. : "r" (src), "r" (stride)
  663. : "%eax", "%ebx"
  664. );
  665. #else
  666. const int l1= stride;
  667. const int l2= stride + l1;
  668. const int l3= stride + l2;
  669. const int l4= stride + l3;
  670. const int l5= stride + l4;
  671. const int l6= stride + l5;
  672. const int l7= stride + l6;
  673. const int l8= stride + l7;
  674. const int l9= stride + l8;
  675. int x;
  676. src+= stride*3;
  677. for(x=0; x<BLOCK_SIZE; x++)
  678. {
  679. int a= src[l3] - src[l4];
  680. int b= src[l4] - src[l5];
  681. int c= src[l5] - src[l6];
  682. int d= MAX(ABS(b) - (ABS(a) + ABS(c))/2, 0);
  683. if(d < QP)
  684. {
  685. int v = d * SIGN(-b);
  686. src[l2] +=v/8;
  687. src[l3] +=v/4;
  688. src[l4] +=3*v/8;
  689. src[l5] -=3*v/8;
  690. src[l6] -=v/4;
  691. src[l7] -=v/8;
  692. }
  693. src++;
  694. }
  695. /*
  696. const int l1= stride;
  697. const int l2= stride + l1;
  698. const int l3= stride + l2;
  699. const int l4= stride + l3;
  700. const int l5= stride + l4;
  701. const int l6= stride + l5;
  702. const int l7= stride + l6;
  703. const int l8= stride + l7;
  704. const int l9= stride + l8;
  705. for(int x=0; x<BLOCK_SIZE; x++)
  706. {
  707. int v2= src[l2];
  708. int v3= src[l3];
  709. int v4= src[l4];
  710. int v5= src[l5];
  711. int v6= src[l6];
  712. int v7= src[l7];
  713. if(ABS(v4-v5)<QP && ABS(v4-v5) - (ABS(v3-v4) + ABS(v5-v6))>0 )
  714. {
  715. src[l3] = (6*v2 + 4*v3 + 3*v4 + 2*v5 + v6 )/16;
  716. src[l4] = (3*v2 + 3*v3 + 4*v4 + 3*v5 + 2*v6 + v7 )/16;
  717. src[l5] = (1*v2 + 2*v3 + 3*v4 + 4*v5 + 3*v6 + 3*v7)/16;
  718. src[l6] = ( 1*v3 + 2*v4 + 3*v5 + 4*v6 + 6*v7)/16;
  719. }
  720. src++;
  721. }
  722. */
  723. #endif
  724. }
  725. /**
  726. * Experimental Filter 1 (Horizontal)
  727. * will not damage linear gradients
  728. * Flat blocks should look like they where passed through the (1,1,2,2,4,2,2,1,1) 9-Tap filter
  729. * can only smooth blocks at the expected locations (it cant smooth them if they did move)
  730. * MMX2 version does correct clipping C version doesnt
  731. * not identical with the vertical one
  732. */
  733. static inline void horizX1Filter(uint8_t *src, int stride, int QP)
  734. {
  735. int y;
  736. static uint64_t *lut= NULL;
  737. if(lut==NULL)
  738. {
  739. int i;
  740. lut= (uint64_t*)memalign(8, 256*8);
  741. for(i=0; i<256; i++)
  742. {
  743. int v= i < 128 ? 2*i : 2*(i-256);
  744. /*
  745. //Simulate 112242211 9-Tap filter
  746. uint64_t a= (v/16) & 0xFF;
  747. uint64_t b= (v/8) & 0xFF;
  748. uint64_t c= (v/4) & 0xFF;
  749. uint64_t d= (3*v/8) & 0xFF;
  750. */
  751. //Simulate piecewise linear interpolation
  752. uint64_t a= (v/16) & 0xFF;
  753. uint64_t b= (v*3/16) & 0xFF;
  754. uint64_t c= (v*5/16) & 0xFF;
  755. uint64_t d= (7*v/16) & 0xFF;
  756. uint64_t A= (0x100 - a)&0xFF;
  757. uint64_t B= (0x100 - b)&0xFF;
  758. uint64_t C= (0x100 - c)&0xFF;
  759. uint64_t D= (0x100 - c)&0xFF;
  760. lut[i] = (a<<56) | (b<<48) | (c<<40) | (d<<32) |
  761. (D<<24) | (C<<16) | (B<<8) | (A);
  762. //lut[i] = (v<<32) | (v<<24);
  763. }
  764. }
  765. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  766. asm volatile(
  767. "pxor %%mm7, %%mm7 \n\t" // 0
  768. // "movq b80, %%mm6 \n\t" // MIN_SIGNED_BYTE
  769. "leal (%0, %1), %%eax \n\t"
  770. "leal (%%eax, %1, 4), %%ebx \n\t"
  771. "movq b80, %%mm6 \n\t"
  772. "movd pQPb, %%mm5 \n\t" // QP
  773. "movq %%mm5, %%mm4 \n\t"
  774. "paddusb %%mm5, %%mm5 \n\t" // 2QP
  775. "paddusb %%mm5, %%mm4 \n\t" // 3QP
  776. "pxor %%mm5, %%mm5 \n\t" // 0
  777. "psubb %%mm4, %%mm5 \n\t" // -3QP
  778. "por bm11111110, %%mm5 \n\t" // ...,FF,FF,-3QP
  779. "psllq $24, %%mm5 \n\t"
  780. // 0 1 2 3 4 5 6 7 8 9
  781. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  782. #define HX1old(a) \
  783. "movd " #a ", %%mm0 \n\t"\
  784. "movd 4" #a ", %%mm1 \n\t"\
  785. "punpckldq %%mm1, %%mm0 \n\t"\
  786. "movq %%mm0, %%mm1 \n\t"\
  787. "movq %%mm0, %%mm2 \n\t"\
  788. "psrlq $8, %%mm1 \n\t"\
  789. "psubusb %%mm1, %%mm2 \n\t"\
  790. "psubusb %%mm0, %%mm1 \n\t"\
  791. "por %%mm2, %%mm1 \n\t" /* p´x = |px - p(x+1)| */\
  792. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  793. "pshufw $0x00, %%mm1, %%mm3 \n\t" /* p´5 = |p1 - p2| */\
  794. PAVGB(%%mm1, %%mm3) /* p´5 = (|p2-p1| + |p6-p5|)/2 */\
  795. "psrlq $16, %%mm3 \n\t" /* p´3 = (|p2-p1| + |p6-p5|)/2 */\
  796. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-(|p2-p1| + |p6-p5|)/2 */\
  797. "paddb %%mm5, %%mm1 \n\t"\
  798. "psubusb %%mm5, %%mm1 \n\t"\
  799. PAVGB(%%mm7, %%mm1)\
  800. "pxor %%mm2, %%mm1 \n\t"\
  801. "psubb %%mm2, %%mm1 \n\t"\
  802. "psrlq $24, %%mm1 \n\t"\
  803. "movd %%mm1, %%ecx \n\t"\
  804. "paddb %%mm6, %%mm0 \n\t"\
  805. "paddsb (%3, %%ecx, 8), %%mm0 \n\t"\
  806. "paddb %%mm6, %%mm0 \n\t"\
  807. "movq %%mm0, " #a " \n\t"\
  808. /*
  809. HX1old((%0))
  810. HX1old((%%eax))
  811. HX1old((%%eax, %1))
  812. HX1old((%%eax, %1, 2))
  813. HX1old((%0, %1, 4))
  814. HX1old((%%ebx))
  815. HX1old((%%ebx, %1))
  816. HX1old((%%ebx, %1, 2))
  817. */
  818. //FIXME add some comments, its unreadable ...
  819. #define HX1b(a, c, b, d) \
  820. "movd " #a ", %%mm0 \n\t"\
  821. "movd 4" #a ", %%mm1 \n\t"\
  822. "punpckldq %%mm1, %%mm0 \n\t"\
  823. "movd " #b ", %%mm4 \n\t"\
  824. "movq %%mm0, %%mm1 \n\t"\
  825. "movq %%mm0, %%mm2 \n\t"\
  826. "psrlq $8, %%mm1 \n\t"\
  827. "movd 4" #b ", %%mm3 \n\t"\
  828. "psubusb %%mm1, %%mm2 \n\t"\
  829. "psubusb %%mm0, %%mm1 \n\t"\
  830. "por %%mm2, %%mm1 \n\t" /* p´x = |px - p(x+1)| */\
  831. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  832. "punpckldq %%mm3, %%mm4 \n\t"\
  833. "movq %%mm1, %%mm3 \n\t"\
  834. "psllq $32, %%mm3 \n\t" /* p´5 = |p1 - p2| */\
  835. PAVGB(%%mm1, %%mm3) /* p´5 = (|p2-p1| + |p6-p5|)/2 */\
  836. "paddb %%mm6, %%mm0 \n\t"\
  837. "psrlq $16, %%mm3 \n\t" /* p´3 = (|p2-p1| + |p6-p5|)/2 */\
  838. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-(|p2-p1| + |p6-p5|)/2 */\
  839. "movq %%mm4, %%mm3 \n\t"\
  840. "paddb %%mm5, %%mm1 \n\t"\
  841. "psubusb %%mm5, %%mm1 \n\t"\
  842. "psrlq $8, %%mm3 \n\t"\
  843. PAVGB(%%mm7, %%mm1)\
  844. "pxor %%mm2, %%mm1 \n\t"\
  845. "psubb %%mm2, %%mm1 \n\t"\
  846. "movq %%mm4, %%mm2 \n\t"\
  847. "psrlq $24, %%mm1 \n\t"\
  848. "psubusb %%mm3, %%mm2 \n\t"\
  849. "movd %%mm1, %%ecx \n\t"\
  850. "psubusb %%mm4, %%mm3 \n\t"\
  851. "paddsb (%2, %%ecx, 8), %%mm0 \n\t"\
  852. "por %%mm2, %%mm3 \n\t" /* p´x = |px - p(x+1)| */\
  853. "paddb %%mm6, %%mm0 \n\t"\
  854. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  855. "movq %%mm3, %%mm1 \n\t"\
  856. "psllq $32, %%mm1 \n\t" /* p´5 = |p1 - p2| */\
  857. "movq %%mm0, " #a " \n\t"\
  858. PAVGB(%%mm3, %%mm1) /* p´5 = (|p2-p1| + |p6-p5|)/2 */\
  859. "paddb %%mm6, %%mm4 \n\t"\
  860. "psrlq $16, %%mm1 \n\t" /* p´3 = (|p2-p1| + |p6-p5|)/2 */\
  861. "psubusb %%mm1, %%mm3 \n\t" /* |p3-p4|-(|p2-p1| + |p6-p5|)/2 */\
  862. "paddb %%mm5, %%mm3 \n\t"\
  863. "psubusb %%mm5, %%mm3 \n\t"\
  864. PAVGB(%%mm7, %%mm3)\
  865. "pxor %%mm2, %%mm3 \n\t"\
  866. "psubb %%mm2, %%mm3 \n\t"\
  867. "psrlq $24, %%mm3 \n\t"\
  868. "movd " #c ", %%mm0 \n\t"\
  869. "movd 4" #c ", %%mm1 \n\t"\
  870. "punpckldq %%mm1, %%mm0 \n\t"\
  871. "paddb %%mm6, %%mm0 \n\t"\
  872. "paddsb (%2, %%ecx, 8), %%mm0 \n\t"\
  873. "paddb %%mm6, %%mm0 \n\t"\
  874. "movq %%mm0, " #c " \n\t"\
  875. "movd %%mm3, %%ecx \n\t"\
  876. "movd " #d ", %%mm0 \n\t"\
  877. "paddsb (%2, %%ecx, 8), %%mm4 \n\t"\
  878. "movd 4" #d ", %%mm1 \n\t"\
  879. "paddb %%mm6, %%mm4 \n\t"\
  880. "punpckldq %%mm1, %%mm0 \n\t"\
  881. "movq %%mm4, " #b " \n\t"\
  882. "paddb %%mm6, %%mm0 \n\t"\
  883. "paddsb (%2, %%ecx, 8), %%mm0 \n\t"\
  884. "paddb %%mm6, %%mm0 \n\t"\
  885. "movq %%mm0, " #d " \n\t"\
  886. HX1b((%0),(%%eax),(%%eax, %1),(%%eax, %1, 2))
  887. HX1b((%0, %1, 4),(%%ebx),(%%ebx, %1),(%%ebx, %1, 2))
  888. :
  889. : "r" (src), "r" (stride), "r" (lut)
  890. : "%eax", "%ebx", "%ecx"
  891. );
  892. #else
  893. //FIXME (has little in common with the mmx2 version)
  894. for(y=0; y<BLOCK_SIZE; y++)
  895. {
  896. int a= src[1] - src[2];
  897. int b= src[3] - src[4];
  898. int c= src[5] - src[6];
  899. int d= MAX(ABS(b) - (ABS(a) + ABS(c))/2, 0);
  900. if(d < QP)
  901. {
  902. int v = d * SIGN(-b);
  903. src[1] +=v/8;
  904. src[2] +=v/4;
  905. src[3] +=3*v/8;
  906. src[4] -=3*v/8;
  907. src[5] -=v/4;
  908. src[6] -=v/8;
  909. }
  910. src+=stride;
  911. }
  912. #endif
  913. }
/**
 * Default vertical deblocking filter (MMX/MMX2 with C fallback).
 *
 * Corrects the two lines adjacent to the horizontal block boundary (l4/l5 in
 * the C version). The correction is derived from the local edge "energies":
 *   middleEnergy = 5*(l5 - l4) + 2*(l3 - l6)
 * and is only applied when |middleEnergy| < 8*QP (a stronger step is assumed
 * to be real image content). It is reduced by the smaller of the left and
 * right neighbour energies, scaled by (5*d + 32) >> 6, and finally clamped so
 * the two boundary lines never cross each other ((l4 - l5)/2 limit).
 */
static inline void doVertDefFilter(uint8_t src[], int stride, int QP)
{
#ifdef HAVE_MMX
	src+= stride*4;
	//FIXME try pmul for *5 stuff
//	src[0]=0;
	asm volatile(
		"pxor %%mm7, %%mm7			\n\t"
		"leal (%0, %1), %%eax			\n\t"
		"leal (%%eax, %1, 4), %%ebx		\n\t"
//	0	1	2	3	4	5	6	7
//	%0	%0+%1	%0+2%1	eax+2%1	%0+4%1	eax+4%1	ebx+%1	ebx+2%1
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1

		// --- left energy: 2L0 - 5L1 + 5L2 - 2L3 (low/high halves) -> temp0/temp1
		"movq (%0), %%mm0			\n\t"
		"movq %%mm0, %%mm1			\n\t"
		"punpcklbw %%mm7, %%mm0			\n\t" // low part of line 0
		"punpckhbw %%mm7, %%mm1			\n\t" // high part of line 0
		"movq (%%eax), %%mm2			\n\t"
		"movq %%mm2, %%mm3			\n\t"
		"punpcklbw %%mm7, %%mm2			\n\t" // low part of line 1
		"punpckhbw %%mm7, %%mm3			\n\t" // high part of line 1
		"movq (%%eax, %1), %%mm4		\n\t"
		"movq %%mm4, %%mm5			\n\t"
		"punpcklbw %%mm7, %%mm4			\n\t" // low part of line 2
		"punpckhbw %%mm7, %%mm5			\n\t" // high part of line 2
		"paddw %%mm0, %%mm0			\n\t" // 2L0
		"paddw %%mm1, %%mm1			\n\t" // 2H0
		"psubw %%mm4, %%mm2			\n\t" // L1 - L2
		"psubw %%mm5, %%mm3			\n\t" // H1 - H2
		"psubw %%mm2, %%mm0			\n\t" // 2L0 - L1 + L2
		"psubw %%mm3, %%mm1			\n\t" // 2H0 - H1 + H2
		"psllw $2, %%mm2			\n\t" // 4L1 - 4L2
		"psllw $2, %%mm3			\n\t" // 4H1 - 4H2
		"psubw %%mm2, %%mm0			\n\t" // 2L0 - 5L1 + 5L2
		"psubw %%mm3, %%mm1			\n\t" // 2H0 - 5H1 + 5H2
		"movq (%%eax, %1, 2), %%mm2		\n\t"
		"movq %%mm2, %%mm3			\n\t"
		"punpcklbw %%mm7, %%mm2			\n\t" // L3
		"punpckhbw %%mm7, %%mm3			\n\t" // H3
		"psubw %%mm2, %%mm0			\n\t" // 2L0 - 5L1 + 5L2 - L3
		"psubw %%mm3, %%mm1			\n\t" // 2H0 - 5H1 + 5H2 - H3
		"psubw %%mm2, %%mm0			\n\t" // 2L0 - 5L1 + 5L2 - 2L3
		"psubw %%mm3, %%mm1			\n\t" // 2H0 - 5H1 + 5H2 - 2H3
		"movq %%mm0, temp0			\n\t" // 2L0 - 5L1 + 5L2 - 2L3
		"movq %%mm1, temp1			\n\t" // 2H0 - 5H1 + 5H2 - 2H3

		// --- middle energy: 2L2 - 5L3 + 5L4 - 2L5 kept in mm4/mm5,
		//     boundary difference L3-L4 / H3-H4 saved to temp2/temp3
		"movq (%0, %1, 4), %%mm0		\n\t"
		"movq %%mm0, %%mm1			\n\t"
		"punpcklbw %%mm7, %%mm0			\n\t" // L4
		"punpckhbw %%mm7, %%mm1			\n\t" // H4
		"psubw %%mm0, %%mm2			\n\t" // L3 - L4
		"psubw %%mm1, %%mm3			\n\t" // H3 - H4
		"movq %%mm2, temp2			\n\t" // L3 - L4
		"movq %%mm3, temp3			\n\t" // H3 - H4
		"paddw %%mm4, %%mm4			\n\t" // 2L2
		"paddw %%mm5, %%mm5			\n\t" // 2H2
		"psubw %%mm2, %%mm4			\n\t" // 2L2 - L3 + L4
		"psubw %%mm3, %%mm5			\n\t" // 2H2 - H3 + H4
		"psllw $2, %%mm2			\n\t" // 4L3 - 4L4
		"psllw $2, %%mm3			\n\t" // 4H3 - 4H4
		"psubw %%mm2, %%mm4			\n\t" // 2L2 - 5L3 + 5L4
		"psubw %%mm3, %%mm5			\n\t" // 2H2 - 5H3 + 5H4
//50 opcodes so far

		// --- right energy: 2L4 - 5L5 + 5L6 - 2L7 in mm0/mm1
		"movq (%%ebx), %%mm2			\n\t"
		"movq %%mm2, %%mm3			\n\t"
		"punpcklbw %%mm7, %%mm2			\n\t" // L5
		"punpckhbw %%mm7, %%mm3			\n\t" // H5
		"psubw %%mm2, %%mm4			\n\t" // 2L2 - 5L3 + 5L4 - L5
		"psubw %%mm3, %%mm5			\n\t" // 2H2 - 5H3 + 5H4 - H5
		"psubw %%mm2, %%mm4			\n\t" // 2L2 - 5L3 + 5L4 - 2L5
		"psubw %%mm3, %%mm5			\n\t" // 2H2 - 5H3 + 5H4 - 2H5
		"movq (%%ebx, %1), %%mm6		\n\t"
		"punpcklbw %%mm7, %%mm6			\n\t" // L6
		"psubw %%mm6, %%mm2			\n\t" // L5 - L6
		"movq (%%ebx, %1), %%mm6		\n\t" // reload: mm6 was unpacked to the low half above
		"punpckhbw %%mm7, %%mm6			\n\t" // H6
		"psubw %%mm6, %%mm3			\n\t" // H5 - H6
		"paddw %%mm0, %%mm0			\n\t" // 2L4
		"paddw %%mm1, %%mm1			\n\t" // 2H4
		"psubw %%mm2, %%mm0			\n\t" // 2L4 - L5 + L6
		"psubw %%mm3, %%mm1			\n\t" // 2H4 - H5 + H6
		"psllw $2, %%mm2			\n\t" // 4L5 - 4L6
		"psllw $2, %%mm3			\n\t" // 4H5 - 4H6
		"psubw %%mm2, %%mm0			\n\t" // 2L4 - 5L5 + 5L6
		"psubw %%mm3, %%mm1			\n\t" // 2H4 - 5H5 + 5H6
		"movq (%%ebx, %1, 2), %%mm2		\n\t"
		"movq %%mm2, %%mm3			\n\t"
		"punpcklbw %%mm7, %%mm2			\n\t" // L7
		"punpckhbw %%mm7, %%mm3			\n\t" // H7
		"paddw %%mm2, %%mm2			\n\t" // 2L7
		"paddw %%mm3, %%mm3			\n\t" // 2H7
		"psubw %%mm2, %%mm0			\n\t" // 2L4 - 5L5 + 5L6 - 2L7
		"psubw %%mm3, %%mm1			\n\t" // 2H4 - 5H5 + 5H6 - 2H7
		"movq temp0, %%mm2			\n\t" // 2L0 - 5L1 + 5L2 - 2L3
		"movq temp1, %%mm3			\n\t" // 2H0 - 5H1 + 5H2 - 2H3
//FIXME pxor, psubw, pmax for abs

		// --- absolute values of left and right energies
		"movq %%mm7, %%mm6			\n\t" // 0
		"pcmpgtw %%mm0, %%mm6			\n\t"
		"pxor %%mm6, %%mm0			\n\t"
		"psubw %%mm6, %%mm0			\n\t" // |2L4 - 5L5 + 5L6 - 2L7|
		"movq %%mm7, %%mm6			\n\t" // 0
		"pcmpgtw %%mm1, %%mm6			\n\t"
		"pxor %%mm6, %%mm1			\n\t"
		"psubw %%mm6, %%mm1			\n\t" // |2H4 - 5H5 + 5H6 - 2H7|
		"movq %%mm7, %%mm6			\n\t" // 0
		"pcmpgtw %%mm2, %%mm6			\n\t"
		"pxor %%mm6, %%mm2			\n\t"
		"psubw %%mm6, %%mm2			\n\t" // |2L0 - 5L1 + 5L2 - 2L3|
		"movq %%mm7, %%mm6			\n\t" // 0
		"pcmpgtw %%mm3, %%mm6			\n\t"
		"pxor %%mm6, %%mm3			\n\t"
		"psubw %%mm6, %%mm3			\n\t" // |2H0 - 5H1 + 5H2 - 2H3|

		// --- mm0/mm1 = min(|left|, |right|) energy
#ifdef HAVE_MMX2
		"pminsw %%mm2, %%mm0			\n\t"
		"pminsw %%mm3, %%mm1			\n\t"
#else
		"movq %%mm0, %%mm6			\n\t"
		"psubusw %%mm2, %%mm6			\n\t"
		"psubw %%mm6, %%mm0			\n\t"
		"movq %%mm1, %%mm6			\n\t"
		"psubusw %%mm3, %%mm6			\n\t"
		"psubw %%mm6, %%mm1			\n\t"
#endif

		// --- |middle energy| (signs kept in mm6/mm7 for later)
		"movq %%mm7, %%mm6			\n\t" // 0
		"pcmpgtw %%mm4, %%mm6			\n\t" // sign(2L2 - 5L3 + 5L4 - 2L5)
		"pxor %%mm6, %%mm4			\n\t"
		"psubw %%mm6, %%mm4			\n\t" // |2L2 - 5L3 + 5L4 - 2L5|
		"pcmpgtw %%mm5, %%mm7			\n\t" // sign(2H2 - 5H3 + 5H4 - 2H5)
		"pxor %%mm7, %%mm5			\n\t"
		"psubw %%mm7, %%mm5			\n\t" // |2H2 - 5H3 + 5H4 - 2H5|
// 100 opcodes

		// --- zero out lanes where |middle| >= 8QP, subtract min energy,
		//     then scale: d = (5*d + 32) >> 6
		"movd %2, %%mm2				\n\t" // QP
		"punpcklwd %%mm2, %%mm2			\n\t"
		"punpcklwd %%mm2, %%mm2			\n\t"
		"psllw $3, %%mm2			\n\t" // 8QP
		"movq %%mm2, %%mm3			\n\t" // 8QP
		"pcmpgtw %%mm4, %%mm2			\n\t"
		"pcmpgtw %%mm5, %%mm3			\n\t"
		"pand %%mm2, %%mm4			\n\t"
		"pand %%mm3, %%mm5			\n\t"
		"psubusw %%mm0, %%mm4			\n\t" // hd
		"psubusw %%mm1, %%mm5			\n\t" // ld
		"movq w05, %%mm2			\n\t" // 5
		"pmullw %%mm2, %%mm4			\n\t"
		"pmullw %%mm2, %%mm5			\n\t"
		"movq w20, %%mm2			\n\t" // 32
		"paddw %%mm2, %%mm4			\n\t"
		"paddw %%mm2, %%mm5			\n\t"
		"psrlw $6, %%mm4			\n\t"
		"psrlw $6, %%mm5			\n\t"
/*
		"movq w06, %%mm2			\n\t" // 6
		"paddw %%mm2, %%mm4			\n\t"
		"paddw %%mm2, %%mm5			\n\t"
		"movq w1400, %%mm2			\n\t" // 1400h = 5120 = 5/64*2^16
//FIXME if *5/64 is supposed to be /13 then we should use 5041 instead of 5120
		"pmulhw %%mm2, %%mm4		\n\t" // hd/13
		"pmulhw %%mm2, %%mm5		\n\t" // ld/13
*/

		// --- clamp to |(L3-L4)/2| so the boundary lines cannot cross,
		//     give d the sign of -(middle energy)
		"movq temp2, %%mm0			\n\t" // L3 - L4
		"movq temp3, %%mm1			\n\t" // H3 - H4
		"pxor %%mm2, %%mm2			\n\t"
		"pxor %%mm3, %%mm3			\n\t"
		// FIXME rounding error
		"psraw $1, %%mm0			\n\t" // (L3 - L4)/2
		"psraw $1, %%mm1			\n\t" // (H3 - H4)/2
		"pcmpgtw %%mm0, %%mm2			\n\t" // sign (L3-L4)
		"pcmpgtw %%mm1, %%mm3			\n\t" // sign (H3-H4)
		"pxor %%mm2, %%mm0			\n\t"
		"pxor %%mm3, %%mm1			\n\t"
		"psubw %%mm2, %%mm0			\n\t" // |L3-L4|
		"psubw %%mm3, %%mm1			\n\t" // |H3-H4|
//		"psrlw $1, %%mm0			\n\t" // |L3 - L4|/2
//		"psrlw $1, %%mm1			\n\t" // |H3 - H4|/2
		"pxor %%mm6, %%mm2			\n\t"
		"pxor %%mm7, %%mm3			\n\t"
		"pand %%mm2, %%mm4			\n\t"
		"pand %%mm3, %%mm5			\n\t"
#ifdef HAVE_MMX2
		"pminsw %%mm0, %%mm4			\n\t"
		"pminsw %%mm1, %%mm5			\n\t"
#else
		"movq %%mm4, %%mm2			\n\t"
		"psubusw %%mm0, %%mm2			\n\t"
		"psubw %%mm2, %%mm4			\n\t"
		"movq %%mm5, %%mm2			\n\t"
		"psubusw %%mm1, %%mm2			\n\t"
		"psubw %%mm2, %%mm5			\n\t"
#endif
		"pxor %%mm6, %%mm4			\n\t"
		"pxor %%mm7, %%mm5			\n\t"
		"psubw %%mm6, %%mm4			\n\t"
		"psubw %%mm7, %%mm5			\n\t"
		"packsswb %%mm5, %%mm4			\n\t"

		// --- apply +d / -d to the two boundary lines
		"movq (%%eax, %1, 2), %%mm0		\n\t"
		"paddb %%mm4, %%mm0			\n\t"
		"movq %%mm0, (%%eax, %1, 2)		\n\t"
		"movq (%0, %1, 4), %%mm0		\n\t"
		"psubb %%mm4, %%mm0			\n\t"
		"movq %%mm0, (%0, %1, 4)		\n\t"
		:
		: "r" (src), "r" (stride), "r" (QP)
		: "%eax", "%ebx"
	);
#else
	const int l1= stride;
	const int l2= stride + l1;
	const int l3= stride + l2;
	const int l4= stride + l3;
	const int l5= stride + l4;
	const int l6= stride + l5;
	const int l7= stride + l6;
	const int l8= stride + l7;
//	const int l9= stride + l8;
	int x;
	src+= stride*3;
	for(x=0; x<BLOCK_SIZE; x++)
	{
		const int middleEnergy= 5*(src[l5] - src[l4]) + 2*(src[l3] - src[l6]);
		if(ABS(middleEnergy) < 8*QP)
		{
			const int q=(src[l4] - src[l5])/2;
			const int leftEnergy=  5*(src[l3] - src[l2]) + 2*(src[l1] - src[l4]);
			const int rightEnergy= 5*(src[l7] - src[l6]) + 2*(src[l5] - src[l8]);

			int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
			d= MAX(d, 0);

			d= (5*d + 32) >> 6;
			d*= SIGN(-middleEnergy);

			// clamp d to [0, q] (or [q, 0]) so l4/l5 cannot overshoot each other
			if(q>0)
			{
				d= d<0 ? 0 : d;
				d= d>q ? q : d;
			}
			else
			{
				d= d>0 ? 0 : d;
				d= d<q ? q : d;
			}

			src[l4]-= d;
			src[l5]+= d;
		}
		src++;
	}
#endif
}
  1158. //FIXME? |255-0| = 1
  1159. /**
 * Check if the given 8x8 Block is mostly "flat" and copy the unaligned data into tempBlock.
  1161. */
/**
 * Count near-equal horizontal neighbour pairs in the 8x8 block at src and,
 * as a side effect, copy the (possibly unaligned) block into tempBlock.
 * A pair counts as "equal" when the byte difference is within +-1.
 * Returns nonzero when the count exceeds hFlatnessThreshold, i.e. the block
 * is flat enough for the horizontal low-pass filter.
 */
static inline int isHorizDCAndCopy2Temp(uint8_t src[], int stride)
{
//	src++;
	int numEq= 0;
#ifdef HAVE_MMX
	asm volatile (
//		"int $3 \n\t"
		"leal (%1, %2), %%ecx			\n\t"
		"leal (%%ecx, %2, 4), %%ebx		\n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%1	ecx	ecx+%2	ecx+2%2	%1+4%2	ebx	ebx+%2	ebx+2%2	%1+8%2	ebx+4%2
		"movq b7E, %%mm7			\n\t" // mm7 = 0x7F
		"movq b7C, %%mm6			\n\t" // mm6 = 0x7D
		"pxor %%mm0, %%mm0			\n\t"
		// if (src & 31) >= 24 a movq would straddle a 32-byte cache line,
		// so take the movd+punpckldq path; otherwise jump to the plain movq path
		"movl %1, %%eax				\n\t"
		"andl $0x1F, %%eax			\n\t"
		"cmpl $24, %%eax			\n\t"
		"leal tempBlock, %%eax			\n\t"
		"jb 1f					\n\t"

// load a row in two 4-byte halves, count near-equal neighbour pairs into mm0
// (pcmpgtb yields 0xFF = -1 per hit) and store the row into tempBlock
#define HDC_CHECK_AND_CPY(src, dst) \
		"movd " #src ", %%mm2		\n\t"\
		"punpckldq 4" #src ", %%mm2	\n\t" /* (%1) */\
		"movq %%mm2, %%mm1		\n\t"\
		"psrlq $8, %%mm2		\n\t"\
		"psubb %%mm1, %%mm2		\n\t"\
		"paddb %%mm7, %%mm2		\n\t"\
		"pcmpgtb %%mm6, %%mm2		\n\t"\
		"paddb %%mm2, %%mm0		\n\t"\
		"movq %%mm1," #dst "(%%eax)	\n\t"

		HDC_CHECK_AND_CPY((%1),0)
		HDC_CHECK_AND_CPY((%%ecx),8)
		HDC_CHECK_AND_CPY((%%ecx, %2),16)
		HDC_CHECK_AND_CPY((%%ecx, %2, 2),24)
		HDC_CHECK_AND_CPY((%1, %2, 4),32)
		HDC_CHECK_AND_CPY((%%ebx),40)
		HDC_CHECK_AND_CPY((%%ebx, %2),48)
		HDC_CHECK_AND_CPY((%%ebx, %2, 2),56)
		"jmp 2f				\n\t"
		"1:				\n\t"

// src does not cross a 32 byte cache line so dont waste time with alignment
#define HDC_CHECK_AND_CPY2(src, dst) \
		"movq " #src ", %%mm2		\n\t"\
		"movq " #src ", %%mm1		\n\t"\
		"psrlq $8, %%mm2		\n\t"\
		"psubb %%mm1, %%mm2		\n\t"\
		"paddb %%mm7, %%mm2		\n\t"\
		"pcmpgtb %%mm6, %%mm2		\n\t"\
		"paddb %%mm2, %%mm0		\n\t"\
		"movq %%mm1," #dst "(%%eax)	\n\t"

		HDC_CHECK_AND_CPY2((%1),0)
		HDC_CHECK_AND_CPY2((%%ecx),8)
		HDC_CHECK_AND_CPY2((%%ecx, %2),16)
		HDC_CHECK_AND_CPY2((%%ecx, %2, 2),24)
		HDC_CHECK_AND_CPY2((%1, %2, 4),32)
		HDC_CHECK_AND_CPY2((%%ebx),40)
		HDC_CHECK_AND_CPY2((%%ebx, %2),48)
		HDC_CHECK_AND_CPY2((%%ebx, %2, 2),56)
		"2:				\n\t"

		// horizontal sum of the 8 per-byte counters in mm0
		"psllq $8, %%mm0			\n\t" // remove dummy value
		"movq %%mm0, %%mm1			\n\t"
		"psrlw $8, %%mm0			\n\t"
		"paddb %%mm1, %%mm0			\n\t"
		"movq %%mm0, %%mm1			\n\t"
		"psrlq $16, %%mm0			\n\t"
		"paddb %%mm1, %%mm0			\n\t"
		"movq %%mm0, %%mm1			\n\t"
		"psrlq $32, %%mm0			\n\t"
		"paddb %%mm1, %%mm0			\n\t"
		"movd %%mm0, %0				\n\t"
		: "=r" (numEq)
		: "r" (src), "r" (stride)
		: "%eax", "%ebx", "%ecx"
	);
//	printf("%d\n", numEq);
	// counters accumulated -1 per hit; negate and mask down to one byte
	numEq= (256 - numEq) &0xFF;
#else
	int y;
	for(y=0; y<BLOCK_SIZE; y++)
	{
		// (diff + 1) & 0xFFFF maps diffs of -1/0/+1 onto 0/1/2, i.e. < 3
		if(((src[0] - src[1] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[1] - src[2] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[2] - src[3] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[3] - src[4] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[4] - src[5] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[5] - src[6] + 1) & 0xFFFF) < 3) numEq++;
		if(((src[6] - src[7] + 1) & 0xFFFF) < 3) numEq++;
		tempBlock[0 + y*TEMP_STRIDE] = src[0];
		tempBlock[1 + y*TEMP_STRIDE] = src[1];
		tempBlock[2 + y*TEMP_STRIDE] = src[2];
		tempBlock[3 + y*TEMP_STRIDE] = src[3];
		tempBlock[4 + y*TEMP_STRIDE] = src[4];
		tempBlock[5 + y*TEMP_STRIDE] = src[5];
		tempBlock[6 + y*TEMP_STRIDE] = src[6];
		tempBlock[7 + y*TEMP_STRIDE] = src[7];
		src+= stride;
	}
#endif
/*	if(abs(numEq - asmEq) > 0)
	{
//		printf("\nasm:%d  c:%d\n", asmEq, numEq);
		for(int y=0; y<8; y++)
		{
			for(int x=0; x<8; x++)
			{
				printf("%d ", src[x + y*stride]);
			}
			printf("\n");
		}
	}
*/
//	printf("%d\n", numEq);
	return numEq > hFlatnessThreshold;
}
  1275. static inline int isHorizMinMaxOk(uint8_t src[], int stride, int QP)
  1276. {
  1277. #ifdef MMX_FIXME
  1278. FIXME
  1279. int isOk;
  1280. asm volatile(
  1281. // "int $3 \n\t"
  1282. "movq (%1, %2), %%mm0 \n\t"
  1283. "movq (%1, %2, 8), %%mm1 \n\t"
  1284. "movq %%mm0, %%mm2 \n\t"
  1285. "psubusb %%mm1, %%mm0 \n\t"
  1286. "psubusb %%mm2, %%mm1 \n\t"
  1287. "por %%mm1, %%mm0 \n\t" // ABS Diff
  1288. "movq pQPb, %%mm7 \n\t" // QP,..., QP
  1289. "paddusb %%mm7, %%mm7 \n\t" // 2QP ... 2QP
  1290. "psubusb %%mm7, %%mm0 \n\t" // Diff <= 2QP -> 0
  1291. "pcmpeqd b00, %%mm0 \n\t"
  1292. "psrlq $16, %%mm0 \n\t"
  1293. "pcmpeqd bFF, %%mm0 \n\t"
  1294. // "movd %%mm0, (%1, %2, 4)\n\t"
  1295. "movd %%mm0, %0 \n\t"
  1296. : "=r" (isOk)
  1297. : "r" (src), "r" (stride)
  1298. );
  1299. return isOk;
  1300. #else
  1301. if(abs(src[0] - src[7]) > 2*QP) return 0;
  1302. return 1;
  1303. #endif
  1304. }
  1305. static inline void doHorizDefFilterAndCopyBack(uint8_t dst[], int stride, int QP)
  1306. {
  1307. #ifdef HAVE_MMX
  1308. asm volatile(
  1309. "leal (%0, %1), %%ecx \n\t"
  1310. "leal (%%ecx, %1, 4), %%ebx \n\t"
  1311. // 0 1 2 3 4 5 6 7 8 9
  1312. // %0 ecx ecx+%1 ecx+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  1313. "pxor %%mm7, %%mm7 \n\t"
  1314. "movq bm00001000, %%mm6 \n\t"
  1315. "movd %2, %%mm5 \n\t" // QP
  1316. "movq %%mm5, %%mm4 \n\t"
  1317. "paddusb %%mm5, %%mm5 \n\t" // 2QP
  1318. "paddusb %%mm5, %%mm4 \n\t" // 3QP
  1319. "psllq $24, %%mm4 \n\t"
  1320. "pxor %%mm5, %%mm5 \n\t" // 0
  1321. "psubb %%mm4, %%mm5 \n\t" // -QP
  1322. "leal tempBlock, %%eax \n\t"
  1323. //FIXME? "unroll by 2" and mix
  1324. #ifdef HAVE_MMX2
  1325. #define HDF(src, dst) \
  1326. "movq " #src "(%%eax), %%mm0 \n\t"\
  1327. "movq " #src "(%%eax), %%mm1 \n\t"\
  1328. "movq " #src "(%%eax), %%mm2 \n\t"\
  1329. "psrlq $8, %%mm1 \n\t"\
  1330. "psubusb %%mm1, %%mm2 \n\t"\
  1331. "psubusb %%mm0, %%mm1 \n\t"\
  1332. "por %%mm2, %%mm1 \n\t" /* p´x = |px - p(x+1)| */\
  1333. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  1334. "pshufw $0x00, %%mm1, %%mm3 \n\t" /* p´5 = |p1 - p2| */\
  1335. "pminub %%mm1, %%mm3 \n\t" /* p´5 = min(|p2-p1|, |p6-p5|)*/\
  1336. "psrlq $16, %%mm3 \n\t" /* p´3 = min(|p2-p1|, |p6-p5|)*/\
  1337. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-min(|p1-p2|,|p5-p6|) */\
  1338. "paddb %%mm5, %%mm1 \n\t"\
  1339. "psubusb %%mm5, %%mm1 \n\t"\
  1340. "psrlw $2, %%mm1 \n\t"\
  1341. "pxor %%mm2, %%mm1 \n\t"\
  1342. "psubb %%mm2, %%mm1 \n\t"\
  1343. "pand %%mm6, %%mm1 \n\t"\
  1344. "psubb %%mm1, %%mm0 \n\t"\
  1345. "psllq $8, %%mm1 \n\t"\
  1346. "paddb %%mm1, %%mm0 \n\t"\
  1347. "movd %%mm0, " #dst" \n\t"\
  1348. "psrlq $32, %%mm0 \n\t"\
  1349. "movd %%mm0, 4" #dst" \n\t"
  1350. #else
  1351. #define HDF(src, dst)\
  1352. "movq " #src "(%%eax), %%mm0 \n\t"\
  1353. "movq %%mm0, %%mm1 \n\t"\
  1354. "movq %%mm0, %%mm2 \n\t"\
  1355. "psrlq $8, %%mm1 \n\t"\
  1356. "psubusb %%mm1, %%mm2 \n\t"\
  1357. "psubusb %%mm0, %%mm1 \n\t"\
  1358. "por %%mm2, %%mm1 \n\t" /* p´x = |px - p(x+1)| */\
  1359. "pcmpeqb %%mm7, %%mm2 \n\t" /* p´x = sgn[px - p(x+1)] */\
  1360. "movq %%mm1, %%mm3 \n\t"\
  1361. "psllq $32, %%mm3 \n\t"\
  1362. "movq %%mm3, %%mm4 \n\t"\
  1363. "psubusb %%mm1, %%mm4 \n\t"\
  1364. "psubb %%mm4, %%mm3 \n\t"\
  1365. "psrlq $16, %%mm3 \n\t" /* p´3 = min(|p2-p1|, |p6-p5|)*/\
  1366. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-min(|p1-p2|,|p5,ü6|) */\
  1367. "paddb %%mm5, %%mm1 \n\t"\
  1368. "psubusb %%mm5, %%mm1 \n\t"\
  1369. "psrlw $2, %%mm1 \n\t"\
  1370. "pxor %%mm2, %%mm1 \n\t"\
  1371. "psubb %%mm2, %%mm1 \n\t"\
  1372. "pand %%mm6, %%mm1 \n\t"\
  1373. "psubb %%mm1, %%mm0 \n\t"\
  1374. "psllq $8, %%mm1 \n\t"\
  1375. "paddb %%mm1, %%mm0 \n\t"\
  1376. "movd %%mm0, " #dst " \n\t"\
  1377. "psrlq $32, %%mm0 \n\t"\
  1378. "movd %%mm0, 4" #dst " \n\t"
  1379. #endif
  1380. HDF(0,(%0))
  1381. HDF(8,(%%ecx))
  1382. HDF(16,(%%ecx, %1))
  1383. HDF(24,(%%ecx, %1, 2))
  1384. HDF(32,(%0, %1, 4))
  1385. HDF(40,(%%ebx))
  1386. HDF(48,(%%ebx, %1))
  1387. HDF(56,(%%ebx, %1, 2))
  1388. :
  1389. : "r" (dst), "r" (stride), "r" (QP)
  1390. : "%eax", "%ebx", "%ecx"
  1391. );
  1392. #else
  1393. uint8_t *src= tempBlock;
  1394. int y;
  1395. for(y=0; y<BLOCK_SIZE; y++)
  1396. {
  1397. const int middleEnergy= 5*(src[4] - src[5]) + 2*(src[2] - src[5]);
  1398. dst[0] = src[0];
  1399. dst[1] = src[1];
  1400. dst[2] = src[2];
  1401. dst[3] = src[3];
  1402. dst[4] = src[4];
  1403. dst[5] = src[5];
  1404. dst[6] = src[6];
  1405. dst[7] = src[7];
  1406. if(ABS(middleEnergy) < 8*QP)
  1407. {
  1408. const int q=(src[3] - src[4])/2;
  1409. const int leftEnergy= 5*(src[2] - src[1]) + 2*(src[0] - src[3]);
  1410. const int rightEnergy= 5*(src[6] - src[5]) + 2*(src[4] - src[7]);
  1411. int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
  1412. d= MAX(d, 0);
  1413. d= (5*d + 32) >> 6;
  1414. d*= SIGN(-middleEnergy);
  1415. if(q>0)
  1416. {
  1417. d= d<0 ? 0 : d;
  1418. d= d>q ? q : d;
  1419. }
  1420. else
  1421. {
  1422. d= d>0 ? 0 : d;
  1423. d= d<q ? q : d;
  1424. }
  1425. dst[3]-= d;
  1426. dst[4]+= d;
  1427. }
  1428. dst+= stride;
  1429. src+= TEMP_STRIDE;
  1430. }
  1431. #endif
  1432. }
  1433. /**
  1434. * Do a horizontal low pass filter on the 10x8 block (dst points to middle 8x8 Block)
  1435. * using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16 (C version)
  1436. * using the 7-Tap Filter (2,2,2,4,2,2,2)/16 (MMX2/3DNOW version)
  1437. */
  1438. static inline void doHorizLowPassAndCopyBack(uint8_t dst[], int stride, int QP)
  1439. {
  1440. //return;
  1441. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1442. asm volatile(
  1443. "leal (%0, %1), %%ecx \n\t"
  1444. "leal (%%ecx, %1, 4), %%ebx \n\t"
  1445. // 0 1 2 3 4 5 6 7 8 9
  1446. // %0 ecx ecx+%1 ecx+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  1447. "pxor %%mm7, %%mm7 \n\t"
  1448. "leal tempBlock, %%eax \n\t"
  1449. /*
  1450. #define HLP1 "movq (%0), %%mm0 \n\t"\
  1451. "movq %%mm0, %%mm1 \n\t"\
  1452. "psllq $8, %%mm0 \n\t"\
  1453. PAVGB(%%mm1, %%mm0)\
  1454. "psrlw $8, %%mm0 \n\t"\
  1455. "pxor %%mm1, %%mm1 \n\t"\
  1456. "packuswb %%mm1, %%mm0 \n\t"\
  1457. "movq %%mm0, %%mm1 \n\t"\
  1458. "movq %%mm0, %%mm2 \n\t"\
  1459. "psllq $32, %%mm0 \n\t"\
  1460. "paddb %%mm0, %%mm1 \n\t"\
  1461. "psllq $16, %%mm2 \n\t"\
  1462. PAVGB(%%mm2, %%mm0)\
  1463. "movq %%mm0, %%mm3 \n\t"\
  1464. "pand bm11001100, %%mm0 \n\t"\
  1465. "paddusb %%mm0, %%mm3 \n\t"\
  1466. "psrlq $8, %%mm3 \n\t"\
  1467. PAVGB(%%mm1, %%mm4)\
  1468. PAVGB(%%mm3, %%mm2)\
  1469. "psrlq $16, %%mm2 \n\t"\
  1470. "punpcklbw %%mm2, %%mm2 \n\t"\
  1471. "movq %%mm2, (%0) \n\t"\
  1472. #define HLP2 "movq (%0), %%mm0 \n\t"\
  1473. "movq %%mm0, %%mm1 \n\t"\
  1474. "psllq $8, %%mm0 \n\t"\
  1475. PAVGB(%%mm1, %%mm0)\
  1476. "psrlw $8, %%mm0 \n\t"\
  1477. "pxor %%mm1, %%mm1 \n\t"\
  1478. "packuswb %%mm1, %%mm0 \n\t"\
  1479. "movq %%mm0, %%mm2 \n\t"\
  1480. "psllq $32, %%mm0 \n\t"\
  1481. "psllq $16, %%mm2 \n\t"\
  1482. PAVGB(%%mm2, %%mm0)\
  1483. "movq %%mm0, %%mm3 \n\t"\
  1484. "pand bm11001100, %%mm0 \n\t"\
  1485. "paddusb %%mm0, %%mm3 \n\t"\
  1486. "psrlq $8, %%mm3 \n\t"\
  1487. PAVGB(%%mm3, %%mm2)\
  1488. "psrlq $16, %%mm2 \n\t"\
  1489. "punpcklbw %%mm2, %%mm2 \n\t"\
  1490. "movq %%mm2, (%0) \n\t"\
  1491. */
  1492. // approximately a 7-Tap Filter with Vector (1,2,3,4,3,2,1)/16
  1493. /*
  1494. Implemented Exact 7-Tap
  1495. 9421 A321
  1496. 36421 64321
  1497. 334321 =
  1498. 1234321 =
  1499. 1234321 =
  1500. 123433 =
  1501. 12463 12346
  1502. 1249 123A
  1503. */
  1504. #ifdef HAVE_MMX2
  1505. #define HLP3(i) "movq " #i "(%%eax), %%mm0 \n\t"\
  1506. "movq %%mm0, %%mm1 \n\t"\
  1507. "movq %%mm0, %%mm2 \n\t"\
  1508. "movq %%mm0, %%mm3 \n\t"\
  1509. "movq %%mm0, %%mm4 \n\t"\
  1510. "psllq $8, %%mm1 \n\t"\
  1511. "psrlq $8, %%mm2 \n\t"\
  1512. "pand bm00000001, %%mm3 \n\t"\
  1513. "pand bm10000000, %%mm4 \n\t"\
  1514. "por %%mm3, %%mm1 \n\t"\
  1515. "por %%mm4, %%mm2 \n\t"\
  1516. PAVGB(%%mm2, %%mm1)\
  1517. PAVGB(%%mm1, %%mm0)\
  1518. \
  1519. "pshufw $0xF9, %%mm0, %%mm3 \n\t"\
  1520. "pshufw $0x90, %%mm0, %%mm4 \n\t"\
  1521. PAVGB(%%mm3, %%mm4)\
  1522. PAVGB(%%mm4, %%mm0)\
  1523. "movd %%mm0, (%0) \n\t"\
  1524. "psrlq $32, %%mm0 \n\t"\
  1525. "movd %%mm0, 4(%0) \n\t"
  1526. #else
  1527. #define HLP3(i) "movq " #i "(%%eax), %%mm0 \n\t"\
  1528. "movq %%mm0, %%mm1 \n\t"\
  1529. "movq %%mm0, %%mm2 \n\t"\
  1530. "movd -4(%0), %%mm3 \n\t" /*0001000*/\
  1531. "movd 8(%0), %%mm4 \n\t" /*0001000*/\
  1532. "psllq $8, %%mm1 \n\t"\
  1533. "psrlq $8, %%mm2 \n\t"\
  1534. "psrlq $24, %%mm3 \n\t"\
  1535. "psllq $56, %%mm4 \n\t"\
  1536. "por %%mm3, %%mm1 \n\t"\
  1537. "por %%mm4, %%mm2 \n\t"\
  1538. PAVGB(%%mm2, %%mm1)\
  1539. PAVGB(%%mm1, %%mm0)\
  1540. \
  1541. "movq %%mm0, %%mm3 \n\t"\
  1542. "movq %%mm0, %%mm4 \n\t"\
  1543. "movq %%mm0, %%mm5 \n\t"\
  1544. "psrlq $16, %%mm3 \n\t"\
  1545. "psllq $16, %%mm4 \n\t"\
  1546. "pand bm11000000, %%mm5 \n\t"\
  1547. "por %%mm5, %%mm3 \n\t"\
  1548. "movq %%mm0, %%mm5 \n\t"\
  1549. "pand bm00000011, %%mm5 \n\t"\
  1550. "por %%mm5, %%mm4 \n\t"\
  1551. PAVGB(%%mm3, %%mm4)\
  1552. PAVGB(%%mm4, %%mm0)\
  1553. "movd %%mm0, (%0) \n\t"\
  1554. "psrlq $32, %%mm0 \n\t"\
  1555. "movd %%mm0, 4(%0) \n\t"
  1556. #endif
  1557. /* uses the 7-Tap Filter: 1112111 */
  1558. #define NEW_HLP(src, dst)\
  1559. "movq " #src "(%%eax), %%mm1 \n\t"\
  1560. "movq " #src "(%%eax), %%mm2 \n\t"\
  1561. "psllq $8, %%mm1 \n\t"\
  1562. "psrlq $8, %%mm2 \n\t"\
  1563. "movd -4" #dst ", %%mm3 \n\t" /*0001000*/\
  1564. "movd 8" #dst ", %%mm4 \n\t" /*0001000*/\
  1565. "psrlq $24, %%mm3 \n\t"\
  1566. "psllq $56, %%mm4 \n\t"\
  1567. "por %%mm3, %%mm1 \n\t"\
  1568. "por %%mm4, %%mm2 \n\t"\
  1569. "movq %%mm1, %%mm5 \n\t"\
  1570. PAVGB(%%mm2, %%mm1)\
  1571. "movq " #src "(%%eax), %%mm0 \n\t"\
  1572. PAVGB(%%mm1, %%mm0)\
  1573. "psllq $8, %%mm5 \n\t"\
  1574. "psrlq $8, %%mm2 \n\t"\
  1575. "por %%mm3, %%mm5 \n\t"\
  1576. "por %%mm4, %%mm2 \n\t"\
  1577. "movq %%mm5, %%mm1 \n\t"\
  1578. PAVGB(%%mm2, %%mm5)\
  1579. "psllq $8, %%mm1 \n\t"\
  1580. "psrlq $8, %%mm2 \n\t"\
  1581. "por %%mm3, %%mm1 \n\t"\
  1582. "por %%mm4, %%mm2 \n\t"\
  1583. PAVGB(%%mm2, %%mm1)\
  1584. PAVGB(%%mm1, %%mm5)\
  1585. PAVGB(%%mm5, %%mm0)\
  1586. "movd %%mm0, " #dst " \n\t"\
  1587. "psrlq $32, %%mm0 \n\t"\
  1588. "movd %%mm0, 4" #dst " \n\t"
  1589. /* uses the 9-Tap Filter: 112242211 */
  1590. #define NEW_HLP2(i)\
  1591. "movq " #i "(%%eax), %%mm0 \n\t" /*0001000*/\
  1592. "movq %%mm0, %%mm1 \n\t" /*0001000*/\
  1593. "movq %%mm0, %%mm2 \n\t" /*0001000*/\
  1594. "movd -4(%0), %%mm3 \n\t" /*0001000*/\
  1595. "movd 8(%0), %%mm4 \n\t" /*0001000*/\
  1596. "psllq $8, %%mm1 \n\t"\
  1597. "psrlq $8, %%mm2 \n\t"\
  1598. "psrlq $24, %%mm3 \n\t"\
  1599. "psllq $56, %%mm4 \n\t"\
  1600. "por %%mm3, %%mm1 \n\t" /*0010000*/\
  1601. "por %%mm4, %%mm2 \n\t" /*0000100*/\
  1602. "movq %%mm1, %%mm5 \n\t" /*0010000*/\
  1603. PAVGB(%%mm2, %%mm1) /*0010100*/\
  1604. PAVGB(%%mm1, %%mm0) /*0012100*/\
  1605. "psllq $8, %%mm5 \n\t"\
  1606. "psrlq $8, %%mm2 \n\t"\
  1607. "por %%mm3, %%mm5 \n\t" /*0100000*/\
  1608. "por %%mm4, %%mm2 \n\t" /*0000010*/\
  1609. "movq %%mm5, %%mm1 \n\t" /*0100000*/\
  1610. PAVGB(%%mm2, %%mm5) /*0100010*/\
  1611. "psllq $8, %%mm1 \n\t"\
  1612. "psrlq $8, %%mm2 \n\t"\
  1613. "por %%mm3, %%mm1 \n\t" /*1000000*/\
  1614. "por %%mm4, %%mm2 \n\t" /*0000001*/\
  1615. "movq %%mm1, %%mm6 \n\t" /*1000000*/\
  1616. PAVGB(%%mm2, %%mm1) /*1000001*/\
  1617. "psllq $8, %%mm6 \n\t"\
  1618. "psrlq $8, %%mm2 \n\t"\
  1619. "por %%mm3, %%mm6 \n\t"/*100000000*/\
  1620. "por %%mm4, %%mm2 \n\t"/*000000001*/\
  1621. PAVGB(%%mm2, %%mm6) /*100000001*/\
  1622. PAVGB(%%mm6, %%mm1) /*110000011*/\
  1623. PAVGB(%%mm1, %%mm5) /*112000211*/\
  1624. PAVGB(%%mm5, %%mm0) /*112242211*/\
  1625. "movd %%mm0, (%0) \n\t"\
  1626. "psrlq $32, %%mm0 \n\t"\
  1627. "movd %%mm0, 4(%0) \n\t"
  1628. #define HLP(src, dst) NEW_HLP(src, dst)
  1629. HLP(0, (%0))
  1630. HLP(8, (%%ecx))
  1631. HLP(16, (%%ecx, %1))
  1632. HLP(24, (%%ecx, %1, 2))
  1633. HLP(32, (%0, %1, 4))
  1634. HLP(40, (%%ebx))
  1635. HLP(48, (%%ebx, %1))
  1636. HLP(56, (%%ebx, %1, 2))
  1637. :
  1638. : "r" (dst), "r" (stride)
  1639. : "%eax", "%ebx", "%ecx"
  1640. );
  1641. #else
  1642. uint8_t *temp= tempBlock;
  1643. int y;
  1644. for(y=0; y<BLOCK_SIZE; y++)
  1645. {
  1646. const int first= ABS(dst[-1] - dst[0]) < QP ? dst[-1] : dst[0];
  1647. const int last= ABS(dst[8] - dst[7]) < QP ? dst[8] : dst[7];
  1648. int sums[9];
  1649. sums[0] = first + temp[0];
  1650. sums[1] = temp[0] + temp[1];
  1651. sums[2] = temp[1] + temp[2];
  1652. sums[3] = temp[2] + temp[3];
  1653. sums[4] = temp[3] + temp[4];
  1654. sums[5] = temp[4] + temp[5];
  1655. sums[6] = temp[5] + temp[6];
  1656. sums[7] = temp[6] + temp[7];
  1657. sums[8] = temp[7] + last;
  1658. dst[0]= ((sums[0]<<2) + ((first + sums[2])<<1) + sums[4] + 8)>>4;
  1659. dst[1]= ((dst[1]<<2) + (first + sums[0] + sums[3]<<1) + sums[5] + 8)>>4;
  1660. dst[2]= ((dst[2]<<2) + (first + sums[1] + sums[4]<<1) + sums[6] + 8)>>4;
  1661. dst[3]= ((dst[3]<<2) + (sums[2] + sums[5]<<1) + sums[0] + sums[7] + 8)>>4;
  1662. dst[4]= ((dst[4]<<2) + (sums[3] + sums[6]<<1) + sums[1] + sums[8] + 8)>>4;
  1663. dst[5]= ((dst[5]<<2) + (last + sums[7] + sums[4]<<1) + sums[2] + 8)>>4;
  1664. dst[6]= ((last + dst[6]<<2) + (dst[7] + sums[5]<<1) + sums[3] + 8)>>4;
  1665. dst[7]= ((sums[8]<<2) + (last + sums[6]<<1) + sums[4] + 8)>>4;
  1666. dst+= stride;
  1667. temp+= TEMP_STRIDE;
  1668. }
  1669. #endif
  1670. }
/**
 * Dering filter — NOT implemented yet; this function is currently a no-op.
 * The asm below is guarded by HAVE_MMX2X (note the trailing X), so it is
 * always compiled out, and the C branch is empty. What exists of the asm
 * only computes the per-block min/max pixel value and their average.
 * NOTE(review): if the guard were restored, this block would not assemble:
 * "pcmpeq" is not a valid mnemonic, the movq in FIND_MIN_MAX has a stray
 * trailing comma, and FIND_MIN_MAX is invoked with multi-token addresses
 * against a single-parameter macro.
 */
static inline void dering(uint8_t src[], int stride, int QP)
{
//FIXME
#ifdef HAVE_MMX2X
	asm volatile(
		"leal (%0, %1), %%eax			\n\t"
		"leal (%%eax, %1, 4), %%ebx		\n\t"
//	0	1	2	3	4	5	6	7	8	9
//	%0	eax	eax+%1	eax+2%1	%0+4%1	ebx	ebx+%1	ebx+2%1	%0+8%1	ebx+4%1
		"pcmpeq %%mm6, %%mm6			\n\t"
		"pxor %%mm7, %%mm7			\n\t"

// track running min in mm6 and max in mm7 over the rows
#define FIND_MIN_MAX(addr)\
		"movq (" #addr "), %%mm0,	\n\t"\
		"pminub %%mm0, %%mm6		\n\t"\
		"pmaxub %%mm0, %%mm7		\n\t"

		FIND_MIN_MAX(%0)
		FIND_MIN_MAX(%%eax)
		FIND_MIN_MAX(%%eax, %1)
		FIND_MIN_MAX(%%eax, %1, 2)
		FIND_MIN_MAX(%0, %1, 4)
		FIND_MIN_MAX(%%ebx)
		FIND_MIN_MAX(%%ebx, %1)
		FIND_MIN_MAX(%%ebx, %1, 2)
		FIND_MIN_MAX(%0, %1, 8)
		FIND_MIN_MAX(%%ebx, %1, 2)

		// horizontal reduction of mm6 to the single minimum byte
		"movq %%mm6, %%mm4			\n\t"
		"psrlq $32, %%mm6			\n\t"
		"pminub %%mm4, %%mm6			\n\t"
		"movq %%mm6, %%mm4			\n\t"
		"psrlq $16, %%mm6			\n\t"
		"pminub %%mm4, %%mm6			\n\t"
		"movq %%mm6, %%mm4			\n\t"
		"psrlq $8, %%mm6			\n\t"
		"pminub %%mm4, %%mm6			\n\t" // min of pixels

		// horizontal reduction of mm7 to the single maximum byte
		"movq %%mm7, %%mm4			\n\t"
		"psrlq $32, %%mm7			\n\t"
		"pmaxub %%mm4, %%mm7			\n\t"
		"movq %%mm7, %%mm4			\n\t"
		"psrlq $16, %%mm7			\n\t"
		"pmaxub %%mm4, %%mm7			\n\t"
		"movq %%mm7, %%mm4			\n\t"
		"psrlq $8, %%mm7			\n\t"
		"pmaxub %%mm4, %%mm7			\n\t" // max of pixels
		PAVGB(%%mm6, %%mm7)			      // (max + min)/2

		: : "r" (src), "r" (stride), "r" (QP)
		: "%eax", "%ebx"
	);
#else
//FIXME
#endif
}
  1722. /**
  1723. * Deinterlaces the given block
  1724. * will be called for every 8x8 block, and can read & write into an 8x16 block
  1725. */
  1726. static inline void deInterlaceInterpolateLinear(uint8_t src[], int stride)
  1727. {
  1728. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1729. asm volatile(
  1730. "leal (%0, %1), %%eax \n\t"
  1731. "leal (%%eax, %1, 4), %%ebx \n\t"
  1732. // 0 1 2 3 4 5 6 7 8 9
  1733. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  1734. "movq (%0), %%mm0 \n\t"
  1735. "movq (%%eax, %1), %%mm1 \n\t"
  1736. PAVGB(%%mm1, %%mm0)
  1737. "movq %%mm0, (%%eax) \n\t"
  1738. "movq (%0, %1, 4), %%mm0 \n\t"
  1739. PAVGB(%%mm0, %%mm1)
  1740. "movq %%mm1, (%%eax, %1, 2) \n\t"
  1741. "movq (%%ebx, %1), %%mm1 \n\t"
  1742. PAVGB(%%mm1, %%mm0)
  1743. "movq %%mm0, (%%ebx) \n\t"
  1744. "movq (%0, %1, 8), %%mm0 \n\t"
  1745. PAVGB(%%mm0, %%mm1)
  1746. "movq %%mm1, (%%ebx, %1, 2) \n\t"
  1747. : : "r" (src), "r" (stride)
  1748. : "%eax", "%ebx"
  1749. );
  1750. #else
  1751. int x;
  1752. for(x=0; x<8; x++)
  1753. {
  1754. src[stride] = (src[0] + src[stride*2])>>1;
  1755. src[stride*3] = (src[stride*2] + src[stride*4])>>1;
  1756. src[stride*5] = (src[stride*4] + src[stride*6])>>1;
  1757. src[stride*7] = (src[stride*6] + src[stride*8])>>1;
  1758. src++;
  1759. }
  1760. #endif
  1761. }
  1762. /**
  1763. * Deinterlaces the given block
  1764. * will be called for every 8x8 block, and can read & write into an 8x16 block
  1765. * no cliping in C version
  1766. */
  1767. static inline void deInterlaceInterpolateCubic(uint8_t src[], int stride)
  1768. {
  1769. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1770. asm volatile(
  1771. "leal (%0, %1), %%eax \n\t"
  1772. "leal (%%eax, %1, 4), %%ebx \n\t"
  1773. "leal (%%ebx, %1, 4), %%ecx \n\t"
  1774. "addl %1, %%ecx \n\t"
  1775. "pxor %%mm7, %%mm7 \n\t"
  1776. // 0 1 2 3 4 5 6 7 8 9 10
  1777. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1 ecx
  1778. #define DEINT_CUBIC(a,b,c,d,e)\
  1779. "movq " #a ", %%mm0 \n\t"\
  1780. "movq " #b ", %%mm1 \n\t"\
  1781. "movq " #d ", %%mm2 \n\t"\
  1782. "movq " #e ", %%mm3 \n\t"\
  1783. PAVGB(%%mm2, %%mm1) /* (b+d) /2 */\
  1784. PAVGB(%%mm3, %%mm0) /* a(a+e) /2 */\
  1785. "movq %%mm0, %%mm2 \n\t"\
  1786. "punpcklbw %%mm7, %%mm0 \n\t"\
  1787. "punpckhbw %%mm7, %%mm2 \n\t"\
  1788. "movq %%mm1, %%mm3 \n\t"\
  1789. "punpcklbw %%mm7, %%mm1 \n\t"\
  1790. "punpckhbw %%mm7, %%mm3 \n\t"\
  1791. "psubw %%mm1, %%mm0 \n\t" /* L(a+e - (b+d))/2 */\
  1792. "psubw %%mm3, %%mm2 \n\t" /* H(a+e - (b+d))/2 */\
  1793. "psraw $3, %%mm0 \n\t" /* L(a+e - (b+d))/16 */\
  1794. "psraw $3, %%mm2 \n\t" /* H(a+e - (b+d))/16 */\
  1795. "psubw %%mm0, %%mm1 \n\t" /* L(9b + 9d - a - e)/16 */\
  1796. "psubw %%mm2, %%mm3 \n\t" /* H(9b + 9d - a - e)/16 */\
  1797. "packuswb %%mm3, %%mm1 \n\t"\
  1798. "movq %%mm1, " #c " \n\t"
  1799. DEINT_CUBIC((%0), (%%eax, %1), (%%eax, %1, 2), (%0, %1, 4), (%%ebx, %1))
  1800. DEINT_CUBIC((%%eax, %1), (%0, %1, 4), (%%ebx), (%%ebx, %1), (%0, %1, 8))
  1801. DEINT_CUBIC((%0, %1, 4), (%%ebx, %1), (%%ebx, %1, 2), (%0, %1, 8), (%%ecx))
  1802. DEINT_CUBIC((%%ebx, %1), (%0, %1, 8), (%%ebx, %1, 4), (%%ecx), (%%ecx, %1, 2))
  1803. : : "r" (src), "r" (stride)
  1804. : "%eax", "%ebx", "ecx"
  1805. );
  1806. #else
  1807. int x;
  1808. for(x=0; x<8; x++)
  1809. {
  1810. src[stride*3] = (-src[0] + 9*src[stride*2] + 9*src[stride*4] - src[stride*6])>>4;
  1811. src[stride*5] = (-src[stride*2] + 9*src[stride*4] + 9*src[stride*6] - src[stride*8])>>4;
  1812. src[stride*7] = (-src[stride*4] + 9*src[stride*6] + 9*src[stride*8] - src[stride*10])>>4;
  1813. src[stride*9] = (-src[stride*6] + 9*src[stride*8] + 9*src[stride*10] - src[stride*12])>>4;
  1814. src++;
  1815. }
  1816. #endif
  1817. }
  1818. /**
  1819. * Deinterlaces the given block
  1820. * will be called for every 8x8 block, and can read & write into an 8x16 block
  1821. * will shift the image up by 1 line (FIXME if this is a problem)
  1822. */
  1823. static inline void deInterlaceBlendLinear(uint8_t src[], int stride)
  1824. {
  1825. #if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
  1826. asm volatile(
  1827. "leal (%0, %1), %%eax \n\t"
  1828. "leal (%%eax, %1, 4), %%ebx \n\t"
  1829. // 0 1 2 3 4 5 6 7 8 9
  1830. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  1831. "movq (%0), %%mm0 \n\t" // L0
  1832. "movq (%%eax, %1), %%mm1 \n\t" // L2
  1833. PAVGB(%%mm1, %%mm0) // L0+L2
  1834. "movq (%%eax), %%mm2 \n\t" // L1
  1835. PAVGB(%%mm2, %%mm0)
  1836. "movq %%mm0, (%0) \n\t"
  1837. "movq (%%eax, %1, 2), %%mm0 \n\t" // L3
  1838. PAVGB(%%mm0, %%mm2) // L1+L3
  1839. PAVGB(%%mm1, %%mm2) // 2L2 + L1 + L3
  1840. "movq %%mm2, (%%eax) \n\t"
  1841. "movq (%0, %1, 4), %%mm2 \n\t" // L4
  1842. PAVGB(%%mm2, %%mm1) // L2+L4
  1843. PAVGB(%%mm0, %%mm1) // 2L3 + L2 + L4
  1844. "movq %%mm1, (%%eax, %1) \n\t"
  1845. "movq (%%ebx), %%mm1 \n\t" // L5
  1846. PAVGB(%%mm1, %%mm0) // L3+L5
  1847. PAVGB(%%mm2, %%mm0) // 2L4 + L3 + L5
  1848. "movq %%mm0, (%%eax, %1, 2) \n\t"
  1849. "movq (%%ebx, %1), %%mm0 \n\t" // L6
  1850. PAVGB(%%mm0, %%mm2) // L4+L6
  1851. PAVGB(%%mm1, %%mm2) // 2L5 + L4 + L6
  1852. "movq %%mm2, (%0, %1, 4) \n\t"
  1853. "movq (%%ebx, %1, 2), %%mm2 \n\t" // L7
  1854. PAVGB(%%mm2, %%mm1) // L5+L7
  1855. PAVGB(%%mm0, %%mm1) // 2L6 + L5 + L7
  1856. "movq %%mm1, (%%ebx) \n\t"
  1857. "movq (%0, %1, 8), %%mm1 \n\t" // L8
  1858. PAVGB(%%mm1, %%mm0) // L6+L8
  1859. PAVGB(%%mm2, %%mm0) // 2L7 + L6 + L8
  1860. "movq %%mm0, (%%ebx, %1) \n\t"
  1861. "movq (%%ebx, %1, 4), %%mm0 \n\t" // L9
  1862. PAVGB(%%mm0, %%mm2) // L7+L9
  1863. PAVGB(%%mm1, %%mm2) // 2L8 + L7 + L9
  1864. "movq %%mm2, (%%ebx, %1, 2) \n\t"
  1865. : : "r" (src), "r" (stride)
  1866. : "%eax", "%ebx"
  1867. );
  1868. #else
  1869. int x;
  1870. for(x=0; x<8; x++)
  1871. {
  1872. src[0 ] = (src[0 ] + 2*src[stride ] + src[stride*2])>>2;
  1873. src[stride ] = (src[stride ] + 2*src[stride*2] + src[stride*3])>>2;
  1874. src[stride*2] = (src[stride*2] + 2*src[stride*3] + src[stride*4])>>2;
  1875. src[stride*3] = (src[stride*3] + 2*src[stride*4] + src[stride*5])>>2;
  1876. src[stride*4] = (src[stride*4] + 2*src[stride*5] + src[stride*6])>>2;
  1877. src[stride*5] = (src[stride*5] + 2*src[stride*6] + src[stride*7])>>2;
  1878. src[stride*6] = (src[stride*6] + 2*src[stride*7] + src[stride*8])>>2;
  1879. src[stride*7] = (src[stride*7] + 2*src[stride*8] + src[stride*9])>>2;
  1880. src++;
  1881. }
  1882. #endif
  1883. }
  1884. /**
  1885. * Deinterlaces the given block
  1886. * will be called for every 8x8 block, except the last row, and can read & write into an 8x16 block
  1887. */
  1888. static inline void deInterlaceMedian(uint8_t src[], int stride)
  1889. {
  1890. #ifdef HAVE_MMX
  1891. #ifdef HAVE_MMX2
  1892. asm volatile(
  1893. "leal (%0, %1), %%eax \n\t"
  1894. "leal (%%eax, %1, 4), %%ebx \n\t"
  1895. // 0 1 2 3 4 5 6 7 8 9
  1896. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  1897. "movq (%0), %%mm0 \n\t" //
  1898. "movq (%%eax, %1), %%mm2 \n\t" //
  1899. "movq (%%eax), %%mm1 \n\t" //
  1900. "movq %%mm0, %%mm3 \n\t"
  1901. "pmaxub %%mm1, %%mm0 \n\t" //
  1902. "pminub %%mm3, %%mm1 \n\t" //
  1903. "pmaxub %%mm2, %%mm1 \n\t" //
  1904. "pminub %%mm1, %%mm0 \n\t"
  1905. "movq %%mm0, (%%eax) \n\t"
  1906. "movq (%0, %1, 4), %%mm0 \n\t" //
  1907. "movq (%%eax, %1, 2), %%mm1 \n\t" //
  1908. "movq %%mm2, %%mm3 \n\t"
  1909. "pmaxub %%mm1, %%mm2 \n\t" //
  1910. "pminub %%mm3, %%mm1 \n\t" //
  1911. "pmaxub %%mm0, %%mm1 \n\t" //
  1912. "pminub %%mm1, %%mm2 \n\t"
  1913. "movq %%mm2, (%%eax, %1, 2) \n\t"
  1914. "movq (%%ebx), %%mm2 \n\t" //
  1915. "movq (%%ebx, %1), %%mm1 \n\t" //
  1916. "movq %%mm2, %%mm3 \n\t"
  1917. "pmaxub %%mm0, %%mm2 \n\t" //
  1918. "pminub %%mm3, %%mm0 \n\t" //
  1919. "pmaxub %%mm1, %%mm0 \n\t" //
  1920. "pminub %%mm0, %%mm2 \n\t"
  1921. "movq %%mm2, (%%ebx) \n\t"
  1922. "movq (%%ebx, %1, 2), %%mm2 \n\t" //
  1923. "movq (%0, %1, 8), %%mm0 \n\t" //
  1924. "movq %%mm2, %%mm3 \n\t"
  1925. "pmaxub %%mm0, %%mm2 \n\t" //
  1926. "pminub %%mm3, %%mm0 \n\t" //
  1927. "pmaxub %%mm1, %%mm0 \n\t" //
  1928. "pminub %%mm0, %%mm2 \n\t"
  1929. "movq %%mm2, (%%ebx, %1, 2) \n\t"
  1930. : : "r" (src), "r" (stride)
  1931. : "%eax", "%ebx"
  1932. );
  1933. #else // MMX without MMX2
  1934. asm volatile(
  1935. "leal (%0, %1), %%eax \n\t"
  1936. "leal (%%eax, %1, 4), %%ebx \n\t"
  1937. // 0 1 2 3 4 5 6 7 8 9
  1938. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  1939. "pxor %%mm7, %%mm7 \n\t"
  1940. #define MEDIAN(a,b,c)\
  1941. "movq " #a ", %%mm0 \n\t"\
  1942. "movq " #b ", %%mm2 \n\t"\
  1943. "movq " #c ", %%mm1 \n\t"\
  1944. "movq %%mm0, %%mm3 \n\t"\
  1945. "movq %%mm1, %%mm4 \n\t"\
  1946. "movq %%mm2, %%mm5 \n\t"\
  1947. "psubusb %%mm1, %%mm3 \n\t"\
  1948. "psubusb %%mm2, %%mm4 \n\t"\
  1949. "psubusb %%mm0, %%mm5 \n\t"\
  1950. "pcmpeqb %%mm7, %%mm3 \n\t"\
  1951. "pcmpeqb %%mm7, %%mm4 \n\t"\
  1952. "pcmpeqb %%mm7, %%mm5 \n\t"\
  1953. "movq %%mm3, %%mm6 \n\t"\
  1954. "pxor %%mm4, %%mm3 \n\t"\
  1955. "pxor %%mm5, %%mm4 \n\t"\
  1956. "pxor %%mm6, %%mm5 \n\t"\
  1957. "por %%mm3, %%mm1 \n\t"\
  1958. "por %%mm4, %%mm2 \n\t"\
  1959. "por %%mm5, %%mm0 \n\t"\
  1960. "pand %%mm2, %%mm0 \n\t"\
  1961. "pand %%mm1, %%mm0 \n\t"\
  1962. "movq %%mm0, " #b " \n\t"
  1963. MEDIAN((%0), (%%eax), (%%eax, %1))
  1964. MEDIAN((%%eax, %1), (%%eax, %1, 2), (%0, %1, 4))
  1965. MEDIAN((%0, %1, 4), (%%ebx), (%%ebx, %1))
  1966. MEDIAN((%%ebx, %1), (%%ebx, %1, 2), (%0, %1, 8))
  1967. : : "r" (src), "r" (stride)
  1968. : "%eax", "%ebx"
  1969. );
  1970. #endif // MMX
  1971. #else
  1972. //FIXME
  1973. int x;
  1974. for(x=0; x<8; x++)
  1975. {
  1976. src[0 ] = (src[0 ] + 2*src[stride ] + src[stride*2])>>2;
  1977. src[stride ] = (src[stride ] + 2*src[stride*2] + src[stride*3])>>2;
  1978. src[stride*2] = (src[stride*2] + 2*src[stride*3] + src[stride*4])>>2;
  1979. src[stride*3] = (src[stride*3] + 2*src[stride*4] + src[stride*5])>>2;
  1980. src[stride*4] = (src[stride*4] + 2*src[stride*5] + src[stride*6])>>2;
  1981. src[stride*5] = (src[stride*5] + 2*src[stride*6] + src[stride*7])>>2;
  1982. src[stride*6] = (src[stride*6] + 2*src[stride*7] + src[stride*8])>>2;
  1983. src[stride*7] = (src[stride*7] + 2*src[stride*8] + src[stride*9])>>2;
  1984. src++;
  1985. }
  1986. #endif
  1987. }
  1988. #ifdef HAVE_ODIVX_POSTPROCESS
  1989. #include "../opendivx/postprocess.h"
  1990. int use_old_pp=0;
  1991. #endif
  1992. static void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
  1993. QP_STORE_T QPs[], int QPStride, int isColor, int mode);
  1994. /* -pp Command line Help
  1995. NOTE/FIXME: put this at an appropriate place (--help, html docs, man mplayer)?
  1996. -pp <filterName>[:<option>[:<option>...]][,[-]<filterName>[:<option>...]]...
  1997. long form example:
  1998. -pp vdeblock:autoq,hdeblock:autoq,linblenddeint -pp default,-vdeblock
  1999. short form example:
  2000. -pp vb:a,hb:a,lb -pp de,-vb
  2001. Filters Options
  2002. short long name short long option Description
2003. * * a autoq cpu power dependent enabler
2004. c chrom chrominance filtering enabled
2005. y nochrom chrominance filtering disabled
  2006. hb hdeblock horizontal deblocking filter
  2007. vb vdeblock vertical deblocking filter
  2008. vr rkvdeblock
  2009. h1 x1hdeblock Experimental horizontal deblock filter 1
  2010. v1 x1vdeblock Experimental vertical deblock filter 1
  2011. dr dering not implemented yet
  2012. al autolevels automatic brightness / contrast fixer
  2013. f fullyrange stretch luminance range to (0..255)
  2014. lb linblenddeint linear blend deinterlacer
  2015. li linipoldeint linear interpolating deinterlacer
  2016. ci cubicipoldeint cubic interpolating deinterlacer
  2017. md mediandeint median deinterlacer
  2018. de default hdeblock:a,vdeblock:a,dering:a,autolevels
  2019. fa fast x1hdeblock:a,x1vdeblock:a,dering:a,autolevels
  2020. */
  2021. /**
  2022. * returns a PPMode struct which will have a non 0 error variable if an error occured
  2023. * name is the string after "-pp" on the command line
  2024. * quality is a number from 0 to GET_PP_QUALITY_MAX
  2025. */
  2026. struct PPMode getPPModeByNameAndQuality(char *name, int quality)
  2027. {
  2028. char temp[GET_MODE_BUFFER_SIZE];
  2029. char *p= temp;
  2030. char *filterDelimiters= ",";
  2031. char *optionDelimiters= ":";
  2032. struct PPMode ppMode= {0,0,0,0,0,0};
  2033. char *filterToken;
  2034. strncpy(temp, name, GET_MODE_BUFFER_SIZE);
  2035. for(;;){
  2036. char *p2;
  2037. char *filterName;
  2038. int q= GET_PP_QUALITY_MAX;
  2039. int chrom=-1;
  2040. char *option;
  2041. char *options[OPTIONS_ARRAY_SIZE];
  2042. int i;
  2043. int filterNameOk=0;
  2044. int numOfUnknownOptions=0;
  2045. int enable=1; //does the user want us to enabled or disabled the filter
  2046. filterToken= strtok(p, filterDelimiters);
  2047. if(filterToken == NULL) break;
  2048. p+= strlen(filterToken) + 1;
  2049. filterName= strtok(filterToken, optionDelimiters);
  2050. printf("%s::%s\n", filterToken, filterName);
  2051. if(*filterName == '-')
  2052. {
  2053. enable=0;
  2054. filterName++;
  2055. }
  2056. for(;;){ //for all options
  2057. option= strtok(NULL, optionDelimiters);
  2058. if(option == NULL) break;
  2059. printf("%s\n", option);
  2060. if(!strcmp("autoq", option) || !strcmp("a", option)) q= quality;
  2061. else if(!strcmp("nochrom", option) || !strcmp("y", option)) chrom=0;
  2062. else if(!strcmp("chrom", option) || !strcmp("c", option)) chrom=1;
  2063. else
  2064. {
  2065. options[numOfUnknownOptions] = option;
  2066. numOfUnknownOptions++;
  2067. options[numOfUnknownOptions] = NULL;
  2068. }
  2069. if(numOfUnknownOptions >= OPTIONS_ARRAY_SIZE-1) break;
  2070. }
  2071. /* replace stuff from the replace Table */
  2072. for(i=0; replaceTable[2*i]!=NULL; i++)
  2073. {
  2074. if(!strcmp(replaceTable[2*i], filterName))
  2075. {
  2076. int newlen= strlen(replaceTable[2*i + 1]);
  2077. int plen;
  2078. int spaceLeft;
  2079. if(p==NULL) p= temp, *p=0; //last filter
  2080. else p--, *p=','; //not last filter
  2081. plen= strlen(p);
  2082. spaceLeft= (int)p - (int)temp + plen;
  2083. if(spaceLeft + newlen >= GET_MODE_BUFFER_SIZE)
  2084. {
  2085. ppMode.error++;
  2086. break;
  2087. }
  2088. memmove(p + newlen, p, plen+1);
  2089. memcpy(p, replaceTable[2*i + 1], newlen);
  2090. filterNameOk=1;
  2091. }
  2092. }
  2093. for(i=0; filters[i].shortName!=NULL; i++)
  2094. {
  2095. if( !strcmp(filters[i].longName, filterName)
  2096. || !strcmp(filters[i].shortName, filterName))
  2097. {
  2098. ppMode.lumMode &= ~filters[i].mask;
  2099. ppMode.chromMode &= ~filters[i].mask;
  2100. filterNameOk=1;
  2101. if(!enable) break; // user wants to disable it
  2102. if(q >= filters[i].minLumQuality)
  2103. ppMode.lumMode|= filters[i].mask;
  2104. if(chrom==1 || (chrom==-1 && filters[i].chromDefault))
  2105. if(q >= filters[i].minChromQuality)
  2106. ppMode.chromMode|= filters[i].mask;
  2107. if(filters[i].mask == LEVEL_FIX)
  2108. {
  2109. int o;
  2110. ppMode.minAllowedY= 16;
  2111. ppMode.maxAllowedY= 234;
  2112. for(o=0; options[o]!=NULL; o++)
  2113. if( !strcmp(options[o],"fullyrange")
  2114. ||!strcmp(options[o],"f"))
  2115. {
  2116. ppMode.minAllowedY= 0;
  2117. ppMode.maxAllowedY= 255;
  2118. numOfUnknownOptions--;
  2119. }
  2120. }
  2121. }
  2122. }
  2123. if(!filterNameOk) ppMode.error++;
  2124. ppMode.error += numOfUnknownOptions;
  2125. }
  2126. if(ppMode.lumMode & H_DEBLOCK) ppMode.oldMode |= PP_DEBLOCK_Y_H;
  2127. if(ppMode.lumMode & V_DEBLOCK) ppMode.oldMode |= PP_DEBLOCK_Y_V;
  2128. if(ppMode.chromMode & H_DEBLOCK) ppMode.oldMode |= PP_DEBLOCK_C_H;
  2129. if(ppMode.chromMode & V_DEBLOCK) ppMode.oldMode |= PP_DEBLOCK_C_V;
  2130. if(ppMode.lumMode & DERING) ppMode.oldMode |= PP_DERING_Y;
  2131. if(ppMode.chromMode & DERING) ppMode.oldMode |= PP_DERING_C;
  2132. return ppMode;
  2133. }
  2134. /**
  2135. * ...
  2136. */
  2137. void postprocess(unsigned char * src[], int src_stride,
  2138. unsigned char * dst[], int dst_stride,
  2139. int horizontal_size, int vertical_size,
  2140. QP_STORE_T *QP_store, int QP_stride,
  2141. int mode)
  2142. {
  2143. /*
  2144. static int qual=0;
  2145. struct PPMode ppMode= getPPModeByNameAndQuality("fast,default,-hdeblock,-vdeblock", qual);
  2146. qual++;
  2147. qual%=7;
  2148. printf("\n%d %d %d %d\n", ppMode.lumMode, ppMode.chromMode, ppMode.oldMode, ppMode.error);
  2149. postprocess2(src, src_stride, dst, dst_stride,
  2150. horizontal_size, vertical_size, QP_store, QP_stride, &ppMode);
  2151. return;
  2152. */
  2153. #ifdef HAVE_ODIVX_POSTPROCESS
  2154. // Note: I could make this shit outside of this file, but it would mean one
  2155. // more function call...
  2156. if(use_old_pp){
  2157. odivx_postprocess(src,src_stride,dst,dst_stride,horizontal_size,vertical_size,QP_store,QP_stride,mode);
  2158. return;
  2159. }
  2160. #endif
  2161. postProcess(src[0], src_stride, dst[0], dst_stride,
  2162. horizontal_size, vertical_size, QP_store, QP_stride, 0, mode);
  2163. horizontal_size >>= 1;
  2164. vertical_size >>= 1;
  2165. src_stride >>= 1;
  2166. dst_stride >>= 1;
  2167. mode= ((mode&0xFF)>>4) | (mode&0xFFFFFF00);
  2168. if(1)
  2169. {
  2170. postProcess(src[1], src_stride, dst[1], dst_stride,
  2171. horizontal_size, vertical_size, QP_store, QP_stride, 1, mode);
  2172. postProcess(src[2], src_stride, dst[2], dst_stride,
  2173. horizontal_size, vertical_size, QP_store, QP_stride, 2, mode);
  2174. }
  2175. else
  2176. {
  2177. memcpy(dst[1], src[1], src_stride*horizontal_size);
  2178. memcpy(dst[2], src[2], src_stride*horizontal_size);
  2179. }
  2180. }
  2181. void postprocess2(unsigned char * src[], int src_stride,
  2182. unsigned char * dst[], int dst_stride,
  2183. int horizontal_size, int vertical_size,
  2184. QP_STORE_T *QP_store, int QP_stride,
  2185. struct PPMode *mode)
  2186. {
  2187. #ifdef HAVE_ODIVX_POSTPROCESS
  2188. // Note: I could make this shit outside of this file, but it would mean one
  2189. // more function call...
  2190. if(use_old_pp){
  2191. odivx_postprocess(src,src_stride,dst,dst_stride,horizontal_size,vertical_size,QP_store,QP_stride,
  2192. mode->oldMode);
  2193. return;
  2194. }
  2195. #endif
  2196. postProcess(src[0], src_stride, dst[0], dst_stride,
  2197. horizontal_size, vertical_size, QP_store, QP_stride, 0, mode->lumMode);
  2198. horizontal_size >>= 1;
  2199. vertical_size >>= 1;
  2200. src_stride >>= 1;
  2201. dst_stride >>= 1;
  2202. postProcess(src[1], src_stride, dst[1], dst_stride,
  2203. horizontal_size, vertical_size, QP_store, QP_stride, 1, mode->chromMode);
  2204. postProcess(src[2], src_stride, dst[2], dst_stride,
  2205. horizontal_size, vertical_size, QP_store, QP_stride, 2, mode->chromMode);
  2206. }
  2207. /**
  2208. * gets the mode flags for a given quality (larger values mean slower but better postprocessing)
  2209. * 0 <= quality <= 6
  2210. */
  2211. int getPpModeForQuality(int quality){
  2212. int modes[1+GET_PP_QUALITY_MAX]= {
  2213. 0,
  2214. #if 1
  2215. // horizontal filters first
  2216. LUM_H_DEBLOCK,
  2217. LUM_H_DEBLOCK | LUM_V_DEBLOCK,
  2218. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK,
  2219. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK | CHROM_V_DEBLOCK,
  2220. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK | CHROM_V_DEBLOCK | LUM_DERING,
  2221. LUM_H_DEBLOCK | LUM_V_DEBLOCK | CHROM_H_DEBLOCK | CHROM_V_DEBLOCK | LUM_DERING | CHROM_DERING
  2222. #else
  2223. // vertical filters first
  2224. LUM_V_DEBLOCK,
  2225. LUM_V_DEBLOCK | LUM_H_DEBLOCK,
  2226. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK,
  2227. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK,
  2228. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK | LUM_DERING,
  2229. LUM_V_DEBLOCK | LUM_H_DEBLOCK | CHROM_V_DEBLOCK | CHROM_H_DEBLOCK | LUM_DERING | CHROM_DERING
  2230. #endif
  2231. };
  2232. #ifdef HAVE_ODIVX_POSTPROCESS
  2233. int odivx_modes[1+GET_PP_QUALITY_MAX]= {
  2234. 0,
  2235. PP_DEBLOCK_Y_H,
  2236. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V,
  2237. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H,
  2238. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H|PP_DEBLOCK_C_V,
  2239. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H|PP_DEBLOCK_C_V|PP_DERING_Y,
  2240. PP_DEBLOCK_Y_H|PP_DEBLOCK_Y_V|PP_DEBLOCK_C_H|PP_DEBLOCK_C_V|PP_DERING_Y|PP_DERING_C
  2241. };
  2242. if(use_old_pp) return odivx_modes[quality];
  2243. #endif
  2244. return modes[quality];
  2245. }
  2246. /**
  2247. * Copies a block from src to dst and fixes the blacklevel
  2248. * numLines must be a multiple of 4
  2249. * levelFix == 0 -> dont touch the brighness & contrast
  2250. */
  2251. static inline void blockCopy(uint8_t dst[], int dstStride, uint8_t src[], int srcStride,
  2252. int numLines, int levelFix)
  2253. {
  2254. int i;
  2255. if(levelFix)
  2256. {
  2257. #ifdef HAVE_MMX
  2258. asm volatile(
  2259. "leal (%2,%2), %%eax \n\t"
  2260. "leal (%3,%3), %%ebx \n\t"
  2261. "movq packedYOffset, %%mm2 \n\t"
  2262. "movq packedYScale, %%mm3 \n\t"
  2263. "pxor %%mm4, %%mm4 \n\t"
  2264. #define SCALED_CPY \
  2265. "movq (%0), %%mm0 \n\t"\
  2266. "movq (%0), %%mm5 \n\t"\
  2267. "punpcklbw %%mm4, %%mm0 \n\t"\
  2268. "punpckhbw %%mm4, %%mm5 \n\t"\
  2269. "psubw %%mm2, %%mm0 \n\t"\
  2270. "psubw %%mm2, %%mm5 \n\t"\
  2271. "movq (%0,%2), %%mm1 \n\t"\
  2272. "psllw $6, %%mm0 \n\t"\
  2273. "psllw $6, %%mm5 \n\t"\
  2274. "pmulhw %%mm3, %%mm0 \n\t"\
  2275. "movq (%0,%2), %%mm6 \n\t"\
  2276. "pmulhw %%mm3, %%mm5 \n\t"\
  2277. "punpcklbw %%mm4, %%mm1 \n\t"\
  2278. "punpckhbw %%mm4, %%mm6 \n\t"\
  2279. "psubw %%mm2, %%mm1 \n\t"\
  2280. "psubw %%mm2, %%mm6 \n\t"\
  2281. "psllw $6, %%mm1 \n\t"\
  2282. "psllw $6, %%mm6 \n\t"\
  2283. "pmulhw %%mm3, %%mm1 \n\t"\
  2284. "pmulhw %%mm3, %%mm6 \n\t"\
  2285. "addl %%eax, %0 \n\t"\
  2286. "packuswb %%mm5, %%mm0 \n\t"\
  2287. "packuswb %%mm6, %%mm1 \n\t"\
  2288. "movq %%mm0, (%1) \n\t"\
  2289. "movq %%mm1, (%1, %3) \n\t"\
  2290. SCALED_CPY
  2291. "addl %%ebx, %1 \n\t"
  2292. SCALED_CPY
  2293. "addl %%ebx, %1 \n\t"
  2294. SCALED_CPY
  2295. "addl %%ebx, %1 \n\t"
  2296. SCALED_CPY
  2297. : "+r"(src),
  2298. "+r"(dst)
  2299. :"r" (srcStride),
  2300. "r" (dstStride)
  2301. : "%eax", "%ebx"
  2302. );
  2303. #else
  2304. for(i=0; i<numLines; i++)
  2305. memcpy( &(dst[dstStride*i]),
  2306. &(src[srcStride*i]), BLOCK_SIZE);
  2307. #endif
  2308. }
  2309. else
  2310. {
  2311. #ifdef HAVE_MMX
  2312. asm volatile(
  2313. "movl %4, %%eax \n\t"
  2314. "movl %%eax, temp0\n\t"
  2315. "pushl %0 \n\t"
  2316. "pushl %1 \n\t"
  2317. "leal (%2,%2), %%eax \n\t"
  2318. "leal (%3,%3), %%ebx \n\t"
  2319. "movq packedYOffset, %%mm2 \n\t"
  2320. "movq packedYScale, %%mm3 \n\t"
  2321. #define SIMPLE_CPY \
  2322. "movq (%0), %%mm0 \n\t"\
  2323. "movq (%0,%2), %%mm1 \n\t"\
  2324. "movq %%mm0, (%1) \n\t"\
  2325. "movq %%mm1, (%1, %3) \n\t"\
  2326. "1: \n\t"
  2327. SIMPLE_CPY
  2328. "addl %%eax, %0 \n\t"
  2329. "addl %%ebx, %1 \n\t"
  2330. SIMPLE_CPY
  2331. "addl %%eax, %0 \n\t"
  2332. "addl %%ebx, %1 \n\t"
  2333. "decl temp0 \n\t"
  2334. "jnz 1b \n\t"
  2335. "popl %1 \n\t"
  2336. "popl %0 \n\t"
  2337. : : "r" (src),
  2338. "r" (dst),
  2339. "r" (srcStride),
  2340. "r" (dstStride),
  2341. "m" (numLines>>2)
  2342. : "%eax", "%ebx"
  2343. );
  2344. #else
  2345. for(i=0; i<numLines; i++)
  2346. memcpy( &(dst[dstStride*i]),
  2347. &(src[srcStride*i]), BLOCK_SIZE);
  2348. #endif
  2349. }
  2350. }
  2351. /**
  2352. * Filters array of bytes (Y or U or V values)
  2353. */
  2354. static void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
  2355. QP_STORE_T QPs[], int QPStride, int isColor, int mode)
  2356. {
  2357. int x,y;
  2358. /* we need 64bit here otherwise we´ll going to have a problem
  2359. after watching a black picture for 5 hours*/
  2360. static uint64_t *yHistogram= NULL;
  2361. int black=0, white=255; // blackest black and whitest white in the picture
  2362. /* Temporary buffers for handling the last row(s) */
  2363. static uint8_t *tempDst= NULL;
  2364. static uint8_t *tempSrc= NULL;
  2365. /* Temporary buffers for handling the last block */
  2366. static uint8_t *tempDstBlock= NULL;
  2367. static uint8_t *tempSrcBlock= NULL;
  2368. uint8_t *dstBlockPtrBackup;
  2369. uint8_t *srcBlockPtrBackup;
  2370. #ifdef TIMING
  2371. long long T0, T1, memcpyTime=0, vertTime=0, horizTime=0, sumTime, diffTime=0;
  2372. sumTime= rdtsc();
  2373. #endif
  2374. if(tempDst==NULL)
  2375. {
  2376. tempDst= (uint8_t*)memalign(8, 1024*24);
  2377. tempSrc= (uint8_t*)memalign(8, 1024*24);
  2378. tempDstBlock= (uint8_t*)memalign(8, 1024*24);
  2379. tempSrcBlock= (uint8_t*)memalign(8, 1024*24);
  2380. }
  2381. if(!yHistogram)
  2382. {
  2383. int i;
  2384. yHistogram= (uint64_t*)malloc(8*256);
  2385. for(i=0; i<256; i++) yHistogram[i]= width*height/64*15/256;
  2386. if(mode & FULL_Y_RANGE)
  2387. {
  2388. maxAllowedY=255;
  2389. minAllowedY=0;
  2390. }
  2391. }
  2392. if(!isColor)
  2393. {
  2394. uint64_t sum= 0;
  2395. int i;
  2396. static int framenum= -1;
  2397. uint64_t maxClipped;
  2398. uint64_t clipped;
  2399. double scale;
  2400. framenum++;
  2401. if(framenum == 1) yHistogram[0]= width*height/64*15/256;
  2402. for(i=0; i<256; i++)
  2403. {
  2404. sum+= yHistogram[i];
  2405. // printf("%d ", yHistogram[i]);
  2406. }
  2407. // printf("\n\n");
  2408. /* we allways get a completly black picture first */
  2409. maxClipped= (uint64_t)(sum * maxClippedThreshold);
  2410. clipped= sum;
  2411. for(black=255; black>0; black--)
  2412. {
  2413. if(clipped < maxClipped) break;
  2414. clipped-= yHistogram[black];
  2415. }
  2416. clipped= sum;
  2417. for(white=0; white<256; white++)
  2418. {
  2419. if(clipped < maxClipped) break;
  2420. clipped-= yHistogram[white];
  2421. }
  2422. packedYOffset= (black - minAllowedY) & 0xFFFF;
  2423. packedYOffset|= packedYOffset<<32;
  2424. packedYOffset|= packedYOffset<<16;
  2425. scale= (double)(maxAllowedY - minAllowedY) / (double)(white-black);
  2426. packedYScale= (uint16_t)(scale*1024.0 + 0.5);
  2427. packedYScale|= packedYScale<<32;
  2428. packedYScale|= packedYScale<<16;
  2429. }
  2430. else
  2431. {
  2432. packedYScale= 0x0100010001000100LL;
  2433. packedYOffset= 0;
  2434. }
  2435. /* copy first row of 8x8 blocks */
  2436. for(x=0; x<width; x+=BLOCK_SIZE)
  2437. blockCopy(dst + x, dstStride, src + x, srcStride, 8, mode & LEVEL_FIX);
  2438. for(y=0; y<height; y+=BLOCK_SIZE)
  2439. {
  2440. //1% speedup if these are here instead of the inner loop
  2441. uint8_t *srcBlock= &(src[y*srcStride]);
  2442. uint8_t *dstBlock= &(dst[y*dstStride]);
  2443. /* can we mess with a 8x16 block from srcBlock/dstBlock downwards, if not
  2444. than use a temporary buffer */
  2445. if(y+15 >= height)
  2446. {
  2447. /* copy from line 5 to 12 of src, these will e copied with
  2448. blockcopy to dst later */
  2449. memcpy(tempSrc + srcStride*5, srcBlock + srcStride*5,
  2450. srcStride*MAX(height-y-5, 0) );
  2451. /* duplicate last line to fill the void upto line 12 */
  2452. if(y+12 >= height)
  2453. {
  2454. int i;
  2455. for(i=height-y; i<=12; i++)
  2456. memcpy(tempSrc + srcStride*i,
  2457. src + srcStride*(height-1), srcStride);
  2458. }
  2459. /* copy up to 5 lines of dst */
  2460. memcpy(tempDst, dstBlock, dstStride*MIN(height-y, 5) );
  2461. dstBlock= tempDst;
  2462. srcBlock= tempSrc;
  2463. }
  2464. // From this point on it is guranteed that we can read and write 16 lines downward
  2465. // finish 1 block before the next otherwise we´ll might have a problem
  2466. // with the L1 Cache of the P4 ... or only a few blocks at a time or soemthing
  2467. for(x=0; x<width; x+=BLOCK_SIZE)
  2468. {
  2469. const int stride= dstStride;
  2470. int QP;
  2471. if(isColor)
  2472. {
  2473. QP=QPs[(y>>3)*QPStride + (x>>3)];
  2474. }
  2475. else
  2476. {
  2477. QP= QPs[(y>>4)*QPStride + (x>>4)];
  2478. if(mode & LEVEL_FIX) QP= (QP* (packedYScale &0xFFFF))>>8;
  2479. yHistogram[ srcBlock[srcStride*5] ]++;
  2480. }
  2481. #ifdef HAVE_MMX
  2482. asm volatile(
  2483. "movd %0, %%mm7 \n\t"
  2484. "packuswb %%mm7, %%mm7 \n\t" // 0, 0, 0, QP, 0, 0, 0, QP
  2485. "packuswb %%mm7, %%mm7 \n\t" // 0,QP, 0, QP, 0,QP, 0, QP
  2486. "packuswb %%mm7, %%mm7 \n\t" // QP,..., QP
  2487. "movq %%mm7, pQPb \n\t"
  2488. : : "r" (QP)
  2489. );
  2490. #endif
  2491. #ifdef MORE_TIMING
  2492. T0= rdtsc();
  2493. #endif
  2494. #ifdef HAVE_MMX2
  2495. prefetchnta(srcBlock + (((x>>3)&3) + 5)*srcStride + 32);
  2496. prefetchnta(srcBlock + (((x>>3)&3) + 9)*srcStride + 32);
  2497. prefetcht0(dstBlock + (((x>>3)&3) + 5)*dstStride + 32);
  2498. prefetcht0(dstBlock + (((x>>3)&3) + 9)*dstStride + 32);
  2499. #elif defined(HAVE_3DNOW)
  2500. //FIXME check if this is faster on an 3dnow chip or if its faster without the prefetch or ...
  2501. /* prefetch(srcBlock + (((x>>3)&3) + 5)*srcStride + 32);
  2502. prefetch(srcBlock + (((x>>3)&3) + 9)*srcStride + 32);
  2503. prefetchw(dstBlock + (((x>>3)&3) + 5)*dstStride + 32);
  2504. prefetchw(dstBlock + (((x>>3)&3) + 9)*dstStride + 32);
  2505. */
  2506. #endif
  2507. #ifdef PP_FUNNY_STRIDE
  2508. //can we mess with a 8x16 block, if not use a temp buffer, yes again
  2509. if(x+7 >= width)
  2510. {
  2511. int i;
  2512. dstBlockPtrBackup= dstBlock;
  2513. srcBlockPtrBackup= srcBlock;
  2514. for(i=0;i<BLOCK_SIZE*2; i++)
  2515. {
  2516. memcpy(tempSrcBlock+i*srcStride, srcBlock+i*srcStride, width-x);
  2517. memcpy(tempDstBlock+i*dstStride, dstBlock+i*dstStride, width-x);
  2518. }
  2519. dstBlock= tempDstBlock;
  2520. srcBlock= tempSrcBlock;
  2521. }
  2522. #endif
  2523. blockCopy(dstBlock + dstStride*5, dstStride,
  2524. srcBlock + srcStride*5, srcStride, 8, mode & LEVEL_FIX);
  2525. if(mode & LINEAR_IPOL_DEINT_FILTER)
  2526. deInterlaceInterpolateLinear(dstBlock, dstStride);
  2527. else if(mode & LINEAR_BLEND_DEINT_FILTER)
  2528. deInterlaceBlendLinear(dstBlock, dstStride);
  2529. else if(mode & MEDIAN_DEINT_FILTER)
  2530. deInterlaceMedian(dstBlock, dstStride);
  2531. else if(mode & CUBIC_IPOL_DEINT_FILTER)
  2532. deInterlaceInterpolateCubic(dstBlock, dstStride);
  2533. /* else if(mode & CUBIC_BLEND_DEINT_FILTER)
  2534. deInterlaceBlendCubic(dstBlock, dstStride);
  2535. */
  2536. /* only deblock if we have 2 blocks */
  2537. if(y + 8 < height)
  2538. {
  2539. #ifdef MORE_TIMING
  2540. T1= rdtsc();
  2541. memcpyTime+= T1-T0;
  2542. T0=T1;
  2543. #endif
  2544. if(mode & V_RK1_FILTER)
  2545. vertRK1Filter(dstBlock, stride, QP);
  2546. else if(mode & V_X1_FILTER)
  2547. vertX1Filter(dstBlock, stride, QP);
  2548. else if(mode & V_DEBLOCK)
  2549. {
  2550. if( isVertDC(dstBlock, stride))
  2551. {
  2552. if(isVertMinMaxOk(dstBlock, stride, QP))
  2553. doVertLowPass(dstBlock, stride, QP);
  2554. }
  2555. else
  2556. doVertDefFilter(dstBlock, stride, QP);
  2557. }
  2558. #ifdef MORE_TIMING
  2559. T1= rdtsc();
  2560. vertTime+= T1-T0;
  2561. T0=T1;
  2562. #endif
  2563. }
  2564. /* check if we have a previous block to deblock it with dstBlock */
  2565. if(x - 8 >= 0)
  2566. {
  2567. #ifdef MORE_TIMING
  2568. T0= rdtsc();
  2569. #endif
  2570. if(mode & H_X1_FILTER)
  2571. horizX1Filter(dstBlock-4, stride, QP);
  2572. else if(mode & H_DEBLOCK)
  2573. {
  2574. if( isHorizDCAndCopy2Temp(dstBlock-4, stride))
  2575. {
  2576. if(isHorizMinMaxOk(tempBlock, TEMP_STRIDE, QP))
  2577. doHorizLowPassAndCopyBack(dstBlock-4, stride, QP);
  2578. }
  2579. else
  2580. doHorizDefFilterAndCopyBack(dstBlock-4, stride, QP);
  2581. }
  2582. #ifdef MORE_TIMING
  2583. T1= rdtsc();
  2584. horizTime+= T1-T0;
  2585. T0=T1;
  2586. #endif
  2587. dering(dstBlock - 9 - stride, stride, QP);
  2588. }
  2589. else if(y!=0)
  2590. dering(dstBlock - stride*9 + width-9, stride, QP);
  2591. //FIXME dering filter will not be applied to last block (bottom right)
  2592. #ifdef PP_FUNNY_STRIDE
  2593. /* did we use a tmp-block buffer */
  2594. if(x+7 >= width)
  2595. {
  2596. int i;
  2597. dstBlock= dstBlockPtrBackup;
  2598. srcBlock= srcBlockPtrBackup;
  2599. for(i=0;i<BLOCK_SIZE*2; i++)
  2600. {
  2601. memcpy(dstBlock+i*dstStride, tempDstBlock+i*dstStride, width-x);
  2602. }
  2603. }
  2604. #endif
  2605. dstBlock+=8;
  2606. srcBlock+=8;
  2607. }
  2608. /* did we use a tmp buffer */
  2609. if(y+15 >= height)
  2610. {
  2611. uint8_t *dstBlock= &(dst[y*dstStride]);
  2612. memcpy(dstBlock, tempDst, dstStride*(height-y) );
  2613. }
  2614. }
  2615. #ifdef HAVE_3DNOW
  2616. asm volatile("femms");
  2617. #elif defined (HAVE_MMX)
  2618. asm volatile("emms");
  2619. #endif
  2620. #ifdef TIMING
  2621. // FIXME diff is mostly the time spent for rdtsc (should subtract that but ...)
  2622. sumTime= rdtsc() - sumTime;
  2623. if(!isColor)
  2624. printf("cpy:%4dk, vert:%4dk, horiz:%4dk, sum:%4dk, diff:%4dk, color: %d/%d \r",
  2625. (int)(memcpyTime/1000), (int)(vertTime/1000), (int)(horizTime/1000),
  2626. (int)(sumTime/1000), (int)((sumTime-memcpyTime-vertTime-horizTime)/1000)
  2627. , black, white);
  2628. #endif
  2629. }