  1. /*
  2. Copyright (C) 2001 Michael Niedermayer (michaelni@gmx.at)
  3. This program is free software; you can redistribute it and/or modify
  4. it under the terms of the GNU General Public License as published by
  5. the Free Software Foundation; either version 2 of the License, or
  6. (at your option) any later version.
  7. This program is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU General Public License for more details.
  11. You should have received a copy of the GNU General Public License
  12. along with this program; if not, write to the Free Software
  13. Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  14. */
  15. /*
  16. C MMX MMX2
  17. isVertDC Ec Ec
  18. isVertMinMaxOk Ec Ec
  19. doVertLowPass E e
  20. doVertDefFilter Ec Ec Ec
  21. isHorizDC Ec Ec
  22. isHorizMinMaxOk a
  23. doHorizLowPass E a
  24. doHorizDefFilter E a
  25. deRing
  26. E = Exact implementation
27. e = almost exact implementation
  28. a = alternative / approximate impl
  29. c = checked against the other implementations (-vo md5)
  30. */
  31. /*
  32. TODO:
33. verify that everything works as it should
  34. reduce the time wasted on the mem transfer
  35. implement dering
  36. implement everything in C at least
37. figure out the range of QP (assuming <256 for now)
  38. unroll stuff if instructions depend too much on the prior one
  39. we use 8x8 blocks for the horizontal filters, opendivx seems to use 8x4?
  40. move YScale thing to the end instead of fixing QP
  41. ...
  42. Notes:
  43. */
  44. #include <inttypes.h>
  45. #include <stdio.h>
  46. #include "../config.h"
  47. #include "postprocess.h"
  48. //#undef HAVE_MMX2
  49. //#undef HAVE_MMX
  50. static uint64_t packedYOffset= 0x0000000000000000LL;
  51. static uint64_t packedYScale= 0x0100010001000100LL;
  52. static uint64_t w05= 0x0005000500050005LL;
  53. static uint64_t w20= 0x0020002000200020LL;
  54. static uint64_t w1400= 0x1400140014001400LL;
  55. static uint64_t bm00000001= 0x00000000000000FFLL;
  56. static uint64_t bm00010000= 0x000000FF00000000LL;
  57. static uint64_t bm00001000= 0x00000000FF000000LL;
  58. static uint64_t bm10000000= 0xFF00000000000000LL;
  59. static uint64_t bm10000001= 0xFF000000000000FFLL;
  60. static uint64_t bm11000011= 0xFFFF00000000FFFFLL;
  61. static uint64_t bm00011000= 0x000000FFFF000000LL;
  62. static uint64_t bm00110011= 0x0000FFFF0000FFFFLL;
  63. static uint64_t bm11001100= 0xFFFF0000FFFF0000LL;
  64. static uint64_t b00= 0x0000000000000000LL;
  65. static uint64_t b02= 0x0202020202020202LL;
  66. static uint64_t b0F= 0x0F0F0F0F0F0F0F0FLL;
  67. static uint64_t bFF= 0xFFFFFFFFFFFFFFFFLL;
  68. static uint64_t b7E= 0x7E7E7E7E7E7E7E7ELL;
  69. static uint64_t b7C= 0x7C7C7C7C7C7C7C7CLL;
  70. static uint64_t b3F= 0x3F3F3F3F3F3F3F3FLL;
  71. static uint64_t temp0=0;
  72. static uint64_t temp1=0;
  73. static uint64_t temp2=0;
  74. static uint64_t temp3=0;
  75. static uint64_t temp4=0;
  76. static uint64_t temp5=0;
  77. static uint64_t pQPb=0;
  78. static uint8_t tempBlock[16*16];
  79. int hFlatnessThreshold= 56 - 16;
  80. int vFlatnessThreshold= 56 - 16;
81. // amount of "black" you are willing to lose to get a brightness-corrected picture
  82. double maxClippedThreshold= 0.01;
  83. int maxAllowedY=255;
84. //FIXME can never make a movie's black brighter (does anyone need that?)
  85. int minAllowedY=0;
  86. static inline long long rdtsc()
  87. {
  88. long long l;
  89. asm volatile( "rdtsc\n\t"
  90. : "=A" (l)
  91. );
  92. // printf("%d\n", int(l/1000));
  93. return l;
  94. }
  95. static inline void prefetchnta(void *p)
  96. {
  97. asm volatile( "prefetchnta (%0)\n\t"
  98. : : "r" (p)
  99. );
  100. }
  101. static inline void prefetcht0(void *p)
  102. {
  103. asm volatile( "prefetcht0 (%0)\n\t"
  104. : : "r" (p)
  105. );
  106. }
  107. static inline void prefetcht1(void *p)
  108. {
  109. asm volatile( "prefetcht1 (%0)\n\t"
  110. : : "r" (p)
  111. );
  112. }
  113. static inline void prefetcht2(void *p)
  114. {
  115. asm volatile( "prefetcht2 (%0)\n\t"
  116. : : "r" (p)
  117. );
  118. }
119. //FIXME? |255-0| = 1 (shouldn't be a problem ...)
  120. /**
  121. * Check if the middle 8x8 Block in the given 8x10 block is flat
  122. */
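/* Explanatory note (matches the C reference path below): for every column, the 7
   vertically adjacent pixel pairs of the 8x8 block are checked; pairs differing by
   at most 1 are counted, and the block is treated as flat ("DC") once that count
   exceeds vFlatnessThreshold. */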
  123. static inline bool isVertDC(uint8_t src[], int stride){
  124. // return true;
  125. int numEq= 0;
  126. src+= stride; // src points to begin of the 8x8 Block
  127. #ifdef HAVE_MMX
  128. asm volatile(
  129. // "int $3 \n\t"
  130. "pushl %1\n\t"
  131. "movq b7E, %%mm7 \n\t" // mm7 = 0x7F
  132. "movq b7C, %%mm6 \n\t" // mm6 = 0x7D
  133. "movq (%1), %%mm0 \n\t"
  134. "addl %2, %1 \n\t"
  135. "movq (%1), %%mm1 \n\t"
  136. "psubb %%mm1, %%mm0 \n\t" // mm0 = differnece
  137. "paddb %%mm7, %%mm0 \n\t"
  138. "pcmpgtb %%mm6, %%mm0 \n\t"
  139. "addl %2, %1 \n\t"
  140. "movq (%1), %%mm2 \n\t"
  141. "psubb %%mm2, %%mm1 \n\t"
  142. "paddb %%mm7, %%mm1 \n\t"
  143. "pcmpgtb %%mm6, %%mm1 \n\t"
  144. "paddb %%mm1, %%mm0 \n\t"
  145. "addl %2, %1 \n\t"
  146. "movq (%1), %%mm1 \n\t"
  147. "psubb %%mm1, %%mm2 \n\t"
  148. "paddb %%mm7, %%mm2 \n\t"
  149. "pcmpgtb %%mm6, %%mm2 \n\t"
  150. "paddb %%mm2, %%mm0 \n\t"
  151. "addl %2, %1 \n\t"
  152. "movq (%1), %%mm2 \n\t"
  153. "psubb %%mm2, %%mm1 \n\t"
  154. "paddb %%mm7, %%mm1 \n\t"
  155. "pcmpgtb %%mm6, %%mm1 \n\t"
  156. "paddb %%mm1, %%mm0 \n\t"
  157. "addl %2, %1 \n\t"
  158. "movq (%1), %%mm1 \n\t"
  159. "psubb %%mm1, %%mm2 \n\t"
  160. "paddb %%mm7, %%mm2 \n\t"
  161. "pcmpgtb %%mm6, %%mm2 \n\t"
  162. "paddb %%mm2, %%mm0 \n\t"
  163. "addl %2, %1 \n\t"
  164. "movq (%1), %%mm2 \n\t"
  165. "psubb %%mm2, %%mm1 \n\t"
  166. "paddb %%mm7, %%mm1 \n\t"
  167. "pcmpgtb %%mm6, %%mm1 \n\t"
  168. "paddb %%mm1, %%mm0 \n\t"
  169. "addl %2, %1 \n\t"
  170. "movq (%1), %%mm1 \n\t"
  171. "psubb %%mm1, %%mm2 \n\t"
  172. "paddb %%mm7, %%mm2 \n\t"
  173. "pcmpgtb %%mm6, %%mm2 \n\t"
  174. "paddb %%mm2, %%mm0 \n\t"
  175. " \n\t"
  176. "movq %%mm0, %%mm1 \n\t"
  177. "psrlw $8, %%mm0 \n\t"
  178. "paddb %%mm1, %%mm0 \n\t"
  179. "movq %%mm0, %%mm1 \n\t"
  180. "psrlq $16, %%mm0 \n\t"
  181. "paddb %%mm1, %%mm0 \n\t"
  182. "movq %%mm0, %%mm1 \n\t"
  183. "psrlq $32, %%mm0 \n\t"
  184. "paddb %%mm1, %%mm0 \n\t"
  185. "popl %1\n\t"
  186. "movd %%mm0, %0 \n\t"
  187. : "=r" (numEq)
  188. : "r" (src), "r" (stride)
  189. );
  190. // printf("%d\n", numEq);
  191. numEq= (256 - (numEq & 0xFF)) &0xFF;
  192. // int asmEq= numEq;
  193. // numEq=0;
  194. // uint8_t *temp= src;
  195. #else
  196. for(int y=0; y<BLOCK_SIZE-1; y++)
  197. {
  198. if(((src[0] - src[0+stride] + 1)&0xFFFF) < 3) numEq++;
  199. if(((src[1] - src[1+stride] + 1)&0xFFFF) < 3) numEq++;
  200. if(((src[2] - src[2+stride] + 1)&0xFFFF) < 3) numEq++;
  201. if(((src[3] - src[3+stride] + 1)&0xFFFF) < 3) numEq++;
  202. if(((src[4] - src[4+stride] + 1)&0xFFFF) < 3) numEq++;
  203. if(((src[5] - src[5+stride] + 1)&0xFFFF) < 3) numEq++;
  204. if(((src[6] - src[6+stride] + 1)&0xFFFF) < 3) numEq++;
  205. if(((src[7] - src[7+stride] + 1)&0xFFFF) < 3) numEq++;
  206. src+= stride;
  207. }
  208. #endif
  209. /* if(abs(numEq - asmEq) > 0)
  210. {
  211. printf("\nasm:%d c:%d\n", asmEq, numEq);
  212. for(int y=0; y<8; y++)
  213. {
  214. for(int x=0; x<8; x++)
  215. {
  216. printf("%d ", temp[x + y*stride]);
  217. }
  218. printf("\n");
  219. }
  220. }
  221. */
  222. return numEq > vFlatnessThreshold;
  223. }
  224. static inline bool isVertMinMaxOk(uint8_t src[], int stride, int QP)
  225. {
  226. #ifdef HAVE_MMX
  227. int isOk;
  228. asm volatile(
  229. // "int $3 \n\t"
  230. "movq (%1, %2), %%mm0 \n\t"
  231. "movq (%1, %2, 8), %%mm1 \n\t"
  232. "movq %%mm0, %%mm2 \n\t"
  233. "psubusb %%mm1, %%mm0 \n\t"
  234. "psubusb %%mm2, %%mm1 \n\t"
  235. "por %%mm1, %%mm0 \n\t" // ABS Diff
  236. "movq pQPb, %%mm7 \n\t" // QP,..., QP
  237. "paddusb %%mm7, %%mm7 \n\t" // 2QP ... 2QP
  238. "psubusb %%mm7, %%mm0 \n\t" // Diff <= 2QP -> 0
  239. "pcmpeqd b00, %%mm0 \n\t"
  240. "psrlq $16, %%mm0 \n\t"
  241. "pcmpeqd bFF, %%mm0 \n\t"
  242. // "movd %%mm0, (%1, %2, 4)\n\t"
  243. "movd %%mm0, %0 \n\t"
  244. : "=r" (isOk)
  245. : "r" (src), "r" (stride)
  246. );
  247. return isOk;
  248. #else
  249. int isOk2= true;
  250. for(int x=0; x<BLOCK_SIZE; x++)
  251. {
  252. if(abs((int)src[x + stride] - (int)src[x + (stride<<3)]) > 2*QP) isOk2=false;
  253. }
  254. /* if(isOk && !isOk2 || !isOk && isOk2)
  255. {
  256. printf("\nasm:%d c:%d QP:%d\n", isOk, isOk2, QP);
  257. for(int y=0; y<9; y++)
  258. {
  259. for(int x=0; x<8; x++)
  260. {
  261. printf("%d ", src[x + y*stride]);
  262. }
  263. printf("\n");
  264. }
  265. } */
  266. return isOk2;
  267. #endif
  268. }
  269. /**
  270. * Do a vertical low pass filter on the 8x10 block (only write to the 8x8 block in the middle)
271. * using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16
  272. */
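/* Illustration of the C path below (not part of the original comment): with p1..p8
   the eight filtered lines and first/last the edge-clipped neighbours, e.g. line 4 becomes
       p4' = (first + p1 + 2*(p2+p3) + 4*p4 + 2*(p5+p6) + p7 + p8 + 8) >> 4
   which is exactly the (1,1,2,2,4,2,2,1,1)/16 tap set named above. */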
  273. static inline void doVertLowPass(uint8_t *src, int stride, int QP)
  274. {
  275. // QP= 64;
  276. #ifdef HAVE_MMX2
  277. asm volatile( //"movv %0 %1 %2\n\t"
  278. "pushl %0 \n\t"
  279. "movq pQPb, %%mm0 \n\t" // QP,..., QP
  280. // "movq bFF , %%mm0 \n\t" // QP,..., QP
  281. "movq (%0), %%mm6 \n\t"
  282. "movq (%0, %1), %%mm5 \n\t"
  283. "movq %%mm5, %%mm1 \n\t"
  284. "movq %%mm6, %%mm2 \n\t"
  285. "psubusb %%mm6, %%mm5 \n\t"
  286. "psubusb %%mm1, %%mm2 \n\t"
  287. "por %%mm5, %%mm2 \n\t" // ABS Diff of lines
  288. "psubusb %%mm0, %%mm2 \n\t" // diff <= QP -> 0
  289. "pcmpeqb b00, %%mm2 \n\t" // diff <= QP -> FF
  290. "pand %%mm2, %%mm6 \n\t"
  291. "pandn %%mm1, %%mm2 \n\t"
  292. "por %%mm2, %%mm6 \n\t"// First Line to Filter
  293. "movq (%0, %1, 8), %%mm5 \n\t"
  294. "leal (%0, %1, 4), %%eax \n\t"
  295. "leal (%0, %1, 8), %%ebx \n\t"
  296. "subl %1, %%ebx \n\t"
  297. "addl %1, %0 \n\t" // %0 points to line 1 not 0
  298. "movq (%0, %1, 8), %%mm7 \n\t"
  299. "movq %%mm5, %%mm1 \n\t"
  300. "movq %%mm7, %%mm2 \n\t"
  301. "psubusb %%mm7, %%mm5 \n\t"
  302. "psubusb %%mm1, %%mm2 \n\t"
  303. "por %%mm5, %%mm2 \n\t" // ABS Diff of lines
  304. "psubusb %%mm0, %%mm2 \n\t" // diff <= QP -> 0
  305. "pcmpeqb b00, %%mm2 \n\t" // diff <= QP -> FF
  306. "pand %%mm2, %%mm7 \n\t"
  307. "pandn %%mm1, %%mm2 \n\t"
  308. "por %%mm2, %%mm7 \n\t" // First Line to Filter
  309. // 1 2 3 4 5 6 7 8
  310. // %0 %0+%1 %0+2%1 eax %0+4%1 eax+2%1 ebx eax+4%1
  311. // 6 4 2 2 1 1
  312. // 6 4 4 2
  313. // 6 8 2
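// Note: the pavgb chains below build these weights by repeated averaging: each pavgb
// halves its inputs, so a value mixed in at depth k ends up with weight 2^-k of the
// final /16 sum; the per-step rounding is why this path is only "almost exact"
// (rated 'e' in the table at the top of the file).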
  314. /*
  315. "movq %%mm6, %%mm2 \n\t" //1
  316. "movq %%mm6, %%mm3 \n\t" //1
  317. "paddusb b02, %%mm3 \n\t"
  318. "psrlw $2, %%mm3 \n\t" //1 /4
  319. "pand b3F, %%mm3 \n\t"
  320. "psubb %%mm3, %%mm2 \n\t"
  321. "movq (%0, %1), %%mm0 \n\t" // 1
  322. "movq %%mm0, %%mm1 \n\t" // 1
  323. "paddusb b02, %%mm0 \n\t"
  324. "psrlw $2, %%mm0 \n\t" // 1 /4
  325. "pand b3F, %%mm0 \n\t"
  326. "paddusb %%mm2, %%mm0 \n\t" //3 1 /4
  327. */
  328. "movq (%0, %1), %%mm0 \n\t" // 1
  329. "movq %%mm0, %%mm1 \n\t" // 1
  330. "pavgb %%mm6, %%mm0 \n\t" //1 1 /2
  331. "pavgb %%mm6, %%mm0 \n\t" //3 1 /4
  332. "movq (%0, %1, 4), %%mm2 \n\t" // 1
  333. "movq %%mm2, %%mm5 \n\t" // 1
  334. "pavgb (%%eax), %%mm2 \n\t" // 11 /2
  335. "pavgb (%0, %1, 2), %%mm2 \n\t" // 211 /4
  336. "movq %%mm2, %%mm3 \n\t" // 211 /4
  337. "movq (%0), %%mm4 \n\t" // 1
  338. "pavgb %%mm4, %%mm3 \n\t" // 4 211 /8
  339. "pavgb %%mm0, %%mm3 \n\t" //642211 /16
  340. "movq %%mm3, (%0) \n\t" // X
  341. // mm1=2 mm2=3(211) mm4=1 mm5=5 mm6=0 mm7=9
  342. "movq %%mm1, %%mm0 \n\t" // 1
  343. "pavgb %%mm6, %%mm0 \n\t" //1 1 /2
  344. "movq %%mm4, %%mm3 \n\t" // 1
  345. "pavgb (%0,%1,2), %%mm3 \n\t" // 1 1 /2
  346. "pavgb (%%eax,%1,2), %%mm5 \n\t" // 11 /2
  347. "pavgb (%%eax), %%mm5 \n\t" // 211 /4
  348. "pavgb %%mm5, %%mm3 \n\t" // 2 2211 /8
  349. "pavgb %%mm0, %%mm3 \n\t" //4242211 /16
  350. "movq %%mm3, (%0,%1) \n\t" // X
  351. // mm1=2 mm2=3(211) mm4=1 mm5=4(211) mm6=0 mm7=9
  352. "pavgb %%mm4, %%mm6 \n\t" //11 /2
  353. "movq (%%ebx), %%mm0 \n\t" // 1
  354. "pavgb (%%eax, %1, 2), %%mm0 \n\t" // 11/2
  355. "movq %%mm0, %%mm3 \n\t" // 11/2
  356. "pavgb %%mm1, %%mm0 \n\t" // 2 11/4
  357. "pavgb %%mm6, %%mm0 \n\t" //222 11/8
  358. "pavgb %%mm2, %%mm0 \n\t" //22242211/16
  359. "movq (%0, %1, 2), %%mm2 \n\t" // 1
  360. "movq %%mm0, (%0, %1, 2) \n\t" // X
  361. // mm1=2 mm2=3 mm3=6(11) mm4=1 mm5=4(211) mm6=0(11) mm7=9
  362. "movq (%%eax, %1, 4), %%mm0 \n\t" // 1
  363. "pavgb (%%ebx), %%mm0 \n\t" // 11 /2
  364. "pavgb %%mm0, %%mm6 \n\t" //11 11 /4
  365. "pavgb %%mm1, %%mm4 \n\t" // 11 /2
  366. "pavgb %%mm2, %%mm1 \n\t" // 11 /2
  367. "pavgb %%mm1, %%mm6 \n\t" //1122 11 /8
  368. "pavgb %%mm5, %%mm6 \n\t" //112242211 /16
  369. "movq (%%eax), %%mm5 \n\t" // 1
  370. "movq %%mm6, (%%eax) \n\t" // X
  371. // mm0=7(11) mm1=2(11) mm2=3 mm3=6(11) mm4=1(11) mm5=4 mm7=9
  372. "movq (%%eax, %1, 4), %%mm6 \n\t" // 1
  373. "pavgb %%mm7, %%mm6 \n\t" // 11 /2
  374. "pavgb %%mm4, %%mm6 \n\t" // 11 11 /4
  375. "pavgb %%mm3, %%mm6 \n\t" // 11 2211 /8
  376. "pavgb %%mm5, %%mm2 \n\t" // 11 /2
  377. "movq (%0, %1, 4), %%mm4 \n\t" // 1
  378. "pavgb %%mm4, %%mm2 \n\t" // 112 /4
  379. "pavgb %%mm2, %%mm6 \n\t" // 112242211 /16
  380. "movq %%mm6, (%0, %1, 4) \n\t" // X
  381. // mm0=7(11) mm1=2(11) mm2=3(112) mm3=6(11) mm4=5 mm5=4 mm7=9
  382. "pavgb %%mm7, %%mm1 \n\t" // 11 2 /4
  383. "pavgb %%mm4, %%mm5 \n\t" // 11 /2
  384. "pavgb %%mm5, %%mm0 \n\t" // 11 11 /4
  385. "movq (%%eax, %1, 2), %%mm6 \n\t" // 1
  386. "pavgb %%mm6, %%mm1 \n\t" // 11 4 2 /8
  387. "pavgb %%mm0, %%mm1 \n\t" // 11224222 /16
  388. // "pxor %%mm1, %%mm1 \n\t"
  389. "movq %%mm1, (%%eax, %1, 2) \n\t" // X
  390. // mm2=3(112) mm3=6(11) mm4=5 mm5=4(11) mm6=6 mm7=9
  391. "pavgb (%%ebx), %%mm2 \n\t" // 112 4 /8
  392. "movq (%%eax, %1, 4), %%mm0 \n\t" // 1
  393. "pavgb %%mm0, %%mm6 \n\t" // 1 1 /2
  394. "pavgb %%mm7, %%mm6 \n\t" // 1 12 /4
  395. "pavgb %%mm2, %%mm6 \n\t" // 1122424 /4
  396. // "pxor %%mm6, %%mm6 \n\t"
  397. "movq %%mm6, (%%ebx) \n\t" // X
  398. // mm0=8 mm3=6(11) mm4=5 mm5=4(11) mm7=9
  399. "pavgb %%mm7, %%mm5 \n\t" // 11 2 /4
  400. "pavgb %%mm7, %%mm5 \n\t" // 11 6 /8
  401. "pavgb %%mm3, %%mm0 \n\t" // 112 /4
  402. "pavgb %%mm0, %%mm5 \n\t" // 112246 /16
  403. // "pxor %%mm5, %%mm5 \n\t"
  404. // "movq pQPb, %%mm5 \n\t"
  405. "movq %%mm5, (%%eax, %1, 4) \n\t" // X
  406. "popl %0\n\t"
  407. :
  408. : "r" (src), "r" (stride)
  409. : "%eax", "%ebx"
  410. );
  411. #else
  412. const int l1= stride;
  413. const int l2= stride + l1;
  414. const int l3= stride + l2;
  415. const int l4= stride + l3;
  416. const int l5= stride + l4;
  417. const int l6= stride + l5;
  418. const int l7= stride + l6;
  419. const int l8= stride + l7;
  420. const int l9= stride + l8;
  421. for(int x=0; x<BLOCK_SIZE; x++)
  422. {
  423. const int first= ABS(src[0] - src[l1]) < QP ? src[0] : src[l1];
  424. const int last= ABS(src[l8] - src[l9]) < QP ? src[l9] : src[l8];
  425. int sums[9];
  426. sums[0] = first + src[l1];
  427. sums[1] = src[l1] + src[l2];
  428. sums[2] = src[l2] + src[l3];
  429. sums[3] = src[l3] + src[l4];
  430. sums[4] = src[l4] + src[l5];
  431. sums[5] = src[l5] + src[l6];
  432. sums[6] = src[l6] + src[l7];
  433. sums[7] = src[l7] + src[l8];
  434. sums[8] = src[l8] + last;
  435. src[l1]= ((sums[0]<<2) + ((first + sums[2])<<1) + sums[4] + 8)>>4;
436. src[l2]= ((src[l2]<<2) + ((first + sums[0] + sums[3])<<1) + sums[5] + 8)>>4;
437. src[l3]= ((src[l3]<<2) + ((first + sums[1] + sums[4])<<1) + sums[6] + 8)>>4;
438. src[l4]= ((src[l4]<<2) + ((sums[2] + sums[5])<<1) + sums[0] + sums[7] + 8)>>4;
439. src[l5]= ((src[l5]<<2) + ((sums[3] + sums[6])<<1) + sums[1] + sums[8] + 8)>>4;
440. src[l6]= ((src[l6]<<2) + ((last + sums[7] + sums[4])<<1) + sums[2] + 8)>>4;
441. src[l7]= (((last + src[l7])<<2) + ((src[l8] + sums[5])<<1) + sums[3] + 8)>>4;
442. src[l8]= ((sums[8]<<2) + ((last + sums[6])<<1) + sums[4] + 8)>>4;
  443. src++;
  444. }
  445. #endif
  446. }
  447. static inline void doVertDefFilter(uint8_t src[], int stride, int QP)
  448. {
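// Deblocking step (both paths): middleEnergy = 5*(l5-l4) + 2*(l3-l6) around the two
// centre lines; if |middleEnergy| < 8*QP, a correction
// d = sign(-middleEnergy) * ((max(|middleEnergy| - min(|leftEnergy|,|rightEnergy|), 0))*5 + 32)>>6,
// clamped between 0 and (l4-l5)/2, is subtracted from l4 and added to l5.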
  449. #ifdef HAVE_MMX
  450. src+= stride;
  451. //FIXME try pmul for *5 stuff
  452. // src[0]=0;
  453. asm volatile(
  454. "pxor %%mm7, %%mm7 \n\t"
  455. "leal (%0, %1), %%eax \n\t"
  456. "leal (%%eax, %1, 4), %%ebx \n\t"
  457. // 0 1 2 3 4 5 6 7
  458. // %0 %0+%1 %0+2%1 eax+2%1 %0+4%1 eax+4%1 ebx+%1 ebx+2%1
  459. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1
  460. "movq (%0), %%mm0 \n\t"
  461. "movq %%mm0, %%mm1 \n\t"
  462. "punpcklbw %%mm7, %%mm0 \n\t" // low part of line 0
  463. "punpckhbw %%mm7, %%mm1 \n\t" // high part of line 0
  464. "movq (%%eax), %%mm2 \n\t"
  465. "movq %%mm2, %%mm3 \n\t"
  466. "punpcklbw %%mm7, %%mm2 \n\t" // low part of line 1
  467. "punpckhbw %%mm7, %%mm3 \n\t" // high part of line 1
  468. "movq (%%eax, %1), %%mm4 \n\t"
  469. "movq %%mm4, %%mm5 \n\t"
  470. "punpcklbw %%mm7, %%mm4 \n\t" // low part of line 2
  471. "punpckhbw %%mm7, %%mm5 \n\t" // high part of line 2
  472. "paddw %%mm0, %%mm0 \n\t" // 2L0
  473. "paddw %%mm1, %%mm1 \n\t" // 2H0
  474. "psubw %%mm4, %%mm2 \n\t" // L1 - L2
  475. "psubw %%mm5, %%mm3 \n\t" // H1 - H2
  476. "psubw %%mm2, %%mm0 \n\t" // 2L0 - L1 + L2
  477. "psubw %%mm3, %%mm1 \n\t" // 2H0 - H1 + H2
  478. "psllw $2, %%mm2 \n\t" // 4L1 - 4L2
  479. "psllw $2, %%mm3 \n\t" // 4H1 - 4H2
  480. "psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2
  481. "psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2
  482. "movq (%%eax, %1, 2), %%mm2 \n\t"
  483. "movq %%mm2, %%mm3 \n\t"
  484. "punpcklbw %%mm7, %%mm2 \n\t" // L3
  485. "punpckhbw %%mm7, %%mm3 \n\t" // H3
  486. "psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - L3
  487. "psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - H3
  488. "psubw %%mm2, %%mm0 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
  489. "psubw %%mm3, %%mm1 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
  490. "movq %%mm0, temp0 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
  491. "movq %%mm1, temp1 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
  492. "movq (%0, %1, 4), %%mm0 \n\t"
  493. "movq %%mm0, %%mm1 \n\t"
  494. "punpcklbw %%mm7, %%mm0 \n\t" // L4
  495. "punpckhbw %%mm7, %%mm1 \n\t" // H4
  496. "psubw %%mm0, %%mm2 \n\t" // L3 - L4
  497. "psubw %%mm1, %%mm3 \n\t" // H3 - H4
  498. "movq %%mm2, temp2 \n\t" // L3 - L4
  499. "movq %%mm3, temp3 \n\t" // H3 - H4
  500. "paddw %%mm4, %%mm4 \n\t" // 2L2
  501. "paddw %%mm5, %%mm5 \n\t" // 2H2
  502. "psubw %%mm2, %%mm4 \n\t" // 2L2 - L3 + L4
  503. "psubw %%mm3, %%mm5 \n\t" // 2H2 - H3 + H4
  504. "psllw $2, %%mm2 \n\t" // 4L3 - 4L4
  505. "psllw $2, %%mm3 \n\t" // 4H3 - 4H4
  506. "psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4
  507. "psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4
  508. //50 opcodes so far
  509. "movq (%%ebx), %%mm2 \n\t"
  510. "movq %%mm2, %%mm3 \n\t"
  511. "punpcklbw %%mm7, %%mm2 \n\t" // L5
  512. "punpckhbw %%mm7, %%mm3 \n\t" // H5
  513. "psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - L5
  514. "psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - H5
  515. "psubw %%mm2, %%mm4 \n\t" // 2L2 - 5L3 + 5L4 - 2L5
  516. "psubw %%mm3, %%mm5 \n\t" // 2H2 - 5H3 + 5H4 - 2H5
  517. "movq (%%ebx, %1), %%mm6 \n\t"
  518. "punpcklbw %%mm7, %%mm6 \n\t" // L6
  519. "psubw %%mm6, %%mm2 \n\t" // L5 - L6
  520. "movq (%%ebx, %1), %%mm6 \n\t"
  521. "punpckhbw %%mm7, %%mm6 \n\t" // H6
  522. "psubw %%mm6, %%mm3 \n\t" // H5 - H6
  523. "paddw %%mm0, %%mm0 \n\t" // 2L4
  524. "paddw %%mm1, %%mm1 \n\t" // 2H4
  525. "psubw %%mm2, %%mm0 \n\t" // 2L4 - L5 + L6
  526. "psubw %%mm3, %%mm1 \n\t" // 2H4 - H5 + H6
  527. "psllw $2, %%mm2 \n\t" // 4L5 - 4L6
  528. "psllw $2, %%mm3 \n\t" // 4H5 - 4H6
  529. "psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6
  530. "psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6
  531. "movq (%%ebx, %1, 2), %%mm2 \n\t"
  532. "movq %%mm2, %%mm3 \n\t"
  533. "punpcklbw %%mm7, %%mm2 \n\t" // L7
  534. "punpckhbw %%mm7, %%mm3 \n\t" // H7
  535. "paddw %%mm2, %%mm2 \n\t" // 2L7
  536. "paddw %%mm3, %%mm3 \n\t" // 2H7
  537. "psubw %%mm2, %%mm0 \n\t" // 2L4 - 5L5 + 5L6 - 2L7
  538. "psubw %%mm3, %%mm1 \n\t" // 2H4 - 5H5 + 5H6 - 2H7
  539. "movq temp0, %%mm2 \n\t" // 2L0 - 5L1 + 5L2 - 2L3
  540. "movq temp1, %%mm3 \n\t" // 2H0 - 5H1 + 5H2 - 2H3
  541. //FIXME pxor, psubw, pmax for abs
  542. "movq %%mm7, %%mm6 \n\t" // 0
  543. "pcmpgtw %%mm0, %%mm6 \n\t"
  544. "pxor %%mm6, %%mm0 \n\t"
  545. "psubw %%mm6, %%mm0 \n\t" // |2L4 - 5L5 + 5L6 - 2L7|
  546. "movq %%mm7, %%mm6 \n\t" // 0
  547. "pcmpgtw %%mm1, %%mm6 \n\t"
  548. "pxor %%mm6, %%mm1 \n\t"
  549. "psubw %%mm6, %%mm1 \n\t" // |2H4 - 5H5 + 5H6 - 2H7|
  550. "movq %%mm7, %%mm6 \n\t" // 0
  551. "pcmpgtw %%mm2, %%mm6 \n\t"
  552. "pxor %%mm6, %%mm2 \n\t"
  553. "psubw %%mm6, %%mm2 \n\t" // |2L0 - 5L1 + 5L2 - 2L3|
  554. "movq %%mm7, %%mm6 \n\t" // 0
  555. "pcmpgtw %%mm3, %%mm6 \n\t"
  556. "pxor %%mm6, %%mm3 \n\t"
  557. "psubw %%mm6, %%mm3 \n\t" // |2H0 - 5H1 + 5H2 - 2H3|
  558. #ifdef HAVE_MMX2
  559. "pminsw %%mm2, %%mm0 \n\t"
  560. "pminsw %%mm3, %%mm1 \n\t"
  561. #else
  562. "movq %%mm0, %%mm6 \n\t"
  563. "psubusw %%mm2, %%mm6 \n\t"
  564. "psubw %%mm6, %%mm0 \n\t"
  565. "movq %%mm1, %%mm6 \n\t"
  566. "psubusw %%mm3, %%mm6 \n\t"
  567. "psubw %%mm6, %%mm1 \n\t"
  568. #endif
  569. "movq %%mm7, %%mm6 \n\t" // 0
  570. "pcmpgtw %%mm4, %%mm6 \n\t" // sign(2L2 - 5L3 + 5L4 - 2L5)
  571. "pxor %%mm6, %%mm4 \n\t"
  572. "psubw %%mm6, %%mm4 \n\t" // |2L2 - 5L3 + 5L4 - 2L5|
  573. "pcmpgtw %%mm5, %%mm7 \n\t" // sign(2H2 - 5H3 + 5H4 - 2H5)
  574. "pxor %%mm7, %%mm5 \n\t"
  575. "psubw %%mm7, %%mm5 \n\t" // |2H2 - 5H3 + 5H4 - 2H5|
  576. // 100 opcodes
  577. "movd %2, %%mm2 \n\t" // QP
  578. //"pcmpeqb %%mm2, %%mm2\n\t"
  579. "punpcklwd %%mm2, %%mm2 \n\t"
  580. "punpcklwd %%mm2, %%mm2 \n\t"
  581. "psllw $3, %%mm2 \n\t" // 8QP
  582. "movq %%mm2, %%mm3 \n\t" // 8QP
  583. "pcmpgtw %%mm4, %%mm2 \n\t"
  584. "pcmpgtw %%mm5, %%mm3 \n\t"
  585. "pand %%mm2, %%mm4 \n\t"
  586. "pand %%mm3, %%mm5 \n\t"
  587. "psubusw %%mm0, %%mm4 \n\t" // hd
  588. "psubusw %%mm1, %%mm5 \n\t" // ld
  589. "movq w05, %%mm2 \n\t" // 5
  590. "pmullw %%mm2, %%mm4 \n\t"
  591. "pmullw %%mm2, %%mm5 \n\t"
  592. "movq w20, %%mm2 \n\t" // 32
  593. "paddw %%mm2, %%mm4 \n\t"
  594. "paddw %%mm2, %%mm5 \n\t"
  595. "psrlw $6, %%mm4 \n\t"
  596. "psrlw $6, %%mm5 \n\t"
  597. /*
  598. "movq w06, %%mm2 \n\t" // 6
  599. "paddw %%mm2, %%mm4 \n\t"
  600. "paddw %%mm2, %%mm5 \n\t"
  601. "movq w1400, %%mm2 \n\t" // 1400h = 5120 = 5/64*2^16
  602. //FIXME if *5/64 is supposed to be /13 then we should use 5041 instead of 5120
  603. "pmulhw %%mm2, %%mm4 \n\t" // hd/13
  604. "pmulhw %%mm2, %%mm5 \n\t" // ld/13
  605. */
  606. "movq temp2, %%mm0 \n\t" // L3 - L4
  607. "movq temp3, %%mm1 \n\t" // H3 - H4
  608. "pxor %%mm2, %%mm2 \n\t"
  609. "pxor %%mm3, %%mm3 \n\t"
  610. // FIXME rounding error
  611. "psraw $1, %%mm0 \n\t" // (L3 - L4)/2
  612. "psraw $1, %%mm1 \n\t" // (H3 - H4)/2
  613. "pcmpgtw %%mm0, %%mm2 \n\t" // sign (L3-L4)
  614. "pcmpgtw %%mm1, %%mm3 \n\t" // sign (H3-H4)
  615. "pxor %%mm2, %%mm0 \n\t"
  616. "pxor %%mm3, %%mm1 \n\t"
  617. "psubw %%mm2, %%mm0 \n\t" // |L3-L4|
  618. "psubw %%mm3, %%mm1 \n\t" // |H3-H4|
  619. // "psrlw $1, %%mm0 \n\t" // |L3 - L4|/2
  620. // "psrlw $1, %%mm1 \n\t" // |H3 - H4|/2
  621. "pxor %%mm6, %%mm2 \n\t"
  622. "pxor %%mm7, %%mm3 \n\t"
  623. "pand %%mm2, %%mm4 \n\t"
  624. "pand %%mm3, %%mm5 \n\t"
  625. #ifdef HAVE_MMX2
  626. "pminsw %%mm0, %%mm4 \n\t"
  627. "pminsw %%mm1, %%mm5 \n\t"
  628. #else
  629. "movq %%mm4, %%mm2 \n\t"
  630. "psubusw %%mm0, %%mm2 \n\t"
  631. "psubw %%mm2, %%mm4 \n\t"
  632. "movq %%mm5, %%mm2 \n\t"
  633. "psubusw %%mm1, %%mm2 \n\t"
  634. "psubw %%mm2, %%mm5 \n\t"
  635. #endif
  636. "pxor %%mm6, %%mm4 \n\t"
  637. "pxor %%mm7, %%mm5 \n\t"
  638. "psubw %%mm6, %%mm4 \n\t"
  639. "psubw %%mm7, %%mm5 \n\t"
  640. "packsswb %%mm5, %%mm4 \n\t"
  641. "movq (%%eax, %1, 2), %%mm0 \n\t"
  642. "paddb %%mm4, %%mm0 \n\t"
  643. "movq %%mm0, (%%eax, %1, 2) \n\t"
  644. "movq (%0, %1, 4), %%mm0 \n\t"
  645. "psubb %%mm4, %%mm0 \n\t"
  646. // "pxor %%mm0, %%mm0 \n\t"
  647. "movq %%mm0, (%0, %1, 4) \n\t"
  648. :
  649. : "r" (src), "r" (stride), "r" (QP)
  650. : "%eax", "%ebx"
  651. );
  652. #else
  653. const int l1= stride;
  654. const int l2= stride + l1;
  655. const int l3= stride + l2;
  656. const int l4= stride + l3;
  657. const int l5= stride + l4;
  658. const int l6= stride + l5;
  659. const int l7= stride + l6;
  660. const int l8= stride + l7;
  661. // const int l9= stride + l8;
  662. for(int x=0; x<BLOCK_SIZE; x++)
  663. {
  664. const int middleEnergy= 5*(src[l5] - src[l4]) + 2*(src[l3] - src[l6]);
  665. if(ABS(middleEnergy) < 8*QP)
  666. {
  667. const int q=(src[l4] - src[l5])/2;
  668. const int leftEnergy= 5*(src[l3] - src[l2]) + 2*(src[l1] - src[l4]);
  669. const int rightEnergy= 5*(src[l7] - src[l6]) + 2*(src[l5] - src[l8]);
  670. int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
  671. d= MAX(d, 0);
  672. d= (5*d + 32) >> 6;
  673. d*= SIGN(-middleEnergy);
  674. if(q>0)
  675. {
  676. d= d<0 ? 0 : d;
  677. d= d>q ? q : d;
  678. }
  679. else
  680. {
  681. d= d>0 ? 0 : d;
  682. d= d<q ? q : d;
  683. }
  684. src[l4]-= d;
  685. src[l5]+= d;
  686. }
  687. src++;
  688. }
  689. #endif
  690. }
  691. //FIXME? |255-0| = 1
  692. /**
693. * Check if the given 8x8 Block is mostly "flat" and copy the unaligned data into tempBlock.
  694. */
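/* Explanatory note: row-wise counterpart of isVertDC; it counts the horizontally
   adjacent pixel pairs per row that differ by at most 1 and, at the same time,
   copies the (possibly unaligned) 8x8 block into tempBlock so the horizontal
   filters can work on a contiguous copy. */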
  695. static inline bool isHorizDCAndCopy2Temp(uint8_t src[], int stride)
  696. {
  697. // src++;
  698. int numEq= 0;
  699. #ifdef HAVE_MMX
  700. asm volatile (
  701. // "int $3 \n\t"
  702. "pushl %1\n\t"
  703. "movq b7E, %%mm7 \n\t" // mm7 = 0x7F
  704. "movq b7C, %%mm6 \n\t" // mm6 = 0x7D
  705. "leal tempBlock, %%eax \n\t"
  706. "pxor %%mm0, %%mm0 \n\t"
  707. #define HDC_CHECK_AND_CPY(i) \
  708. "movq -4(%1), %%mm2 \n\t"\
  709. "psrlq $32, %%mm2 \n\t"\
  710. "punpckldq 4(%1), %%mm2 \n\t" /* (%1) */\
  711. "movq %%mm2, %%mm1 \n\t"\
  712. "psrlq $8, %%mm2 \n\t"\
  713. "psubb %%mm1, %%mm2 \n\t"\
  714. "paddb %%mm7, %%mm2 \n\t"\
  715. "pcmpgtb %%mm6, %%mm2 \n\t"\
  716. "paddb %%mm2, %%mm0 \n\t"\
  717. "movq %%mm1," #i "(%%eax) \n\t"
  718. HDC_CHECK_AND_CPY(0)
  719. "addl %2, %1 \n\t"
  720. HDC_CHECK_AND_CPY(8)
  721. "addl %2, %1 \n\t"
  722. HDC_CHECK_AND_CPY(16)
  723. "addl %2, %1 \n\t"
  724. HDC_CHECK_AND_CPY(24)
  725. "addl %2, %1 \n\t"
  726. HDC_CHECK_AND_CPY(32)
  727. "addl %2, %1 \n\t"
  728. HDC_CHECK_AND_CPY(40)
  729. "addl %2, %1 \n\t"
  730. HDC_CHECK_AND_CPY(48)
  731. "addl %2, %1 \n\t"
  732. HDC_CHECK_AND_CPY(56)
  733. "psllq $8, %%mm0 \n\t" // remove dummy value
  734. "movq %%mm0, %%mm1 \n\t"
  735. "psrlw $8, %%mm0 \n\t"
  736. "paddb %%mm1, %%mm0 \n\t"
  737. "movq %%mm0, %%mm1 \n\t"
  738. "psrlq $16, %%mm0 \n\t"
  739. "paddb %%mm1, %%mm0 \n\t"
  740. "movq %%mm0, %%mm1 \n\t"
  741. "psrlq $32, %%mm0 \n\t"
  742. "paddb %%mm1, %%mm0 \n\t"
  743. "popl %1\n\t"
  744. "movd %%mm0, %0 \n\t"
  745. : "=r" (numEq)
  746. : "r" (src), "r" (stride)
  747. : "%eax"
  748. );
  749. // printf("%d\n", numEq);
  750. numEq= (256 - (numEq & 0xFF)) &0xFF;
  751. #else
  752. for(int y=0; y<BLOCK_SIZE; y++)
  753. {
  754. if(((src[0] - src[1] + 1) & 0xFFFF) < 3) numEq++;
  755. if(((src[1] - src[2] + 1) & 0xFFFF) < 3) numEq++;
  756. if(((src[2] - src[3] + 1) & 0xFFFF) < 3) numEq++;
  757. if(((src[3] - src[4] + 1) & 0xFFFF) < 3) numEq++;
  758. if(((src[4] - src[5] + 1) & 0xFFFF) < 3) numEq++;
  759. if(((src[5] - src[6] + 1) & 0xFFFF) < 3) numEq++;
  760. if(((src[6] - src[7] + 1) & 0xFFFF) < 3) numEq++;
  761. tempBlock[0 + y*TEMP_STRIDE] = src[0];
  762. tempBlock[1 + y*TEMP_STRIDE] = src[1];
  763. tempBlock[2 + y*TEMP_STRIDE] = src[2];
  764. tempBlock[3 + y*TEMP_STRIDE] = src[3];
  765. tempBlock[4 + y*TEMP_STRIDE] = src[4];
  766. tempBlock[5 + y*TEMP_STRIDE] = src[5];
  767. tempBlock[6 + y*TEMP_STRIDE] = src[6];
  768. tempBlock[7 + y*TEMP_STRIDE] = src[7];
  769. src+= stride;
  770. }
  771. #endif
  772. /* if(abs(numEq - asmEq) > 0)
  773. {
  774. // printf("\nasm:%d c:%d\n", asmEq, numEq);
  775. for(int y=0; y<8; y++)
  776. {
  777. for(int x=0; x<8; x++)
  778. {
  779. printf("%d ", src[x + y*stride]);
  780. }
  781. printf("\n");
  782. }
  783. }
  784. */
  785. // printf("%d\n", numEq);
  786. return numEq > hFlatnessThreshold;
  787. }
  788. static inline bool isHorizMinMaxOk(uint8_t src[], int stride, int QP)
  789. {
  790. #ifdef MMX_FIXME
  791. FIXME
  792. int isOk;
  793. asm volatile(
  794. // "int $3 \n\t"
  795. "movq (%1, %2), %%mm0 \n\t"
  796. "movq (%1, %2, 8), %%mm1 \n\t"
  797. "movq %%mm0, %%mm2 \n\t"
  798. "psubusb %%mm1, %%mm0 \n\t"
  799. "psubusb %%mm2, %%mm1 \n\t"
  800. "por %%mm1, %%mm0 \n\t" // ABS Diff
  801. "movq pQPb, %%mm7 \n\t" // QP,..., QP
  802. "paddusb %%mm7, %%mm7 \n\t" // 2QP ... 2QP
  803. "psubusb %%mm7, %%mm0 \n\t" // Diff <= 2QP -> 0
  804. "pcmpeqd b00, %%mm0 \n\t"
  805. "psrlq $16, %%mm0 \n\t"
  806. "pcmpeqd bFF, %%mm0 \n\t"
  807. // "movd %%mm0, (%1, %2, 4)\n\t"
  808. "movd %%mm0, %0 \n\t"
  809. : "=r" (isOk)
  810. : "r" (src), "r" (stride)
  811. );
  812. return isOk;
  813. #else
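// Note: this C fallback is only the approximate check ('a' in the table at the top):
// it compares the two end pixels of the first row rather than every column.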
  814. if(abs(src[0] - src[7]) > 2*QP) return false;
  815. return true;
  816. #endif
  817. }
  818. static inline void doHorizDefFilterAndCopyBack(uint8_t dst[], int stride, int QP)
  819. {
  820. #ifdef HAVE_MMX2
  821. asm volatile(
  822. "pushl %0 \n\t"
  823. "pxor %%mm7, %%mm7 \n\t"
  824. "movq bm00001000, %%mm6 \n\t"
  825. "movd %2, %%mm5 \n\t" // QP
  826. "movq %%mm5, %%mm4 \n\t"
  827. "paddusb %%mm5, %%mm5 \n\t" // 2QP
  828. "paddusb %%mm5, %%mm4 \n\t" // 3QP
  829. "psllq $24, %%mm4 \n\t"
  830. "pxor %%mm5, %%mm5 \n\t" // 0
  831. "psubb %%mm4, %%mm5 \n\t" // -QP
  832. "leal tempBlock, %%eax \n\t"
  833. //FIXME? "unroll by 2" and mix
  834. #define HDF(i) "movq " #i "(%%eax), %%mm0 \n\t"\
  835. "movq %%mm0, %%mm1 \n\t"\
  836. "movq %%mm0, %%mm2 \n\t"\
  837. "psrlq $8, %%mm1 \n\t"\
  838. "psubusb %%mm1, %%mm2 \n\t"\
  839. "psubusb %%mm0, %%mm1 \n\t"\
  840. "por %%mm2, %%mm1 \n\t" /* |px - p(x+1)| */\
  841. "pcmpeqb %%mm7, %%mm2 \n\t" /* sgn[px - p(x+1)] */\
  842. "pshufw $0xAA, %%mm1, %%mm3 \n\t"\
  843. "pminub %%mm1, %%mm3 \n\t"\
  844. "psrlq $16, %%mm3 \n\t"\
  845. "psubusb %%mm3, %%mm1 \n\t" /* |p3-p4|-min(|p1-p2|,|p5,ü6|) */\
  846. "paddb %%mm5, %%mm1 \n\t"\
  847. "psubusb %%mm5, %%mm1 \n\t"\
  848. "psrlw $2, %%mm1 \n\t"\
  849. "pxor %%mm2, %%mm1 \n\t"\
  850. "psubb %%mm2, %%mm1 \n\t"\
  851. "pand %%mm6, %%mm1 \n\t"\
  852. "psubb %%mm1, %%mm0 \n\t"\
  853. "psllq $8, %%mm1 \n\t"\
  854. "paddb %%mm1, %%mm0 \n\t"\
  855. "movd %%mm0, (%0) \n\t"\
  856. "psrlq $32, %%mm0 \n\t"\
  857. "movd %%mm0, 4(%0) \n\t"
  858. HDF(0)
  859. "addl %1, %0 \n\t"
  860. HDF(8)
  861. "addl %1, %0 \n\t"
  862. HDF(16)
  863. "addl %1, %0 \n\t"
  864. HDF(24)
  865. "addl %1, %0 \n\t"
  866. HDF(32)
  867. "addl %1, %0 \n\t"
  868. HDF(40)
  869. "addl %1, %0 \n\t"
  870. HDF(48)
  871. "addl %1, %0 \n\t"
  872. HDF(56)
  873. "popl %0 \n\t"
  874. :
  875. : "r" (dst), "r" (stride), "r" (QP)
  876. : "%eax"
  877. );
  878. #else
  879. uint8_t *src= tempBlock;
  880. for(int y=0; y<BLOCK_SIZE; y++)
  881. {
  882. dst[0] = src[0];
  883. dst[1] = src[1];
  884. dst[2] = src[2];
  885. dst[3] = src[3];
  886. dst[4] = src[4];
  887. dst[5] = src[5];
  888. dst[6] = src[6];
  889. dst[7] = src[7];
890. const int middleEnergy= 5*(src[4] - src[3]) + 2*(src[2] - src[5]);
  891. if(ABS(middleEnergy) < 8*QP)
  892. {
  893. const int q=(src[3] - src[4])/2;
  894. const int leftEnergy= 5*(src[2] - src[1]) + 2*(src[0] - src[3]);
  895. const int rightEnergy= 5*(src[6] - src[5]) + 2*(src[4] - src[7]);
  896. int d= ABS(middleEnergy) - MIN( ABS(leftEnergy), ABS(rightEnergy) );
  897. d= MAX(d, 0);
  898. d= (5*d + 32) >> 6;
  899. d*= SIGN(-middleEnergy);
  900. if(q>0)
  901. {
  902. d= d<0 ? 0 : d;
  903. d= d>q ? q : d;
  904. }
  905. else
  906. {
  907. d= d>0 ? 0 : d;
  908. d= d<q ? q : d;
  909. }
  910. dst[3]-= d;
  911. dst[4]+= d;
  912. }
  913. dst+= stride;
  914. src+= TEMP_STRIDE;
  915. }
  916. #endif
  917. }
  918. /**
  919. * Do a horizontal low pass filter on the 8x8 block
920. * using the 9-Tap Filter (1,1,2,2,4,2,2,1,1)/16 (C version)
921. * using approximately the 7-Tap Filter (1,2,3,4,3,2,1)/16 (MMX2 version)
  922. */
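/* Note: the MMX2 path reads each row from tempBlock (filled by isHorizDCAndCopy2Temp)
   and writes the 8 filtered pixels back to dst; the C path filters dst in place,
   using tempBlock as the unfiltered source for the sums. */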
  923. static inline void doHorizLowPassAndCopyBack(uint8_t dst[], int stride, int QP)
  924. {
  925. //return;
  926. #ifdef HAVE_MMX2
  927. asm volatile( //"movv %0 %1 %2\n\t"
  928. "pushl %0\n\t"
  929. "pxor %%mm7, %%mm7 \n\t"
  930. "leal tempBlock, %%eax \n\t"
  931. #define HLP1 "movq (%0), %%mm0 \n\t"\
  932. "movq %%mm0, %%mm1 \n\t"\
  933. "psllq $8, %%mm0 \n\t"\
  934. "pavgb %%mm1, %%mm0 \n\t"\
  935. "psrlw $8, %%mm0 \n\t"\
  936. "pxor %%mm1, %%mm1 \n\t"\
  937. "packuswb %%mm1, %%mm0 \n\t"\
  938. "movq %%mm0, %%mm1 \n\t"\
  939. "movq %%mm0, %%mm2 \n\t"\
  940. "psllq $32, %%mm0 \n\t"\
  941. "paddb %%mm0, %%mm1 \n\t"\
  942. "psllq $16, %%mm2 \n\t"\
  943. "pavgb %%mm2, %%mm0 \n\t"\
  944. "movq %%mm0, %%mm3 \n\t"\
  945. "pand bm11001100, %%mm0 \n\t"\
  946. "paddusb %%mm0, %%mm3 \n\t"\
  947. "psrlq $8, %%mm3 \n\t"\
  948. "pavgb %%mm1, %%mm4 \n\t"\
  949. "pavgb %%mm3, %%mm2 \n\t"\
  950. "psrlq $16, %%mm2 \n\t"\
  951. "punpcklbw %%mm2, %%mm2 \n\t"\
  952. "movq %%mm2, (%0) \n\t"\
  953. #define HLP2 "movq (%0), %%mm0 \n\t"\
  954. "movq %%mm0, %%mm1 \n\t"\
  955. "psllq $8, %%mm0 \n\t"\
  956. "pavgb %%mm1, %%mm0 \n\t"\
  957. "psrlw $8, %%mm0 \n\t"\
  958. "pxor %%mm1, %%mm1 \n\t"\
  959. "packuswb %%mm1, %%mm0 \n\t"\
  960. "movq %%mm0, %%mm2 \n\t"\
  961. "psllq $32, %%mm0 \n\t"\
  962. "psllq $16, %%mm2 \n\t"\
  963. "pavgb %%mm2, %%mm0 \n\t"\
  964. "movq %%mm0, %%mm3 \n\t"\
  965. "pand bm11001100, %%mm0 \n\t"\
  966. "paddusb %%mm0, %%mm3 \n\t"\
  967. "psrlq $8, %%mm3 \n\t"\
  968. "pavgb %%mm3, %%mm2 \n\t"\
  969. "psrlq $16, %%mm2 \n\t"\
  970. "punpcklbw %%mm2, %%mm2 \n\t"\
  971. "movq %%mm2, (%0) \n\t"\
  972. // approximately a 7-Tap Filter with Vector (1,2,3,4,3,2,1)/16
  973. /*
  974. 31
  975. 121
  976. 121
  977. 121
  978. 121
  979. 121
  980. 121
  981. 13
  982. Implemented Exact 7-Tap
  983. 9421 A321
  984. 36421 64321
  985. 334321 =
  986. 1234321 =
  987. 1234321 =
  988. 123433 =
  989. 12463 12346
  990. 1249 123A
  991. */
  992. #define HLP3(i) "movq " #i "(%%eax), %%mm0 \n\t"\
  993. "movq %%mm0, %%mm1 \n\t"\
  994. "movq %%mm0, %%mm2 \n\t"\
  995. "movq %%mm0, %%mm3 \n\t"\
  996. "movq %%mm0, %%mm4 \n\t"\
  997. "psllq $8, %%mm1 \n\t"\
  998. "psrlq $8, %%mm2 \n\t"\
  999. "pand bm00000001, %%mm3 \n\t"\
  1000. "pand bm10000000, %%mm4 \n\t"\
  1001. "por %%mm3, %%mm1 \n\t"\
  1002. "por %%mm4, %%mm2 \n\t"\
  1003. "pavgb %%mm2, %%mm1 \n\t"\
  1004. "pavgb %%mm1, %%mm0 \n\t"\
  1005. \
  1006. "pshufw $0xF9, %%mm0, %%mm3 \n\t"\
  1007. "pshufw $0x90, %%mm0, %%mm4 \n\t"\
  1008. "pavgb %%mm3, %%mm4 \n\t"\
  1009. "pavgb %%mm4, %%mm0 \n\t"\
  1010. "movd %%mm0, (%0) \n\t"\
  1011. "psrlq $32, %%mm0 \n\t"\
  1012. "movd %%mm0, 4(%0) \n\t"\
  1013. #define HLP(i) HLP3(i)
  1014. HLP(0)
  1015. "addl %1, %0 \n\t"
  1016. HLP(8)
  1017. "addl %1, %0 \n\t"
  1018. HLP(16)
  1019. "addl %1, %0 \n\t"
  1020. HLP(24)
  1021. "addl %1, %0 \n\t"
  1022. HLP(32)
  1023. "addl %1, %0 \n\t"
  1024. HLP(40)
  1025. "addl %1, %0 \n\t"
  1026. HLP(48)
  1027. "addl %1, %0 \n\t"
  1028. HLP(56)
  1029. "popl %0\n\t"
  1030. :
  1031. : "r" (dst), "r" (stride)
  1032. : "%eax", "%ebx"
  1033. );
  1034. #else
  1035. uint8_t *temp= tempBlock;
  1036. for(int y=0; y<BLOCK_SIZE; y++)
  1037. {
  1038. const int first= ABS(dst[-1] - dst[0]) < QP ? dst[-1] : dst[0];
  1039. const int last= ABS(dst[8] - dst[7]) < QP ? dst[8] : dst[7];
  1040. int sums[9];
  1041. sums[0] = first + temp[0];
  1042. sums[1] = temp[0] + temp[1];
  1043. sums[2] = temp[1] + temp[2];
  1044. sums[3] = temp[2] + temp[3];
  1045. sums[4] = temp[3] + temp[4];
  1046. sums[5] = temp[4] + temp[5];
  1047. sums[6] = temp[5] + temp[6];
  1048. sums[7] = temp[6] + temp[7];
  1049. sums[8] = temp[7] + last;
  1050. dst[0]= ((sums[0]<<2) + ((first + sums[2])<<1) + sums[4] + 8)>>4;
1051. dst[1]= ((dst[1]<<2) + ((first + sums[0] + sums[3])<<1) + sums[5] + 8)>>4;
1052. dst[2]= ((dst[2]<<2) + ((first + sums[1] + sums[4])<<1) + sums[6] + 8)>>4;
1053. dst[3]= ((dst[3]<<2) + ((sums[2] + sums[5])<<1) + sums[0] + sums[7] + 8)>>4;
1054. dst[4]= ((dst[4]<<2) + ((sums[3] + sums[6])<<1) + sums[1] + sums[8] + 8)>>4;
1055. dst[5]= ((dst[5]<<2) + ((last + sums[7] + sums[4])<<1) + sums[2] + 8)>>4;
1056. dst[6]= (((last + dst[6])<<2) + ((dst[7] + sums[5])<<1) + sums[3] + 8)>>4;
1057. dst[7]= ((sums[8]<<2) + ((last + sums[6])<<1) + sums[4] + 8)>>4;
  1058. dst+= stride;
  1059. temp+= TEMP_STRIDE;
  1060. }
  1061. #endif
  1062. }
  1063. static inline void dering(uint8_t src[], int stride, int QP)
  1064. {
  1065. //FIXME
  1066. #ifdef HAVE_MMX2X
  1067. asm volatile(
  1068. "leal (%0, %1), %%eax \n\t"
  1069. "leal (%%eax, %1, 4), %%ebx \n\t"
  1070. // 0 1 2 3 4 5 6 7 8 9
  1071. // %0 eax eax+%1 eax+2%1 %0+4%1 ebx ebx+%1 ebx+2%1 %0+8%1 ebx+4%1
  1072. "pcmpeq %%mm6, %%mm6 \n\t"
  1073. "pxor %%mm7, %%mm7 \n\t"
  1074. #define FIND_MIN_MAX(addr)\
  1075. "movq (" #addr "), %%mm0, \n\t"\
  1076. "pminub %%mm0, %%mm6 \n\t"\
  1077. "pmaxub %%mm0, %%mm7 \n\t"
  1078. FIND_MIN_MAX(%0)
  1079. FIND_MIN_MAX(%%eax)
  1080. FIND_MIN_MAX(%%eax, %1)
  1081. FIND_MIN_MAX(%%eax, %1, 2)
  1082. FIND_MIN_MAX(%0, %1, 4)
  1083. FIND_MIN_MAX(%%ebx)
  1084. FIND_MIN_MAX(%%ebx, %1)
  1085. FIND_MIN_MAX(%%ebx, %1, 2)
  1086. FIND_MIN_MAX(%0, %1, 8)
  1087. FIND_MIN_MAX(%%ebx, %1, 2)
  1088. "movq %%mm6, %%mm4 \n\t"
  1089. "psrlq $32, %%mm6 \n\t"
  1090. "pminub %%mm4, %%mm6 \n\t"
  1091. "movq %%mm6, %%mm4 \n\t"
  1092. "psrlq $16, %%mm6 \n\t"
  1093. "pminub %%mm4, %%mm6 \n\t"
  1094. "movq %%mm6, %%mm4 \n\t"
  1095. "psrlq $8, %%mm6 \n\t"
  1096. "pminub %%mm4, %%mm6 \n\t" // min of pixels
  1097. "movq %%mm7, %%mm4 \n\t"
  1098. "psrlq $32, %%mm7 \n\t"
  1099. "pmaxub %%mm4, %%mm7 \n\t"
  1100. "movq %%mm7, %%mm4 \n\t"
  1101. "psrlq $16, %%mm7 \n\t"
  1102. "pmaxub %%mm4, %%mm7 \n\t"
  1103. "movq %%mm7, %%mm4 \n\t"
  1104. "psrlq $8, %%mm7 \n\t"
  1105. "pmaxub %%mm4, %%mm7 \n\t" // max of pixels
  1106. "pavgb %%mm6, %%mm7 \n\t" // (max + min)/2
  1107. : : "r" (src), "r" (stride), "r" (QP)
  1108. : "%eax", "%ebx"
  1109. );
  1110. #else
  1111. //FIXME
  1112. #endif
  1113. }
  1114. /**
  1115. * ...
  1116. */
  1117. extern "C"{
  1118. void postprocess(unsigned char * src[], int src_stride,
  1119. unsigned char * dst[], int dst_stride,
  1120. int horizontal_size, int vertical_size,
  1121. QP_STORE_T *QP_store, int QP_stride,
  1122. int mode)
  1123. {
  1124. /*
  1125. long long T= rdtsc();
  1126. for(int y=vertical_size-1; y>=0 ; y--)
  1127. memcpy(dst[0] + y*src_stride, src[0] + y*src_stride,src_stride);
  1128. // memcpy(dst[0], src[0],src_stride*vertical_size);
  1129. printf("%4dk\r", (rdtsc()-T)/1000);
  1130. return;
  1131. */
  1132. /*
  1133. long long T= rdtsc();
  1134. while( (rdtsc() - T)/1000 < 4000);
  1135. return;
  1136. */
  1137. postProcess(src[0], src_stride,
  1138. dst[0], dst_stride, horizontal_size, vertical_size, QP_store, QP_stride, false);
  1139. horizontal_size >>= 1;
  1140. vertical_size >>= 1;
  1141. src_stride >>= 1;
  1142. dst_stride >>= 1;
  1143. if(1)
  1144. {
  1145. postProcess(src[1], src_stride,
  1146. dst[1], dst_stride, horizontal_size, vertical_size, QP_store, QP_stride, true);
  1147. postProcess(src[2], src_stride,
  1148. dst[2], dst_stride, horizontal_size, vertical_size, QP_store, QP_stride, true);
  1149. }
  1150. else
  1151. {
1152. memcpy(dst[1], src[1], src_stride*vertical_size);
1153. memcpy(dst[2], src[2], src_stride*vertical_size);
  1154. }
  1155. }
  1156. }
  1157. /**
  1158. * Copies a block from src to dst and fixes the blacklevel
  1159. */
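/* Explanatory note on SCALED_CPY below: it computes roughly
   dst = sat8((src - packedYOffset) * packedYScale/256); the source bytes are unpacked
   into the high byte of each word, so pmulhuw by the 8.8 fixed-point scale yields the
   scaled value directly before packing back with unsigned saturation. */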
  1160. static inline void blockCopy(uint8_t dst[], int dstStride, uint8_t src[], int srcStride)
  1161. {
  1162. #ifdef HAVE_MMX
  1163. asm volatile(
  1164. "pushl %0 \n\t"
  1165. "pushl %1 \n\t"
  1166. "leal (%2,%2), %%eax \n\t"
  1167. "leal (%3,%3), %%ebx \n\t"
  1168. "movq packedYOffset, %%mm2 \n\t"
  1169. "movq packedYScale, %%mm3 \n\t"
  1170. #define SIMPLE_CPY \
  1171. "movq (%0), %%mm0 \n\t"\
  1172. "movq (%0,%2), %%mm1 \n\t"\
  1173. "psubusb %%mm2, %%mm0 \n\t"\
  1174. "psubusb %%mm2, %%mm1 \n\t"\
  1175. "movq %%mm0, (%1) \n\t"\
  1176. "movq %%mm1, (%1, %3) \n\t"\
  1177. #define SCALED_CPY \
  1178. "movq (%0), %%mm0 \n\t"\
  1179. "movq (%0,%2), %%mm1 \n\t"\
  1180. "psubusb %%mm2, %%mm0 \n\t"\
  1181. "psubusb %%mm2, %%mm1 \n\t"\
  1182. "pxor %%mm4, %%mm4 \n\t"\
  1183. "pxor %%mm5, %%mm5 \n\t"\
  1184. "punpcklbw %%mm0, %%mm4 \n\t"\
  1185. "punpckhbw %%mm0, %%mm5 \n\t"\
  1186. "pmulhuw %%mm3, %%mm4 \n\t"\
  1187. "pmulhuw %%mm3, %%mm5 \n\t"\
  1188. "packuswb %%mm5, %%mm4 \n\t"\
  1189. "movq %%mm4, (%1) \n\t"\
  1190. "pxor %%mm4, %%mm4 \n\t"\
  1191. "pxor %%mm5, %%mm5 \n\t"\
  1192. "punpcklbw %%mm1, %%mm4 \n\t"\
  1193. "punpckhbw %%mm1, %%mm5 \n\t"\
  1194. "pmulhuw %%mm3, %%mm4 \n\t"\
  1195. "pmulhuw %%mm3, %%mm5 \n\t"\
  1196. "packuswb %%mm5, %%mm4 \n\t"\
  1197. "movq %%mm4, (%1, %3) \n\t"\
  1198. #define CPY SCALED_CPY
  1199. //#define CPY SIMPLE_CPY
  1200. // "prefetchnta 8(%0)\n\t"
  1201. CPY
  1202. "addl %%eax, %0 \n\t"
  1203. "addl %%ebx, %1 \n\t"
  1204. CPY
  1205. "addl %%eax, %0 \n\t"
  1206. "addl %%ebx, %1 \n\t"
  1207. CPY
  1208. "addl %%eax, %0 \n\t"
  1209. "addl %%ebx, %1 \n\t"
  1210. CPY
  1211. "popl %1 \n\t"
  1212. "popl %0 \n\t"
  1213. : : "r" (src),
  1214. "r" (dst),
  1215. "r" (srcStride),
  1216. "r" (dstStride)
  1217. : "%eax", "%ebx"
  1218. );
  1219. #else
1220. for(int i=0; i<BLOCK_SIZE; i++) // C fallback: plain copy, no black-level correction
  1221. memcpy( &(dst[dstStride*i]),
  1222. &(src[srcStride*i]), BLOCK_SIZE);
  1223. #endif
  1224. }
  1225. /**
  1226. * Filters array of bytes (Y or U or V values)
  1227. */
  1228. void postProcess(uint8_t src[], int srcStride, uint8_t dst[], int dstStride, int width, int height,
  1229. QP_STORE_T QPs[], int QPStride, bool isColor)
  1230. {
  1231. #ifdef TIMEING
  1232. long long T0, T1, memcpyTime=0, vertTime=0, horizTime=0, sumTime, diffTime=0;
  1233. sumTime= rdtsc();
  1234. #endif
1235. /* we need 64 bits here, otherwise we'll have a problem
1236. after watching a black picture for 5 hours */
  1237. static uint64_t *yHistogram= NULL;
  1238. if(!yHistogram)
  1239. {
  1240. yHistogram= new uint64_t[256];
  1241. for(int i=0; i<256; i++) yHistogram[i]= width*height/64/256;
  1242. }
  1243. int black=0, white=255; // blackest black and whitest white in the picture
  1244. if(!isColor)
  1245. {
  1246. uint64_t sum= 0;
  1247. for(int i=0; i<256; i++)
  1248. sum+= yHistogram[i];
  1249. uint64_t maxClipped= (uint64_t)(sum * maxClippedThreshold);
  1250. uint64_t clipped= sum;
  1251. for(black=255; black>0; black--)
  1252. {
  1253. if(clipped < maxClipped) break;
  1254. clipped-= yHistogram[black];
  1255. }
  1256. clipped= sum;
  1257. for(white=0; white<256; white++)
  1258. {
  1259. if(clipped < maxClipped) break;
  1260. clipped-= yHistogram[white];
  1261. }
1262. // we can't handle negative corrections
  1263. packedYOffset= MAX(black - minAllowedY, 0);
  1264. packedYOffset|= packedYOffset<<32;
  1265. packedYOffset|= packedYOffset<<16;
  1266. packedYOffset|= packedYOffset<<8;
  1267. // uint64_t scale= (int)(256.0*256.0/(white-black) + 0.5);
  1268. double scale= (double)(maxAllowedY - minAllowedY) / (double)(white-black);
  1269. packedYScale= uint16_t(scale*256.0 + 0.5);
  1270. packedYScale|= packedYScale<<32;
  1271. packedYScale|= packedYScale<<16;
  1272. }
  1273. else
  1274. {
  1275. packedYScale= 0x0100010001000100LL;
  1276. packedYOffset= 0;
  1277. }
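// Net effect of the luma correction set up above: each output pixel becomes roughly
// (Y - packedYOffset) * packedYScale/256 (applied in blockCopy), stretching the
// measured [black..white] range towards [minAllowedY..maxAllowedY].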
  1278. for(int x=0; x<width; x+=BLOCK_SIZE)
  1279. blockCopy(dst + x, dstStride, src + x, srcStride);
  1280. for(int y=0; y<height; y+=BLOCK_SIZE)
  1281. {
  1282. //1% speedup if these are here instead of the inner loop
  1283. uint8_t *srcBlock= &(src[y*srcStride]);
  1284. uint8_t *dstBlock= &(dst[y*dstStride]);
  1285. uint8_t *vertSrcBlock= &(srcBlock[srcStride*3]); // Blocks are 10x8 -> *3 to start
  1286. uint8_t *vertBlock= &(dstBlock[dstStride*3]);
1287. // finish 1 block before the next, otherwise we might have a problem
1288. // with the L1 cache of the P4 ... or only do a few blocks at a time or something
  1289. for(int x=0; x<width; x+=BLOCK_SIZE)
  1290. {
  1291. int QP= isColor ?
  1292. QPs[(y>>3)*QPStride + (x>>3)]:
  1293. (QPs[(y>>4)*QPStride + (x>>4)] * (packedYScale &0xFFFF))>>8;
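// For luma the per-block QP is rescaled by the same contrast factor (packedYScale>>8),
// presumably so the filter thresholds keep matching the stretched pixel values.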
  1294. #ifdef HAVE_MMX
  1295. asm volatile(
  1296. "movd %0, %%mm7 \n\t"
  1297. "packuswb %%mm7, %%mm7 \n\t" // 0, 0, 0, QP, 0, 0, 0, QP
  1298. "packuswb %%mm7, %%mm7 \n\t" // 0,QP, 0, QP, 0,QP, 0, QP
  1299. "packuswb %%mm7, %%mm7 \n\t" // QP,..., QP
  1300. "movq %%mm7, pQPb \n\t"
  1301. : : "r" (QP)
  1302. );
  1303. #endif
  1304. const int stride= dstStride;
  1305. if(y + 12 < height)
  1306. {
  1307. #ifdef MORE_TIMEING
  1308. T0= rdtsc();
  1309. #endif
  1310. #ifdef HAVE_MMX2
  1311. prefetchnta(vertSrcBlock + (((x>>3)&3) + 2)*srcStride + 32);
  1312. prefetchnta(vertSrcBlock + (((x>>3)&3) + 6)*srcStride + 32);
  1313. prefetcht0(vertBlock + (((x>>3)&3) + 2)*dstStride + 32);
  1314. prefetcht0(vertBlock + (((x>>3)&3) + 6)*dstStride + 32);
  1315. #endif
  1316. if(!isColor) yHistogram[ srcBlock[0] ]++;
  1317. blockCopy(vertBlock + dstStride*2, dstStride,
  1318. vertSrcBlock + srcStride*2, srcStride);
  1319. #ifdef MORE_TIMEING
  1320. T1= rdtsc();
  1321. memcpyTime+= T1-T0;
  1322. T0=T1;
  1323. #endif
  1324. if( isVertDC(vertBlock, stride))
  1325. {
  1326. if(isVertMinMaxOk(vertBlock, stride, QP))
  1327. doVertLowPass(vertBlock, stride, QP);
  1328. }
  1329. else if(x<width)
  1330. doVertDefFilter(vertBlock, stride, QP);
  1331. #ifdef MORE_TIMEING
  1332. T1= rdtsc();
  1333. vertTime+= T1-T0;
  1334. T0=T1;
  1335. #endif
  1336. }
  1337. else
  1338. {
1339. for(int i=2; i<BLOCK_SIZE/2+1; i++) // last 10x8 Block is copied already so +2
  1340. memcpy( &(vertBlock[dstStride*i]),
  1341. &(vertSrcBlock[srcStride*i]), BLOCK_SIZE);
  1342. }
  1343. if(x - 8 >= 0 && x<width)
  1344. {
  1345. #ifdef MORE_TIMEING
  1346. T0= rdtsc();
  1347. #endif
  1348. if( isHorizDCAndCopy2Temp(dstBlock-4, stride))
  1349. {
  1350. if(isHorizMinMaxOk(tempBlock, TEMP_STRIDE, QP))
  1351. doHorizLowPassAndCopyBack(dstBlock-4, stride, QP);
  1352. }
  1353. else
  1354. doHorizDefFilterAndCopyBack(dstBlock-4, stride, QP);
  1355. #ifdef MORE_TIMEING
  1356. T1= rdtsc();
  1357. horizTime+= T1-T0;
  1358. T0=T1;
  1359. #endif
  1360. dering(dstBlock - 9 - stride, stride, QP);
  1361. }
  1362. else if(y!=0)
  1363. dering(dstBlock - stride*9 + width-9, stride, QP);
  1364. //FIXME dering filter will not be applied to last block (bottom right)
  1365. dstBlock+=8;
  1366. srcBlock+=8;
  1367. vertBlock+=8;
  1368. vertSrcBlock+=8;
  1369. }
  1370. }
  1371. #ifdef HAVE_MMX
  1372. asm volatile("emms");
  1373. #endif
  1374. #ifdef TIMEING
  1375. // FIXME diff is mostly the time spent for rdtsc (should subtract that but ...)
  1376. sumTime= rdtsc() - sumTime;
  1377. if(!isColor)
  1378. printf("cpy:%4dk, vert:%4dk, horiz:%4dk, sum:%4dk, diff:%4dk, color: %d/%d \r",
  1379. int(memcpyTime/1000), int(vertTime/1000), int(horizTime/1000),
  1380. int(sumTime/1000), int((sumTime-memcpyTime-vertTime-horizTime)/1000)
  1381. , black, white);
  1382. #endif
  1383. }